Dataset schema (column name: type and summary statistics):
- repository: string, 11 distinct values
- repo_id: string, 1 to 3 characters
- target_module_path: string, 16 to 72 characters
- prompt: string, 298 to 21.7k characters
- relavent_test_path: string, 50 to 99 characters
- full_function: string, 336 to 33.8k characters
- function_name: string, 2 to 51 characters
sympy
17
sympy/integrals/integrals.py
def as_sum(self, n=None, method="midpoint", evaluate=True): """ Approximates a definite integral by a sum. Parameters ========== n : The number of subintervals to use, optional. method : One of: 'left', 'right', 'midpoint', 'trapezoid'. evaluate : bool If False, returns an unevaluated Sum expression. The default is True, evaluate the sum. Notes ===== These methods of approximate integration are described in [1]. Examples ======== >>> from sympy import Integral, sin, sqrt >>> from sympy.abc import x, n >>> e = Integral(sin(x), (x, 3, 7)) >>> e Integral(sin(x), (x, 3, 7)) For demonstration purposes, this interval will only be split into 2 regions, bounded by [3, 5] and [5, 7]. The left-hand rule uses function evaluations at the left of each interval: >>> e.as_sum(2, 'left') 2*sin(5) + 2*sin(3) The midpoint rule uses evaluations at the center of each interval: >>> e.as_sum(2, 'midpoint') 2*sin(4) + 2*sin(6) The right-hand rule uses function evaluations at the right of each interval: >>> e.as_sum(2, 'right') 2*sin(5) + 2*sin(7) The trapezoid rule uses function evaluations on both sides of the intervals. This is equivalent to taking the average of the left and right hand rule results: >>> s = e.as_sum(2, 'trapezoid') >>> s 2*sin(5) + sin(3) + sin(7) >>> (e.as_sum(2, 'left') + e.as_sum(2, 'right'))/2 == s True Here, the discontinuity at x = 0 can be avoided by using the midpoint or right-hand method: >>> e = Integral(1/sqrt(x), (x, 0, 1)) >>> e.as_sum(5).n(4) 1.730 >>> e.as_sum(10).n(4) 1.809 >>> e.doit().n(4) # the actual value is 2 2.000 The left- or trapezoid method will encounter the discontinuity and return infinity: >>> e.as_sum(5, 'left') zoo The number of intervals can be symbolic. If omitted, a dummy symbol will be used for it. >>> e = Integral(x**2, (x, 0, 2)) >>> e.as_sum(n, 'right').expand() 8/3 + 4/n + 4/(3*n**2) This shows that the midpoint rule is more accurate, as its error term decays as the square of n: >>> e.as_sum(method='midpoint').expand() 8/3 - 2/(3*_n**2) A symbolic sum is returned with evaluate=False: >>> e.as_sum(n, 'midpoint', evaluate=False) 2*Sum((2*_k/n - 1/n)**2, (_k, 1, n))/n See Also ======== Integral.doit : Perform the integration using any hints References ========== .. [1] https://en.wikipedia.org/wiki/Riemann_sum#Riemann_summation_methods """
/usr/src/app/target_test_cases/failed_tests_Integral.as_sum.txt
def as_sum(self, n=None, method="midpoint", evaluate=True): """ Approximates a definite integral by a sum. Parameters ========== n : The number of subintervals to use, optional. method : One of: 'left', 'right', 'midpoint', 'trapezoid'. evaluate : bool If False, returns an unevaluated Sum expression. The default is True, evaluate the sum. Notes ===== These methods of approximate integration are described in [1]. Examples ======== >>> from sympy import Integral, sin, sqrt >>> from sympy.abc import x, n >>> e = Integral(sin(x), (x, 3, 7)) >>> e Integral(sin(x), (x, 3, 7)) For demonstration purposes, this interval will only be split into 2 regions, bounded by [3, 5] and [5, 7]. The left-hand rule uses function evaluations at the left of each interval: >>> e.as_sum(2, 'left') 2*sin(5) + 2*sin(3) The midpoint rule uses evaluations at the center of each interval: >>> e.as_sum(2, 'midpoint') 2*sin(4) + 2*sin(6) The right-hand rule uses function evaluations at the right of each interval: >>> e.as_sum(2, 'right') 2*sin(5) + 2*sin(7) The trapezoid rule uses function evaluations on both sides of the intervals. This is equivalent to taking the average of the left and right hand rule results: >>> s = e.as_sum(2, 'trapezoid') >>> s 2*sin(5) + sin(3) + sin(7) >>> (e.as_sum(2, 'left') + e.as_sum(2, 'right'))/2 == s True Here, the discontinuity at x = 0 can be avoided by using the midpoint or right-hand method: >>> e = Integral(1/sqrt(x), (x, 0, 1)) >>> e.as_sum(5).n(4) 1.730 >>> e.as_sum(10).n(4) 1.809 >>> e.doit().n(4) # the actual value is 2 2.000 The left- or trapezoid method will encounter the discontinuity and return infinity: >>> e.as_sum(5, 'left') zoo The number of intervals can be symbolic. If omitted, a dummy symbol will be used for it. >>> e = Integral(x**2, (x, 0, 2)) >>> e.as_sum(n, 'right').expand() 8/3 + 4/n + 4/(3*n**2) This shows that the midpoint rule is more accurate, as its error term decays as the square of n: >>> e.as_sum(method='midpoint').expand() 8/3 - 2/(3*_n**2) A symbolic sum is returned with evaluate=False: >>> e.as_sum(n, 'midpoint', evaluate=False) 2*Sum((2*_k/n - 1/n)**2, (_k, 1, n))/n See Also ======== Integral.doit : Perform the integration using any hints References ========== .. [1] https://en.wikipedia.org/wiki/Riemann_sum#Riemann_summation_methods """ from sympy.concrete.summations import Sum limits = self.limits if len(limits) > 1: raise NotImplementedError( "Multidimensional midpoint rule not implemented yet") else: limit = limits[0] if (len(limit) != 3 or limit[1].is_finite is False or limit[2].is_finite is False): raise ValueError("Expecting a definite integral over " "a finite interval.") if n is None: n = Dummy('n', integer=True, positive=True) else: n = sympify(n) if (n.is_positive is False or n.is_integer is False or n.is_finite is False): raise ValueError("n must be a positive integer, got %s" % n) x, a, b = limit dx = (b - a)/n k = Dummy('k', integer=True, positive=True) f = self.function if method == "left": result = dx*Sum(f.subs(x, a + (k-1)*dx), (k, 1, n)) elif method == "right": result = dx*Sum(f.subs(x, a + k*dx), (k, 1, n)) elif method == "midpoint": result = dx*Sum(f.subs(x, a + k*dx - dx/2), (k, 1, n)) elif method == "trapezoid": result = dx*((f.subs(x, a) + f.subs(x, b))/2 + Sum(f.subs(x, a + k*dx), (k, 1, n - 1))) else: raise ValueError("Unknown method %s" % method) return result.doit() if evaluate else result
Integral.as_sum
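A short runnable sketch based on the docstring above, showing how the midpoint rule from Integral.as_sum converges to the exact integral as the number of subintervals grows (the particular loop values are illustrative):

from sympy import Integral, sin, symbols

x = symbols('x')
e = Integral(sin(x), (x, 3, 7))
exact = float(e.doit())
for n in (2, 8, 32, 128):
    approx = float(e.as_sum(n, 'midpoint'))
    # the midpoint-rule error shrinks roughly as 1/n**2
    print(n, approx, abs(approx - exact))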
sympy
18
sympy/physics/mechanics/pathway.py
def to_loads(self, force): """Loads required by the equations of motion method classes. Explanation =========== ``KanesMethod`` requires a list of ``Point``-``Vector`` tuples to be passed to the ``loads`` parameters of its ``kanes_equations`` method when constructing the equations of motion. This method acts as a utility to produce the correctly-structred pairs of points and vectors required so that these can be easily concatenated with other items in the list of loads and passed to ``KanesMethod.kanes_equations``. These loads are also in the correct form to also be passed to the other equations of motion method classes, e.g. ``LagrangesMethod``. Examples ======== The below example shows how to generate the loads produced in an actuator that follows an obstacle-set pathway between four points and produces an expansile force ``F``. First, create a pair of reference frames, ``A`` and ``B``, in which the four points ``pA``, ``pB``, ``pC``, and ``pD`` will be located. The first two points in frame ``A`` and the second two in frame ``B``. Frame ``B`` will also be oriented such that it relates to ``A`` via a rotation of ``q`` about an axis ``N.z`` in a global frame (``N.z``, ``A.z``, and ``B.z`` are parallel). >>> from sympy.physics.mechanics import (ObstacleSetPathway, Point, ... ReferenceFrame) >>> from sympy.physics.vector import dynamicsymbols >>> q = dynamicsymbols('q') >>> N = ReferenceFrame('N') >>> N = ReferenceFrame('N') >>> A = N.orientnew('A', 'axis', (0, N.x)) >>> B = A.orientnew('B', 'axis', (q, N.z)) >>> pO = Point('pO') >>> pA, pB, pC, pD = Point('pA'), Point('pB'), Point('pC'), Point('pD') >>> pA.set_pos(pO, A.x) >>> pB.set_pos(pO, -A.y) >>> pC.set_pos(pO, B.y) >>> pD.set_pos(pO, B.x) >>> obstacle_set_pathway = ObstacleSetPathway(pA, pB, pC, pD) Now create a symbol ``F`` to describe the magnitude of the (expansile) force that will be produced along the pathway. The list of loads that ``KanesMethod`` requires can be produced by calling the pathway's ``to_loads`` method with ``F`` passed as the only argument. >>> from sympy import Symbol >>> F = Symbol('F') >>> obstacle_set_pathway.to_loads(F) [(pA, sqrt(2)*F/2*A.x + sqrt(2)*F/2*A.y), (pB, - sqrt(2)*F/2*A.x - sqrt(2)*F/2*A.y), (pB, - F/sqrt(2*cos(q(t)) + 2)*A.y - F/sqrt(2*cos(q(t)) + 2)*B.y), (pC, F/sqrt(2*cos(q(t)) + 2)*A.y + F/sqrt(2*cos(q(t)) + 2)*B.y), (pC, - sqrt(2)*F/2*B.x + sqrt(2)*F/2*B.y), (pD, sqrt(2)*F/2*B.x - sqrt(2)*F/2*B.y)] Parameters ========== force : Expr The force acting along the length of the pathway. It is assumed that this ``Expr`` represents an expansile force. """
/usr/src/app/target_test_cases/failed_tests_ObstacleSetPathway.to_loads.txt
def to_loads(self, force): """Loads required by the equations of motion method classes. Explanation =========== ``KanesMethod`` requires a list of ``Point``-``Vector`` tuples to be passed to the ``loads`` parameters of its ``kanes_equations`` method when constructing the equations of motion. This method acts as a utility to produce the correctly-structred pairs of points and vectors required so that these can be easily concatenated with other items in the list of loads and passed to ``KanesMethod.kanes_equations``. These loads are also in the correct form to also be passed to the other equations of motion method classes, e.g. ``LagrangesMethod``. Examples ======== The below example shows how to generate the loads produced in an actuator that follows an obstacle-set pathway between four points and produces an expansile force ``F``. First, create a pair of reference frames, ``A`` and ``B``, in which the four points ``pA``, ``pB``, ``pC``, and ``pD`` will be located. The first two points in frame ``A`` and the second two in frame ``B``. Frame ``B`` will also be oriented such that it relates to ``A`` via a rotation of ``q`` about an axis ``N.z`` in a global frame (``N.z``, ``A.z``, and ``B.z`` are parallel). >>> from sympy.physics.mechanics import (ObstacleSetPathway, Point, ... ReferenceFrame) >>> from sympy.physics.vector import dynamicsymbols >>> q = dynamicsymbols('q') >>> N = ReferenceFrame('N') >>> N = ReferenceFrame('N') >>> A = N.orientnew('A', 'axis', (0, N.x)) >>> B = A.orientnew('B', 'axis', (q, N.z)) >>> pO = Point('pO') >>> pA, pB, pC, pD = Point('pA'), Point('pB'), Point('pC'), Point('pD') >>> pA.set_pos(pO, A.x) >>> pB.set_pos(pO, -A.y) >>> pC.set_pos(pO, B.y) >>> pD.set_pos(pO, B.x) >>> obstacle_set_pathway = ObstacleSetPathway(pA, pB, pC, pD) Now create a symbol ``F`` to describe the magnitude of the (expansile) force that will be produced along the pathway. The list of loads that ``KanesMethod`` requires can be produced by calling the pathway's ``to_loads`` method with ``F`` passed as the only argument. >>> from sympy import Symbol >>> F = Symbol('F') >>> obstacle_set_pathway.to_loads(F) [(pA, sqrt(2)*F/2*A.x + sqrt(2)*F/2*A.y), (pB, - sqrt(2)*F/2*A.x - sqrt(2)*F/2*A.y), (pB, - F/sqrt(2*cos(q(t)) + 2)*A.y - F/sqrt(2*cos(q(t)) + 2)*B.y), (pC, F/sqrt(2*cos(q(t)) + 2)*A.y + F/sqrt(2*cos(q(t)) + 2)*B.y), (pC, - sqrt(2)*F/2*B.x + sqrt(2)*F/2*B.y), (pD, sqrt(2)*F/2*B.x - sqrt(2)*F/2*B.y)] Parameters ========== force : Expr The force acting along the length of the pathway. It is assumed that this ``Expr`` represents an expansile force. """ loads = [] attachment_pairs = zip(self.attachments[:-1], self.attachments[1:]) for attachment_pair in attachment_pairs: relative_position = _point_pair_relative_position(*attachment_pair) length = _point_pair_length(*attachment_pair) loads.extend([ Force(attachment_pair[0], -force*relative_position/length), Force(attachment_pair[1], force*relative_position/length), ]) return loads
ObstacleSetPathway.to_loads
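A small sketch reusing the docstring's four-point setup to check that the loads returned by ObstacleSetPathway.to_loads cancel segment by segment, so their vector sum is the zero vector (the final summation and print are my illustrative addition):

from sympy import Symbol
from sympy.physics.mechanics import ObstacleSetPathway, Point, ReferenceFrame
from sympy.physics.vector import dynamicsymbols

q = dynamicsymbols('q')
N = ReferenceFrame('N')
A = N.orientnew('A', 'axis', (0, N.x))
B = A.orientnew('B', 'axis', (q, N.z))
pO = Point('pO')
pA, pB, pC, pD = Point('pA'), Point('pB'), Point('pC'), Point('pD')
pA.set_pos(pO, A.x)
pB.set_pos(pO, -A.y)
pC.set_pos(pO, B.y)
pD.set_pos(pO, B.x)
F = Symbol('F')
loads = ObstacleSetPathway(pA, pB, pC, pD).to_loads(F)
total = sum((load.vector for load in loads), 0 * N.x)  # start from the zero Vector
print(total.simplify())  # expected: 0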
sympy
19
sympy/logic/boolalg.py
def POSform(variables, minterms, dontcares=None): """ The POSform function uses simplified_pairs and a redundant-group eliminating algorithm to convert the list of all input combinations that generate '1' (the minterms) into the smallest product-of-sums form. The variables must be given as the first argument. Return a logical :py:class:`~.And` function (i.e., the "product of sums" or "POS" form) that gives the desired outcome. If there are inputs that can be ignored, pass them as a list, too. The result will be one of the (perhaps many) functions that satisfy the conditions. Examples ======== >>> from sympy.logic import POSform >>> from sympy import symbols >>> w, x, y, z = symbols('w x y z') >>> minterms = [[0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1], ... [1, 0, 1, 1], [1, 1, 1, 1]] >>> dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]] >>> POSform([w, x, y, z], minterms, dontcares) z & (y | ~w) The terms can also be represented as integers: >>> minterms = [1, 3, 7, 11, 15] >>> dontcares = [0, 2, 5] >>> POSform([w, x, y, z], minterms, dontcares) z & (y | ~w) They can also be specified using dicts, which does not have to be fully specified: >>> minterms = [{w: 0, x: 1}, {y: 1, z: 1, x: 0}] >>> POSform([w, x, y, z], minterms) (x | y) & (x | z) & (~w | ~x) Or a combination: >>> minterms = [4, 7, 11, [1, 1, 1, 1]] >>> dontcares = [{w : 0, x : 0, y: 0}, 5] >>> POSform([w, x, y, z], minterms, dontcares) (w | x) & (y | ~w) & (z | ~y) See also ======== SOPform References ========== .. [1] https://en.wikipedia.org/wiki/Quine-McCluskey_algorithm .. [2] https://en.wikipedia.org/wiki/Don%27t-care_term """
/usr/src/app/target_test_cases/failed_tests_POSform.txt
def POSform(variables, minterms, dontcares=None): """ The POSform function uses simplified_pairs and a redundant-group eliminating algorithm to convert the list of all input combinations that generate '1' (the minterms) into the smallest product-of-sums form. The variables must be given as the first argument. Return a logical :py:class:`~.And` function (i.e., the "product of sums" or "POS" form) that gives the desired outcome. If there are inputs that can be ignored, pass them as a list, too. The result will be one of the (perhaps many) functions that satisfy the conditions. Examples ======== >>> from sympy.logic import POSform >>> from sympy import symbols >>> w, x, y, z = symbols('w x y z') >>> minterms = [[0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 1, 1], ... [1, 0, 1, 1], [1, 1, 1, 1]] >>> dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]] >>> POSform([w, x, y, z], minterms, dontcares) z & (y | ~w) The terms can also be represented as integers: >>> minterms = [1, 3, 7, 11, 15] >>> dontcares = [0, 2, 5] >>> POSform([w, x, y, z], minterms, dontcares) z & (y | ~w) They can also be specified using dicts, which does not have to be fully specified: >>> minterms = [{w: 0, x: 1}, {y: 1, z: 1, x: 0}] >>> POSform([w, x, y, z], minterms) (x | y) & (x | z) & (~w | ~x) Or a combination: >>> minterms = [4, 7, 11, [1, 1, 1, 1]] >>> dontcares = [{w : 0, x : 0, y: 0}, 5] >>> POSform([w, x, y, z], minterms, dontcares) (w | x) & (y | ~w) & (z | ~y) See also ======== SOPform References ========== .. [1] https://en.wikipedia.org/wiki/Quine-McCluskey_algorithm .. [2] https://en.wikipedia.org/wiki/Don%27t-care_term """ if not minterms: return false variables = tuple(map(sympify, variables)) minterms = _input_to_binlist(minterms, variables) dontcares = _input_to_binlist((dontcares or []), variables) for d in dontcares: if d in minterms: raise ValueError('%s in minterms is also in dontcares' % d) maxterms = [] for t in product((0, 1), repeat=len(variables)): t = list(t) if (t not in minterms) and (t not in dontcares): maxterms.append(t) new = _simplified_pairs(maxterms + dontcares) essential = _rem_redundancy(new, maxterms) return And(*[_convert_to_varsPOS(x, variables) for x in essential])
POSform
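A brute-force check, assuming only the documented POSform behaviour: on every input that is not a don't-care, the returned product-of-sums expression must agree with the minterm specification.

from itertools import product
from sympy import symbols, true, false
from sympy.logic import POSform

w, x, y, z = symbols('w x y z')
minterms = [1, 3, 7, 11, 15]
dontcares = [0, 2, 5]
expr = POSform([w, x, y, z], minterms, dontcares)  # z & (y | ~w)
for bits in product((0, 1), repeat=4):
    index = int(''.join(map(str, bits)), 2)  # first variable is the most significant bit
    if index in dontcares:
        continue  # don't-cares may evaluate either way
    env = {s: (true if b else false) for s, b in zip((w, x, y, z), bits)}
    assert bool(expr.subs(env)) == (index in minterms)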
sympy
20
sympy/combinatorics/perm_groups.py
def schreier_sims_incremental(self, base=None, gens=None, slp_dict=False): """Extend a sequence of points and generating set to a base and strong generating set. Parameters ========== base The sequence of points to be extended to a base. Optional parameter with default value ``[]``. gens The generating set to be extended to a strong generating set relative to the base obtained. Optional parameter with default value ``self.generators``. slp_dict If `True`, return a dictionary `{g: gens}` for each strong generator `g` where `gens` is a list of strong generators coming before `g` in `strong_gens`, such that the product of the elements of `gens` is equal to `g`. Returns ======= (base, strong_gens) ``base`` is the base obtained, and ``strong_gens`` is the strong generating set relative to it. The original parameters ``base``, ``gens`` remain unchanged. Examples ======== >>> from sympy.combinatorics.named_groups import AlternatingGroup >>> from sympy.combinatorics.testutil import _verify_bsgs >>> A = AlternatingGroup(7) >>> base = [2, 3] >>> seq = [2, 3] >>> base, strong_gens = A.schreier_sims_incremental(base=seq) >>> _verify_bsgs(A, base, strong_gens) True >>> base[:2] [2, 3] Notes ===== This version of the Schreier-Sims algorithm runs in polynomial time. There are certain assumptions in the implementation - if the trivial group is provided, ``base`` and ``gens`` are returned immediately, as any sequence of points is a base for the trivial group. If the identity is present in the generators ``gens``, it is removed as it is a redundant generator. The implementation is described in [1], pp. 90-93. See Also ======== schreier_sims, schreier_sims_random """
/usr/src/app/target_test_cases/failed_tests_PermutationGroup.schreier_sims_incremental.txt
def schreier_sims_incremental(self, base=None, gens=None, slp_dict=False): """Extend a sequence of points and generating set to a base and strong generating set. Parameters ========== base The sequence of points to be extended to a base. Optional parameter with default value ``[]``. gens The generating set to be extended to a strong generating set relative to the base obtained. Optional parameter with default value ``self.generators``. slp_dict If `True`, return a dictionary `{g: gens}` for each strong generator `g` where `gens` is a list of strong generators coming before `g` in `strong_gens`, such that the product of the elements of `gens` is equal to `g`. Returns ======= (base, strong_gens) ``base`` is the base obtained, and ``strong_gens`` is the strong generating set relative to it. The original parameters ``base``, ``gens`` remain unchanged. Examples ======== >>> from sympy.combinatorics.named_groups import AlternatingGroup >>> from sympy.combinatorics.testutil import _verify_bsgs >>> A = AlternatingGroup(7) >>> base = [2, 3] >>> seq = [2, 3] >>> base, strong_gens = A.schreier_sims_incremental(base=seq) >>> _verify_bsgs(A, base, strong_gens) True >>> base[:2] [2, 3] Notes ===== This version of the Schreier-Sims algorithm runs in polynomial time. There are certain assumptions in the implementation - if the trivial group is provided, ``base`` and ``gens`` are returned immediately, as any sequence of points is a base for the trivial group. If the identity is present in the generators ``gens``, it is removed as it is a redundant generator. The implementation is described in [1], pp. 90-93. See Also ======== schreier_sims, schreier_sims_random """ if base is None: base = [] if gens is None: gens = self.generators[:] degree = self.degree id_af = list(range(degree)) # handle the trivial group if len(gens) == 1 and gens[0].is_Identity: if slp_dict: return base, gens, {gens[0]: [gens[0]]} return base, gens # prevent side effects _base, _gens = base[:], gens[:] # remove the identity as a generator _gens = [x for x in _gens if not x.is_Identity] # make sure no generator fixes all base points for gen in _gens: if all(x == gen._array_form[x] for x in _base): for new in id_af: if gen._array_form[new] != new: break else: assert None # can this ever happen? 
_base.append(new) # distribute generators according to basic stabilizers strong_gens_distr = _distribute_gens_by_base(_base, _gens) strong_gens_slp = [] # initialize the basic stabilizers, basic orbits and basic transversals orbs = {} transversals = {} slps = {} base_len = len(_base) for i in range(base_len): transversals[i], slps[i] = _orbit_transversal(degree, strong_gens_distr[i], _base[i], pairs=True, af=True, slp=True) transversals[i] = dict(transversals[i]) orbs[i] = list(transversals[i].keys()) # main loop: amend the stabilizer chain until we have generators # for all stabilizers i = base_len - 1 while i >= 0: # this flag is used to continue with the main loop from inside # a nested loop continue_i = False # test the generators for being a strong generating set db = {} for beta, u_beta in list(transversals[i].items()): for j, gen in enumerate(strong_gens_distr[i]): gb = gen._array_form[beta] u1 = transversals[i][gb] g1 = _af_rmul(gen._array_form, u_beta) slp = [(i, g) for g in slps[i][beta]] slp = [(i, j)] + slp if g1 != u1: # test if the schreier generator is in the i+1-th # would-be basic stabilizer y = True try: u1_inv = db[gb] except KeyError: u1_inv = db[gb] = _af_invert(u1) schreier_gen = _af_rmul(u1_inv, g1) u1_inv_slp = slps[i][gb][:] u1_inv_slp.reverse() u1_inv_slp = [(i, (g,)) for g in u1_inv_slp] slp = u1_inv_slp + slp h, j, slp = _strip_af(schreier_gen, _base, orbs, transversals, i, slp=slp, slps=slps) if j <= base_len: # new strong generator h at level j y = False elif h: # h fixes all base points y = False moved = 0 while h[moved] == moved: moved += 1 _base.append(moved) base_len += 1 strong_gens_distr.append([]) if y is False: # if a new strong generator is found, update the # data structures and start over h = _af_new(h) strong_gens_slp.append((h, slp)) for l in range(i + 1, j): strong_gens_distr[l].append(h) transversals[l], slps[l] =\ _orbit_transversal(degree, strong_gens_distr[l], _base[l], pairs=True, af=True, slp=True) transversals[l] = dict(transversals[l]) orbs[l] = list(transversals[l].keys()) i = j - 1 # continue main loop using the flag continue_i = True if continue_i is True: break if continue_i is True: break if continue_i is True: continue i -= 1 strong_gens = _gens[:] if slp_dict: # create the list of the strong generators strong_gens and # rewrite the indices of strong_gens_slp in terms of the # elements of strong_gens for k, slp in strong_gens_slp: strong_gens.append(k) for i in range(len(slp)): s = slp[i] if isinstance(s[1], tuple): slp[i] = strong_gens_distr[s[0]][s[1][0]]**-1 else: slp[i] = strong_gens_distr[s[0]][s[1]] strong_gens_slp = dict(strong_gens_slp) # add the original generators for g in _gens: strong_gens_slp[g] = [g] return (_base, strong_gens, strong_gens_slp) strong_gens.extend([k for k, _ in strong_gens_slp]) return _base, strong_gens
PermutationGroup.schreier_sims_incremental
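A compact sketch following the docstring's AlternatingGroup example: verify the returned base and strong generating set, and confirm that the strong generators still generate the same group (the order comparison is my illustrative addition):

from sympy.combinatorics import PermutationGroup
from sympy.combinatorics.named_groups import AlternatingGroup
from sympy.combinatorics.testutil import _verify_bsgs

A = AlternatingGroup(7)
base, strong_gens = A.schreier_sims_incremental(base=[2, 3])
assert _verify_bsgs(A, base, strong_gens)
assert PermutationGroup(strong_gens).order() == A.order()
print(base[:2])  # [2, 3] -- the supplied points stay as a prefix of the base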
sympy
21
sympy/polys/polytools.py
def which_all_roots(f, candidates): """ Find roots of a square-free polynomial ``f`` from ``candidates``. Explanation =========== If ``f`` is a square-free polynomial and ``candidates`` is a superset of the roots of ``f``, then ``f.which_all_roots(candidates)`` returns a list containing exactly the set of roots of ``f``. The polynomial``f`` must be univariate and square-free. The list ``candidates`` must be a superset of the complex roots of ``f`` and ``f.which_all_roots(candidates)`` returns exactly the set of all complex roots of ``f``. The output preserves the order of the order of ``candidates``. Examples ======== >>> from sympy import Poly, I >>> from sympy.abc import x >>> f = Poly(x**4 - 1) >>> f.which_all_roots([-1, 1, -I, I, 0]) [-1, 1, -I, I] >>> f.which_all_roots([-1, 1, -I, I, I, I]) [-1, 1, -I, I] This method is useful as lifting to rational coefficients produced extraneous roots, which we can filter out with this method. >>> f = Poly(x**2 + I*x - 1, x, extension=True) >>> f.lift() Poly(x**4 - x**2 + 1, x, domain='ZZ') >>> f.lift().all_roots() [CRootOf(x**4 - x**2 + 1, 0), CRootOf(x**4 - x**2 + 1, 1), CRootOf(x**4 - x**2 + 1, 2), CRootOf(x**4 - x**2 + 1, 3)] >>> f.which_all_roots(f.lift().all_roots()) [CRootOf(x**4 - x**2 + 1, 0), CRootOf(x**4 - x**2 + 1, 2)] This procedure is already done internally when calling `.all_roots()` on a polynomial with algebraic coefficients, or polynomials with Gaussian domains. >>> f.all_roots() [CRootOf(x**4 - x**2 + 1, 0), CRootOf(x**4 - x**2 + 1, 2)] See Also ======== same_root which_real_roots """
/usr/src/app/target_test_cases/failed_tests_Poly.which_all_roots.txt
def which_all_roots(f, candidates): """ Find roots of a square-free polynomial ``f`` from ``candidates``. Explanation =========== If ``f`` is a square-free polynomial and ``candidates`` is a superset of the roots of ``f``, then ``f.which_all_roots(candidates)`` returns a list containing exactly the set of roots of ``f``. The polynomial``f`` must be univariate and square-free. The list ``candidates`` must be a superset of the complex roots of ``f`` and ``f.which_all_roots(candidates)`` returns exactly the set of all complex roots of ``f``. The output preserves the order of the order of ``candidates``. Examples ======== >>> from sympy import Poly, I >>> from sympy.abc import x >>> f = Poly(x**4 - 1) >>> f.which_all_roots([-1, 1, -I, I, 0]) [-1, 1, -I, I] >>> f.which_all_roots([-1, 1, -I, I, I, I]) [-1, 1, -I, I] This method is useful as lifting to rational coefficients produced extraneous roots, which we can filter out with this method. >>> f = Poly(x**2 + I*x - 1, x, extension=True) >>> f.lift() Poly(x**4 - x**2 + 1, x, domain='ZZ') >>> f.lift().all_roots() [CRootOf(x**4 - x**2 + 1, 0), CRootOf(x**4 - x**2 + 1, 1), CRootOf(x**4 - x**2 + 1, 2), CRootOf(x**4 - x**2 + 1, 3)] >>> f.which_all_roots(f.lift().all_roots()) [CRootOf(x**4 - x**2 + 1, 0), CRootOf(x**4 - x**2 + 1, 2)] This procedure is already done internally when calling `.all_roots()` on a polynomial with algebraic coefficients, or polynomials with Gaussian domains. >>> f.all_roots() [CRootOf(x**4 - x**2 + 1, 0), CRootOf(x**4 - x**2 + 1, 2)] See Also ======== same_root which_real_roots """ if f.is_multivariate: raise MultivariatePolynomialError( "Must be a univariate polynomial") return f._which_roots(candidates, f.degree())
Poly.which_all_roots
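A minimal sketch of the lifting workflow described above, assuming a SymPy version that provides Poly.which_all_roots: lift a polynomial with Gaussian coefficients to rational coefficients, then filter out the extraneous roots of the lifted polynomial.

from sympy import I, Poly
from sympy.abc import x

f = Poly(x**2 + I*x - 1, x, extension=True)
lifted = f.lift()                       # Poly(x**4 - x**2 + 1, x, domain='ZZ')
candidates = lifted.all_roots()         # all four roots of the lifted polynomial
roots = f.which_all_roots(candidates)   # only the two roots of f survive
print(len(candidates), len(roots))      # 4 2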
sympy
22
sympy/combinatorics/polyhedron.py
def rotate(self, perm): """ Apply a permutation to the polyhedron *in place*. The permutation may be given as a Permutation instance or an integer indicating which permutation from pgroup of the Polyhedron should be applied. This is an operation that is analogous to rotation about an axis by a fixed increment. Notes ===== When a Permutation is applied, no check is done to see if that is a valid permutation for the Polyhedron. For example, a cube could be given a permutation which effectively swaps only 2 vertices. A valid permutation (that rotates the object in a physical way) will be obtained if one only uses permutations from the ``pgroup`` of the Polyhedron. On the other hand, allowing arbitrary rotations (applications of permutations) gives a way to follow named elements rather than indices since Polyhedron allows vertices to be named while Permutation works only with indices. Examples ======== >>> from sympy.combinatorics import Polyhedron, Permutation >>> from sympy.combinatorics.polyhedron import cube >>> cube = cube.copy() >>> cube.corners (0, 1, 2, 3, 4, 5, 6, 7) >>> cube.rotate(0) >>> cube.corners (1, 2, 3, 0, 5, 6, 7, 4) A non-physical "rotation" that is not prohibited by this method: >>> cube.reset() >>> cube.rotate(Permutation([[1, 2]], size=8)) >>> cube.corners (0, 2, 1, 3, 4, 5, 6, 7) Polyhedron can be used to follow elements of set that are identified by letters instead of integers: >>> shadow = h5 = Polyhedron(list('abcde')) >>> p = Permutation([3, 0, 1, 2, 4]) >>> h5.rotate(p) >>> h5.corners (d, a, b, c, e) >>> _ == shadow.corners True >>> copy = h5.copy() >>> h5.rotate(p) >>> h5.corners == copy.corners False """
/usr/src/app/target_test_cases/failed_tests_Polyhedron.rotate.txt
def rotate(self, perm): """ Apply a permutation to the polyhedron *in place*. The permutation may be given as a Permutation instance or an integer indicating which permutation from pgroup of the Polyhedron should be applied. This is an operation that is analogous to rotation about an axis by a fixed increment. Notes ===== When a Permutation is applied, no check is done to see if that is a valid permutation for the Polyhedron. For example, a cube could be given a permutation which effectively swaps only 2 vertices. A valid permutation (that rotates the object in a physical way) will be obtained if one only uses permutations from the ``pgroup`` of the Polyhedron. On the other hand, allowing arbitrary rotations (applications of permutations) gives a way to follow named elements rather than indices since Polyhedron allows vertices to be named while Permutation works only with indices. Examples ======== >>> from sympy.combinatorics import Polyhedron, Permutation >>> from sympy.combinatorics.polyhedron import cube >>> cube = cube.copy() >>> cube.corners (0, 1, 2, 3, 4, 5, 6, 7) >>> cube.rotate(0) >>> cube.corners (1, 2, 3, 0, 5, 6, 7, 4) A non-physical "rotation" that is not prohibited by this method: >>> cube.reset() >>> cube.rotate(Permutation([[1, 2]], size=8)) >>> cube.corners (0, 2, 1, 3, 4, 5, 6, 7) Polyhedron can be used to follow elements of set that are identified by letters instead of integers: >>> shadow = h5 = Polyhedron(list('abcde')) >>> p = Permutation([3, 0, 1, 2, 4]) >>> h5.rotate(p) >>> h5.corners (d, a, b, c, e) >>> _ == shadow.corners True >>> copy = h5.copy() >>> h5.rotate(p) >>> h5.corners == copy.corners False """ if not isinstance(perm, Perm): perm = self.pgroup[perm] # and we know it's valid else: if perm.size != self.size: raise ValueError('Polyhedron and Permutation sizes differ.') a = perm.array_form corners = [self.corners[a[i]] for i in range(len(self.corners))] self._corners = tuple(corners)
Polyhedron.rotate
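A tiny sketch based on the cube doctest above: rotations are applied in place, and reset() restores the original vertex labelling.

from sympy.combinatorics import Permutation
from sympy.combinatorics.polyhedron import cube

cube = cube.copy()
start = cube.corners
cube.rotate(0)                               # first permutation from pgroup
cube.rotate(Permutation([[1, 2]], size=8))   # an arbitrary, non-physical swap
print(cube.corners)
cube.reset()
assert cube.corners == start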
sympy
23
sympy/physics/hydrogen.py
def Psi_nlm(n, l, m, r, phi, theta, Z=1): """ Returns the Hydrogen wave function psi_{nlm}. It's the product of the radial wavefunction R_{nl} and the spherical harmonic Y_{l}^{m}. Parameters ========== n : integer Principal Quantum Number which is an integer with possible values as 1, 2, 3, 4,... l : integer ``l`` is the Angular Momentum Quantum Number with values ranging from 0 to ``n-1``. m : integer ``m`` is the Magnetic Quantum Number with values ranging from ``-l`` to ``l``. r : radial coordinate phi : azimuthal angle theta : polar angle Z : atomic number (1 for Hydrogen, 2 for Helium, ...) Everything is in Hartree atomic units. Examples ======== >>> from sympy.physics.hydrogen import Psi_nlm >>> from sympy import Symbol >>> r=Symbol("r", positive=True) >>> phi=Symbol("phi", real=True) >>> theta=Symbol("theta", real=True) >>> Z=Symbol("Z", positive=True, integer=True, nonzero=True) >>> Psi_nlm(1,0,0,r,phi,theta,Z) Z**(3/2)*exp(-Z*r)/sqrt(pi) >>> Psi_nlm(2,1,1,r,phi,theta,Z) -Z**(5/2)*r*exp(I*phi)*exp(-Z*r/2)*sin(theta)/(8*sqrt(pi)) Integrating the absolute square of a hydrogen wavefunction psi_{nlm} over the whole space leads 1. The normalization of the hydrogen wavefunctions Psi_nlm is: >>> from sympy import integrate, conjugate, pi, oo, sin >>> wf=Psi_nlm(2,1,1,r,phi,theta,Z) >>> abs_sqrd=wf*conjugate(wf) >>> jacobi=r**2*sin(theta) >>> integrate(abs_sqrd*jacobi, (r,0,oo), (phi,0,2*pi), (theta,0,pi)) 1 """
/usr/src/app/target_test_cases/failed_tests_Psi_nlm.txt
def Psi_nlm(n, l, m, r, phi, theta, Z=1): """ Returns the Hydrogen wave function psi_{nlm}. It's the product of the radial wavefunction R_{nl} and the spherical harmonic Y_{l}^{m}. Parameters ========== n : integer Principal Quantum Number which is an integer with possible values as 1, 2, 3, 4,... l : integer ``l`` is the Angular Momentum Quantum Number with values ranging from 0 to ``n-1``. m : integer ``m`` is the Magnetic Quantum Number with values ranging from ``-l`` to ``l``. r : radial coordinate phi : azimuthal angle theta : polar angle Z : atomic number (1 for Hydrogen, 2 for Helium, ...) Everything is in Hartree atomic units. Examples ======== >>> from sympy.physics.hydrogen import Psi_nlm >>> from sympy import Symbol >>> r=Symbol("r", positive=True) >>> phi=Symbol("phi", real=True) >>> theta=Symbol("theta", real=True) >>> Z=Symbol("Z", positive=True, integer=True, nonzero=True) >>> Psi_nlm(1,0,0,r,phi,theta,Z) Z**(3/2)*exp(-Z*r)/sqrt(pi) >>> Psi_nlm(2,1,1,r,phi,theta,Z) -Z**(5/2)*r*exp(I*phi)*exp(-Z*r/2)*sin(theta)/(8*sqrt(pi)) Integrating the absolute square of a hydrogen wavefunction psi_{nlm} over the whole space leads 1. The normalization of the hydrogen wavefunctions Psi_nlm is: >>> from sympy import integrate, conjugate, pi, oo, sin >>> wf=Psi_nlm(2,1,1,r,phi,theta,Z) >>> abs_sqrd=wf*conjugate(wf) >>> jacobi=r**2*sin(theta) >>> integrate(abs_sqrd*jacobi, (r,0,oo), (phi,0,2*pi), (theta,0,pi)) 1 """ # sympify arguments n, l, m, r, phi, theta, Z = map(S, [n, l, m, r, phi, theta, Z]) # check if values for n,l,m make physically sense if n.is_integer and n < 1: raise ValueError("'n' must be positive integer") if l.is_integer and not (n > l): raise ValueError("'n' must be greater than 'l'") if m.is_integer and not (abs(m) <= l): raise ValueError("|'m'| must be less or equal 'l'") # return the hydrogen wave function return R_nl(n, l, r, Z)*Ynm(l, m, theta, phi).expand(func=True)
Psi_nlm
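The same normalization check as in the doctest, but for the ground state psi_100; the choice of quantum numbers is just an illustrative variation.

from sympy import Symbol, conjugate, integrate, oo, pi, sin
from sympy.physics.hydrogen import Psi_nlm

r = Symbol("r", positive=True)
phi = Symbol("phi", real=True)
theta = Symbol("theta", real=True)
Z = Symbol("Z", positive=True, integer=True, nonzero=True)
wf = Psi_nlm(1, 0, 0, r, phi, theta, Z)
norm = integrate(wf*conjugate(wf)*r**2*sin(theta),
                 (r, 0, oo), (phi, 0, 2*pi), (theta, 0, pi))
print(norm)  # 1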
sympy
24
sympy/physics/vector/frame.py
def orient_body_fixed(self, parent, angles, rotation_order): """Rotates this reference frame relative to the parent reference frame by right hand rotating through three successive body fixed simple axis rotations. Each subsequent axis of rotation is about the "body fixed" unit vectors of a new intermediate reference frame. This type of rotation is also referred to rotating through the `Euler and Tait-Bryan Angles`_. .. _Euler and Tait-Bryan Angles: https://en.wikipedia.org/wiki/Euler_angles The computed angular velocity in this method is by default expressed in the child's frame, so it is most preferable to use ``u1 * child.x + u2 * child.y + u3 * child.z`` as generalized speeds. Parameters ========== parent : ReferenceFrame Reference frame that this reference frame will be rotated relative to. angles : 3-tuple of sympifiable Three angles in radians used for the successive rotations. rotation_order : 3 character string or 3 digit integer Order of the rotations about each intermediate reference frames' unit vectors. The Euler rotation about the X, Z', X'' axes can be specified by the strings ``'XZX'``, ``'131'``, or the integer ``131``. There are 12 unique valid rotation orders (6 Euler and 6 Tait-Bryan): zxz, xyx, yzy, zyz, xzx, yxy, xyz, yzx, zxy, xzy, zyx, and yxz. Warns ====== UserWarning If the orientation creates a kinematic loop. Examples ======== Setup variables for the examples: >>> from sympy import symbols >>> from sympy.physics.vector import ReferenceFrame >>> q1, q2, q3 = symbols('q1, q2, q3') >>> N = ReferenceFrame('N') >>> B = ReferenceFrame('B') >>> B1 = ReferenceFrame('B1') >>> B2 = ReferenceFrame('B2') >>> B3 = ReferenceFrame('B3') For example, a classic Euler Angle rotation can be done by: >>> B.orient_body_fixed(N, (q1, q2, q3), 'XYX') >>> B.dcm(N) Matrix([ [ cos(q2), sin(q1)*sin(q2), -sin(q2)*cos(q1)], [sin(q2)*sin(q3), -sin(q1)*sin(q3)*cos(q2) + cos(q1)*cos(q3), sin(q1)*cos(q3) + sin(q3)*cos(q1)*cos(q2)], [sin(q2)*cos(q3), -sin(q1)*cos(q2)*cos(q3) - sin(q3)*cos(q1), -sin(q1)*sin(q3) + cos(q1)*cos(q2)*cos(q3)]]) This rotates reference frame B relative to reference frame N through ``q1`` about ``N.x``, then rotates B again through ``q2`` about ``B.y``, and finally through ``q3`` about ``B.x``. It is equivalent to three successive ``orient_axis()`` calls: >>> B1.orient_axis(N, N.x, q1) >>> B2.orient_axis(B1, B1.y, q2) >>> B3.orient_axis(B2, B2.x, q3) >>> B3.dcm(N) Matrix([ [ cos(q2), sin(q1)*sin(q2), -sin(q2)*cos(q1)], [sin(q2)*sin(q3), -sin(q1)*sin(q3)*cos(q2) + cos(q1)*cos(q3), sin(q1)*cos(q3) + sin(q3)*cos(q1)*cos(q2)], [sin(q2)*cos(q3), -sin(q1)*cos(q2)*cos(q3) - sin(q3)*cos(q1), -sin(q1)*sin(q3) + cos(q1)*cos(q2)*cos(q3)]]) Acceptable rotation orders are of length 3, expressed in as a string ``'XYZ'`` or ``'123'`` or integer ``123``. Rotations about an axis twice in a row are prohibited. >>> B.orient_body_fixed(N, (q1, q2, 0), 'ZXZ') >>> B.orient_body_fixed(N, (q1, q2, 0), '121') >>> B.orient_body_fixed(N, (q1, q2, q3), 123) """
/usr/src/app/target_test_cases/failed_tests_ReferenceFrame.orient_body_fixed.txt
def orient_body_fixed(self, parent, angles, rotation_order): """Rotates this reference frame relative to the parent reference frame by right hand rotating through three successive body fixed simple axis rotations. Each subsequent axis of rotation is about the "body fixed" unit vectors of a new intermediate reference frame. This type of rotation is also referred to rotating through the `Euler and Tait-Bryan Angles`_. .. _Euler and Tait-Bryan Angles: https://en.wikipedia.org/wiki/Euler_angles The computed angular velocity in this method is by default expressed in the child's frame, so it is most preferable to use ``u1 * child.x + u2 * child.y + u3 * child.z`` as generalized speeds. Parameters ========== parent : ReferenceFrame Reference frame that this reference frame will be rotated relative to. angles : 3-tuple of sympifiable Three angles in radians used for the successive rotations. rotation_order : 3 character string or 3 digit integer Order of the rotations about each intermediate reference frames' unit vectors. The Euler rotation about the X, Z', X'' axes can be specified by the strings ``'XZX'``, ``'131'``, or the integer ``131``. There are 12 unique valid rotation orders (6 Euler and 6 Tait-Bryan): zxz, xyx, yzy, zyz, xzx, yxy, xyz, yzx, zxy, xzy, zyx, and yxz. Warns ====== UserWarning If the orientation creates a kinematic loop. Examples ======== Setup variables for the examples: >>> from sympy import symbols >>> from sympy.physics.vector import ReferenceFrame >>> q1, q2, q3 = symbols('q1, q2, q3') >>> N = ReferenceFrame('N') >>> B = ReferenceFrame('B') >>> B1 = ReferenceFrame('B1') >>> B2 = ReferenceFrame('B2') >>> B3 = ReferenceFrame('B3') For example, a classic Euler Angle rotation can be done by: >>> B.orient_body_fixed(N, (q1, q2, q3), 'XYX') >>> B.dcm(N) Matrix([ [ cos(q2), sin(q1)*sin(q2), -sin(q2)*cos(q1)], [sin(q2)*sin(q3), -sin(q1)*sin(q3)*cos(q2) + cos(q1)*cos(q3), sin(q1)*cos(q3) + sin(q3)*cos(q1)*cos(q2)], [sin(q2)*cos(q3), -sin(q1)*cos(q2)*cos(q3) - sin(q3)*cos(q1), -sin(q1)*sin(q3) + cos(q1)*cos(q2)*cos(q3)]]) This rotates reference frame B relative to reference frame N through ``q1`` about ``N.x``, then rotates B again through ``q2`` about ``B.y``, and finally through ``q3`` about ``B.x``. It is equivalent to three successive ``orient_axis()`` calls: >>> B1.orient_axis(N, N.x, q1) >>> B2.orient_axis(B1, B1.y, q2) >>> B3.orient_axis(B2, B2.x, q3) >>> B3.dcm(N) Matrix([ [ cos(q2), sin(q1)*sin(q2), -sin(q2)*cos(q1)], [sin(q2)*sin(q3), -sin(q1)*sin(q3)*cos(q2) + cos(q1)*cos(q3), sin(q1)*cos(q3) + sin(q3)*cos(q1)*cos(q2)], [sin(q2)*cos(q3), -sin(q1)*cos(q2)*cos(q3) - sin(q3)*cos(q1), -sin(q1)*sin(q3) + cos(q1)*cos(q2)*cos(q3)]]) Acceptable rotation orders are of length 3, expressed in as a string ``'XYZ'`` or ``'123'`` or integer ``123``. Rotations about an axis twice in a row are prohibited. 
>>> B.orient_body_fixed(N, (q1, q2, 0), 'ZXZ') >>> B.orient_body_fixed(N, (q1, q2, 0), '121') >>> B.orient_body_fixed(N, (q1, q2, q3), 123) """ from sympy.physics.vector.functions import dynamicsymbols _check_frame(parent) amounts, rot_order, rot_matrices = self._parse_consecutive_rotations( angles, rotation_order) self._dcm(parent, rot_matrices[0] * rot_matrices[1] * rot_matrices[2]) rot_vecs = [zeros(3, 1) for _ in range(3)] for i, order in enumerate(rot_order): rot_vecs[i][order - 1] = amounts[i].diff(dynamicsymbols._t) u1, u2, u3 = rot_vecs[2] + rot_matrices[2].T * ( rot_vecs[1] + rot_matrices[1].T * rot_vecs[0]) wvec = u1 * self.x + u2 * self.y + u3 * self.z # There is a double - self._ang_vel_dict.update({parent: wvec}) parent._ang_vel_dict.update({self: -wvec}) self._var_dict = {}
ReferenceFrame.orient_body_fixed
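A minimal sketch of the docstring's point about generalized speeds: after orient_body_fixed, the angular velocity of the child frame comes back expressed in the child's own unit vectors.

from sympy.physics.vector import ReferenceFrame, dynamicsymbols

q1, q2, q3 = dynamicsymbols('q1, q2, q3')
N = ReferenceFrame('N')
B = ReferenceFrame('B')
B.orient_body_fixed(N, (q1, q2, q3), 'XYX')
print(B.ang_vel_in(N))  # components appear along B.x, B.y and B.z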
sympy
25
sympy/physics/vector/frame.py
def orient_explicit(self, parent, dcm): """Sets the orientation of this reference frame relative to another (parent) reference frame using a direction cosine matrix that describes the rotation from the parent to the child. Parameters ========== parent : ReferenceFrame Reference frame that this reference frame will be rotated relative to. dcm : Matrix, shape(3, 3) Direction cosine matrix that specifies the relative rotation between the two reference frames. Warns ====== UserWarning If the orientation creates a kinematic loop. Examples ======== Setup variables for the examples: >>> from sympy import symbols, Matrix, sin, cos >>> from sympy.physics.vector import ReferenceFrame >>> q1 = symbols('q1') >>> A = ReferenceFrame('A') >>> B = ReferenceFrame('B') >>> N = ReferenceFrame('N') A simple rotation of ``A`` relative to ``N`` about ``N.x`` is defined by the following direction cosine matrix: >>> dcm = Matrix([[1, 0, 0], ... [0, cos(q1), -sin(q1)], ... [0, sin(q1), cos(q1)]]) >>> A.orient_explicit(N, dcm) >>> A.dcm(N) Matrix([ [1, 0, 0], [0, cos(q1), sin(q1)], [0, -sin(q1), cos(q1)]]) This is equivalent to using ``orient_axis()``: >>> B.orient_axis(N, N.x, q1) >>> B.dcm(N) Matrix([ [1, 0, 0], [0, cos(q1), sin(q1)], [0, -sin(q1), cos(q1)]]) **Note carefully that** ``N.dcm(B)`` **(the transpose) would be passed into** ``orient_explicit()`` **for** ``A.dcm(N)`` **to match** ``B.dcm(N)``: >>> A.orient_explicit(N, N.dcm(B)) >>> A.dcm(N) Matrix([ [1, 0, 0], [0, cos(q1), sin(q1)], [0, -sin(q1), cos(q1)]]) """
/usr/src/app/target_test_cases/failed_tests_ReferenceFrame.orient_explicit.txt
def orient_explicit(self, parent, dcm): """Sets the orientation of this reference frame relative to another (parent) reference frame using a direction cosine matrix that describes the rotation from the parent to the child. Parameters ========== parent : ReferenceFrame Reference frame that this reference frame will be rotated relative to. dcm : Matrix, shape(3, 3) Direction cosine matrix that specifies the relative rotation between the two reference frames. Warns ====== UserWarning If the orientation creates a kinematic loop. Examples ======== Setup variables for the examples: >>> from sympy import symbols, Matrix, sin, cos >>> from sympy.physics.vector import ReferenceFrame >>> q1 = symbols('q1') >>> A = ReferenceFrame('A') >>> B = ReferenceFrame('B') >>> N = ReferenceFrame('N') A simple rotation of ``A`` relative to ``N`` about ``N.x`` is defined by the following direction cosine matrix: >>> dcm = Matrix([[1, 0, 0], ... [0, cos(q1), -sin(q1)], ... [0, sin(q1), cos(q1)]]) >>> A.orient_explicit(N, dcm) >>> A.dcm(N) Matrix([ [1, 0, 0], [0, cos(q1), sin(q1)], [0, -sin(q1), cos(q1)]]) This is equivalent to using ``orient_axis()``: >>> B.orient_axis(N, N.x, q1) >>> B.dcm(N) Matrix([ [1, 0, 0], [0, cos(q1), sin(q1)], [0, -sin(q1), cos(q1)]]) **Note carefully that** ``N.dcm(B)`` **(the transpose) would be passed into** ``orient_explicit()`` **for** ``A.dcm(N)`` **to match** ``B.dcm(N)``: >>> A.orient_explicit(N, N.dcm(B)) >>> A.dcm(N) Matrix([ [1, 0, 0], [0, cos(q1), sin(q1)], [0, -sin(q1), cos(q1)]]) """ _check_frame(parent) # amounts must be a Matrix type object # (e.g. sympy.matrices.dense.MutableDenseMatrix). if not isinstance(dcm, MatrixBase): raise TypeError("Amounts must be a SymPy Matrix type object.") self.orient_dcm(parent, dcm.T)
ReferenceFrame.orient_explicit
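A short sketch of the transpose convention highlighted in the docstring: the matrix handed to orient_explicit describes the rotation from parent to child, and A.dcm(N) comes back as its transpose, matching the doctest above.

from sympy import Matrix, cos, sin, symbols
from sympy.physics.vector import ReferenceFrame

q1 = symbols('q1')
N = ReferenceFrame('N')
A = ReferenceFrame('A')
dcm = Matrix([[1, 0, 0],
              [0, cos(q1), -sin(q1)],
              [0, sin(q1), cos(q1)]])
A.orient_explicit(N, dcm)
assert A.dcm(N) == dcm.T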
sympy
26
sympy/physics/vector/frame.py
def orient_space_fixed(self, parent, angles, rotation_order): """Rotates this reference frame relative to the parent reference frame by right hand rotating through three successive space fixed simple axis rotations. Each subsequent axis of rotation is about the "space fixed" unit vectors of the parent reference frame. The computed angular velocity in this method is by default expressed in the child's frame, so it is most preferable to use ``u1 * child.x + u2 * child.y + u3 * child.z`` as generalized speeds. Parameters ========== parent : ReferenceFrame Reference frame that this reference frame will be rotated relative to. angles : 3-tuple of sympifiable Three angles in radians used for the successive rotations. rotation_order : 3 character string or 3 digit integer Order of the rotations about the parent reference frame's unit vectors. The order can be specified by the strings ``'XZX'``, ``'131'``, or the integer ``131``. There are 12 unique valid rotation orders. Warns ====== UserWarning If the orientation creates a kinematic loop. Examples ======== Setup variables for the examples: >>> from sympy import symbols >>> from sympy.physics.vector import ReferenceFrame >>> q1, q2, q3 = symbols('q1, q2, q3') >>> N = ReferenceFrame('N') >>> B = ReferenceFrame('B') >>> B1 = ReferenceFrame('B1') >>> B2 = ReferenceFrame('B2') >>> B3 = ReferenceFrame('B3') >>> B.orient_space_fixed(N, (q1, q2, q3), '312') >>> B.dcm(N) Matrix([ [ sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3), sin(q1)*cos(q2), sin(q1)*sin(q2)*cos(q3) - sin(q3)*cos(q1)], [-sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1), cos(q1)*cos(q2), sin(q1)*sin(q3) + sin(q2)*cos(q1)*cos(q3)], [ sin(q3)*cos(q2), -sin(q2), cos(q2)*cos(q3)]]) is equivalent to: >>> B1.orient_axis(N, N.z, q1) >>> B2.orient_axis(B1, N.x, q2) >>> B3.orient_axis(B2, N.y, q3) >>> B3.dcm(N).simplify() Matrix([ [ sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3), sin(q1)*cos(q2), sin(q1)*sin(q2)*cos(q3) - sin(q3)*cos(q1)], [-sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1), cos(q1)*cos(q2), sin(q1)*sin(q3) + sin(q2)*cos(q1)*cos(q3)], [ sin(q3)*cos(q2), -sin(q2), cos(q2)*cos(q3)]]) It is worth noting that space-fixed and body-fixed rotations are related by the order of the rotations, i.e. the reverse order of body fixed will give space fixed and vice versa. >>> B.orient_space_fixed(N, (q1, q2, q3), '231') >>> B.dcm(N) Matrix([ [cos(q1)*cos(q2), sin(q1)*sin(q3) + sin(q2)*cos(q1)*cos(q3), -sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1)], [ -sin(q2), cos(q2)*cos(q3), sin(q3)*cos(q2)], [sin(q1)*cos(q2), sin(q1)*sin(q2)*cos(q3) - sin(q3)*cos(q1), sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3)]]) >>> B.orient_body_fixed(N, (q3, q2, q1), '132') >>> B.dcm(N) Matrix([ [cos(q1)*cos(q2), sin(q1)*sin(q3) + sin(q2)*cos(q1)*cos(q3), -sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1)], [ -sin(q2), cos(q2)*cos(q3), sin(q3)*cos(q2)], [sin(q1)*cos(q2), sin(q1)*sin(q2)*cos(q3) - sin(q3)*cos(q1), sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3)]]) """
/usr/src/app/target_test_cases/failed_tests_ReferenceFrame.orient_space_fixed.txt
def orient_space_fixed(self, parent, angles, rotation_order): """Rotates this reference frame relative to the parent reference frame by right hand rotating through three successive space fixed simple axis rotations. Each subsequent axis of rotation is about the "space fixed" unit vectors of the parent reference frame. The computed angular velocity in this method is by default expressed in the child's frame, so it is most preferable to use ``u1 * child.x + u2 * child.y + u3 * child.z`` as generalized speeds. Parameters ========== parent : ReferenceFrame Reference frame that this reference frame will be rotated relative to. angles : 3-tuple of sympifiable Three angles in radians used for the successive rotations. rotation_order : 3 character string or 3 digit integer Order of the rotations about the parent reference frame's unit vectors. The order can be specified by the strings ``'XZX'``, ``'131'``, or the integer ``131``. There are 12 unique valid rotation orders. Warns ====== UserWarning If the orientation creates a kinematic loop. Examples ======== Setup variables for the examples: >>> from sympy import symbols >>> from sympy.physics.vector import ReferenceFrame >>> q1, q2, q3 = symbols('q1, q2, q3') >>> N = ReferenceFrame('N') >>> B = ReferenceFrame('B') >>> B1 = ReferenceFrame('B1') >>> B2 = ReferenceFrame('B2') >>> B3 = ReferenceFrame('B3') >>> B.orient_space_fixed(N, (q1, q2, q3), '312') >>> B.dcm(N) Matrix([ [ sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3), sin(q1)*cos(q2), sin(q1)*sin(q2)*cos(q3) - sin(q3)*cos(q1)], [-sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1), cos(q1)*cos(q2), sin(q1)*sin(q3) + sin(q2)*cos(q1)*cos(q3)], [ sin(q3)*cos(q2), -sin(q2), cos(q2)*cos(q3)]]) is equivalent to: >>> B1.orient_axis(N, N.z, q1) >>> B2.orient_axis(B1, N.x, q2) >>> B3.orient_axis(B2, N.y, q3) >>> B3.dcm(N).simplify() Matrix([ [ sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3), sin(q1)*cos(q2), sin(q1)*sin(q2)*cos(q3) - sin(q3)*cos(q1)], [-sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1), cos(q1)*cos(q2), sin(q1)*sin(q3) + sin(q2)*cos(q1)*cos(q3)], [ sin(q3)*cos(q2), -sin(q2), cos(q2)*cos(q3)]]) It is worth noting that space-fixed and body-fixed rotations are related by the order of the rotations, i.e. the reverse order of body fixed will give space fixed and vice versa. 
>>> B.orient_space_fixed(N, (q1, q2, q3), '231') >>> B.dcm(N) Matrix([ [cos(q1)*cos(q2), sin(q1)*sin(q3) + sin(q2)*cos(q1)*cos(q3), -sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1)], [ -sin(q2), cos(q2)*cos(q3), sin(q3)*cos(q2)], [sin(q1)*cos(q2), sin(q1)*sin(q2)*cos(q3) - sin(q3)*cos(q1), sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3)]]) >>> B.orient_body_fixed(N, (q3, q2, q1), '132') >>> B.dcm(N) Matrix([ [cos(q1)*cos(q2), sin(q1)*sin(q3) + sin(q2)*cos(q1)*cos(q3), -sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1)], [ -sin(q2), cos(q2)*cos(q3), sin(q3)*cos(q2)], [sin(q1)*cos(q2), sin(q1)*sin(q2)*cos(q3) - sin(q3)*cos(q1), sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3)]]) """ from sympy.physics.vector.functions import dynamicsymbols _check_frame(parent) amounts, rot_order, rot_matrices = self._parse_consecutive_rotations( angles, rotation_order) self._dcm(parent, rot_matrices[2] * rot_matrices[1] * rot_matrices[0]) rot_vecs = [zeros(3, 1) for _ in range(3)] for i, order in enumerate(rot_order): rot_vecs[i][order - 1] = amounts[i].diff(dynamicsymbols._t) u1, u2, u3 = rot_vecs[0] + rot_matrices[0].T * ( rot_vecs[1] + rot_matrices[1].T * rot_vecs[2]) wvec = u1 * self.x + u2 * self.y + u3 * self.z # There is a double - self._ang_vel_dict.update({parent: wvec}) parent._ang_vel_dict.update({self: -wvec}) self._var_dict = {}
ReferenceFrame.orient_space_fixed
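A small sketch checking the reversal property stated above: a space-fixed rotation sequence gives the same orientation as the body-fixed sequence with the axis order and angles reversed (the frame names here are illustrative).

from sympy import simplify, zeros
from sympy.physics.vector import ReferenceFrame, dynamicsymbols

q1, q2, q3 = dynamicsymbols('q1, q2, q3')
N = ReferenceFrame('N')
B = ReferenceFrame('B')
C = ReferenceFrame('C')
B.orient_space_fixed(N, (q1, q2, q3), '231')
C.orient_body_fixed(N, (q3, q2, q1), '132')
diff = B.dcm(N) - C.dcm(N)
assert diff.applyfunc(simplify) == zeros(3, 3)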
sympy
27
sympy/logic/boolalg.py
def SOPform(variables, minterms, dontcares=None): """ The SOPform function uses simplified_pairs and a redundant group- eliminating algorithm to convert the list of all input combos that generate '1' (the minterms) into the smallest sum-of-products form. The variables must be given as the first argument. Return a logical :py:class:`~.Or` function (i.e., the "sum of products" or "SOP" form) that gives the desired outcome. If there are inputs that can be ignored, pass them as a list, too. The result will be one of the (perhaps many) functions that satisfy the conditions. Examples ======== >>> from sympy.logic import SOPform >>> from sympy import symbols >>> w, x, y, z = symbols('w x y z') >>> minterms = [[0, 0, 0, 1], [0, 0, 1, 1], ... [0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 1, 1]] >>> dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]] >>> SOPform([w, x, y, z], minterms, dontcares) (y & z) | (~w & ~x) The terms can also be represented as integers: >>> minterms = [1, 3, 7, 11, 15] >>> dontcares = [0, 2, 5] >>> SOPform([w, x, y, z], minterms, dontcares) (y & z) | (~w & ~x) They can also be specified using dicts, which does not have to be fully specified: >>> minterms = [{w: 0, x: 1}, {y: 1, z: 1, x: 0}] >>> SOPform([w, x, y, z], minterms) (x & ~w) | (y & z & ~x) Or a combination: >>> minterms = [4, 7, 11, [1, 1, 1, 1]] >>> dontcares = [{w : 0, x : 0, y: 0}, 5] >>> SOPform([w, x, y, z], minterms, dontcares) (w & y & z) | (~w & ~y) | (x & z & ~w) See also ======== POSform References ========== .. [1] https://en.wikipedia.org/wiki/Quine-McCluskey_algorithm .. [2] https://en.wikipedia.org/wiki/Don%27t-care_term """
/usr/src/app/target_test_cases/failed_tests_SOPform.txt
def SOPform(variables, minterms, dontcares=None): """ The SOPform function uses simplified_pairs and a redundant group- eliminating algorithm to convert the list of all input combos that generate '1' (the minterms) into the smallest sum-of-products form. The variables must be given as the first argument. Return a logical :py:class:`~.Or` function (i.e., the "sum of products" or "SOP" form) that gives the desired outcome. If there are inputs that can be ignored, pass them as a list, too. The result will be one of the (perhaps many) functions that satisfy the conditions. Examples ======== >>> from sympy.logic import SOPform >>> from sympy import symbols >>> w, x, y, z = symbols('w x y z') >>> minterms = [[0, 0, 0, 1], [0, 0, 1, 1], ... [0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 1, 1]] >>> dontcares = [[0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 1]] >>> SOPform([w, x, y, z], minterms, dontcares) (y & z) | (~w & ~x) The terms can also be represented as integers: >>> minterms = [1, 3, 7, 11, 15] >>> dontcares = [0, 2, 5] >>> SOPform([w, x, y, z], minterms, dontcares) (y & z) | (~w & ~x) They can also be specified using dicts, which does not have to be fully specified: >>> minterms = [{w: 0, x: 1}, {y: 1, z: 1, x: 0}] >>> SOPform([w, x, y, z], minterms) (x & ~w) | (y & z & ~x) Or a combination: >>> minterms = [4, 7, 11, [1, 1, 1, 1]] >>> dontcares = [{w : 0, x : 0, y: 0}, 5] >>> SOPform([w, x, y, z], minterms, dontcares) (w & y & z) | (~w & ~y) | (x & z & ~w) See also ======== POSform References ========== .. [1] https://en.wikipedia.org/wiki/Quine-McCluskey_algorithm .. [2] https://en.wikipedia.org/wiki/Don%27t-care_term """ if not minterms: return false variables = tuple(map(sympify, variables)) minterms = _input_to_binlist(minterms, variables) dontcares = _input_to_binlist((dontcares or []), variables) for d in dontcares: if d in minterms: raise ValueError('%s in minterms is also in dontcares' % d) return _sop_form(variables, minterms, dontcares)
SOPform
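A cross-check between the two minimizers, assuming only their documented behaviour: for the same minterms and don't-cares, SOPform and POSform must agree on every input that is not a don't-care.

from itertools import product
from sympy import symbols, true, false
from sympy.logic import POSform, SOPform

w, x, y, z = symbols('w x y z')
minterms = [1, 3, 7, 11, 15]
dontcares = [0, 2, 5]
sop = SOPform([w, x, y, z], minterms, dontcares)   # (y & z) | (~w & ~x)
pos = POSform([w, x, y, z], minterms, dontcares)   # z & (y | ~w)
for bits in product((0, 1), repeat=4):
    index = int(''.join(map(str, bits)), 2)
    if index in dontcares:
        continue
    env = {s: (true if b else false) for s, b in zip((w, x, y, z), bits)}
    assert bool(sop.subs(env)) == bool(pos.subs(env)) == (index in minterms)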
sympy
28
sympy/physics/mechanics/wrapping_geometry.py
def geodesic_length(self, point_1, point_2): """The shortest distance between two points on a geometry's surface. Explanation =========== The geodesic length, i.e. the shortest arc along the surface of a cylinder, connecting two points. It can be calculated using Pythagoras' theorem. The first short side is the distance between the two points on the cylinder's surface parallel to the cylinder's axis. The second short side is the arc of a circle between the two points of the cylinder's surface perpendicular to the cylinder's axis. The resulting hypotenuse is the geodesic length. Examples ======== A geodesic length can only be calculated between two points on the cylinder's surface. Firstly, a ``WrappingCylinder`` instance must be created along with two points that will lie on its surface: >>> from sympy import symbols, cos, sin >>> from sympy.physics.mechanics import (Point, ReferenceFrame, ... WrappingCylinder, dynamicsymbols) >>> N = ReferenceFrame('N') >>> r = symbols('r') >>> pO = Point('pO') >>> pO.set_vel(N, 0) >>> cylinder = WrappingCylinder(r, pO, N.x) >>> p1 = Point('p1') >>> p2 = Point('p2') Let's assume that ``p1`` is located at ``N.x + r*N.y`` relative to ``pO`` and that ``p2`` is located at ``r*(cos(q)*N.y + sin(q)*N.z)`` relative to ``pO``, where ``q(t)`` is a generalized coordinate specifying the angle rotated around the ``N.x`` axis according to the right-hand rule where ``N.y`` is zero. These positions can be set with: >>> q = dynamicsymbols('q') >>> p1.set_pos(pO, N.x + r*N.y) >>> p1.pos_from(pO) N.x + r*N.y >>> p2.set_pos(pO, r*(cos(q)*N.y + sin(q)*N.z).normalize()) >>> p2.pos_from(pO).simplify() r*cos(q(t))*N.y + r*sin(q(t))*N.z The geodesic length, which is in this case a is the hypotenuse of a right triangle where the other two side lengths are ``1`` (parallel to the cylinder's axis) and ``r*q(t)`` (parallel to the cylinder's cross section), can be calculated using the ``geodesic_length`` method: >>> cylinder.geodesic_length(p1, p2).simplify() sqrt(r**2*q(t)**2 + 1) If the ``geodesic_length`` method is passed an argument ``Point`` that doesn't lie on the sphere's surface then a ``ValueError`` is raised because it's not possible to calculate a value in this case. Parameters ========== point_1 : Point Point from which the geodesic length should be calculated. point_2 : Point Point to which the geodesic length should be calculated. """
/usr/src/app/target_test_cases/failed_tests_WrappingCylinder.geodesic_length.txt
def geodesic_length(self, point_1, point_2): """The shortest distance between two points on a geometry's surface. Explanation =========== The geodesic length, i.e. the shortest arc along the surface of a cylinder, connecting two points. It can be calculated using Pythagoras' theorem. The first short side is the distance between the two points on the cylinder's surface parallel to the cylinder's axis. The second short side is the arc of a circle between the two points of the cylinder's surface perpendicular to the cylinder's axis. The resulting hypotenuse is the geodesic length. Examples ======== A geodesic length can only be calculated between two points on the cylinder's surface. Firstly, a ``WrappingCylinder`` instance must be created along with two points that will lie on its surface: >>> from sympy import symbols, cos, sin >>> from sympy.physics.mechanics import (Point, ReferenceFrame, ... WrappingCylinder, dynamicsymbols) >>> N = ReferenceFrame('N') >>> r = symbols('r') >>> pO = Point('pO') >>> pO.set_vel(N, 0) >>> cylinder = WrappingCylinder(r, pO, N.x) >>> p1 = Point('p1') >>> p2 = Point('p2') Let's assume that ``p1`` is located at ``N.x + r*N.y`` relative to ``pO`` and that ``p2`` is located at ``r*(cos(q)*N.y + sin(q)*N.z)`` relative to ``pO``, where ``q(t)`` is a generalized coordinate specifying the angle rotated around the ``N.x`` axis according to the right-hand rule where ``N.y`` is zero. These positions can be set with: >>> q = dynamicsymbols('q') >>> p1.set_pos(pO, N.x + r*N.y) >>> p1.pos_from(pO) N.x + r*N.y >>> p2.set_pos(pO, r*(cos(q)*N.y + sin(q)*N.z).normalize()) >>> p2.pos_from(pO).simplify() r*cos(q(t))*N.y + r*sin(q(t))*N.z The geodesic length, which is in this case a is the hypotenuse of a right triangle where the other two side lengths are ``1`` (parallel to the cylinder's axis) and ``r*q(t)`` (parallel to the cylinder's cross section), can be calculated using the ``geodesic_length`` method: >>> cylinder.geodesic_length(p1, p2).simplify() sqrt(r**2*q(t)**2 + 1) If the ``geodesic_length`` method is passed an argument ``Point`` that doesn't lie on the sphere's surface then a ``ValueError`` is raised because it's not possible to calculate a value in this case. Parameters ========== point_1 : Point Point from which the geodesic length should be calculated. point_2 : Point Point to which the geodesic length should be calculated. """ for point in (point_1, point_2): if not self.point_on_surface(point): msg = ( f'Geodesic length cannot be calculated as point {point} ' f'with radius {point.pos_from(self.point).magnitude()} ' f'from the cylinder\'s center {self.point} does not lie on ' f'the surface of {self} with radius {self.radius} and axis ' f'{self.axis}.' ) raise ValueError(msg) relative_position = point_2.pos_from(point_1) parallel_length = relative_position.dot(self.axis) point_1_relative_position = point_1.pos_from(self.point) point_1_perpendicular_vector = ( point_1_relative_position - point_1_relative_position.dot(self.axis)*self.axis ).normalize() point_2_relative_position = point_2.pos_from(self.point) point_2_perpendicular_vector = ( point_2_relative_position - point_2_relative_position.dot(self.axis)*self.axis ).normalize() central_angle = _directional_atan( cancel(point_1_perpendicular_vector .cross(point_2_perpendicular_vector) .dot(self.axis)), cancel(point_1_perpendicular_vector.dot(point_2_perpendicular_vector)), ) planar_arc_length = self.radius*central_angle geodesic_length = sqrt(parallel_length**2 + planar_arc_length**2) return geodesic_length
WrappingCylinder.geodesic_length
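A minimal sketch building on the doctest above: substituting concrete values (a quarter turn on a unit-radius cylinder; both values are illustrative assumptions) shows the Pythagorean structure of the result.

from sympy import symbols, cos, sin, pi
from sympy.physics.mechanics import (Point, ReferenceFrame, WrappingCylinder,
                                     dynamicsymbols)

N = ReferenceFrame('N')
r = symbols('r')
q = dynamicsymbols('q')
pO = Point('pO')
pO.set_vel(N, 0)
cylinder = WrappingCylinder(r, pO, N.x)
p1, p2 = Point('p1'), Point('p2')
p1.set_pos(pO, N.x + r*N.y)
p2.set_pos(pO, r*(cos(q)*N.y + sin(q)*N.z).normalize())
length = cylinder.geodesic_length(p1, p2).simplify()  # sqrt(r**2*q(t)**2 + 1)
# A quarter turn with unit radius: hypotenuse of sides 1 and pi/2.
print(length.subs({r: 1, q: pi/2}))  # expected: sqrt(1 + pi**2/4)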
sympy
29
sympy/physics/mechanics/pathway.py
def to_loads(self, force): """Loads required by the equations of motion method classes. Explanation =========== ``KanesMethod`` requires a list of ``Point``-``Vector`` tuples to be passed to the ``loads`` parameters of its ``kanes_equations`` method when constructing the equations of motion. This method acts as a utility to produce the correctly-structred pairs of points and vectors required so that these can be easily concatenated with other items in the list of loads and passed to ``KanesMethod.kanes_equations``. These loads are also in the correct form to also be passed to the other equations of motion method classes, e.g. ``LagrangesMethod``. Examples ======== The below example shows how to generate the loads produced in an actuator that produces an expansile force ``F`` while wrapping around a cylinder. First, create a cylinder with radius ``r`` and an axis parallel to the ``N.z`` direction of the global frame ``N`` that also passes through a point ``pO``. >>> from sympy import symbols >>> from sympy.physics.mechanics import (Point, ReferenceFrame, ... WrappingCylinder) >>> N = ReferenceFrame('N') >>> r = symbols('r', positive=True) >>> pO = Point('pO') >>> cylinder = WrappingCylinder(r, pO, N.z) Create the pathway of the actuator using the ``WrappingPathway`` class, defined to span between two points ``pA`` and ``pB``. Both points lie on the surface of the cylinder and the location of ``pB`` is defined relative to ``pA`` by the dynamics symbol ``q``. >>> from sympy import cos, sin >>> from sympy.physics.mechanics import WrappingPathway, dynamicsymbols >>> q = dynamicsymbols('q') >>> pA = Point('pA') >>> pB = Point('pB') >>> pA.set_pos(pO, r*N.x) >>> pB.set_pos(pO, r*(cos(q)*N.x + sin(q)*N.y)) >>> pB.pos_from(pA) (r*cos(q(t)) - r)*N.x + r*sin(q(t))*N.y >>> pathway = WrappingPathway(pA, pB, cylinder) Now create a symbol ``F`` to describe the magnitude of the (expansile) force that will be produced along the pathway. The list of loads that ``KanesMethod`` requires can be produced by calling the pathway's ``to_loads`` method with ``F`` passed as the only argument. >>> F = symbols('F') >>> loads = pathway.to_loads(F) >>> [load.__class__(load.location, load.vector.simplify()) for load in loads] [(pA, F*N.y), (pB, F*sin(q(t))*N.x - F*cos(q(t))*N.y), (pO, - F*sin(q(t))*N.x + F*(cos(q(t)) - 1)*N.y)] Parameters ========== force : Expr Magnitude of the force acting along the length of the pathway. It is assumed that this ``Expr`` represents an expansile force. """
/usr/src/app/target_test_cases/failed_tests_WrappingPathway.to_loads.txt
def to_loads(self, force): """Loads required by the equations of motion method classes. Explanation =========== ``KanesMethod`` requires a list of ``Point``-``Vector`` tuples to be passed to the ``loads`` parameters of its ``kanes_equations`` method when constructing the equations of motion. This method acts as a utility to produce the correctly-structred pairs of points and vectors required so that these can be easily concatenated with other items in the list of loads and passed to ``KanesMethod.kanes_equations``. These loads are also in the correct form to also be passed to the other equations of motion method classes, e.g. ``LagrangesMethod``. Examples ======== The below example shows how to generate the loads produced in an actuator that produces an expansile force ``F`` while wrapping around a cylinder. First, create a cylinder with radius ``r`` and an axis parallel to the ``N.z`` direction of the global frame ``N`` that also passes through a point ``pO``. >>> from sympy import symbols >>> from sympy.physics.mechanics import (Point, ReferenceFrame, ... WrappingCylinder) >>> N = ReferenceFrame('N') >>> r = symbols('r', positive=True) >>> pO = Point('pO') >>> cylinder = WrappingCylinder(r, pO, N.z) Create the pathway of the actuator using the ``WrappingPathway`` class, defined to span between two points ``pA`` and ``pB``. Both points lie on the surface of the cylinder and the location of ``pB`` is defined relative to ``pA`` by the dynamics symbol ``q``. >>> from sympy import cos, sin >>> from sympy.physics.mechanics import WrappingPathway, dynamicsymbols >>> q = dynamicsymbols('q') >>> pA = Point('pA') >>> pB = Point('pB') >>> pA.set_pos(pO, r*N.x) >>> pB.set_pos(pO, r*(cos(q)*N.x + sin(q)*N.y)) >>> pB.pos_from(pA) (r*cos(q(t)) - r)*N.x + r*sin(q(t))*N.y >>> pathway = WrappingPathway(pA, pB, cylinder) Now create a symbol ``F`` to describe the magnitude of the (expansile) force that will be produced along the pathway. The list of loads that ``KanesMethod`` requires can be produced by calling the pathway's ``to_loads`` method with ``F`` passed as the only argument. >>> F = symbols('F') >>> loads = pathway.to_loads(F) >>> [load.__class__(load.location, load.vector.simplify()) for load in loads] [(pA, F*N.y), (pB, F*sin(q(t))*N.x - F*cos(q(t))*N.y), (pO, - F*sin(q(t))*N.x + F*(cos(q(t)) - 1)*N.y)] Parameters ========== force : Expr Magnitude of the force acting along the length of the pathway. It is assumed that this ``Expr`` represents an expansile force. """ pA, pB = self.attachments pO = self.geometry.point pA_force, pB_force = self.geometry.geodesic_end_vectors(pA, pB) pO_force = -(pA_force + pB_force) loads = [ Force(pA, force * pA_force), Force(pB, force * pB_force), Force(pO, force * pO_force), ] return loads
WrappingPathway.to_loads
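A short sketch using the same setup as the doctest above: because the load applied at the cylinder's centre balances the two attachment loads, the three returned force vectors sum to zero.

from sympy import symbols, cos, sin
from sympy.physics.mechanics import (Point, ReferenceFrame, WrappingCylinder,
                                     WrappingPathway, dynamicsymbols)

N = ReferenceFrame('N')
r, F = symbols('r F', positive=True)
q = dynamicsymbols('q')
pO = Point('pO')
cylinder = WrappingCylinder(r, pO, N.z)
pA, pB = Point('pA'), Point('pB')
pA.set_pos(pO, r*N.x)
pB.set_pos(pO, r*(cos(q)*N.x + sin(q)*N.y))
loads = WrappingPathway(pA, pB, cylinder).to_loads(F)
total = loads[0].vector + loads[1].vector + loads[2].vector
print(total.simplify())  # 0: the pathway introduces no net force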
sympy
30
sympy/calculus/finite_diff.py
def _as_finite_diff(derivative, points=1, x0=None, wrt=None): """ Returns an approximation of a derivative of a function in the form of a finite difference formula. The expression is a weighted sum of the function at a number of discrete values of (one of) the independent variable(s). Parameters ========== derivative: a Derivative instance points: sequence or coefficient, optional If sequence: discrete values (length >= order+1) of the independent variable used for generating the finite difference weights. If it is a coefficient, it will be used as the step-size for generating an equidistant sequence of length order+1 centered around ``x0``. default: 1 (step-size 1) x0: number or Symbol, optional the value of the independent variable (``wrt``) at which the derivative is to be approximated. Default: same as ``wrt``. wrt: Symbol, optional "with respect to" the variable for which the (partial) derivative is to be approximated for. If not provided it is required that the Derivative is ordinary. Default: ``None``. Examples ======== >>> from sympy import symbols, Function, exp, sqrt, Symbol >>> from sympy.calculus.finite_diff import _as_finite_diff >>> x, h = symbols('x h') >>> f = Function('f') >>> _as_finite_diff(f(x).diff(x)) -f(x - 1/2) + f(x + 1/2) The default step size and number of points are 1 and ``order + 1`` respectively. We can change the step size by passing a symbol as a parameter: >>> _as_finite_diff(f(x).diff(x), h) -f(-h/2 + x)/h + f(h/2 + x)/h We can also specify the discretized values to be used in a sequence: >>> _as_finite_diff(f(x).diff(x), [x, x+h, x+2*h]) -3*f(x)/(2*h) + 2*f(h + x)/h - f(2*h + x)/(2*h) The algorithm is not restricted to use equidistant spacing, nor do we need to make the approximation around ``x0``, but we can get an expression estimating the derivative at an offset: >>> e, sq2 = exp(1), sqrt(2) >>> xl = [x-h, x+h, x+e*h] >>> _as_finite_diff(f(x).diff(x, 1), xl, x+h*sq2) 2*h*((h + sqrt(2)*h)/(2*h) - (-sqrt(2)*h + h)/(2*h))*f(E*h + x)/((-h + E*h)*(h + E*h)) + (-(-sqrt(2)*h + h)/(2*h) - (-sqrt(2)*h + E*h)/(2*h))*f(-h + x)/(h + E*h) + (-(h + sqrt(2)*h)/(2*h) + (-sqrt(2)*h + E*h)/(2*h))*f(h + x)/(-h + E*h) Partial derivatives are also supported: >>> y = Symbol('y') >>> d2fdxdy=f(x,y).diff(x,y) >>> _as_finite_diff(d2fdxdy, wrt=x) -Derivative(f(x - 1/2, y), y) + Derivative(f(x + 1/2, y), y) See also ======== sympy.calculus.finite_diff.apply_finite_diff sympy.calculus.finite_diff.finite_diff_weights """
/usr/src/app/target_test_cases/failed_tests__as_finite_diff.txt
def _as_finite_diff(derivative, points=1, x0=None, wrt=None): """ Returns an approximation of a derivative of a function in the form of a finite difference formula. The expression is a weighted sum of the function at a number of discrete values of (one of) the independent variable(s). Parameters ========== derivative: a Derivative instance points: sequence or coefficient, optional If sequence: discrete values (length >= order+1) of the independent variable used for generating the finite difference weights. If it is a coefficient, it will be used as the step-size for generating an equidistant sequence of length order+1 centered around ``x0``. default: 1 (step-size 1) x0: number or Symbol, optional the value of the independent variable (``wrt``) at which the derivative is to be approximated. Default: same as ``wrt``. wrt: Symbol, optional "with respect to" the variable for which the (partial) derivative is to be approximated for. If not provided it is required that the Derivative is ordinary. Default: ``None``. Examples ======== >>> from sympy import symbols, Function, exp, sqrt, Symbol >>> from sympy.calculus.finite_diff import _as_finite_diff >>> x, h = symbols('x h') >>> f = Function('f') >>> _as_finite_diff(f(x).diff(x)) -f(x - 1/2) + f(x + 1/2) The default step size and number of points are 1 and ``order + 1`` respectively. We can change the step size by passing a symbol as a parameter: >>> _as_finite_diff(f(x).diff(x), h) -f(-h/2 + x)/h + f(h/2 + x)/h We can also specify the discretized values to be used in a sequence: >>> _as_finite_diff(f(x).diff(x), [x, x+h, x+2*h]) -3*f(x)/(2*h) + 2*f(h + x)/h - f(2*h + x)/(2*h) The algorithm is not restricted to use equidistant spacing, nor do we need to make the approximation around ``x0``, but we can get an expression estimating the derivative at an offset: >>> e, sq2 = exp(1), sqrt(2) >>> xl = [x-h, x+h, x+e*h] >>> _as_finite_diff(f(x).diff(x, 1), xl, x+h*sq2) 2*h*((h + sqrt(2)*h)/(2*h) - (-sqrt(2)*h + h)/(2*h))*f(E*h + x)/((-h + E*h)*(h + E*h)) + (-(-sqrt(2)*h + h)/(2*h) - (-sqrt(2)*h + E*h)/(2*h))*f(-h + x)/(h + E*h) + (-(h + sqrt(2)*h)/(2*h) + (-sqrt(2)*h + E*h)/(2*h))*f(h + x)/(-h + E*h) Partial derivatives are also supported: >>> y = Symbol('y') >>> d2fdxdy=f(x,y).diff(x,y) >>> _as_finite_diff(d2fdxdy, wrt=x) -Derivative(f(x - 1/2, y), y) + Derivative(f(x + 1/2, y), y) See also ======== sympy.calculus.finite_diff.apply_finite_diff sympy.calculus.finite_diff.finite_diff_weights """ if derivative.is_Derivative: pass elif derivative.is_Atom: return derivative else: return derivative.fromiter( [_as_finite_diff(ar, points, x0, wrt) for ar in derivative.args], **derivative.assumptions0) if wrt is None: old = None for v in derivative.variables: if old is v: continue derivative = _as_finite_diff(derivative, points, x0, v) old = v return derivative order = derivative.variables.count(wrt) if x0 is None: x0 = wrt if not iterable(points): if getattr(points, 'is_Function', False) and wrt in points.args: points = points.subs(wrt, x0) # points is simply the step-size, let's make it a # equidistant sequence centered around x0 if order % 2 == 0: # even order => odd number of points, grid point included points = [x0 + points*i for i in range(-order//2, order//2 + 1)] else: # odd order => even number of points, half-way wrt grid point points = [x0 + points*S(i)/2 for i in range(-order, order + 1, 2)] others = [wrt, 0] for v in set(derivative.variables): if v == wrt: continue others += [v, derivative.variables.count(v)] if len(points) < order+1: raise 
ValueError("Too few points for order %d" % order)
    return apply_finite_diff(order, points, [
        Derivative(derivative.expr.subs({wrt: x}), *others) for x in points], x0)
_as_finite_diff
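A quick numerical check of the three-point stencil from the doctest above (the sample point and step size are arbitrary choices): replacing the undefined function with sin and substituting numbers reproduces cos to roughly O(h**2).

from sympy import Function, symbols, sin, cos
from sympy.calculus.finite_diff import _as_finite_diff

x, h = symbols('x h')
f = Function('f')
stencil = _as_finite_diff(f(x).diff(x), [x, x + h, x + 2*h])
# -3*f(x)/(2*h) + 2*f(h + x)/h - f(2*h + x)/(2*h)
numeric = stencil.replace(f, sin).subs({x: 1.0, h: 1e-3})
print(numeric, cos(1.0))  # both approximately 0.5403; difference ~1e-7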
sympy
31
sympy/core/exprtools.py
def _mask_nc(eq, name=None): """ Return ``eq`` with non-commutative objects replaced with Dummy symbols. A dictionary that can be used to restore the original values is returned: if it is None, the expression is noncommutative and cannot be made commutative. The third value returned is a list of any non-commutative symbols that appear in the returned equation. Explanation =========== All non-commutative objects other than Symbols are replaced with a non-commutative Symbol. Identical objects will be identified by identical symbols. If there is only 1 non-commutative object in an expression it will be replaced with a commutative symbol. Otherwise, the non-commutative entities are retained and the calling routine should handle replacements in this case since some care must be taken to keep track of the ordering of symbols when they occur within Muls. Parameters ========== name : str ``name``, if given, is the name that will be used with numbered Dummy variables that will replace the non-commutative objects and is mainly used for doctesting purposes. Examples ======== >>> from sympy.physics.secondquant import Commutator, NO, F, Fd >>> from sympy import symbols >>> from sympy.core.exprtools import _mask_nc >>> from sympy.abc import x, y >>> A, B, C = symbols('A,B,C', commutative=False) One nc-symbol: >>> _mask_nc(A**2 - x**2, 'd') (_d0**2 - x**2, {_d0: A}, []) Multiple nc-symbols: >>> _mask_nc(A**2 - B**2, 'd') (A**2 - B**2, {}, [A, B]) An nc-object with nc-symbols but no others outside of it: >>> _mask_nc(1 + x*Commutator(A, B), 'd') (_d0*x + 1, {_d0: Commutator(A, B)}, []) >>> _mask_nc(NO(Fd(x)*F(y)), 'd') (_d0, {_d0: NO(CreateFermion(x)*AnnihilateFermion(y))}, []) Multiple nc-objects: >>> eq = x*Commutator(A, B) + x*Commutator(A, C)*Commutator(A, B) >>> _mask_nc(eq, 'd') (x*_d0 + x*_d1*_d0, {_d0: Commutator(A, B), _d1: Commutator(A, C)}, [_d0, _d1]) Multiple nc-objects and nc-symbols: >>> eq = A*Commutator(A, B) + B*Commutator(A, C) >>> _mask_nc(eq, 'd') (A*_d0 + B*_d1, {_d0: Commutator(A, B), _d1: Commutator(A, C)}, [_d0, _d1, A, B]) """
/usr/src/app/target_test_cases/failed_tests__mask_nc.txt
def _mask_nc(eq, name=None): """ Return ``eq`` with non-commutative objects replaced with Dummy symbols. A dictionary that can be used to restore the original values is returned: if it is None, the expression is noncommutative and cannot be made commutative. The third value returned is a list of any non-commutative symbols that appear in the returned equation. Explanation =========== All non-commutative objects other than Symbols are replaced with a non-commutative Symbol. Identical objects will be identified by identical symbols. If there is only 1 non-commutative object in an expression it will be replaced with a commutative symbol. Otherwise, the non-commutative entities are retained and the calling routine should handle replacements in this case since some care must be taken to keep track of the ordering of symbols when they occur within Muls. Parameters ========== name : str ``name``, if given, is the name that will be used with numbered Dummy variables that will replace the non-commutative objects and is mainly used for doctesting purposes. Examples ======== >>> from sympy.physics.secondquant import Commutator, NO, F, Fd >>> from sympy import symbols >>> from sympy.core.exprtools import _mask_nc >>> from sympy.abc import x, y >>> A, B, C = symbols('A,B,C', commutative=False) One nc-symbol: >>> _mask_nc(A**2 - x**2, 'd') (_d0**2 - x**2, {_d0: A}, []) Multiple nc-symbols: >>> _mask_nc(A**2 - B**2, 'd') (A**2 - B**2, {}, [A, B]) An nc-object with nc-symbols but no others outside of it: >>> _mask_nc(1 + x*Commutator(A, B), 'd') (_d0*x + 1, {_d0: Commutator(A, B)}, []) >>> _mask_nc(NO(Fd(x)*F(y)), 'd') (_d0, {_d0: NO(CreateFermion(x)*AnnihilateFermion(y))}, []) Multiple nc-objects: >>> eq = x*Commutator(A, B) + x*Commutator(A, C)*Commutator(A, B) >>> _mask_nc(eq, 'd') (x*_d0 + x*_d1*_d0, {_d0: Commutator(A, B), _d1: Commutator(A, C)}, [_d0, _d1]) Multiple nc-objects and nc-symbols: >>> eq = A*Commutator(A, B) + B*Commutator(A, C) >>> _mask_nc(eq, 'd') (A*_d0 + B*_d1, {_d0: Commutator(A, B), _d1: Commutator(A, C)}, [_d0, _d1, A, B]) """ name = name or 'mask' # Make Dummy() append sequential numbers to the name def numbered_names(): i = 0 while True: yield name + str(i) i += 1 names = numbered_names() def Dummy(*args, **kwargs): from .symbol import Dummy return Dummy(next(names), *args, **kwargs) expr = eq if expr.is_commutative: return eq, {}, [] # identify nc-objects; symbols and other rep = [] nc_obj = set() nc_syms = set() pot = preorder_traversal(expr, keys=default_sort_key) for i, a in enumerate(pot): if any(a == r[0] for r in rep): pot.skip() elif not a.is_commutative: if a.is_symbol: nc_syms.add(a) pot.skip() elif not (a.is_Add or a.is_Mul or a.is_Pow): nc_obj.add(a) pot.skip() # If there is only one nc symbol or object, it can be factored regularly # but polys is going to complain, so replace it with a Dummy. if len(nc_obj) == 1 and not nc_syms: rep.append((nc_obj.pop(), Dummy())) elif len(nc_syms) == 1 and not nc_obj: rep.append((nc_syms.pop(), Dummy())) # Any remaining nc-objects will be replaced with an nc-Dummy and # identified as an nc-Symbol to watch out for nc_obj = sorted(nc_obj, key=default_sort_key) for n in nc_obj: nc = Dummy(commutative=False) rep.append((n, nc)) nc_syms.add(nc) expr = expr.subs(rep) nc_syms = list(nc_syms) nc_syms.sort(key=default_sort_key) return expr, {v: k for k, v in rep}, nc_syms
_mask_nc
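A small sketch of the intended round trip: the returned mapping can be substituted back to recover the original expression once the masked (commutative) form has been processed.

from sympy import symbols
from sympy.core.exprtools import _mask_nc
from sympy.physics.secondquant import Commutator
from sympy.abc import x

A, B = symbols('A B', commutative=False)
expr = 1 + x*Commutator(A, B)
masked, rep, nc_syms = _mask_nc(expr, 'd')
print(masked)                    # _d0*x + 1, a commutative stand-in
print(masked.subs(rep) == expr)  # True: substituting the mapping restores expr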
sympy
32
sympy/solvers/simplex.py
def _primal_dual(M, factor=True): """return primal and dual function and constraints assuming that ``M = Matrix([[A, b], [c, d]])`` and the function ``c*x - d`` is being minimized with ``Ax >= b`` for nonnegative values of ``x``. The dual and its constraints will be for maximizing `b.T*y - d` subject to ``A.T*y <= c.T``. Examples ======== >>> from sympy.solvers.simplex import _primal_dual, lpmin, lpmax >>> from sympy import Matrix The following matrix represents the primal task of minimizing x + y + 7 for y >= x + 1 and y >= -2*x + 3. The dual task seeks to maximize x + 3*y + 7 with 2*y - x <= 1 and and x + y <= 1: >>> M = Matrix([ ... [-1, 1, 1], ... [ 2, 1, 3], ... [ 1, 1, -7]]) >>> p, d = _primal_dual(M) The minimum of the primal and maximum of the dual are the same (though they occur at different points): >>> lpmin(*p) (28/3, {x1: 2/3, x2: 5/3}) >>> lpmax(*d) (28/3, {y1: 1/3, y2: 2/3}) If the equivalent (but canonical) inequalities are desired, leave `factor=True`, otherwise the unmodified inequalities for M will be returned. >>> m = Matrix([ ... [-3, -2, 4, -2], ... [ 2, 0, 0, -2], ... [ 0, 1, -3, 0]]) >>> _primal_dual(m, False) # last condition is 2*x1 >= -2 ((x2 - 3*x3, [-3*x1 - 2*x2 + 4*x3 >= -2, 2*x1 >= -2]), (-2*y1 - 2*y2, [-3*y1 + 2*y2 <= 0, -2*y1 <= 1, 4*y1 <= -3])) >>> _primal_dual(m) # condition now x1 >= -1 ((x2 - 3*x3, [-3*x1 - 2*x2 + 4*x3 >= -2, x1 >= -1]), (-2*y1 - 2*y2, [-3*y1 + 2*y2 <= 0, -2*y1 <= 1, 4*y1 <= -3])) If you pass the transpose of the matrix, the primal will be identified as the standard minimization problem and the dual as the standard maximization: >>> _primal_dual(m.T) ((-2*x1 - 2*x2, [-3*x1 + 2*x2 >= 0, -2*x1 >= 1, 4*x1 >= -3]), (y2 - 3*y3, [-3*y1 - 2*y2 + 4*y3 <= -2, y1 <= -1])) A matrix must have some size or else None will be returned for the functions: >>> _primal_dual(Matrix([[1, 2]])) ((x1 - 2, []), (-2, [])) >>> _primal_dual(Matrix([])) ((None, []), (None, [])) References ========== .. [1] David Galvin, Relations between Primal and Dual www3.nd.edu/~dgalvin1/30210/30210_F07/presentations/dual_opt.pdf """
/usr/src/app/target_test_cases/failed_tests__primal_dual.txt
def _primal_dual(M, factor=True): """return primal and dual function and constraints assuming that ``M = Matrix([[A, b], [c, d]])`` and the function ``c*x - d`` is being minimized with ``Ax >= b`` for nonnegative values of ``x``. The dual and its constraints will be for maximizing `b.T*y - d` subject to ``A.T*y <= c.T``. Examples ======== >>> from sympy.solvers.simplex import _primal_dual, lpmin, lpmax >>> from sympy import Matrix The following matrix represents the primal task of minimizing x + y + 7 for y >= x + 1 and y >= -2*x + 3. The dual task seeks to maximize x + 3*y + 7 with 2*y - x <= 1 and and x + y <= 1: >>> M = Matrix([ ... [-1, 1, 1], ... [ 2, 1, 3], ... [ 1, 1, -7]]) >>> p, d = _primal_dual(M) The minimum of the primal and maximum of the dual are the same (though they occur at different points): >>> lpmin(*p) (28/3, {x1: 2/3, x2: 5/3}) >>> lpmax(*d) (28/3, {y1: 1/3, y2: 2/3}) If the equivalent (but canonical) inequalities are desired, leave `factor=True`, otherwise the unmodified inequalities for M will be returned. >>> m = Matrix([ ... [-3, -2, 4, -2], ... [ 2, 0, 0, -2], ... [ 0, 1, -3, 0]]) >>> _primal_dual(m, False) # last condition is 2*x1 >= -2 ((x2 - 3*x3, [-3*x1 - 2*x2 + 4*x3 >= -2, 2*x1 >= -2]), (-2*y1 - 2*y2, [-3*y1 + 2*y2 <= 0, -2*y1 <= 1, 4*y1 <= -3])) >>> _primal_dual(m) # condition now x1 >= -1 ((x2 - 3*x3, [-3*x1 - 2*x2 + 4*x3 >= -2, x1 >= -1]), (-2*y1 - 2*y2, [-3*y1 + 2*y2 <= 0, -2*y1 <= 1, 4*y1 <= -3])) If you pass the transpose of the matrix, the primal will be identified as the standard minimization problem and the dual as the standard maximization: >>> _primal_dual(m.T) ((-2*x1 - 2*x2, [-3*x1 + 2*x2 >= 0, -2*x1 >= 1, 4*x1 >= -3]), (y2 - 3*y3, [-3*y1 - 2*y2 + 4*y3 <= -2, y1 <= -1])) A matrix must have some size or else None will be returned for the functions: >>> _primal_dual(Matrix([[1, 2]])) ((x1 - 2, []), (-2, [])) >>> _primal_dual(Matrix([])) ((None, []), (None, [])) References ========== .. [1] David Galvin, Relations between Primal and Dual www3.nd.edu/~dgalvin1/30210/30210_F07/presentations/dual_opt.pdf """ if not M: return (None, []), (None, []) if not hasattr(M, "shape"): if len(M) not in (3, 4): raise ValueError("expecting Matrix or 3 or 4 lists") M = _m(*M) m, n = [i - 1 for i in M.shape] A, b, c, d = _abcd(M) d = d[0] _ = lambda x: numbered_symbols(x, start=1) x = Matrix([i for i, j in zip(_("x"), range(n))]) yT = Matrix([i for i, j in zip(_("y"), range(m))]).T def ineq(L, r, op): rv = [] for r in (op(i, j) for i, j in zip(L, r)): if r == True: continue elif r == False: return [False] if factor: f = factor_terms(r) if f.lhs.is_Mul and f.rhs % f.lhs.args[0] == 0: assert len(f.lhs.args) == 2, f.lhs k = f.lhs.args[0] r = r.func(sign(k) * f.lhs.args[1], f.rhs // abs(k)) rv.append(r) return rv eq = lambda x, d: x[0] - d if x else -d F = eq(c * x, d) f = eq(yT * b, d) return (F, ineq(A * x, b, Ge)), (f, ineq(yT * A, c, Le))
_primal_dual
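A sketch of the typical call pattern: unpack the primal and dual pairs and hand them to lpmin/lpmax, which for this feasible problem agree on the optimal value.

from sympy import Matrix
from sympy.solvers.simplex import _primal_dual, lpmin, lpmax

M = Matrix([[-1, 1, 1], [2, 1, 3], [1, 1, -7]])
(F, p_cons), (f, d_cons) = _primal_dual(M)
print(F)  # x1 + x2 + 7, i.e. c*x - d for this matrix
print(lpmin(F, p_cons)[0] == lpmax(f, d_cons)[0])  # True: both optima are 28/3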
sympy
33
sympy/solvers/simplex.py
def _simplex(A, B, C, D=None, dual=False): """Return ``(o, x, y)`` obtained from the two-phase simplex method using Bland's rule: ``o`` is the minimum value of primal, ``Cx - D``, under constraints ``Ax <= B`` (with ``x >= 0``) and the maximum of the dual, ``y^{T}B - D``, under constraints ``A^{T}*y >= C^{T}`` (with ``y >= 0``). To compute the dual of the system, pass `dual=True` and ``(o, y, x)`` will be returned. Note: the nonnegative constraints for ``x`` and ``y`` supercede any values of ``A`` and ``B`` that are inconsistent with that assumption, so if a constraint of ``x >= -1`` is represented in ``A`` and ``B``, no value will be obtained that is negative; if a constraint of ``x <= -1`` is represented, an error will be raised since no solution is possible. This routine relies on the ability of determining whether an expression is 0 or not. This is guaranteed if the input contains only Float or Rational entries. It will raise a TypeError if a relationship does not evaluate to True or False. Examples ======== >>> from sympy.solvers.simplex import _simplex >>> from sympy import Matrix Consider the simple minimization of ``f = x + y + 1`` under the constraint that ``y + 2*x >= 4``. This is the "standard form" of a minimization. In the nonnegative quadrant, this inequality describes a area above a triangle with vertices at (0, 4), (0, 0) and (2, 0). The minimum of ``f`` occurs at (2, 0). Define A, B, C, D for the standard minimization: >>> A = Matrix([[2, 1]]) >>> B = Matrix([4]) >>> C = Matrix([[1, 1]]) >>> D = Matrix([-1]) Confirm that this is the system of interest: >>> from sympy.abc import x, y >>> X = Matrix([x, y]) >>> (C*X - D)[0] x + y + 1 >>> [i >= j for i, j in zip(A*X, B)] [2*x + y >= 4] Since `_simplex` will do a minimization for constraints given as ``A*x <= B``, the signs of ``A`` and ``B`` must be negated since the currently correspond to a greater-than inequality: >>> _simplex(-A, -B, C, D) (3, [2, 0], [1/2]) The dual of minimizing ``f`` is maximizing ``F = c*y - d`` for ``a*y <= b`` where ``a``, ``b``, ``c``, ``d`` are derived from the transpose of the matrix representation of the standard minimization: >>> tr = lambda a, b, c, d: [i.T for i in (a, c, b, d)] >>> a, b, c, d = tr(A, B, C, D) This time ``a*x <= b`` is the expected inequality for the `_simplex` method, but to maximize ``F``, the sign of ``c`` and ``d`` must be changed (so that minimizing the negative will give the negative of the maximum of ``F``): >>> _simplex(a, b, -c, -d) (-3, [1/2], [2, 0]) The negative of ``F`` and the min of ``f`` are the same. The dual point `[1/2]` is the value of ``y`` that minimized ``F = c*y - d`` under constraints a*x <= b``: >>> y = Matrix(['y']) >>> (c*y - d)[0] 4*y + 1 >>> [i <= j for i, j in zip(a*y,b)] [2*y <= 1, y <= 1] In this 1-dimensional dual system, the more restrictive contraint is the first which limits ``y`` between 0 and 1/2 and the maximum of ``F`` is attained at the nonzero value, hence is ``4*(1/2) + 1 = 3``. In this case the values for ``x`` and ``y`` were the same when the dual representation was solved. This is not always the case (though the value of the function will be the same). 
>>> l = [[1, 1], [-1, 1], [0, 1], [-1, 0]], [5, 1, 2, -1], [[1, 1]], [-1] >>> A, B, C, D = [Matrix(i) for i in l] >>> _simplex(A, B, -C, -D) (-6, [3, 2], [1, 0, 0, 0]) >>> _simplex(A, B, -C, -D, dual=True) # [5, 0] != [3, 2] (-6, [1, 0, 0, 0], [5, 0]) In both cases the function has the same value: >>> Matrix(C)*Matrix([3, 2]) == Matrix(C)*Matrix([5, 0]) True See Also ======== _lp - poses min/max problem in form compatible with _simplex lpmin - minimization which calls _lp lpmax - maximimzation which calls _lp References ========== .. [1] Thomas S. Ferguson, LINEAR PROGRAMMING: A Concise Introduction web.tecnico.ulisboa.pt/mcasquilho/acad/or/ftp/FergusonUCLA_lp.pdf """
/usr/src/app/target_test_cases/failed_tests__simplex.txt
def _simplex(A, B, C, D=None, dual=False): """Return ``(o, x, y)`` obtained from the two-phase simplex method using Bland's rule: ``o`` is the minimum value of primal, ``Cx - D``, under constraints ``Ax <= B`` (with ``x >= 0``) and the maximum of the dual, ``y^{T}B - D``, under constraints ``A^{T}*y >= C^{T}`` (with ``y >= 0``). To compute the dual of the system, pass `dual=True` and ``(o, y, x)`` will be returned. Note: the nonnegative constraints for ``x`` and ``y`` supercede any values of ``A`` and ``B`` that are inconsistent with that assumption, so if a constraint of ``x >= -1`` is represented in ``A`` and ``B``, no value will be obtained that is negative; if a constraint of ``x <= -1`` is represented, an error will be raised since no solution is possible. This routine relies on the ability of determining whether an expression is 0 or not. This is guaranteed if the input contains only Float or Rational entries. It will raise a TypeError if a relationship does not evaluate to True or False. Examples ======== >>> from sympy.solvers.simplex import _simplex >>> from sympy import Matrix Consider the simple minimization of ``f = x + y + 1`` under the constraint that ``y + 2*x >= 4``. This is the "standard form" of a minimization. In the nonnegative quadrant, this inequality describes a area above a triangle with vertices at (0, 4), (0, 0) and (2, 0). The minimum of ``f`` occurs at (2, 0). Define A, B, C, D for the standard minimization: >>> A = Matrix([[2, 1]]) >>> B = Matrix([4]) >>> C = Matrix([[1, 1]]) >>> D = Matrix([-1]) Confirm that this is the system of interest: >>> from sympy.abc import x, y >>> X = Matrix([x, y]) >>> (C*X - D)[0] x + y + 1 >>> [i >= j for i, j in zip(A*X, B)] [2*x + y >= 4] Since `_simplex` will do a minimization for constraints given as ``A*x <= B``, the signs of ``A`` and ``B`` must be negated since the currently correspond to a greater-than inequality: >>> _simplex(-A, -B, C, D) (3, [2, 0], [1/2]) The dual of minimizing ``f`` is maximizing ``F = c*y - d`` for ``a*y <= b`` where ``a``, ``b``, ``c``, ``d`` are derived from the transpose of the matrix representation of the standard minimization: >>> tr = lambda a, b, c, d: [i.T for i in (a, c, b, d)] >>> a, b, c, d = tr(A, B, C, D) This time ``a*x <= b`` is the expected inequality for the `_simplex` method, but to maximize ``F``, the sign of ``c`` and ``d`` must be changed (so that minimizing the negative will give the negative of the maximum of ``F``): >>> _simplex(a, b, -c, -d) (-3, [1/2], [2, 0]) The negative of ``F`` and the min of ``f`` are the same. The dual point `[1/2]` is the value of ``y`` that minimized ``F = c*y - d`` under constraints a*x <= b``: >>> y = Matrix(['y']) >>> (c*y - d)[0] 4*y + 1 >>> [i <= j for i, j in zip(a*y,b)] [2*y <= 1, y <= 1] In this 1-dimensional dual system, the more restrictive contraint is the first which limits ``y`` between 0 and 1/2 and the maximum of ``F`` is attained at the nonzero value, hence is ``4*(1/2) + 1 = 3``. In this case the values for ``x`` and ``y`` were the same when the dual representation was solved. This is not always the case (though the value of the function will be the same). 
>>> l = [[1, 1], [-1, 1], [0, 1], [-1, 0]], [5, 1, 2, -1], [[1, 1]], [-1] >>> A, B, C, D = [Matrix(i) for i in l] >>> _simplex(A, B, -C, -D) (-6, [3, 2], [1, 0, 0, 0]) >>> _simplex(A, B, -C, -D, dual=True) # [5, 0] != [3, 2] (-6, [1, 0, 0, 0], [5, 0]) In both cases the function has the same value: >>> Matrix(C)*Matrix([3, 2]) == Matrix(C)*Matrix([5, 0]) True See Also ======== _lp - poses min/max problem in form compatible with _simplex lpmin - minimization which calls _lp lpmax - maximimzation which calls _lp References ========== .. [1] Thomas S. Ferguson, LINEAR PROGRAMMING: A Concise Introduction web.tecnico.ulisboa.pt/mcasquilho/acad/or/ftp/FergusonUCLA_lp.pdf """ A, B, C, D = [Matrix(i) for i in (A, B, C, D or [0])] if dual: _o, d, p = _simplex(-A.T, C.T, B.T, -D) return -_o, d, p if A and B: M = Matrix([[A, B], [C, D]]) else: if A or B: raise ValueError("must give A and B") # no constraints given M = Matrix([[C, D]]) n = M.cols - 1 m = M.rows - 1 if not all(i.is_Float or i.is_Rational for i in M): # with literal Float and Rational we are guaranteed the # ability of determining whether an expression is 0 or not raise TypeError(filldedent(""" Only rationals and floats are allowed. """ ) ) # x variables have priority over y variables during Bland's rule # since False < True X = [(False, j) for j in range(n)] Y = [(True, i) for i in range(m)] # Phase 1: find a feasible solution or determine none exist ## keep track of last pivot row and column last = None while True: B = M[:-1, -1] A = M[:-1, :-1] if all(B[i] >= 0 for i in range(B.rows)): # We have found a feasible solution break # Find k: first row with a negative rightmost entry for k in range(B.rows): if B[k] < 0: break # use current value of k below else: pass # error will raise below # Choose pivot column, c piv_cols = [_ for _ in range(A.cols) if A[k, _] < 0] if not piv_cols: raise InfeasibleLPError(filldedent(""" The constraint set is empty!""")) _, c = min((X[i], i) for i in piv_cols) # Bland's rule # Choose pivot row, r piv_rows = [_ for _ in range(A.rows) if A[_, c] > 0 and B[_] > 0] piv_rows.append(k) r = _choose_pivot_row(A, B, piv_rows, c, Y) # check for oscillation if (r, c) == last: # Not sure what to do here; it looks like there will be # oscillations; see o1 test added at this commit to # see a system with no solution and the o2 for one # with a solution. In the case of o2, the solution # from linprog is the same as the one from lpmin, but # the matrices created in the lpmin case are different # than those created without replacements in linprog and # the matrices in the linprog case lead to oscillations. # If the matrices could be re-written in linprog like # lpmin does, this behavior could be avoided and then # perhaps the oscillating case would only occur when # there is no solution. For now, the output is checked # before exit if oscillations were detected and an # error is raised there if the solution was invalid. 
# # cf section 6 of Ferguson for a non-cycling modification last = True break last = r, c M = _pivot(M, r, c) X[c], Y[r] = Y[r], X[c] # Phase 2: from a feasible solution, pivot to optimal while True: B = M[:-1, -1] A = M[:-1, :-1] C = M[-1, :-1] # Choose a pivot column, c piv_cols = [_ for _ in range(n) if C[_] < 0] if not piv_cols: break _, c = min((X[i], i) for i in piv_cols) # Bland's rule # Choose a pivot row, r piv_rows = [_ for _ in range(m) if A[_, c] > 0] if not piv_rows: raise UnboundedLPError(filldedent(""" Objective function can assume arbitrarily large values!""")) r = _choose_pivot_row(A, B, piv_rows, c, Y) M = _pivot(M, r, c) X[c], Y[r] = Y[r], X[c] argmax = [None] * n argmin_dual = [None] * m for i, (v, n) in enumerate(X): if v == False: argmax[n] = 0 else: argmin_dual[n] = M[-1, i] for i, (v, n) in enumerate(Y): if v == True: argmin_dual[n] = 0 else: argmax[n] = M[i, -1] if last and not all(i >= 0 for i in argmax + argmin_dual): raise InfeasibleLPError(filldedent(""" Oscillating system led to invalid solution. If you believe there was a valid solution, please report this as a bug.""")) return -M[-1, -1], argmax, argmin_dual
_simplex
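A small consistency check on the docstring's example system: the objective Cx - D evaluated at the returned primal point equals the returned optimum.

from sympy import Matrix
from sympy.solvers.simplex import _simplex

A, B = Matrix([[2, 1]]), Matrix([4])
C, D = Matrix([[1, 1]]), Matrix([-1])
o, xopt, yopt = _simplex(-A, -B, C, D)  # (3, [2, 0], [1/2])
print((C*Matrix(xopt) - D)[0] == o)     # True: 1*2 + 1*0 + 1 == 3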
sympy
34
sympy/solvers/inequalities.py
def _solve_inequality(ie, s, linear=False): """Return the inequality with s isolated on the left, if possible. If the relationship is non-linear, a solution involving And or Or may be returned. False or True are returned if the relationship is never True or always True, respectively. If `linear` is True (default is False) an `s`-dependent expression will be isolated on the left, if possible but it will not be solved for `s` unless the expression is linear in `s`. Furthermore, only "safe" operations which do not change the sense of the relationship are applied: no division by an unsigned value is attempted unless the relationship involves Eq or Ne and no division by a value not known to be nonzero is ever attempted. Examples ======== >>> from sympy import Eq, Symbol >>> from sympy.solvers.inequalities import _solve_inequality as f >>> from sympy.abc import x, y For linear expressions, the symbol can be isolated: >>> f(x - 2 < 0, x) x < 2 >>> f(-x - 6 < x, x) x > -3 Sometimes nonlinear relationships will be False >>> f(x**2 + 4 < 0, x) False Or they may involve more than one region of values: >>> f(x**2 - 4 < 0, x) (-2 < x) & (x < 2) To restrict the solution to a relational, set linear=True and only the x-dependent portion will be isolated on the left: >>> f(x**2 - 4 < 0, x, linear=True) x**2 < 4 Division of only nonzero quantities is allowed, so x cannot be isolated by dividing by y: >>> y.is_nonzero is None # it is unknown whether it is 0 or not True >>> f(x*y < 1, x) x*y < 1 And while an equality (or inequality) still holds after dividing by a non-zero quantity >>> nz = Symbol('nz', nonzero=True) >>> f(Eq(x*nz, 1), x) Eq(x, 1/nz) the sign must be known for other inequalities involving > or <: >>> f(x*nz <= 1, x) nz*x <= 1 >>> p = Symbol('p', positive=True) >>> f(x*p <= 1, x) x <= 1/p When there are denominators in the original expression that are removed by expansion, conditions for them will be returned as part of the result: >>> f(x < x*(2/x - 1), x) (x < 1) & Ne(x, 0) """
/usr/src/app/target_test_cases/failed_tests__solve_inequality.txt
def _solve_inequality(ie, s, linear=False): """Return the inequality with s isolated on the left, if possible. If the relationship is non-linear, a solution involving And or Or may be returned. False or True are returned if the relationship is never True or always True, respectively. If `linear` is True (default is False) an `s`-dependent expression will be isolated on the left, if possible but it will not be solved for `s` unless the expression is linear in `s`. Furthermore, only "safe" operations which do not change the sense of the relationship are applied: no division by an unsigned value is attempted unless the relationship involves Eq or Ne and no division by a value not known to be nonzero is ever attempted. Examples ======== >>> from sympy import Eq, Symbol >>> from sympy.solvers.inequalities import _solve_inequality as f >>> from sympy.abc import x, y For linear expressions, the symbol can be isolated: >>> f(x - 2 < 0, x) x < 2 >>> f(-x - 6 < x, x) x > -3 Sometimes nonlinear relationships will be False >>> f(x**2 + 4 < 0, x) False Or they may involve more than one region of values: >>> f(x**2 - 4 < 0, x) (-2 < x) & (x < 2) To restrict the solution to a relational, set linear=True and only the x-dependent portion will be isolated on the left: >>> f(x**2 - 4 < 0, x, linear=True) x**2 < 4 Division of only nonzero quantities is allowed, so x cannot be isolated by dividing by y: >>> y.is_nonzero is None # it is unknown whether it is 0 or not True >>> f(x*y < 1, x) x*y < 1 And while an equality (or inequality) still holds after dividing by a non-zero quantity >>> nz = Symbol('nz', nonzero=True) >>> f(Eq(x*nz, 1), x) Eq(x, 1/nz) the sign must be known for other inequalities involving > or <: >>> f(x*nz <= 1, x) nz*x <= 1 >>> p = Symbol('p', positive=True) >>> f(x*p <= 1, x) x <= 1/p When there are denominators in the original expression that are removed by expansion, conditions for them will be returned as part of the result: >>> f(x < x*(2/x - 1), x) (x < 1) & Ne(x, 0) """ from sympy.solvers.solvers import denoms if s not in ie.free_symbols: return ie if ie.rhs == s: ie = ie.reversed if ie.lhs == s and s not in ie.rhs.free_symbols: return ie def classify(ie, s, i): # return True or False if ie evaluates when substituting s with # i else None (if unevaluated) or NaN (when there is an error # in evaluating) try: v = ie.subs(s, i) if v is S.NaN: return v elif v not in (True, False): return return v except TypeError: return S.NaN rv = None oo = S.Infinity expr = ie.lhs - ie.rhs try: p = Poly(expr, s) if p.degree() == 0: rv = ie.func(p.as_expr(), 0) elif not linear and p.degree() > 1: # handle in except clause raise NotImplementedError except (PolynomialError, NotImplementedError): if not linear: try: rv = reduce_rational_inequalities([[ie]], s) except PolynomialError: rv = solve_univariate_inequality(ie, s) # remove restrictions wrt +/-oo that may have been # applied when using sets to simplify the relationship okoo = classify(ie, s, oo) if okoo is S.true and classify(rv, s, oo) is S.false: rv = rv.subs(s < oo, True) oknoo = classify(ie, s, -oo) if (oknoo is S.true and classify(rv, s, -oo) is S.false): rv = rv.subs(-oo < s, True) rv = rv.subs(s > -oo, True) if rv is S.true: rv = (s <= oo) if okoo is S.true else (s < oo) if oknoo is not S.true: rv = And(-oo < s, rv) else: p = Poly(expr) conds = [] if rv is None: e = p.as_expr() # this is in expanded form # Do a safe inversion of e, moving non-s terms # to the rhs and dividing by a nonzero factor if # the relational is Eq/Ne; for other 
relationals # the sign must also be positive or negative rhs = 0 b, ax = e.as_independent(s, as_Add=True) e -= b rhs -= b ef = factor_terms(e) a, e = ef.as_independent(s, as_Add=False) if (a.is_zero != False or # don't divide by potential 0 a.is_negative == a.is_positive is None and # if sign is not known then ie.rel_op not in ('!=', '==')): # reject if not Eq/Ne e = ef a = S.One rhs /= a if a.is_positive: rv = ie.func(e, rhs) else: rv = ie.reversed.func(e, rhs) # return conditions under which the value is # valid, too. beginning_denoms = denoms(ie.lhs) | denoms(ie.rhs) current_denoms = denoms(rv) for d in beginning_denoms - current_denoms: c = _solve_inequality(Eq(d, 0), s, linear=linear) if isinstance(c, Eq) and c.lhs == s: if classify(rv, s, c.rhs) is S.true: # rv is permitting this value but it shouldn't conds.append(~c) for i in (-oo, oo): if (classify(rv, s, i) is S.true and classify(ie, s, i) is not S.true): conds.append(s < i if i is oo else i < s) conds.append(rv) return And(*conds)
_solve_inequality
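A quick spot check of the nonlinear case from the doctest: points inside the interval satisfy the returned condition, points outside do not.

from sympy.solvers.inequalities import _solve_inequality
from sympy.abc import x

cond = _solve_inequality(x**2 - 4 < 0, x)  # (-2 < x) & (x < 2)
print(cond.subs(x, 0), cond.subs(x, 3))    # True False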
sympy
35
sympy/combinatorics/util.py
def _strip(g, base, orbits, transversals): """ Attempt to decompose a permutation using a (possibly partial) BSGS structure. Explanation =========== This is done by treating the sequence ``base`` as an actual base, and the orbits ``orbits`` and transversals ``transversals`` as basic orbits and transversals relative to it. This process is called "sifting". A sift is unsuccessful when a certain orbit element is not found or when after the sift the decomposition does not end with the identity element. The argument ``transversals`` is a list of dictionaries that provides transversal elements for the orbits ``orbits``. Parameters ========== g : permutation to be decomposed base : sequence of points orbits : list A list in which the ``i``-th entry is an orbit of ``base[i]`` under some subgroup of the pointwise stabilizer of ` `base[0], base[1], ..., base[i - 1]``. The groups themselves are implicit in this function since the only information we need is encoded in the orbits and transversals transversals : list A list of orbit transversals associated with the orbits *orbits*. Examples ======== >>> from sympy.combinatorics import Permutation, SymmetricGroup >>> from sympy.combinatorics.util import _strip >>> S = SymmetricGroup(5) >>> S.schreier_sims() >>> g = Permutation([0, 2, 3, 1, 4]) >>> _strip(g, S.base, S.basic_orbits, S.basic_transversals) ((4), 5) Notes ===== The algorithm is described in [1],pp.89-90. The reason for returning both the current state of the element being decomposed and the level at which the sifting ends is that they provide important information for the randomized version of the Schreier-Sims algorithm. References ========== .. [1] Holt, D., Eick, B., O'Brien, E."Handbook of computational group theory" See Also ======== sympy.combinatorics.perm_groups.PermutationGroup.schreier_sims sympy.combinatorics.perm_groups.PermutationGroup.schreier_sims_random """
/usr/src/app/target_test_cases/failed_tests__strip.txt
def _strip(g, base, orbits, transversals): """ Attempt to decompose a permutation using a (possibly partial) BSGS structure. Explanation =========== This is done by treating the sequence ``base`` as an actual base, and the orbits ``orbits`` and transversals ``transversals`` as basic orbits and transversals relative to it. This process is called "sifting". A sift is unsuccessful when a certain orbit element is not found or when after the sift the decomposition does not end with the identity element. The argument ``transversals`` is a list of dictionaries that provides transversal elements for the orbits ``orbits``. Parameters ========== g : permutation to be decomposed base : sequence of points orbits : list A list in which the ``i``-th entry is an orbit of ``base[i]`` under some subgroup of the pointwise stabilizer of ` `base[0], base[1], ..., base[i - 1]``. The groups themselves are implicit in this function since the only information we need is encoded in the orbits and transversals transversals : list A list of orbit transversals associated with the orbits *orbits*. Examples ======== >>> from sympy.combinatorics import Permutation, SymmetricGroup >>> from sympy.combinatorics.util import _strip >>> S = SymmetricGroup(5) >>> S.schreier_sims() >>> g = Permutation([0, 2, 3, 1, 4]) >>> _strip(g, S.base, S.basic_orbits, S.basic_transversals) ((4), 5) Notes ===== The algorithm is described in [1],pp.89-90. The reason for returning both the current state of the element being decomposed and the level at which the sifting ends is that they provide important information for the randomized version of the Schreier-Sims algorithm. References ========== .. [1] Holt, D., Eick, B., O'Brien, E."Handbook of computational group theory" See Also ======== sympy.combinatorics.perm_groups.PermutationGroup.schreier_sims sympy.combinatorics.perm_groups.PermutationGroup.schreier_sims_random """ h = g._array_form base_len = len(base) for i in range(base_len): beta = h[base[i]] if beta == base[i]: continue if beta not in orbits[i]: return _af_new(h), i + 1 u = transversals[i][beta]._array_form h = _af_rmul(_af_invert(u), h) return _af_new(h), base_len + 1
_strip
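A sketch of how the two returned values are usually read: for an element of the group the residue is the identity and the level is len(base) + 1, which is exactly what the doctest's ((4), 5) encodes.

from sympy.combinatorics import Permutation, SymmetricGroup
from sympy.combinatorics.util import _strip

S = SymmetricGroup(5)
S.schreier_sims()
g = Permutation([0, 2, 3, 1, 4])
h, level = _strip(g, S.base, S.basic_orbits, S.basic_transversals)
print(h.is_Identity, level == len(S.base) + 1)  # True True: the sift succeeded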
sympy
36
sympy/calculus/finite_diff.py
def apply_finite_diff(order, x_list, y_list, x0=S.Zero): """ Calculates the finite difference approximation of the derivative of requested order at ``x0`` from points provided in ``x_list`` and ``y_list``. Parameters ========== order: int order of derivative to approximate. 0 corresponds to interpolation. x_list: sequence Sequence of (unique) values for the independent variable. y_list: sequence The function value at corresponding values for the independent variable in x_list. x0: Number or Symbol At what value of the independent variable the derivative should be evaluated. Defaults to 0. Returns ======= sympy.core.add.Add or sympy.core.numbers.Number The finite difference expression approximating the requested derivative order at ``x0``. Examples ======== >>> from sympy import apply_finite_diff >>> cube = lambda arg: (1.0*arg)**3 >>> xlist = range(-3,3+1) >>> apply_finite_diff(2, xlist, map(cube, xlist), 2) - 12 # doctest: +SKIP -3.55271367880050e-15 we see that the example above only contain rounding errors. apply_finite_diff can also be used on more abstract objects: >>> from sympy import IndexedBase, Idx >>> x, y = map(IndexedBase, 'xy') >>> i = Idx('i') >>> x_list, y_list = zip(*[(x[i+j], y[i+j]) for j in range(-1,2)]) >>> apply_finite_diff(1, x_list, y_list, x[i]) ((x[i + 1] - x[i])/(-x[i - 1] + x[i]) - 1)*y[i]/(x[i + 1] - x[i]) - (x[i + 1] - x[i])*y[i - 1]/((x[i + 1] - x[i - 1])*(-x[i - 1] + x[i])) + (-x[i - 1] + x[i])*y[i + 1]/((x[i + 1] - x[i - 1])*(x[i + 1] - x[i])) Notes ===== Order = 0 corresponds to interpolation. Only supply so many points you think makes sense to around x0 when extracting the derivative (the function need to be well behaved within that region). Also beware of Runge's phenomenon. See also ======== sympy.calculus.finite_diff.finite_diff_weights References ========== Fortran 90 implementation with Python interface for numerics: finitediff_ .. _finitediff: https://github.com/bjodah/finitediff """
/usr/src/app/target_test_cases/failed_tests_apply_finite_diff.txt
def apply_finite_diff(order, x_list, y_list, x0=S.Zero): """ Calculates the finite difference approximation of the derivative of requested order at ``x0`` from points provided in ``x_list`` and ``y_list``. Parameters ========== order: int order of derivative to approximate. 0 corresponds to interpolation. x_list: sequence Sequence of (unique) values for the independent variable. y_list: sequence The function value at corresponding values for the independent variable in x_list. x0: Number or Symbol At what value of the independent variable the derivative should be evaluated. Defaults to 0. Returns ======= sympy.core.add.Add or sympy.core.numbers.Number The finite difference expression approximating the requested derivative order at ``x0``. Examples ======== >>> from sympy import apply_finite_diff >>> cube = lambda arg: (1.0*arg)**3 >>> xlist = range(-3,3+1) >>> apply_finite_diff(2, xlist, map(cube, xlist), 2) - 12 # doctest: +SKIP -3.55271367880050e-15 we see that the example above only contain rounding errors. apply_finite_diff can also be used on more abstract objects: >>> from sympy import IndexedBase, Idx >>> x, y = map(IndexedBase, 'xy') >>> i = Idx('i') >>> x_list, y_list = zip(*[(x[i+j], y[i+j]) for j in range(-1,2)]) >>> apply_finite_diff(1, x_list, y_list, x[i]) ((x[i + 1] - x[i])/(-x[i - 1] + x[i]) - 1)*y[i]/(x[i + 1] - x[i]) - (x[i + 1] - x[i])*y[i - 1]/((x[i + 1] - x[i - 1])*(-x[i - 1] + x[i])) + (-x[i - 1] + x[i])*y[i + 1]/((x[i + 1] - x[i - 1])*(x[i + 1] - x[i])) Notes ===== Order = 0 corresponds to interpolation. Only supply so many points you think makes sense to around x0 when extracting the derivative (the function need to be well behaved within that region). Also beware of Runge's phenomenon. See also ======== sympy.calculus.finite_diff.finite_diff_weights References ========== Fortran 90 implementation with Python interface for numerics: finitediff_ .. _finitediff: https://github.com/bjodah/finitediff """ # In the original paper the following holds for the notation: # M = order # N = len(x_list) - 1 N = len(x_list) - 1 if len(x_list) != len(y_list): raise ValueError("x_list and y_list not equal in length.") delta = finite_diff_weights(order, x_list, x0) derivative = 0 for nu in range(len(x_list)): derivative += delta[order][N][nu]*y_list[nu] return derivative
apply_finite_diff
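A short symbolic sketch: a three-point equidistant grid around x yields the familiar central-difference stencil for the first derivative.

from sympy import Function, symbols, simplify, apply_finite_diff

x, h = symbols('x h')
f = Function('f')
d1 = apply_finite_diff(1, [x - h, x, x + h], [f(x - h), f(x), f(x + h)], x)
print(simplify(d1))  # equivalent to (f(x + h) - f(x - h))/(2*h)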
sympy
37
sympy/assumptions/ask.py
def ask(proposition, assumptions=True, context=global_assumptions): """ Function to evaluate the proposition with assumptions. Explanation =========== This function evaluates the proposition to ``True`` or ``False`` if the truth value can be determined. If not, it returns ``None``. It should be discerned from :func:`~.refine` which, when applied to a proposition, simplifies the argument to symbolic ``Boolean`` instead of Python built-in ``True``, ``False`` or ``None``. **Syntax** * ask(proposition) Evaluate the *proposition* in global assumption context. * ask(proposition, assumptions) Evaluate the *proposition* with respect to *assumptions* in global assumption context. Parameters ========== proposition : Boolean Proposition which will be evaluated to boolean value. If this is not ``AppliedPredicate``, it will be wrapped by ``Q.is_true``. assumptions : Boolean, optional Local assumptions to evaluate the *proposition*. context : AssumptionsContext, optional Default assumptions to evaluate the *proposition*. By default, this is ``sympy.assumptions.global_assumptions`` variable. Returns ======= ``True``, ``False``, or ``None`` Raises ====== TypeError : *proposition* or *assumptions* is not valid logical expression. ValueError : assumptions are inconsistent. Examples ======== >>> from sympy import ask, Q, pi >>> from sympy.abc import x, y >>> ask(Q.rational(pi)) False >>> ask(Q.even(x*y), Q.even(x) & Q.integer(y)) True >>> ask(Q.prime(4*x), Q.integer(x)) False If the truth value cannot be determined, ``None`` will be returned. >>> print(ask(Q.odd(3*x))) # cannot determine unless we know x None ``ValueError`` is raised if assumptions are inconsistent. >>> ask(Q.integer(x), Q.even(x) & Q.odd(x)) Traceback (most recent call last): ... ValueError: inconsistent assumptions Q.even(x) & Q.odd(x) Notes ===== Relations in assumptions are not implemented (yet), so the following will not give a meaningful result. >>> ask(Q.positive(x), x > 0) It is however a work in progress. See Also ======== sympy.assumptions.refine.refine : Simplification using assumptions. Proposition is not reduced to ``None`` if the truth value cannot be determined. """
/usr/src/app/target_test_cases/failed_tests_ask.txt
def ask(proposition, assumptions=True, context=global_assumptions): """ Function to evaluate the proposition with assumptions. Explanation =========== This function evaluates the proposition to ``True`` or ``False`` if the truth value can be determined. If not, it returns ``None``. It should be discerned from :func:`~.refine` which, when applied to a proposition, simplifies the argument to symbolic ``Boolean`` instead of Python built-in ``True``, ``False`` or ``None``. **Syntax** * ask(proposition) Evaluate the *proposition* in global assumption context. * ask(proposition, assumptions) Evaluate the *proposition* with respect to *assumptions* in global assumption context. Parameters ========== proposition : Boolean Proposition which will be evaluated to boolean value. If this is not ``AppliedPredicate``, it will be wrapped by ``Q.is_true``. assumptions : Boolean, optional Local assumptions to evaluate the *proposition*. context : AssumptionsContext, optional Default assumptions to evaluate the *proposition*. By default, this is ``sympy.assumptions.global_assumptions`` variable. Returns ======= ``True``, ``False``, or ``None`` Raises ====== TypeError : *proposition* or *assumptions* is not valid logical expression. ValueError : assumptions are inconsistent. Examples ======== >>> from sympy import ask, Q, pi >>> from sympy.abc import x, y >>> ask(Q.rational(pi)) False >>> ask(Q.even(x*y), Q.even(x) & Q.integer(y)) True >>> ask(Q.prime(4*x), Q.integer(x)) False If the truth value cannot be determined, ``None`` will be returned. >>> print(ask(Q.odd(3*x))) # cannot determine unless we know x None ``ValueError`` is raised if assumptions are inconsistent. >>> ask(Q.integer(x), Q.even(x) & Q.odd(x)) Traceback (most recent call last): ... ValueError: inconsistent assumptions Q.even(x) & Q.odd(x) Notes ===== Relations in assumptions are not implemented (yet), so the following will not give a meaningful result. >>> ask(Q.positive(x), x > 0) It is however a work in progress. See Also ======== sympy.assumptions.refine.refine : Simplification using assumptions. Proposition is not reduced to ``None`` if the truth value cannot be determined. 
""" from sympy.assumptions.satask import satask from sympy.assumptions.lra_satask import lra_satask from sympy.logic.algorithms.lra_theory import UnhandledInput proposition = sympify(proposition) assumptions = sympify(assumptions) if isinstance(proposition, Predicate) or proposition.kind is not BooleanKind: raise TypeError("proposition must be a valid logical expression") if isinstance(assumptions, Predicate) or assumptions.kind is not BooleanKind: raise TypeError("assumptions must be a valid logical expression") binrelpreds = {Eq: Q.eq, Ne: Q.ne, Gt: Q.gt, Lt: Q.lt, Ge: Q.ge, Le: Q.le} if isinstance(proposition, AppliedPredicate): key, args = proposition.function, proposition.arguments elif proposition.func in binrelpreds: key, args = binrelpreds[type(proposition)], proposition.args else: key, args = Q.is_true, (proposition,) # convert local and global assumptions to CNF assump_cnf = CNF.from_prop(assumptions) assump_cnf.extend(context) # extract the relevant facts from assumptions with respect to args local_facts = _extract_all_facts(assump_cnf, args) # convert default facts and assumed facts to encoded CNF known_facts_cnf = get_all_known_facts() enc_cnf = EncodedCNF() enc_cnf.from_cnf(CNF(known_facts_cnf)) enc_cnf.add_from_cnf(local_facts) # check the satisfiability of given assumptions if local_facts.clauses and satisfiable(enc_cnf) is False: raise ValueError("inconsistent assumptions %s" % assumptions) # quick computation for single fact res = _ask_single_fact(key, local_facts) if res is not None: return res # direct resolution method, no logic res = key(*args)._eval_ask(assumptions) if res is not None: return bool(res) # using satask (still costly) res = satask(proposition, assumptions=assumptions, context=context) if res is not None: return res try: res = lra_satask(proposition, assumptions=assumptions, context=context) except UnhandledInput: return None return res
ask
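A brief sketch of the two calling styles described above: passing local assumptions directly, versus registering them in the global assumptions context that ask consults by default.

from sympy import ask, Q, symbols
from sympy.assumptions import global_assumptions

x = symbols('x')
print(ask(Q.positive(x), Q.positive(x) & Q.integer(x)))  # True, from local assumptions
global_assumptions.add(Q.positive(x))
print(ask(Q.positive(x)))   # True, taken from the global context
global_assumptions.clear()  # tidy up so later queries are unaffected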
sympy
38
sympy/matrices/sparsetools.py
def banded(*args, **kwargs): """Returns a SparseMatrix from the given dictionary describing the diagonals of the matrix. The keys are positive for upper diagonals and negative for those below the main diagonal. The values may be: * expressions or single-argument functions, * lists or tuples of values, * matrices Unless dimensions are given, the size of the returned matrix will be large enough to contain the largest non-zero value provided. kwargs ====== rows : rows of the resulting matrix; computed if not given. cols : columns of the resulting matrix; computed if not given. Examples ======== >>> from sympy import banded, ones, Matrix >>> from sympy.abc import x If explicit values are given in tuples, the matrix will autosize to contain all values, otherwise a single value is filled onto the entire diagonal: >>> banded({1: (1, 2, 3), -1: (4, 5, 6), 0: x}) Matrix([ [x, 1, 0, 0], [4, x, 2, 0], [0, 5, x, 3], [0, 0, 6, x]]) A function accepting a single argument can be used to fill the diagonal as a function of diagonal index (which starts at 0). The size (or shape) of the matrix must be given to obtain more than a 1x1 matrix: >>> s = lambda d: (1 + d)**2 >>> banded(5, {0: s, 2: s, -2: 2}) Matrix([ [1, 0, 1, 0, 0], [0, 4, 0, 4, 0], [2, 0, 9, 0, 9], [0, 2, 0, 16, 0], [0, 0, 2, 0, 25]]) The diagonal of matrices placed on a diagonal will coincide with the indicated diagonal: >>> vert = Matrix([1, 2, 3]) >>> banded({0: vert}, cols=3) Matrix([ [1, 0, 0], [2, 1, 0], [3, 2, 1], [0, 3, 2], [0, 0, 3]]) >>> banded(4, {0: ones(2)}) Matrix([ [1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]]) Errors are raised if the designated size will not hold all values an integral number of times. Here, the rows are designated as odd (but an even number is required to hold the off-diagonal 2x2 ones): >>> banded({0: 2, 1: ones(2)}, rows=5) Traceback (most recent call last): ... ValueError: sequence does not fit an integral number of times in the matrix And here, an even number of rows is given...but the square matrix has an even number of columns, too. As we saw in the previous example, an odd number is required: >>> banded(4, {0: 2, 1: ones(2)}) # trying to make 4x4 and cols must be odd Traceback (most recent call last): ... ValueError: sequence does not fit an integral number of times in the matrix A way around having to count rows is to enclosing matrix elements in a tuple and indicate the desired number of them to the right: >>> banded({0: 2, 2: (ones(2),)*3}) Matrix([ [2, 0, 1, 1, 0, 0, 0, 0], [0, 2, 1, 1, 0, 0, 0, 0], [0, 0, 2, 0, 1, 1, 0, 0], [0, 0, 0, 2, 1, 1, 0, 0], [0, 0, 0, 0, 2, 0, 1, 1], [0, 0, 0, 0, 0, 2, 1, 1]]) An error will be raised if more than one value is written to a given entry. Here, the ones overlap with the main diagonal if they are placed on the first diagonal: >>> banded({0: (2,)*5, 1: (ones(2),)*3}) Traceback (most recent call last): ... ValueError: collision at (1, 1) By placing a 0 at the bottom left of the 2x2 matrix of ones, the collision is avoided: >>> u2 = Matrix([ ... [1, 1], ... [0, 1]]) >>> banded({0: [2]*5, 1: [u2]*3}) Matrix([ [2, 1, 1, 0, 0, 0, 0], [0, 2, 1, 0, 0, 0, 0], [0, 0, 2, 1, 1, 0, 0], [0, 0, 0, 2, 1, 0, 0], [0, 0, 0, 0, 2, 1, 1], [0, 0, 0, 0, 0, 0, 1]]) """
/usr/src/app/target_test_cases/failed_tests_banded.txt
def banded(*args, **kwargs): """Returns a SparseMatrix from the given dictionary describing the diagonals of the matrix. The keys are positive for upper diagonals and negative for those below the main diagonal. The values may be: * expressions or single-argument functions, * lists or tuples of values, * matrices Unless dimensions are given, the size of the returned matrix will be large enough to contain the largest non-zero value provided. kwargs ====== rows : rows of the resulting matrix; computed if not given. cols : columns of the resulting matrix; computed if not given. Examples ======== >>> from sympy import banded, ones, Matrix >>> from sympy.abc import x If explicit values are given in tuples, the matrix will autosize to contain all values, otherwise a single value is filled onto the entire diagonal: >>> banded({1: (1, 2, 3), -1: (4, 5, 6), 0: x}) Matrix([ [x, 1, 0, 0], [4, x, 2, 0], [0, 5, x, 3], [0, 0, 6, x]]) A function accepting a single argument can be used to fill the diagonal as a function of diagonal index (which starts at 0). The size (or shape) of the matrix must be given to obtain more than a 1x1 matrix: >>> s = lambda d: (1 + d)**2 >>> banded(5, {0: s, 2: s, -2: 2}) Matrix([ [1, 0, 1, 0, 0], [0, 4, 0, 4, 0], [2, 0, 9, 0, 9], [0, 2, 0, 16, 0], [0, 0, 2, 0, 25]]) The diagonal of matrices placed on a diagonal will coincide with the indicated diagonal: >>> vert = Matrix([1, 2, 3]) >>> banded({0: vert}, cols=3) Matrix([ [1, 0, 0], [2, 1, 0], [3, 2, 1], [0, 3, 2], [0, 0, 3]]) >>> banded(4, {0: ones(2)}) Matrix([ [1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]]) Errors are raised if the designated size will not hold all values an integral number of times. Here, the rows are designated as odd (but an even number is required to hold the off-diagonal 2x2 ones): >>> banded({0: 2, 1: ones(2)}, rows=5) Traceback (most recent call last): ... ValueError: sequence does not fit an integral number of times in the matrix And here, an even number of rows is given...but the square matrix has an even number of columns, too. As we saw in the previous example, an odd number is required: >>> banded(4, {0: 2, 1: ones(2)}) # trying to make 4x4 and cols must be odd Traceback (most recent call last): ... ValueError: sequence does not fit an integral number of times in the matrix A way around having to count rows is to enclosing matrix elements in a tuple and indicate the desired number of them to the right: >>> banded({0: 2, 2: (ones(2),)*3}) Matrix([ [2, 0, 1, 1, 0, 0, 0, 0], [0, 2, 1, 1, 0, 0, 0, 0], [0, 0, 2, 0, 1, 1, 0, 0], [0, 0, 0, 2, 1, 1, 0, 0], [0, 0, 0, 0, 2, 0, 1, 1], [0, 0, 0, 0, 0, 2, 1, 1]]) An error will be raised if more than one value is written to a given entry. Here, the ones overlap with the main diagonal if they are placed on the first diagonal: >>> banded({0: (2,)*5, 1: (ones(2),)*3}) Traceback (most recent call last): ... ValueError: collision at (1, 1) By placing a 0 at the bottom left of the 2x2 matrix of ones, the collision is avoided: >>> u2 = Matrix([ ... [1, 1], ... 
[0, 1]]) >>> banded({0: [2]*5, 1: [u2]*3}) Matrix([ [2, 1, 1, 0, 0, 0, 0], [0, 2, 1, 0, 0, 0, 0], [0, 0, 2, 1, 1, 0, 0], [0, 0, 0, 2, 1, 0, 0], [0, 0, 0, 0, 2, 1, 1], [0, 0, 0, 0, 0, 0, 1]]) """ try: if len(args) not in (1, 2, 3): raise TypeError if not isinstance(args[-1], (dict, Dict)): raise TypeError if len(args) == 1: rows = kwargs.get('rows', None) cols = kwargs.get('cols', None) if rows is not None: rows = as_int(rows) if cols is not None: cols = as_int(cols) elif len(args) == 2: rows = cols = as_int(args[0]) else: rows, cols = map(as_int, args[:2]) # fails with ValueError if any keys are not ints _ = all(as_int(k) for k in args[-1]) except (ValueError, TypeError): raise TypeError(filldedent( '''unrecognized input to banded: expecting [[row,] col,] {int: value}''')) def rc(d): # return row,col coord of diagonal start r = -d if d < 0 else 0 c = 0 if r else d return r, c smat = {} undone = [] tba = Dummy() # first handle objects with size for d, v in args[-1].items(): r, c = rc(d) # note: only list and tuple are recognized since this # will allow other Basic objects like Tuple # into the matrix if so desired if isinstance(v, (list, tuple)): extra = 0 for i, vi in enumerate(v): i += extra if is_sequence(vi): vi = SparseMatrix(vi) smat[r + i, c + i] = vi extra += min(vi.shape) - 1 else: smat[r + i, c + i] = vi elif is_sequence(v): v = SparseMatrix(v) rv, cv = v.shape if rows and cols: nr, xr = divmod(rows - r, rv) nc, xc = divmod(cols - c, cv) x = xr or xc do = min(nr, nc) elif rows: do, x = divmod(rows - r, rv) elif cols: do, x = divmod(cols - c, cv) else: do = 1 x = 0 if x: raise ValueError(filldedent(''' sequence does not fit an integral number of times in the matrix''')) j = min(v.shape) for i in range(do): smat[r, c] = v r += j c += j elif v: smat[r, c] = tba undone.append((d, v)) s = SparseMatrix(None, smat) # to expand matrices smat = s.todok() # check for dim errors here if rows is not None and rows < s.rows: raise ValueError('Designated rows %s < needed %s' % (rows, s.rows)) if cols is not None and cols < s.cols: raise ValueError('Designated cols %s < needed %s' % (cols, s.cols)) if rows is cols is None: rows = s.rows cols = s.cols elif rows is not None and cols is None: cols = max(rows, s.cols) elif cols is not None and rows is None: rows = max(cols, s.rows) def update(i, j, v): # update smat and make sure there are # no collisions if v: if (i, j) in smat and smat[i, j] not in (tba, v): raise ValueError('collision at %s' % ((i, j),)) smat[i, j] = v if undone: for d, vi in undone: r, c = rc(d) v = vi if callable(vi) else lambda _: vi i = 0 while r + i < rows and c + i < cols: update(r + i, c + i, v(i)) i += 1 return SparseMatrix(rows, cols, smat)
banded
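A short usage sketch mirroring the first doctest above; nothing beyond the documented behaviour is assumed:

from sympy import banded
from sympy.abc import x

# tuples autosize the matrix; the bare value x fills the whole main diagonal
M = banded({1: (1, 2, 3), -1: (4, 5, 6), 0: x})
print(M.shape)    # (4, 4)
print(M[1, 0])    # 4, the first entry of the lower diagonal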
sympy
39
sympy/combinatorics/tensor_can.py
def canonicalize(g, dummies, msym, *v): """ canonicalize tensor formed by tensors Parameters ========== g : permutation representing the tensor dummies : list representing the dummy indices it can be a list of dummy indices of the same type or a list of lists of dummy indices, one list for each type of index; the dummy indices must come after the free indices, and put in order contravariant, covariant [d0, -d0, d1,-d1,...] msym : symmetry of the metric(s) it can be an integer or a list; in the first case it is the symmetry of the dummy index metric; in the second case it is the list of the symmetries of the index metric for each type v : list, (base_i, gens_i, n_i, sym_i) for tensors of type `i` base_i, gens_i : BSGS for tensors of this type. The BSGS should have minimal base under lexicographic ordering; if not, an attempt is made do get the minimal BSGS; in case of failure, canonicalize_naive is used, which is much slower. n_i : number of tensors of type `i`. sym_i : symmetry under exchange of component tensors of type `i`. Both for msym and sym_i the cases are * None no symmetry * 0 commuting * 1 anticommuting Returns ======= 0 if the tensor is zero, else return the array form of the permutation representing the canonical form of the tensor. Algorithm ========= First one uses canonical_free to get the minimum tensor under lexicographic order, using only the slot symmetries. If the component tensors have not minimal BSGS, it is attempted to find it; if the attempt fails canonicalize_naive is used instead. Compute the residual slot symmetry keeping fixed the free indices using tensor_gens(base, gens, list_free_indices, sym). Reduce the problem eliminating the free indices. Then use double_coset_can_rep and lift back the result reintroducing the free indices. Examples ======== one type of index with commuting metric; `A_{a b}` and `B_{a b}` antisymmetric and commuting `T = A_{d0 d1} * B^{d0}{}_{d2} * B^{d2 d1}` `ord = [d0,-d0,d1,-d1,d2,-d2]` order of the indices g = [1, 3, 0, 5, 4, 2, 6, 7] `T_c = 0` >>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, canonicalize, bsgs_direct_product >>> from sympy.combinatorics import Permutation >>> base2a, gens2a = get_symmetric_group_sgs(2, 1) >>> t0 = (base2a, gens2a, 1, 0) >>> t1 = (base2a, gens2a, 2, 0) >>> g = Permutation([1, 3, 0, 5, 4, 2, 6, 7]) >>> canonicalize(g, range(6), 0, t0, t1) 0 same as above, but with `B_{a b}` anticommuting `T_c = -A^{d0 d1} * B_{d0}{}^{d2} * B_{d1 d2}` can = [0,2,1,4,3,5,7,6] >>> t1 = (base2a, gens2a, 2, 1) >>> canonicalize(g, range(6), 0, t0, t1) [0, 2, 1, 4, 3, 5, 7, 6] two types of indices `[a,b,c,d,e,f]` and `[m,n]`, in this order, both with commuting metric `f^{a b c}` antisymmetric, commuting `A_{m a}` no symmetry, commuting `T = f^c{}_{d a} * f^f{}_{e b} * A_m{}^d * A^{m b} * A_n{}^a * A^{n e}` ord = [c,f,a,-a,b,-b,d,-d,e,-e,m,-m,n,-n] g = [0,7,3, 1,9,5, 11,6, 10,4, 13,2, 12,8, 14,15] The canonical tensor is `T_c = -f^{c a b} * f^{f d e} * A^m{}_a * A_{m d} * A^n{}_b * A_{n e}` can = [0,2,4, 1,6,8, 10,3, 11,7, 12,5, 13,9, 15,14] >>> base_f, gens_f = get_symmetric_group_sgs(3, 1) >>> base1, gens1 = get_symmetric_group_sgs(1) >>> base_A, gens_A = bsgs_direct_product(base1, gens1, base1, gens1) >>> t0 = (base_f, gens_f, 2, 0) >>> t1 = (base_A, gens_A, 4, 0) >>> dummies = [range(2, 10), range(10, 14)] >>> g = Permutation([0, 7, 3, 1, 9, 5, 11, 6, 10, 4, 13, 2, 12, 8, 14, 15]) >>> canonicalize(g, dummies, [0, 0], t0, t1) [0, 2, 4, 1, 6, 8, 10, 3, 11, 7, 12, 5, 13, 9, 15, 14] """
/usr/src/app/target_test_cases/failed_tests_canonicalize.txt
def canonicalize(g, dummies, msym, *v): """ canonicalize tensor formed by tensors Parameters ========== g : permutation representing the tensor dummies : list representing the dummy indices it can be a list of dummy indices of the same type or a list of lists of dummy indices, one list for each type of index; the dummy indices must come after the free indices, and put in order contravariant, covariant [d0, -d0, d1,-d1,...] msym : symmetry of the metric(s) it can be an integer or a list; in the first case it is the symmetry of the dummy index metric; in the second case it is the list of the symmetries of the index metric for each type v : list, (base_i, gens_i, n_i, sym_i) for tensors of type `i` base_i, gens_i : BSGS for tensors of this type. The BSGS should have minimal base under lexicographic ordering; if not, an attempt is made do get the minimal BSGS; in case of failure, canonicalize_naive is used, which is much slower. n_i : number of tensors of type `i`. sym_i : symmetry under exchange of component tensors of type `i`. Both for msym and sym_i the cases are * None no symmetry * 0 commuting * 1 anticommuting Returns ======= 0 if the tensor is zero, else return the array form of the permutation representing the canonical form of the tensor. Algorithm ========= First one uses canonical_free to get the minimum tensor under lexicographic order, using only the slot symmetries. If the component tensors have not minimal BSGS, it is attempted to find it; if the attempt fails canonicalize_naive is used instead. Compute the residual slot symmetry keeping fixed the free indices using tensor_gens(base, gens, list_free_indices, sym). Reduce the problem eliminating the free indices. Then use double_coset_can_rep and lift back the result reintroducing the free indices. 
Examples ======== one type of index with commuting metric; `A_{a b}` and `B_{a b}` antisymmetric and commuting `T = A_{d0 d1} * B^{d0}{}_{d2} * B^{d2 d1}` `ord = [d0,-d0,d1,-d1,d2,-d2]` order of the indices g = [1, 3, 0, 5, 4, 2, 6, 7] `T_c = 0` >>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, canonicalize, bsgs_direct_product >>> from sympy.combinatorics import Permutation >>> base2a, gens2a = get_symmetric_group_sgs(2, 1) >>> t0 = (base2a, gens2a, 1, 0) >>> t1 = (base2a, gens2a, 2, 0) >>> g = Permutation([1, 3, 0, 5, 4, 2, 6, 7]) >>> canonicalize(g, range(6), 0, t0, t1) 0 same as above, but with `B_{a b}` anticommuting `T_c = -A^{d0 d1} * B_{d0}{}^{d2} * B_{d1 d2}` can = [0,2,1,4,3,5,7,6] >>> t1 = (base2a, gens2a, 2, 1) >>> canonicalize(g, range(6), 0, t0, t1) [0, 2, 1, 4, 3, 5, 7, 6] two types of indices `[a,b,c,d,e,f]` and `[m,n]`, in this order, both with commuting metric `f^{a b c}` antisymmetric, commuting `A_{m a}` no symmetry, commuting `T = f^c{}_{d a} * f^f{}_{e b} * A_m{}^d * A^{m b} * A_n{}^a * A^{n e}` ord = [c,f,a,-a,b,-b,d,-d,e,-e,m,-m,n,-n] g = [0,7,3, 1,9,5, 11,6, 10,4, 13,2, 12,8, 14,15] The canonical tensor is `T_c = -f^{c a b} * f^{f d e} * A^m{}_a * A_{m d} * A^n{}_b * A_{n e}` can = [0,2,4, 1,6,8, 10,3, 11,7, 12,5, 13,9, 15,14] >>> base_f, gens_f = get_symmetric_group_sgs(3, 1) >>> base1, gens1 = get_symmetric_group_sgs(1) >>> base_A, gens_A = bsgs_direct_product(base1, gens1, base1, gens1) >>> t0 = (base_f, gens_f, 2, 0) >>> t1 = (base_A, gens_A, 4, 0) >>> dummies = [range(2, 10), range(10, 14)] >>> g = Permutation([0, 7, 3, 1, 9, 5, 11, 6, 10, 4, 13, 2, 12, 8, 14, 15]) >>> canonicalize(g, dummies, [0, 0], t0, t1) [0, 2, 4, 1, 6, 8, 10, 3, 11, 7, 12, 5, 13, 9, 15, 14] """ from sympy.combinatorics.testutil import canonicalize_naive if not isinstance(msym, list): if msym not in (0, 1, None): raise ValueError('msym must be 0, 1 or None') num_types = 1 else: num_types = len(msym) if not all(msymx in (0, 1, None) for msymx in msym): raise ValueError('msym entries must be 0, 1 or None') if len(dummies) != num_types: raise ValueError( 'dummies and msym must have the same number of elements') size = g.size num_tensors = 0 v1 = [] for base_i, gens_i, n_i, sym_i in v: # check that the BSGS is minimal; # this property is used in double_coset_can_rep; # if it is not minimal use canonicalize_naive if not _is_minimal_bsgs(base_i, gens_i): mbsgs = get_minimal_bsgs(base_i, gens_i) if not mbsgs: can = canonicalize_naive(g, dummies, msym, *v) return can base_i, gens_i = mbsgs v1.append((base_i, gens_i, [[]] * n_i, sym_i)) num_tensors += n_i if num_types == 1 and not isinstance(msym, list): dummies = [dummies] msym = [msym] flat_dummies = [] for dumx in dummies: flat_dummies.extend(dumx) if flat_dummies and flat_dummies != list(range(flat_dummies[0], flat_dummies[-1] + 1)): raise ValueError('dummies is not valid') # slot symmetry of the tensor size1, sbase, sgens = gens_products(*v1) if size != size1: raise ValueError( 'g has size %d, generators have size %d' % (size, size1)) free = [i for i in range(size - 2) if i not in flat_dummies] num_free = len(free) # g1 minimal tensor under slot symmetry g1 = canonical_free(sbase, sgens, g, num_free) if not flat_dummies: return g1 # save the sign of g1 sign = 0 if g1[-1] == size - 1 else 1 # the free indices are kept fixed. # Determine free_i, the list of slots of tensors which are fixed # since they are occupied by free indices, which are fixed. 
start = 0 for i, (base_i, gens_i, n_i, sym_i) in enumerate(v): free_i = [] len_tens = gens_i[0].size - 2 # for each component tensor get a list od fixed islots for j in range(n_i): # get the elements corresponding to the component tensor h = g1[start:(start + len_tens)] fr = [] # get the positions of the fixed elements in h for k in free: if k in h: fr.append(h.index(k)) free_i.append(fr) start += len_tens v1[i] = (base_i, gens_i, free_i, sym_i) # BSGS of the tensor with fixed free indices # if tensor_gens fails in gens_product, use canonicalize_naive size, sbase, sgens = gens_products(*v1) # reduce the permutations getting rid of the free indices pos_free = [g1.index(x) for x in range(num_free)] size_red = size - num_free g1_red = [x - num_free for x in g1 if x in flat_dummies] if sign: g1_red.extend([size_red - 1, size_red - 2]) else: g1_red.extend([size_red - 2, size_red - 1]) map_slots = _get_map_slots(size, pos_free) sbase_red = [map_slots[i] for i in sbase if i not in pos_free] sgens_red = [_af_new([map_slots[i] for i in y._array_form if i not in pos_free]) for y in sgens] dummies_red = [[x - num_free for x in y] for y in dummies] transv_red = get_transversals(sbase_red, sgens_red) g1_red = _af_new(g1_red) g2 = double_coset_can_rep( dummies_red, msym, sbase_red, sgens_red, transv_red, g1_red) if g2 == 0: return 0 # lift to the case with the free indices g3 = _lift_sgens(size, pos_free, free, g2) return g3
canonicalize
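A compact usage sketch repeating the first example from the docstring (the antisymmetric, commuting case whose canonical form is zero); only objects shown in the doctests are used:

from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, canonicalize
from sympy.combinatorics import Permutation

# T = A_{d0 d1} * B^{d0}_{d2} * B^{d2 d1}, with A and B antisymmetric and commuting
base2a, gens2a = get_symmetric_group_sgs(2, 1)
g = Permutation([1, 3, 0, 5, 4, 2, 6, 7])
t0 = (base2a, gens2a, 1, 0)   # one tensor A
t1 = (base2a, gens2a, 2, 0)   # two tensors B
print(canonicalize(g, range(6), 0, t0, t1))   # 0, the tensor vanishes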
sympy
40
sympy/printing/codeprinter.py
def ccode(expr, assign_to=None, standard='c99', **settings): """Converts an expr to a string of c code Parameters ========== expr : Expr A SymPy expression to be converted. assign_to : optional When given, the argument is used as the name of the variable to which the expression is assigned. Can be a string, ``Symbol``, ``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of line-wrapping, or for expressions that generate multi-line statements. standard : str, optional String specifying the standard. If your compiler supports a more modern standard you may set this to 'c99' to allow the printer to use more math functions. [default='c89']. precision : integer, optional The precision for numbers such as pi [default=17]. user_functions : dict, optional A dictionary where the keys are string representations of either ``FunctionClass`` or ``UndefinedFunction`` instances and the values are their desired C string representations. Alternatively, the dictionary value can be a list of tuples i.e. [(argument_test, cfunction_string)] or [(argument_test, cfunction_formater)]. See below for examples. dereference : iterable, optional An iterable of symbols that should be dereferenced in the printed code expression. These would be values passed by address to the function. For example, if ``dereference=[a]``, the resulting code would print ``(*a)`` instead of ``a``. human : bool, optional If True, the result is a single string that may contain some constant declarations for the number symbols. If False, the same information is returned in a tuple of (symbols_to_declare, not_supported_functions, code_text). [default=True]. contract: bool, optional If True, ``Indexed`` instances are assumed to obey tensor contraction rules and the corresponding nested loops over indices are generated. Setting contract=False will not generate loops, instead the user is responsible to provide values for the indices in the code. [default=True]. Examples ======== >>> from sympy import ccode, symbols, Rational, sin, ceiling, Abs, Function >>> x, tau = symbols("x, tau") >>> expr = (2*tau)**Rational(7, 2) >>> ccode(expr) '8*M_SQRT2*pow(tau, 7.0/2.0)' >>> ccode(expr, math_macros={}) '8*sqrt(2)*pow(tau, 7.0/2.0)' >>> ccode(sin(x), assign_to="s") 's = sin(x);' >>> from sympy.codegen.ast import real, float80 >>> ccode(expr, type_aliases={real: float80}) '8*M_SQRT2l*powl(tau, 7.0L/2.0L)' Simple custom printing can be defined for certain types by passing a dictionary of {"type" : "function"} to the ``user_functions`` kwarg. Alternatively, the dictionary value can be a list of tuples i.e. [(argument_test, cfunction_string)]. >>> custom_functions = { ... "ceiling": "CEIL", ... "Abs": [(lambda x: not x.is_integer, "fabs"), ... (lambda x: x.is_integer, "ABS")], ... "func": "f" ... } >>> func = Function('func') >>> ccode(func(Abs(x) + ceiling(x)), standard='C89', user_functions=custom_functions) 'f(fabs(x) + CEIL(x))' or if the C-function takes a subset of the original arguments: >>> ccode(2**x + 3**x, standard='C99', user_functions={'Pow': [ ... (lambda b, e: b == 2, lambda b, e: 'exp2(%s)' % e), ... (lambda b, e: b != 2, 'pow')]}) 'exp2(x) + pow(3, x)' ``Piecewise`` expressions are converted into conditionals. If an ``assign_to`` variable is provided an if statement is created, otherwise the ternary operator is used. Note that if the ``Piecewise`` lacks a default term, represented by ``(expr, True)`` then an error will be thrown. This is to prevent generating an expression that may not evaluate to anything. 
>>> from sympy import Piecewise >>> expr = Piecewise((x + 1, x > 0), (x, True)) >>> print(ccode(expr, tau, standard='C89')) if (x > 0) { tau = x + 1; } else { tau = x; } Support for loops is provided through ``Indexed`` types. With ``contract=True`` these expressions will be turned into loops, whereas ``contract=False`` will just print the assignment expression that should be looped over: >>> from sympy import Eq, IndexedBase, Idx >>> len_y = 5 >>> y = IndexedBase('y', shape=(len_y,)) >>> t = IndexedBase('t', shape=(len_y,)) >>> Dy = IndexedBase('Dy', shape=(len_y-1,)) >>> i = Idx('i', len_y-1) >>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i])) >>> ccode(e.rhs, assign_to=e.lhs, contract=False, standard='C89') 'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);' Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions must be provided to ``assign_to``. Note that any expression that can be generated normally can also exist inside a Matrix: >>> from sympy import Matrix, MatrixSymbol >>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)]) >>> A = MatrixSymbol('A', 3, 1) >>> print(ccode(mat, A, standard='C89')) A[0] = pow(x, 2); if (x > 0) { A[1] = x + 1; } else { A[1] = x; } A[2] = sin(x); """
/usr/src/app/target_test_cases/failed_tests_ccode.txt
def ccode(expr, assign_to=None, standard='c99', **settings): """Converts an expr to a string of c code Parameters ========== expr : Expr A SymPy expression to be converted. assign_to : optional When given, the argument is used as the name of the variable to which the expression is assigned. Can be a string, ``Symbol``, ``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of line-wrapping, or for expressions that generate multi-line statements. standard : str, optional String specifying the standard. If your compiler supports a more modern standard you may set this to 'c99' to allow the printer to use more math functions. [default='c89']. precision : integer, optional The precision for numbers such as pi [default=17]. user_functions : dict, optional A dictionary where the keys are string representations of either ``FunctionClass`` or ``UndefinedFunction`` instances and the values are their desired C string representations. Alternatively, the dictionary value can be a list of tuples i.e. [(argument_test, cfunction_string)] or [(argument_test, cfunction_formater)]. See below for examples. dereference : iterable, optional An iterable of symbols that should be dereferenced in the printed code expression. These would be values passed by address to the function. For example, if ``dereference=[a]``, the resulting code would print ``(*a)`` instead of ``a``. human : bool, optional If True, the result is a single string that may contain some constant declarations for the number symbols. If False, the same information is returned in a tuple of (symbols_to_declare, not_supported_functions, code_text). [default=True]. contract: bool, optional If True, ``Indexed`` instances are assumed to obey tensor contraction rules and the corresponding nested loops over indices are generated. Setting contract=False will not generate loops, instead the user is responsible to provide values for the indices in the code. [default=True]. Examples ======== >>> from sympy import ccode, symbols, Rational, sin, ceiling, Abs, Function >>> x, tau = symbols("x, tau") >>> expr = (2*tau)**Rational(7, 2) >>> ccode(expr) '8*M_SQRT2*pow(tau, 7.0/2.0)' >>> ccode(expr, math_macros={}) '8*sqrt(2)*pow(tau, 7.0/2.0)' >>> ccode(sin(x), assign_to="s") 's = sin(x);' >>> from sympy.codegen.ast import real, float80 >>> ccode(expr, type_aliases={real: float80}) '8*M_SQRT2l*powl(tau, 7.0L/2.0L)' Simple custom printing can be defined for certain types by passing a dictionary of {"type" : "function"} to the ``user_functions`` kwarg. Alternatively, the dictionary value can be a list of tuples i.e. [(argument_test, cfunction_string)]. >>> custom_functions = { ... "ceiling": "CEIL", ... "Abs": [(lambda x: not x.is_integer, "fabs"), ... (lambda x: x.is_integer, "ABS")], ... "func": "f" ... } >>> func = Function('func') >>> ccode(func(Abs(x) + ceiling(x)), standard='C89', user_functions=custom_functions) 'f(fabs(x) + CEIL(x))' or if the C-function takes a subset of the original arguments: >>> ccode(2**x + 3**x, standard='C99', user_functions={'Pow': [ ... (lambda b, e: b == 2, lambda b, e: 'exp2(%s)' % e), ... (lambda b, e: b != 2, 'pow')]}) 'exp2(x) + pow(3, x)' ``Piecewise`` expressions are converted into conditionals. If an ``assign_to`` variable is provided an if statement is created, otherwise the ternary operator is used. Note that if the ``Piecewise`` lacks a default term, represented by ``(expr, True)`` then an error will be thrown. This is to prevent generating an expression that may not evaluate to anything. 
>>> from sympy import Piecewise >>> expr = Piecewise((x + 1, x > 0), (x, True)) >>> print(ccode(expr, tau, standard='C89')) if (x > 0) { tau = x + 1; } else { tau = x; } Support for loops is provided through ``Indexed`` types. With ``contract=True`` these expressions will be turned into loops, whereas ``contract=False`` will just print the assignment expression that should be looped over: >>> from sympy import Eq, IndexedBase, Idx >>> len_y = 5 >>> y = IndexedBase('y', shape=(len_y,)) >>> t = IndexedBase('t', shape=(len_y,)) >>> Dy = IndexedBase('Dy', shape=(len_y-1,)) >>> i = Idx('i', len_y-1) >>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i])) >>> ccode(e.rhs, assign_to=e.lhs, contract=False, standard='C89') 'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);' Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions must be provided to ``assign_to``. Note that any expression that can be generated normally can also exist inside a Matrix: >>> from sympy import Matrix, MatrixSymbol >>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)]) >>> A = MatrixSymbol('A', 3, 1) >>> print(ccode(mat, A, standard='C89')) A[0] = pow(x, 2); if (x > 0) { A[1] = x + 1; } else { A[1] = x; } A[2] = sin(x); """ from sympy.printing.c import c_code_printers return c_code_printers[standard.lower()](settings).doprint(expr, assign_to)
ccode
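A brief usage sketch restating two of the doctests above; the printed strings are those documented for the default settings:

from sympy import ccode, symbols, sin, Rational

x, tau = symbols('x tau')
print(ccode(sin(x), assign_to='s'))       # s = sin(x);
print(ccode((2*tau)**Rational(7, 2)))     # 8*M_SQRT2*pow(tau, 7.0/2.0)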
sympy
41
sympy/utilities/codegen.py
def codegen(name_expr, language=None, prefix=None, project="project", to_files=False, header=True, empty=True, argument_sequence=None, global_vars=None, standard=None, code_gen=None, printer=None): """Generate source code for expressions in a given language. Parameters ========== name_expr : tuple, or list of tuples A single (name, expression) tuple or a list of (name, expression) tuples. Each tuple corresponds to a routine. If the expression is an equality (an instance of class Equality) the left hand side is considered an output argument. If expression is an iterable, then the routine will have multiple outputs. language : string, A string that indicates the source code language. This is case insensitive. Currently, 'C', 'F95' and 'Octave' are supported. 'Octave' generates code compatible with both Octave and Matlab. prefix : string, optional A prefix for the names of the files that contain the source code. Language-dependent suffixes will be appended. If omitted, the name of the first name_expr tuple is used. project : string, optional A project name, used for making unique preprocessor instructions. [default: "project"] to_files : bool, optional When True, the code will be written to one or more files with the given prefix, otherwise strings with the names and contents of these files are returned. [default: False] header : bool, optional When True, a header is written on top of each source file. [default: True] empty : bool, optional When True, empty lines are used to structure the code. [default: True] argument_sequence : iterable, optional Sequence of arguments for the routine in a preferred order. A CodeGenError is raised if required arguments are missing. Redundant arguments are used without warning. If omitted, arguments will be ordered alphabetically, but with all input arguments first, and then output or in-out arguments. global_vars : iterable, optional Sequence of global variables used by the routine. Variables listed here will not show up as function arguments. standard : string, optional code_gen : CodeGen instance, optional An instance of a CodeGen subclass. Overrides ``language``. printer : Printer instance, optional An instance of a Printer subclass. Examples ======== >>> from sympy.utilities.codegen import codegen >>> from sympy.abc import x, y, z >>> [(c_name, c_code), (h_name, c_header)] = codegen( ... ("f", x+y*z), "C89", "test", header=False, empty=False) >>> print(c_name) test.c >>> print(c_code) #include "test.h" #include <math.h> double f(double x, double y, double z) { double f_result; f_result = x + y*z; return f_result; } <BLANKLINE> >>> print(h_name) test.h >>> print(c_header) #ifndef PROJECT__TEST__H #define PROJECT__TEST__H double f(double x, double y, double z); #endif <BLANKLINE> Another example using Equality objects to give named outputs. Here the filename (prefix) is taken from the first (name, expr) pair. >>> from sympy.abc import f, g >>> from sympy import Eq >>> [(c_name, c_code), (h_name, c_header)] = codegen( ... [("myfcn", x + y), ("fcn2", [Eq(f, 2*x), Eq(g, y)])], ... 
"C99", header=False, empty=False) >>> print(c_name) myfcn.c >>> print(c_code) #include "myfcn.h" #include <math.h> double myfcn(double x, double y) { double myfcn_result; myfcn_result = x + y; return myfcn_result; } void fcn2(double x, double y, double *f, double *g) { (*f) = 2*x; (*g) = y; } <BLANKLINE> If the generated function(s) will be part of a larger project where various global variables have been defined, the 'global_vars' option can be used to remove the specified variables from the function signature >>> from sympy.utilities.codegen import codegen >>> from sympy.abc import x, y, z >>> [(f_name, f_code), header] = codegen( ... ("f", x+y*z), "F95", header=False, empty=False, ... argument_sequence=(x, y), global_vars=(z,)) >>> print(f_code) REAL*8 function f(x, y) implicit none REAL*8, intent(in) :: x REAL*8, intent(in) :: y f = x + y*z end function <BLANKLINE> """
/usr/src/app/target_test_cases/failed_tests_codegen.txt
def codegen(name_expr, language=None, prefix=None, project="project", to_files=False, header=True, empty=True, argument_sequence=None, global_vars=None, standard=None, code_gen=None, printer=None): """Generate source code for expressions in a given language. Parameters ========== name_expr : tuple, or list of tuples A single (name, expression) tuple or a list of (name, expression) tuples. Each tuple corresponds to a routine. If the expression is an equality (an instance of class Equality) the left hand side is considered an output argument. If expression is an iterable, then the routine will have multiple outputs. language : string, A string that indicates the source code language. This is case insensitive. Currently, 'C', 'F95' and 'Octave' are supported. 'Octave' generates code compatible with both Octave and Matlab. prefix : string, optional A prefix for the names of the files that contain the source code. Language-dependent suffixes will be appended. If omitted, the name of the first name_expr tuple is used. project : string, optional A project name, used for making unique preprocessor instructions. [default: "project"] to_files : bool, optional When True, the code will be written to one or more files with the given prefix, otherwise strings with the names and contents of these files are returned. [default: False] header : bool, optional When True, a header is written on top of each source file. [default: True] empty : bool, optional When True, empty lines are used to structure the code. [default: True] argument_sequence : iterable, optional Sequence of arguments for the routine in a preferred order. A CodeGenError is raised if required arguments are missing. Redundant arguments are used without warning. If omitted, arguments will be ordered alphabetically, but with all input arguments first, and then output or in-out arguments. global_vars : iterable, optional Sequence of global variables used by the routine. Variables listed here will not show up as function arguments. standard : string, optional code_gen : CodeGen instance, optional An instance of a CodeGen subclass. Overrides ``language``. printer : Printer instance, optional An instance of a Printer subclass. Examples ======== >>> from sympy.utilities.codegen import codegen >>> from sympy.abc import x, y, z >>> [(c_name, c_code), (h_name, c_header)] = codegen( ... ("f", x+y*z), "C89", "test", header=False, empty=False) >>> print(c_name) test.c >>> print(c_code) #include "test.h" #include <math.h> double f(double x, double y, double z) { double f_result; f_result = x + y*z; return f_result; } <BLANKLINE> >>> print(h_name) test.h >>> print(c_header) #ifndef PROJECT__TEST__H #define PROJECT__TEST__H double f(double x, double y, double z); #endif <BLANKLINE> Another example using Equality objects to give named outputs. Here the filename (prefix) is taken from the first (name, expr) pair. >>> from sympy.abc import f, g >>> from sympy import Eq >>> [(c_name, c_code), (h_name, c_header)] = codegen( ... [("myfcn", x + y), ("fcn2", [Eq(f, 2*x), Eq(g, y)])], ... 
"C99", header=False, empty=False) >>> print(c_name) myfcn.c >>> print(c_code) #include "myfcn.h" #include <math.h> double myfcn(double x, double y) { double myfcn_result; myfcn_result = x + y; return myfcn_result; } void fcn2(double x, double y, double *f, double *g) { (*f) = 2*x; (*g) = y; } <BLANKLINE> If the generated function(s) will be part of a larger project where various global variables have been defined, the 'global_vars' option can be used to remove the specified variables from the function signature >>> from sympy.utilities.codegen import codegen >>> from sympy.abc import x, y, z >>> [(f_name, f_code), header] = codegen( ... ("f", x+y*z), "F95", header=False, empty=False, ... argument_sequence=(x, y), global_vars=(z,)) >>> print(f_code) REAL*8 function f(x, y) implicit none REAL*8, intent(in) :: x REAL*8, intent(in) :: y f = x + y*z end function <BLANKLINE> """ # Initialize the code generator. if language is None: if code_gen is None: raise ValueError("Need either language or code_gen") else: if code_gen is not None: raise ValueError("You cannot specify both language and code_gen.") code_gen = get_code_generator(language, project, standard, printer) if isinstance(name_expr[0], str): # single tuple is given, turn it into a singleton list with a tuple. name_expr = [name_expr] if prefix is None: prefix = name_expr[0][0] # Construct Routines appropriate for this code_gen from (name, expr) pairs. routines = [] for name, expr in name_expr: routines.append(code_gen.routine(name, expr, argument_sequence, global_vars)) # Write the code. return code_gen.write(routines, prefix, to_files, header, empty)
codegen
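A minimal usage sketch based on the first doctest; it only inspects the returned (filename, source) pairs and writes nothing to disk:

from sympy.utilities.codegen import codegen
from sympy.abc import x, y, z

[(c_name, c_code), (h_name, c_header)] = codegen(
    ("f", x + y*z), "C89", "test", header=False, empty=False)
print(c_name)                 # test.c
print(h_name)                 # test.h
print("double f" in c_code)   # True: the generated C routine is in the source string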
sympy
42
sympy/discrete/convolutions.py
def convolution(a, b, cycle=0, dps=None, prime=None, dyadic=None, subset=None): """ Performs convolution by determining the type of desired convolution using hints. Exactly one of ``dps``, ``prime``, ``dyadic``, ``subset`` arguments should be specified explicitly for identifying the type of convolution, and the argument ``cycle`` can be specified optionally. For the default arguments, linear convolution is performed using **FFT**. Parameters ========== a, b : iterables The sequences for which convolution is performed. cycle : Integer Specifies the length for doing cyclic convolution. dps : Integer Specifies the number of decimal digits for precision for performing **FFT** on the sequence. prime : Integer Prime modulus of the form `(m 2^k + 1)` to be used for performing **NTT** on the sequence. dyadic : bool Identifies the convolution type as dyadic (*bitwise-XOR*) convolution, which is performed using **FWHT**. subset : bool Identifies the convolution type as subset convolution. Examples ======== >>> from sympy import convolution, symbols, S, I >>> u, v, w, x, y, z = symbols('u v w x y z') >>> convolution([1 + 2*I, 4 + 3*I], [S(5)/4, 6], dps=3) [1.25 + 2.5*I, 11.0 + 15.8*I, 24.0 + 18.0*I] >>> convolution([1, 2, 3], [4, 5, 6], cycle=3) [31, 31, 28] >>> convolution([111, 777], [888, 444], prime=19*2**10 + 1) [1283, 19351, 14219] >>> convolution([111, 777], [888, 444], prime=19*2**10 + 1, cycle=2) [15502, 19351] >>> convolution([u, v], [x, y, z], dyadic=True) [u*x + v*y, u*y + v*x, u*z, v*z] >>> convolution([u, v], [x, y, z], dyadic=True, cycle=2) [u*x + u*z + v*y, u*y + v*x + v*z] >>> convolution([u, v, w], [x, y, z], subset=True) [u*x, u*y + v*x, u*z + w*x, v*z + w*y] >>> convolution([u, v, w], [x, y, z], subset=True, cycle=3) [u*x + v*z + w*y, u*y + v*x, u*z + w*x] """
/usr/src/app/target_test_cases/failed_tests_convolution.txt
def convolution(a, b, cycle=0, dps=None, prime=None, dyadic=None, subset=None): """ Performs convolution by determining the type of desired convolution using hints. Exactly one of ``dps``, ``prime``, ``dyadic``, ``subset`` arguments should be specified explicitly for identifying the type of convolution, and the argument ``cycle`` can be specified optionally. For the default arguments, linear convolution is performed using **FFT**. Parameters ========== a, b : iterables The sequences for which convolution is performed. cycle : Integer Specifies the length for doing cyclic convolution. dps : Integer Specifies the number of decimal digits for precision for performing **FFT** on the sequence. prime : Integer Prime modulus of the form `(m 2^k + 1)` to be used for performing **NTT** on the sequence. dyadic : bool Identifies the convolution type as dyadic (*bitwise-XOR*) convolution, which is performed using **FWHT**. subset : bool Identifies the convolution type as subset convolution. Examples ======== >>> from sympy import convolution, symbols, S, I >>> u, v, w, x, y, z = symbols('u v w x y z') >>> convolution([1 + 2*I, 4 + 3*I], [S(5)/4, 6], dps=3) [1.25 + 2.5*I, 11.0 + 15.8*I, 24.0 + 18.0*I] >>> convolution([1, 2, 3], [4, 5, 6], cycle=3) [31, 31, 28] >>> convolution([111, 777], [888, 444], prime=19*2**10 + 1) [1283, 19351, 14219] >>> convolution([111, 777], [888, 444], prime=19*2**10 + 1, cycle=2) [15502, 19351] >>> convolution([u, v], [x, y, z], dyadic=True) [u*x + v*y, u*y + v*x, u*z, v*z] >>> convolution([u, v], [x, y, z], dyadic=True, cycle=2) [u*x + u*z + v*y, u*y + v*x + v*z] >>> convolution([u, v, w], [x, y, z], subset=True) [u*x, u*y + v*x, u*z + w*x, v*z + w*y] >>> convolution([u, v, w], [x, y, z], subset=True, cycle=3) [u*x + v*z + w*y, u*y + v*x, u*z + w*x] """ c = as_int(cycle) if c < 0: raise ValueError("The length for cyclic convolution " "must be non-negative") dyadic = True if dyadic else None subset = True if subset else None if sum(x is not None for x in (prime, dps, dyadic, subset)) > 1: raise TypeError("Ambiguity in determining the type of convolution") if prime is not None: ls = convolution_ntt(a, b, prime=prime) return ls if not c else [sum(ls[i::c]) % prime for i in range(c)] if dyadic: ls = convolution_fwht(a, b) elif subset: ls = convolution_subset(a, b) else: def loop(a): dens = [] for i in a: if isinstance(i, Rational) and i.q - 1: dens.append(i.q) elif not isinstance(i, int): return if dens: l = lcm(*dens) return [i*l if type(i) is int else i.p*(l//i.q) for i in a], l # no lcm of den to deal with return a, 1 ls = None da = loop(a) if da is not None: db = loop(b) if db is not None: (ia, ma), (ib, mb) = da, db den = ma*mb ls = convolution_int(ia, ib) if den != 1: ls = [Rational(i, den) for i in ls] if ls is None: ls = convolution_fft(a, b, dps) return ls if not c else [sum(ls[i::c]) for i in range(c)]
convolution
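A short usage sketch; the cyclic result matches the doctest above, and the linear result is the ordinary convolution of the two integer sequences:

from sympy import convolution

print(convolution([1, 2, 3], [4, 5, 6]))            # [4, 13, 28, 27, 18]
print(convolution([1, 2, 3], [4, 5, 6], cycle=3))   # [31, 31, 28]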
sympy
43
sympy/combinatorics/coset_table.py
def coset_enumeration_r(fp_grp, Y, max_cosets=None, draft=None, incomplete=False, modified=False): """ This is easier of the two implemented methods of coset enumeration. and is often called the HLT method, after Hazelgrove, Leech, Trotter The idea is that we make use of ``scan_and_fill`` makes new definitions whenever the scan is incomplete to enable the scan to complete; this way we fill in the gaps in the scan of the relator or subgroup generator, that's why the name relator-based method. An instance of `CosetTable` for `fp_grp` can be passed as the keyword argument `draft` in which case the coset enumeration will start with that instance and attempt to complete it. When `incomplete` is `True` and the function is unable to complete for some reason, the partially complete table will be returned. # TODO: complete the docstring See Also ======== scan_and_fill, Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r >>> F, x, y = free_group("x, y") # Example 5.1 from [1] >>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y]) >>> C = coset_enumeration_r(f, [x]) >>> for i in range(len(C.p)): ... if C.p[i] == i: ... print(C.table[i]) [0, 0, 1, 2] [1, 1, 2, 0] [2, 2, 0, 1] >>> C.p [0, 1, 2, 1, 1] # Example from exercises Q2 [1] >>> f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3]) >>> C = coset_enumeration_r(f, []) >>> C.compress(); C.standardize() >>> C.table [[1, 2, 3, 4], [5, 0, 6, 7], [0, 5, 7, 6], [7, 6, 5, 0], [6, 7, 0, 5], [2, 1, 4, 3], [3, 4, 2, 1], [4, 3, 1, 2]] # Example 5.2 >>> f = FpGroup(F, [x**2, y**3, (x*y)**3]) >>> Y = [x*y] >>> C = coset_enumeration_r(f, Y) >>> for i in range(len(C.p)): ... if C.p[i] == i: ... print(C.table[i]) [1, 1, 2, 1] [0, 0, 0, 2] [3, 3, 1, 0] [2, 2, 3, 3] # Example 5.3 >>> f = FpGroup(F, [x**2*y**2, x**3*y**5]) >>> Y = [] >>> C = coset_enumeration_r(f, Y) >>> for i in range(len(C.p)): ... if C.p[i] == i: ... print(C.table[i]) [1, 3, 1, 3] [2, 0, 2, 0] [3, 1, 3, 1] [0, 2, 0, 2] # Example 5.4 >>> F, a, b, c, d, e = free_group("a, b, c, d, e") >>> f = FpGroup(F, [a*b*c**-1, b*c*d**-1, c*d*e**-1, d*e*a**-1, e*a*b**-1]) >>> Y = [a] >>> C = coset_enumeration_r(f, Y) >>> for i in range(len(C.p)): ... if C.p[i] == i: ... print(C.table[i]) [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] # example of "compress" method >>> C.compress() >>> C.table [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] # Exercises Pg. 161, Q2. >>> F, x, y = free_group("x, y") >>> f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3]) >>> Y = [] >>> C = coset_enumeration_r(f, Y) >>> C.compress() >>> C.standardize() >>> C.table [[1, 2, 3, 4], [5, 0, 6, 7], [0, 5, 7, 6], [7, 6, 5, 0], [6, 7, 0, 5], [2, 1, 4, 3], [3, 4, 2, 1], [4, 3, 1, 2]] # John J. Cannon; Lucien A. Dimino; George Havas; Jane M. Watson # Mathematics of Computation, Vol. 27, No. 123. (Jul., 1973), pp. 463-490 # from 1973chwd.pdf # Table 1. Ex. 1 >>> F, r, s, t = free_group("r, s, t") >>> E1 = FpGroup(F, [t**-1*r*t*r**-2, r**-1*s*r*s**-2, s**-1*t*s*t**-2]) >>> C = coset_enumeration_r(E1, [r]) >>> for i in range(len(C.p)): ... if C.p[i] == i: ... print(C.table[i]) [0, 0, 0, 0, 0, 0] Ex. 2 >>> F, a, b = free_group("a, b") >>> Cox = FpGroup(F, [a**6, b**6, (a*b)**2, (a**2*b**2)**2, (a**3*b**3)**5]) >>> C = coset_enumeration_r(Cox, [a]) >>> index = 0 >>> for i in range(len(C.p)): ... if C.p[i] == i: ... index += 1 >>> index 500 # Ex. 
3 >>> F, a, b = free_group("a, b") >>> B_2_4 = FpGroup(F, [a**4, b**4, (a*b)**4, (a**-1*b)**4, (a**2*b)**4, \ (a*b**2)**4, (a**2*b**2)**4, (a**-1*b*a*b)**4, (a*b**-1*a*b)**4]) >>> C = coset_enumeration_r(B_2_4, [a]) >>> index = 0 >>> for i in range(len(C.p)): ... if C.p[i] == i: ... index += 1 >>> index 1024 References ========== .. [1] Holt, D., Eick, B., O'Brien, E. "Handbook of computational group theory" """
/usr/src/app/target_test_cases/failed_tests_coset_enumeration_r.txt
def coset_enumeration_r(fp_grp, Y, max_cosets=None, draft=None, incomplete=False, modified=False): """ This is easier of the two implemented methods of coset enumeration. and is often called the HLT method, after Hazelgrove, Leech, Trotter The idea is that we make use of ``scan_and_fill`` makes new definitions whenever the scan is incomplete to enable the scan to complete; this way we fill in the gaps in the scan of the relator or subgroup generator, that's why the name relator-based method. An instance of `CosetTable` for `fp_grp` can be passed as the keyword argument `draft` in which case the coset enumeration will start with that instance and attempt to complete it. When `incomplete` is `True` and the function is unable to complete for some reason, the partially complete table will be returned. # TODO: complete the docstring See Also ======== scan_and_fill, Examples ======== >>> from sympy.combinatorics.free_groups import free_group >>> from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r >>> F, x, y = free_group("x, y") # Example 5.1 from [1] >>> f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y]) >>> C = coset_enumeration_r(f, [x]) >>> for i in range(len(C.p)): ... if C.p[i] == i: ... print(C.table[i]) [0, 0, 1, 2] [1, 1, 2, 0] [2, 2, 0, 1] >>> C.p [0, 1, 2, 1, 1] # Example from exercises Q2 [1] >>> f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3]) >>> C = coset_enumeration_r(f, []) >>> C.compress(); C.standardize() >>> C.table [[1, 2, 3, 4], [5, 0, 6, 7], [0, 5, 7, 6], [7, 6, 5, 0], [6, 7, 0, 5], [2, 1, 4, 3], [3, 4, 2, 1], [4, 3, 1, 2]] # Example 5.2 >>> f = FpGroup(F, [x**2, y**3, (x*y)**3]) >>> Y = [x*y] >>> C = coset_enumeration_r(f, Y) >>> for i in range(len(C.p)): ... if C.p[i] == i: ... print(C.table[i]) [1, 1, 2, 1] [0, 0, 0, 2] [3, 3, 1, 0] [2, 2, 3, 3] # Example 5.3 >>> f = FpGroup(F, [x**2*y**2, x**3*y**5]) >>> Y = [] >>> C = coset_enumeration_r(f, Y) >>> for i in range(len(C.p)): ... if C.p[i] == i: ... print(C.table[i]) [1, 3, 1, 3] [2, 0, 2, 0] [3, 1, 3, 1] [0, 2, 0, 2] # Example 5.4 >>> F, a, b, c, d, e = free_group("a, b, c, d, e") >>> f = FpGroup(F, [a*b*c**-1, b*c*d**-1, c*d*e**-1, d*e*a**-1, e*a*b**-1]) >>> Y = [a] >>> C = coset_enumeration_r(f, Y) >>> for i in range(len(C.p)): ... if C.p[i] == i: ... print(C.table[i]) [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] # example of "compress" method >>> C.compress() >>> C.table [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] # Exercises Pg. 161, Q2. >>> F, x, y = free_group("x, y") >>> f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3]) >>> Y = [] >>> C = coset_enumeration_r(f, Y) >>> C.compress() >>> C.standardize() >>> C.table [[1, 2, 3, 4], [5, 0, 6, 7], [0, 5, 7, 6], [7, 6, 5, 0], [6, 7, 0, 5], [2, 1, 4, 3], [3, 4, 2, 1], [4, 3, 1, 2]] # John J. Cannon; Lucien A. Dimino; George Havas; Jane M. Watson # Mathematics of Computation, Vol. 27, No. 123. (Jul., 1973), pp. 463-490 # from 1973chwd.pdf # Table 1. Ex. 1 >>> F, r, s, t = free_group("r, s, t") >>> E1 = FpGroup(F, [t**-1*r*t*r**-2, r**-1*s*r*s**-2, s**-1*t*s*t**-2]) >>> C = coset_enumeration_r(E1, [r]) >>> for i in range(len(C.p)): ... if C.p[i] == i: ... print(C.table[i]) [0, 0, 0, 0, 0, 0] Ex. 2 >>> F, a, b = free_group("a, b") >>> Cox = FpGroup(F, [a**6, b**6, (a*b)**2, (a**2*b**2)**2, (a**3*b**3)**5]) >>> C = coset_enumeration_r(Cox, [a]) >>> index = 0 >>> for i in range(len(C.p)): ... if C.p[i] == i: ... index += 1 >>> index 500 # Ex. 
3 >>> F, a, b = free_group("a, b") >>> B_2_4 = FpGroup(F, [a**4, b**4, (a*b)**4, (a**-1*b)**4, (a**2*b)**4, \ (a*b**2)**4, (a**2*b**2)**4, (a**-1*b*a*b)**4, (a*b**-1*a*b)**4]) >>> C = coset_enumeration_r(B_2_4, [a]) >>> index = 0 >>> for i in range(len(C.p)): ... if C.p[i] == i: ... index += 1 >>> index 1024 References ========== .. [1] Holt, D., Eick, B., O'Brien, E. "Handbook of computational group theory" """ # 1. Initialize a coset table C for < X|R > C = CosetTable(fp_grp, Y, max_cosets=max_cosets) # Define coset table methods. if modified: _scan_and_fill = C.modified_scan_and_fill _define = C.modified_define else: _scan_and_fill = C.scan_and_fill _define = C.define if draft: C.table = draft.table[:] C.p = draft.p[:] R = fp_grp.relators A_dict = C.A_dict p = C.p for i in range(len(Y)): if modified: _scan_and_fill(0, Y[i], C._grp.generators[i]) else: _scan_and_fill(0, Y[i]) alpha = 0 while alpha < C.n: if p[alpha] == alpha: try: for w in R: if modified: _scan_and_fill(alpha, w, C._grp.identity) else: _scan_and_fill(alpha, w) # if alpha was eliminated during the scan then break if p[alpha] < alpha: break if p[alpha] == alpha: for x in A_dict: if C.table[alpha][A_dict[x]] is None: _define(alpha, x) except ValueError as e: if incomplete: return C raise e alpha += 1 return C
coset_enumeration_r
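A usage sketch that reproduces Example 5.1 from the docstring; the surviving rows of the coset table give the documented action on three cosets:

from sympy.combinatorics.free_groups import free_group
from sympy.combinatorics.fp_groups import FpGroup, coset_enumeration_r

F, x, y = free_group("x, y")
f = FpGroup(F, [x**3, y**3, x**-1*y**-1*x*y])
C = coset_enumeration_r(f, [x])
live = [C.table[i] for i in range(len(C.p)) if C.p[i] == i]
print(live)   # [[0, 0, 1, 2], [1, 1, 2, 0], [2, 2, 0, 1]]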
sympy
44
sympy/simplify/cse_main.py
def cse(exprs, symbols=None, optimizations=None, postprocess=None, order='canonical', ignore=(), list=True): """ Perform common subexpression elimination on an expression. Parameters ========== exprs : list of SymPy expressions, or a single SymPy expression The expressions to reduce. symbols : infinite iterator yielding unique Symbols The symbols used to label the common subexpressions which are pulled out. The ``numbered_symbols`` generator is useful. The default is a stream of symbols of the form "x0", "x1", etc. This must be an infinite iterator. optimizations : list of (callable, callable) pairs The (preprocessor, postprocessor) pairs of external optimization functions. Optionally 'basic' can be passed for a set of predefined basic optimizations. Such 'basic' optimizations were used by default in old implementation, however they can be really slow on larger expressions. Now, no pre or post optimizations are made by default. postprocess : a function which accepts the two return values of cse and returns the desired form of output from cse, e.g. if you want the replacements reversed the function might be the following lambda: lambda r, e: return reversed(r), e order : string, 'none' or 'canonical' The order by which Mul and Add arguments are processed. If set to 'canonical', arguments will be canonically ordered. If set to 'none', ordering will be faster but dependent on expressions hashes, thus machine dependent and variable. For large expressions where speed is a concern, use the setting order='none'. ignore : iterable of Symbols Substitutions containing any Symbol from ``ignore`` will be ignored. list : bool, (default True) Returns expression in list or else with same type as input (when False). Returns ======= replacements : list of (Symbol, expression) pairs All of the common subexpressions that were replaced. Subexpressions earlier in this list might show up in subexpressions later in this list. reduced_exprs : list of SymPy expressions The reduced expressions with all of the replacements above. Examples ======== >>> from sympy import cse, SparseMatrix >>> from sympy.abc import x, y, z, w >>> cse(((w + x + y + z)*(w + y + z))/(w + x)**3) ([(x0, y + z), (x1, w + x)], [(w + x0)*(x0 + x1)/x1**3]) List of expressions with recursive substitutions: >>> m = SparseMatrix([x + y, x + y + z]) >>> cse([(x+y)**2, x + y + z, y + z, x + z + y, m]) ([(x0, x + y), (x1, x0 + z)], [x0**2, x1, y + z, x1, Matrix([ [x0], [x1]])]) Note: the type and mutability of input matrices is retained. >>> isinstance(_[1][-1], SparseMatrix) True The user may disallow substitutions containing certain symbols: >>> cse([y**2*(x + 1), 3*y**2*(x + 1)], ignore=(y,)) ([(x0, x + 1)], [x0*y**2, 3*x0*y**2]) The default return value for the reduced expression(s) is a list, even if there is only one expression. The `list` flag preserves the type of the input in the output: >>> cse(x) ([], [x]) >>> cse(x, list=False) ([], x) """
/usr/src/app/target_test_cases/failed_tests_cse.txt
def cse(exprs, symbols=None, optimizations=None, postprocess=None, order='canonical', ignore=(), list=True): """ Perform common subexpression elimination on an expression. Parameters ========== exprs : list of SymPy expressions, or a single SymPy expression The expressions to reduce. symbols : infinite iterator yielding unique Symbols The symbols used to label the common subexpressions which are pulled out. The ``numbered_symbols`` generator is useful. The default is a stream of symbols of the form "x0", "x1", etc. This must be an infinite iterator. optimizations : list of (callable, callable) pairs The (preprocessor, postprocessor) pairs of external optimization functions. Optionally 'basic' can be passed for a set of predefined basic optimizations. Such 'basic' optimizations were used by default in old implementation, however they can be really slow on larger expressions. Now, no pre or post optimizations are made by default. postprocess : a function which accepts the two return values of cse and returns the desired form of output from cse, e.g. if you want the replacements reversed the function might be the following lambda: lambda r, e: return reversed(r), e order : string, 'none' or 'canonical' The order by which Mul and Add arguments are processed. If set to 'canonical', arguments will be canonically ordered. If set to 'none', ordering will be faster but dependent on expressions hashes, thus machine dependent and variable. For large expressions where speed is a concern, use the setting order='none'. ignore : iterable of Symbols Substitutions containing any Symbol from ``ignore`` will be ignored. list : bool, (default True) Returns expression in list or else with same type as input (when False). Returns ======= replacements : list of (Symbol, expression) pairs All of the common subexpressions that were replaced. Subexpressions earlier in this list might show up in subexpressions later in this list. reduced_exprs : list of SymPy expressions The reduced expressions with all of the replacements above. Examples ======== >>> from sympy import cse, SparseMatrix >>> from sympy.abc import x, y, z, w >>> cse(((w + x + y + z)*(w + y + z))/(w + x)**3) ([(x0, y + z), (x1, w + x)], [(w + x0)*(x0 + x1)/x1**3]) List of expressions with recursive substitutions: >>> m = SparseMatrix([x + y, x + y + z]) >>> cse([(x+y)**2, x + y + z, y + z, x + z + y, m]) ([(x0, x + y), (x1, x0 + z)], [x0**2, x1, y + z, x1, Matrix([ [x0], [x1]])]) Note: the type and mutability of input matrices is retained. >>> isinstance(_[1][-1], SparseMatrix) True The user may disallow substitutions containing certain symbols: >>> cse([y**2*(x + 1), 3*y**2*(x + 1)], ignore=(y,)) ([(x0, x + 1)], [x0*y**2, 3*x0*y**2]) The default return value for the reduced expression(s) is a list, even if there is only one expression. The `list` flag preserves the type of the input in the output: >>> cse(x) ([], [x]) >>> cse(x, list=False) ([], x) """ if not list: return _cse_homogeneous(exprs, symbols=symbols, optimizations=optimizations, postprocess=postprocess, order=order, ignore=ignore) if isinstance(exprs, (int, float)): exprs = sympify(exprs) # Handle the case if just one expression was passed. 
if isinstance(exprs, (Basic, MatrixBase)): exprs = [exprs] copy = exprs temp = [] for e in exprs: if isinstance(e, (Matrix, ImmutableMatrix)): temp.append(Tuple(*e.flat())) elif isinstance(e, (SparseMatrix, ImmutableSparseMatrix)): temp.append(Tuple(*e.todok().items())) else: temp.append(e) exprs = temp del temp if optimizations is None: optimizations = [] elif optimizations == 'basic': optimizations = basic_optimizations # Preprocess the expressions to give us better optimization opportunities. reduced_exprs = [preprocess_for_cse(e, optimizations) for e in exprs] if symbols is None: symbols = numbered_symbols(cls=Symbol) else: # In case we get passed an iterable with an __iter__ method instead of # an actual iterator. symbols = iter(symbols) # Find other optimization opportunities. opt_subs = opt_cse(reduced_exprs, order) # Main CSE algorithm. replacements, reduced_exprs = tree_cse(reduced_exprs, symbols, opt_subs, order, ignore) # Postprocess the expressions to return the expressions to canonical form. exprs = copy replacements = [(sym, postprocess_for_cse(subtree, optimizations)) for sym, subtree in replacements] reduced_exprs = [postprocess_for_cse(e, optimizations) for e in reduced_exprs] # Get the matrices back for i, e in enumerate(exprs): if isinstance(e, (Matrix, ImmutableMatrix)): reduced_exprs[i] = Matrix(e.rows, e.cols, reduced_exprs[i]) if isinstance(e, ImmutableMatrix): reduced_exprs[i] = reduced_exprs[i].as_immutable() elif isinstance(e, (SparseMatrix, ImmutableSparseMatrix)): m = SparseMatrix(e.rows, e.cols, {}) for k, v in reduced_exprs[i]: m[k] = v if isinstance(e, ImmutableSparseMatrix): m = m.as_immutable() reduced_exprs[i] = m if postprocess is None: return replacements, reduced_exprs return postprocess(replacements, reduced_exprs)
cse
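A minimal usage sketch repeating the first doctest; the replacement pairs and the reduced expression are returned separately:

from sympy import cse
from sympy.abc import w, x, y, z

replacements, reduced = cse(((w + x + y + z)*(w + y + z))/(w + x)**3)
print(replacements)   # [(x0, y + z), (x1, w + x)]
print(reduced)        # [(w + x0)*(x0 + x1)/x1**3]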
sympy
45
sympy/polys/matrices/dense.py
def ddm_irref_den(a, K): """a <-- rref(a); return (den, pivots) Compute the fraction-free reduced row echelon form (RREF) of $a$. Modifies $a$ in place and returns a tuple containing the denominator of the RREF and a list of the pivot columns. Explanation =========== The algorithm used is the fraction-free version of Gauss-Jordan elimination described as FFGJ in [1]_. Here it is modified to handle zero or missing pivots and to avoid redundant arithmetic. The domain $K$ must support exact division (``K.exquo``) but does not need to be a field. This method is suitable for most exact rings and fields like :ref:`ZZ`, :ref:`QQ` and :ref:`QQ(a)`. In the case of :ref:`QQ` or :ref:`K(x)` it might be more efficient to clear denominators and use :ref:`ZZ` or :ref:`K[x]` instead. For inexact domains like :ref:`RR` and :ref:`CC` use ``ddm_irref`` instead. Examples ======== >>> from sympy.polys.matrices.dense import ddm_irref_den >>> from sympy import ZZ, Matrix >>> M = [[ZZ(1), ZZ(2), ZZ(3)], [ZZ(4), ZZ(5), ZZ(6)]] >>> den, pivots = ddm_irref_den(M, ZZ) >>> M [[-3, 0, 3], [0, -3, -6]] >>> den -3 >>> pivots [0, 1] >>> Matrix(M).rref()[0] Matrix([ [1, 0, -1], [0, 1, 2]]) See Also ======== ddm_irref A version of this routine that uses field division. sdm_irref A sparse version of :func:`ddm_irref`. sdm_rref_den A sparse version of :func:`ddm_irref_den`. sympy.polys.matrices.domainmatrix.DomainMatrix.rref_den Higher level interface. References ========== .. [1] Fraction-free algorithms for linear and polynomial equations. George C. Nakos , Peter R. Turner , Robert M. Williams. https://dl.acm.org/doi/10.1145/271130.271133 """
/usr/src/app/target_test_cases/failed_tests_ddm_irref_den.txt
def ddm_irref_den(a, K): """a <-- rref(a); return (den, pivots) Compute the fraction-free reduced row echelon form (RREF) of $a$. Modifies $a$ in place and returns a tuple containing the denominator of the RREF and a list of the pivot columns. Explanation =========== The algorithm used is the fraction-free version of Gauss-Jordan elimination described as FFGJ in [1]_. Here it is modified to handle zero or missing pivots and to avoid redundant arithmetic. The domain $K$ must support exact division (``K.exquo``) but does not need to be a field. This method is suitable for most exact rings and fields like :ref:`ZZ`, :ref:`QQ` and :ref:`QQ(a)`. In the case of :ref:`QQ` or :ref:`K(x)` it might be more efficient to clear denominators and use :ref:`ZZ` or :ref:`K[x]` instead. For inexact domains like :ref:`RR` and :ref:`CC` use ``ddm_irref`` instead. Examples ======== >>> from sympy.polys.matrices.dense import ddm_irref_den >>> from sympy import ZZ, Matrix >>> M = [[ZZ(1), ZZ(2), ZZ(3)], [ZZ(4), ZZ(5), ZZ(6)]] >>> den, pivots = ddm_irref_den(M, ZZ) >>> M [[-3, 0, 3], [0, -3, -6]] >>> den -3 >>> pivots [0, 1] >>> Matrix(M).rref()[0] Matrix([ [1, 0, -1], [0, 1, 2]]) See Also ======== ddm_irref A version of this routine that uses field division. sdm_irref A sparse version of :func:`ddm_irref`. sdm_rref_den A sparse version of :func:`ddm_irref_den`. sympy.polys.matrices.domainmatrix.DomainMatrix.rref_den Higher level interface. References ========== .. [1] Fraction-free algorithms for linear and polynomial equations. George C. Nakos , Peter R. Turner , Robert M. Williams. https://dl.acm.org/doi/10.1145/271130.271133 """ # # A simpler presentation of this algorithm is given in [1]: # # Given an n x n matrix A and n x 1 matrix b: # # for i in range(n): # if i != 0: # d = a[i-1][i-1] # for j in range(n): # if j == i: # continue # b[j] = a[i][i]*b[j] - a[j][i]*b[i] # for k in range(n): # a[j][k] = a[i][i]*a[j][k] - a[j][i]*a[i][k] # if i != 0: # a[j][k] /= d # # Our version here is a bit more complicated because: # # 1. We use row-swaps to avoid zero pivots. # 2. We allow for some columns to be missing pivots. # 3. We avoid a lot of redundant arithmetic. # # TODO: Use a non-trivial pivoting strategy. Even just row swapping makes a # big difference to performance if e.g. the upper-left entry of the matrix # is a huge polynomial. # a is (m x n) m = len(a) if not m: return K.one, [] n = len(a[0]) d = None pivots = [] no_pivots = [] # i, j will be the row and column indices of the current pivot i = 0 for j in range(n): # next pivot? aij = a[i][j] # swap rows if zero if not aij: for ip in range(i+1, m): aij = a[ip][j] # row-swap if aij: a[i], a[ip] = a[ip], a[i] break else: # go to next column no_pivots.append(j) continue # Now aij is the pivot and i,j are the row and column. We need to clear # the column above and below but we also need to keep track of the # denominator of the RREF which means also multiplying everything above # and to the left by the current pivot aij and dividing by d (which we # multiplied everything by in the previous iteration so this is an # exact division). # # First handle the upper left corner which is usually already diagonal # with all diagonal entries equal to the current denominator but there # can be other non-zero entries in any column that has no pivot. 
# Update previous pivots in the matrix if pivots: pivot_val = aij * a[0][pivots[0]] # Divide out the common factor if d is not None: pivot_val = K.exquo(pivot_val, d) # Could defer this until the end but it is pretty cheap and # helps when debugging. for ip, jp in enumerate(pivots): a[ip][jp] = pivot_val # Update columns without pivots for jnp in no_pivots: for ip in range(i): aijp = a[ip][jnp] if aijp: aijp *= aij if d is not None: aijp = K.exquo(aijp, d) a[ip][jnp] = aijp # Eliminate above, below and to the right as in ordinary division free # Gauss-Jordan elmination except also dividing out d from every entry. for jp, aj in enumerate(a): # Skip the current row if jp == i: continue # Eliminate to the right in all rows for kp in range(j+1, n): ajk = aij * aj[kp] - aj[j] * a[i][kp] if d is not None: ajk = K.exquo(ajk, d) aj[kp] = ajk # Set to zero above and below the pivot aj[j] = K.zero # next row pivots.append(j) i += 1 # no more rows left? if i >= m: break if not K.is_one(aij): d = aij else: d = None if not pivots: denom = K.one else: denom = a[0][pivots[0]] return denom, pivots
ddm_irref_den
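As an illustrative aside, the comment block inside ``ddm_irref_den`` quotes a simpler presentation of fraction-free Gauss-Jordan elimination. A minimal standalone sketch of that idea over Python integers, assuming a square matrix with nonzero pivots (so no row swaps or missing pivots), might look like this; ``ffgj`` is a hypothetical helper name, not a SymPy routine.

def ffgj(rows):
    # Fraction-free Gauss-Jordan on a square integer matrix with nonzero
    # pivots.  Returns (a, den) where a/den is the reduced row echelon form;
    # every division below is exact, so only integer arithmetic is used.
    a = [list(r) for r in rows]
    n = len(a)
    d = 1                        # denominator carried from the previous pivot
    for i in range(n):
        piv = a[i][i]
        for j in range(n):
            if j == i:
                continue
            m = a[j][i]          # save before the column entry is overwritten
            for k in range(n):
                a[j][k] = (piv*a[j][k] - m*a[i][k]) // d   # exact division
        d = piv
    return a, d

a, den = ffgj([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
print(a, den)   # a/den is the 3x3 identity; den equals the determinant, -3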
sympy
46
sympy/core/sorting.py
def default_sort_key(item, order=None): """Return a key that can be used for sorting. The key has the structure: (class_key, (len(args), args), exponent.sort_key(), coefficient) This key is supplied by the sort_key routine of Basic objects when ``item`` is a Basic object or an object (other than a string) that sympifies to a Basic object. Otherwise, this function produces the key. The ``order`` argument is passed along to the sort_key routine and is used to determine how the terms *within* an expression are ordered. (See examples below) ``order`` options are: 'lex', 'grlex', 'grevlex', and reversed values of the same (e.g. 'rev-lex'). The default order value is None (which translates to 'lex'). Examples ======== >>> from sympy import S, I, default_sort_key, sin, cos, sqrt >>> from sympy.core.function import UndefinedFunction >>> from sympy.abc import x The following are equivalent ways of getting the key for an object: >>> x.sort_key() == default_sort_key(x) True Here are some examples of the key that is produced: >>> default_sort_key(UndefinedFunction('f')) ((0, 0, 'UndefinedFunction'), (1, ('f',)), ((1, 0, 'Number'), (0, ()), (), 1), 1) >>> default_sort_key('1') ((0, 0, 'str'), (1, ('1',)), ((1, 0, 'Number'), (0, ()), (), 1), 1) >>> default_sort_key(S.One) ((1, 0, 'Number'), (0, ()), (), 1) >>> default_sort_key(2) ((1, 0, 'Number'), (0, ()), (), 2) While sort_key is a method only defined for SymPy objects, default_sort_key will accept anything as an argument so it is more robust as a sorting key. For the following, using key= lambda i: i.sort_key() would fail because 2 does not have a sort_key method; that's why default_sort_key is used. Note, that it also handles sympification of non-string items likes ints: >>> a = [2, I, -I] >>> sorted(a, key=default_sort_key) [2, -I, I] The returned key can be used anywhere that a key can be specified for a function, e.g. sort, min, max, etc...: >>> a.sort(key=default_sort_key); a[0] 2 >>> min(a, key=default_sort_key) 2 Notes ===== The key returned is useful for getting items into a canonical order that will be the same across platforms. It is not directly useful for sorting lists of expressions: >>> a, b = x, 1/x Since ``a`` has only 1 term, its value of sort_key is unaffected by ``order``: >>> a.sort_key() == a.sort_key('rev-lex') True If ``a`` and ``b`` are combined then the key will differ because there are terms that can be ordered: >>> eq = a + b >>> eq.sort_key() == eq.sort_key('rev-lex') False >>> eq.as_ordered_terms() [x, 1/x] >>> eq.as_ordered_terms('rev-lex') [1/x, x] But since the keys for each of these terms are independent of ``order``'s value, they do not sort differently when they appear separately in a list: >>> sorted(eq.args, key=default_sort_key) [1/x, x] >>> sorted(eq.args, key=lambda i: default_sort_key(i, order='rev-lex')) [1/x, x] The order of terms obtained when using these keys is the order that would be obtained if those terms were *factors* in a product. Although it is useful for quickly putting expressions in canonical order, it does not sort expressions based on their complexity defined by the number of operations, power of variables and others: >>> sorted([sin(x)*cos(x), sin(x)], key=default_sort_key) [sin(x)*cos(x), sin(x)] >>> sorted([x, x**2, sqrt(x), x**3], key=default_sort_key) [sqrt(x), x, x**2, x**3] See Also ======== ordered, sympy.core.expr.Expr.as_ordered_factors, sympy.core.expr.Expr.as_ordered_terms """
/usr/src/app/target_test_cases/failed_tests_default_sort_key.txt
def default_sort_key(item, order=None): """Return a key that can be used for sorting. The key has the structure: (class_key, (len(args), args), exponent.sort_key(), coefficient) This key is supplied by the sort_key routine of Basic objects when ``item`` is a Basic object or an object (other than a string) that sympifies to a Basic object. Otherwise, this function produces the key. The ``order`` argument is passed along to the sort_key routine and is used to determine how the terms *within* an expression are ordered. (See examples below) ``order`` options are: 'lex', 'grlex', 'grevlex', and reversed values of the same (e.g. 'rev-lex'). The default order value is None (which translates to 'lex'). Examples ======== >>> from sympy import S, I, default_sort_key, sin, cos, sqrt >>> from sympy.core.function import UndefinedFunction >>> from sympy.abc import x The following are equivalent ways of getting the key for an object: >>> x.sort_key() == default_sort_key(x) True Here are some examples of the key that is produced: >>> default_sort_key(UndefinedFunction('f')) ((0, 0, 'UndefinedFunction'), (1, ('f',)), ((1, 0, 'Number'), (0, ()), (), 1), 1) >>> default_sort_key('1') ((0, 0, 'str'), (1, ('1',)), ((1, 0, 'Number'), (0, ()), (), 1), 1) >>> default_sort_key(S.One) ((1, 0, 'Number'), (0, ()), (), 1) >>> default_sort_key(2) ((1, 0, 'Number'), (0, ()), (), 2) While sort_key is a method only defined for SymPy objects, default_sort_key will accept anything as an argument so it is more robust as a sorting key. For the following, using key= lambda i: i.sort_key() would fail because 2 does not have a sort_key method; that's why default_sort_key is used. Note, that it also handles sympification of non-string items likes ints: >>> a = [2, I, -I] >>> sorted(a, key=default_sort_key) [2, -I, I] The returned key can be used anywhere that a key can be specified for a function, e.g. sort, min, max, etc...: >>> a.sort(key=default_sort_key); a[0] 2 >>> min(a, key=default_sort_key) 2 Notes ===== The key returned is useful for getting items into a canonical order that will be the same across platforms. It is not directly useful for sorting lists of expressions: >>> a, b = x, 1/x Since ``a`` has only 1 term, its value of sort_key is unaffected by ``order``: >>> a.sort_key() == a.sort_key('rev-lex') True If ``a`` and ``b`` are combined then the key will differ because there are terms that can be ordered: >>> eq = a + b >>> eq.sort_key() == eq.sort_key('rev-lex') False >>> eq.as_ordered_terms() [x, 1/x] >>> eq.as_ordered_terms('rev-lex') [1/x, x] But since the keys for each of these terms are independent of ``order``'s value, they do not sort differently when they appear separately in a list: >>> sorted(eq.args, key=default_sort_key) [1/x, x] >>> sorted(eq.args, key=lambda i: default_sort_key(i, order='rev-lex')) [1/x, x] The order of terms obtained when using these keys is the order that would be obtained if those terms were *factors* in a product. 
Although it is useful for quickly putting expressions in canonical order, it does not sort expressions based on their complexity defined by the number of operations, power of variables and others: >>> sorted([sin(x)*cos(x), sin(x)], key=default_sort_key) [sin(x)*cos(x), sin(x)] >>> sorted([x, x**2, sqrt(x), x**3], key=default_sort_key) [sqrt(x), x, x**2, x**3] See Also ======== ordered, sympy.core.expr.Expr.as_ordered_factors, sympy.core.expr.Expr.as_ordered_terms """ from .basic import Basic from .singleton import S if isinstance(item, Basic): return item.sort_key(order=order) if iterable(item, exclude=str): if isinstance(item, dict): args = item.items() unordered = True elif isinstance(item, set): args = item unordered = True else: # e.g. tuple, list args = list(item) unordered = False args = [default_sort_key(arg, order=order) for arg in args] if unordered: # e.g. dict, set args = sorted(args) cls_index, args = 10, (len(args), tuple(args)) else: if not isinstance(item, str): try: item = sympify(item, strict=True) except SympifyError: # e.g. lambda x: x pass else: if isinstance(item, Basic): # e.g int -> Integer return default_sort_key(item) # e.g. UndefinedFunction # e.g. str cls_index, args = 0, (1, (str(item),)) return (cls_index, 0, item.__class__.__name__ ), args, S.One.sort_key(), S.One
default_sort_key
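As an illustrative aside, non-SymPy items are sympified before a key is built, so ``default_sort_key`` can order mixed lists of plain Python numbers and expressions; the list below is an arbitrary example.

from sympy import default_sort_key, sqrt, Symbol

x = Symbol('x')
items = [sqrt(x), 3, 1/x, x**2, x]
# A canonical, platform-independent ordering of mixed items.
print(sorted(items, key=default_sort_key))
print(min(items, key=default_sort_key))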
sympy
47
sympy/solvers/diophantine/diophantine.py
def diophantine(eq, param=symbols("t", integer=True), syms=None, permute=False): """ Simplify the solution procedure of diophantine equation ``eq`` by converting it into a product of terms which should equal zero. Explanation =========== For example, when solving, `x^2 - y^2 = 0` this is treated as `(x + y)(x - y) = 0` and `x + y = 0` and `x - y = 0` are solved independently and combined. Each term is solved by calling ``diop_solve()``. (Although it is possible to call ``diop_solve()`` directly, one must be careful to pass an equation in the correct form and to interpret the output correctly; ``diophantine()`` is the public-facing function to use in general.) Output of ``diophantine()`` is a set of tuples. The elements of the tuple are the solutions for each variable in the equation and are arranged according to the alphabetic ordering of the variables. e.g. For an equation with two variables, `a` and `b`, the first element of the tuple is the solution for `a` and the second for `b`. Usage ===== ``diophantine(eq, t, syms)``: Solve the diophantine equation ``eq``. ``t`` is the optional parameter to be used by ``diop_solve()``. ``syms`` is an optional list of symbols which determines the order of the elements in the returned tuple. By default, only the base solution is returned. If ``permute`` is set to True then permutations of the base solution and/or permutations of the signs of the values will be returned when applicable. Details ======= ``eq`` should be an expression which is assumed to be zero. ``t`` is the parameter to be used in the solution. Examples ======== >>> from sympy import diophantine >>> from sympy.abc import a, b >>> eq = a**4 + b**4 - (2**4 + 3**4) >>> diophantine(eq) {(2, 3)} >>> diophantine(eq, permute=True) {(-3, -2), (-3, 2), (-2, -3), (-2, 3), (2, -3), (2, 3), (3, -2), (3, 2)} >>> from sympy.abc import x, y, z >>> diophantine(x**2 - y**2) {(t_0, -t_0), (t_0, t_0)} >>> diophantine(x*(2*x + 3*y - z)) {(0, n1, n2), (t_0, t_1, 2*t_0 + 3*t_1)} >>> diophantine(x**2 + 3*x*y + 4*x) {(0, n1), (-3*t_0 - 4, t_0)} See Also ======== diop_solve sympy.utilities.iterables.permute_signs sympy.utilities.iterables.signed_permutations """
/usr/src/app/target_test_cases/failed_tests_diophantine.txt
def diophantine(eq, param=symbols("t", integer=True), syms=None, permute=False): """ Simplify the solution procedure of diophantine equation ``eq`` by converting it into a product of terms which should equal zero. Explanation =========== For example, when solving, `x^2 - y^2 = 0` this is treated as `(x + y)(x - y) = 0` and `x + y = 0` and `x - y = 0` are solved independently and combined. Each term is solved by calling ``diop_solve()``. (Although it is possible to call ``diop_solve()`` directly, one must be careful to pass an equation in the correct form and to interpret the output correctly; ``diophantine()`` is the public-facing function to use in general.) Output of ``diophantine()`` is a set of tuples. The elements of the tuple are the solutions for each variable in the equation and are arranged according to the alphabetic ordering of the variables. e.g. For an equation with two variables, `a` and `b`, the first element of the tuple is the solution for `a` and the second for `b`. Usage ===== ``diophantine(eq, t, syms)``: Solve the diophantine equation ``eq``. ``t`` is the optional parameter to be used by ``diop_solve()``. ``syms`` is an optional list of symbols which determines the order of the elements in the returned tuple. By default, only the base solution is returned. If ``permute`` is set to True then permutations of the base solution and/or permutations of the signs of the values will be returned when applicable. Details ======= ``eq`` should be an expression which is assumed to be zero. ``t`` is the parameter to be used in the solution. Examples ======== >>> from sympy import diophantine >>> from sympy.abc import a, b >>> eq = a**4 + b**4 - (2**4 + 3**4) >>> diophantine(eq) {(2, 3)} >>> diophantine(eq, permute=True) {(-3, -2), (-3, 2), (-2, -3), (-2, 3), (2, -3), (2, 3), (3, -2), (3, 2)} >>> from sympy.abc import x, y, z >>> diophantine(x**2 - y**2) {(t_0, -t_0), (t_0, t_0)} >>> diophantine(x*(2*x + 3*y - z)) {(0, n1, n2), (t_0, t_1, 2*t_0 + 3*t_1)} >>> diophantine(x**2 + 3*x*y + 4*x) {(0, n1), (-3*t_0 - 4, t_0)} See Also ======== diop_solve sympy.utilities.iterables.permute_signs sympy.utilities.iterables.signed_permutations """ eq = _sympify(eq) if isinstance(eq, Eq): eq = eq.lhs - eq.rhs try: var = list(eq.expand(force=True).free_symbols) var.sort(key=default_sort_key) if syms: if not is_sequence(syms): raise TypeError( 'syms should be given as a sequence, e.g. 
a list') syms = [i for i in syms if i in var] if syms != var: dict_sym_index = dict(zip(syms, range(len(syms)))) return {tuple([t[dict_sym_index[i]] for i in var]) for t in diophantine(eq, param, permute=permute)} n, d = eq.as_numer_denom() if n.is_number: return set() if not d.is_number: dsol = diophantine(d) good = diophantine(n) - dsol return {s for s in good if _mexpand(d.subs(zip(var, s)))} eq = factor_terms(n) assert not eq.is_number eq = eq.as_independent(*var, as_Add=False)[1] p = Poly(eq) assert not any(g.is_number for g in p.gens) eq = p.as_expr() assert eq.is_polynomial() except (GeneratorsNeeded, AssertionError): raise TypeError(filldedent(''' Equation should be a polynomial with Rational coefficients.''')) # permute only sign do_permute_signs = False # permute sign and values do_permute_signs_var = False # permute few signs permute_few_signs = False try: # if we know that factoring should not be attempted, skip # the factoring step v, c, t = classify_diop(eq) # check for permute sign if permute: len_var = len(v) permute_signs_for = [ GeneralSumOfSquares.name, GeneralSumOfEvenPowers.name] permute_signs_check = [ HomogeneousTernaryQuadratic.name, HomogeneousTernaryQuadraticNormal.name, BinaryQuadratic.name] if t in permute_signs_for: do_permute_signs_var = True elif t in permute_signs_check: # if all the variables in eq have even powers # then do_permute_sign = True if len_var == 3: var_mul = list(subsets(v, 2)) # here var_mul is like [(x, y), (x, z), (y, z)] xy_coeff = True x_coeff = True var1_mul_var2 = (a[0]*a[1] for a in var_mul) # if coeff(y*z), coeff(y*x), coeff(x*z) is not 0 then # `xy_coeff` => True and do_permute_sign => False. # Means no permuted solution. for v1_mul_v2 in var1_mul_var2: try: coeff = c[v1_mul_v2] except KeyError: coeff = 0 xy_coeff = bool(xy_coeff) and bool(coeff) var_mul = list(subsets(v, 1)) # here var_mul is like [(x,), (y, )] for v1 in var_mul: try: coeff = c[v1[0]] except KeyError: coeff = 0 x_coeff = bool(x_coeff) and bool(coeff) if not any((xy_coeff, x_coeff)): # means only x**2, y**2, z**2, const is present do_permute_signs = True elif not x_coeff: permute_few_signs = True elif len_var == 2: var_mul = list(subsets(v, 2)) # here var_mul is like [(x, y)] xy_coeff = True x_coeff = True var1_mul_var2 = (x[0]*x[1] for x in var_mul) for v1_mul_v2 in var1_mul_var2: try: coeff = c[v1_mul_v2] except KeyError: coeff = 0 xy_coeff = bool(xy_coeff) and bool(coeff) var_mul = list(subsets(v, 1)) # here var_mul is like [(x,), (y, )] for v1 in var_mul: try: coeff = c[v1[0]] except KeyError: coeff = 0 x_coeff = bool(x_coeff) and bool(coeff) if not any((xy_coeff, x_coeff)): # means only x**2, y**2 and const is present # so we can get more soln by permuting this soln. do_permute_signs = True elif not x_coeff: # when coeff(x), coeff(y) is not present then signs of # x, y can be permuted such that their sign are same # as sign of x*y. # e.g 1. (x_val,y_val)=> (x_val,y_val), (-x_val,-y_val) # 2. 
(-x_vall, y_val)=> (-x_val,y_val), (x_val,-y_val) permute_few_signs = True if t == 'general_sum_of_squares': # trying to factor such expressions will sometimes hang terms = [(eq, 1)] else: raise TypeError except (TypeError, NotImplementedError): fl = factor_list(eq) if fl[0].is_Rational and fl[0] != 1: return diophantine(eq/fl[0], param=param, syms=syms, permute=permute) terms = fl[1] sols = set() for term in terms: base, _ = term var_t, _, eq_type = classify_diop(base, _dict=False) _, base = signsimp(base, evaluate=False).as_coeff_Mul() solution = diop_solve(base, param) if eq_type in [ Linear.name, HomogeneousTernaryQuadratic.name, HomogeneousTernaryQuadraticNormal.name, GeneralPythagorean.name]: sols.add(merge_solution(var, var_t, solution)) elif eq_type in [ BinaryQuadratic.name, GeneralSumOfSquares.name, GeneralSumOfEvenPowers.name, Univariate.name]: sols.update(merge_solution(var, var_t, sol) for sol in solution) else: raise NotImplementedError('unhandled type: %s' % eq_type) # remove null merge results if () in sols: sols.remove(()) null = tuple([0]*len(var)) # if there is no solution, return trivial solution if not sols and eq.subs(zip(var, null)).is_zero: if all(check_assumptions(val, **s.assumptions0) is not False for val, s in zip(null, var)): sols.add(null) final_soln = set() for sol in sols: if all(int_valued(s) for s in sol): if do_permute_signs: permuted_sign = set(permute_signs(sol)) final_soln.update(permuted_sign) elif permute_few_signs: lst = list(permute_signs(sol)) lst = list(filter(lambda x: x[0]*x[1] == sol[1]*sol[0], lst)) permuted_sign = set(lst) final_soln.update(permuted_sign) elif do_permute_signs_var: permuted_sign_var = set(signed_permutations(sol)) final_soln.update(permuted_sign_var) else: final_soln.add(sol) else: final_soln.add(sol) return final_soln
diophantine
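As an illustrative aside, here is a sketch of the parametric output and the ``syms`` ordering described above; the equations are arbitrary examples.

from sympy import diophantine
from sympy.abc import x, y, z

# Parametric base solutions of a homogeneous ternary quadratic.
print(diophantine(x**2 + y**2 - z**2))

# syms fixes the order of the values inside each returned tuple.
print(diophantine(2*x + 3*y - 5, syms=(y, x)))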
sympy
48
sympy/printing/dot.py
def dotprint(expr, styles=default_styles, atom=lambda x: not isinstance(x, Basic), maxdepth=None, repeat=True, labelfunc=str, **kwargs): """DOT description of a SymPy expression tree Parameters ========== styles : list of lists composed of (Class, mapping), optional Styles for different classes. The default is .. code-block:: python ( (Basic, {'color': 'blue', 'shape': 'ellipse'}), (Expr, {'color': 'black'}) ) atom : function, optional Function used to determine if an arg is an atom. A good choice is ``lambda x: not x.args``. The default is ``lambda x: not isinstance(x, Basic)``. maxdepth : integer, optional The maximum depth. The default is ``None``, meaning no limit. repeat : boolean, optional Whether to use different nodes for common subexpressions. The default is ``True``. For example, for ``x + x*y`` with ``repeat=True``, it will have two nodes for ``x``; with ``repeat=False``, it will have one node. .. warning:: Even if a node appears twice in the same object like ``x`` in ``Pow(x, x)``, it will still only appear once. Hence, with ``repeat=False``, the number of arrows out of an object might not equal the number of args it has. labelfunc : function, optional A function to create a label for a given leaf node. The default is ``str``. Another good option is ``srepr``. For example with ``str``, the leaf nodes of ``x + 1`` are labeled, ``x`` and ``1``. With ``srepr``, they are labeled ``Symbol('x')`` and ``Integer(1)``. **kwargs : optional Additional keyword arguments are included as styles for the graph. Examples ======== >>> from sympy import dotprint >>> from sympy.abc import x >>> print(dotprint(x+2)) # doctest: +NORMALIZE_WHITESPACE digraph{ <BLANKLINE> # Graph style "ordering"="out" "rankdir"="TD" <BLANKLINE> ######### # Nodes # ######### <BLANKLINE> "Add(Integer(2), Symbol('x'))_()" ["color"="black", "label"="Add", "shape"="ellipse"]; "Integer(2)_(0,)" ["color"="black", "label"="2", "shape"="ellipse"]; "Symbol('x')_(1,)" ["color"="black", "label"="x", "shape"="ellipse"]; <BLANKLINE> ######### # Edges # ######### <BLANKLINE> "Add(Integer(2), Symbol('x'))_()" -> "Integer(2)_(0,)"; "Add(Integer(2), Symbol('x'))_()" -> "Symbol('x')_(1,)"; } """
/usr/src/app/target_test_cases/failed_tests_dotprint.txt
def dotprint(expr, styles=default_styles, atom=lambda x: not isinstance(x, Basic), maxdepth=None, repeat=True, labelfunc=str, **kwargs): """DOT description of a SymPy expression tree Parameters ========== styles : list of lists composed of (Class, mapping), optional Styles for different classes. The default is .. code-block:: python ( (Basic, {'color': 'blue', 'shape': 'ellipse'}), (Expr, {'color': 'black'}) ) atom : function, optional Function used to determine if an arg is an atom. A good choice is ``lambda x: not x.args``. The default is ``lambda x: not isinstance(x, Basic)``. maxdepth : integer, optional The maximum depth. The default is ``None``, meaning no limit. repeat : boolean, optional Whether to use different nodes for common subexpressions. The default is ``True``. For example, for ``x + x*y`` with ``repeat=True``, it will have two nodes for ``x``; with ``repeat=False``, it will have one node. .. warning:: Even if a node appears twice in the same object like ``x`` in ``Pow(x, x)``, it will still only appear once. Hence, with ``repeat=False``, the number of arrows out of an object might not equal the number of args it has. labelfunc : function, optional A function to create a label for a given leaf node. The default is ``str``. Another good option is ``srepr``. For example with ``str``, the leaf nodes of ``x + 1`` are labeled, ``x`` and ``1``. With ``srepr``, they are labeled ``Symbol('x')`` and ``Integer(1)``. **kwargs : optional Additional keyword arguments are included as styles for the graph. Examples ======== >>> from sympy import dotprint >>> from sympy.abc import x >>> print(dotprint(x+2)) # doctest: +NORMALIZE_WHITESPACE digraph{ <BLANKLINE> # Graph style "ordering"="out" "rankdir"="TD" <BLANKLINE> ######### # Nodes # ######### <BLANKLINE> "Add(Integer(2), Symbol('x'))_()" ["color"="black", "label"="Add", "shape"="ellipse"]; "Integer(2)_(0,)" ["color"="black", "label"="2", "shape"="ellipse"]; "Symbol('x')_(1,)" ["color"="black", "label"="x", "shape"="ellipse"]; <BLANKLINE> ######### # Edges # ######### <BLANKLINE> "Add(Integer(2), Symbol('x'))_()" -> "Integer(2)_(0,)"; "Add(Integer(2), Symbol('x'))_()" -> "Symbol('x')_(1,)"; } """ # repeat works by adding a signature tuple to the end of each node for its # position in the graph. For example, for expr = Add(x, Pow(x, 2)), the x in the # Pow will have the tuple (1, 0), meaning it is expr.args[1].args[0]. graphstyle = _graphstyle.copy() graphstyle.update(kwargs) nodes = [] edges = [] def traverse(e, depth, pos=()): nodes.append(dotnode(e, styles, labelfunc=labelfunc, pos=pos, repeat=repeat)) if maxdepth and depth >= maxdepth: return edges.extend(dotedges(e, atom=atom, pos=pos, repeat=repeat)) [traverse(arg, depth+1, pos + (i,)) for i, arg in enumerate(e.args) if not atom(arg)] traverse(expr, 0) return template%{'graphstyle': attrprint(graphstyle, delimiter='\n'), 'nodes': '\n'.join(nodes), 'edges': '\n'.join(edges)}
dotprint
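As an illustrative aside, the DOT source returned by ``dotprint`` can be written to a file and rendered with an external Graphviz installation; the file name and the ``srepr`` labels are arbitrary choices.

from sympy import dotprint, srepr
from sympy.abc import x

expr = x**2 + 2*x + 1
# srepr labels on leaf nodes instead of plain str(), depth capped at 3.
dot_src = dotprint(expr, labelfunc=srepr, maxdepth=3)

with open('expr.dot', 'w') as f:
    f.write(dot_src)
# Render externally, e.g.:  dot -Tpng expr.dot -o expr.png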
sympy
49
sympy/ntheory/egyptian_fraction.py
def egyptian_fraction(r, algorithm="Greedy"): """ Return the list of denominators of an Egyptian fraction expansion [1]_ of the said rational `r`. Parameters ========== r : Rational or (p, q) a positive rational number, ``p/q``. algorithm : { "Greedy", "Graham Jewett", "Takenouchi", "Golomb" }, optional Denotes the algorithm to be used (the default is "Greedy"). Examples ======== >>> from sympy import Rational >>> from sympy.ntheory.egyptian_fraction import egyptian_fraction >>> egyptian_fraction(Rational(3, 7)) [3, 11, 231] >>> egyptian_fraction((3, 7), "Graham Jewett") [7, 8, 9, 56, 57, 72, 3192] >>> egyptian_fraction((3, 7), "Takenouchi") [4, 7, 28] >>> egyptian_fraction((3, 7), "Golomb") [3, 15, 35] >>> egyptian_fraction((11, 5), "Golomb") [1, 2, 3, 4, 9, 234, 1118, 2580] See Also ======== sympy.core.numbers.Rational Notes ===== Currently the following algorithms are supported: 1) Greedy Algorithm Also called the Fibonacci-Sylvester algorithm [2]_. At each step, extract the largest unit fraction less than the target and replace the target with the remainder. It has some distinct properties: a) Given `p/q` in lowest terms, generates an expansion of maximum length `p`. Even as the numerators get large, the number of terms is seldom more than a handful. b) Uses minimal memory. c) The terms can blow up (standard examples of this are 5/121 and 31/311). The denominator is at most squared at each step (doubly-exponential growth) and typically exhibits singly-exponential growth. 2) Graham Jewett Algorithm The algorithm suggested by the result of Graham and Jewett. Note that this has a tendency to blow up: the length of the resulting expansion is always ``2**(x/gcd(x, y)) - 1``. See [3]_. 3) Takenouchi Algorithm The algorithm suggested by Takenouchi (1921). Differs from the Graham-Jewett algorithm only in the handling of duplicates. See [3]_. 4) Golomb's Algorithm A method given by Golumb (1962), using modular arithmetic and inverses. It yields the same results as a method using continued fractions proposed by Bleicher (1972). See [4]_. If the given rational is greater than or equal to 1, a greedy algorithm of summing the harmonic sequence 1/1 + 1/2 + 1/3 + ... is used, taking all the unit fractions of this sequence until adding one more would be greater than the given number. This list of denominators is prefixed to the result from the requested algorithm used on the remainder. For example, if r is 8/3, using the Greedy algorithm, we get [1, 2, 3, 4, 5, 6, 7, 14, 420], where the beginning of the sequence, [1, 2, 3, 4, 5, 6, 7] is part of the harmonic sequence summing to 363/140, leaving a remainder of 31/420, which yields [14, 420] by the Greedy algorithm. The result of egyptian_fraction(Rational(8, 3), "Golomb") is [1, 2, 3, 4, 5, 6, 7, 14, 574, 2788, 6460, 11590, 33062, 113820], and so on. References ========== .. [1] https://en.wikipedia.org/wiki/Egyptian_fraction .. [2] https://en.wikipedia.org/wiki/Greedy_algorithm_for_Egyptian_fractions .. [3] https://www.ics.uci.edu/~eppstein/numth/egypt/conflict.html .. [4] https://web.archive.org/web/20180413004012/https://ami.ektf.hu/uploads/papers/finalpdf/AMI_42_from129to134.pdf """
/usr/src/app/target_test_cases/failed_tests_egyptian_fraction.txt
def egyptian_fraction(r, algorithm="Greedy"): """ Return the list of denominators of an Egyptian fraction expansion [1]_ of the said rational `r`. Parameters ========== r : Rational or (p, q) a positive rational number, ``p/q``. algorithm : { "Greedy", "Graham Jewett", "Takenouchi", "Golomb" }, optional Denotes the algorithm to be used (the default is "Greedy"). Examples ======== >>> from sympy import Rational >>> from sympy.ntheory.egyptian_fraction import egyptian_fraction >>> egyptian_fraction(Rational(3, 7)) [3, 11, 231] >>> egyptian_fraction((3, 7), "Graham Jewett") [7, 8, 9, 56, 57, 72, 3192] >>> egyptian_fraction((3, 7), "Takenouchi") [4, 7, 28] >>> egyptian_fraction((3, 7), "Golomb") [3, 15, 35] >>> egyptian_fraction((11, 5), "Golomb") [1, 2, 3, 4, 9, 234, 1118, 2580] See Also ======== sympy.core.numbers.Rational Notes ===== Currently the following algorithms are supported: 1) Greedy Algorithm Also called the Fibonacci-Sylvester algorithm [2]_. At each step, extract the largest unit fraction less than the target and replace the target with the remainder. It has some distinct properties: a) Given `p/q` in lowest terms, generates an expansion of maximum length `p`. Even as the numerators get large, the number of terms is seldom more than a handful. b) Uses minimal memory. c) The terms can blow up (standard examples of this are 5/121 and 31/311). The denominator is at most squared at each step (doubly-exponential growth) and typically exhibits singly-exponential growth. 2) Graham Jewett Algorithm The algorithm suggested by the result of Graham and Jewett. Note that this has a tendency to blow up: the length of the resulting expansion is always ``2**(x/gcd(x, y)) - 1``. See [3]_. 3) Takenouchi Algorithm The algorithm suggested by Takenouchi (1921). Differs from the Graham-Jewett algorithm only in the handling of duplicates. See [3]_. 4) Golomb's Algorithm A method given by Golumb (1962), using modular arithmetic and inverses. It yields the same results as a method using continued fractions proposed by Bleicher (1972). See [4]_. If the given rational is greater than or equal to 1, a greedy algorithm of summing the harmonic sequence 1/1 + 1/2 + 1/3 + ... is used, taking all the unit fractions of this sequence until adding one more would be greater than the given number. This list of denominators is prefixed to the result from the requested algorithm used on the remainder. For example, if r is 8/3, using the Greedy algorithm, we get [1, 2, 3, 4, 5, 6, 7, 14, 420], where the beginning of the sequence, [1, 2, 3, 4, 5, 6, 7] is part of the harmonic sequence summing to 363/140, leaving a remainder of 31/420, which yields [14, 420] by the Greedy algorithm. The result of egyptian_fraction(Rational(8, 3), "Golomb") is [1, 2, 3, 4, 5, 6, 7, 14, 574, 2788, 6460, 11590, 33062, 113820], and so on. References ========== .. [1] https://en.wikipedia.org/wiki/Egyptian_fraction .. [2] https://en.wikipedia.org/wiki/Greedy_algorithm_for_Egyptian_fractions .. [3] https://www.ics.uci.edu/~eppstein/numth/egypt/conflict.html .. 
[4] https://web.archive.org/web/20180413004012/https://ami.ektf.hu/uploads/papers/finalpdf/AMI_42_from129to134.pdf """ if not isinstance(r, Rational): if isinstance(r, (Tuple, tuple)) and len(r) == 2: r = Rational(*r) else: raise ValueError("Value must be a Rational or tuple of ints") if r <= 0: raise ValueError("Value must be positive") # common cases that all methods agree on x, y = r.as_numer_denom() if y == 1 and x == 2: return [Integer(i) for i in [1, 2, 3, 6]] if x == y + 1: return [S.One, y] prefix, rem = egypt_harmonic(r) if rem == 0: return prefix # work in Python ints x, y = rem.p, rem.q # assert x < y and gcd(x, y) = 1 if algorithm == "Greedy": postfix = egypt_greedy(x, y) elif algorithm == "Graham Jewett": postfix = egypt_graham_jewett(x, y) elif algorithm == "Takenouchi": postfix = egypt_takenouchi(x, y) elif algorithm == "Golomb": postfix = egypt_golomb(x, y) else: raise ValueError("Entered invalid algorithm") return prefix + [Integer(i) for i in postfix]
egyptian_fraction
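As an illustrative aside, whichever algorithm is chosen the returned denominators are unit fractions summing back to the input rational, which gives a quick sanity check:

from sympy import Rational
from sympy.ntheory.egyptian_fraction import egyptian_fraction

r = Rational(3, 7)
for algo in ("Greedy", "Graham Jewett", "Takenouchi", "Golomb"):
    denoms = egyptian_fraction(r, algo)
    assert sum(Rational(1, d) for d in denoms) == r
    print(algo, denoms)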
sympy
50
sympy/crypto/crypto.py
def encipher_shift(msg, key, symbols=None): """ Performs shift cipher encryption on plaintext msg, and returns the ciphertext. Parameters ========== key : int The secret key. msg : str Plaintext of upper-case letters. Returns ======= str Ciphertext of upper-case letters. Examples ======== >>> from sympy.crypto.crypto import encipher_shift, decipher_shift >>> msg = "GONAVYBEATARMY" >>> ct = encipher_shift(msg, 1); ct 'HPOBWZCFBUBSNZ' To decipher the shifted text, change the sign of the key: >>> encipher_shift(ct, -1) 'GONAVYBEATARMY' There is also a convenience function that does this with the original key: >>> decipher_shift(ct, 1) 'GONAVYBEATARMY' Notes ===== ALGORITHM: STEPS: 0. Number the letters of the alphabet from 0, ..., N 1. Compute from the string ``msg`` a list ``L1`` of corresponding integers. 2. Compute from the list ``L1`` a new list ``L2``, given by adding ``(k mod 26)`` to each element in ``L1``. 3. Compute from the list ``L2`` a string ``ct`` of corresponding letters. The shift cipher is also called the Caesar cipher, after Julius Caesar, who, according to Suetonius, used it with a shift of three to protect messages of military significance. Caesar's nephew Augustus reportedly used a similar cipher, but with a right shift of 1. References ========== .. [1] https://en.wikipedia.org/wiki/Caesar_cipher .. [2] https://mathworld.wolfram.com/CaesarsMethod.html See Also ======== decipher_shift """
/usr/src/app/target_test_cases/failed_tests_encipher_shift.txt
def encipher_shift(msg, key, symbols=None): """ Performs shift cipher encryption on plaintext msg, and returns the ciphertext. Parameters ========== key : int The secret key. msg : str Plaintext of upper-case letters. Returns ======= str Ciphertext of upper-case letters. Examples ======== >>> from sympy.crypto.crypto import encipher_shift, decipher_shift >>> msg = "GONAVYBEATARMY" >>> ct = encipher_shift(msg, 1); ct 'HPOBWZCFBUBSNZ' To decipher the shifted text, change the sign of the key: >>> encipher_shift(ct, -1) 'GONAVYBEATARMY' There is also a convenience function that does this with the original key: >>> decipher_shift(ct, 1) 'GONAVYBEATARMY' Notes ===== ALGORITHM: STEPS: 0. Number the letters of the alphabet from 0, ..., N 1. Compute from the string ``msg`` a list ``L1`` of corresponding integers. 2. Compute from the list ``L1`` a new list ``L2``, given by adding ``(k mod 26)`` to each element in ``L1``. 3. Compute from the list ``L2`` a string ``ct`` of corresponding letters. The shift cipher is also called the Caesar cipher, after Julius Caesar, who, according to Suetonius, used it with a shift of three to protect messages of military significance. Caesar's nephew Augustus reportedly used a similar cipher, but with a right shift of 1. References ========== .. [1] https://en.wikipedia.org/wiki/Caesar_cipher .. [2] https://mathworld.wolfram.com/CaesarsMethod.html See Also ======== decipher_shift """ msg, _, A = _prep(msg, '', symbols) shift = len(A) - key % len(A) key = A[shift:] + A[:shift] return translate(msg, key, A)
encipher_shift
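As an illustrative aside, with the default 26-letter alphabet a shift of 13 is ROT13, so applying it twice recovers the plaintext:

from sympy.crypto.crypto import encipher_shift, decipher_shift

msg = "GONAVYBEATARMY"
rot13 = encipher_shift(msg, 13)
# Shifting by 13 again (or deciphering with key 13) inverts ROT13.
assert encipher_shift(rot13, 13) == msg
assert decipher_shift(rot13, 13) == msg
print(rot13)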
sympy
51
sympy/crypto/crypto.py
def encipher_vigenere(msg, key, symbols=None): """ Performs the Vigenere cipher encryption on plaintext ``msg``, and returns the ciphertext. Examples ======== >>> from sympy.crypto.crypto import encipher_vigenere, AZ >>> key = "encrypt" >>> msg = "meet me on monday" >>> encipher_vigenere(msg, key) 'QRGKKTHRZQEBPR' Section 1 of the Kryptos sculpture at the CIA headquarters uses this cipher and also changes the order of the alphabet [2]_. Here is the first line of that section of the sculpture: >>> from sympy.crypto.crypto import decipher_vigenere, padded_key >>> alp = padded_key('KRYPTOS', AZ()) >>> key = 'PALIMPSEST' >>> msg = 'EMUFPHZLRFAXYUSDJKZLDKRNSHGNFIVJ' >>> decipher_vigenere(msg, key, alp) 'BETWEENSUBTLESHADINGANDTHEABSENC' Explanation =========== The Vigenere cipher is named after Blaise de Vigenere, a sixteenth century diplomat and cryptographer, by a historical accident. Vigenere actually invented a different and more complicated cipher. The so-called *Vigenere cipher* was actually invented by Giovan Batista Belaso in 1553. This cipher was used in the 1800's, for example, during the American Civil War. The Confederacy used a brass cipher disk to implement the Vigenere cipher (now on display in the NSA Museum in Fort Meade) [1]_. The Vigenere cipher is a generalization of the shift cipher. Whereas the shift cipher shifts each letter by the same amount (that amount being the key of the shift cipher) the Vigenere cipher shifts a letter by an amount determined by the key (which is a word or phrase known only to the sender and receiver). For example, if the key was a single letter, such as "C", then the so-called Vigenere cipher is actually a shift cipher with a shift of `2` (since "C" is the 2nd letter of the alphabet, if you start counting at `0`). If the key was a word with two letters, such as "CA", then the so-called Vigenere cipher will shift letters in even positions by `2` and letters in odd positions are left alone (shifted by `0`, since "A" is the 0th letter, if you start counting at `0`). ALGORITHM: INPUT: ``msg``: string of characters that appear in ``symbols`` (the plaintext) ``key``: a string of characters that appear in ``symbols`` (the secret key) ``symbols``: a string of letters defining the alphabet OUTPUT: ``ct``: string of characters (the ciphertext message) STEPS: 0. Number the letters of the alphabet from 0, ..., N 1. Compute from the string ``key`` a list ``L1`` of corresponding integers. Let ``n1 = len(L1)``. 2. Compute from the string ``msg`` a list ``L2`` of corresponding integers. Let ``n2 = len(L2)``. 3. Break ``L2`` up sequentially into sublists of size ``n1``; the last sublist may be smaller than ``n1`` 4. For each of these sublists ``L`` of ``L2``, compute a new list ``C`` given by ``C[i] = L[i] + L1[i] (mod N)`` to the ``i``-th element in the sublist, for each ``i``. 5. Assemble these lists ``C`` by concatenation into a new list of length ``n2``. 6. Compute from the new list a string ``ct`` of corresponding letters. Once it is known that the key is, say, `n` characters long, frequency analysis can be applied to every `n`-th letter of the ciphertext to determine the plaintext. This method is called *Kasiski examination* (although it was first discovered by Babbage). If they key is as long as the message and is comprised of randomly selected characters -- a one-time pad -- the message is theoretically unbreakable. The cipher Vigenere actually discovered is an "auto-key" cipher described as follows. 
ALGORITHM: INPUT: ``key``: a string of letters (the secret key) ``msg``: string of letters (the plaintext message) OUTPUT: ``ct``: string of upper-case letters (the ciphertext message) STEPS: 0. Number the letters of the alphabet from 0, ..., N 1. Compute from the string ``msg`` a list ``L2`` of corresponding integers. Let ``n2 = len(L2)``. 2. Let ``n1`` be the length of the key. Append to the string ``key`` the first ``n2 - n1`` characters of the plaintext message. Compute from this string (also of length ``n2``) a list ``L1`` of integers corresponding to the letter numbers in the first step. 3. Compute a new list ``C`` given by ``C[i] = L1[i] + L2[i] (mod N)``. 4. Compute from the new list a string ``ct`` of letters corresponding to the new integers. To decipher the auto-key ciphertext, the key is used to decipher the first ``n1`` characters and then those characters become the key to decipher the next ``n1`` characters, etc...: >>> m = AZ('go navy, beat army! yes you can'); m 'GONAVYBEATARMYYESYOUCAN' >>> key = AZ('gold bug'); n1 = len(key); n2 = len(m) >>> auto_key = key + m[:n2 - n1]; auto_key 'GOLDBUGGONAVYBEATARMYYE' >>> ct = encipher_vigenere(m, auto_key); ct 'MCYDWSHKOGAMKZCELYFGAYR' >>> n1 = len(key) >>> pt = [] >>> while ct: ... part, ct = ct[:n1], ct[n1:] ... pt.append(decipher_vigenere(part, key)) ... key = pt[-1] ... >>> ''.join(pt) == m True References ========== .. [1] https://en.wikipedia.org/wiki/Vigenere_cipher .. [2] https://web.archive.org/web/20071116100808/https://filebox.vt.edu/users/batman/kryptos.html (short URL: https://goo.gl/ijr22d) """
/usr/src/app/target_test_cases/failed_tests_encipher_vigenere.txt
def encipher_vigenere(msg, key, symbols=None): """ Performs the Vigenere cipher encryption on plaintext ``msg``, and returns the ciphertext. Examples ======== >>> from sympy.crypto.crypto import encipher_vigenere, AZ >>> key = "encrypt" >>> msg = "meet me on monday" >>> encipher_vigenere(msg, key) 'QRGKKTHRZQEBPR' Section 1 of the Kryptos sculpture at the CIA headquarters uses this cipher and also changes the order of the alphabet [2]_. Here is the first line of that section of the sculpture: >>> from sympy.crypto.crypto import decipher_vigenere, padded_key >>> alp = padded_key('KRYPTOS', AZ()) >>> key = 'PALIMPSEST' >>> msg = 'EMUFPHZLRFAXYUSDJKZLDKRNSHGNFIVJ' >>> decipher_vigenere(msg, key, alp) 'BETWEENSUBTLESHADINGANDTHEABSENC' Explanation =========== The Vigenere cipher is named after Blaise de Vigenere, a sixteenth century diplomat and cryptographer, by a historical accident. Vigenere actually invented a different and more complicated cipher. The so-called *Vigenere cipher* was actually invented by Giovan Batista Belaso in 1553. This cipher was used in the 1800's, for example, during the American Civil War. The Confederacy used a brass cipher disk to implement the Vigenere cipher (now on display in the NSA Museum in Fort Meade) [1]_. The Vigenere cipher is a generalization of the shift cipher. Whereas the shift cipher shifts each letter by the same amount (that amount being the key of the shift cipher) the Vigenere cipher shifts a letter by an amount determined by the key (which is a word or phrase known only to the sender and receiver). For example, if the key was a single letter, such as "C", then the so-called Vigenere cipher is actually a shift cipher with a shift of `2` (since "C" is the 2nd letter of the alphabet, if you start counting at `0`). If the key was a word with two letters, such as "CA", then the so-called Vigenere cipher will shift letters in even positions by `2` and letters in odd positions are left alone (shifted by `0`, since "A" is the 0th letter, if you start counting at `0`). ALGORITHM: INPUT: ``msg``: string of characters that appear in ``symbols`` (the plaintext) ``key``: a string of characters that appear in ``symbols`` (the secret key) ``symbols``: a string of letters defining the alphabet OUTPUT: ``ct``: string of characters (the ciphertext message) STEPS: 0. Number the letters of the alphabet from 0, ..., N 1. Compute from the string ``key`` a list ``L1`` of corresponding integers. Let ``n1 = len(L1)``. 2. Compute from the string ``msg`` a list ``L2`` of corresponding integers. Let ``n2 = len(L2)``. 3. Break ``L2`` up sequentially into sublists of size ``n1``; the last sublist may be smaller than ``n1`` 4. For each of these sublists ``L`` of ``L2``, compute a new list ``C`` given by ``C[i] = L[i] + L1[i] (mod N)`` to the ``i``-th element in the sublist, for each ``i``. 5. Assemble these lists ``C`` by concatenation into a new list of length ``n2``. 6. Compute from the new list a string ``ct`` of corresponding letters. Once it is known that the key is, say, `n` characters long, frequency analysis can be applied to every `n`-th letter of the ciphertext to determine the plaintext. This method is called *Kasiski examination* (although it was first discovered by Babbage). If they key is as long as the message and is comprised of randomly selected characters -- a one-time pad -- the message is theoretically unbreakable. The cipher Vigenere actually discovered is an "auto-key" cipher described as follows. 
ALGORITHM: INPUT: ``key``: a string of letters (the secret key) ``msg``: string of letters (the plaintext message) OUTPUT: ``ct``: string of upper-case letters (the ciphertext message) STEPS: 0. Number the letters of the alphabet from 0, ..., N 1. Compute from the string ``msg`` a list ``L2`` of corresponding integers. Let ``n2 = len(L2)``. 2. Let ``n1`` be the length of the key. Append to the string ``key`` the first ``n2 - n1`` characters of the plaintext message. Compute from this string (also of length ``n2``) a list ``L1`` of integers corresponding to the letter numbers in the first step. 3. Compute a new list ``C`` given by ``C[i] = L1[i] + L2[i] (mod N)``. 4. Compute from the new list a string ``ct`` of letters corresponding to the new integers. To decipher the auto-key ciphertext, the key is used to decipher the first ``n1`` characters and then those characters become the key to decipher the next ``n1`` characters, etc...: >>> m = AZ('go navy, beat army! yes you can'); m 'GONAVYBEATARMYYESYOUCAN' >>> key = AZ('gold bug'); n1 = len(key); n2 = len(m) >>> auto_key = key + m[:n2 - n1]; auto_key 'GOLDBUGGONAVYBEATARMYYE' >>> ct = encipher_vigenere(m, auto_key); ct 'MCYDWSHKOGAMKZCELYFGAYR' >>> n1 = len(key) >>> pt = [] >>> while ct: ... part, ct = ct[:n1], ct[n1:] ... pt.append(decipher_vigenere(part, key)) ... key = pt[-1] ... >>> ''.join(pt) == m True References ========== .. [1] https://en.wikipedia.org/wiki/Vigenere_cipher .. [2] https://web.archive.org/web/20071116100808/https://filebox.vt.edu/users/batman/kryptos.html (short URL: https://goo.gl/ijr22d) """ msg, key, A = _prep(msg, key, symbols) map = {c: i for i, c in enumerate(A)} key = [map[c] for c in key] N = len(map) k = len(key) rv = [] for i, m in enumerate(msg): rv.append(A[(map[m] + key[i % k]) % N]) rv = ''.join(rv) return rv
encipher_vigenere
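As an illustrative aside, here is a round-trip check of the docstring's first example; ``AZ`` uppercases and strips non-letters, so the deciphered text is compared against the normalized plaintext:

from sympy.crypto.crypto import AZ, encipher_vigenere, decipher_vigenere

msg = "meet me on monday"
key = "encrypt"
ct = encipher_vigenere(msg, key)
print(ct)   # 'QRGKKTHRZQEBPR' per the docstring
assert decipher_vigenere(ct, key) == AZ(msg)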
sympy
52
sympy/physics/secondquant.py
def evaluate_deltas(e): """ We evaluate KroneckerDelta symbols in the expression assuming Einstein summation. Explanation =========== If one index is repeated it is summed over and in effect substituted with the other one. If both indices are repeated we substitute according to what is the preferred index. this is determined by KroneckerDelta.preferred_index and KroneckerDelta.killable_index. In case there are no possible substitutions or if a substitution would imply a loss of information, nothing is done. In case an index appears in more than one KroneckerDelta, the resulting substitution depends on the order of the factors. Since the ordering is platform dependent, the literal expression resulting from this function may be hard to predict. Examples ======== We assume the following: >>> from sympy import symbols, Function, Dummy, KroneckerDelta >>> from sympy.physics.secondquant import evaluate_deltas >>> i,j = symbols('i j', below_fermi=True, cls=Dummy) >>> a,b = symbols('a b', above_fermi=True, cls=Dummy) >>> p,q = symbols('p q', cls=Dummy) >>> f = Function('f') >>> t = Function('t') The order of preference for these indices according to KroneckerDelta is (a, b, i, j, p, q). Trivial cases: >>> evaluate_deltas(KroneckerDelta(i,j)*f(i)) # d_ij f(i) -> f(j) f(_j) >>> evaluate_deltas(KroneckerDelta(i,j)*f(j)) # d_ij f(j) -> f(i) f(_i) >>> evaluate_deltas(KroneckerDelta(i,p)*f(p)) # d_ip f(p) -> f(i) f(_i) >>> evaluate_deltas(KroneckerDelta(q,p)*f(p)) # d_qp f(p) -> f(q) f(_q) >>> evaluate_deltas(KroneckerDelta(q,p)*f(q)) # d_qp f(q) -> f(p) f(_p) More interesting cases: >>> evaluate_deltas(KroneckerDelta(i,p)*t(a,i)*f(p,q)) f(_i, _q)*t(_a, _i) >>> evaluate_deltas(KroneckerDelta(a,p)*t(a,i)*f(p,q)) f(_a, _q)*t(_a, _i) >>> evaluate_deltas(KroneckerDelta(p,q)*f(p,q)) f(_p, _p) Finally, here are some cases where nothing is done, because that would imply a loss of information: >>> evaluate_deltas(KroneckerDelta(i,p)*f(q)) f(_q)*KroneckerDelta(_i, _p) >>> evaluate_deltas(KroneckerDelta(i,p)*f(i)) f(_i)*KroneckerDelta(_i, _p) """
/usr/src/app/target_test_cases/failed_tests_evaluate_deltas.txt
def evaluate_deltas(e): """ We evaluate KroneckerDelta symbols in the expression assuming Einstein summation. Explanation =========== If one index is repeated it is summed over and in effect substituted with the other one. If both indices are repeated we substitute according to what is the preferred index. this is determined by KroneckerDelta.preferred_index and KroneckerDelta.killable_index. In case there are no possible substitutions or if a substitution would imply a loss of information, nothing is done. In case an index appears in more than one KroneckerDelta, the resulting substitution depends on the order of the factors. Since the ordering is platform dependent, the literal expression resulting from this function may be hard to predict. Examples ======== We assume the following: >>> from sympy import symbols, Function, Dummy, KroneckerDelta >>> from sympy.physics.secondquant import evaluate_deltas >>> i,j = symbols('i j', below_fermi=True, cls=Dummy) >>> a,b = symbols('a b', above_fermi=True, cls=Dummy) >>> p,q = symbols('p q', cls=Dummy) >>> f = Function('f') >>> t = Function('t') The order of preference for these indices according to KroneckerDelta is (a, b, i, j, p, q). Trivial cases: >>> evaluate_deltas(KroneckerDelta(i,j)*f(i)) # d_ij f(i) -> f(j) f(_j) >>> evaluate_deltas(KroneckerDelta(i,j)*f(j)) # d_ij f(j) -> f(i) f(_i) >>> evaluate_deltas(KroneckerDelta(i,p)*f(p)) # d_ip f(p) -> f(i) f(_i) >>> evaluate_deltas(KroneckerDelta(q,p)*f(p)) # d_qp f(p) -> f(q) f(_q) >>> evaluate_deltas(KroneckerDelta(q,p)*f(q)) # d_qp f(q) -> f(p) f(_p) More interesting cases: >>> evaluate_deltas(KroneckerDelta(i,p)*t(a,i)*f(p,q)) f(_i, _q)*t(_a, _i) >>> evaluate_deltas(KroneckerDelta(a,p)*t(a,i)*f(p,q)) f(_a, _q)*t(_a, _i) >>> evaluate_deltas(KroneckerDelta(p,q)*f(p,q)) f(_p, _p) Finally, here are some cases where nothing is done, because that would imply a loss of information: >>> evaluate_deltas(KroneckerDelta(i,p)*f(q)) f(_q)*KroneckerDelta(_i, _p) >>> evaluate_deltas(KroneckerDelta(i,p)*f(i)) f(_i)*KroneckerDelta(_i, _p) """ # We treat Deltas only in mul objects # for general function objects we don't evaluate KroneckerDeltas in arguments, # but here we hard code exceptions to this rule accepted_functions = ( Add, ) if isinstance(e, accepted_functions): return e.func(*[evaluate_deltas(arg) for arg in e.args]) elif isinstance(e, Mul): # find all occurrences of delta function and count each index present in # expression. deltas = [] indices = {} for i in e.args: for s in i.free_symbols: if s in indices: indices[s] += 1 else: indices[s] = 0 # geek counting simplifies logic below if isinstance(i, KroneckerDelta): deltas.append(i) for d in deltas: # If we do something, and there are more deltas, we should recurse # to treat the resulting expression properly if d.killable_index.is_Symbol and indices[d.killable_index]: e = e.subs(d.killable_index, d.preferred_index) if len(deltas) > 1: return evaluate_deltas(e) elif (d.preferred_index.is_Symbol and indices[d.preferred_index] and d.indices_contain_equal_information): e = e.subs(d.preferred_index, d.killable_index) if len(deltas) > 1: return evaluate_deltas(e) else: pass return e # nothing to do, maybe we hit a Symbol or a number else: return e
evaluate_deltas
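As an illustrative aside, ``evaluate_deltas`` recurses through ``Add`` and treats each multiplicative term on its own, so sums of delta-carrying products are handled term by term; the functions below are arbitrary placeholders:

from sympy import Dummy, Function, KroneckerDelta, symbols
from sympy.physics.secondquant import evaluate_deltas

i, j = symbols('i j', below_fermi=True, cls=Dummy)
a, b = symbols('a b', above_fermi=True, cls=Dummy)
f = Function('f')
t = Function('t')

expr = KroneckerDelta(i, j)*f(i) + KroneckerDelta(a, b)*t(b, j)
print(evaluate_deltas(expr))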
sympy
53
sympy/core/function.py
def expand_power_base(expr, deep=True, force=False): """ Wrapper around expand that only uses the power_base hint. A wrapper to expand(power_base=True) which separates a power with a base that is a Mul into a product of powers, without performing any other expansions, provided that assumptions about the power's base and exponent allow. deep=False (default is True) will only apply to the top-level expression. force=True (default is False) will cause the expansion to ignore assumptions about the base and exponent. When False, the expansion will only happen if the base is non-negative or the exponent is an integer. >>> from sympy.abc import x, y, z >>> from sympy import expand_power_base, sin, cos, exp, Symbol >>> (x*y)**2 x**2*y**2 >>> (2*x)**y (2*x)**y >>> expand_power_base(_) 2**y*x**y >>> expand_power_base((x*y)**z) (x*y)**z >>> expand_power_base((x*y)**z, force=True) x**z*y**z >>> expand_power_base(sin((x*y)**z), deep=False) sin((x*y)**z) >>> expand_power_base(sin((x*y)**z), force=True) sin(x**z*y**z) >>> expand_power_base((2*sin(x))**y + (2*cos(x))**y) 2**y*sin(x)**y + 2**y*cos(x)**y >>> expand_power_base((2*exp(y))**x) 2**x*exp(y)**x >>> expand_power_base((2*cos(x))**y) 2**y*cos(x)**y Notice that sums are left untouched. If this is not the desired behavior, apply full ``expand()`` to the expression: >>> expand_power_base(((x+y)*z)**2) z**2*(x + y)**2 >>> (((x+y)*z)**2).expand() x**2*z**2 + 2*x*y*z**2 + y**2*z**2 >>> expand_power_base((2*y)**(1+z)) 2**(z + 1)*y**(z + 1) >>> ((2*y)**(1+z)).expand() 2*2**z*y**(z + 1) The power that is unexpanded can be expanded safely when ``y != 0``, otherwise different values might be obtained for the expression: >>> prev = _ If we indicate that ``y`` is positive but then replace it with a value of 0 after expansion, the expression becomes 0: >>> p = Symbol('p', positive=True) >>> prev.subs(y, p).expand().subs(p, 0) 0 But if ``z = -1`` the expression would not be zero: >>> prev.subs(y, 0).subs(z, -1) 1 See Also ======== expand """
/usr/src/app/target_test_cases/failed_tests_expand_power_base.txt
def expand_power_base(expr, deep=True, force=False): """ Wrapper around expand that only uses the power_base hint. A wrapper to expand(power_base=True) which separates a power with a base that is a Mul into a product of powers, without performing any other expansions, provided that assumptions about the power's base and exponent allow. deep=False (default is True) will only apply to the top-level expression. force=True (default is False) will cause the expansion to ignore assumptions about the base and exponent. When False, the expansion will only happen if the base is non-negative or the exponent is an integer. >>> from sympy.abc import x, y, z >>> from sympy import expand_power_base, sin, cos, exp, Symbol >>> (x*y)**2 x**2*y**2 >>> (2*x)**y (2*x)**y >>> expand_power_base(_) 2**y*x**y >>> expand_power_base((x*y)**z) (x*y)**z >>> expand_power_base((x*y)**z, force=True) x**z*y**z >>> expand_power_base(sin((x*y)**z), deep=False) sin((x*y)**z) >>> expand_power_base(sin((x*y)**z), force=True) sin(x**z*y**z) >>> expand_power_base((2*sin(x))**y + (2*cos(x))**y) 2**y*sin(x)**y + 2**y*cos(x)**y >>> expand_power_base((2*exp(y))**x) 2**x*exp(y)**x >>> expand_power_base((2*cos(x))**y) 2**y*cos(x)**y Notice that sums are left untouched. If this is not the desired behavior, apply full ``expand()`` to the expression: >>> expand_power_base(((x+y)*z)**2) z**2*(x + y)**2 >>> (((x+y)*z)**2).expand() x**2*z**2 + 2*x*y*z**2 + y**2*z**2 >>> expand_power_base((2*y)**(1+z)) 2**(z + 1)*y**(z + 1) >>> ((2*y)**(1+z)).expand() 2*2**z*y**(z + 1) The power that is unexpanded can be expanded safely when ``y != 0``, otherwise different values might be obtained for the expression: >>> prev = _ If we indicate that ``y`` is positive but then replace it with a value of 0 after expansion, the expression becomes 0: >>> p = Symbol('p', positive=True) >>> prev.subs(y, p).expand().subs(p, 0) 0 But if ``z = -1`` the expression would not be zero: >>> prev.subs(y, 0).subs(z, -1) 1 See Also ======== expand """ return sympify(expr).expand(deep=deep, log=False, mul=False, power_exp=False, power_base=True, multinomial=False, basic=False, force=force)
expand_power_base
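The assumption-sensitivity described in the record above is easy to exercise directly. The following is a small illustrative sketch, assuming only a standard SymPy installation, contrasting the default behaviour with force=True and with a positive symbol; it is not part of the dataset row.

from sympy import expand_power_base, symbols, Symbol

x, y, z = symbols('x y z')
p = Symbol('p', positive=True)

# Left alone: nothing is known about the sign of x*y and z is not an integer.
print(expand_power_base((x*y)**z))              # (x*y)**z

# force=True ignores the assumption check entirely.
print(expand_power_base((x*y)**z, force=True))  # x**z*y**z

# A factor known to be non-negative may be separated without force.
print(expand_power_base((p*y)**z))              # p**z*y**z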
sympy
54
sympy/printing/codeprinter.py
def fcode(expr, assign_to=None, **settings): """Converts an expr to a string of fortran code Parameters ========== expr : Expr A SymPy expression to be converted. assign_to : optional When given, the argument is used as the name of the variable to which the expression is assigned. Can be a string, ``Symbol``, ``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of line-wrapping, or for expressions that generate multi-line statements. precision : integer, optional DEPRECATED. Use type_mappings instead. The precision for numbers such as pi [default=17]. user_functions : dict, optional A dictionary where keys are ``FunctionClass`` instances and values are their string representations. Alternatively, the dictionary value can be a list of tuples i.e. [(argument_test, cfunction_string)]. See below for examples. human : bool, optional If True, the result is a single string that may contain some constant declarations for the number symbols. If False, the same information is returned in a tuple of (symbols_to_declare, not_supported_functions, code_text). [default=True]. contract: bool, optional If True, ``Indexed`` instances are assumed to obey tensor contraction rules and the corresponding nested loops over indices are generated. Setting contract=False will not generate loops, instead the user is responsible to provide values for the indices in the code. [default=True]. source_format : optional The source format can be either 'fixed' or 'free'. [default='fixed'] standard : integer, optional The Fortran standard to be followed. This is specified as an integer. Acceptable standards are 66, 77, 90, 95, 2003, and 2008. Default is 77. Note that currently the only distinction internally is between standards before 95, and those 95 and after. This may change later as more features are added. name_mangling : bool, optional If True, then the variables that would become identical in case-insensitive Fortran are mangled by appending different number of ``_`` at the end. If False, SymPy Will not interfere with naming of variables. [default=True] Examples ======== >>> from sympy import fcode, symbols, Rational, sin, ceiling, floor >>> x, tau = symbols("x, tau") >>> fcode((2*tau)**Rational(7, 2)) ' 8*sqrt(2.0d0)*tau**(7.0d0/2.0d0)' >>> fcode(sin(x), assign_to="s") ' s = sin(x)' Custom printing can be defined for certain types by passing a dictionary of "type" : "function" to the ``user_functions`` kwarg. Alternatively, the dictionary value can be a list of tuples i.e. [(argument_test, cfunction_string)]. >>> custom_functions = { ... "ceiling": "CEIL", ... "floor": [(lambda x: not x.is_integer, "FLOOR1"), ... (lambda x: x.is_integer, "FLOOR2")] ... } >>> fcode(floor(x) + ceiling(x), user_functions=custom_functions) ' CEIL(x) + FLOOR1(x)' ``Piecewise`` expressions are converted into conditionals. If an ``assign_to`` variable is provided an if statement is created, otherwise the ternary operator is used. Note that if the ``Piecewise`` lacks a default term, represented by ``(expr, True)`` then an error will be thrown. This is to prevent generating an expression that may not evaluate to anything. >>> from sympy import Piecewise >>> expr = Piecewise((x + 1, x > 0), (x, True)) >>> print(fcode(expr, tau)) if (x > 0) then tau = x + 1 else tau = x end if Support for loops is provided through ``Indexed`` types. 
With ``contract=True`` these expressions will be turned into loops, whereas ``contract=False`` will just print the assignment expression that should be looped over: >>> from sympy import Eq, IndexedBase, Idx >>> len_y = 5 >>> y = IndexedBase('y', shape=(len_y,)) >>> t = IndexedBase('t', shape=(len_y,)) >>> Dy = IndexedBase('Dy', shape=(len_y-1,)) >>> i = Idx('i', len_y-1) >>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i])) >>> fcode(e.rhs, assign_to=e.lhs, contract=False) ' Dy(i) = (y(i + 1) - y(i))/(t(i + 1) - t(i))' Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions must be provided to ``assign_to``. Note that any expression that can be generated normally can also exist inside a Matrix: >>> from sympy import Matrix, MatrixSymbol >>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)]) >>> A = MatrixSymbol('A', 3, 1) >>> print(fcode(mat, A)) A(1, 1) = x**2 if (x > 0) then A(2, 1) = x + 1 else A(2, 1) = x end if A(3, 1) = sin(x) """
/usr/src/app/target_test_cases/failed_tests_fcode.txt
def fcode(expr, assign_to=None, **settings): """Converts an expr to a string of fortran code Parameters ========== expr : Expr A SymPy expression to be converted. assign_to : optional When given, the argument is used as the name of the variable to which the expression is assigned. Can be a string, ``Symbol``, ``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of line-wrapping, or for expressions that generate multi-line statements. precision : integer, optional DEPRECATED. Use type_mappings instead. The precision for numbers such as pi [default=17]. user_functions : dict, optional A dictionary where keys are ``FunctionClass`` instances and values are their string representations. Alternatively, the dictionary value can be a list of tuples i.e. [(argument_test, cfunction_string)]. See below for examples. human : bool, optional If True, the result is a single string that may contain some constant declarations for the number symbols. If False, the same information is returned in a tuple of (symbols_to_declare, not_supported_functions, code_text). [default=True]. contract: bool, optional If True, ``Indexed`` instances are assumed to obey tensor contraction rules and the corresponding nested loops over indices are generated. Setting contract=False will not generate loops, instead the user is responsible to provide values for the indices in the code. [default=True]. source_format : optional The source format can be either 'fixed' or 'free'. [default='fixed'] standard : integer, optional The Fortran standard to be followed. This is specified as an integer. Acceptable standards are 66, 77, 90, 95, 2003, and 2008. Default is 77. Note that currently the only distinction internally is between standards before 95, and those 95 and after. This may change later as more features are added. name_mangling : bool, optional If True, then the variables that would become identical in case-insensitive Fortran are mangled by appending different number of ``_`` at the end. If False, SymPy Will not interfere with naming of variables. [default=True] Examples ======== >>> from sympy import fcode, symbols, Rational, sin, ceiling, floor >>> x, tau = symbols("x, tau") >>> fcode((2*tau)**Rational(7, 2)) ' 8*sqrt(2.0d0)*tau**(7.0d0/2.0d0)' >>> fcode(sin(x), assign_to="s") ' s = sin(x)' Custom printing can be defined for certain types by passing a dictionary of "type" : "function" to the ``user_functions`` kwarg. Alternatively, the dictionary value can be a list of tuples i.e. [(argument_test, cfunction_string)]. >>> custom_functions = { ... "ceiling": "CEIL", ... "floor": [(lambda x: not x.is_integer, "FLOOR1"), ... (lambda x: x.is_integer, "FLOOR2")] ... } >>> fcode(floor(x) + ceiling(x), user_functions=custom_functions) ' CEIL(x) + FLOOR1(x)' ``Piecewise`` expressions are converted into conditionals. If an ``assign_to`` variable is provided an if statement is created, otherwise the ternary operator is used. Note that if the ``Piecewise`` lacks a default term, represented by ``(expr, True)`` then an error will be thrown. This is to prevent generating an expression that may not evaluate to anything. >>> from sympy import Piecewise >>> expr = Piecewise((x + 1, x > 0), (x, True)) >>> print(fcode(expr, tau)) if (x > 0) then tau = x + 1 else tau = x end if Support for loops is provided through ``Indexed`` types. 
With ``contract=True`` these expressions will be turned into loops, whereas ``contract=False`` will just print the assignment expression that should be looped over: >>> from sympy import Eq, IndexedBase, Idx >>> len_y = 5 >>> y = IndexedBase('y', shape=(len_y,)) >>> t = IndexedBase('t', shape=(len_y,)) >>> Dy = IndexedBase('Dy', shape=(len_y-1,)) >>> i = Idx('i', len_y-1) >>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i])) >>> fcode(e.rhs, assign_to=e.lhs, contract=False) ' Dy(i) = (y(i + 1) - y(i))/(t(i + 1) - t(i))' Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions must be provided to ``assign_to``. Note that any expression that can be generated normally can also exist inside a Matrix: >>> from sympy import Matrix, MatrixSymbol >>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)]) >>> A = MatrixSymbol('A', 3, 1) >>> print(fcode(mat, A)) A(1, 1) = x**2 if (x > 0) then A(2, 1) = x + 1 else A(2, 1) = x end if A(3, 1) = sin(x) """ from sympy.printing.fortran import FCodePrinter return FCodePrinter(settings).doprint(expr, assign_to)
fcode
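As a quick illustration of the options documented in the record above, here is a short sketch (assuming a standard SymPy installation) that switches between fixed and free source form and renders a Piecewise as an if/else block; outputs in comments are indicative.

from sympy import fcode, symbols, sin, Piecewise

x, tau = symbols('x tau')

# Default: fixed-form Fortran 77 output with the leading column indent.
print(fcode(sin(x), assign_to='s'))      #       s = sin(x)

# Free-form source and a post-1995 standard only change the layout rules.
print(fcode(sin(x), assign_to='s', source_format='free', standard=95))

# A Piecewise with a default (expr, True) branch becomes an if/else block
# when a target variable is supplied.
print(fcode(Piecewise((x + 1, x > 0), (x, True)), tau))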
sympy
55
sympy/calculus/finite_diff.py
def finite_diff_weights(order, x_list, x0=S.One): """ Calculates the finite difference weights for an arbitrarily spaced one-dimensional grid (``x_list``) for derivatives at ``x0`` of order 0, 1, ..., up to ``order`` using a recursive formula. Order of accuracy is at least ``len(x_list) - order``, if ``x_list`` is defined correctly. Parameters ========== order: int Up to what derivative order weights should be calculated. 0 corresponds to interpolation. x_list: sequence Sequence of (unique) values for the independent variable. It is useful (but not necessary) to order ``x_list`` from nearest to furthest from ``x0``; see examples below. x0: Number or Symbol Root or value of the independent variable for which the finite difference weights should be generated. Default is ``S.One``. Returns ======= list A list of sublists, each corresponding to coefficients for increasing derivative order, and each containing lists of coefficients for increasing subsets of x_list. Examples ======== >>> from sympy import finite_diff_weights, S >>> res = finite_diff_weights(1, [-S(1)/2, S(1)/2, S(3)/2, S(5)/2], 0) >>> res [[[1, 0, 0, 0], [1/2, 1/2, 0, 0], [3/8, 3/4, -1/8, 0], [5/16, 15/16, -5/16, 1/16]], [[0, 0, 0, 0], [-1, 1, 0, 0], [-1, 1, 0, 0], [-23/24, 7/8, 1/8, -1/24]]] >>> res[0][-1] # FD weights for 0th derivative, using full x_list [5/16, 15/16, -5/16, 1/16] >>> res[1][-1] # FD weights for 1st derivative [-23/24, 7/8, 1/8, -1/24] >>> res[1][-2] # FD weights for 1st derivative, using x_list[:-1] [-1, 1, 0, 0] >>> res[1][-1][0] # FD weight for 1st deriv. for x_list[0] -23/24 >>> res[1][-1][1] # FD weight for 1st deriv. for x_list[1], etc. 7/8 Each sublist contains the most accurate formula at the end. Note, that in the above example ``res[1][1]`` is the same as ``res[1][2]``. Since res[1][2] has an order of accuracy of ``len(x_list[:3]) - order = 3 - 1 = 2``, the same is true for ``res[1][1]``! >>> res = finite_diff_weights(1, [S(0), S(1), -S(1), S(2), -S(2)], 0)[1] >>> res [[0, 0, 0, 0, 0], [-1, 1, 0, 0, 0], [0, 1/2, -1/2, 0, 0], [-1/2, 1, -1/3, -1/6, 0], [0, 2/3, -2/3, -1/12, 1/12]] >>> res[0] # no approximation possible, using x_list[0] only [0, 0, 0, 0, 0] >>> res[1] # classic forward step approximation [-1, 1, 0, 0, 0] >>> res[2] # classic centered approximation [0, 1/2, -1/2, 0, 0] >>> res[3:] # higher order approximations [[-1/2, 1, -1/3, -1/6, 0], [0, 2/3, -2/3, -1/12, 1/12]] Let us compare this to a differently defined ``x_list``. Pay attention to ``foo[i][k]`` corresponding to the gridpoint defined by ``x_list[k]``. >>> foo = finite_diff_weights(1, [-S(2), -S(1), S(0), S(1), S(2)], 0)[1] >>> foo [[0, 0, 0, 0, 0], [-1, 1, 0, 0, 0], [1/2, -2, 3/2, 0, 0], [1/6, -1, 1/2, 1/3, 0], [1/12, -2/3, 0, 2/3, -1/12]] >>> foo[1] # not the same and of lower accuracy as res[1]! [-1, 1, 0, 0, 0] >>> foo[2] # classic double backward step approximation [1/2, -2, 3/2, 0, 0] >>> foo[4] # the same as res[4] [1/12, -2/3, 0, 2/3, -1/12] Note that, unless you plan on using approximations based on subsets of ``x_list``, the order of gridpoints does not matter. The capability to generate weights at arbitrary points can be used e.g. 
to minimize Runge's phenomenon by using Chebyshev nodes: >>> from sympy import cos, symbols, pi, simplify >>> N, (h, x) = 4, symbols('h x') >>> x_list = [x+h*cos(i*pi/(N)) for i in range(N,-1,-1)] # chebyshev nodes >>> print(x_list) [-h + x, -sqrt(2)*h/2 + x, x, sqrt(2)*h/2 + x, h + x] >>> mycoeffs = finite_diff_weights(1, x_list, 0)[1][4] >>> [simplify(c) for c in mycoeffs] #doctest: +NORMALIZE_WHITESPACE [(h**3/2 + h**2*x - 3*h*x**2 - 4*x**3)/h**4, (-sqrt(2)*h**3 - 4*h**2*x + 3*sqrt(2)*h*x**2 + 8*x**3)/h**4, (6*h**2*x - 8*x**3)/h**4, (sqrt(2)*h**3 - 4*h**2*x - 3*sqrt(2)*h*x**2 + 8*x**3)/h**4, (-h**3/2 + h**2*x + 3*h*x**2 - 4*x**3)/h**4] Notes ===== If weights for a finite difference approximation of 3rd order derivative is wanted, weights for 0th, 1st and 2nd order are calculated "for free", so are formulae using subsets of ``x_list``. This is something one can take advantage of to save computational cost. Be aware that one should define ``x_list`` from nearest to furthest from ``x0``. If not, subsets of ``x_list`` will yield poorer approximations, which might not grand an order of accuracy of ``len(x_list) - order``. See also ======== sympy.calculus.finite_diff.apply_finite_diff References ========== .. [1] Generation of Finite Difference Formulas on Arbitrarily Spaced Grids, Bengt Fornberg; Mathematics of computation; 51; 184; (1988); 699-706; doi:10.1090/S0025-5718-1988-0935077-0 """
/usr/src/app/target_test_cases/failed_tests_finite_diff_weights.txt
def finite_diff_weights(order, x_list, x0=S.One): """ Calculates the finite difference weights for an arbitrarily spaced one-dimensional grid (``x_list``) for derivatives at ``x0`` of order 0, 1, ..., up to ``order`` using a recursive formula. Order of accuracy is at least ``len(x_list) - order``, if ``x_list`` is defined correctly. Parameters ========== order: int Up to what derivative order weights should be calculated. 0 corresponds to interpolation. x_list: sequence Sequence of (unique) values for the independent variable. It is useful (but not necessary) to order ``x_list`` from nearest to furthest from ``x0``; see examples below. x0: Number or Symbol Root or value of the independent variable for which the finite difference weights should be generated. Default is ``S.One``. Returns ======= list A list of sublists, each corresponding to coefficients for increasing derivative order, and each containing lists of coefficients for increasing subsets of x_list. Examples ======== >>> from sympy import finite_diff_weights, S >>> res = finite_diff_weights(1, [-S(1)/2, S(1)/2, S(3)/2, S(5)/2], 0) >>> res [[[1, 0, 0, 0], [1/2, 1/2, 0, 0], [3/8, 3/4, -1/8, 0], [5/16, 15/16, -5/16, 1/16]], [[0, 0, 0, 0], [-1, 1, 0, 0], [-1, 1, 0, 0], [-23/24, 7/8, 1/8, -1/24]]] >>> res[0][-1] # FD weights for 0th derivative, using full x_list [5/16, 15/16, -5/16, 1/16] >>> res[1][-1] # FD weights for 1st derivative [-23/24, 7/8, 1/8, -1/24] >>> res[1][-2] # FD weights for 1st derivative, using x_list[:-1] [-1, 1, 0, 0] >>> res[1][-1][0] # FD weight for 1st deriv. for x_list[0] -23/24 >>> res[1][-1][1] # FD weight for 1st deriv. for x_list[1], etc. 7/8 Each sublist contains the most accurate formula at the end. Note, that in the above example ``res[1][1]`` is the same as ``res[1][2]``. Since res[1][2] has an order of accuracy of ``len(x_list[:3]) - order = 3 - 1 = 2``, the same is true for ``res[1][1]``! >>> res = finite_diff_weights(1, [S(0), S(1), -S(1), S(2), -S(2)], 0)[1] >>> res [[0, 0, 0, 0, 0], [-1, 1, 0, 0, 0], [0, 1/2, -1/2, 0, 0], [-1/2, 1, -1/3, -1/6, 0], [0, 2/3, -2/3, -1/12, 1/12]] >>> res[0] # no approximation possible, using x_list[0] only [0, 0, 0, 0, 0] >>> res[1] # classic forward step approximation [-1, 1, 0, 0, 0] >>> res[2] # classic centered approximation [0, 1/2, -1/2, 0, 0] >>> res[3:] # higher order approximations [[-1/2, 1, -1/3, -1/6, 0], [0, 2/3, -2/3, -1/12, 1/12]] Let us compare this to a differently defined ``x_list``. Pay attention to ``foo[i][k]`` corresponding to the gridpoint defined by ``x_list[k]``. >>> foo = finite_diff_weights(1, [-S(2), -S(1), S(0), S(1), S(2)], 0)[1] >>> foo [[0, 0, 0, 0, 0], [-1, 1, 0, 0, 0], [1/2, -2, 3/2, 0, 0], [1/6, -1, 1/2, 1/3, 0], [1/12, -2/3, 0, 2/3, -1/12]] >>> foo[1] # not the same and of lower accuracy as res[1]! [-1, 1, 0, 0, 0] >>> foo[2] # classic double backward step approximation [1/2, -2, 3/2, 0, 0] >>> foo[4] # the same as res[4] [1/12, -2/3, 0, 2/3, -1/12] Note that, unless you plan on using approximations based on subsets of ``x_list``, the order of gridpoints does not matter. The capability to generate weights at arbitrary points can be used e.g. 
to minimize Runge's phenomenon by using Chebyshev nodes: >>> from sympy import cos, symbols, pi, simplify >>> N, (h, x) = 4, symbols('h x') >>> x_list = [x+h*cos(i*pi/(N)) for i in range(N,-1,-1)] # chebyshev nodes >>> print(x_list) [-h + x, -sqrt(2)*h/2 + x, x, sqrt(2)*h/2 + x, h + x] >>> mycoeffs = finite_diff_weights(1, x_list, 0)[1][4] >>> [simplify(c) for c in mycoeffs] #doctest: +NORMALIZE_WHITESPACE [(h**3/2 + h**2*x - 3*h*x**2 - 4*x**3)/h**4, (-sqrt(2)*h**3 - 4*h**2*x + 3*sqrt(2)*h*x**2 + 8*x**3)/h**4, (6*h**2*x - 8*x**3)/h**4, (sqrt(2)*h**3 - 4*h**2*x - 3*sqrt(2)*h*x**2 + 8*x**3)/h**4, (-h**3/2 + h**2*x + 3*h*x**2 - 4*x**3)/h**4] Notes ===== If weights for a finite difference approximation of 3rd order derivative is wanted, weights for 0th, 1st and 2nd order are calculated "for free", so are formulae using subsets of ``x_list``. This is something one can take advantage of to save computational cost. Be aware that one should define ``x_list`` from nearest to furthest from ``x0``. If not, subsets of ``x_list`` will yield poorer approximations, which might not grand an order of accuracy of ``len(x_list) - order``. See also ======== sympy.calculus.finite_diff.apply_finite_diff References ========== .. [1] Generation of Finite Difference Formulas on Arbitrarily Spaced Grids, Bengt Fornberg; Mathematics of computation; 51; 184; (1988); 699-706; doi:10.1090/S0025-5718-1988-0935077-0 """ # The notation below closely corresponds to the one used in the paper. order = S(order) if not order.is_number: raise ValueError("Cannot handle symbolic order.") if order < 0: raise ValueError("Negative derivative order illegal.") if int(order) != order: raise ValueError("Non-integer order illegal") M = order N = len(x_list) - 1 delta = [[[0 for nu in range(N+1)] for n in range(N+1)] for m in range(M+1)] delta[0][0][0] = S.One c1 = S.One for n in range(1, N+1): c2 = S.One for nu in range(n): c3 = x_list[n] - x_list[nu] c2 = c2 * c3 if n <= M: delta[n][n-1][nu] = 0 for m in range(min(n, M)+1): delta[m][n][nu] = (x_list[n]-x0)*delta[m][n-1][nu] -\ m*delta[m-1][n-1][nu] delta[m][n][nu] /= c3 for m in range(min(n, M)+1): delta[m][n][n] = c1/c2*(m*delta[m-1][n-1][n-1] - (x_list[n-1]-x0)*delta[m][n-1][n-1]) c1 = c2 return delta
finite_diff_weights
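The recursive table returned above is easiest to read on a familiar grid. The sketch below (standard SymPy assumed) recovers the classical second-order central-difference weights for the first derivative; the expected values in the comments are illustrative.

from sympy import S, finite_diff_weights

# Grid [-1, 0, 1] about x0 = 0, derivatives up to order 1.
weights = finite_diff_weights(1, [-S.One, S.Zero, S.One], 0)

# weights[m][-1] is the most accurate formula for the m-th derivative that
# uses the whole grid.
print(weights[0][-1])   # interpolation weights for the three points
print(weights[1][-1])   # expected [-1/2, 0, 1/2], i.e. f'(0) ~ (f(1) - f(-1))/2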
sympy
56
sympy/series/formal.py
def fps(f, x=None, x0=0, dir=1, hyper=True, order=4, rational=True, full=False): """ Generates Formal Power Series of ``f``. Explanation =========== Returns the formal series expansion of ``f`` around ``x = x0`` with respect to ``x`` in the form of a ``FormalPowerSeries`` object. Formal Power Series is represented using an explicit formula computed using different algorithms. See :func:`compute_fps` for the more details regarding the computation of formula. Parameters ========== x : Symbol, optional If x is None and ``f`` is univariate, the univariate symbols will be supplied, otherwise an error will be raised. x0 : number, optional Point to perform series expansion about. Default is 0. dir : {1, -1, '+', '-'}, optional If dir is 1 or '+' the series is calculated from the right and for -1 or '-' the series is calculated from the left. For smooth functions this flag will not alter the results. Default is 1. hyper : {True, False}, optional Set hyper to False to skip the hypergeometric algorithm. By default it is set to False. order : int, optional Order of the derivative of ``f``, Default is 4. rational : {True, False}, optional Set rational to False to skip rational algorithm. By default it is set to True. full : {True, False}, optional Set full to True to increase the range of rational algorithm. See :func:`rational_algorithm` for details. By default it is set to False. Examples ======== >>> from sympy import fps, ln, atan, sin >>> from sympy.abc import x, n Rational Functions >>> fps(ln(1 + x)).truncate() x - x**2/2 + x**3/3 - x**4/4 + x**5/5 + O(x**6) >>> fps(atan(x), full=True).truncate() x - x**3/3 + x**5/5 + O(x**6) Symbolic Functions >>> fps(x**n*sin(x**2), x).truncate(8) -x**(n + 6)/6 + x**(n + 2) + O(x**(n + 8)) See Also ======== sympy.series.formal.FormalPowerSeries sympy.series.formal.compute_fps """
/usr/src/app/target_test_cases/failed_tests_fps.txt
def fps(f, x=None, x0=0, dir=1, hyper=True, order=4, rational=True, full=False): """ Generates Formal Power Series of ``f``. Explanation =========== Returns the formal series expansion of ``f`` around ``x = x0`` with respect to ``x`` in the form of a ``FormalPowerSeries`` object. Formal Power Series is represented using an explicit formula computed using different algorithms. See :func:`compute_fps` for the more details regarding the computation of formula. Parameters ========== x : Symbol, optional If x is None and ``f`` is univariate, the univariate symbols will be supplied, otherwise an error will be raised. x0 : number, optional Point to perform series expansion about. Default is 0. dir : {1, -1, '+', '-'}, optional If dir is 1 or '+' the series is calculated from the right and for -1 or '-' the series is calculated from the left. For smooth functions this flag will not alter the results. Default is 1. hyper : {True, False}, optional Set hyper to False to skip the hypergeometric algorithm. By default it is set to False. order : int, optional Order of the derivative of ``f``, Default is 4. rational : {True, False}, optional Set rational to False to skip rational algorithm. By default it is set to True. full : {True, False}, optional Set full to True to increase the range of rational algorithm. See :func:`rational_algorithm` for details. By default it is set to False. Examples ======== >>> from sympy import fps, ln, atan, sin >>> from sympy.abc import x, n Rational Functions >>> fps(ln(1 + x)).truncate() x - x**2/2 + x**3/3 - x**4/4 + x**5/5 + O(x**6) >>> fps(atan(x), full=True).truncate() x - x**3/3 + x**5/5 + O(x**6) Symbolic Functions >>> fps(x**n*sin(x**2), x).truncate(8) -x**(n + 6)/6 + x**(n + 2) + O(x**(n + 8)) See Also ======== sympy.series.formal.FormalPowerSeries sympy.series.formal.compute_fps """ f = sympify(f) if x is None: free = f.free_symbols if len(free) == 1: x = free.pop() elif not free: return f else: raise NotImplementedError("multivariate formal power series") result = compute_fps(f, x, x0, dir, hyper, order, rational, full) if result is None: return f return FormalPowerSeries(f, x, x0, dir, result)
fps
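A short sketch of typical use, assuming a standard SymPy installation and reusing the functions already shown in the docstring examples above:

from sympy import fps, ln, atan
from sympy.abc import x

# With a univariate expression the symbol may be omitted.
series = fps(ln(1 + x))
print(series.truncate())     # x - x**2/2 + x**3/3 - x**4/4 + x**5/5 + O(x**6)
print(series.truncate(8))    # more terms come from the same stored formula

# full=True widens the reach of the rational algorithm.
print(fps(atan(x), full=True).truncate())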
sympy
57
sympy/simplify/fu.py
def fu(rv, measure=lambda x: (L(x), x.count_ops())): """Attempt to simplify expression by using transformation rules given in the algorithm by Fu et al. :func:`fu` will try to minimize the objective function ``measure``. By default this first minimizes the number of trig terms and then minimizes the number of total operations. Examples ======== >>> from sympy.simplify.fu import fu >>> from sympy import cos, sin, tan, pi, S, sqrt >>> from sympy.abc import x, y, a, b >>> fu(sin(50)**2 + cos(50)**2 + sin(pi/6)) 3/2 >>> fu(sqrt(6)*cos(x) + sqrt(2)*sin(x)) 2*sqrt(2)*sin(x + pi/3) CTR1 example >>> eq = sin(x)**4 - cos(y)**2 + sin(y)**2 + 2*cos(x)**2 >>> fu(eq) cos(x)**4 - 2*cos(y)**2 + 2 CTR2 example >>> fu(S.Half - cos(2*x)/2) sin(x)**2 CTR3 example >>> fu(sin(a)*(cos(b) - sin(b)) + cos(a)*(sin(b) + cos(b))) sqrt(2)*sin(a + b + pi/4) CTR4 example >>> fu(sqrt(3)*cos(x)/2 + sin(x)/2) sin(x + pi/3) Example 1 >>> fu(1-sin(2*x)**2/4-sin(y)**2-cos(x)**4) -cos(x)**2 + cos(y)**2 Example 2 >>> fu(cos(4*pi/9)) sin(pi/18) >>> fu(cos(pi/9)*cos(2*pi/9)*cos(3*pi/9)*cos(4*pi/9)) 1/16 Example 3 >>> fu(tan(7*pi/18)+tan(5*pi/18)-sqrt(3)*tan(5*pi/18)*tan(7*pi/18)) -sqrt(3) Objective function example >>> fu(sin(x)/cos(x)) # default objective function tan(x) >>> fu(sin(x)/cos(x), measure=lambda x: -x.count_ops()) # maximize op count sin(x)/cos(x) References ========== .. [1] https://www.sciencedirect.com/science/article/pii/S0895717706001609 """
/usr/src/app/target_test_cases/failed_tests_fu.txt
def fu(rv, measure=lambda x: (L(x), x.count_ops())): """Attempt to simplify expression by using transformation rules given in the algorithm by Fu et al. :func:`fu` will try to minimize the objective function ``measure``. By default this first minimizes the number of trig terms and then minimizes the number of total operations. Examples ======== >>> from sympy.simplify.fu import fu >>> from sympy import cos, sin, tan, pi, S, sqrt >>> from sympy.abc import x, y, a, b >>> fu(sin(50)**2 + cos(50)**2 + sin(pi/6)) 3/2 >>> fu(sqrt(6)*cos(x) + sqrt(2)*sin(x)) 2*sqrt(2)*sin(x + pi/3) CTR1 example >>> eq = sin(x)**4 - cos(y)**2 + sin(y)**2 + 2*cos(x)**2 >>> fu(eq) cos(x)**4 - 2*cos(y)**2 + 2 CTR2 example >>> fu(S.Half - cos(2*x)/2) sin(x)**2 CTR3 example >>> fu(sin(a)*(cos(b) - sin(b)) + cos(a)*(sin(b) + cos(b))) sqrt(2)*sin(a + b + pi/4) CTR4 example >>> fu(sqrt(3)*cos(x)/2 + sin(x)/2) sin(x + pi/3) Example 1 >>> fu(1-sin(2*x)**2/4-sin(y)**2-cos(x)**4) -cos(x)**2 + cos(y)**2 Example 2 >>> fu(cos(4*pi/9)) sin(pi/18) >>> fu(cos(pi/9)*cos(2*pi/9)*cos(3*pi/9)*cos(4*pi/9)) 1/16 Example 3 >>> fu(tan(7*pi/18)+tan(5*pi/18)-sqrt(3)*tan(5*pi/18)*tan(7*pi/18)) -sqrt(3) Objective function example >>> fu(sin(x)/cos(x)) # default objective function tan(x) >>> fu(sin(x)/cos(x), measure=lambda x: -x.count_ops()) # maximize op count sin(x)/cos(x) References ========== .. [1] https://www.sciencedirect.com/science/article/pii/S0895717706001609 """ fRL1 = greedy(RL1, measure) fRL2 = greedy(RL2, measure) was = rv rv = sympify(rv) if not isinstance(rv, Expr): return rv.func(*[fu(a, measure=measure) for a in rv.args]) rv = TR1(rv) if rv.has(tan, cot): rv1 = fRL1(rv) if (measure(rv1) < measure(rv)): rv = rv1 if rv.has(tan, cot): rv = TR2(rv) if rv.has(sin, cos): rv1 = fRL2(rv) rv2 = TR8(TRmorrie(rv1)) rv = min([was, rv, rv1, rv2], key=measure) return min(TR2i(rv), rv, key=measure)
fu
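As a quick illustration of the objective function, the sketch below (standard SymPy assumed) repeats two docstring cases and shows how a custom measure changes the outcome.

from sympy import sin, cos, sqrt
from sympy.simplify.fu import fu
from sympy.abc import x

# Default measure: fewest trig terms first, then fewest operations.
print(fu(sin(x)**2 + cos(x)**2))            # 1
print(fu(sqrt(3)*cos(x)/2 + sin(x)/2))      # sin(x + pi/3)

# Maximizing the operation count makes the original quotient "best", so the
# expression is returned unchanged instead of becoming tan(x).
print(fu(sin(x)/cos(x), measure=lambda e: -e.count_ops()))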
sympy
58
sympy/utilities/iterables.py
def generate_bell(n): """Return permutations of [0, 1, ..., n - 1] such that each permutation differs from the last by the exchange of a single pair of neighbors. The ``n!`` permutations are returned as an iterator. In order to obtain the next permutation from a random starting permutation, use the ``next_trotterjohnson`` method of the Permutation class (which generates the same sequence in a different manner). Examples ======== >>> from itertools import permutations >>> from sympy.utilities.iterables import generate_bell >>> from sympy import zeros, Matrix This is the sort of permutation used in the ringing of physical bells, and does not produce permutations in lexicographical order. Rather, the permutations differ from each other by exactly one inversion, and the position at which the swapping occurs varies periodically in a simple fashion. Consider the first few permutations of 4 elements generated by ``permutations`` and ``generate_bell``: >>> list(permutations(range(4)))[:5] [(0, 1, 2, 3), (0, 1, 3, 2), (0, 2, 1, 3), (0, 2, 3, 1), (0, 3, 1, 2)] >>> list(generate_bell(4))[:5] [(0, 1, 2, 3), (0, 1, 3, 2), (0, 3, 1, 2), (3, 0, 1, 2), (3, 0, 2, 1)] Notice how the 2nd and 3rd lexicographical permutations have 3 elements out of place whereas each "bell" permutation always has only two elements out of place relative to the previous permutation (and so the signature (+/-1) of a permutation is opposite of the signature of the previous permutation). How the position of inversion varies across the elements can be seen by tracing out where the largest number appears in the permutations: >>> m = zeros(4, 24) >>> for i, p in enumerate(generate_bell(4)): ... m[:, i] = Matrix([j - 3 for j in list(p)]) # make largest zero >>> m.print_nonzero('X') [XXX XXXXXX XXXXXX XXX] [XX XX XXXX XX XXXX XX XX] [X XXXX XX XXXX XX XXXX X] [ XXXXXX XXXXXX XXXXXX ] See Also ======== sympy.combinatorics.permutations.Permutation.next_trotterjohnson References ========== .. [1] https://en.wikipedia.org/wiki/Method_ringing .. [2] https://stackoverflow.com/questions/4856615/recursive-permutation/4857018 .. [3] https://web.archive.org/web/20160313023044/http://programminggeeks.com/bell-algorithm-for-permutation/ .. [4] https://en.wikipedia.org/wiki/Steinhaus%E2%80%93Johnson%E2%80%93Trotter_algorithm .. [5] Generating involutions, derangements, and relatives by ECO Vincent Vajnovszki, DMTCS vol 1 issue 12, 2010 """
/usr/src/app/target_test_cases/failed_tests_generate_bell.txt
def generate_bell(n): """Return permutations of [0, 1, ..., n - 1] such that each permutation differs from the last by the exchange of a single pair of neighbors. The ``n!`` permutations are returned as an iterator. In order to obtain the next permutation from a random starting permutation, use the ``next_trotterjohnson`` method of the Permutation class (which generates the same sequence in a different manner). Examples ======== >>> from itertools import permutations >>> from sympy.utilities.iterables import generate_bell >>> from sympy import zeros, Matrix This is the sort of permutation used in the ringing of physical bells, and does not produce permutations in lexicographical order. Rather, the permutations differ from each other by exactly one inversion, and the position at which the swapping occurs varies periodically in a simple fashion. Consider the first few permutations of 4 elements generated by ``permutations`` and ``generate_bell``: >>> list(permutations(range(4)))[:5] [(0, 1, 2, 3), (0, 1, 3, 2), (0, 2, 1, 3), (0, 2, 3, 1), (0, 3, 1, 2)] >>> list(generate_bell(4))[:5] [(0, 1, 2, 3), (0, 1, 3, 2), (0, 3, 1, 2), (3, 0, 1, 2), (3, 0, 2, 1)] Notice how the 2nd and 3rd lexicographical permutations have 3 elements out of place whereas each "bell" permutation always has only two elements out of place relative to the previous permutation (and so the signature (+/-1) of a permutation is opposite of the signature of the previous permutation). How the position of inversion varies across the elements can be seen by tracing out where the largest number appears in the permutations: >>> m = zeros(4, 24) >>> for i, p in enumerate(generate_bell(4)): ... m[:, i] = Matrix([j - 3 for j in list(p)]) # make largest zero >>> m.print_nonzero('X') [XXX XXXXXX XXXXXX XXX] [XX XX XXXX XX XXXX XX XX] [X XXXX XX XXXX XX XXXX X] [ XXXXXX XXXXXX XXXXXX ] See Also ======== sympy.combinatorics.permutations.Permutation.next_trotterjohnson References ========== .. [1] https://en.wikipedia.org/wiki/Method_ringing .. [2] https://stackoverflow.com/questions/4856615/recursive-permutation/4857018 .. [3] https://web.archive.org/web/20160313023044/http://programminggeeks.com/bell-algorithm-for-permutation/ .. [4] https://en.wikipedia.org/wiki/Steinhaus%E2%80%93Johnson%E2%80%93Trotter_algorithm .. [5] Generating involutions, derangements, and relatives by ECO Vincent Vajnovszki, DMTCS vol 1 issue 12, 2010 """ n = as_int(n) if n < 1: raise ValueError('n must be a positive integer') if n == 1: yield (0,) elif n == 2: yield (0, 1) yield (1, 0) elif n == 3: yield from [(0, 1, 2), (0, 2, 1), (2, 0, 1), (2, 1, 0), (1, 2, 0), (1, 0, 2)] else: m = n - 1 op = [0] + [-1]*m l = list(range(n)) while True: yield tuple(l) # find biggest element with op big = None, -1 # idx, value for i in range(n): if op[i] and l[i] > big[1]: big = i, l[i] i, _ = big if i is None: break # there are no ops left # swap it with neighbor in the indicated direction j = i + op[i] l[i], l[j] = l[j], l[i] op[i], op[j] = op[j], op[i] # if it landed at the end or if the neighbor in the same # direction is bigger then turn off op if j == 0 or j == m or l[j + op[j]] > l[j]: op[j] = 0 # any element bigger to the left gets +1 op for i in range(j): if l[i] > l[j]: op[i] = 1 # any element bigger to the right gets -1 op for i in range(j + 1, n): if l[i] > l[j]: op[i] = -1
generate_bell
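The defining property stated above (each permutation differs from the previous one by a single swap of neighbours) can be checked mechanically; a small sketch, assuming standard SymPy:

from sympy.utilities.iterables import generate_bell

perms = list(generate_bell(4))
print(len(perms))                      # 24 == 4!

for prev, curr in zip(perms, perms[1:]):
    changed = [k for k in range(4) if prev[k] != curr[k]]
    # exactly two positions change, and they are adjacent
    assert len(changed) == 2 and changed[1] == changed[0] + 1
print("every step is one adjacent transposition")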
sympy
59
sympy/physics/quantum/identitysearch.py
def generate_gate_rules(gate_seq, return_as_muls=False): """Returns a set of gate rules. Each gate rules is represented as a 2-tuple of tuples or Muls. An empty tuple represents an arbitrary scalar value. This function uses the four operations (LL, LR, RL, RR) to generate the gate rules. A gate rule is an expression such as ABC = D or AB = CD, where A, B, C, and D are gates. Each value on either side of the equal sign represents a circuit. The four operations allow one to find a set of equivalent circuits from a gate identity. The letters denoting the operation tell the user what activities to perform on each expression. The first letter indicates which side of the equal sign to focus on. The second letter indicates which gate to focus on given the side. Once this information is determined, the inverse of the gate is multiplied on both circuits to create a new gate rule. For example, given the identity, ABCD = 1, a LL operation means look at the left value and multiply both left sides by the inverse of the leftmost gate A. If A is Hermitian, the inverse of A is still A. The resulting new rule is BCD = A. The following is a summary of the four operations. Assume that in the examples, all gates are Hermitian. LL : left circuit, left multiply ABCD = E -> AABCD = AE -> BCD = AE LR : left circuit, right multiply ABCD = E -> ABCDD = ED -> ABC = ED RL : right circuit, left multiply ABC = ED -> EABC = EED -> EABC = D RR : right circuit, right multiply AB = CD -> ABD = CDD -> ABD = C The number of gate rules generated is n*(n+1), where n is the number of gates in the sequence (unproven). Parameters ========== gate_seq : Gate tuple, Mul, or Number A variable length tuple or Mul of Gates whose product is equal to a scalar matrix return_as_muls : bool True to return a set of Muls; False to return a set of tuples Examples ======== Find the gate rules of the current circuit using tuples: >>> from sympy.physics.quantum.identitysearch import generate_gate_rules >>> from sympy.physics.quantum.gate import X, Y, Z >>> x = X(0); y = Y(0); z = Z(0) >>> generate_gate_rules((x, x)) {((X(0),), (X(0),)), ((X(0), X(0)), ())} >>> generate_gate_rules((x, y, z)) {((), (X(0), Z(0), Y(0))), ((), (Y(0), X(0), Z(0))), ((), (Z(0), Y(0), X(0))), ((X(0),), (Z(0), Y(0))), ((Y(0),), (X(0), Z(0))), ((Z(0),), (Y(0), X(0))), ((X(0), Y(0)), (Z(0),)), ((Y(0), Z(0)), (X(0),)), ((Z(0), X(0)), (Y(0),)), ((X(0), Y(0), Z(0)), ()), ((Y(0), Z(0), X(0)), ()), ((Z(0), X(0), Y(0)), ())} Find the gate rules of the current circuit using Muls: >>> generate_gate_rules(x*x, return_as_muls=True) {(1, 1)} >>> generate_gate_rules(x*y*z, return_as_muls=True) {(1, X(0)*Z(0)*Y(0)), (1, Y(0)*X(0)*Z(0)), (1, Z(0)*Y(0)*X(0)), (X(0)*Y(0), Z(0)), (Y(0)*Z(0), X(0)), (Z(0)*X(0), Y(0)), (X(0)*Y(0)*Z(0), 1), (Y(0)*Z(0)*X(0), 1), (Z(0)*X(0)*Y(0), 1), (X(0), Z(0)*Y(0)), (Y(0), X(0)*Z(0)), (Z(0), Y(0)*X(0))} """
/usr/src/app/target_test_cases/failed_tests_generate_gate_rules.txt
def generate_gate_rules(gate_seq, return_as_muls=False): """Returns a set of gate rules. Each gate rules is represented as a 2-tuple of tuples or Muls. An empty tuple represents an arbitrary scalar value. This function uses the four operations (LL, LR, RL, RR) to generate the gate rules. A gate rule is an expression such as ABC = D or AB = CD, where A, B, C, and D are gates. Each value on either side of the equal sign represents a circuit. The four operations allow one to find a set of equivalent circuits from a gate identity. The letters denoting the operation tell the user what activities to perform on each expression. The first letter indicates which side of the equal sign to focus on. The second letter indicates which gate to focus on given the side. Once this information is determined, the inverse of the gate is multiplied on both circuits to create a new gate rule. For example, given the identity, ABCD = 1, a LL operation means look at the left value and multiply both left sides by the inverse of the leftmost gate A. If A is Hermitian, the inverse of A is still A. The resulting new rule is BCD = A. The following is a summary of the four operations. Assume that in the examples, all gates are Hermitian. LL : left circuit, left multiply ABCD = E -> AABCD = AE -> BCD = AE LR : left circuit, right multiply ABCD = E -> ABCDD = ED -> ABC = ED RL : right circuit, left multiply ABC = ED -> EABC = EED -> EABC = D RR : right circuit, right multiply AB = CD -> ABD = CDD -> ABD = C The number of gate rules generated is n*(n+1), where n is the number of gates in the sequence (unproven). Parameters ========== gate_seq : Gate tuple, Mul, or Number A variable length tuple or Mul of Gates whose product is equal to a scalar matrix return_as_muls : bool True to return a set of Muls; False to return a set of tuples Examples ======== Find the gate rules of the current circuit using tuples: >>> from sympy.physics.quantum.identitysearch import generate_gate_rules >>> from sympy.physics.quantum.gate import X, Y, Z >>> x = X(0); y = Y(0); z = Z(0) >>> generate_gate_rules((x, x)) {((X(0),), (X(0),)), ((X(0), X(0)), ())} >>> generate_gate_rules((x, y, z)) {((), (X(0), Z(0), Y(0))), ((), (Y(0), X(0), Z(0))), ((), (Z(0), Y(0), X(0))), ((X(0),), (Z(0), Y(0))), ((Y(0),), (X(0), Z(0))), ((Z(0),), (Y(0), X(0))), ((X(0), Y(0)), (Z(0),)), ((Y(0), Z(0)), (X(0),)), ((Z(0), X(0)), (Y(0),)), ((X(0), Y(0), Z(0)), ()), ((Y(0), Z(0), X(0)), ()), ((Z(0), X(0), Y(0)), ())} Find the gate rules of the current circuit using Muls: >>> generate_gate_rules(x*x, return_as_muls=True) {(1, 1)} >>> generate_gate_rules(x*y*z, return_as_muls=True) {(1, X(0)*Z(0)*Y(0)), (1, Y(0)*X(0)*Z(0)), (1, Z(0)*Y(0)*X(0)), (X(0)*Y(0), Z(0)), (Y(0)*Z(0), X(0)), (Z(0)*X(0), Y(0)), (X(0)*Y(0)*Z(0), 1), (Y(0)*Z(0)*X(0), 1), (Z(0)*X(0)*Y(0), 1), (X(0), Z(0)*Y(0)), (Y(0), X(0)*Z(0)), (Z(0), Y(0)*X(0))} """ if isinstance(gate_seq, Number): if return_as_muls: return {(S.One, S.One)} else: return {((), ())} elif isinstance(gate_seq, Mul): gate_seq = gate_seq.args # Each item in queue is a 3-tuple: # i) first item is the left side of an equality # ii) second item is the right side of an equality # iii) third item is the number of operations performed # The argument, gate_seq, will start on the left side, and # the right side will be empty, implying the presence of an # identity. 
queue = deque() # A set of gate rules rules = set() # Maximum number of operations to perform max_ops = len(gate_seq) def process_new_rule(new_rule, ops): if new_rule is not None: new_left, new_right = new_rule if new_rule not in rules and (new_right, new_left) not in rules: rules.add(new_rule) # If haven't reached the max limit on operations if ops + 1 < max_ops: queue.append(new_rule + (ops + 1,)) queue.append((gate_seq, (), 0)) rules.add((gate_seq, ())) while len(queue) > 0: left, right, ops = queue.popleft() # Do a LL new_rule = ll_op(left, right) process_new_rule(new_rule, ops) # Do a LR new_rule = lr_op(left, right) process_new_rule(new_rule, ops) # Do a RL new_rule = rl_op(left, right) process_new_rule(new_rule, ops) # Do a RR new_rule = rr_op(left, right) process_new_rule(new_rule, ops) if return_as_muls: # Convert each rule as tuples into a rule as muls mul_rules = set() for rule in rules: left, right = rule mul_rules.add((Mul(*left), Mul(*right))) rules = mul_rules return rules
generate_gate_rules
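The (unproven) count n*(n + 1) quoted above is easy to check for a small identity; a short sketch assuming standard SymPy's quantum module:

from sympy.physics.quantum.gate import X, Y, Z
from sympy.physics.quantum.identitysearch import generate_gate_rules

x, y, z = X(0), Y(0), Z(0)

rules = generate_gate_rules((x, y, z))
print(len(rules))        # 12 == 3*(3 + 1), matching the docstring listing

# The same rules expressed as Muls rather than tuples.
print(len(generate_gate_rules(x*y*z, return_as_muls=True)))   # 12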
sympy
60
sympy/tensor/index_methods.py
def get_contraction_structure(expr): """Determine dummy indices of ``expr`` and describe its structure By *dummy* we mean indices that are summation indices. The structure of the expression is determined and described as follows: 1) A conforming summation of Indexed objects is described with a dict where the keys are summation indices and the corresponding values are sets containing all terms for which the summation applies. All Add objects in the SymPy expression tree are described like this. 2) For all nodes in the SymPy expression tree that are *not* of type Add, the following applies: If a node discovers contractions in one of its arguments, the node itself will be stored as a key in the dict. For that key, the corresponding value is a list of dicts, each of which is the result of a recursive call to get_contraction_structure(). The list contains only dicts for the non-trivial deeper contractions, omitting dicts with None as the one and only key. .. Note:: The presence of expressions among the dictionary keys indicates multiple levels of index contractions. A nested dict displays nested contractions and may itself contain dicts from a deeper level. In practical calculations the summation in the deepest nested level must be calculated first so that the outer expression can access the resulting indexed object. Examples ======== >>> from sympy.tensor.index_methods import get_contraction_structure >>> from sympy import default_sort_key >>> from sympy.tensor import IndexedBase, Idx >>> x, y, A = map(IndexedBase, ['x', 'y', 'A']) >>> i, j, k, l = map(Idx, ['i', 'j', 'k', 'l']) >>> get_contraction_structure(x[i]*y[i] + A[j, j]) {(i,): {x[i]*y[i]}, (j,): {A[j, j]}} >>> get_contraction_structure(x[i]*y[j]) {None: {x[i]*y[j]}} A multiplication of contracted factors results in nested dicts representing the internal contractions. >>> d = get_contraction_structure(x[i, i]*y[j, j]) >>> sorted(d.keys(), key=default_sort_key) [None, x[i, i]*y[j, j]] In this case, the product has no contractions: >>> d[None] {x[i, i]*y[j, j]} Factors are contracted "first": >>> sorted(d[x[i, i]*y[j, j]], key=default_sort_key) [{(i,): {x[i, i]}}, {(j,): {y[j, j]}}] A parenthesized Add object is also returned as a nested dictionary. The term containing the parenthesis is a Mul with a contraction among the arguments, so it will be found as a key in the result. It stores the dictionary resulting from a recursive call on the Add expression. >>> d = get_contraction_structure(x[i]*(y[i] + A[i, j]*x[j])) >>> sorted(d.keys(), key=default_sort_key) [(A[i, j]*x[j] + y[i])*x[i], (i,)] >>> d[(i,)] {(A[i, j]*x[j] + y[i])*x[i]} >>> d[x[i]*(A[i, j]*x[j] + y[i])] [{None: {y[i]}, (j,): {A[i, j]*x[j]}}] Powers with contractions in either base or exponent will also be found as keys in the dictionary, mapping to a list of results from recursive calls: >>> d = get_contraction_structure(A[j, j]**A[i, i]) >>> d[None] {A[j, j]**A[i, i]} >>> nested_contractions = d[A[j, j]**A[i, i]] >>> nested_contractions[0] {(j,): {A[j, j]}} >>> nested_contractions[1] {(i,): {A[i, i]}} The description of the contraction structure may appear complicated when represented with a string in the above examples, but it is easy to iterate over: >>> from sympy import Expr >>> for key in d: ... if isinstance(key, Expr): ... continue ... for term in d[key]: ... if term in d: ... # treat deepest contraction first ... pass ... # treat outermost contactions here """
/usr/src/app/target_test_cases/failed_tests_get_contraction_structure.txt
def get_contraction_structure(expr): """Determine dummy indices of ``expr`` and describe its structure By *dummy* we mean indices that are summation indices. The structure of the expression is determined and described as follows: 1) A conforming summation of Indexed objects is described with a dict where the keys are summation indices and the corresponding values are sets containing all terms for which the summation applies. All Add objects in the SymPy expression tree are described like this. 2) For all nodes in the SymPy expression tree that are *not* of type Add, the following applies: If a node discovers contractions in one of its arguments, the node itself will be stored as a key in the dict. For that key, the corresponding value is a list of dicts, each of which is the result of a recursive call to get_contraction_structure(). The list contains only dicts for the non-trivial deeper contractions, omitting dicts with None as the one and only key. .. Note:: The presence of expressions among the dictionary keys indicates multiple levels of index contractions. A nested dict displays nested contractions and may itself contain dicts from a deeper level. In practical calculations the summation in the deepest nested level must be calculated first so that the outer expression can access the resulting indexed object. Examples ======== >>> from sympy.tensor.index_methods import get_contraction_structure >>> from sympy import default_sort_key >>> from sympy.tensor import IndexedBase, Idx >>> x, y, A = map(IndexedBase, ['x', 'y', 'A']) >>> i, j, k, l = map(Idx, ['i', 'j', 'k', 'l']) >>> get_contraction_structure(x[i]*y[i] + A[j, j]) {(i,): {x[i]*y[i]}, (j,): {A[j, j]}} >>> get_contraction_structure(x[i]*y[j]) {None: {x[i]*y[j]}} A multiplication of contracted factors results in nested dicts representing the internal contractions. >>> d = get_contraction_structure(x[i, i]*y[j, j]) >>> sorted(d.keys(), key=default_sort_key) [None, x[i, i]*y[j, j]] In this case, the product has no contractions: >>> d[None] {x[i, i]*y[j, j]} Factors are contracted "first": >>> sorted(d[x[i, i]*y[j, j]], key=default_sort_key) [{(i,): {x[i, i]}}, {(j,): {y[j, j]}}] A parenthesized Add object is also returned as a nested dictionary. The term containing the parenthesis is a Mul with a contraction among the arguments, so it will be found as a key in the result. It stores the dictionary resulting from a recursive call on the Add expression. >>> d = get_contraction_structure(x[i]*(y[i] + A[i, j]*x[j])) >>> sorted(d.keys(), key=default_sort_key) [(A[i, j]*x[j] + y[i])*x[i], (i,)] >>> d[(i,)] {(A[i, j]*x[j] + y[i])*x[i]} >>> d[x[i]*(A[i, j]*x[j] + y[i])] [{None: {y[i]}, (j,): {A[i, j]*x[j]}}] Powers with contractions in either base or exponent will also be found as keys in the dictionary, mapping to a list of results from recursive calls: >>> d = get_contraction_structure(A[j, j]**A[i, i]) >>> d[None] {A[j, j]**A[i, i]} >>> nested_contractions = d[A[j, j]**A[i, i]] >>> nested_contractions[0] {(j,): {A[j, j]}} >>> nested_contractions[1] {(i,): {A[i, i]}} The description of the contraction structure may appear complicated when represented with a string in the above examples, but it is easy to iterate over: >>> from sympy import Expr >>> for key in d: ... if isinstance(key, Expr): ... continue ... for term in d[key]: ... if term in d: ... # treat deepest contraction first ... pass ... # treat outermost contactions here """ # We call ourself recursively to inspect sub expressions. 
if isinstance(expr, Indexed): junk, key = _remove_repeated(expr.indices) return {key or None: {expr}} elif expr.is_Atom: return {None: {expr}} elif expr.is_Mul: junk, junk, key = _get_indices_Mul(expr, return_dummies=True) result = {key or None: {expr}} # recurse on every factor nested = [] for fac in expr.args: facd = get_contraction_structure(fac) if not (None in facd and len(facd) == 1): nested.append(facd) if nested: result[expr] = nested return result elif expr.is_Pow or isinstance(expr, exp): # recurse in base and exp separately. If either has internal # contractions we must include ourselves as a key in the returned dict b, e = expr.as_base_exp() dbase = get_contraction_structure(b) dexp = get_contraction_structure(e) dicts = [] for d in dbase, dexp: if not (None in d and len(d) == 1): dicts.append(d) result = {None: {expr}} if dicts: result[expr] = dicts return result elif expr.is_Add: # Note: we just collect all terms with identical summation indices, We # do nothing to identify equivalent terms here, as this would require # substitutions or pattern matching in expressions of unknown # complexity. result = {} for term in expr.args: # recurse on every term d = get_contraction_structure(term) for key in d: if key in result: result[key] |= d[key] else: result[key] = d[key] return result elif isinstance(expr, Piecewise): # FIXME: No support for Piecewise yet return {None: expr} elif isinstance(expr, Function): # Collect non-trivial contraction structures in each argument # We do not report repeated indices in separate arguments as a # contraction deeplist = [] for arg in expr.args: deep = get_contraction_structure(arg) if not (None in deep and len(deep) == 1): deeplist.append(deep) d = {None: {expr}} if deeplist: d[expr] = deeplist return d # this test is expensive, so it should be at the end elif not expr.has(Indexed): return {None: {expr}} raise NotImplementedError( "FIXME: No specialized handling of type %s" % type(expr))
get_contraction_structure
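The nested-dict layout described above is most easily seen by walking the result, as the end of the docstring suggests; a minimal sketch assuming standard SymPy:

from sympy import Expr
from sympy.tensor import IndexedBase, Idx
from sympy.tensor.index_methods import get_contraction_structure

x, y, A = map(IndexedBase, ['x', 'y', 'A'])
i, j = map(Idx, ['i', 'j'])

d = get_contraction_structure(x[i]*(y[i] + A[i, j]*x[j]))
for key in d:
    if isinstance(key, Expr):
        continue                     # Expr keys only flag nested contractions
    for term in d[key]:
        if term in d:
            # this term carries a deeper contraction; in a real computation
            # the inner sums in d[term] would be evaluated first
            print("inner:", d[term])
        print("outer:", key, "->", term)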
sympy
61
sympy/physics/vector/functions.py
def get_motion_params(frame, **kwargs): """ Returns the three motion parameters - (acceleration, velocity, and position) as vectorial functions of time in the given frame. If a higher order differential function is provided, the lower order functions are used as boundary conditions. For example, given the acceleration, the velocity and position parameters are taken as boundary conditions. The values of time at which the boundary conditions are specified are taken from timevalue1(for position boundary condition) and timevalue2(for velocity boundary condition). If any of the boundary conditions are not provided, they are taken to be zero by default (zero vectors, in case of vectorial inputs). If the boundary conditions are also functions of time, they are converted to constants by substituting the time values in the dynamicsymbols._t time Symbol. This function can also be used for calculating rotational motion parameters. Have a look at the Parameters and Examples for more clarity. Parameters ========== frame : ReferenceFrame The frame to express the motion parameters in acceleration : Vector Acceleration of the object/frame as a function of time velocity : Vector Velocity as function of time or as boundary condition of velocity at time = timevalue1 position : Vector Velocity as function of time or as boundary condition of velocity at time = timevalue1 timevalue1 : sympyfiable Value of time for position boundary condition timevalue2 : sympyfiable Value of time for velocity boundary condition Examples ======== >>> from sympy.physics.vector import ReferenceFrame, get_motion_params, dynamicsymbols >>> from sympy.physics.vector import init_vprinting >>> init_vprinting(pretty_print=False) >>> from sympy import symbols >>> R = ReferenceFrame('R') >>> v1, v2, v3 = dynamicsymbols('v1 v2 v3') >>> v = v1*R.x + v2*R.y + v3*R.z >>> get_motion_params(R, position = v) (v1''*R.x + v2''*R.y + v3''*R.z, v1'*R.x + v2'*R.y + v3'*R.z, v1*R.x + v2*R.y + v3*R.z) >>> a, b, c = symbols('a b c') >>> v = a*R.x + b*R.y + c*R.z >>> get_motion_params(R, velocity = v) (0, a*R.x + b*R.y + c*R.z, a*t*R.x + b*t*R.y + c*t*R.z) >>> parameters = get_motion_params(R, acceleration = v) >>> parameters[1] a*t*R.x + b*t*R.y + c*t*R.z >>> parameters[2] a*t**2/2*R.x + b*t**2/2*R.y + c*t**2/2*R.z """
/usr/src/app/target_test_cases/failed_tests_get_motion_params.txt
def get_motion_params(frame, **kwargs): """ Returns the three motion parameters - (acceleration, velocity, and position) as vectorial functions of time in the given frame. If a higher order differential function is provided, the lower order functions are used as boundary conditions. For example, given the acceleration, the velocity and position parameters are taken as boundary conditions. The values of time at which the boundary conditions are specified are taken from timevalue1(for position boundary condition) and timevalue2(for velocity boundary condition). If any of the boundary conditions are not provided, they are taken to be zero by default (zero vectors, in case of vectorial inputs). If the boundary conditions are also functions of time, they are converted to constants by substituting the time values in the dynamicsymbols._t time Symbol. This function can also be used for calculating rotational motion parameters. Have a look at the Parameters and Examples for more clarity. Parameters ========== frame : ReferenceFrame The frame to express the motion parameters in acceleration : Vector Acceleration of the object/frame as a function of time velocity : Vector Velocity as function of time or as boundary condition of velocity at time = timevalue1 position : Vector Velocity as function of time or as boundary condition of velocity at time = timevalue1 timevalue1 : sympyfiable Value of time for position boundary condition timevalue2 : sympyfiable Value of time for velocity boundary condition Examples ======== >>> from sympy.physics.vector import ReferenceFrame, get_motion_params, dynamicsymbols >>> from sympy.physics.vector import init_vprinting >>> init_vprinting(pretty_print=False) >>> from sympy import symbols >>> R = ReferenceFrame('R') >>> v1, v2, v3 = dynamicsymbols('v1 v2 v3') >>> v = v1*R.x + v2*R.y + v3*R.z >>> get_motion_params(R, position = v) (v1''*R.x + v2''*R.y + v3''*R.z, v1'*R.x + v2'*R.y + v3'*R.z, v1*R.x + v2*R.y + v3*R.z) >>> a, b, c = symbols('a b c') >>> v = a*R.x + b*R.y + c*R.z >>> get_motion_params(R, velocity = v) (0, a*R.x + b*R.y + c*R.z, a*t*R.x + b*t*R.y + c*t*R.z) >>> parameters = get_motion_params(R, acceleration = v) >>> parameters[1] a*t*R.x + b*t*R.y + c*t*R.z >>> parameters[2] a*t**2/2*R.x + b*t**2/2*R.y + c*t**2/2*R.z """ def _process_vector_differential(vectdiff, condition, variable, ordinate, frame): """ Helper function for get_motion methods. Finds derivative of vectdiff wrt variable, and its integral using the specified boundary condition at value of variable = ordinate. Returns a tuple of - (derivative, function and integral) wrt vectdiff """ # Make sure boundary condition is independent of 'variable' if condition != 0: condition = express(condition, frame, variables=True) # Special case of vectdiff == 0 if vectdiff == Vector(0): return (0, 0, condition) # Express vectdiff completely in condition's frame to give vectdiff1 vectdiff1 = express(vectdiff, frame) # Find derivative of vectdiff vectdiff2 = time_derivative(vectdiff, frame) # Integrate and use boundary condition vectdiff0 = Vector(0) lims = (variable, ordinate, variable) for dim in frame: function1 = vectdiff1.dot(dim) abscissa = dim.dot(condition).subs({variable: ordinate}) # Indefinite integral of 'function1' wrt 'variable', using # the given initial condition (ordinate, abscissa). 
vectdiff0 += (integrate(function1, lims) + abscissa) * dim # Return tuple return (vectdiff2, vectdiff, vectdiff0) _check_frame(frame) # Decide mode of operation based on user's input if 'acceleration' in kwargs: mode = 2 elif 'velocity' in kwargs: mode = 1 else: mode = 0 # All the possible parameters in kwargs # Not all are required for every case # If not specified, set to default values(may or may not be used in # calculations) conditions = ['acceleration', 'velocity', 'position', 'timevalue', 'timevalue1', 'timevalue2'] for i, x in enumerate(conditions): if x not in kwargs: if i < 3: kwargs[x] = Vector(0) else: kwargs[x] = S.Zero elif i < 3: _check_vector(kwargs[x]) else: kwargs[x] = sympify(kwargs[x]) if mode == 2: vel = _process_vector_differential(kwargs['acceleration'], kwargs['velocity'], dynamicsymbols._t, kwargs['timevalue2'], frame)[2] pos = _process_vector_differential(vel, kwargs['position'], dynamicsymbols._t, kwargs['timevalue1'], frame)[2] return (kwargs['acceleration'], vel, pos) elif mode == 1: return _process_vector_differential(kwargs['velocity'], kwargs['position'], dynamicsymbols._t, kwargs['timevalue1'], frame) else: vel = time_derivative(kwargs['position'], frame) acc = time_derivative(vel, frame) return (acc, vel, kwargs['position'])
get_motion_params
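A short sketch of the constant-acceleration case with non-zero boundary conditions, assuming the standard sympy.physics.vector API; the expected outputs in the comments follow from the integration scheme described above and are illustrative only.

from sympy import symbols
from sympy.physics.vector import ReferenceFrame, get_motion_params

a0, v0, x0 = symbols('a0 v0 x0')
N = ReferenceFrame('N')

acc, vel, pos = get_motion_params(
    N,
    acceleration=a0*N.x,
    velocity=v0*N.x,      # velocity boundary condition at t = timevalue2 (default 0)
    position=x0*N.x)      # position boundary condition at t = timevalue1 (default 0)

print(acc)   # a0*N.x
print(vel)   # expected (a0*t + v0)*N.x
print(pos)   # expected (a0*t**2/2 + v0*t + x0)*N.x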
sympy
62
sympy/printing/glsl.py
def glsl_code(expr,assign_to=None,**settings): """Converts an expr to a string of GLSL code Parameters ========== expr : Expr A SymPy expression to be converted. assign_to : optional When given, the argument is used for naming the variable or variables to which the expression is assigned. Can be a string, ``Symbol``, ``MatrixSymbol`` or ``Indexed`` type object. In cases where ``expr`` would be printed as an array, a list of string or ``Symbol`` objects can also be passed. This is helpful in case of line-wrapping, or for expressions that generate multi-line statements. It can also be used to spread an array-like expression into multiple assignments. use_operators: bool, optional If set to False, then *,/,+,- operators will be replaced with functions mul, add, and sub, which must be implemented by the user, e.g. for implementing non-standard rings or emulated quad/octal precision. [default=True] glsl_types: bool, optional Set this argument to ``False`` in order to avoid using the ``vec`` and ``mat`` types. The printer will instead use arrays (or nested arrays). [default=True] mat_nested: bool, optional GLSL version 4.3 and above support nested arrays (arrays of arrays). Set this to ``True`` to render matrices as nested arrays. [default=False] mat_separator: str, optional By default, matrices are rendered with newlines using this separator, making them easier to read, but less compact. By removing the newline this option can be used to make them more vertically compact. [default=',\n'] mat_transpose: bool, optional GLSL's matrix multiplication implementation assumes column-major indexing. By default, this printer ignores that convention. Setting this option to ``True`` transposes all matrix output. [default=False] array_type: str, optional The GLSL array constructor type. [default='float'] precision : integer, optional The precision for numbers such as pi [default=15]. user_functions : dict, optional A dictionary where keys are ``FunctionClass`` instances and values are their string representations. Alternatively, the dictionary value can be a list of tuples i.e. [(argument_test, js_function_string)]. See below for examples. human : bool, optional If True, the result is a single string that may contain some constant declarations for the number symbols. If False, the same information is returned in a tuple of (symbols_to_declare, not_supported_functions, code_text). [default=True]. contract: bool, optional If True, ``Indexed`` instances are assumed to obey tensor contraction rules and the corresponding nested loops over indices are generated. Setting contract=False will not generate loops, instead the user is responsible to provide values for the indices in the code. [default=True]. 
Examples ======== >>> from sympy import glsl_code, symbols, Rational, sin, ceiling, Abs >>> x, tau = symbols("x, tau") >>> glsl_code((2*tau)**Rational(7, 2)) '8*sqrt(2)*pow(tau, 3.5)' >>> glsl_code(sin(x), assign_to="float y") 'float y = sin(x);' Various GLSL types are supported: >>> from sympy import Matrix, glsl_code >>> glsl_code(Matrix([1,2,3])) 'vec3(1, 2, 3)' >>> glsl_code(Matrix([[1, 2],[3, 4]])) 'mat2(1, 2, 3, 4)' Pass ``mat_transpose = True`` to switch to column-major indexing: >>> glsl_code(Matrix([[1, 2],[3, 4]]), mat_transpose = True) 'mat2(1, 3, 2, 4)' By default, larger matrices get collapsed into float arrays: >>> print(glsl_code( Matrix([[1,2,3,4,5],[6,7,8,9,10]]) )) float[10]( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ) /* a 2x5 matrix */ The type of array constructor used to print GLSL arrays can be controlled via the ``array_type`` parameter: >>> glsl_code(Matrix([1,2,3,4,5]), array_type='int') 'int[5](1, 2, 3, 4, 5)' Passing a list of strings or ``symbols`` to the ``assign_to`` parameter will yield a multi-line assignment for each item in an array-like expression: >>> x_struct_members = symbols('x.a x.b x.c x.d') >>> print(glsl_code(Matrix([1,2,3,4]), assign_to=x_struct_members)) x.a = 1; x.b = 2; x.c = 3; x.d = 4; This could be useful in cases where it's desirable to modify members of a GLSL ``Struct``. It could also be used to spread items from an array-like expression into various miscellaneous assignments: >>> misc_assignments = ('x[0]', 'x[1]', 'float y', 'float z') >>> print(glsl_code(Matrix([1,2,3,4]), assign_to=misc_assignments)) x[0] = 1; x[1] = 2; float y = 3; float z = 4; Passing ``mat_nested = True`` instead prints out nested float arrays, which are supported in GLSL 4.3 and above. >>> mat = Matrix([ ... [ 0, 1, 2], ... [ 3, 4, 5], ... [ 6, 7, 8], ... [ 9, 10, 11], ... [12, 13, 14]]) >>> print(glsl_code( mat, mat_nested = True )) float[5][3]( float[]( 0, 1, 2), float[]( 3, 4, 5), float[]( 6, 7, 8), float[]( 9, 10, 11), float[](12, 13, 14) ) Custom printing can be defined for certain types by passing a dictionary of "type" : "function" to the ``user_functions`` kwarg. Alternatively, the dictionary value can be a list of tuples i.e. [(argument_test, js_function_string)]. >>> custom_functions = { ... "ceiling": "CEIL", ... "Abs": [(lambda x: not x.is_integer, "fabs"), ... (lambda x: x.is_integer, "ABS")] ... } >>> glsl_code(Abs(x) + ceiling(x), user_functions=custom_functions) 'fabs(x) + CEIL(x)' If further control is needed, addition, subtraction, multiplication and division operators can be replaced with ``add``, ``sub``, and ``mul`` functions. This is done by passing ``use_operators = False``: >>> x,y,z = symbols('x,y,z') >>> glsl_code(x*(y+z), use_operators = False) 'mul(x, add(y, z))' >>> glsl_code(x*(y+z*(x-y)**z), use_operators = False) 'mul(x, add(y, mul(z, pow(sub(x, y), z))))' ``Piecewise`` expressions are converted into conditionals. If an ``assign_to`` variable is provided an if statement is created, otherwise the ternary operator is used. Note that if the ``Piecewise`` lacks a default term, represented by ``(expr, True)`` then an error will be thrown. This is to prevent generating an expression that may not evaluate to anything. >>> from sympy import Piecewise >>> expr = Piecewise((x + 1, x > 0), (x, True)) >>> print(glsl_code(expr, tau)) if (x > 0) { tau = x + 1; } else { tau = x; } Support for loops is provided through ``Indexed`` types. 
With ``contract=True`` these expressions will be turned into loops, whereas ``contract=False`` will just print the assignment expression that should be looped over: >>> from sympy import Eq, IndexedBase, Idx >>> len_y = 5 >>> y = IndexedBase('y', shape=(len_y,)) >>> t = IndexedBase('t', shape=(len_y,)) >>> Dy = IndexedBase('Dy', shape=(len_y-1,)) >>> i = Idx('i', len_y-1) >>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i])) >>> glsl_code(e.rhs, assign_to=e.lhs, contract=False) 'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);' >>> from sympy import Matrix, MatrixSymbol >>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)]) >>> A = MatrixSymbol('A', 3, 1) >>> print(glsl_code(mat, A)) A[0][0] = pow(x, 2.0); if (x > 0) { A[1][0] = x + 1; } else { A[1][0] = x; } A[2][0] = sin(x); """
/usr/src/app/target_test_cases/failed_tests_glsl_code.txt
def glsl_code(expr,assign_to=None,**settings): """Converts an expr to a string of GLSL code Parameters ========== expr : Expr A SymPy expression to be converted. assign_to : optional When given, the argument is used for naming the variable or variables to which the expression is assigned. Can be a string, ``Symbol``, ``MatrixSymbol`` or ``Indexed`` type object. In cases where ``expr`` would be printed as an array, a list of string or ``Symbol`` objects can also be passed. This is helpful in case of line-wrapping, or for expressions that generate multi-line statements. It can also be used to spread an array-like expression into multiple assignments. use_operators: bool, optional If set to False, then *,/,+,- operators will be replaced with functions mul, add, and sub, which must be implemented by the user, e.g. for implementing non-standard rings or emulated quad/octal precision. [default=True] glsl_types: bool, optional Set this argument to ``False`` in order to avoid using the ``vec`` and ``mat`` types. The printer will instead use arrays (or nested arrays). [default=True] mat_nested: bool, optional GLSL version 4.3 and above support nested arrays (arrays of arrays). Set this to ``True`` to render matrices as nested arrays. [default=False] mat_separator: str, optional By default, matrices are rendered with newlines using this separator, making them easier to read, but less compact. By removing the newline this option can be used to make them more vertically compact. [default=',\n'] mat_transpose: bool, optional GLSL's matrix multiplication implementation assumes column-major indexing. By default, this printer ignores that convention. Setting this option to ``True`` transposes all matrix output. [default=False] array_type: str, optional The GLSL array constructor type. [default='float'] precision : integer, optional The precision for numbers such as pi [default=15]. user_functions : dict, optional A dictionary where keys are ``FunctionClass`` instances and values are their string representations. Alternatively, the dictionary value can be a list of tuples i.e. [(argument_test, js_function_string)]. See below for examples. human : bool, optional If True, the result is a single string that may contain some constant declarations for the number symbols. If False, the same information is returned in a tuple of (symbols_to_declare, not_supported_functions, code_text). [default=True]. contract: bool, optional If True, ``Indexed`` instances are assumed to obey tensor contraction rules and the corresponding nested loops over indices are generated. Setting contract=False will not generate loops, instead the user is responsible to provide values for the indices in the code. [default=True]. 
Examples ======== >>> from sympy import glsl_code, symbols, Rational, sin, ceiling, Abs >>> x, tau = symbols("x, tau") >>> glsl_code((2*tau)**Rational(7, 2)) '8*sqrt(2)*pow(tau, 3.5)' >>> glsl_code(sin(x), assign_to="float y") 'float y = sin(x);' Various GLSL types are supported: >>> from sympy import Matrix, glsl_code >>> glsl_code(Matrix([1,2,3])) 'vec3(1, 2, 3)' >>> glsl_code(Matrix([[1, 2],[3, 4]])) 'mat2(1, 2, 3, 4)' Pass ``mat_transpose = True`` to switch to column-major indexing: >>> glsl_code(Matrix([[1, 2],[3, 4]]), mat_transpose = True) 'mat2(1, 3, 2, 4)' By default, larger matrices get collapsed into float arrays: >>> print(glsl_code( Matrix([[1,2,3,4,5],[6,7,8,9,10]]) )) float[10]( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ) /* a 2x5 matrix */ The type of array constructor used to print GLSL arrays can be controlled via the ``array_type`` parameter: >>> glsl_code(Matrix([1,2,3,4,5]), array_type='int') 'int[5](1, 2, 3, 4, 5)' Passing a list of strings or ``symbols`` to the ``assign_to`` parameter will yield a multi-line assignment for each item in an array-like expression: >>> x_struct_members = symbols('x.a x.b x.c x.d') >>> print(glsl_code(Matrix([1,2,3,4]), assign_to=x_struct_members)) x.a = 1; x.b = 2; x.c = 3; x.d = 4; This could be useful in cases where it's desirable to modify members of a GLSL ``Struct``. It could also be used to spread items from an array-like expression into various miscellaneous assignments: >>> misc_assignments = ('x[0]', 'x[1]', 'float y', 'float z') >>> print(glsl_code(Matrix([1,2,3,4]), assign_to=misc_assignments)) x[0] = 1; x[1] = 2; float y = 3; float z = 4; Passing ``mat_nested = True`` instead prints out nested float arrays, which are supported in GLSL 4.3 and above. >>> mat = Matrix([ ... [ 0, 1, 2], ... [ 3, 4, 5], ... [ 6, 7, 8], ... [ 9, 10, 11], ... [12, 13, 14]]) >>> print(glsl_code( mat, mat_nested = True )) float[5][3]( float[]( 0, 1, 2), float[]( 3, 4, 5), float[]( 6, 7, 8), float[]( 9, 10, 11), float[](12, 13, 14) ) Custom printing can be defined for certain types by passing a dictionary of "type" : "function" to the ``user_functions`` kwarg. Alternatively, the dictionary value can be a list of tuples i.e. [(argument_test, js_function_string)]. >>> custom_functions = { ... "ceiling": "CEIL", ... "Abs": [(lambda x: not x.is_integer, "fabs"), ... (lambda x: x.is_integer, "ABS")] ... } >>> glsl_code(Abs(x) + ceiling(x), user_functions=custom_functions) 'fabs(x) + CEIL(x)' If further control is needed, addition, subtraction, multiplication and division operators can be replaced with ``add``, ``sub``, and ``mul`` functions. This is done by passing ``use_operators = False``: >>> x,y,z = symbols('x,y,z') >>> glsl_code(x*(y+z), use_operators = False) 'mul(x, add(y, z))' >>> glsl_code(x*(y+z*(x-y)**z), use_operators = False) 'mul(x, add(y, mul(z, pow(sub(x, y), z))))' ``Piecewise`` expressions are converted into conditionals. If an ``assign_to`` variable is provided an if statement is created, otherwise the ternary operator is used. Note that if the ``Piecewise`` lacks a default term, represented by ``(expr, True)`` then an error will be thrown. This is to prevent generating an expression that may not evaluate to anything. >>> from sympy import Piecewise >>> expr = Piecewise((x + 1, x > 0), (x, True)) >>> print(glsl_code(expr, tau)) if (x > 0) { tau = x + 1; } else { tau = x; } Support for loops is provided through ``Indexed`` types. 
With ``contract=True`` these expressions will be turned into loops, whereas ``contract=False`` will just print the assignment expression that should be looped over: >>> from sympy import Eq, IndexedBase, Idx >>> len_y = 5 >>> y = IndexedBase('y', shape=(len_y,)) >>> t = IndexedBase('t', shape=(len_y,)) >>> Dy = IndexedBase('Dy', shape=(len_y-1,)) >>> i = Idx('i', len_y-1) >>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i])) >>> glsl_code(e.rhs, assign_to=e.lhs, contract=False) 'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);' >>> from sympy import Matrix, MatrixSymbol >>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)]) >>> A = MatrixSymbol('A', 3, 1) >>> print(glsl_code(mat, A)) A[0][0] = pow(x, 2.0); if (x > 0) { A[1][0] = x + 1; } else { A[1][0] = x; } A[2][0] = sin(x); """ return GLSLPrinter(settings).doprint(expr,assign_to)
glsl_code
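As a small supplement to the doctests in the glsl_code record above, the sketch below combines two documented settings, glsl_types and use_operators. The outputs in the comments are indicative only and are not asserted here.

from sympy import Matrix, glsl_code, symbols

x = symbols('x')

# Render a small matrix as a plain float array instead of a vec type.
print(glsl_code(Matrix([1, 2, 3]), glsl_types=False))   # e.g. float[3](1, 2, 3)

# Replace infix arithmetic with the functional add/mul form.
print(glsl_code(3*x + 4, use_operators=False))          # e.g. add(mul(3, x), 4)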
sympy
63
sympy/integrals/heurisch.py
def heurisch(f, x, rewrite=False, hints=None, mappings=None, retries=3, degree_offset=0, unnecessary_permutations=None, _try_heurisch=None): """ Compute indefinite integral using heuristic Risch algorithm. Explanation =========== This is a heuristic approach to indefinite integration in finite terms using the extended heuristic (parallel) Risch algorithm, based on Manuel Bronstein's "Poor Man's Integrator". The algorithm supports various classes of functions including transcendental elementary or special functions like Airy, Bessel, Whittaker and Lambert. Note that this algorithm is not a decision procedure. If it isn't able to compute the antiderivative for a given function, then this is not a proof that such a functions does not exist. One should use recursive Risch algorithm in such case. It's an open question if this algorithm can be made a full decision procedure. This is an internal integrator procedure. You should use top level 'integrate' function in most cases, as this procedure needs some preprocessing steps and otherwise may fail. Specification ============= heurisch(f, x, rewrite=False, hints=None) where f : expression x : symbol rewrite -> force rewrite 'f' in terms of 'tan' and 'tanh' hints -> a list of functions that may appear in anti-derivate - hints = None --> no suggestions at all - hints = [ ] --> try to figure out - hints = [f1, ..., fn] --> we know better Examples ======== >>> from sympy import tan >>> from sympy.integrals.heurisch import heurisch >>> from sympy.abc import x, y >>> heurisch(y*tan(x), x) y*log(tan(x)**2 + 1)/2 See Manuel Bronstein's "Poor Man's Integrator": References ========== .. [1] https://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/index.html For more information on the implemented algorithm refer to: .. [2] K. Geddes, L. Stefanus, On the Risch-Norman Integration Method and its Implementation in Maple, Proceedings of ISSAC'89, ACM Press, 212-217. .. [3] J. H. Davenport, On the Parallel Risch Algorithm (I), Proceedings of EUROCAM'82, LNCS 144, Springer, 144-157. .. [4] J. H. Davenport, On the Parallel Risch Algorithm (III): Use of Tangents, SIGSAM Bulletin 16 (1982), 3-6. .. [5] J. H. Davenport, B. M. Trager, On the Parallel Risch Algorithm (II), ACM Transactions on Mathematical Software 11 (1985), 356-362. See Also ======== sympy.integrals.integrals.Integral.doit sympy.integrals.integrals.Integral sympy.integrals.heurisch.components """
/usr/src/app/target_test_cases/failed_tests_heurisch.txt
def heurisch(f, x, rewrite=False, hints=None, mappings=None, retries=3, degree_offset=0, unnecessary_permutations=None, _try_heurisch=None): """ Compute indefinite integral using heuristic Risch algorithm. Explanation =========== This is a heuristic approach to indefinite integration in finite terms using the extended heuristic (parallel) Risch algorithm, based on Manuel Bronstein's "Poor Man's Integrator". The algorithm supports various classes of functions including transcendental elementary or special functions like Airy, Bessel, Whittaker and Lambert. Note that this algorithm is not a decision procedure. If it isn't able to compute the antiderivative for a given function, then this is not a proof that such a functions does not exist. One should use recursive Risch algorithm in such case. It's an open question if this algorithm can be made a full decision procedure. This is an internal integrator procedure. You should use top level 'integrate' function in most cases, as this procedure needs some preprocessing steps and otherwise may fail. Specification ============= heurisch(f, x, rewrite=False, hints=None) where f : expression x : symbol rewrite -> force rewrite 'f' in terms of 'tan' and 'tanh' hints -> a list of functions that may appear in anti-derivate - hints = None --> no suggestions at all - hints = [ ] --> try to figure out - hints = [f1, ..., fn] --> we know better Examples ======== >>> from sympy import tan >>> from sympy.integrals.heurisch import heurisch >>> from sympy.abc import x, y >>> heurisch(y*tan(x), x) y*log(tan(x)**2 + 1)/2 See Manuel Bronstein's "Poor Man's Integrator": References ========== .. [1] https://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/index.html For more information on the implemented algorithm refer to: .. [2] K. Geddes, L. Stefanus, On the Risch-Norman Integration Method and its Implementation in Maple, Proceedings of ISSAC'89, ACM Press, 212-217. .. [3] J. H. Davenport, On the Parallel Risch Algorithm (I), Proceedings of EUROCAM'82, LNCS 144, Springer, 144-157. .. [4] J. H. Davenport, On the Parallel Risch Algorithm (III): Use of Tangents, SIGSAM Bulletin 16 (1982), 3-6. .. [5] J. H. Davenport, B. M. Trager, On the Parallel Risch Algorithm (II), ACM Transactions on Mathematical Software 11 (1985), 356-362. See Also ======== sympy.integrals.integrals.Integral.doit sympy.integrals.integrals.Integral sympy.integrals.heurisch.components """ f = sympify(f) # There are some functions that Heurisch cannot currently handle, # so do not even try. 
# Set _try_heurisch=True to skip this check if _try_heurisch is not True: if f.has(Abs, re, im, sign, Heaviside, DiracDelta, floor, ceiling, arg): return if not f.has_free(x): return f*x if not f.is_Add: indep, f = f.as_independent(x) else: indep = S.One rewritables = { (sin, cos, cot): tan, (sinh, cosh, coth): tanh, } if rewrite: for candidates, rule in rewritables.items(): f = f.rewrite(candidates, rule) else: for candidates in rewritables.keys(): if f.has(*candidates): break else: rewrite = True terms = components(f, x) dcache = DiffCache(x) if hints is not None: if not hints: a = Wild('a', exclude=[x]) b = Wild('b', exclude=[x]) c = Wild('c', exclude=[x]) for g in set(terms): # using copy of terms if g.is_Function: if isinstance(g, li): M = g.args[0].match(a*x**b) if M is not None: terms.add( x*(li(M[a]*x**M[b]) - (M[a]*x**M[b])**(-1/M[b])*Ei((M[b]+1)*log(M[a]*x**M[b])/M[b])) ) #terms.add( x*(li(M[a]*x**M[b]) - (x**M[b])**(-1/M[b])*Ei((M[b]+1)*log(M[a]*x**M[b])/M[b])) ) #terms.add( x*(li(M[a]*x**M[b]) - x*Ei((M[b]+1)*log(M[a]*x**M[b])/M[b])) ) #terms.add( li(M[a]*x**M[b]) - Ei((M[b]+1)*log(M[a]*x**M[b])/M[b]) ) elif isinstance(g, exp): M = g.args[0].match(a*x**2) if M is not None: if M[a].is_positive: terms.add(erfi(sqrt(M[a])*x)) else: # M[a].is_negative or unknown terms.add(erf(sqrt(-M[a])*x)) M = g.args[0].match(a*x**2 + b*x + c) if M is not None: if M[a].is_positive: terms.add(sqrt(pi/4*(-M[a]))*exp(M[c] - M[b]**2/(4*M[a]))* erfi(sqrt(M[a])*x + M[b]/(2*sqrt(M[a])))) elif M[a].is_negative: terms.add(sqrt(pi/4*(-M[a]))*exp(M[c] - M[b]**2/(4*M[a]))* erf(sqrt(-M[a])*x - M[b]/(2*sqrt(-M[a])))) M = g.args[0].match(a*log(x)**2) if M is not None: if M[a].is_positive: terms.add(erfi(sqrt(M[a])*log(x) + 1/(2*sqrt(M[a])))) if M[a].is_negative: terms.add(erf(sqrt(-M[a])*log(x) - 1/(2*sqrt(-M[a])))) elif g.is_Pow: if g.exp.is_Rational and g.exp.q == 2: M = g.base.match(a*x**2 + b) if M is not None and M[b].is_positive: if M[a].is_positive: terms.add(asinh(sqrt(M[a]/M[b])*x)) elif M[a].is_negative: terms.add(asin(sqrt(-M[a]/M[b])*x)) M = g.base.match(a*x**2 - b) if M is not None and M[b].is_positive: if M[a].is_positive: dF = 1/sqrt(M[a]*x**2 - M[b]) F = log(2*sqrt(M[a])*sqrt(M[a]*x**2 - M[b]) + 2*M[a]*x)/sqrt(M[a]) dcache.cache[F] = dF # hack: F.diff(x) doesn't automatically simplify to f terms.add(F) elif M[a].is_negative: terms.add(-M[b]/2*sqrt(-M[a])* atan(sqrt(-M[a])*x/sqrt(M[a]*x**2 - M[b]))) else: terms |= set(hints) for g in set(terms): # using copy of terms terms |= components(dcache.get_diff(g), x) # XXX: The commented line below makes heurisch more deterministic wrt # PYTHONHASHSEED and the iteration order of sets. There are other places # where sets are iterated over but this one is possibly the most important. # Theoretically the order here should not matter but different orderings # can expose potential bugs in the different code paths so potentially it # is better to keep the non-determinism. # # terms = list(ordered(terms)) # TODO: caching is significant factor for why permutations work at all. Change this. V = _symbols('x', len(terms)) # sort mapping expressions from largest to smallest (last is always x). 
mapping = list(reversed(list(zip(*ordered( # [(a[0].as_independent(x)[1], a) for a in zip(terms, V)])))[1])) # rev_mapping = {v: k for k, v in mapping} # if mappings is None: # # optimizing the number of permutations of mapping # assert mapping[-1][0] == x # if not, find it and correct this comment unnecessary_permutations = [mapping.pop(-1)] # permute types of objects types = defaultdict(list) for i in mapping: e, _ = i types[type(e)].append(i) mapping = [types[i] for i in types] def _iter_mappings(): for i in permutations(mapping): # make the expression of a given type be ordered yield [j for i in i for j in ordered(i)] mappings = _iter_mappings() else: unnecessary_permutations = unnecessary_permutations or [] def _substitute(expr): return expr.subs(mapping) for mapping in mappings: mapping = list(mapping) mapping = mapping + unnecessary_permutations diffs = [ _substitute(dcache.get_diff(g)) for g in terms ] denoms = [ g.as_numer_denom()[1] for g in diffs ] if all(h.is_polynomial(*V) for h in denoms) and _substitute(f).is_rational_function(*V): denom = reduce(lambda p, q: lcm(p, q, *V), denoms) break else: if not rewrite: result = heurisch(f, x, rewrite=True, hints=hints, unnecessary_permutations=unnecessary_permutations) if result is not None: return indep*result return None numers = [ cancel(denom*g) for g in diffs ] def _derivation(h): return Add(*[ d * h.diff(v) for d, v in zip(numers, V) ]) def _deflation(p): for y in V: if not p.has(y): continue if _derivation(p) is not S.Zero: c, q = p.as_poly(y).primitive() return _deflation(c)*gcd(q, q.diff(y)).as_expr() return p def _splitter(p): for y in V: if not p.has(y): continue if _derivation(y) is not S.Zero: c, q = p.as_poly(y).primitive() q = q.as_expr() h = gcd(q, _derivation(q), y) s = quo(h, gcd(q, q.diff(y), y), y) c_split = _splitter(c) if s.as_poly(y).degree() == 0: return (c_split[0], q * c_split[1]) q_split = _splitter(cancel(q / s)) return (c_split[0]*q_split[0]*s, c_split[1]*q_split[1]) return (S.One, p) special = {} for term in terms: if term.is_Function: if isinstance(term, tan): special[1 + _substitute(term)**2] = False elif isinstance(term, tanh): special[1 + _substitute(term)] = False special[1 - _substitute(term)] = False elif isinstance(term, LambertW): special[_substitute(term)] = True F = _substitute(f) P, Q = F.as_numer_denom() u_split = _splitter(denom) v_split = _splitter(Q) polys = set(list(v_split) + [ u_split[0] ] + list(special.keys())) s = u_split[0] * Mul(*[ k for k, v in special.items() if v ]) polified = [ p.as_poly(*V) for p in [s, P, Q] ] if None in polified: return None #--- definitions for _integrate a, b, c = [ p.total_degree() for p in polified ] poly_denom = (s * v_split[0] * _deflation(v_split[1])).as_expr() def _exponent(g): if g.is_Pow: if g.exp.is_Rational and g.exp.q != 1: if g.exp.p > 0: return g.exp.p + g.exp.q - 1 else: return abs(g.exp.p + g.exp.q) else: return 1 elif not g.is_Atom and g.args: return max(_exponent(h) for h in g.args) else: return 1 A, B = _exponent(f), a + max(b, c) if A > 1 and B > 1: monoms = tuple(ordered(itermonomials(V, A + B - 1 + degree_offset))) else: monoms = tuple(ordered(itermonomials(V, A + B + degree_offset))) poly_coeffs = _symbols('A', len(monoms)) poly_part = Add(*[ poly_coeffs[i]*monomial for i, monomial in enumerate(monoms) ]) reducibles = set() for poly in ordered(polys): coeff, factors = factor_list(poly, *V) reducibles.add(coeff) reducibles.update(fact for fact, mul in factors) def _integrate(field=None): atans = set() pairs = set() if field == 'Q': 
irreducibles = set(reducibles) else: setV = set(V) irreducibles = set() for poly in ordered(reducibles): zV = setV & set(iterfreeargs(poly)) for z in ordered(zV): s = set(root_factors(poly, z, filter=field)) irreducibles |= s break log_part, atan_part = [], [] for poly in ordered(irreducibles): m = collect(poly, I, evaluate=False) y = m.get(I, S.Zero) if y: x = m.get(S.One, S.Zero) if x.has(I) or y.has(I): continue # nontrivial x + I*y pairs.add((x, y)) irreducibles.remove(poly) while pairs: x, y = pairs.pop() if (x, -y) in pairs: pairs.remove((x, -y)) # Choosing b with no minus sign if y.could_extract_minus_sign(): y = -y irreducibles.add(x*x + y*y) atans.add(atan(x/y)) else: irreducibles.add(x + I*y) B = _symbols('B', len(irreducibles)) C = _symbols('C', len(atans)) # Note: the ordering matters here for poly, b in reversed(list(zip(ordered(irreducibles), B))): if poly.has(*V): poly_coeffs.append(b) log_part.append(b * log(poly)) for poly, c in reversed(list(zip(ordered(atans), C))): if poly.has(*V): poly_coeffs.append(c) atan_part.append(c * poly) # TODO: Currently it's better to use symbolic expressions here instead # of rational functions, because it's simpler and FracElement doesn't # give big speed improvement yet. This is because cancellation is slow # due to slow polynomial GCD algorithms. If this gets improved then # revise this code. candidate = poly_part/poly_denom + Add(*log_part) + Add(*atan_part) h = F - _derivation(candidate) / denom raw_numer = h.as_numer_denom()[0] # Rewrite raw_numer as a polynomial in K[coeffs][V] where K is a field # that we have to determine. We can't use simply atoms() because log(3), # sqrt(y) and similar expressions can appear, leading to non-trivial # domains. syms = set(poly_coeffs) | set(V) non_syms = set() def find_non_syms(expr): if expr.is_Integer or expr.is_Rational: pass # ignore trivial numbers elif expr in syms: pass # ignore variables elif not expr.has_free(*syms): non_syms.add(expr) elif expr.is_Add or expr.is_Mul or expr.is_Pow: list(map(find_non_syms, expr.args)) else: # TODO: Non-polynomial expression. This should have been # filtered out at an earlier stage. raise PolynomialError try: find_non_syms(raw_numer) except PolynomialError: return None else: ground, _ = construct_domain(non_syms, field=True) coeff_ring = PolyRing(poly_coeffs, ground) ring = PolyRing(V, coeff_ring) try: numer = ring.from_expr(raw_numer) except ValueError: raise PolynomialError solution = solve_lin_sys(numer.coeffs(), coeff_ring, _raw=False) if solution is None: return None else: return candidate.xreplace(solution).xreplace( dict(zip(poly_coeffs, [S.Zero]*len(poly_coeffs)))) if all(isinstance(_, Symbol) for _ in V): more_free = F.free_symbols - set(V) else: Fd = F.as_dummy() more_free = Fd.xreplace(dict(zip(V, (Dummy() for _ in V))) ).free_symbols & Fd.free_symbols if not more_free: # all free generators are identified in V solution = _integrate('Q') if solution is None: solution = _integrate() else: solution = _integrate() if solution is not None: antideriv = solution.subs(rev_mapping) antideriv = cancel(antideriv).expand() if antideriv.is_Add: antideriv = antideriv.as_independent(x)[1] return indep*antideriv else: if retries >= 0: result = heurisch(f, x, mappings=mappings, rewrite=rewrite, hints=hints, retries=retries - 1, unnecessary_permutations=unnecessary_permutations) if result is not None: return indep*result return None
heurisch
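A brief illustrative call pattern for heurisch, in the same spirit as the doctest above. The expected results in the comments are indicative, since heurisch may legitimately return an unsimplified antiderivative or None.

from sympy import exp
from sympy.abc import x
from sympy.integrals.heurisch import heurisch

# A Gaussian-type integrand that the heuristic handles directly.
heurisch(x*exp(-x**2), x)           # expected: -exp(-x**2)/2

# With hints=[] the routine is allowed to guess special functions
# (such as erf) that may appear in the antiderivative.
heurisch(exp(-x**2), x, hints=[])   # may involve erf, or be None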
sympy
64
sympy/integrals/integrals.py
def integrate(*args, meijerg=None, conds='piecewise', risch=None, heurisch=None, manual=None, **kwargs): """integrate(f, var, ...) .. deprecated:: 1.6 Using ``integrate()`` with :class:`~.Poly` is deprecated. Use :meth:`.Poly.integrate` instead. See :ref:`deprecated-integrate-poly`. Explanation =========== Compute definite or indefinite integral of one or more variables using Risch-Norman algorithm and table lookup. This procedure is able to handle elementary algebraic and transcendental functions and also a huge class of special functions, including Airy, Bessel, Whittaker and Lambert. var can be: - a symbol -- indefinite integration - a tuple (symbol, a) -- indefinite integration with result given with ``a`` replacing ``symbol`` - a tuple (symbol, a, b) -- definite integration Several variables can be specified, in which case the result is multiple integration. (If var is omitted and the integrand is univariate, the indefinite integral in that variable will be performed.) Indefinite integrals are returned without terms that are independent of the integration variables. (see examples) Definite improper integrals often entail delicate convergence conditions. Pass conds='piecewise', 'separate' or 'none' to have these returned, respectively, as a Piecewise function, as a separate result (i.e. result will be a tuple), or not at all (default is 'piecewise'). **Strategy** SymPy uses various approaches to definite integration. One method is to find an antiderivative for the integrand, and then use the fundamental theorem of calculus. Various functions are implemented to integrate polynomial, rational and trigonometric functions, and integrands containing DiracDelta terms. SymPy also implements the part of the Risch algorithm, which is a decision procedure for integrating elementary functions, i.e., the algorithm can either find an elementary antiderivative, or prove that one does not exist. There is also a (very successful, albeit somewhat slow) general implementation of the heuristic Risch algorithm. This algorithm will eventually be phased out as more of the full Risch algorithm is implemented. See the docstring of Integral._eval_integral() for more details on computing the antiderivative using algebraic methods. The option risch=True can be used to use only the (full) Risch algorithm. This is useful if you want to know if an elementary function has an elementary antiderivative. If the indefinite Integral returned by this function is an instance of NonElementaryIntegral, that means that the Risch algorithm has proven that integral to be non-elementary. Note that by default, additional methods (such as the Meijer G method outlined below) are tried on these integrals, as they may be expressible in terms of special functions, so if you only care about elementary answers, use risch=True. Also note that an unevaluated Integral returned by this function is not necessarily a NonElementaryIntegral, even with risch=True, as it may just be an indication that the particular part of the Risch algorithm needed to integrate that function is not yet implemented. Another family of strategies comes from re-writing the integrand in terms of so-called Meijer G-functions. Indefinite integrals of a single G-function can always be computed, and the definite integral of a product of two G-functions can be computed from zero to infinity. Various strategies are implemented to rewrite integrands as G-functions, and use this information to compute integrals (see the ``meijerint`` module). 
The option manual=True can be used to use only an algorithm that tries to mimic integration by hand. This algorithm does not handle as many integrands as the other algorithms implemented but may return results in a more familiar form. The ``manualintegrate`` module has functions that return the steps used (see the module docstring for more information). In general, the algebraic methods work best for computing antiderivatives of (possibly complicated) combinations of elementary functions. The G-function methods work best for computing definite integrals from zero to infinity of moderately complicated combinations of special functions, or indefinite integrals of very simple combinations of special functions. The strategy employed by the integration code is as follows: - If computing a definite integral, and both limits are real, and at least one limit is +- oo, try the G-function method of definite integration first. - Try to find an antiderivative, using all available methods, ordered by performance (that is try fastest method first, slowest last; in particular polynomial integration is tried first, Meijer G-functions second to last, and heuristic Risch last). - If still not successful, try G-functions irrespective of the limits. The option meijerg=True, False, None can be used to, respectively: always use G-function methods and no others, never use G-function methods, or use all available methods (in order as described above). It defaults to None. Examples ======== >>> from sympy import integrate, log, exp, oo >>> from sympy.abc import a, x, y >>> integrate(x*y, x) x**2*y/2 >>> integrate(log(x), x) x*log(x) - x >>> integrate(log(x), (x, 1, a)) a*log(a) - a + 1 >>> integrate(x) x**2/2 Terms that are independent of x are dropped by indefinite integration: >>> from sympy import sqrt >>> integrate(sqrt(1 + x), (x, 0, x)) 2*(x + 1)**(3/2)/3 - 2/3 >>> integrate(sqrt(1 + x), x) 2*(x + 1)**(3/2)/3 >>> integrate(x*y) Traceback (most recent call last): ... ValueError: specify integration variables to integrate x*y Note that ``integrate(x)`` syntax is meant only for convenience in interactive sessions and should be avoided in library code. >>> integrate(x**a*exp(-x), (x, 0, oo)) # same as conds='piecewise' Piecewise((gamma(a + 1), re(a) > -1), (Integral(x**a*exp(-x), (x, 0, oo)), True)) >>> integrate(x**a*exp(-x), (x, 0, oo), conds='none') gamma(a + 1) >>> integrate(x**a*exp(-x), (x, 0, oo), conds='separate') (gamma(a + 1), re(a) > -1) See Also ======== Integral, Integral.doit """
/usr/src/app/target_test_cases/failed_tests_integrate.txt
def integrate(*args, meijerg=None, conds='piecewise', risch=None, heurisch=None, manual=None, **kwargs): """integrate(f, var, ...) .. deprecated:: 1.6 Using ``integrate()`` with :class:`~.Poly` is deprecated. Use :meth:`.Poly.integrate` instead. See :ref:`deprecated-integrate-poly`. Explanation =========== Compute definite or indefinite integral of one or more variables using Risch-Norman algorithm and table lookup. This procedure is able to handle elementary algebraic and transcendental functions and also a huge class of special functions, including Airy, Bessel, Whittaker and Lambert. var can be: - a symbol -- indefinite integration - a tuple (symbol, a) -- indefinite integration with result given with ``a`` replacing ``symbol`` - a tuple (symbol, a, b) -- definite integration Several variables can be specified, in which case the result is multiple integration. (If var is omitted and the integrand is univariate, the indefinite integral in that variable will be performed.) Indefinite integrals are returned without terms that are independent of the integration variables. (see examples) Definite improper integrals often entail delicate convergence conditions. Pass conds='piecewise', 'separate' or 'none' to have these returned, respectively, as a Piecewise function, as a separate result (i.e. result will be a tuple), or not at all (default is 'piecewise'). **Strategy** SymPy uses various approaches to definite integration. One method is to find an antiderivative for the integrand, and then use the fundamental theorem of calculus. Various functions are implemented to integrate polynomial, rational and trigonometric functions, and integrands containing DiracDelta terms. SymPy also implements the part of the Risch algorithm, which is a decision procedure for integrating elementary functions, i.e., the algorithm can either find an elementary antiderivative, or prove that one does not exist. There is also a (very successful, albeit somewhat slow) general implementation of the heuristic Risch algorithm. This algorithm will eventually be phased out as more of the full Risch algorithm is implemented. See the docstring of Integral._eval_integral() for more details on computing the antiderivative using algebraic methods. The option risch=True can be used to use only the (full) Risch algorithm. This is useful if you want to know if an elementary function has an elementary antiderivative. If the indefinite Integral returned by this function is an instance of NonElementaryIntegral, that means that the Risch algorithm has proven that integral to be non-elementary. Note that by default, additional methods (such as the Meijer G method outlined below) are tried on these integrals, as they may be expressible in terms of special functions, so if you only care about elementary answers, use risch=True. Also note that an unevaluated Integral returned by this function is not necessarily a NonElementaryIntegral, even with risch=True, as it may just be an indication that the particular part of the Risch algorithm needed to integrate that function is not yet implemented. Another family of strategies comes from re-writing the integrand in terms of so-called Meijer G-functions. Indefinite integrals of a single G-function can always be computed, and the definite integral of a product of two G-functions can be computed from zero to infinity. Various strategies are implemented to rewrite integrands as G-functions, and use this information to compute integrals (see the ``meijerint`` module). 
The option manual=True can be used to use only an algorithm that tries to mimic integration by hand. This algorithm does not handle as many integrands as the other algorithms implemented but may return results in a more familiar form. The ``manualintegrate`` module has functions that return the steps used (see the module docstring for more information). In general, the algebraic methods work best for computing antiderivatives of (possibly complicated) combinations of elementary functions. The G-function methods work best for computing definite integrals from zero to infinity of moderately complicated combinations of special functions, or indefinite integrals of very simple combinations of special functions. The strategy employed by the integration code is as follows: - If computing a definite integral, and both limits are real, and at least one limit is +- oo, try the G-function method of definite integration first. - Try to find an antiderivative, using all available methods, ordered by performance (that is try fastest method first, slowest last; in particular polynomial integration is tried first, Meijer G-functions second to last, and heuristic Risch last). - If still not successful, try G-functions irrespective of the limits. The option meijerg=True, False, None can be used to, respectively: always use G-function methods and no others, never use G-function methods, or use all available methods (in order as described above). It defaults to None. Examples ======== >>> from sympy import integrate, log, exp, oo >>> from sympy.abc import a, x, y >>> integrate(x*y, x) x**2*y/2 >>> integrate(log(x), x) x*log(x) - x >>> integrate(log(x), (x, 1, a)) a*log(a) - a + 1 >>> integrate(x) x**2/2 Terms that are independent of x are dropped by indefinite integration: >>> from sympy import sqrt >>> integrate(sqrt(1 + x), (x, 0, x)) 2*(x + 1)**(3/2)/3 - 2/3 >>> integrate(sqrt(1 + x), x) 2*(x + 1)**(3/2)/3 >>> integrate(x*y) Traceback (most recent call last): ... ValueError: specify integration variables to integrate x*y Note that ``integrate(x)`` syntax is meant only for convenience in interactive sessions and should be avoided in library code. >>> integrate(x**a*exp(-x), (x, 0, oo)) # same as conds='piecewise' Piecewise((gamma(a + 1), re(a) > -1), (Integral(x**a*exp(-x), (x, 0, oo)), True)) >>> integrate(x**a*exp(-x), (x, 0, oo), conds='none') gamma(a + 1) >>> integrate(x**a*exp(-x), (x, 0, oo), conds='separate') (gamma(a + 1), re(a) > -1) See Also ======== Integral, Integral.doit """ doit_flags = { 'deep': False, 'meijerg': meijerg, 'conds': conds, 'risch': risch, 'heurisch': heurisch, 'manual': manual } integral = Integral(*args, **kwargs) if isinstance(integral, Integral): return integral.doit(**doit_flags) else: new_args = [a.doit(**doit_flags) if isinstance(a, Integral) else a for a in integral.args] return integral.func(*new_args)
integrate
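The strategy flags discussed in the integrate docstring (meijerg, risch, manual) can be exercised as below; the commented results reflect the documented behaviour and are not asserted here.

from sympy import integrate, exp, oo
from sympy.abc import x

# Force the Meijer G-function machinery for a definite integral.
integrate(exp(-x**2), (x, -oo, oo), meijerg=True)   # sqrt(pi)

# Use only the full Risch algorithm; a NonElementaryIntegral result
# proves the antiderivative is not elementary.
integrate(exp(x**2), x, risch=True)

# Mimic integration by hand via the manualintegrate module.
integrate(x*exp(x), x, manual=True)                 # x*exp(x) - exp(x)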
sympy
65
sympy/integrals/intpoly.py
def integration_reduction_dynamic(facets, index, a, b, expr, degree, dims, x_index, y_index, max_index, x0, monomial_values, monom_index, vertices=None, hp_param=None): """The same integration_reduction function which uses a dynamic programming approach to compute terms by using the values of the integral of previously computed terms. Parameters ========== facets : Facets of the Polytope. index : Index of facet to find intersections with.(Used in left_integral()). a, b : Hyperplane parameters. expr : Input monomial. degree : Total degree of ``expr``. dims : Tuple denoting axes variables. x_index : Exponent of 'x' in ``expr``. y_index : Exponent of 'y' in ``expr``. max_index : Maximum exponent of any monomial in ``monomial_values``. x0 : First point on ``facets[index]``. monomial_values : List of monomial values constituting the polynomial. monom_index : Index of monomial whose integration is being found. vertices : optional Coordinates of vertices constituting the 3-Polytope. hp_param : optional Hyperplane Parameter of the face of the facets[index]. Examples ======== >>> from sympy.abc import x, y >>> from sympy.integrals.intpoly import (integration_reduction_dynamic, \ hyperplane_parameters) >>> from sympy import Point, Polygon >>> triangle = Polygon(Point(0, 3), Point(5, 3), Point(1, 1)) >>> facets = triangle.sides >>> a, b = hyperplane_parameters(triangle)[0] >>> x0 = facets[0].points[0] >>> monomial_values = [[0, 0, 0, 0], [1, 0, 0, 5],\ [y, 0, 1, 15], [x, 1, 0, None]] >>> integration_reduction_dynamic(facets, 0, a, b, x, 1, (x, y), 1, 0, 1,\ x0, monomial_values, 3) 25/2 """
/usr/src/app/target_test_cases/failed_tests_integration_reduction_dynamic.txt
def integration_reduction_dynamic(facets, index, a, b, expr, degree, dims, x_index, y_index, max_index, x0, monomial_values, monom_index, vertices=None, hp_param=None): """The same integration_reduction function which uses a dynamic programming approach to compute terms by using the values of the integral of previously computed terms. Parameters ========== facets : Facets of the Polytope. index : Index of facet to find intersections with.(Used in left_integral()). a, b : Hyperplane parameters. expr : Input monomial. degree : Total degree of ``expr``. dims : Tuple denoting axes variables. x_index : Exponent of 'x' in ``expr``. y_index : Exponent of 'y' in ``expr``. max_index : Maximum exponent of any monomial in ``monomial_values``. x0 : First point on ``facets[index]``. monomial_values : List of monomial values constituting the polynomial. monom_index : Index of monomial whose integration is being found. vertices : optional Coordinates of vertices constituting the 3-Polytope. hp_param : optional Hyperplane Parameter of the face of the facets[index]. Examples ======== >>> from sympy.abc import x, y >>> from sympy.integrals.intpoly import (integration_reduction_dynamic, \ hyperplane_parameters) >>> from sympy import Point, Polygon >>> triangle = Polygon(Point(0, 3), Point(5, 3), Point(1, 1)) >>> facets = triangle.sides >>> a, b = hyperplane_parameters(triangle)[0] >>> x0 = facets[0].points[0] >>> monomial_values = [[0, 0, 0, 0], [1, 0, 0, 5],\ [y, 0, 1, 15], [x, 1, 0, None]] >>> integration_reduction_dynamic(facets, 0, a, b, x, 1, (x, y), 1, 0, 1,\ x0, monomial_values, 3) 25/2 """ value = S.Zero m = len(facets) if expr == S.Zero: return expr if len(dims) == 2: if not expr.is_number: _, x_degree, y_degree, _ = monomial_values[monom_index] x_index = monom_index - max_index + \ x_index - 2 if x_degree > 0 else 0 y_index = monom_index - 1 if y_degree > 0 else 0 x_value, y_value =\ monomial_values[x_index][3], monomial_values[y_index][3] value += x_degree * x_value * x0[0] + y_degree * y_value * x0[1] value += left_integral2D(m, index, facets, x0, expr, dims) else: # For 3D use case the max_index contains the z_degree of the term z_index = max_index if not expr.is_number: x_degree, y_degree, z_degree = y_index,\ z_index - x_index - y_index, x_index x_value = monomial_values[z_index - 1][y_index - 1][x_index][7]\ if x_degree > 0 else 0 y_value = monomial_values[z_index - 1][y_index][x_index][7]\ if y_degree > 0 else 0 z_value = monomial_values[z_index - 1][y_index][x_index - 1][7]\ if z_degree > 0 else 0 value += x_degree * x_value * x0[0] + y_degree * y_value * x0[1] \ + z_degree * z_value * x0[2] value += left_integral3D(facets, index, expr, vertices, hp_param, degree) return value / (len(dims) + degree - 1)
integration_reduction_dynamic
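integration_reduction_dynamic is normally driven by the top-level polytope_integrate entry point rather than called directly; a minimal sketch of that call is shown below. The commented value follows from the documented behaviour and is not asserted here.

from sympy import Point, Polygon
from sympy.abc import x, y
from sympy.integrals.intpoly import polytope_integrate

# Integrate x*y over the unit square; internally this reuses the
# dynamic-programming reduction implemented above.
square = Polygon(Point(0, 0), Point(0, 1), Point(1, 1), Point(1, 0))
polytope_integrate(square, x*y)   # 1/4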
sympy
66
sympy/geometry/util.py
def intersection(*entities, pairwise=False, **kwargs): """The intersection of a collection of GeometryEntity instances. Parameters ========== entities : sequence of GeometryEntity pairwise (keyword argument) : Can be either True or False Returns ======= intersection : list of GeometryEntity Raises ====== NotImplementedError When unable to calculate intersection. Notes ===== The intersection of any geometrical entity with itself should return a list with one item: the entity in question. An intersection requires two or more entities. If only a single entity is given then the function will return an empty list. It is possible for `intersection` to miss intersections that one knows exists because the required quantities were not fully simplified internally. Reals should be converted to Rationals, e.g. Rational(str(real_num)) or else failures due to floating point issues may result. Case 1: When the keyword argument 'pairwise' is False (default value): In this case, the function returns a list of intersections common to all entities. Case 2: When the keyword argument 'pairwise' is True: In this case, the functions returns a list intersections that occur between any pair of entities. See Also ======== sympy.geometry.entity.GeometryEntity.intersection Examples ======== >>> from sympy import Ray, Circle, intersection >>> c = Circle((0, 1), 1) >>> intersection(c, c.center) [] >>> right = Ray((0, 0), (1, 0)) >>> up = Ray((0, 0), (0, 1)) >>> intersection(c, right, up) [Point2D(0, 0)] >>> intersection(c, right, up, pairwise=True) [Point2D(0, 0), Point2D(0, 2)] >>> left = Ray((1, 0), (0, 0)) >>> intersection(right, left) [Segment2D(Point2D(0, 0), Point2D(1, 0))] """
/usr/src/app/target_test_cases/failed_tests_intersection.txt
def intersection(*entities, pairwise=False, **kwargs): """The intersection of a collection of GeometryEntity instances. Parameters ========== entities : sequence of GeometryEntity pairwise (keyword argument) : Can be either True or False Returns ======= intersection : list of GeometryEntity Raises ====== NotImplementedError When unable to calculate intersection. Notes ===== The intersection of any geometrical entity with itself should return a list with one item: the entity in question. An intersection requires two or more entities. If only a single entity is given then the function will return an empty list. It is possible for `intersection` to miss intersections that one knows exists because the required quantities were not fully simplified internally. Reals should be converted to Rationals, e.g. Rational(str(real_num)) or else failures due to floating point issues may result. Case 1: When the keyword argument 'pairwise' is False (default value): In this case, the function returns a list of intersections common to all entities. Case 2: When the keyword argument 'pairwise' is True: In this case, the functions returns a list intersections that occur between any pair of entities. See Also ======== sympy.geometry.entity.GeometryEntity.intersection Examples ======== >>> from sympy import Ray, Circle, intersection >>> c = Circle((0, 1), 1) >>> intersection(c, c.center) [] >>> right = Ray((0, 0), (1, 0)) >>> up = Ray((0, 0), (0, 1)) >>> intersection(c, right, up) [Point2D(0, 0)] >>> intersection(c, right, up, pairwise=True) [Point2D(0, 0), Point2D(0, 2)] >>> left = Ray((1, 0), (0, 0)) >>> intersection(right, left) [Segment2D(Point2D(0, 0), Point2D(1, 0))] """ if len(entities) <= 1: return [] entities = list(entities) prec = None for i, e in enumerate(entities): if not isinstance(e, GeometryEntity): # entities may be an immutable tuple e = Point(e) # convert to exact Rationals d = {} for f in e.atoms(Float): prec = f._prec if prec is None else min(f._prec, prec) d.setdefault(f, nsimplify(f, rational=True)) entities[i] = e.xreplace(d) if not pairwise: # find the intersection common to all objects res = entities[0].intersection(entities[1]) for entity in entities[2:]: newres = [] for x in res: newres.extend(x.intersection(entity)) res = newres else: # find all pairwise intersections ans = [] for j in range(len(entities)): for k in range(j + 1, len(entities)): ans.extend(intersection(entities[j], entities[k])) res = list(ordered(set(ans))) # convert back to Floats if prec is not None: p = prec_to_dps(prec) res = [i.n(p) for i in res] return res
intersection
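A short sketch complementing the intersection examples above, using segments with exact rational coordinates (as the docstring recommends) and contrasting the common and pairwise modes; outputs in comments are indicative.

from sympy import Point, Rational, Segment, intersection

# The two diagonals of the unit square meet at their midpoint.
s1 = Segment(Point(0, 0), Point(1, 1))
s2 = Segment(Point(0, 1), Point(1, 0))
intersection(s1, s2)                      # [Point2D(1/2, 1/2)]

# With three entities, pairwise=True also reports intersections that
# are shared by only two of them.
s3 = Segment(Point(0, Rational(1, 2)), Point(1, Rational(1, 2)))
intersection(s1, s2, s3, pairwise=True)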
sympy
67
sympy/printing/jscode.py
def jscode(expr, assign_to=None, **settings): """Converts an expr to a string of javascript code Parameters ========== expr : Expr A SymPy expression to be converted. assign_to : optional When given, the argument is used as the name of the variable to which the expression is assigned. Can be a string, ``Symbol``, ``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of line-wrapping, or for expressions that generate multi-line statements. precision : integer, optional The precision for numbers such as pi [default=15]. user_functions : dict, optional A dictionary where keys are ``FunctionClass`` instances and values are their string representations. Alternatively, the dictionary value can be a list of tuples i.e. [(argument_test, js_function_string)]. See below for examples. human : bool, optional If True, the result is a single string that may contain some constant declarations for the number symbols. If False, the same information is returned in a tuple of (symbols_to_declare, not_supported_functions, code_text). [default=True]. contract: bool, optional If True, ``Indexed`` instances are assumed to obey tensor contraction rules and the corresponding nested loops over indices are generated. Setting contract=False will not generate loops, instead the user is responsible to provide values for the indices in the code. [default=True]. Examples ======== >>> from sympy import jscode, symbols, Rational, sin, ceiling, Abs >>> x, tau = symbols("x, tau") >>> jscode((2*tau)**Rational(7, 2)) '8*Math.sqrt(2)*Math.pow(tau, 7/2)' >>> jscode(sin(x), assign_to="s") 's = Math.sin(x);' Custom printing can be defined for certain types by passing a dictionary of "type" : "function" to the ``user_functions`` kwarg. Alternatively, the dictionary value can be a list of tuples i.e. [(argument_test, js_function_string)]. >>> custom_functions = { ... "ceiling": "CEIL", ... "Abs": [(lambda x: not x.is_integer, "fabs"), ... (lambda x: x.is_integer, "ABS")] ... } >>> jscode(Abs(x) + ceiling(x), user_functions=custom_functions) 'fabs(x) + CEIL(x)' ``Piecewise`` expressions are converted into conditionals. If an ``assign_to`` variable is provided an if statement is created, otherwise the ternary operator is used. Note that if the ``Piecewise`` lacks a default term, represented by ``(expr, True)`` then an error will be thrown. This is to prevent generating an expression that may not evaluate to anything. >>> from sympy import Piecewise >>> expr = Piecewise((x + 1, x > 0), (x, True)) >>> print(jscode(expr, tau)) if (x > 0) { tau = x + 1; } else { tau = x; } Support for loops is provided through ``Indexed`` types. With ``contract=True`` these expressions will be turned into loops, whereas ``contract=False`` will just print the assignment expression that should be looped over: >>> from sympy import Eq, IndexedBase, Idx >>> len_y = 5 >>> y = IndexedBase('y', shape=(len_y,)) >>> t = IndexedBase('t', shape=(len_y,)) >>> Dy = IndexedBase('Dy', shape=(len_y-1,)) >>> i = Idx('i', len_y-1) >>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i])) >>> jscode(e.rhs, assign_to=e.lhs, contract=False) 'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);' Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions must be provided to ``assign_to``. 
Note that any expression that can be generated normally can also exist inside a Matrix: >>> from sympy import Matrix, MatrixSymbol >>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)]) >>> A = MatrixSymbol('A', 3, 1) >>> print(jscode(mat, A)) A[0] = Math.pow(x, 2); if (x > 0) { A[1] = x + 1; } else { A[1] = x; } A[2] = Math.sin(x); """
/usr/src/app/target_test_cases/failed_tests_jscode.txt
def jscode(expr, assign_to=None, **settings): """Converts an expr to a string of javascript code Parameters ========== expr : Expr A SymPy expression to be converted. assign_to : optional When given, the argument is used as the name of the variable to which the expression is assigned. Can be a string, ``Symbol``, ``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of line-wrapping, or for expressions that generate multi-line statements. precision : integer, optional The precision for numbers such as pi [default=15]. user_functions : dict, optional A dictionary where keys are ``FunctionClass`` instances and values are their string representations. Alternatively, the dictionary value can be a list of tuples i.e. [(argument_test, js_function_string)]. See below for examples. human : bool, optional If True, the result is a single string that may contain some constant declarations for the number symbols. If False, the same information is returned in a tuple of (symbols_to_declare, not_supported_functions, code_text). [default=True]. contract: bool, optional If True, ``Indexed`` instances are assumed to obey tensor contraction rules and the corresponding nested loops over indices are generated. Setting contract=False will not generate loops, instead the user is responsible to provide values for the indices in the code. [default=True]. Examples ======== >>> from sympy import jscode, symbols, Rational, sin, ceiling, Abs >>> x, tau = symbols("x, tau") >>> jscode((2*tau)**Rational(7, 2)) '8*Math.sqrt(2)*Math.pow(tau, 7/2)' >>> jscode(sin(x), assign_to="s") 's = Math.sin(x);' Custom printing can be defined for certain types by passing a dictionary of "type" : "function" to the ``user_functions`` kwarg. Alternatively, the dictionary value can be a list of tuples i.e. [(argument_test, js_function_string)]. >>> custom_functions = { ... "ceiling": "CEIL", ... "Abs": [(lambda x: not x.is_integer, "fabs"), ... (lambda x: x.is_integer, "ABS")] ... } >>> jscode(Abs(x) + ceiling(x), user_functions=custom_functions) 'fabs(x) + CEIL(x)' ``Piecewise`` expressions are converted into conditionals. If an ``assign_to`` variable is provided an if statement is created, otherwise the ternary operator is used. Note that if the ``Piecewise`` lacks a default term, represented by ``(expr, True)`` then an error will be thrown. This is to prevent generating an expression that may not evaluate to anything. >>> from sympy import Piecewise >>> expr = Piecewise((x + 1, x > 0), (x, True)) >>> print(jscode(expr, tau)) if (x > 0) { tau = x + 1; } else { tau = x; } Support for loops is provided through ``Indexed`` types. With ``contract=True`` these expressions will be turned into loops, whereas ``contract=False`` will just print the assignment expression that should be looped over: >>> from sympy import Eq, IndexedBase, Idx >>> len_y = 5 >>> y = IndexedBase('y', shape=(len_y,)) >>> t = IndexedBase('t', shape=(len_y,)) >>> Dy = IndexedBase('Dy', shape=(len_y-1,)) >>> i = Idx('i', len_y-1) >>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i])) >>> jscode(e.rhs, assign_to=e.lhs, contract=False) 'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);' Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions must be provided to ``assign_to``. 
Note that any expression that can be generated normally can also exist inside a Matrix: >>> from sympy import Matrix, MatrixSymbol >>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)]) >>> A = MatrixSymbol('A', 3, 1) >>> print(jscode(mat, A)) A[0] = Math.pow(x, 2); if (x > 0) { A[1] = x + 1; } else { A[1] = x; } A[2] = Math.sin(x); """ return JavascriptCodePrinter(settings).doprint(expr, assign_to)
jscode
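As a supplement to the jscode doctests above, the sketch below exercises the human=False mode described in the parameter list; the exact contents of the returned tuple are implementation details and are not asserted.

from sympy import jscode, pi, sin
from sympy.abc import x

# With human=False the printer returns
# (symbols_to_declare, not_supported_functions, code_text)
# instead of a single string.
decls, unsupported, code = jscode(pi*sin(x), assign_to='y', human=False)
print(code)   # e.g. y = Math.PI*Math.sin(x);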
sympy
68
sympy/utilities/iterables.py
def kbins(l, k, ordered=None): """ Return sequence ``l`` partitioned into ``k`` bins. Examples ======== The default is to give the items in the same order, but grouped into k partitions without any reordering: >>> from sympy.utilities.iterables import kbins >>> for p in kbins(list(range(5)), 2): ... print(p) ... [[0], [1, 2, 3, 4]] [[0, 1], [2, 3, 4]] [[0, 1, 2], [3, 4]] [[0, 1, 2, 3], [4]] The ``ordered`` flag is either None (to give the simple partition of the elements) or is a 2 digit integer indicating whether the order of the bins and the order of the items in the bins matters. Given:: A = [[0], [1, 2]] B = [[1, 2], [0]] C = [[2, 1], [0]] D = [[0], [2, 1]] the following values for ``ordered`` have the shown meanings:: 00 means A == B == C == D 01 means A == B 10 means A == D 11 means A == A >>> for ordered_flag in [None, 0, 1, 10, 11]: ... print('ordered = %s' % ordered_flag) ... for p in kbins(list(range(3)), 2, ordered=ordered_flag): ... print(' %s' % p) ... ordered = None [[0], [1, 2]] [[0, 1], [2]] ordered = 0 [[0, 1], [2]] [[0, 2], [1]] [[0], [1, 2]] ordered = 1 [[0], [1, 2]] [[0], [2, 1]] [[1], [0, 2]] [[1], [2, 0]] [[2], [0, 1]] [[2], [1, 0]] ordered = 10 [[0, 1], [2]] [[2], [0, 1]] [[0, 2], [1]] [[1], [0, 2]] [[0], [1, 2]] [[1, 2], [0]] ordered = 11 [[0], [1, 2]] [[0, 1], [2]] [[0], [2, 1]] [[0, 2], [1]] [[1], [0, 2]] [[1, 0], [2]] [[1], [2, 0]] [[1, 2], [0]] [[2], [0, 1]] [[2, 0], [1]] [[2], [1, 0]] [[2, 1], [0]] See Also ======== partitions, multiset_partitions """
/usr/src/app/target_test_cases/failed_tests_kbins.txt
def kbins(l, k, ordered=None): """ Return sequence ``l`` partitioned into ``k`` bins. Examples ======== The default is to give the items in the same order, but grouped into k partitions without any reordering: >>> from sympy.utilities.iterables import kbins >>> for p in kbins(list(range(5)), 2): ... print(p) ... [[0], [1, 2, 3, 4]] [[0, 1], [2, 3, 4]] [[0, 1, 2], [3, 4]] [[0, 1, 2, 3], [4]] The ``ordered`` flag is either None (to give the simple partition of the elements) or is a 2 digit integer indicating whether the order of the bins and the order of the items in the bins matters. Given:: A = [[0], [1, 2]] B = [[1, 2], [0]] C = [[2, 1], [0]] D = [[0], [2, 1]] the following values for ``ordered`` have the shown meanings:: 00 means A == B == C == D 01 means A == B 10 means A == D 11 means A == A >>> for ordered_flag in [None, 0, 1, 10, 11]: ... print('ordered = %s' % ordered_flag) ... for p in kbins(list(range(3)), 2, ordered=ordered_flag): ... print(' %s' % p) ... ordered = None [[0], [1, 2]] [[0, 1], [2]] ordered = 0 [[0, 1], [2]] [[0, 2], [1]] [[0], [1, 2]] ordered = 1 [[0], [1, 2]] [[0], [2, 1]] [[1], [0, 2]] [[1], [2, 0]] [[2], [0, 1]] [[2], [1, 0]] ordered = 10 [[0, 1], [2]] [[2], [0, 1]] [[0, 2], [1]] [[1], [0, 2]] [[0], [1, 2]] [[1, 2], [0]] ordered = 11 [[0], [1, 2]] [[0, 1], [2]] [[0], [2, 1]] [[0, 2], [1]] [[1], [0, 2]] [[1, 0], [2]] [[1], [2, 0]] [[1, 2], [0]] [[2], [0, 1]] [[2, 0], [1]] [[2], [1, 0]] [[2, 1], [0]] See Also ======== partitions, multiset_partitions """ if ordered is None: yield from sequence_partitions(l, k) elif ordered == 11: for pl in multiset_permutations(l): pl = list(pl) yield from sequence_partitions(pl, k) elif ordered == 00: yield from multiset_partitions(l, k) elif ordered == 10: for p in multiset_partitions(l, k): for perm in permutations(p): yield list(perm) elif ordered == 1: for kgot, p in partitions(len(l), k, size=True): if kgot != k: continue for li in multiset_permutations(l): rv = [] i = j = 0 li = list(li) for size, multiplicity in sorted(p.items()): for m in range(multiplicity): j = i + size rv.append(li[i: j]) i = j yield rv else: raise ValueError( 'ordered must be one of 00, 01, 10 or 11, not %s' % ordered)
kbins
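A minimal sketch of the kbins entry above, restating two of its own examples: the default contiguous split and the fully ordered case. Nothing here goes beyond what the quoted docstring already demonstrates.

# Usage sketch for kbins (drawn from the doctests above).
from sympy.utilities.iterables import kbins

# Default (ordered=None): contiguous partitions of the list into 2 bins.
for p in kbins(list(range(5)), 2):
    print(p)
# [[0], [1, 2, 3, 4]]
# [[0, 1], [2, 3, 4]]
# [[0, 1, 2], [3, 4]]
# [[0, 1, 2, 3], [4]]

# ordered=11: both bin order and item order matter, giving 12 partitions of range(3).
print(len(list(kbins(list(range(3)), 2, ordered=11))))   # 12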
sympy
69
sympy/matrices/expressions/kronecker.py
def kronecker_product(*matrices): """ The Kronecker product of two or more arguments. This computes the explicit Kronecker product for subclasses of ``MatrixBase`` i.e. explicit matrices. Otherwise, a symbolic ``KroneckerProduct`` object is returned. Examples ======== For ``MatrixSymbol`` arguments a ``KroneckerProduct`` object is returned. Elements of this matrix can be obtained by indexing, or for MatrixSymbols with known dimension the explicit matrix can be obtained with ``.as_explicit()`` >>> from sympy import kronecker_product, MatrixSymbol >>> A = MatrixSymbol('A', 2, 2) >>> B = MatrixSymbol('B', 2, 2) >>> kronecker_product(A) A >>> kronecker_product(A, B) KroneckerProduct(A, B) >>> kronecker_product(A, B)[0, 1] A[0, 0]*B[0, 1] >>> kronecker_product(A, B).as_explicit() Matrix([ [A[0, 0]*B[0, 0], A[0, 0]*B[0, 1], A[0, 1]*B[0, 0], A[0, 1]*B[0, 1]], [A[0, 0]*B[1, 0], A[0, 0]*B[1, 1], A[0, 1]*B[1, 0], A[0, 1]*B[1, 1]], [A[1, 0]*B[0, 0], A[1, 0]*B[0, 1], A[1, 1]*B[0, 0], A[1, 1]*B[0, 1]], [A[1, 0]*B[1, 0], A[1, 0]*B[1, 1], A[1, 1]*B[1, 0], A[1, 1]*B[1, 1]]]) For explicit matrices the Kronecker product is returned as a Matrix >>> from sympy import Matrix, kronecker_product >>> sigma_x = Matrix([ ... [0, 1], ... [1, 0]]) ... >>> Isigma_y = Matrix([ ... [0, 1], ... [-1, 0]]) ... >>> kronecker_product(sigma_x, Isigma_y) Matrix([ [ 0, 0, 0, 1], [ 0, 0, -1, 0], [ 0, 1, 0, 0], [-1, 0, 0, 0]]) See Also ======== KroneckerProduct """
/usr/src/app/target_test_cases/failed_tests_kronecker_product.txt
def kronecker_product(*matrices): """ The Kronecker product of two or more arguments. This computes the explicit Kronecker product for subclasses of ``MatrixBase`` i.e. explicit matrices. Otherwise, a symbolic ``KroneckerProduct`` object is returned. Examples ======== For ``MatrixSymbol`` arguments a ``KroneckerProduct`` object is returned. Elements of this matrix can be obtained by indexing, or for MatrixSymbols with known dimension the explicit matrix can be obtained with ``.as_explicit()`` >>> from sympy import kronecker_product, MatrixSymbol >>> A = MatrixSymbol('A', 2, 2) >>> B = MatrixSymbol('B', 2, 2) >>> kronecker_product(A) A >>> kronecker_product(A, B) KroneckerProduct(A, B) >>> kronecker_product(A, B)[0, 1] A[0, 0]*B[0, 1] >>> kronecker_product(A, B).as_explicit() Matrix([ [A[0, 0]*B[0, 0], A[0, 0]*B[0, 1], A[0, 1]*B[0, 0], A[0, 1]*B[0, 1]], [A[0, 0]*B[1, 0], A[0, 0]*B[1, 1], A[0, 1]*B[1, 0], A[0, 1]*B[1, 1]], [A[1, 0]*B[0, 0], A[1, 0]*B[0, 1], A[1, 1]*B[0, 0], A[1, 1]*B[0, 1]], [A[1, 0]*B[1, 0], A[1, 0]*B[1, 1], A[1, 1]*B[1, 0], A[1, 1]*B[1, 1]]]) For explicit matrices the Kronecker product is returned as a Matrix >>> from sympy import Matrix, kronecker_product >>> sigma_x = Matrix([ ... [0, 1], ... [1, 0]]) ... >>> Isigma_y = Matrix([ ... [0, 1], ... [-1, 0]]) ... >>> kronecker_product(sigma_x, Isigma_y) Matrix([ [ 0, 0, 0, 1], [ 0, 0, -1, 0], [ 0, 1, 0, 0], [-1, 0, 0, 0]]) See Also ======== KroneckerProduct """ if not matrices: raise TypeError("Empty Kronecker product is undefined") if len(matrices) == 1: return matrices[0] else: return KroneckerProduct(*matrices).doit()
kronecker_product
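A compact sketch of the kronecker_product entry above, covering the two cases its docstring distinguishes: symbolic MatrixSymbol arguments versus explicit matrices. The expected outputs are copied from the quoted doctests.

# Usage sketch for kronecker_product (drawn from the doctests above).
from sympy import Matrix, MatrixSymbol, kronecker_product

A = MatrixSymbol('A', 2, 2)
B = MatrixSymbol('B', 2, 2)
print(kronecker_product(A, B))         # KroneckerProduct(A, B)
print(kronecker_product(A, B)[0, 1])   # A[0, 0]*B[0, 1]

# Explicit matrices give an explicit Matrix result.
sigma_x = Matrix([[0, 1], [1, 0]])
Isigma_y = Matrix([[0, 1], [-1, 0]])
print(kronecker_product(sigma_x, Isigma_y))
# Matrix([[0, 0, 0, 1], [0, 0, -1, 0], [0, 1, 0, 0], [-1, 0, 0, 0]])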
sympy
70
sympy/utilities/lambdify.py
def lambdify(args, expr, modules=None, printer=None, use_imps=True, dummify=False, cse=False, docstring_limit=1000): """Convert a SymPy expression into a function that allows for fast numeric evaluation. .. warning:: This function uses ``exec``, and thus should not be used on unsanitized input. .. deprecated:: 1.7 Passing a set for the *args* parameter is deprecated as sets are unordered. Use an ordered iterable such as a list or tuple. Explanation =========== For example, to convert the SymPy expression ``sin(x) + cos(x)`` to an equivalent NumPy function that numerically evaluates it: >>> from sympy import sin, cos, symbols, lambdify >>> import numpy as np >>> x = symbols('x') >>> expr = sin(x) + cos(x) >>> expr sin(x) + cos(x) >>> f = lambdify(x, expr, 'numpy') >>> a = np.array([1, 2]) >>> f(a) [1.38177329 0.49315059] The primary purpose of this function is to provide a bridge from SymPy expressions to numerical libraries such as NumPy, SciPy, NumExpr, mpmath, and tensorflow. In general, SymPy functions do not work with objects from other libraries, such as NumPy arrays, and functions from numeric libraries like NumPy or mpmath do not work on SymPy expressions. ``lambdify`` bridges the two by converting a SymPy expression to an equivalent numeric function. The basic workflow with ``lambdify`` is to first create a SymPy expression representing whatever mathematical function you wish to evaluate. This should be done using only SymPy functions and expressions. Then, use ``lambdify`` to convert this to an equivalent function for numerical evaluation. For instance, above we created ``expr`` using the SymPy symbol ``x`` and SymPy functions ``sin`` and ``cos``, then converted it to an equivalent NumPy function ``f``, and called it on a NumPy array ``a``. Parameters ========== args : List[Symbol] A variable or a list of variables whose nesting represents the nesting of the arguments that will be passed to the function. Variables can be symbols, undefined functions, or matrix symbols. >>> from sympy import Eq >>> from sympy.abc import x, y, z The list of variables should match the structure of how the arguments will be passed to the function. Simply enclose the parameters as they will be passed in a list. To call a function like ``f(x)`` then ``[x]`` should be the first argument to ``lambdify``; for this case a single ``x`` can also be used: >>> f = lambdify(x, x + 1) >>> f(1) 2 >>> f = lambdify([x], x + 1) >>> f(1) 2 To call a function like ``f(x, y)`` then ``[x, y]`` will be the first argument of the ``lambdify``: >>> f = lambdify([x, y], x + y) >>> f(1, 1) 2 To call a function with a single 3-element tuple like ``f((x, y, z))`` then ``[(x, y, z)]`` will be the first argument of the ``lambdify``: >>> f = lambdify([(x, y, z)], Eq(z**2, x**2 + y**2)) >>> f((3, 4, 5)) True If two args will be passed and the first is a scalar but the second is a tuple with two arguments then the items in the list should match that structure: >>> f = lambdify([x, (y, z)], x + y + z) >>> f(1, (2, 3)) 6 expr : Expr An expression, list of expressions, or matrix to be evaluated. Lists may be nested. If the expression is a list, the output will also be a list. >>> f = lambdify(x, [x, [x + 1, x + 2]]) >>> f(1) [1, [2, 3]] If it is a matrix, an array will be returned (for the NumPy module). >>> from sympy import Matrix >>> f = lambdify(x, Matrix([x, x + 1])) >>> f(1) [[1] [2]] Note that the argument order here (variables then expression) is used to emulate the Python ``lambda`` keyword. 
``lambdify(x, expr)`` works (roughly) like ``lambda x: expr`` (see :ref:`lambdify-how-it-works` below). modules : str, optional Specifies the numeric library to use. If not specified, *modules* defaults to: - ``["scipy", "numpy"]`` if SciPy is installed - ``["numpy"]`` if only NumPy is installed - ``["math", "mpmath", "sympy"]`` if neither is installed. That is, SymPy functions are replaced as far as possible by either ``scipy`` or ``numpy`` functions if available, and Python's standard library ``math``, or ``mpmath`` functions otherwise. *modules* can be one of the following types: - The strings ``"math"``, ``"mpmath"``, ``"numpy"``, ``"numexpr"``, ``"scipy"``, ``"sympy"``, or ``"tensorflow"`` or ``"jax"``. This uses the corresponding printer and namespace mapping for that module. - A module (e.g., ``math``). This uses the global namespace of the module. If the module is one of the above known modules, it will also use the corresponding printer and namespace mapping (i.e., ``modules=numpy`` is equivalent to ``modules="numpy"``). - A dictionary that maps names of SymPy functions to arbitrary functions (e.g., ``{'sin': custom_sin}``). - A list that contains a mix of the arguments above, with higher priority given to entries appearing first (e.g., to use the NumPy module but override the ``sin`` function with a custom version, you can use ``[{'sin': custom_sin}, 'numpy']``). dummify : bool, optional Whether or not the variables in the provided expression that are not valid Python identifiers are substituted with dummy symbols. This allows for undefined functions like ``Function('f')(t)`` to be supplied as arguments. By default, the variables are only dummified if they are not valid Python identifiers. Set ``dummify=True`` to replace all arguments with dummy symbols (if ``args`` is not a string) - for example, to ensure that the arguments do not redefine any built-in names. cse : bool, or callable, optional Large expressions can be computed more efficiently when common subexpressions are identified and precomputed before being used multiple time. Finding the subexpressions will make creation of the 'lambdify' function slower, however. When ``True``, ``sympy.simplify.cse`` is used, otherwise (the default) the user may pass a function matching the ``cse`` signature. docstring_limit : int or None When lambdifying large expressions, a significant proportion of the time spent inside ``lambdify`` is spent producing a string representation of the expression for use in the automatically generated docstring of the returned function. For expressions containing hundreds or more nodes the resulting docstring often becomes so long and dense that it is difficult to read. To reduce the runtime of lambdify, the rendering of the full expression inside the docstring can be disabled. When ``None``, the full expression is rendered in the docstring. When ``0`` or a negative ``int``, an ellipsis is rendering in the docstring instead of the expression. When a strictly positive ``int``, if the number of nodes in the expression exceeds ``docstring_limit`` an ellipsis is rendered in the docstring, otherwise a string representation of the expression is rendered as normal. The default is ``1000``. 
Examples ======== >>> from sympy.utilities.lambdify import implemented_function >>> from sympy import sqrt, sin, Matrix >>> from sympy import Function >>> from sympy.abc import w, x, y, z >>> f = lambdify(x, x**2) >>> f(2) 4 >>> f = lambdify((x, y, z), [z, y, x]) >>> f(1,2,3) [3, 2, 1] >>> f = lambdify(x, sqrt(x)) >>> f(4) 2.0 >>> f = lambdify((x, y), sin(x*y)**2) >>> f(0, 5) 0.0 >>> row = lambdify((x, y), Matrix((x, x + y)).T, modules='sympy') >>> row(1, 2) Matrix([[1, 3]]) ``lambdify`` can be used to translate SymPy expressions into mpmath functions. This may be preferable to using ``evalf`` (which uses mpmath on the backend) in some cases. >>> f = lambdify(x, sin(x), 'mpmath') >>> f(1) 0.8414709848078965 Tuple arguments are handled and the lambdified function should be called with the same type of arguments as were used to create the function: >>> f = lambdify((x, (y, z)), x + y) >>> f(1, (2, 4)) 3 The ``flatten`` function can be used to always work with flattened arguments: >>> from sympy.utilities.iterables import flatten >>> args = w, (x, (y, z)) >>> vals = 1, (2, (3, 4)) >>> f = lambdify(flatten(args), w + x + y + z) >>> f(*flatten(vals)) 10 Functions present in ``expr`` can also carry their own numerical implementations, in a callable attached to the ``_imp_`` attribute. This can be used with undefined functions using the ``implemented_function`` factory: >>> f = implemented_function(Function('f'), lambda x: x+1) >>> func = lambdify(x, f(x)) >>> func(4) 5 ``lambdify`` always prefers ``_imp_`` implementations to implementations in other namespaces, unless the ``use_imps`` input parameter is False. Usage with Tensorflow: >>> import tensorflow as tf >>> from sympy import Max, sin, lambdify >>> from sympy.abc import x >>> f = Max(x, sin(x)) >>> func = lambdify(x, f, 'tensorflow') After tensorflow v2, eager execution is enabled by default. If you want to get the compatible result across tensorflow v1 and v2 as same as this tutorial, run this line. >>> tf.compat.v1.enable_eager_execution() If you have eager execution enabled, you can get the result out immediately as you can use numpy. If you pass tensorflow objects, you may get an ``EagerTensor`` object instead of value. >>> result = func(tf.constant(1.0)) >>> print(result) tf.Tensor(1.0, shape=(), dtype=float32) >>> print(result.__class__) <class 'tensorflow.python.framework.ops.EagerTensor'> You can use ``.numpy()`` to get the numpy value of the tensor. >>> result.numpy() 1.0 >>> var = tf.Variable(2.0) >>> result = func(var) # also works for tf.Variable and tf.Placeholder >>> result.numpy() 2.0 And it works with any shape array. >>> tensor = tf.constant([[1.0, 2.0], [3.0, 4.0]]) >>> result = func(tensor) >>> result.numpy() [[1. 2.] [3. 4.]] Notes ===== - For functions involving large array calculations, numexpr can provide a significant speedup over numpy. Please note that the available functions for numexpr are more limited than numpy but can be expanded with ``implemented_function`` and user defined subclasses of Function. If specified, numexpr may be the only option in modules. The official list of numexpr functions can be found at: https://numexpr.readthedocs.io/en/latest/user_guide.html#supported-functions - In the above examples, the generated functions can accept scalar values or numpy arrays as arguments. 
However, in some cases the generated function relies on the input being a numpy array: >>> import numpy >>> from sympy import Piecewise >>> from sympy.testing.pytest import ignore_warnings >>> f = lambdify(x, Piecewise((x, x <= 1), (1/x, x > 1)), "numpy") >>> with ignore_warnings(RuntimeWarning): ... f(numpy.array([-1, 0, 1, 2])) [-1. 0. 1. 0.5] >>> f(0) Traceback (most recent call last): ... ZeroDivisionError: division by zero In such cases, the input should be wrapped in a numpy array: >>> with ignore_warnings(RuntimeWarning): ... float(f(numpy.array([0]))) 0.0 Or if numpy functionality is not required another module can be used: >>> f = lambdify(x, Piecewise((x, x <= 1), (1/x, x > 1)), "math") >>> f(0) 0 .. _lambdify-how-it-works: How it works ============ When using this function, it helps a great deal to have an idea of what it is doing. At its core, lambdify is nothing more than a namespace translation, on top of a special printer that makes some corner cases work properly. To understand lambdify, first we must properly understand how Python namespaces work. Say we had two files. One called ``sin_cos_sympy.py``, with .. code:: python # sin_cos_sympy.py from sympy.functions.elementary.trigonometric import (cos, sin) def sin_cos(x): return sin(x) + cos(x) and one called ``sin_cos_numpy.py`` with .. code:: python # sin_cos_numpy.py from numpy import sin, cos def sin_cos(x): return sin(x) + cos(x) The two files define an identical function ``sin_cos``. However, in the first file, ``sin`` and ``cos`` are defined as the SymPy ``sin`` and ``cos``. In the second, they are defined as the NumPy versions. If we were to import the first file and use the ``sin_cos`` function, we would get something like >>> from sin_cos_sympy import sin_cos # doctest: +SKIP >>> sin_cos(1) # doctest: +SKIP cos(1) + sin(1) On the other hand, if we imported ``sin_cos`` from the second file, we would get >>> from sin_cos_numpy import sin_cos # doctest: +SKIP >>> sin_cos(1) # doctest: +SKIP 1.38177329068 In the first case we got a symbolic output, because it used the symbolic ``sin`` and ``cos`` functions from SymPy. In the second, we got a numeric result, because ``sin_cos`` used the numeric ``sin`` and ``cos`` functions from NumPy. But notice that the versions of ``sin`` and ``cos`` that were used was not inherent to the ``sin_cos`` function definition. Both ``sin_cos`` definitions are exactly the same. Rather, it was based on the names defined at the module where the ``sin_cos`` function was defined. The key point here is that when function in Python references a name that is not defined in the function, that name is looked up in the "global" namespace of the module where that function is defined. Now, in Python, we can emulate this behavior without actually writing a file to disk using the ``exec`` function. ``exec`` takes a string containing a block of Python code, and a dictionary that should contain the global variables of the module. It then executes the code "in" that dictionary, as if it were the module globals. The following is equivalent to the ``sin_cos`` defined in ``sin_cos_sympy.py``: >>> import sympy >>> module_dictionary = {'sin': sympy.sin, 'cos': sympy.cos} >>> exec(''' ... def sin_cos(x): ... return sin(x) + cos(x) ... ''', module_dictionary) >>> sin_cos = module_dictionary['sin_cos'] >>> sin_cos(1) cos(1) + sin(1) and similarly with ``sin_cos_numpy``: >>> import numpy >>> module_dictionary = {'sin': numpy.sin, 'cos': numpy.cos} >>> exec(''' ... def sin_cos(x): ... return sin(x) + cos(x) ... 
''', module_dictionary) >>> sin_cos = module_dictionary['sin_cos'] >>> sin_cos(1) 1.38177329068 So now we can get an idea of how ``lambdify`` works. The name "lambdify" comes from the fact that we can think of something like ``lambdify(x, sin(x) + cos(x), 'numpy')`` as ``lambda x: sin(x) + cos(x)``, where ``sin`` and ``cos`` come from the ``numpy`` namespace. This is also why the symbols argument is first in ``lambdify``, as opposed to most SymPy functions where it comes after the expression: to better mimic the ``lambda`` keyword. ``lambdify`` takes the input expression (like ``sin(x) + cos(x)``) and 1. Converts it to a string 2. Creates a module globals dictionary based on the modules that are passed in (by default, it uses the NumPy module) 3. Creates the string ``"def func({vars}): return {expr}"``, where ``{vars}`` is the list of variables separated by commas, and ``{expr}`` is the string created in step 1., then ``exec``s that string with the module globals namespace and returns ``func``. In fact, functions returned by ``lambdify`` support inspection. So you can see exactly how they are defined by using ``inspect.getsource``, or ``??`` if you are using IPython or the Jupyter notebook. >>> f = lambdify(x, sin(x) + cos(x)) >>> import inspect >>> print(inspect.getsource(f)) def _lambdifygenerated(x): return sin(x) + cos(x) This shows us the source code of the function, but not the namespace it was defined in. We can inspect that by looking at the ``__globals__`` attribute of ``f``: >>> f.__globals__['sin'] <ufunc 'sin'> >>> f.__globals__['cos'] <ufunc 'cos'> >>> f.__globals__['sin'] is numpy.sin True This shows us that ``sin`` and ``cos`` in the namespace of ``f`` will be ``numpy.sin`` and ``numpy.cos``. Note that there are some convenience layers in each of these steps, but at the core, this is how ``lambdify`` works. Step 1 is done using the ``LambdaPrinter`` printers defined in the printing module (see :mod:`sympy.printing.lambdarepr`). This allows different SymPy expressions to define how they should be converted to a string for different modules. You can change which printer ``lambdify`` uses by passing a custom printer in to the ``printer`` argument. Step 2 is augmented by certain translations. There are default translations for each module, but you can provide your own by passing a list to the ``modules`` argument. For instance, >>> def mysin(x): ... print('taking the sin of', x) ... return numpy.sin(x) ... >>> f = lambdify(x, sin(x), [{'sin': mysin}, 'numpy']) >>> f(1) taking the sin of 1 0.8414709848078965 The globals dictionary is generated from the list by merging the dictionary ``{'sin': mysin}`` and the module dictionary for NumPy. The merging is done so that earlier items take precedence, which is why ``mysin`` is used above instead of ``numpy.sin``. If you want to modify the way ``lambdify`` works for a given function, it is usually easiest to do so by modifying the globals dictionary as such. In more complicated cases, it may be necessary to create and pass in a custom printer. Finally, step 3 is augmented with certain convenience operations, such as the addition of a docstring. Understanding how ``lambdify`` works can make it easier to avoid certain gotchas when using it. For instance, a common mistake is to create a lambdified function for one module (say, NumPy), and pass it objects from another (say, a SymPy expression). 
For instance, say we create >>> from sympy.abc import x >>> f = lambdify(x, x + 1, 'numpy') Now if we pass in a NumPy array, we get that array plus 1 >>> import numpy >>> a = numpy.array([1, 2]) >>> f(a) [2 3] But what happens if you make the mistake of passing in a SymPy expression instead of a NumPy array: >>> f(x + 1) x + 2 This worked, but it was only by accident. Now take a different lambdified function: >>> from sympy import sin >>> g = lambdify(x, x + sin(x), 'numpy') This works as expected on NumPy arrays: >>> g(a) [1.84147098 2.90929743] But if we try to pass in a SymPy expression, it fails >>> g(x + 1) Traceback (most recent call last): ... TypeError: loop of ufunc does not support argument 0 of type Add which has no callable sin method Now, let's look at what happened. The reason this fails is that ``g`` calls ``numpy.sin`` on the input expression, and ``numpy.sin`` does not know how to operate on a SymPy object. **As a general rule, NumPy functions do not know how to operate on SymPy expressions, and SymPy functions do not know how to operate on NumPy arrays. This is why lambdify exists: to provide a bridge between SymPy and NumPy.** However, why is it that ``f`` did work? That's because ``f`` does not call any functions, it only adds 1. So the resulting function that is created, ``def _lambdifygenerated(x): return x + 1`` does not depend on the globals namespace it is defined in. Thus it works, but only by accident. A future version of ``lambdify`` may remove this behavior. Be aware that certain implementation details described here may change in future versions of SymPy. The API of passing in custom modules and printers will not change, but the details of how a lambda function is created may change. However, the basic idea will remain the same, and understanding it will be helpful to understanding the behavior of lambdify. **In general: you should create lambdified functions for one module (say, NumPy), and only pass it input types that are compatible with that module (say, NumPy arrays).** Remember that by default, if the ``module`` argument is not provided, ``lambdify`` creates functions using the NumPy and SciPy namespaces. """
/usr/src/app/target_test_cases/failed_tests_lambdify.txt
def lambdify(args, expr, modules=None, printer=None, use_imps=True, dummify=False, cse=False, docstring_limit=1000): """Convert a SymPy expression into a function that allows for fast numeric evaluation. .. warning:: This function uses ``exec``, and thus should not be used on unsanitized input. .. deprecated:: 1.7 Passing a set for the *args* parameter is deprecated as sets are unordered. Use an ordered iterable such as a list or tuple. Explanation =========== For example, to convert the SymPy expression ``sin(x) + cos(x)`` to an equivalent NumPy function that numerically evaluates it: >>> from sympy import sin, cos, symbols, lambdify >>> import numpy as np >>> x = symbols('x') >>> expr = sin(x) + cos(x) >>> expr sin(x) + cos(x) >>> f = lambdify(x, expr, 'numpy') >>> a = np.array([1, 2]) >>> f(a) [1.38177329 0.49315059] The primary purpose of this function is to provide a bridge from SymPy expressions to numerical libraries such as NumPy, SciPy, NumExpr, mpmath, and tensorflow. In general, SymPy functions do not work with objects from other libraries, such as NumPy arrays, and functions from numeric libraries like NumPy or mpmath do not work on SymPy expressions. ``lambdify`` bridges the two by converting a SymPy expression to an equivalent numeric function. The basic workflow with ``lambdify`` is to first create a SymPy expression representing whatever mathematical function you wish to evaluate. This should be done using only SymPy functions and expressions. Then, use ``lambdify`` to convert this to an equivalent function for numerical evaluation. For instance, above we created ``expr`` using the SymPy symbol ``x`` and SymPy functions ``sin`` and ``cos``, then converted it to an equivalent NumPy function ``f``, and called it on a NumPy array ``a``. Parameters ========== args : List[Symbol] A variable or a list of variables whose nesting represents the nesting of the arguments that will be passed to the function. Variables can be symbols, undefined functions, or matrix symbols. >>> from sympy import Eq >>> from sympy.abc import x, y, z The list of variables should match the structure of how the arguments will be passed to the function. Simply enclose the parameters as they will be passed in a list. To call a function like ``f(x)`` then ``[x]`` should be the first argument to ``lambdify``; for this case a single ``x`` can also be used: >>> f = lambdify(x, x + 1) >>> f(1) 2 >>> f = lambdify([x], x + 1) >>> f(1) 2 To call a function like ``f(x, y)`` then ``[x, y]`` will be the first argument of the ``lambdify``: >>> f = lambdify([x, y], x + y) >>> f(1, 1) 2 To call a function with a single 3-element tuple like ``f((x, y, z))`` then ``[(x, y, z)]`` will be the first argument of the ``lambdify``: >>> f = lambdify([(x, y, z)], Eq(z**2, x**2 + y**2)) >>> f((3, 4, 5)) True If two args will be passed and the first is a scalar but the second is a tuple with two arguments then the items in the list should match that structure: >>> f = lambdify([x, (y, z)], x + y + z) >>> f(1, (2, 3)) 6 expr : Expr An expression, list of expressions, or matrix to be evaluated. Lists may be nested. If the expression is a list, the output will also be a list. >>> f = lambdify(x, [x, [x + 1, x + 2]]) >>> f(1) [1, [2, 3]] If it is a matrix, an array will be returned (for the NumPy module). >>> from sympy import Matrix >>> f = lambdify(x, Matrix([x, x + 1])) >>> f(1) [[1] [2]] Note that the argument order here (variables then expression) is used to emulate the Python ``lambda`` keyword. 
``lambdify(x, expr)`` works (roughly) like ``lambda x: expr`` (see :ref:`lambdify-how-it-works` below). modules : str, optional Specifies the numeric library to use. If not specified, *modules* defaults to: - ``["scipy", "numpy"]`` if SciPy is installed - ``["numpy"]`` if only NumPy is installed - ``["math", "mpmath", "sympy"]`` if neither is installed. That is, SymPy functions are replaced as far as possible by either ``scipy`` or ``numpy`` functions if available, and Python's standard library ``math``, or ``mpmath`` functions otherwise. *modules* can be one of the following types: - The strings ``"math"``, ``"mpmath"``, ``"numpy"``, ``"numexpr"``, ``"scipy"``, ``"sympy"``, or ``"tensorflow"`` or ``"jax"``. This uses the corresponding printer and namespace mapping for that module. - A module (e.g., ``math``). This uses the global namespace of the module. If the module is one of the above known modules, it will also use the corresponding printer and namespace mapping (i.e., ``modules=numpy`` is equivalent to ``modules="numpy"``). - A dictionary that maps names of SymPy functions to arbitrary functions (e.g., ``{'sin': custom_sin}``). - A list that contains a mix of the arguments above, with higher priority given to entries appearing first (e.g., to use the NumPy module but override the ``sin`` function with a custom version, you can use ``[{'sin': custom_sin}, 'numpy']``). dummify : bool, optional Whether or not the variables in the provided expression that are not valid Python identifiers are substituted with dummy symbols. This allows for undefined functions like ``Function('f')(t)`` to be supplied as arguments. By default, the variables are only dummified if they are not valid Python identifiers. Set ``dummify=True`` to replace all arguments with dummy symbols (if ``args`` is not a string) - for example, to ensure that the arguments do not redefine any built-in names. cse : bool, or callable, optional Large expressions can be computed more efficiently when common subexpressions are identified and precomputed before being used multiple time. Finding the subexpressions will make creation of the 'lambdify' function slower, however. When ``True``, ``sympy.simplify.cse`` is used, otherwise (the default) the user may pass a function matching the ``cse`` signature. docstring_limit : int or None When lambdifying large expressions, a significant proportion of the time spent inside ``lambdify`` is spent producing a string representation of the expression for use in the automatically generated docstring of the returned function. For expressions containing hundreds or more nodes the resulting docstring often becomes so long and dense that it is difficult to read. To reduce the runtime of lambdify, the rendering of the full expression inside the docstring can be disabled. When ``None``, the full expression is rendered in the docstring. When ``0`` or a negative ``int``, an ellipsis is rendering in the docstring instead of the expression. When a strictly positive ``int``, if the number of nodes in the expression exceeds ``docstring_limit`` an ellipsis is rendered in the docstring, otherwise a string representation of the expression is rendered as normal. The default is ``1000``. 
Examples ======== >>> from sympy.utilities.lambdify import implemented_function >>> from sympy import sqrt, sin, Matrix >>> from sympy import Function >>> from sympy.abc import w, x, y, z >>> f = lambdify(x, x**2) >>> f(2) 4 >>> f = lambdify((x, y, z), [z, y, x]) >>> f(1,2,3) [3, 2, 1] >>> f = lambdify(x, sqrt(x)) >>> f(4) 2.0 >>> f = lambdify((x, y), sin(x*y)**2) >>> f(0, 5) 0.0 >>> row = lambdify((x, y), Matrix((x, x + y)).T, modules='sympy') >>> row(1, 2) Matrix([[1, 3]]) ``lambdify`` can be used to translate SymPy expressions into mpmath functions. This may be preferable to using ``evalf`` (which uses mpmath on the backend) in some cases. >>> f = lambdify(x, sin(x), 'mpmath') >>> f(1) 0.8414709848078965 Tuple arguments are handled and the lambdified function should be called with the same type of arguments as were used to create the function: >>> f = lambdify((x, (y, z)), x + y) >>> f(1, (2, 4)) 3 The ``flatten`` function can be used to always work with flattened arguments: >>> from sympy.utilities.iterables import flatten >>> args = w, (x, (y, z)) >>> vals = 1, (2, (3, 4)) >>> f = lambdify(flatten(args), w + x + y + z) >>> f(*flatten(vals)) 10 Functions present in ``expr`` can also carry their own numerical implementations, in a callable attached to the ``_imp_`` attribute. This can be used with undefined functions using the ``implemented_function`` factory: >>> f = implemented_function(Function('f'), lambda x: x+1) >>> func = lambdify(x, f(x)) >>> func(4) 5 ``lambdify`` always prefers ``_imp_`` implementations to implementations in other namespaces, unless the ``use_imps`` input parameter is False. Usage with Tensorflow: >>> import tensorflow as tf >>> from sympy import Max, sin, lambdify >>> from sympy.abc import x >>> f = Max(x, sin(x)) >>> func = lambdify(x, f, 'tensorflow') After tensorflow v2, eager execution is enabled by default. If you want to get the compatible result across tensorflow v1 and v2 as same as this tutorial, run this line. >>> tf.compat.v1.enable_eager_execution() If you have eager execution enabled, you can get the result out immediately as you can use numpy. If you pass tensorflow objects, you may get an ``EagerTensor`` object instead of value. >>> result = func(tf.constant(1.0)) >>> print(result) tf.Tensor(1.0, shape=(), dtype=float32) >>> print(result.__class__) <class 'tensorflow.python.framework.ops.EagerTensor'> You can use ``.numpy()`` to get the numpy value of the tensor. >>> result.numpy() 1.0 >>> var = tf.Variable(2.0) >>> result = func(var) # also works for tf.Variable and tf.Placeholder >>> result.numpy() 2.0 And it works with any shape array. >>> tensor = tf.constant([[1.0, 2.0], [3.0, 4.0]]) >>> result = func(tensor) >>> result.numpy() [[1. 2.] [3. 4.]] Notes ===== - For functions involving large array calculations, numexpr can provide a significant speedup over numpy. Please note that the available functions for numexpr are more limited than numpy but can be expanded with ``implemented_function`` and user defined subclasses of Function. If specified, numexpr may be the only option in modules. The official list of numexpr functions can be found at: https://numexpr.readthedocs.io/en/latest/user_guide.html#supported-functions - In the above examples, the generated functions can accept scalar values or numpy arrays as arguments. 
However, in some cases the generated function relies on the input being a numpy array: >>> import numpy >>> from sympy import Piecewise >>> from sympy.testing.pytest import ignore_warnings >>> f = lambdify(x, Piecewise((x, x <= 1), (1/x, x > 1)), "numpy") >>> with ignore_warnings(RuntimeWarning): ... f(numpy.array([-1, 0, 1, 2])) [-1. 0. 1. 0.5] >>> f(0) Traceback (most recent call last): ... ZeroDivisionError: division by zero In such cases, the input should be wrapped in a numpy array: >>> with ignore_warnings(RuntimeWarning): ... float(f(numpy.array([0]))) 0.0 Or if numpy functionality is not required another module can be used: >>> f = lambdify(x, Piecewise((x, x <= 1), (1/x, x > 1)), "math") >>> f(0) 0 .. _lambdify-how-it-works: How it works ============ When using this function, it helps a great deal to have an idea of what it is doing. At its core, lambdify is nothing more than a namespace translation, on top of a special printer that makes some corner cases work properly. To understand lambdify, first we must properly understand how Python namespaces work. Say we had two files. One called ``sin_cos_sympy.py``, with .. code:: python # sin_cos_sympy.py from sympy.functions.elementary.trigonometric import (cos, sin) def sin_cos(x): return sin(x) + cos(x) and one called ``sin_cos_numpy.py`` with .. code:: python # sin_cos_numpy.py from numpy import sin, cos def sin_cos(x): return sin(x) + cos(x) The two files define an identical function ``sin_cos``. However, in the first file, ``sin`` and ``cos`` are defined as the SymPy ``sin`` and ``cos``. In the second, they are defined as the NumPy versions. If we were to import the first file and use the ``sin_cos`` function, we would get something like >>> from sin_cos_sympy import sin_cos # doctest: +SKIP >>> sin_cos(1) # doctest: +SKIP cos(1) + sin(1) On the other hand, if we imported ``sin_cos`` from the second file, we would get >>> from sin_cos_numpy import sin_cos # doctest: +SKIP >>> sin_cos(1) # doctest: +SKIP 1.38177329068 In the first case we got a symbolic output, because it used the symbolic ``sin`` and ``cos`` functions from SymPy. In the second, we got a numeric result, because ``sin_cos`` used the numeric ``sin`` and ``cos`` functions from NumPy. But notice that the versions of ``sin`` and ``cos`` that were used was not inherent to the ``sin_cos`` function definition. Both ``sin_cos`` definitions are exactly the same. Rather, it was based on the names defined at the module where the ``sin_cos`` function was defined. The key point here is that when function in Python references a name that is not defined in the function, that name is looked up in the "global" namespace of the module where that function is defined. Now, in Python, we can emulate this behavior without actually writing a file to disk using the ``exec`` function. ``exec`` takes a string containing a block of Python code, and a dictionary that should contain the global variables of the module. It then executes the code "in" that dictionary, as if it were the module globals. The following is equivalent to the ``sin_cos`` defined in ``sin_cos_sympy.py``: >>> import sympy >>> module_dictionary = {'sin': sympy.sin, 'cos': sympy.cos} >>> exec(''' ... def sin_cos(x): ... return sin(x) + cos(x) ... ''', module_dictionary) >>> sin_cos = module_dictionary['sin_cos'] >>> sin_cos(1) cos(1) + sin(1) and similarly with ``sin_cos_numpy``: >>> import numpy >>> module_dictionary = {'sin': numpy.sin, 'cos': numpy.cos} >>> exec(''' ... def sin_cos(x): ... return sin(x) + cos(x) ... 
''', module_dictionary) >>> sin_cos = module_dictionary['sin_cos'] >>> sin_cos(1) 1.38177329068 So now we can get an idea of how ``lambdify`` works. The name "lambdify" comes from the fact that we can think of something like ``lambdify(x, sin(x) + cos(x), 'numpy')`` as ``lambda x: sin(x) + cos(x)``, where ``sin`` and ``cos`` come from the ``numpy`` namespace. This is also why the symbols argument is first in ``lambdify``, as opposed to most SymPy functions where it comes after the expression: to better mimic the ``lambda`` keyword. ``lambdify`` takes the input expression (like ``sin(x) + cos(x)``) and 1. Converts it to a string 2. Creates a module globals dictionary based on the modules that are passed in (by default, it uses the NumPy module) 3. Creates the string ``"def func({vars}): return {expr}"``, where ``{vars}`` is the list of variables separated by commas, and ``{expr}`` is the string created in step 1., then ``exec``s that string with the module globals namespace and returns ``func``. In fact, functions returned by ``lambdify`` support inspection. So you can see exactly how they are defined by using ``inspect.getsource``, or ``??`` if you are using IPython or the Jupyter notebook. >>> f = lambdify(x, sin(x) + cos(x)) >>> import inspect >>> print(inspect.getsource(f)) def _lambdifygenerated(x): return sin(x) + cos(x) This shows us the source code of the function, but not the namespace it was defined in. We can inspect that by looking at the ``__globals__`` attribute of ``f``: >>> f.__globals__['sin'] <ufunc 'sin'> >>> f.__globals__['cos'] <ufunc 'cos'> >>> f.__globals__['sin'] is numpy.sin True This shows us that ``sin`` and ``cos`` in the namespace of ``f`` will be ``numpy.sin`` and ``numpy.cos``. Note that there are some convenience layers in each of these steps, but at the core, this is how ``lambdify`` works. Step 1 is done using the ``LambdaPrinter`` printers defined in the printing module (see :mod:`sympy.printing.lambdarepr`). This allows different SymPy expressions to define how they should be converted to a string for different modules. You can change which printer ``lambdify`` uses by passing a custom printer in to the ``printer`` argument. Step 2 is augmented by certain translations. There are default translations for each module, but you can provide your own by passing a list to the ``modules`` argument. For instance, >>> def mysin(x): ... print('taking the sin of', x) ... return numpy.sin(x) ... >>> f = lambdify(x, sin(x), [{'sin': mysin}, 'numpy']) >>> f(1) taking the sin of 1 0.8414709848078965 The globals dictionary is generated from the list by merging the dictionary ``{'sin': mysin}`` and the module dictionary for NumPy. The merging is done so that earlier items take precedence, which is why ``mysin`` is used above instead of ``numpy.sin``. If you want to modify the way ``lambdify`` works for a given function, it is usually easiest to do so by modifying the globals dictionary as such. In more complicated cases, it may be necessary to create and pass in a custom printer. Finally, step 3 is augmented with certain convenience operations, such as the addition of a docstring. Understanding how ``lambdify`` works can make it easier to avoid certain gotchas when using it. For instance, a common mistake is to create a lambdified function for one module (say, NumPy), and pass it objects from another (say, a SymPy expression). 
For instance, say we create >>> from sympy.abc import x >>> f = lambdify(x, x + 1, 'numpy') Now if we pass in a NumPy array, we get that array plus 1 >>> import numpy >>> a = numpy.array([1, 2]) >>> f(a) [2 3] But what happens if you make the mistake of passing in a SymPy expression instead of a NumPy array: >>> f(x + 1) x + 2 This worked, but it was only by accident. Now take a different lambdified function: >>> from sympy import sin >>> g = lambdify(x, x + sin(x), 'numpy') This works as expected on NumPy arrays: >>> g(a) [1.84147098 2.90929743] But if we try to pass in a SymPy expression, it fails >>> g(x + 1) Traceback (most recent call last): ... TypeError: loop of ufunc does not support argument 0 of type Add which has no callable sin method Now, let's look at what happened. The reason this fails is that ``g`` calls ``numpy.sin`` on the input expression, and ``numpy.sin`` does not know how to operate on a SymPy object. **As a general rule, NumPy functions do not know how to operate on SymPy expressions, and SymPy functions do not know how to operate on NumPy arrays. This is why lambdify exists: to provide a bridge between SymPy and NumPy.** However, why is it that ``f`` did work? That's because ``f`` does not call any functions, it only adds 1. So the resulting function that is created, ``def _lambdifygenerated(x): return x + 1`` does not depend on the globals namespace it is defined in. Thus it works, but only by accident. A future version of ``lambdify`` may remove this behavior. Be aware that certain implementation details described here may change in future versions of SymPy. The API of passing in custom modules and printers will not change, but the details of how a lambda function is created may change. However, the basic idea will remain the same, and understanding it will be helpful to understanding the behavior of lambdify. **In general: you should create lambdified functions for one module (say, NumPy), and only pass it input types that are compatible with that module (say, NumPy arrays).** Remember that by default, if the ``module`` argument is not provided, ``lambdify`` creates functions using the NumPy and SciPy namespaces. """ from sympy.core.symbol import Symbol from sympy.core.expr import Expr # If the user hasn't specified any modules, use what is available. if modules is None: try: _import("scipy") except ImportError: try: _import("numpy") except ImportError: # Use either numpy (if available) or python.math where possible. # XXX: This leads to different behaviour on different systems and # might be the reason for irreproducible errors. modules = ["math", "mpmath", "sympy"] else: modules = ["numpy"] else: modules = ["numpy", "scipy"] # Get the needed namespaces. namespaces = [] # First find any function implementations if use_imps: namespaces.append(_imp_namespace(expr)) # Check for dict before iterating if isinstance(modules, (dict, str)) or not hasattr(modules, '__iter__'): namespaces.append(modules) else: # consistency check if _module_present('numexpr', modules) and len(modules) > 1: raise TypeError("numexpr must be the only item in 'modules'") namespaces += list(modules) # fill namespace with first having highest priority namespace = {} for m in namespaces[::-1]: buf = _get_namespace(m) namespace.update(buf) if hasattr(expr, "atoms"): #Try if you can extract symbols from the expression. #Move on if expr.atoms in not implemented. 
syms = expr.atoms(Symbol) for term in syms: namespace.update({str(term): term}) if printer is None: if _module_present('mpmath', namespaces): from sympy.printing.pycode import MpmathPrinter as Printer # type: ignore elif _module_present('scipy', namespaces): from sympy.printing.numpy import SciPyPrinter as Printer # type: ignore elif _module_present('numpy', namespaces): from sympy.printing.numpy import NumPyPrinter as Printer # type: ignore elif _module_present('cupy', namespaces): from sympy.printing.numpy import CuPyPrinter as Printer # type: ignore elif _module_present('jax', namespaces): from sympy.printing.numpy import JaxPrinter as Printer # type: ignore elif _module_present('numexpr', namespaces): from sympy.printing.lambdarepr import NumExprPrinter as Printer # type: ignore elif _module_present('tensorflow', namespaces): from sympy.printing.tensorflow import TensorflowPrinter as Printer # type: ignore elif _module_present('sympy', namespaces): from sympy.printing.pycode import SymPyPrinter as Printer # type: ignore else: from sympy.printing.pycode import PythonCodePrinter as Printer # type: ignore user_functions = {} for m in namespaces[::-1]: if isinstance(m, dict): for k in m: user_functions[k] = k printer = Printer({'fully_qualified_modules': False, 'inline': True, 'allow_unknown_functions': True, 'user_functions': user_functions}) if isinstance(args, set): sympy_deprecation_warning( """ Passing the function arguments to lambdify() as a set is deprecated. This leads to unpredictable results since sets are unordered. Instead, use a list or tuple for the function arguments. """, deprecated_since_version="1.6.3", active_deprecations_target="deprecated-lambdify-arguments-set", ) # Get the names of the args, for creating a docstring iterable_args = (args,) if isinstance(args, Expr) else args names = [] # Grab the callers frame, for getting the names by inspection (if needed) callers_local_vars = inspect.currentframe().f_back.f_locals.items() # type: ignore for n, var in enumerate(iterable_args): if hasattr(var, 'name'): names.append(var.name) else: # It's an iterable. Try to get name by inspection of calling frame. name_list = [var_name for var_name, var_val in callers_local_vars if var_val is var] if len(name_list) == 1: names.append(name_list[0]) else: # Cannot infer name with certainty. arg_# will have to do. names.append('arg_' + str(n)) # Create the function definition code and execute it funcname = '_lambdifygenerated' if _module_present('tensorflow', namespaces): funcprinter = _TensorflowEvaluatorPrinter(printer, dummify) else: funcprinter = _EvaluatorPrinter(printer, dummify) if cse == True: from sympy.simplify.cse_main import cse as _cse cses, _expr = _cse(expr, list=False) elif callable(cse): cses, _expr = cse(expr) else: cses, _expr = (), expr funcstr = funcprinter.doprint(funcname, iterable_args, _expr, cses=cses) # Collect the module imports from the code printers. imp_mod_lines = [] for mod, keys in (getattr(printer, 'module_imports', None) or {}).items(): for k in keys: if k not in namespace: ln = "from %s import %s" % (mod, k) try: exec(ln, {}, namespace) except ImportError: # Tensorflow 2.0 has issues with importing a specific # function from its submodule. 
# https://github.com/tensorflow/tensorflow/issues/33022 ln = "%s = %s.%s" % (k, mod, k) exec(ln, {}, namespace) imp_mod_lines.append(ln) # Provide lambda expression with builtins, and compatible implementation of range namespace.update({'builtins':builtins, 'range':range}) funclocals = {} global _lambdify_generated_counter filename = '<lambdifygenerated-%s>' % _lambdify_generated_counter _lambdify_generated_counter += 1 c = compile(funcstr, filename, 'exec') exec(c, namespace, funclocals) # mtime has to be None or else linecache.checkcache will remove it linecache.cache[filename] = (len(funcstr), None, funcstr.splitlines(True), filename) # type: ignore func = funclocals[funcname] # Apply the docstring sig = "func({})".format(", ".join(str(i) for i in names)) sig = textwrap.fill(sig, subsequent_indent=' '*8) if _too_large_for_docstring(expr, docstring_limit): expr_str = "EXPRESSION REDACTED DUE TO LENGTH, (see lambdify's `docstring_limit`)" src_str = "SOURCE CODE REDACTED DUE TO LENGTH, (see lambdify's `docstring_limit`)" else: expr_str = str(expr) if len(expr_str) > 78: expr_str = textwrap.wrap(expr_str, 75)[0] + '...' src_str = funcstr func.__doc__ = ( "Created with lambdify. Signature:\n\n" "{sig}\n\n" "Expression:\n\n" "{expr}\n\n" "Source code:\n\n" "{src}\n\n" "Imported modules:\n\n" "{imp_mods}" ).format(sig=sig, expr=expr_str, src=src_str, imp_mods='\n'.join(imp_mod_lines)) return func
lambdify
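A short sketch of the lambdify entry above; it repeats the basic NumPy example and the custom-module-list example from the quoted docstring. Supplying numpy.sin in the leading dictionary is only illustrative of how earlier modules entries take precedence.

# Usage sketch for lambdify (drawn from the docstring examples above).
import numpy as np
from sympy import sin, cos, symbols, lambdify

x = symbols('x')
f = lambdify(x, sin(x) + cos(x), 'numpy')
print(f(np.array([1, 2])))      # [1.38177329 0.49315059]

# Entries earlier in the modules list take precedence over later ones.
g = lambdify(x, sin(x), [{'sin': np.sin}, 'numpy'])
print(g(1.0))                   # 0.8414709848078965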
sympy
71
sympy/crypto/crypto.py
def lfsr_connection_polynomial(s): """ This function computes the LFSR connection polynomial. Parameters ========== s A sequence of elements of even length, with entries in a finite field. Returns ======= C(x) The connection polynomial of a minimal LFSR yielding s. This implements the algorithm in section 3 of J. L. Massey's article [M]_. Examples ======== >>> from sympy.crypto.crypto import ( ... lfsr_sequence, lfsr_connection_polynomial) >>> from sympy.polys.domains import FF >>> F = FF(2) >>> fill = [F(1), F(1), F(0), F(1)] >>> key = [F(1), F(0), F(0), F(1)] >>> s = lfsr_sequence(key, fill, 20) >>> lfsr_connection_polynomial(s) x**4 + x + 1 >>> fill = [F(1), F(0), F(0), F(1)] >>> key = [F(1), F(1), F(0), F(1)] >>> s = lfsr_sequence(key, fill, 20) >>> lfsr_connection_polynomial(s) x**3 + 1 >>> fill = [F(1), F(0), F(1)] >>> key = [F(1), F(1), F(0)] >>> s = lfsr_sequence(key, fill, 20) >>> lfsr_connection_polynomial(s) x**3 + x**2 + 1 >>> fill = [F(1), F(0), F(1)] >>> key = [F(1), F(0), F(1)] >>> s = lfsr_sequence(key, fill, 20) >>> lfsr_connection_polynomial(s) x**3 + x + 1 References ========== .. [M] James L. Massey, "Shift-Register Synthesis and BCH Decoding." IEEE Trans. on Information Theory, vol. 15(1), pp. 122-127, Jan 1969. """
/usr/src/app/target_test_cases/failed_tests_lfsr_connection_polynomial.txt
def lfsr_connection_polynomial(s): """ This function computes the LFSR connection polynomial. Parameters ========== s A sequence of elements of even length, with entries in a finite field. Returns ======= C(x) The connection polynomial of a minimal LFSR yielding s. This implements the algorithm in section 3 of J. L. Massey's article [M]_. Examples ======== >>> from sympy.crypto.crypto import ( ... lfsr_sequence, lfsr_connection_polynomial) >>> from sympy.polys.domains import FF >>> F = FF(2) >>> fill = [F(1), F(1), F(0), F(1)] >>> key = [F(1), F(0), F(0), F(1)] >>> s = lfsr_sequence(key, fill, 20) >>> lfsr_connection_polynomial(s) x**4 + x + 1 >>> fill = [F(1), F(0), F(0), F(1)] >>> key = [F(1), F(1), F(0), F(1)] >>> s = lfsr_sequence(key, fill, 20) >>> lfsr_connection_polynomial(s) x**3 + 1 >>> fill = [F(1), F(0), F(1)] >>> key = [F(1), F(1), F(0)] >>> s = lfsr_sequence(key, fill, 20) >>> lfsr_connection_polynomial(s) x**3 + x**2 + 1 >>> fill = [F(1), F(0), F(1)] >>> key = [F(1), F(0), F(1)] >>> s = lfsr_sequence(key, fill, 20) >>> lfsr_connection_polynomial(s) x**3 + x + 1 References ========== .. [M] James L. Massey, "Shift-Register Synthesis and BCH Decoding." IEEE Trans. on Information Theory, vol. 15(1), pp. 122-127, Jan 1969. """ # Initialization: p = s[0].modulus() x = Symbol("x") C = 1*x**0 B = 1*x**0 m = 1 b = 1*x**0 L = 0 N = 0 while N < len(s): if L > 0: dC = Poly(C).degree() r = min(L + 1, dC + 1) coeffsC = [C.subs(x, 0)] + [C.coeff(x**i) for i in range(1, dC + 1)] d = (int(s[N]) + sum(coeffsC[i]*int(s[N - i]) for i in range(1, r))) % p if L == 0: d = int(s[N])*x**0 if d == 0: m += 1 N += 1 if d > 0: if 2*L > N: C = (C - d*((b**(p - 2)) % p)*x**m*B).expand() m += 1 N += 1 else: T = C C = (C - d*((b**(p - 2)) % p)*x**m*B).expand() L = N + 1 - L m = 1 b = d B = T N += 1 dC = Poly(C).degree() coeffsC = [C.subs(x, 0)] + [C.coeff(x**i) for i in range(1, dC + 1)] return sum(coeffsC[i] % p*x**i for i in range(dC + 1) if coeffsC[i] is not None)
lfsr_connection_polynomial
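A minimal sketch of the lfsr_connection_polynomial entry above, reproducing the first doctest: generate an LFSR sequence over GF(2) and recover its connection polynomial.

# Usage sketch for lfsr_connection_polynomial (drawn from the doctests above).
from sympy.crypto.crypto import lfsr_sequence, lfsr_connection_polynomial
from sympy.polys.domains import FF

F = FF(2)
fill = [F(1), F(1), F(0), F(1)]
key = [F(1), F(0), F(0), F(1)]
s = lfsr_sequence(key, fill, 20)
print(lfsr_connection_polynomial(s))    # x**4 + x + 1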
sympy
72
sympy/utilities/iterables.py
def multiset_partitions(multiset, m=None): """ Return unique partitions of the given multiset (in list form). If ``m`` is None, all multisets will be returned, otherwise only partitions with ``m`` parts will be returned. If ``multiset`` is an integer, a range [0, 1, ..., multiset - 1] will be supplied. Examples ======== >>> from sympy.utilities.iterables import multiset_partitions >>> list(multiset_partitions([1, 2, 3, 4], 2)) [[[1, 2, 3], [4]], [[1, 2, 4], [3]], [[1, 2], [3, 4]], [[1, 3, 4], [2]], [[1, 3], [2, 4]], [[1, 4], [2, 3]], [[1], [2, 3, 4]]] >>> list(multiset_partitions([1, 2, 3, 4], 1)) [[[1, 2, 3, 4]]] Only unique partitions are returned and these will be returned in a canonical order regardless of the order of the input: >>> a = [1, 2, 2, 1] >>> ans = list(multiset_partitions(a, 2)) >>> a.sort() >>> list(multiset_partitions(a, 2)) == ans True >>> a = range(3, 1, -1) >>> (list(multiset_partitions(a)) == ... list(multiset_partitions(sorted(a)))) True If m is omitted then all partitions will be returned: >>> list(multiset_partitions([1, 1, 2])) [[[1, 1, 2]], [[1, 1], [2]], [[1, 2], [1]], [[1], [1], [2]]] >>> list(multiset_partitions([1]*3)) [[[1, 1, 1]], [[1], [1, 1]], [[1], [1], [1]]] Counting ======== The number of partitions of a set is given by the bell number: >>> from sympy import bell >>> len(list(multiset_partitions(5))) == bell(5) == 52 True The number of partitions of length k from a set of size n is given by the Stirling Number of the 2nd kind: >>> from sympy.functions.combinatorial.numbers import stirling >>> stirling(5, 2) == len(list(multiset_partitions(5, 2))) == 15 True These comments on counting apply to *sets*, not multisets. Notes ===== When all the elements are the same in the multiset, the order of the returned partitions is determined by the ``partitions`` routine. If one is counting partitions then it is better to use the ``nT`` function. See Also ======== partitions sympy.combinatorics.partitions.Partition sympy.combinatorics.partitions.IntegerPartition sympy.functions.combinatorial.numbers.nT """
/usr/src/app/target_test_cases/failed_tests_multiset_partitions.txt
def multiset_partitions(multiset, m=None): """ Return unique partitions of the given multiset (in list form). If ``m`` is None, all multisets will be returned, otherwise only partitions with ``m`` parts will be returned. If ``multiset`` is an integer, a range [0, 1, ..., multiset - 1] will be supplied. Examples ======== >>> from sympy.utilities.iterables import multiset_partitions >>> list(multiset_partitions([1, 2, 3, 4], 2)) [[[1, 2, 3], [4]], [[1, 2, 4], [3]], [[1, 2], [3, 4]], [[1, 3, 4], [2]], [[1, 3], [2, 4]], [[1, 4], [2, 3]], [[1], [2, 3, 4]]] >>> list(multiset_partitions([1, 2, 3, 4], 1)) [[[1, 2, 3, 4]]] Only unique partitions are returned and these will be returned in a canonical order regardless of the order of the input: >>> a = [1, 2, 2, 1] >>> ans = list(multiset_partitions(a, 2)) >>> a.sort() >>> list(multiset_partitions(a, 2)) == ans True >>> a = range(3, 1, -1) >>> (list(multiset_partitions(a)) == ... list(multiset_partitions(sorted(a)))) True If m is omitted then all partitions will be returned: >>> list(multiset_partitions([1, 1, 2])) [[[1, 1, 2]], [[1, 1], [2]], [[1, 2], [1]], [[1], [1], [2]]] >>> list(multiset_partitions([1]*3)) [[[1, 1, 1]], [[1], [1, 1]], [[1], [1], [1]]] Counting ======== The number of partitions of a set is given by the bell number: >>> from sympy import bell >>> len(list(multiset_partitions(5))) == bell(5) == 52 True The number of partitions of length k from a set of size n is given by the Stirling Number of the 2nd kind: >>> from sympy.functions.combinatorial.numbers import stirling >>> stirling(5, 2) == len(list(multiset_partitions(5, 2))) == 15 True These comments on counting apply to *sets*, not multisets. Notes ===== When all the elements are the same in the multiset, the order of the returned partitions is determined by the ``partitions`` routine. If one is counting partitions then it is better to use the ``nT`` function. See Also ======== partitions sympy.combinatorics.partitions.Partition sympy.combinatorics.partitions.IntegerPartition sympy.functions.combinatorial.numbers.nT """ # This function looks at the supplied input and dispatches to # several special-case routines as they apply. if isinstance(multiset, int): n = multiset if m and m > n: return multiset = list(range(n)) if m == 1: yield [multiset[:]] return # If m is not None, it can sometimes be faster to use # MultisetPartitionTraverser.enum_range() even for inputs # which are sets. Since the _set_partitions code is quite # fast, this is only advantageous when the overall set # partitions outnumber those with the desired number of parts # by a large factor. (At least 60.) Such a switch is not # currently implemented. for nc, q in _set_partitions(n): if m is None or nc == m: rv = [[] for i in range(nc)] for i in range(n): rv[q[i]].append(multiset[i]) yield rv return if len(multiset) == 1 and isinstance(multiset, str): multiset = [multiset] if not has_variety(multiset): # Only one component, repeated n times. The resulting # partitions correspond to partitions of integer n. 
n = len(multiset) if m and m > n: return if m == 1: yield [multiset[:]] return x = multiset[:1] for size, p in partitions(n, m, size=True): if m is None or size == m: rv = [] for k in sorted(p): rv.extend([x*k]*p[k]) yield rv else: from sympy.core.sorting import ordered multiset = list(ordered(multiset)) n = len(multiset) if m and m > n: return if m == 1: yield [multiset[:]] return # Split the information of the multiset into two lists - # one of the elements themselves, and one (of the same length) # giving the number of repeats for the corresponding element. elements, multiplicities = zip(*group(multiset, False)) if len(elements) < len(multiset): # General case - multiset with more than one distinct element # and at least one element repeated more than once. if m: mpt = MultisetPartitionTraverser() for state in mpt.enum_range(multiplicities, m-1, m): yield list_visitor(state, elements) else: for state in multiset_partitions_taocp(multiplicities): yield list_visitor(state, elements) else: # Set partitions case - no repeated elements. Pretty much # same as int argument case above, with same possible, but # currently unimplemented optimization for some cases when # m is not None for nc, q in _set_partitions(n): if m is None or nc == m: rv = [[] for i in range(nc)] for i in range(n): rv[q[i]].append(i) yield [[multiset[j] for j in i] for i in rv]
multiset_partitions
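A minimal usage sketch for the multiset_partitions entry above, assuming a standard SymPy installation; the two-part partitions and the Bell-number identity are taken directly from the docstring's doctests.

from sympy.utilities.iterables import multiset_partitions
from sympy import bell

# Partition a 4-element set into exactly 2 parts (7 partitions in total).
for part in multiset_partitions([1, 2, 3, 4], 2):
    print(part)

# The number of partitions of an n-element set is the Bell number B(n).
assert len(list(multiset_partitions(5))) == bell(5) == 52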
sympy
73
sympy/utilities/enumerative.py
def multiset_partitions_taocp(multiplicities): """Enumerates partitions of a multiset. Parameters ========== multiplicities list of integer multiplicities of the components of the multiset. Yields ====== state Internal data structure which encodes a particular partition. This output is then usually processed by a visitor function which combines the information from this data structure with the components themselves to produce an actual partition. Unless they wish to create their own visitor function, users will have little need to look inside this data structure. But, for reference, it is a 3-element list with components: f is a frame array, which is used to divide pstack into parts. lpart points to the base of the topmost part. pstack is an array of PartComponent objects. The ``state`` output offers a peek into the internal data structures of the enumeration function. The client should treat this as read-only; any modification of the data structure will cause unpredictable (and almost certainly incorrect) results. Also, the components of ``state`` are modified in place at each iteration. Hence, the visitor must be called at each loop iteration. Accumulating the ``state`` instances and processing them later will not work. Examples ======== >>> from sympy.utilities.enumerative import list_visitor >>> from sympy.utilities.enumerative import multiset_partitions_taocp >>> # variables components and multiplicities represent the multiset 'abb' >>> components = 'ab' >>> multiplicities = [1, 2] >>> states = multiset_partitions_taocp(multiplicities) >>> list(list_visitor(state, components) for state in states) [[['a', 'b', 'b']], [['a', 'b'], ['b']], [['a'], ['b', 'b']], [['a'], ['b'], ['b']]] See Also ======== sympy.utilities.iterables.multiset_partitions: Takes a multiset as input and directly yields multiset partitions. It dispatches to a number of functions, including this one, for implementation. Most users will find it more convenient to use than multiset_partitions_taocp. """
/usr/src/app/target_test_cases/failed_tests_multiset_partitions_taocp.txt
def multiset_partitions_taocp(multiplicities): """Enumerates partitions of a multiset. Parameters ========== multiplicities list of integer multiplicities of the components of the multiset. Yields ====== state Internal data structure which encodes a particular partition. This output is then usually processed by a visitor function which combines the information from this data structure with the components themselves to produce an actual partition. Unless they wish to create their own visitor function, users will have little need to look inside this data structure. But, for reference, it is a 3-element list with components: f is a frame array, which is used to divide pstack into parts. lpart points to the base of the topmost part. pstack is an array of PartComponent objects. The ``state`` output offers a peek into the internal data structures of the enumeration function. The client should treat this as read-only; any modification of the data structure will cause unpredictable (and almost certainly incorrect) results. Also, the components of ``state`` are modified in place at each iteration. Hence, the visitor must be called at each loop iteration. Accumulating the ``state`` instances and processing them later will not work. Examples ======== >>> from sympy.utilities.enumerative import list_visitor >>> from sympy.utilities.enumerative import multiset_partitions_taocp >>> # variables components and multiplicities represent the multiset 'abb' >>> components = 'ab' >>> multiplicities = [1, 2] >>> states = multiset_partitions_taocp(multiplicities) >>> list(list_visitor(state, components) for state in states) [[['a', 'b', 'b']], [['a', 'b'], ['b']], [['a'], ['b', 'b']], [['a'], ['b'], ['b']]] See Also ======== sympy.utilities.iterables.multiset_partitions: Takes a multiset as input and directly yields multiset partitions. It dispatches to a number of functions, including this one, for implementation. Most users will find it more convenient to use than multiset_partitions_taocp. """ # Important variables. # m is the number of components, i.e., number of distinct elements m = len(multiplicities) # n is the cardinality, total number of elements whether or not distinct n = sum(multiplicities) # The main data structure, f segments pstack into parts. See # list_visitor() for example code indicating how this internal # state corresponds to a partition. # Note: allocation of space for stack is conservative. Knuth's # exercise 7.2.1.5.68 gives some indication of how to tighten this # bound, but this is not implemented. pstack = [PartComponent() for i in range(n * m + 1)] f = [0] * (n + 1) # Step M1 in Knuth (Initialize) # Initial state - entire multiset in one part. for j in range(m): ps = pstack[j] ps.c = j ps.u = multiplicities[j] ps.v = multiplicities[j] # Other variables f[0] = 0 a = 0 lpart = 0 f[1] = m b = m # in general, current stack frame is from a to b - 1 while True: while True: # Step M2 (Subtract v from u) k = b x = False for j in range(a, b): pstack[k].u = pstack[j].u - pstack[j].v if pstack[k].u == 0: x = True elif not x: pstack[k].c = pstack[j].c pstack[k].v = min(pstack[j].v, pstack[k].u) x = pstack[k].u < pstack[j].v k = k + 1 else: # x is True pstack[k].c = pstack[j].c pstack[k].v = pstack[k].u k = k + 1 # Note: x is True iff v has changed # Step M3 (Push if nonzero.) 
if k > b: a = b b = k lpart = lpart + 1 f[lpart + 1] = b # Return to M2 else: break # Continue to M4 # M4 Visit a partition state = [f, lpart, pstack] yield state # M5 (Decrease v) while True: j = b-1 while (pstack[j].v == 0): j = j - 1 if j == a and pstack[j].v == 1: # M6 (Backtrack) if lpart == 0: return lpart = lpart - 1 b = a a = f[lpart] # Return to M5 else: pstack[j].v = pstack[j].v - 1 for k in range(j + 1, b): pstack[k].v = pstack[k].u break # GOTO M2
multiset_partitions_taocp
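A short sketch of the visitor pattern described above, reusing the docstring's 'abb' example; list_visitor and the multiplicities list come straight from that doctest. The key point is that each yielded state must be consumed immediately, since it is mutated in place on every iteration.

from sympy.utilities.enumerative import multiset_partitions_taocp, list_visitor

components = 'ab'        # the distinct elements
multiplicities = [1, 2]  # 'a' occurs once, 'b' twice

for state in multiset_partitions_taocp(multiplicities):
    # Convert the internal state to an actual partition right away;
    # accumulating states for later processing would give wrong results.
    print(list_visitor(state, components))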
sympy
74
sympy/codegen/algorithms.py
def newtons_method(expr, wrt, atol=1e-12, delta=None, *, rtol=4e-16, debug=False, itermax=None, counter=None, delta_fn=lambda e, x: -e/e.diff(x), cse=False, handle_nan=None, bounds=None): """ Generates an AST for Newton-Raphson method (a root-finding algorithm). Explanation =========== Returns an abstract syntax tree (AST) based on ``sympy.codegen.ast`` for Netwon's method of root-finding. Parameters ========== expr : expression wrt : Symbol With respect to, i.e. what is the variable. atol : number or expression Absolute tolerance (stopping criterion) rtol : number or expression Relative tolerance (stopping criterion) delta : Symbol Will be a ``Dummy`` if ``None``. debug : bool Whether to print convergence information during iterations itermax : number or expr Maximum number of iterations. counter : Symbol Will be a ``Dummy`` if ``None``. delta_fn: Callable[[Expr, Symbol], Expr] computes the step, default is newtons method. For e.g. Halley's method use delta_fn=lambda e, x: -2*e*e.diff(x)/(2*e.diff(x)**2 - e*e.diff(x, 2)) cse: bool Perform common sub-expression elimination on delta expression handle_nan: Token How to handle occurrence of not-a-number (NaN). bounds: Optional[tuple[Expr, Expr]] Perform optimization within bounds Examples ======== >>> from sympy import symbols, cos >>> from sympy.codegen.ast import Assignment >>> from sympy.codegen.algorithms import newtons_method >>> x, dx, atol = symbols('x dx atol') >>> expr = cos(x) - x**3 >>> algo = newtons_method(expr, x, atol=atol, delta=dx) >>> algo.has(Assignment(dx, -expr/expr.diff(x))) True References ========== .. [1] https://en.wikipedia.org/wiki/Newton%27s_method """
/usr/src/app/target_test_cases/failed_tests_newtons_method.txt
def newtons_method(expr, wrt, atol=1e-12, delta=None, *, rtol=4e-16, debug=False, itermax=None, counter=None, delta_fn=lambda e, x: -e/e.diff(x), cse=False, handle_nan=None, bounds=None): """ Generates an AST for Newton-Raphson method (a root-finding algorithm). Explanation =========== Returns an abstract syntax tree (AST) based on ``sympy.codegen.ast`` for Netwon's method of root-finding. Parameters ========== expr : expression wrt : Symbol With respect to, i.e. what is the variable. atol : number or expression Absolute tolerance (stopping criterion) rtol : number or expression Relative tolerance (stopping criterion) delta : Symbol Will be a ``Dummy`` if ``None``. debug : bool Whether to print convergence information during iterations itermax : number or expr Maximum number of iterations. counter : Symbol Will be a ``Dummy`` if ``None``. delta_fn: Callable[[Expr, Symbol], Expr] computes the step, default is newtons method. For e.g. Halley's method use delta_fn=lambda e, x: -2*e*e.diff(x)/(2*e.diff(x)**2 - e*e.diff(x, 2)) cse: bool Perform common sub-expression elimination on delta expression handle_nan: Token How to handle occurrence of not-a-number (NaN). bounds: Optional[tuple[Expr, Expr]] Perform optimization within bounds Examples ======== >>> from sympy import symbols, cos >>> from sympy.codegen.ast import Assignment >>> from sympy.codegen.algorithms import newtons_method >>> x, dx, atol = symbols('x dx atol') >>> expr = cos(x) - x**3 >>> algo = newtons_method(expr, x, atol=atol, delta=dx) >>> algo.has(Assignment(dx, -expr/expr.diff(x))) True References ========== .. [1] https://en.wikipedia.org/wiki/Newton%27s_method """ if delta is None: delta = Dummy() Wrapper = Scope name_d = 'delta' else: Wrapper = lambda x: x name_d = delta.name delta_expr = delta_fn(expr, wrt) if cse: from sympy.simplify.cse_main import cse cses, (red,) = cse([delta_expr.factor()]) whl_bdy = [Assignment(dum, sub_e) for dum, sub_e in cses] whl_bdy += [Assignment(delta, red)] else: whl_bdy = [Assignment(delta, delta_expr)] if handle_nan is not None: whl_bdy += [While(isnan(delta), CodeBlock(handle_nan, break_))] whl_bdy += [AddAugmentedAssignment(wrt, delta)] if bounds is not None: whl_bdy += [Assignment(wrt, Min(Max(wrt, bounds[0]), bounds[1]))] if debug: prnt = Print([wrt, delta], r"{}=%12.5g {}=%12.5g\n".format(wrt.name, name_d)) whl_bdy += [prnt] req = Gt(Abs(delta), atol + rtol*Abs(wrt)) declars = [Declaration(Variable(delta, type=real, value=oo))] if itermax is not None: counter = counter or Dummy(integer=True) v_counter = Variable.deduced(counter, 0) declars.append(Declaration(v_counter)) whl_bdy.append(AddAugmentedAssignment(counter, 1)) req = And(req, Lt(counter, itermax)) whl = While(req, CodeBlock(*whl_bdy)) blck = declars if debug: blck.append(Print([wrt], r"{}=%12.5g\n".format(wrt.name))) blck += [whl] return Wrapper(CodeBlock(*blck))
newtons_method
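A minimal sketch of building and checking the Newton-Raphson AST, lifted from the doctest above; it only verifies that the generated block contains the expected update step, without rendering the AST to any target language.

from sympy import symbols, cos
from sympy.codegen.ast import Assignment
from sympy.codegen.algorithms import newtons_method

x, dx, atol = symbols('x dx atol')
expr = cos(x) - x**3
algo = newtons_method(expr, x, atol=atol, delta=dx)

# The loop body should contain the classic update delta = -f/f'.
assert algo.has(Assignment(dx, -expr/expr.diff(x)))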
sympy
75
sympy/utilities/iterables.py
def partitions(n, m=None, k=None, size=False): """Generate all partitions of positive integer, n. Each partition is represented as a dictionary, mapping an integer to the number of copies of that integer in the partition. For example, the first partition of 4 returned is {4: 1}, "4: one of them". Parameters ========== n : int m : int, optional limits number of parts in partition (mnemonic: m, maximum parts) k : int, optional limits the numbers that are kept in the partition (mnemonic: k, keys) size : bool, default: False If ``True``, (M, P) is returned where M is the sum of the multiplicities and P is the generated partition. If ``False``, only the generated partition is returned. Examples ======== >>> from sympy.utilities.iterables import partitions The numbers appearing in the partition (the key of the returned dict) are limited with k: >>> for p in partitions(6, k=2): # doctest: +SKIP ... print(p) {2: 3} {1: 2, 2: 2} {1: 4, 2: 1} {1: 6} The maximum number of parts in the partition (the sum of the values in the returned dict) are limited with m (default value, None, gives partitions from 1 through n): >>> for p in partitions(6, m=2): # doctest: +SKIP ... print(p) ... {6: 1} {1: 1, 5: 1} {2: 1, 4: 1} {3: 2} References ========== .. [1] modified from Tim Peter's version to allow for k and m values: https://code.activestate.com/recipes/218332-generator-for-integer-partitions/ See Also ======== sympy.combinatorics.partitions.Partition sympy.combinatorics.partitions.IntegerPartition """
/usr/src/app/target_test_cases/failed_tests_partitions.txt
def partitions(n, m=None, k=None, size=False): """Generate all partitions of positive integer, n. Each partition is represented as a dictionary, mapping an integer to the number of copies of that integer in the partition. For example, the first partition of 4 returned is {4: 1}, "4: one of them". Parameters ========== n : int m : int, optional limits number of parts in partition (mnemonic: m, maximum parts) k : int, optional limits the numbers that are kept in the partition (mnemonic: k, keys) size : bool, default: False If ``True``, (M, P) is returned where M is the sum of the multiplicities and P is the generated partition. If ``False``, only the generated partition is returned. Examples ======== >>> from sympy.utilities.iterables import partitions The numbers appearing in the partition (the key of the returned dict) are limited with k: >>> for p in partitions(6, k=2): # doctest: +SKIP ... print(p) {2: 3} {1: 2, 2: 2} {1: 4, 2: 1} {1: 6} The maximum number of parts in the partition (the sum of the values in the returned dict) are limited with m (default value, None, gives partitions from 1 through n): >>> for p in partitions(6, m=2): # doctest: +SKIP ... print(p) ... {6: 1} {1: 1, 5: 1} {2: 1, 4: 1} {3: 2} References ========== .. [1] modified from Tim Peter's version to allow for k and m values: https://code.activestate.com/recipes/218332-generator-for-integer-partitions/ See Also ======== sympy.combinatorics.partitions.Partition sympy.combinatorics.partitions.IntegerPartition """ if (n <= 0 or m is not None and m < 1 or k is not None and k < 1 or m and k and m*k < n): # the empty set is the only way to handle these inputs # and returning {} to represent it is consistent with # the counting convention, e.g. nT(0) == 1. if size: yield 0, {} else: yield {} return if m is None: m = n else: m = min(m, n) k = min(k or n, n) n, m, k = as_int(n), as_int(m), as_int(k) q, r = divmod(n, k) ms = {k: q} keys = [k] # ms.keys(), from largest to smallest if r: ms[r] = 1 keys.append(r) room = m - q - bool(r) if size: yield sum(ms.values()), ms.copy() else: yield ms.copy() while keys != [1]: # Reuse any 1's. if keys[-1] == 1: del keys[-1] reuse = ms.pop(1) room += reuse else: reuse = 0 while 1: # Let i be the smallest key larger than 1. Reuse one # instance of i. i = keys[-1] newcount = ms[i] = ms[i] - 1 reuse += i if newcount == 0: del keys[-1], ms[i] room += 1 # Break the remainder into pieces of size i-1. i -= 1 q, r = divmod(reuse, i) need = q + bool(r) if need > room: if not keys: return continue ms[i] = q keys.append(i) if r: ms[r] = 1 keys.append(r) break room -= need if size: yield sum(ms.values()), ms.copy() else: yield ms.copy()
partitions
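A usage sketch for partitions, reusing the docstring's parameters; the commented outputs are the ones listed in the docstring (their ordering is why those doctests are marked +SKIP there).

from sympy.utilities.iterables import partitions

# Limit the number of parts with m.
for p in partitions(6, m=2):
    print(p)   # {6: 1}, {1: 1, 5: 1}, {2: 1, 4: 1}, {3: 2}

# Limit the size of the parts with k.
for p in partitions(6, k=2):
    print(p)   # {2: 3}, {1: 2, 2: 2}, {1: 4, 2: 1}, {1: 6}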
sympy
76
sympy/solvers/pde.py
def pdsolve(eq, func=None, hint='default', dict=False, solvefun=None, **kwargs): """ Solves any (supported) kind of partial differential equation. **Usage** pdsolve(eq, f(x,y), hint) -> Solve partial differential equation eq for function f(x,y), using method hint. **Details** ``eq`` can be any supported partial differential equation (see the pde docstring for supported methods). This can either be an Equality, or an expression, which is assumed to be equal to 0. ``f(x,y)`` is a function of two variables whose derivatives in that variable make up the partial differential equation. In many cases it is not necessary to provide this; it will be autodetected (and an error raised if it could not be detected). ``hint`` is the solving method that you want pdsolve to use. Use classify_pde(eq, f(x,y)) to get all of the possible hints for a PDE. The default hint, 'default', will use whatever hint is returned first by classify_pde(). See Hints below for more options that you can use for hint. ``solvefun`` is the convention used for arbitrary functions returned by the PDE solver. If not set by the user, it is set by default to be F. **Hints** Aside from the various solving methods, there are also some meta-hints that you can pass to pdsolve(): "default": This uses whatever hint is returned first by classify_pde(). This is the default argument to pdsolve(). "all": To make pdsolve apply all relevant classification hints, use pdsolve(PDE, func, hint="all"). This will return a dictionary of hint:solution terms. If a hint causes pdsolve to raise the NotImplementedError, value of that hint's key will be the exception object raised. The dictionary will also include some special keys: - order: The order of the PDE. See also ode_order() in deutils.py - default: The solution that would be returned by default. This is the one produced by the hint that appears first in the tuple returned by classify_pde(). "all_Integral": This is the same as "all", except if a hint also has a corresponding "_Integral" hint, it only returns the "_Integral" hint. This is useful if "all" causes pdsolve() to hang because of a difficult or impossible integral. This meta-hint will also be much faster than "all", because integrate() is an expensive routine. See also the classify_pde() docstring for more info on hints, and the pde docstring for a list of all supported hints. **Tips** - You can declare the derivative of an unknown function this way: >>> from sympy import Function, Derivative >>> from sympy.abc import x, y # x and y are the independent variables >>> f = Function("f")(x, y) # f is a function of x and y >>> # fx will be the partial derivative of f with respect to x >>> fx = Derivative(f, x) >>> # fy will be the partial derivative of f with respect to y >>> fy = Derivative(f, y) - See test_pde.py for many tests, which serves also as a set of examples for how to use pdsolve(). - pdsolve always returns an Equality class (except for the case when the hint is "all" or "all_Integral"). Note that it is not possible to get an explicit solution for f(x, y) as in the case of ODE's - Do help(pde.pde_hintname) to get help more information on a specific hint Examples ======== >>> from sympy.solvers.pde import pdsolve >>> from sympy import Function, Eq >>> from sympy.abc import x, y >>> f = Function('f') >>> u = f(x, y) >>> ux = u.diff(x) >>> uy = u.diff(y) >>> eq = Eq(1 + (2*(ux/u)) + (3*(uy/u)), 0) >>> pdsolve(eq) Eq(f(x, y), F(3*x - 2*y)*exp(-2*x/13 - 3*y/13)) """
/usr/src/app/target_test_cases/failed_tests_pdsolve.txt
def pdsolve(eq, func=None, hint='default', dict=False, solvefun=None, **kwargs): """ Solves any (supported) kind of partial differential equation. **Usage** pdsolve(eq, f(x,y), hint) -> Solve partial differential equation eq for function f(x,y), using method hint. **Details** ``eq`` can be any supported partial differential equation (see the pde docstring for supported methods). This can either be an Equality, or an expression, which is assumed to be equal to 0. ``f(x,y)`` is a function of two variables whose derivatives in that variable make up the partial differential equation. In many cases it is not necessary to provide this; it will be autodetected (and an error raised if it could not be detected). ``hint`` is the solving method that you want pdsolve to use. Use classify_pde(eq, f(x,y)) to get all of the possible hints for a PDE. The default hint, 'default', will use whatever hint is returned first by classify_pde(). See Hints below for more options that you can use for hint. ``solvefun`` is the convention used for arbitrary functions returned by the PDE solver. If not set by the user, it is set by default to be F. **Hints** Aside from the various solving methods, there are also some meta-hints that you can pass to pdsolve(): "default": This uses whatever hint is returned first by classify_pde(). This is the default argument to pdsolve(). "all": To make pdsolve apply all relevant classification hints, use pdsolve(PDE, func, hint="all"). This will return a dictionary of hint:solution terms. If a hint causes pdsolve to raise the NotImplementedError, value of that hint's key will be the exception object raised. The dictionary will also include some special keys: - order: The order of the PDE. See also ode_order() in deutils.py - default: The solution that would be returned by default. This is the one produced by the hint that appears first in the tuple returned by classify_pde(). "all_Integral": This is the same as "all", except if a hint also has a corresponding "_Integral" hint, it only returns the "_Integral" hint. This is useful if "all" causes pdsolve() to hang because of a difficult or impossible integral. This meta-hint will also be much faster than "all", because integrate() is an expensive routine. See also the classify_pde() docstring for more info on hints, and the pde docstring for a list of all supported hints. **Tips** - You can declare the derivative of an unknown function this way: >>> from sympy import Function, Derivative >>> from sympy.abc import x, y # x and y are the independent variables >>> f = Function("f")(x, y) # f is a function of x and y >>> # fx will be the partial derivative of f with respect to x >>> fx = Derivative(f, x) >>> # fy will be the partial derivative of f with respect to y >>> fy = Derivative(f, y) - See test_pde.py for many tests, which serves also as a set of examples for how to use pdsolve(). - pdsolve always returns an Equality class (except for the case when the hint is "all" or "all_Integral"). 
Note that it is not possible to get an explicit solution for f(x, y) as in the case of ODE's - Do help(pde.pde_hintname) to get help more information on a specific hint Examples ======== >>> from sympy.solvers.pde import pdsolve >>> from sympy import Function, Eq >>> from sympy.abc import x, y >>> f = Function('f') >>> u = f(x, y) >>> ux = u.diff(x) >>> uy = u.diff(y) >>> eq = Eq(1 + (2*(ux/u)) + (3*(uy/u)), 0) >>> pdsolve(eq) Eq(f(x, y), F(3*x - 2*y)*exp(-2*x/13 - 3*y/13)) """ if not solvefun: solvefun = Function('F') # See the docstring of _desolve for more details. hints = _desolve(eq, func=func, hint=hint, simplify=True, type='pde', **kwargs) eq = hints.pop('eq', False) all_ = hints.pop('all', False) if all_: # TODO : 'best' hint should be implemented when adequate # number of hints are added. pdedict = {} failed_hints = {} gethints = classify_pde(eq, dict=True) pdedict.update({'order': gethints['order'], 'default': gethints['default']}) for hint in hints: try: rv = _helper_simplify(eq, hint, hints[hint]['func'], hints[hint]['order'], hints[hint][hint], solvefun) except NotImplementedError as detail: failed_hints[hint] = detail else: pdedict[hint] = rv pdedict.update(failed_hints) return pdedict else: return _helper_simplify(eq, hints['hint'], hints['func'], hints['order'], hints[hints['hint']], solvefun)
pdsolve
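A minimal sketch of calling pdsolve on the first-order PDE from the docstring; the arbitrary function F in the result is the default solvefun convention mentioned above.

from sympy import Function, Eq
from sympy.abc import x, y
from sympy.solvers.pde import pdsolve

f = Function('f')
u = f(x, y)
ux = u.diff(x)
uy = u.diff(y)
eq = Eq(1 + 2*(ux/u) + 3*(uy/u), 0)

print(pdsolve(eq))
# Eq(f(x, y), F(3*x - 2*y)*exp(-2*x/13 - 3*y/13))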
sympy
77
sympy/calculus/util.py
def periodicity(f, symbol, check=False): """ Tests the given function for periodicity in the given symbol. Parameters ========== f : :py:class:`~.Expr` The concerned function. symbol : :py:class:`~.Symbol` The variable for which the period is to be determined. check : bool, optional The flag to verify whether the value being returned is a period or not. Returns ======= period The period of the function is returned. ``None`` is returned when the function is aperiodic or has a complex period. The value of $0$ is returned as the period of a constant function. Raises ====== NotImplementedError The value of the period computed cannot be verified. Notes ===== Currently, we do not support functions with a complex period. The period of functions having complex periodic values such as ``exp``, ``sinh`` is evaluated to ``None``. The value returned might not be the "fundamental" period of the given function i.e. it may not be the smallest periodic value of the function. The verification of the period through the ``check`` flag is not reliable due to internal simplification of the given expression. Hence, it is set to ``False`` by default. Examples ======== >>> from sympy import periodicity, Symbol, sin, cos, tan, exp >>> x = Symbol('x') >>> f = sin(x) + sin(2*x) + sin(3*x) >>> periodicity(f, x) 2*pi >>> periodicity(sin(x)*cos(x), x) pi >>> periodicity(exp(tan(2*x) - 1), x) pi/2 >>> periodicity(sin(4*x)**cos(2*x), x) pi >>> periodicity(exp(x), x) """
/usr/src/app/target_test_cases/failed_tests_periodicity.txt
def periodicity(f, symbol, check=False): """ Tests the given function for periodicity in the given symbol. Parameters ========== f : :py:class:`~.Expr` The concerned function. symbol : :py:class:`~.Symbol` The variable for which the period is to be determined. check : bool, optional The flag to verify whether the value being returned is a period or not. Returns ======= period The period of the function is returned. ``None`` is returned when the function is aperiodic or has a complex period. The value of $0$ is returned as the period of a constant function. Raises ====== NotImplementedError The value of the period computed cannot be verified. Notes ===== Currently, we do not support functions with a complex period. The period of functions having complex periodic values such as ``exp``, ``sinh`` is evaluated to ``None``. The value returned might not be the "fundamental" period of the given function i.e. it may not be the smallest periodic value of the function. The verification of the period through the ``check`` flag is not reliable due to internal simplification of the given expression. Hence, it is set to ``False`` by default. Examples ======== >>> from sympy import periodicity, Symbol, sin, cos, tan, exp >>> x = Symbol('x') >>> f = sin(x) + sin(2*x) + sin(3*x) >>> periodicity(f, x) 2*pi >>> periodicity(sin(x)*cos(x), x) pi >>> periodicity(exp(tan(2*x) - 1), x) pi/2 >>> periodicity(sin(4*x)**cos(2*x), x) pi >>> periodicity(exp(x), x) """ if symbol.kind is not NumberKind: raise NotImplementedError("Cannot use symbol of kind %s" % symbol.kind) temp = Dummy('x', real=True) f = f.subs(symbol, temp) symbol = temp def _check(orig_f, period): '''Return the checked period or raise an error.''' new_f = orig_f.subs(symbol, symbol + period) if new_f.equals(orig_f): return period else: raise NotImplementedError(filldedent(''' The period of the given function cannot be verified. When `%s` was replaced with `%s + %s` in `%s`, the result was `%s` which was not recognized as being the same as the original function. So either the period was wrong or the two forms were not recognized as being equal. Set check=False to obtain the value.''' % (symbol, symbol, period, orig_f, new_f))) orig_f = f period = None if isinstance(f, Relational): f = f.lhs - f.rhs f = f.simplify() if symbol not in f.free_symbols: return S.Zero if isinstance(f, TrigonometricFunction): try: period = f.period(symbol) except NotImplementedError: pass if isinstance(f, Abs): arg = f.args[0] if isinstance(arg, (sec, csc, cos)): # all but tan and cot might have a # a period that is half as large # so recast as sin arg = sin(arg.args[0]) period = periodicity(arg, symbol) if period is not None and isinstance(arg, sin): # the argument of Abs was a trigonometric other than # cot or tan; test to see if the half-period # is valid. 
Abs(arg) has behaviour equivalent to # orig_f, so use that for test: orig_f = Abs(arg) try: return _check(orig_f, period/2) except NotImplementedError as err: if check: raise NotImplementedError(err) # else let new orig_f and period be # checked below if isinstance(f, exp) or (f.is_Pow and f.base == S.Exp1): f = Pow(S.Exp1, expand_mul(f.exp)) if im(f) != 0: period_real = periodicity(re(f), symbol) period_imag = periodicity(im(f), symbol) if period_real is not None and period_imag is not None: period = lcim([period_real, period_imag]) if f.is_Pow and f.base != S.Exp1: base, expo = f.args base_has_sym = base.has(symbol) expo_has_sym = expo.has(symbol) if base_has_sym and not expo_has_sym: period = periodicity(base, symbol) elif expo_has_sym and not base_has_sym: period = periodicity(expo, symbol) else: period = _periodicity(f.args, symbol) elif f.is_Mul: coeff, g = f.as_independent(symbol, as_Add=False) if isinstance(g, TrigonometricFunction) or not equal_valued(coeff, 1): period = periodicity(g, symbol) else: period = _periodicity(g.args, symbol) elif f.is_Add: k, g = f.as_independent(symbol) if k is not S.Zero: return periodicity(g, symbol) period = _periodicity(g.args, symbol) elif isinstance(f, Mod): a, n = f.args if a == symbol: period = n elif isinstance(a, TrigonometricFunction): period = periodicity(a, symbol) #check if 'f' is linear in 'symbol' elif (a.is_polynomial(symbol) and degree(a, symbol) == 1 and symbol not in n.free_symbols): period = Abs(n / a.diff(symbol)) elif isinstance(f, Piecewise): pass # not handling Piecewise yet as the return type is not favorable elif period is None: from sympy.solvers.decompogen import compogen, decompogen g_s = decompogen(f, symbol) num_of_gs = len(g_s) if num_of_gs > 1: for index, g in enumerate(reversed(g_s)): start_index = num_of_gs - 1 - index g = compogen(g_s[start_index:], symbol) if g not in (orig_f, f): # Fix for issue 12620 period = periodicity(g, symbol) if period is not None: break if period is not None: if check: return _check(orig_f, period) return period return None
periodicity
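A short sketch exercising periodicity on the docstring's own examples; note that None (not an exception) signals an aperiodic or complex-period function.

from sympy import periodicity, Symbol, sin, cos, tan, exp

x = Symbol('x')
print(periodicity(sin(x)*cos(x), x))      # pi
print(periodicity(exp(tan(2*x) - 1), x))  # pi/2
print(periodicity(exp(x), x))             # None: exp is aperiodic over the reals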
sympy
78
sympy/functions/elementary/piecewise.py
def piecewise_exclusive(expr, *, skip_nan=False, deep=True): """ Rewrite :class:`Piecewise` with mutually exclusive conditions. Explanation =========== SymPy represents the conditions of a :class:`Piecewise` in an "if-elif"-fashion, allowing more than one condition to be simultaneously True. The interpretation is that the first condition that is True is the case that holds. While this is a useful representation computationally it is not how a piecewise formula is typically shown in a mathematical text. The :func:`piecewise_exclusive` function can be used to rewrite any :class:`Piecewise` with more typical mutually exclusive conditions. Note that further manipulation of the resulting :class:`Piecewise`, e.g. simplifying it, will most likely make it non-exclusive. Hence, this is primarily a function to be used in conjunction with printing the Piecewise or if one would like to reorder the expression-condition pairs. If it is not possible to determine that all possibilities are covered by the different cases of the :class:`Piecewise` then a final :class:`~sympy.core.numbers.NaN` case will be included explicitly. This can be prevented by passing ``skip_nan=True``. Examples ======== >>> from sympy import piecewise_exclusive, Symbol, Piecewise, S >>> x = Symbol('x', real=True) >>> p = Piecewise((0, x < 0), (S.Half, x <= 0), (1, True)) >>> piecewise_exclusive(p) Piecewise((0, x < 0), (1/2, Eq(x, 0)), (1, x > 0)) >>> piecewise_exclusive(Piecewise((2, x > 1))) Piecewise((2, x > 1), (nan, x <= 1)) >>> piecewise_exclusive(Piecewise((2, x > 1)), skip_nan=True) Piecewise((2, x > 1)) Parameters ========== expr: a SymPy expression. Any :class:`Piecewise` in the expression will be rewritten. skip_nan: ``bool`` (default ``False``) If ``skip_nan`` is set to ``True`` then a final :class:`~sympy.core.numbers.NaN` case will not be included. deep: ``bool`` (default ``True``) If ``deep`` is ``True`` then :func:`piecewise_exclusive` will rewrite any :class:`Piecewise` subexpressions in ``expr`` rather than just rewriting ``expr`` itself. Returns ======= An expression equivalent to ``expr`` but where all :class:`Piecewise` have been rewritten with mutually exclusive conditions. See Also ======== Piecewise piecewise_fold """
/usr/src/app/target_test_cases/failed_tests_piecewise_exclusive.txt
def piecewise_exclusive(expr, *, skip_nan=False, deep=True): """ Rewrite :class:`Piecewise` with mutually exclusive conditions. Explanation =========== SymPy represents the conditions of a :class:`Piecewise` in an "if-elif"-fashion, allowing more than one condition to be simultaneously True. The interpretation is that the first condition that is True is the case that holds. While this is a useful representation computationally it is not how a piecewise formula is typically shown in a mathematical text. The :func:`piecewise_exclusive` function can be used to rewrite any :class:`Piecewise` with more typical mutually exclusive conditions. Note that further manipulation of the resulting :class:`Piecewise`, e.g. simplifying it, will most likely make it non-exclusive. Hence, this is primarily a function to be used in conjunction with printing the Piecewise or if one would like to reorder the expression-condition pairs. If it is not possible to determine that all possibilities are covered by the different cases of the :class:`Piecewise` then a final :class:`~sympy.core.numbers.NaN` case will be included explicitly. This can be prevented by passing ``skip_nan=True``. Examples ======== >>> from sympy import piecewise_exclusive, Symbol, Piecewise, S >>> x = Symbol('x', real=True) >>> p = Piecewise((0, x < 0), (S.Half, x <= 0), (1, True)) >>> piecewise_exclusive(p) Piecewise((0, x < 0), (1/2, Eq(x, 0)), (1, x > 0)) >>> piecewise_exclusive(Piecewise((2, x > 1))) Piecewise((2, x > 1), (nan, x <= 1)) >>> piecewise_exclusive(Piecewise((2, x > 1)), skip_nan=True) Piecewise((2, x > 1)) Parameters ========== expr: a SymPy expression. Any :class:`Piecewise` in the expression will be rewritten. skip_nan: ``bool`` (default ``False``) If ``skip_nan`` is set to ``True`` then a final :class:`~sympy.core.numbers.NaN` case will not be included. deep: ``bool`` (default ``True``) If ``deep`` is ``True`` then :func:`piecewise_exclusive` will rewrite any :class:`Piecewise` subexpressions in ``expr`` rather than just rewriting ``expr`` itself. Returns ======= An expression equivalent to ``expr`` but where all :class:`Piecewise` have been rewritten with mutually exclusive conditions. See Also ======== Piecewise piecewise_fold """ def make_exclusive(*pwargs): cumcond = false newargs = [] # Handle the first n-1 cases for expr_i, cond_i in pwargs[:-1]: cancond = And(cond_i, Not(cumcond)).simplify() cumcond = Or(cond_i, cumcond).simplify() newargs.append((expr_i, cancond)) # For the nth case defer simplification of cumcond expr_n, cond_n = pwargs[-1] cancond_n = And(cond_n, Not(cumcond)).simplify() newargs.append((expr_n, cancond_n)) if not skip_nan: cumcond = Or(cond_n, cumcond).simplify() if cumcond is not true: newargs.append((Undefined, Not(cumcond).simplify())) return Piecewise(*newargs, evaluate=False) if deep: return expr.replace(Piecewise, make_exclusive) elif isinstance(expr, Piecewise): return make_exclusive(*expr.args) else: return expr
piecewise_exclusive
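A minimal sketch of the rewrite described above, taken from the docstring's examples; it shows both the exclusive rewrite of an overlapping Piecewise and the effect of skip_nan on a Piecewise that does not cover the whole real line.

from sympy import Piecewise, Symbol, S, piecewise_exclusive

x = Symbol('x', real=True)

p = Piecewise((0, x < 0), (S.Half, x <= 0), (1, True))
print(piecewise_exclusive(p))
# Piecewise((0, x < 0), (1/2, Eq(x, 0)), (1, x > 0))

q = Piecewise((2, x > 1))
print(piecewise_exclusive(q))                 # gains a nan branch for x <= 1
print(piecewise_exclusive(q, skip_nan=True))  # Piecewise((2, x > 1))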
sympy
79
sympy/plotting/plot.py
def plot(*args, show=True, **kwargs): """Plots a function of a single variable as a curve. Parameters ========== args : The first argument is the expression representing the function of single variable to be plotted. The last argument is a 3-tuple denoting the range of the free variable. e.g. ``(x, 0, 5)`` Typical usage examples are in the following: - Plotting a single expression with a single range. ``plot(expr, range, **kwargs)`` - Plotting a single expression with the default range (-10, 10). ``plot(expr, **kwargs)`` - Plotting multiple expressions with a single range. ``plot(expr1, expr2, ..., range, **kwargs)`` - Plotting multiple expressions with multiple ranges. ``plot((expr1, range1), (expr2, range2), ..., **kwargs)`` It is best practice to specify range explicitly because default range may change in the future if a more advanced default range detection algorithm is implemented. show : bool, optional The default value is set to ``True``. Set show to ``False`` and the function will not display the plot. The returned instance of the ``Plot`` class can then be used to save or display the plot by calling the ``save()`` and ``show()`` methods respectively. line_color : string, or float, or function, optional Specifies the color for the plot. See ``Plot`` to see how to set color for the plots. Note that by setting ``line_color``, it would be applied simultaneously to all the series. title : str, optional Title of the plot. It is set to the latex representation of the expression, if the plot has only one expression. label : str, optional The label of the expression in the plot. It will be used when called with ``legend``. Default is the name of the expression. e.g. ``sin(x)`` xlabel : str or expression, optional Label for the x-axis. ylabel : str or expression, optional Label for the y-axis. xscale : 'linear' or 'log', optional Sets the scaling of the x-axis. yscale : 'linear' or 'log', optional Sets the scaling of the y-axis. axis_center : (float, float), optional Tuple of two floats denoting the coordinates of the center or {'center', 'auto'} xlim : (float, float), optional Denotes the x-axis limits, ``(min, max)```. ylim : (float, float), optional Denotes the y-axis limits, ``(min, max)```. annotations : list, optional A list of dictionaries specifying the type of annotation required. The keys in the dictionary should be equivalent to the arguments of the :external:mod:`matplotlib`'s :external:meth:`~matplotlib.axes.Axes.annotate` method. markers : list, optional A list of dictionaries specifying the type the markers required. The keys in the dictionary should be equivalent to the arguments of the :external:mod:`matplotlib`'s :external:func:`~matplotlib.pyplot.plot()` function along with the marker related keyworded arguments. rectangles : list, optional A list of dictionaries specifying the dimensions of the rectangles to be plotted. The keys in the dictionary should be equivalent to the arguments of the :external:mod:`matplotlib`'s :external:class:`~matplotlib.patches.Rectangle` class. fill : dict, optional A dictionary specifying the type of color filling required in the plot. The keys in the dictionary should be equivalent to the arguments of the :external:mod:`matplotlib`'s :external:meth:`~matplotlib.axes.Axes.fill_between` method. adaptive : bool, optional The default value is set to ``True``. Set adaptive to ``False`` and specify ``n`` if uniform sampling is required. The plotting uses an adaptive algorithm which samples recursively to accurately plot. 
The adaptive algorithm uses a random point near the midpoint of two points that has to be further sampled. Hence the same plots can appear slightly different. depth : int, optional Recursion depth of the adaptive algorithm. A depth of value `n` samples a maximum of `2^{n}` points. If the ``adaptive`` flag is set to ``False``, this will be ignored. n : int, optional Used when the ``adaptive`` is set to ``False``. The function is uniformly sampled at ``n`` number of points. If the ``adaptive`` flag is set to ``True``, this will be ignored. This keyword argument replaces ``nb_of_points``, which should be considered deprecated. size : (float, float), optional A tuple in the form (width, height) in inches to specify the size of the overall figure. The default value is set to ``None``, meaning the size will be set by the default backend. Examples ======== .. plot:: :context: close-figs :format: doctest :include-source: True >>> from sympy import symbols >>> from sympy.plotting import plot >>> x = symbols('x') Single Plot .. plot:: :context: close-figs :format: doctest :include-source: True >>> plot(x**2, (x, -5, 5)) Plot object containing: [0]: cartesian line: x**2 for x over (-5.0, 5.0) Multiple plots with single range. .. plot:: :context: close-figs :format: doctest :include-source: True >>> plot(x, x**2, x**3, (x, -5, 5)) Plot object containing: [0]: cartesian line: x for x over (-5.0, 5.0) [1]: cartesian line: x**2 for x over (-5.0, 5.0) [2]: cartesian line: x**3 for x over (-5.0, 5.0) Multiple plots with different ranges. .. plot:: :context: close-figs :format: doctest :include-source: True >>> plot((x**2, (x, -6, 6)), (x, (x, -5, 5))) Plot object containing: [0]: cartesian line: x**2 for x over (-6.0, 6.0) [1]: cartesian line: x for x over (-5.0, 5.0) No adaptive sampling. .. plot:: :context: close-figs :format: doctest :include-source: True >>> plot(x**2, adaptive=False, n=400) Plot object containing: [0]: cartesian line: x**2 for x over (-10.0, 10.0) See Also ======== Plot, LineOver1DRangeSeries """
/usr/src/app/target_test_cases/failed_tests_plot.txt
def plot(*args, show=True, **kwargs): """Plots a function of a single variable as a curve. Parameters ========== args : The first argument is the expression representing the function of single variable to be plotted. The last argument is a 3-tuple denoting the range of the free variable. e.g. ``(x, 0, 5)`` Typical usage examples are in the following: - Plotting a single expression with a single range. ``plot(expr, range, **kwargs)`` - Plotting a single expression with the default range (-10, 10). ``plot(expr, **kwargs)`` - Plotting multiple expressions with a single range. ``plot(expr1, expr2, ..., range, **kwargs)`` - Plotting multiple expressions with multiple ranges. ``plot((expr1, range1), (expr2, range2), ..., **kwargs)`` It is best practice to specify range explicitly because default range may change in the future if a more advanced default range detection algorithm is implemented. show : bool, optional The default value is set to ``True``. Set show to ``False`` and the function will not display the plot. The returned instance of the ``Plot`` class can then be used to save or display the plot by calling the ``save()`` and ``show()`` methods respectively. line_color : string, or float, or function, optional Specifies the color for the plot. See ``Plot`` to see how to set color for the plots. Note that by setting ``line_color``, it would be applied simultaneously to all the series. title : str, optional Title of the plot. It is set to the latex representation of the expression, if the plot has only one expression. label : str, optional The label of the expression in the plot. It will be used when called with ``legend``. Default is the name of the expression. e.g. ``sin(x)`` xlabel : str or expression, optional Label for the x-axis. ylabel : str or expression, optional Label for the y-axis. xscale : 'linear' or 'log', optional Sets the scaling of the x-axis. yscale : 'linear' or 'log', optional Sets the scaling of the y-axis. axis_center : (float, float), optional Tuple of two floats denoting the coordinates of the center or {'center', 'auto'} xlim : (float, float), optional Denotes the x-axis limits, ``(min, max)```. ylim : (float, float), optional Denotes the y-axis limits, ``(min, max)```. annotations : list, optional A list of dictionaries specifying the type of annotation required. The keys in the dictionary should be equivalent to the arguments of the :external:mod:`matplotlib`'s :external:meth:`~matplotlib.axes.Axes.annotate` method. markers : list, optional A list of dictionaries specifying the type the markers required. The keys in the dictionary should be equivalent to the arguments of the :external:mod:`matplotlib`'s :external:func:`~matplotlib.pyplot.plot()` function along with the marker related keyworded arguments. rectangles : list, optional A list of dictionaries specifying the dimensions of the rectangles to be plotted. The keys in the dictionary should be equivalent to the arguments of the :external:mod:`matplotlib`'s :external:class:`~matplotlib.patches.Rectangle` class. fill : dict, optional A dictionary specifying the type of color filling required in the plot. The keys in the dictionary should be equivalent to the arguments of the :external:mod:`matplotlib`'s :external:meth:`~matplotlib.axes.Axes.fill_between` method. adaptive : bool, optional The default value is set to ``True``. Set adaptive to ``False`` and specify ``n`` if uniform sampling is required. The plotting uses an adaptive algorithm which samples recursively to accurately plot. 
The adaptive algorithm uses a random point near the midpoint of two points that has to be further sampled. Hence the same plots can appear slightly different. depth : int, optional Recursion depth of the adaptive algorithm. A depth of value `n` samples a maximum of `2^{n}` points. If the ``adaptive`` flag is set to ``False``, this will be ignored. n : int, optional Used when the ``adaptive`` is set to ``False``. The function is uniformly sampled at ``n`` number of points. If the ``adaptive`` flag is set to ``True``, this will be ignored. This keyword argument replaces ``nb_of_points``, which should be considered deprecated. size : (float, float), optional A tuple in the form (width, height) in inches to specify the size of the overall figure. The default value is set to ``None``, meaning the size will be set by the default backend. Examples ======== .. plot:: :context: close-figs :format: doctest :include-source: True >>> from sympy import symbols >>> from sympy.plotting import plot >>> x = symbols('x') Single Plot .. plot:: :context: close-figs :format: doctest :include-source: True >>> plot(x**2, (x, -5, 5)) Plot object containing: [0]: cartesian line: x**2 for x over (-5.0, 5.0) Multiple plots with single range. .. plot:: :context: close-figs :format: doctest :include-source: True >>> plot(x, x**2, x**3, (x, -5, 5)) Plot object containing: [0]: cartesian line: x for x over (-5.0, 5.0) [1]: cartesian line: x**2 for x over (-5.0, 5.0) [2]: cartesian line: x**3 for x over (-5.0, 5.0) Multiple plots with different ranges. .. plot:: :context: close-figs :format: doctest :include-source: True >>> plot((x**2, (x, -6, 6)), (x, (x, -5, 5))) Plot object containing: [0]: cartesian line: x**2 for x over (-6.0, 6.0) [1]: cartesian line: x for x over (-5.0, 5.0) No adaptive sampling. .. plot:: :context: close-figs :format: doctest :include-source: True >>> plot(x**2, adaptive=False, n=400) Plot object containing: [0]: cartesian line: x**2 for x over (-10.0, 10.0) See Also ======== Plot, LineOver1DRangeSeries """ args = _plot_sympify(args) plot_expr = _check_arguments(args, 1, 1, **kwargs) params = kwargs.get("params", None) free = set() for p in plot_expr: if not isinstance(p[1][0], str): free |= {p[1][0]} else: free |= {Symbol(p[1][0])} if params: free = free.difference(params.keys()) x = free.pop() if free else Symbol("x") kwargs.setdefault('xlabel', x) kwargs.setdefault('ylabel', Function('f')(x)) labels = kwargs.pop("label", []) rendering_kw = kwargs.pop("rendering_kw", None) series = _build_line_series(*plot_expr, **kwargs) _set_labels(series, labels, rendering_kw) plots = plot_factory(*series, **kwargs) if show: plots.show() return plots
plot
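A usage sketch for plot, assuming the Matplotlib backend is available; show=False returns the Plot object without opening a window, and the output filename here is purely illustrative.

from sympy import symbols
from sympy.plotting import plot

x = symbols('x')
p = plot(x**2, (x, -5, 5), show=False)  # build the Plot object without displaying it
p.save('parabola.png')                  # hypothetical filename; save() writes via the backend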
sympy
80
sympy/ntheory/factor_.py
def pollard_pm1(n, B=10, a=2, retries=0, seed=1234): """ Use Pollard's p-1 method to try to extract a nontrivial factor of ``n``. Either a divisor (perhaps composite) or ``None`` is returned. The value of ``a`` is the base that is used in the test gcd(a**M - 1, n). The default is 2. If ``retries`` > 0 then if no factor is found after the first attempt, a new ``a`` will be generated randomly (using the ``seed``) and the process repeated. Note: the value of M is lcm(1..B) = reduce(ilcm, range(2, B + 1)). A search is made for factors next to even numbers having a power smoothness less than ``B``. Choosing a larger B increases the likelihood of finding a larger factor but takes longer. Whether a factor of n is found or not depends on ``a`` and the power smoothness of the even number just less than the factor p (hence the name p - 1). Although some discussion of what constitutes a good ``a`` some descriptions are hard to interpret. At the modular.math site referenced below it is stated that if gcd(a**M - 1, n) = N then a**M % q**r is 1 for every prime power divisor of N. But consider the following: >>> from sympy.ntheory.factor_ import smoothness_p, pollard_pm1 >>> n=257*1009 >>> smoothness_p(n) (-1, [(257, (1, 2, 256)), (1009, (1, 7, 16))]) So we should (and can) find a root with B=16: >>> pollard_pm1(n, B=16, a=3) 1009 If we attempt to increase B to 256 we find that it does not work: >>> pollard_pm1(n, B=256) >>> But if the value of ``a`` is changed we find that only multiples of 257 work, e.g.: >>> pollard_pm1(n, B=256, a=257) 1009 Checking different ``a`` values shows that all the ones that did not work had a gcd value not equal to ``n`` but equal to one of the factors: >>> from sympy import ilcm, igcd, factorint, Pow >>> M = 1 >>> for i in range(2, 256): ... M = ilcm(M, i) ... >>> set([igcd(pow(a, M, n) - 1, n) for a in range(2, 256) if ... igcd(pow(a, M, n) - 1, n) != n]) {1009} But does aM % d for every divisor of n give 1? >>> aM = pow(255, M, n) >>> [(d, aM%Pow(*d.args)) for d in factorint(n, visual=True).args] [(257**1, 1), (1009**1, 1)] No, only one of them. So perhaps the principle is that a root will be found for a given value of B provided that: 1) the power smoothness of the p - 1 value next to the root does not exceed B 2) a**M % p != 1 for any of the divisors of n. By trying more than one ``a`` it is possible that one of them will yield a factor. Examples ======== With the default smoothness bound, this number cannot be cracked: >>> from sympy.ntheory import pollard_pm1 >>> pollard_pm1(21477639576571) Increasing the smoothness bound helps: >>> pollard_pm1(21477639576571, B=2000) 4410317 Looking at the smoothness of the factors of this number we find: >>> from sympy.ntheory.factor_ import smoothness_p, factorint >>> print(smoothness_p(21477639576571, visual=1)) p**i=4410317**1 has p-1 B=1787, B-pow=1787 p**i=4869863**1 has p-1 B=2434931, B-pow=2434931 The B and B-pow are the same for the p - 1 factorizations of the divisors because those factorizations had a very large prime factor: >>> factorint(4410317 - 1) {2: 2, 617: 1, 1787: 1} >>> factorint(4869863-1) {2: 1, 2434931: 1} Note that until B reaches the B-pow value of 1787, the number is not cracked; >>> pollard_pm1(21477639576571, B=1786) >>> pollard_pm1(21477639576571, B=1787) 4410317 The B value has to do with the factors of the number next to the divisor, not the divisors themselves. A worst case scenario is that the number next to the factor p has a large prime divisisor or is a perfect power. 
If these conditions apply then the power-smoothness will be about p/2 or p. The more realistic is that there will be a large prime factor next to p requiring a B value on the order of p/2. Although primes may have been searched for up to this level, the p/2 is a factor of p - 1, something that we do not know. The modular.math reference below states that 15% of numbers in the range of 10**15 to 15**15 + 10**4 are 10**6 power smooth so a B of 10**6 will fail 85% of the time in that range. From 10**8 to 10**8 + 10**3 the percentages are nearly reversed...but in that range the simple trial division is quite fast. References ========== .. [1] Richard Crandall & Carl Pomerance (2005), "Prime Numbers: A Computational Perspective", Springer, 2nd edition, 236-238 .. [2] https://web.archive.org/web/20150716201437/http://modular.math.washington.edu/edu/2007/spring/ent/ent-html/node81.html .. [3] https://www.cs.toronto.edu/~yuvalf/Factorization.pdf """
/usr/src/app/target_test_cases/failed_tests_pollard_pm1.txt
def pollard_pm1(n, B=10, a=2, retries=0, seed=1234): """ Use Pollard's p-1 method to try to extract a nontrivial factor of ``n``. Either a divisor (perhaps composite) or ``None`` is returned. The value of ``a`` is the base that is used in the test gcd(a**M - 1, n). The default is 2. If ``retries`` > 0 then if no factor is found after the first attempt, a new ``a`` will be generated randomly (using the ``seed``) and the process repeated. Note: the value of M is lcm(1..B) = reduce(ilcm, range(2, B + 1)). A search is made for factors next to even numbers having a power smoothness less than ``B``. Choosing a larger B increases the likelihood of finding a larger factor but takes longer. Whether a factor of n is found or not depends on ``a`` and the power smoothness of the even number just less than the factor p (hence the name p - 1). Although some discussion of what constitutes a good ``a`` some descriptions are hard to interpret. At the modular.math site referenced below it is stated that if gcd(a**M - 1, n) = N then a**M % q**r is 1 for every prime power divisor of N. But consider the following: >>> from sympy.ntheory.factor_ import smoothness_p, pollard_pm1 >>> n=257*1009 >>> smoothness_p(n) (-1, [(257, (1, 2, 256)), (1009, (1, 7, 16))]) So we should (and can) find a root with B=16: >>> pollard_pm1(n, B=16, a=3) 1009 If we attempt to increase B to 256 we find that it does not work: >>> pollard_pm1(n, B=256) >>> But if the value of ``a`` is changed we find that only multiples of 257 work, e.g.: >>> pollard_pm1(n, B=256, a=257) 1009 Checking different ``a`` values shows that all the ones that did not work had a gcd value not equal to ``n`` but equal to one of the factors: >>> from sympy import ilcm, igcd, factorint, Pow >>> M = 1 >>> for i in range(2, 256): ... M = ilcm(M, i) ... >>> set([igcd(pow(a, M, n) - 1, n) for a in range(2, 256) if ... igcd(pow(a, M, n) - 1, n) != n]) {1009} But does aM % d for every divisor of n give 1? >>> aM = pow(255, M, n) >>> [(d, aM%Pow(*d.args)) for d in factorint(n, visual=True).args] [(257**1, 1), (1009**1, 1)] No, only one of them. So perhaps the principle is that a root will be found for a given value of B provided that: 1) the power smoothness of the p - 1 value next to the root does not exceed B 2) a**M % p != 1 for any of the divisors of n. By trying more than one ``a`` it is possible that one of them will yield a factor. Examples ======== With the default smoothness bound, this number cannot be cracked: >>> from sympy.ntheory import pollard_pm1 >>> pollard_pm1(21477639576571) Increasing the smoothness bound helps: >>> pollard_pm1(21477639576571, B=2000) 4410317 Looking at the smoothness of the factors of this number we find: >>> from sympy.ntheory.factor_ import smoothness_p, factorint >>> print(smoothness_p(21477639576571, visual=1)) p**i=4410317**1 has p-1 B=1787, B-pow=1787 p**i=4869863**1 has p-1 B=2434931, B-pow=2434931 The B and B-pow are the same for the p - 1 factorizations of the divisors because those factorizations had a very large prime factor: >>> factorint(4410317 - 1) {2: 2, 617: 1, 1787: 1} >>> factorint(4869863-1) {2: 1, 2434931: 1} Note that until B reaches the B-pow value of 1787, the number is not cracked; >>> pollard_pm1(21477639576571, B=1786) >>> pollard_pm1(21477639576571, B=1787) 4410317 The B value has to do with the factors of the number next to the divisor, not the divisors themselves. A worst case scenario is that the number next to the factor p has a large prime divisisor or is a perfect power. 
If these conditions apply then the power-smoothness will be about p/2 or p. The more realistic is that there will be a large prime factor next to p requiring a B value on the order of p/2. Although primes may have been searched for up to this level, the p/2 is a factor of p - 1, something that we do not know. The modular.math reference below states that 15% of numbers in the range of 10**15 to 15**15 + 10**4 are 10**6 power smooth so a B of 10**6 will fail 85% of the time in that range. From 10**8 to 10**8 + 10**3 the percentages are nearly reversed...but in that range the simple trial division is quite fast. References ========== .. [1] Richard Crandall & Carl Pomerance (2005), "Prime Numbers: A Computational Perspective", Springer, 2nd edition, 236-238 .. [2] https://web.archive.org/web/20150716201437/http://modular.math.washington.edu/edu/2007/spring/ent/ent-html/node81.html .. [3] https://www.cs.toronto.edu/~yuvalf/Factorization.pdf """ n = int(n) if n < 4 or B < 3: raise ValueError('pollard_pm1 should receive n > 3 and B > 2') randint = _randint(seed + B) # computing a**lcm(1,2,3,..B) % n for B > 2 # it looks weird, but it's right: primes run [2, B] # and the answer's not right until the loop is done. for i in range(retries + 1): aM = a for p in sieve.primerange(2, B + 1): e = int(math.log(B, p)) aM = pow(aM, pow(p, e), n) g = gcd(aM - 1, n) if 1 < g < n: return int(g) # get a new a: # since the exponent, lcm(1..B), is even, if we allow 'a' to be 'n-1' # then (n - 1)**even % n will be 1 which will give a g of 0 and 1 will # give a zero, too, so we set the range as [2, n-2]. Some references # say 'a' should be coprime to n, but either will detect factors. a = randint(2, n - 2)
pollard_pm1
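For quick reference, a minimal usage sketch of pollard_pm1, using only the calls and outputs quoted in the docstring above (the default bound fails and returns None; raising B recovers the documented factor):

from sympy.ntheory import pollard_pm1

# Default smoothness bound B=10 is too small for this composite: returns None.
print(pollard_pm1(21477639576571))
# Raising the bound past the documented B-pow value finds the factor 4410317.
print(pollard_pm1(21477639576571, B=2000))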
sympy
81
sympy/simplify/powsimp.py
def powsimp(expr, deep=False, combine='all', force=False, measure=count_ops): """ Reduce expression by combining powers with similar bases and exponents. Explanation =========== If ``deep`` is ``True`` then powsimp() will also simplify arguments of functions. By default ``deep`` is set to ``False``. If ``force`` is ``True`` then bases will be combined without checking for assumptions, e.g. sqrt(x)*sqrt(y) -> sqrt(x*y) which is not true if x and y are both negative. You can make powsimp() only combine bases or only combine exponents by changing combine='base' or combine='exp'. By default, combine='all', which does both. combine='base' will only combine:: a a a 2x x x * y => (x*y) as well as things like 2 => 4 and combine='exp' will only combine :: a b (a + b) x * x => x combine='exp' will strictly only combine exponents in the way that used to be automatic. Also use deep=True if you need the old behavior. When combine='all', 'exp' is evaluated first. Consider the first example below for when there could be an ambiguity relating to this. This is done so things like the second example can be completely combined. If you want 'base' combined first, do something like powsimp(powsimp(expr, combine='base'), combine='exp'). Examples ======== >>> from sympy import powsimp, exp, log, symbols >>> from sympy.abc import x, y, z, n >>> powsimp(x**y*x**z*y**z, combine='all') x**(y + z)*y**z >>> powsimp(x**y*x**z*y**z, combine='exp') x**(y + z)*y**z >>> powsimp(x**y*x**z*y**z, combine='base', force=True) x**y*(x*y)**z >>> powsimp(x**z*x**y*n**z*n**y, combine='all', force=True) (n*x)**(y + z) >>> powsimp(x**z*x**y*n**z*n**y, combine='exp') n**(y + z)*x**(y + z) >>> powsimp(x**z*x**y*n**z*n**y, combine='base', force=True) (n*x)**y*(n*x)**z >>> x, y = symbols('x y', positive=True) >>> powsimp(log(exp(x)*exp(y))) log(exp(x)*exp(y)) >>> powsimp(log(exp(x)*exp(y)), deep=True) x + y Radicals with Mul bases will be combined if combine='exp' >>> from sympy import sqrt >>> x, y = symbols('x y') Two radicals are automatically joined through Mul: >>> a=sqrt(x*sqrt(y)) >>> a*a**3 == a**4 True But if an integer power of that radical has been autoexpanded then Mul does not join the resulting factors: >>> a**4 # auto expands to a Mul, no longer a Pow x**2*y >>> _*a # so Mul doesn't combine them x**2*y*sqrt(x*sqrt(y)) >>> powsimp(_) # but powsimp will (x*sqrt(y))**(5/2) >>> powsimp(x*y*a) # but won't when doing so would violate assumptions x*y*sqrt(x*sqrt(y)) """
/usr/src/app/target_test_cases/failed_tests_powsimp.txt
def powsimp(expr, deep=False, combine='all', force=False, measure=count_ops): """ Reduce expression by combining powers with similar bases and exponents. Explanation =========== If ``deep`` is ``True`` then powsimp() will also simplify arguments of functions. By default ``deep`` is set to ``False``. If ``force`` is ``True`` then bases will be combined without checking for assumptions, e.g. sqrt(x)*sqrt(y) -> sqrt(x*y) which is not true if x and y are both negative. You can make powsimp() only combine bases or only combine exponents by changing combine='base' or combine='exp'. By default, combine='all', which does both. combine='base' will only combine:: a a a 2x x x * y => (x*y) as well as things like 2 => 4 and combine='exp' will only combine :: a b (a + b) x * x => x combine='exp' will strictly only combine exponents in the way that used to be automatic. Also use deep=True if you need the old behavior. When combine='all', 'exp' is evaluated first. Consider the first example below for when there could be an ambiguity relating to this. This is done so things like the second example can be completely combined. If you want 'base' combined first, do something like powsimp(powsimp(expr, combine='base'), combine='exp'). Examples ======== >>> from sympy import powsimp, exp, log, symbols >>> from sympy.abc import x, y, z, n >>> powsimp(x**y*x**z*y**z, combine='all') x**(y + z)*y**z >>> powsimp(x**y*x**z*y**z, combine='exp') x**(y + z)*y**z >>> powsimp(x**y*x**z*y**z, combine='base', force=True) x**y*(x*y)**z >>> powsimp(x**z*x**y*n**z*n**y, combine='all', force=True) (n*x)**(y + z) >>> powsimp(x**z*x**y*n**z*n**y, combine='exp') n**(y + z)*x**(y + z) >>> powsimp(x**z*x**y*n**z*n**y, combine='base', force=True) (n*x)**y*(n*x)**z >>> x, y = symbols('x y', positive=True) >>> powsimp(log(exp(x)*exp(y))) log(exp(x)*exp(y)) >>> powsimp(log(exp(x)*exp(y)), deep=True) x + y Radicals with Mul bases will be combined if combine='exp' >>> from sympy import sqrt >>> x, y = symbols('x y') Two radicals are automatically joined through Mul: >>> a=sqrt(x*sqrt(y)) >>> a*a**3 == a**4 True But if an integer power of that radical has been autoexpanded then Mul does not join the resulting factors: >>> a**4 # auto expands to a Mul, no longer a Pow x**2*y >>> _*a # so Mul doesn't combine them x**2*y*sqrt(x*sqrt(y)) >>> powsimp(_) # but powsimp will (x*sqrt(y))**(5/2) >>> powsimp(x*y*a) # but won't when doing so would violate assumptions x*y*sqrt(x*sqrt(y)) """ def recurse(arg, **kwargs): _deep = kwargs.get('deep', deep) _combine = kwargs.get('combine', combine) _force = kwargs.get('force', force) _measure = kwargs.get('measure', measure) return powsimp(arg, _deep, _combine, _force, _measure) expr = sympify(expr) if (not isinstance(expr, Basic) or isinstance(expr, MatrixSymbol) or ( expr.is_Atom or expr in (exp_polar(0), exp_polar(1)))): return expr if deep or expr.is_Add or expr.is_Mul and _y not in expr.args: expr = expr.func(*[recurse(w) for w in expr.args]) if expr.is_Pow: return recurse(expr*_y, deep=False)/_y if not expr.is_Mul: return expr # handle the Mul if combine in ('exp', 'all'): # Collect base/exp data, while maintaining order in the # non-commutative parts of the product c_powers = defaultdict(list) nc_part = [] newexpr = [] coeff = S.One for term in expr.args: if term.is_Rational: coeff *= term continue if term.is_Pow: term = _denest_pow(term) if term.is_commutative: b, e = term.as_base_exp() if deep: b, e = [recurse(i) for i in [b, e]] if b.is_Pow or isinstance(b, exp): # don't let smthg like sqrt(x**a) 
split into x**a, 1/2 # or else it will be joined as x**(a/2) later b, e = b**e, S.One c_powers[b].append(e) else: # This is the logic that combines exponents for equal, # but non-commutative bases: A**x*A**y == A**(x+y). if nc_part: b1, e1 = nc_part[-1].as_base_exp() b2, e2 = term.as_base_exp() if (b1 == b2 and e1.is_commutative and e2.is_commutative): nc_part[-1] = Pow(b1, Add(e1, e2)) continue nc_part.append(term) # add up exponents of common bases for b, e in ordered(iter(c_powers.items())): # allow 2**x/4 -> 2**(x - 2); don't do this when b and e are # Numbers since autoevaluation will undo it, e.g. # 2**(1/3)/4 -> 2**(1/3 - 2) -> 2**(1/3)/4 if (b and b.is_Rational and not all(ei.is_Number for ei in e) and \ coeff is not S.One and b not in (S.One, S.NegativeOne)): m = multiplicity(abs(b), abs(coeff)) if m: e.append(m) coeff /= b**m c_powers[b] = Add(*e) if coeff is not S.One: if coeff in c_powers: c_powers[coeff] += S.One else: c_powers[coeff] = S.One # convert to plain dictionary c_powers = dict(c_powers) # check for base and inverted base pairs be = list(c_powers.items()) skip = set() # skip if we already saw them for b, e in be: if b in skip: continue bpos = b.is_positive or b.is_polar if bpos: binv = 1/b if b != binv and binv in c_powers: if b.as_numer_denom()[0] is S.One: c_powers.pop(b) c_powers[binv] -= e else: skip.add(binv) e = c_powers.pop(binv) c_powers[b] -= e # check for base and negated base pairs be = list(c_powers.items()) _n = S.NegativeOne for b, e in be: if (b.is_Symbol or b.is_Add) and -b in c_powers and b in c_powers: if (b.is_positive is not None or e.is_integer): if e.is_integer or b.is_negative: c_powers[-b] += c_powers.pop(b) else: # (-b).is_positive so use its e e = c_powers.pop(-b) c_powers[b] += e if _n in c_powers: c_powers[_n] += e else: c_powers[_n] = e # filter c_powers and convert to a list c_powers = [(b, e) for b, e in c_powers.items() if e] # ============================================================== # check for Mul bases of Rational powers that can be combined with # separated bases, e.g. x*sqrt(x*y)*sqrt(x*sqrt(x*y)) -> # (x*sqrt(x*y))**(3/2) # ---------------- helper functions def ratq(x): '''Return Rational part of x's exponent as it appears in the bkey. ''' return bkey(x)[0][1] def bkey(b, e=None): '''Return (b**s, c.q), c.p where e -> c*s. If e is not given then it will be taken by using as_base_exp() on the input b. e.g. x**3/2 -> (x, 2), 3 x**y -> (x**y, 1), 1 x**(2*y/3) -> (x**y, 3), 2 exp(x/2) -> (exp(a), 2), 1 ''' if e is not None: # coming from c_powers or from below if e.is_Integer: return (b, S.One), e elif e.is_Rational: return (b, Integer(e.q)), Integer(e.p) else: c, m = e.as_coeff_Mul(rational=True) if c is not S.One: if m.is_integer: return (b, Integer(c.q)), m*Integer(c.p) return (b**m, Integer(c.q)), Integer(c.p) else: return (b**e, S.One), S.One else: return bkey(*b.as_base_exp()) def update(b): '''Decide what to do with base, b. If its exponent is now an integer multiple of the Rational denominator, then remove it and put the factors of its base in the common_b dictionary or update the existing bases if necessary. If it has been zeroed out, simply remove the base. 
''' newe, r = divmod(common_b[b], b[1]) if not r: common_b.pop(b) if newe: for m in Mul.make_args(b[0]**newe): b, e = bkey(m) if b not in common_b: common_b[b] = 0 common_b[b] += e if b[1] != 1: bases.append(b) # ---------------- end of helper functions # assemble a dictionary of the factors having a Rational power common_b = {} done = [] bases = [] for b, e in c_powers: b, e = bkey(b, e) if b in common_b: common_b[b] = common_b[b] + e else: common_b[b] = e if b[1] != 1 and b[0].is_Mul: bases.append(b) bases.sort(key=default_sort_key) # this makes tie-breaking canonical bases.sort(key=measure, reverse=True) # handle longest first for base in bases: if base not in common_b: # it may have been removed already continue b, exponent = base last = False # True when no factor of base is a radical qlcm = 1 # the lcm of the radical denominators while True: bstart = b qstart = qlcm bb = [] # list of factors ee = [] # (factor's expo. and it's current value in common_b) for bi in Mul.make_args(b): bib, bie = bkey(bi) if bib not in common_b or common_b[bib] < bie: ee = bb = [] # failed break ee.append([bie, common_b[bib]]) bb.append(bib) if ee: # find the number of integral extractions possible # e.g. [(1, 2), (2, 2)] -> min(2/1, 2/2) -> 1 min1 = ee[0][1]//ee[0][0] for i in range(1, len(ee)): rat = ee[i][1]//ee[i][0] if rat < 1: break min1 = min(min1, rat) else: # update base factor counts # e.g. if ee = [(2, 5), (3, 6)] then min1 = 2 # and the new base counts will be 5-2*2 and 6-2*3 for i in range(len(bb)): common_b[bb[i]] -= min1*ee[i][0] update(bb[i]) # update the count of the base # e.g. x**2*y*sqrt(x*sqrt(y)) the count of x*sqrt(y) # will increase by 4 to give bkey (x*sqrt(y), 2, 5) common_b[base] += min1*qstart*exponent if (last # no more radicals in base or len(common_b) == 1 # nothing left to join with or all(k[1] == 1 for k in common_b) # no rad's in common_b ): break # see what we can exponentiate base by to remove any radicals # so we know what to search for # e.g. if base were x**(1/2)*y**(1/3) then we should # exponentiate by 6 and look for powers of x and y in the ratio # of 2 to 3 qlcm = lcm([ratq(bi) for bi in Mul.make_args(bstart)]) if qlcm == 1: break # we are done b = bstart**qlcm qlcm *= qstart if all(ratq(bi) == 1 for bi in Mul.make_args(b)): last = True # we are going to be done after this next pass # this base no longer can find anything to join with and # since it was longer than any other we are done with it b, q = base done.append((b, common_b.pop(base)*Rational(1, q))) # update c_powers and get ready to continue with powsimp c_powers = done # there may be terms still in common_b that were bases that were # identified as needing processing, so remove those, too for (b, q), e in common_b.items(): if (b.is_Pow or isinstance(b, exp)) and \ q is not S.One and not b.exp.is_Rational: b, be = b.as_base_exp() b = b**(be/q) else: b = root(b, q) c_powers.append((b, e)) check = len(c_powers) c_powers = dict(c_powers) assert len(c_powers) == check # there should have been no duplicates # ============================================================== # rebuild the expression newexpr = expr.func(*(newexpr + [Pow(b, e) for b, e in c_powers.items()])) if combine == 'exp': return expr.func(newexpr, expr.func(*nc_part)) else: return recurse(expr.func(*nc_part), combine='base') * \ recurse(newexpr, combine='base') elif combine == 'base': # Build c_powers and nc_part. These must both be lists not # dicts because exp's are not combined. 
c_powers = [] nc_part = [] for term in expr.args: if term.is_commutative: c_powers.append(list(term.as_base_exp())) else: nc_part.append(term) # Pull out numerical coefficients from exponent if assumptions allow # e.g., 2**(2*x) => 4**x for i in range(len(c_powers)): b, e = c_powers[i] if not (all(x.is_nonnegative for x in b.as_numer_denom()) or e.is_integer or force or b.is_polar): continue exp_c, exp_t = e.as_coeff_Mul(rational=True) if exp_c is not S.One and exp_t is not S.One: c_powers[i] = [Pow(b, exp_c), exp_t] # Combine bases whenever they have the same exponent and # assumptions allow # first gather the potential bases under the common exponent c_exp = defaultdict(list) for b, e in c_powers: if deep: e = recurse(e) if e.is_Add and (b.is_positive or e.is_integer): e = factor_terms(e) if _coeff_isneg(e): e = -e b = 1/b c_exp[e].append(b) del c_powers # Merge back in the results of the above to form a new product c_powers = defaultdict(list) for e in c_exp: bases = c_exp[e] # calculate the new base for e if len(bases) == 1: new_base = bases[0] elif e.is_integer or force: new_base = expr.func(*bases) else: # see which ones can be joined unk = [] nonneg = [] neg = [] for bi in bases: if bi.is_negative: neg.append(bi) elif bi.is_nonnegative: nonneg.append(bi) elif bi.is_polar: nonneg.append( bi) # polar can be treated like non-negative else: unk.append(bi) if len(unk) == 1 and not neg or len(neg) == 1 and not unk: # a single neg or a single unk can join the rest nonneg.extend(unk + neg) unk = neg = [] elif neg: # their negative signs cancel in groups of 2*q if we know # that e = p/q else we have to treat them as unknown israt = False if e.is_Rational: israt = True else: p, d = e.as_numer_denom() if p.is_integer and d.is_integer: israt = True if israt: neg = [-w for w in neg] unk.extend([S.NegativeOne]*len(neg)) else: unk.extend(neg) neg = [] del israt # these shouldn't be joined for b in unk: c_powers[b].append(e) # here is a new joined base new_base = expr.func(*(nonneg + neg)) # if there are positive parts they will just get separated # again unless some change is made def _terms(e): # return the number of terms of this expression # when multiplied out -- assuming no joining of terms if e.is_Add: return sum(_terms(ai) for ai in e.args) if e.is_Mul: return prod([_terms(mi) for mi in e.args]) return 1 xnew_base = expand_mul(new_base, deep=False) if len(Add.make_args(xnew_base)) < _terms(new_base): new_base = factor_terms(xnew_base) c_powers[new_base].append(e) # break out the powers from c_powers now c_part = [Pow(b, ei) for b, e in c_powers.items() for ei in e] # we're done return expr.func(*(c_part + nc_part)) else: raise ValueError("combine must be one of ('all', 'exp', 'base').")
powsimp
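A minimal usage sketch for the powsimp entry above; the calls and printed results are the ones shown in its doctests (combine='all' is the default):

from sympy import powsimp, symbols

x, y, z = symbols('x y z')
# Exponents over a common base are combined.
print(powsimp(x**y*x**z*y**z))                              # x**(y + z)*y**z
# Bases sharing an exponent are merged; force=True skips the sign assumptions.
print(powsimp(x**y*x**z*y**z, combine='base', force=True))  # x**y*(x*y)**z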
sympy
82
sympy/series/formal.py
def rational_algorithm(f, x, k, order=4, full=False): """ Rational algorithm for computing formula of coefficients of Formal Power Series of a function. Explanation =========== Applicable when f(x) or some derivative of f(x) is a rational function in x. :func:`rational_algorithm` uses :func:`~.apart` function for partial fraction decomposition. :func:`~.apart` by default uses 'undetermined coefficients method'. By setting ``full=True``, 'Bronstein's algorithm' can be used instead. Looks for derivative of a function up to 4'th order (by default). This can be overridden using order option. Parameters ========== x : Symbol order : int, optional Order of the derivative of ``f``, Default is 4. full : bool Returns ======= formula : Expr ind : Expr Independent terms. order : int full : bool Examples ======== >>> from sympy import log, atan >>> from sympy.series.formal import rational_algorithm as ra >>> from sympy.abc import x, k >>> ra(1 / (1 - x), x, k) (1, 0, 0) >>> ra(log(1 + x), x, k) (-1/((-1)**k*k), 0, 1) >>> ra(atan(x), x, k, full=True) ((-I/(2*(-I)**k) + I/(2*I**k))/k, 0, 1) Notes ===== By setting ``full=True``, range of admissible functions to be solved using ``rational_algorithm`` can be increased. This option should be used carefully as it can significantly slow down the computation as ``doit`` is performed on the :class:`~.RootSum` object returned by the :func:`~.apart` function. Use ``full=False`` whenever possible. See Also ======== sympy.polys.partfrac.apart References ========== .. [1] Formal Power Series - Dominik Gruntz, Wolfram Koepf .. [2] Power Series in Computer Algebra - Wolfram Koepf """
/usr/src/app/target_test_cases/failed_tests_rational_algorithm.txt
def rational_algorithm(f, x, k, order=4, full=False): """ Rational algorithm for computing formula of coefficients of Formal Power Series of a function. Explanation =========== Applicable when f(x) or some derivative of f(x) is a rational function in x. :func:`rational_algorithm` uses :func:`~.apart` function for partial fraction decomposition. :func:`~.apart` by default uses 'undetermined coefficients method'. By setting ``full=True``, 'Bronstein's algorithm' can be used instead. Looks for derivative of a function up to 4'th order (by default). This can be overridden using order option. Parameters ========== x : Symbol order : int, optional Order of the derivative of ``f``, Default is 4. full : bool Returns ======= formula : Expr ind : Expr Independent terms. order : int full : bool Examples ======== >>> from sympy import log, atan >>> from sympy.series.formal import rational_algorithm as ra >>> from sympy.abc import x, k >>> ra(1 / (1 - x), x, k) (1, 0, 0) >>> ra(log(1 + x), x, k) (-1/((-1)**k*k), 0, 1) >>> ra(atan(x), x, k, full=True) ((-I/(2*(-I)**k) + I/(2*I**k))/k, 0, 1) Notes ===== By setting ``full=True``, range of admissible functions to be solved using ``rational_algorithm`` can be increased. This option should be used carefully as it can significantly slow down the computation as ``doit`` is performed on the :class:`~.RootSum` object returned by the :func:`~.apart` function. Use ``full=False`` whenever possible. See Also ======== sympy.polys.partfrac.apart References ========== .. [1] Formal Power Series - Dominik Gruntz, Wolfram Koepf .. [2] Power Series in Computer Algebra - Wolfram Koepf """ from sympy.polys import RootSum, apart from sympy.integrals import integrate diff = f ds = [] # list of diff for i in range(order + 1): if i: diff = diff.diff(x) if diff.is_rational_function(x): coeff, sep = S.Zero, S.Zero terms = apart(diff, x, full=full) if terms.has(RootSum): terms = terms.doit() for t in Add.make_args(terms): num, den = t.as_numer_denom() if not den.has(x): sep += t else: if isinstance(den, Mul): # m*(n*x - a)**j -> (n*x - a)**j ind = den.as_independent(x) den = ind[1] num /= ind[0] # (n*x - a)**j -> (x - b) den, j = den.as_base_exp() a, xterm = den.as_coeff_add(x) # term -> m/x**n if not a: sep += t continue xc = xterm[0].coeff(x) a /= -xc num /= xc**j ak = ((-1)**j * num * binomial(j + k - 1, k).rewrite(factorial) / a**(j + k)) coeff += ak # Hacky, better way? if coeff.is_zero: return None if (coeff.has(x) or coeff.has(zoo) or coeff.has(oo) or coeff.has(nan)): return None for j in range(i): coeff = (coeff / (k + j + 1)) sep = integrate(sep, x) sep += (ds.pop() - sep).limit(x, 0) # constant of integration return (coeff.subs(k, k - i), sep, i) else: ds.append(diff) return None
rational_algorithm
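A short sketch of calling rational_algorithm directly, mirroring the doctests quoted above; each call returns (coefficient formula, independent terms, derivative order), or None when the method does not apply:

from sympy import log, atan
from sympy.series.formal import rational_algorithm
from sympy.abc import x, k

print(rational_algorithm(1 / (1 - x), x, k))         # (1, 0, 0)
print(rational_algorithm(log(1 + x), x, k))          # (-1/((-1)**k*k), 0, 1)
# full=True enables the Bronstein-style partial fractions via apart(..., full=True).
print(rational_algorithm(atan(x), x, k, full=True))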
sympy
83
sympy/utilities/misc.py
def rawlines(s): """Return a cut-and-pastable string that, when printed, is equivalent to the input. Use this when there is more than one line in the string. The string returned is formatted so it can be indented nicely within tests; in some cases it is wrapped in the dedent function which has to be imported from textwrap. Examples ======== Note: because there are characters in the examples below that need to be escaped because they are themselves within a triple quoted docstring, expressions below look more complicated than they would be if they were printed in an interpreter window. >>> from sympy.utilities.misc import rawlines >>> from sympy import TableForm >>> s = str(TableForm([[1, 10]], headings=(None, ['a', 'bee']))) >>> print(rawlines(s)) ( 'a bee\\n' '-----\\n' '1 10 ' ) >>> print(rawlines('''this ... that''')) dedent('''\\ this that''') >>> print(rawlines('''this ... that ... ''')) dedent('''\\ this that ''') >>> s = \"\"\"this ... is a triple ''' ... \"\"\" >>> print(rawlines(s)) dedent(\"\"\"\\ this is a triple ''' \"\"\") >>> print(rawlines('''this ... that ... ''')) ( 'this\\n' 'that\\n' ' ' ) See Also ======== filldedent, strlines """
/usr/src/app/target_test_cases/failed_tests_rawlines.txt
def rawlines(s): """Return a cut-and-pastable string that, when printed, is equivalent to the input. Use this when there is more than one line in the string. The string returned is formatted so it can be indented nicely within tests; in some cases it is wrapped in the dedent function which has to be imported from textwrap. Examples ======== Note: because there are characters in the examples below that need to be escaped because they are themselves within a triple quoted docstring, expressions below look more complicated than they would be if they were printed in an interpreter window. >>> from sympy.utilities.misc import rawlines >>> from sympy import TableForm >>> s = str(TableForm([[1, 10]], headings=(None, ['a', 'bee']))) >>> print(rawlines(s)) ( 'a bee\\n' '-----\\n' '1 10 ' ) >>> print(rawlines('''this ... that''')) dedent('''\\ this that''') >>> print(rawlines('''this ... that ... ''')) dedent('''\\ this that ''') >>> s = \"\"\"this ... is a triple ''' ... \"\"\" >>> print(rawlines(s)) dedent(\"\"\"\\ this is a triple ''' \"\"\") >>> print(rawlines('''this ... that ... ''')) ( 'this\\n' 'that\\n' ' ' ) See Also ======== filldedent, strlines """ lines = s.split('\n') if len(lines) == 1: return repr(lines[0]) triple = ["'''" in s, '"""' in s] if any(li.endswith(' ') for li in lines) or '\\' in s or all(triple): rv = [] # add on the newlines trailing = s.endswith('\n') last = len(lines) - 1 for i, li in enumerate(lines): if i != last or trailing: rv.append(repr(li + '\n')) else: rv.append(repr(li)) return '(\n %s\n)' % '\n '.join(rv) else: rv = '\n '.join(lines) if triple[0]: return 'dedent("""\\\n %s""")' % rv else: return "dedent('''\\\n %s''')" % rv
rawlines
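A small usage sketch for rawlines, reusing the TableForm doctest from the docstring above; the output is a cut-and-pastable block of repr'd lines because the table rows end in spaces:

from sympy import TableForm
from sympy.utilities.misc import rawlines

s = str(TableForm([[1, 10]], headings=(None, ['a', 'bee'])))
print(rawlines(s))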
sympy
84
sympy/printing/rcode.py
def rcode(expr, assign_to=None, **settings): """Converts an expr to a string of r code Parameters ========== expr : Expr A SymPy expression to be converted. assign_to : optional When given, the argument is used as the name of the variable to which the expression is assigned. Can be a string, ``Symbol``, ``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of line-wrapping, or for expressions that generate multi-line statements. precision : integer, optional The precision for numbers such as pi [default=15]. user_functions : dict, optional A dictionary where the keys are string representations of either ``FunctionClass`` or ``UndefinedFunction`` instances and the values are their desired R string representations. Alternatively, the dictionary value can be a list of tuples i.e. [(argument_test, rfunction_string)] or [(argument_test, rfunction_formater)]. See below for examples. human : bool, optional If True, the result is a single string that may contain some constant declarations for the number symbols. If False, the same information is returned in a tuple of (symbols_to_declare, not_supported_functions, code_text). [default=True]. contract: bool, optional If True, ``Indexed`` instances are assumed to obey tensor contraction rules and the corresponding nested loops over indices are generated. Setting contract=False will not generate loops, instead the user is responsible to provide values for the indices in the code. [default=True]. Examples ======== >>> from sympy import rcode, symbols, Rational, sin, ceiling, Abs, Function >>> x, tau = symbols("x, tau") >>> rcode((2*tau)**Rational(7, 2)) '8*sqrt(2)*tau^(7.0/2.0)' >>> rcode(sin(x), assign_to="s") 's = sin(x);' Simple custom printing can be defined for certain types by passing a dictionary of {"type" : "function"} to the ``user_functions`` kwarg. Alternatively, the dictionary value can be a list of tuples i.e. [(argument_test, cfunction_string)]. >>> custom_functions = { ... "ceiling": "CEIL", ... "Abs": [(lambda x: not x.is_integer, "fabs"), ... (lambda x: x.is_integer, "ABS")], ... "func": "f" ... } >>> func = Function('func') >>> rcode(func(Abs(x) + ceiling(x)), user_functions=custom_functions) 'f(fabs(x) + CEIL(x))' or if the R-function takes a subset of the original arguments: >>> rcode(2**x + 3**x, user_functions={'Pow': [ ... (lambda b, e: b == 2, lambda b, e: 'exp2(%s)' % e), ... (lambda b, e: b != 2, 'pow')]}) 'exp2(x) + pow(3, x)' ``Piecewise`` expressions are converted into conditionals. If an ``assign_to`` variable is provided an if statement is created, otherwise the ternary operator is used. Note that if the ``Piecewise`` lacks a default term, represented by ``(expr, True)`` then an error will be thrown. This is to prevent generating an expression that may not evaluate to anything. >>> from sympy import Piecewise >>> expr = Piecewise((x + 1, x > 0), (x, True)) >>> print(rcode(expr, assign_to=tau)) tau = ifelse(x > 0,x + 1,x); Support for loops is provided through ``Indexed`` types. 
With ``contract=True`` these expressions will be turned into loops, whereas ``contract=False`` will just print the assignment expression that should be looped over: >>> from sympy import Eq, IndexedBase, Idx >>> len_y = 5 >>> y = IndexedBase('y', shape=(len_y,)) >>> t = IndexedBase('t', shape=(len_y,)) >>> Dy = IndexedBase('Dy', shape=(len_y-1,)) >>> i = Idx('i', len_y-1) >>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i])) >>> rcode(e.rhs, assign_to=e.lhs, contract=False) 'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);' Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions must be provided to ``assign_to``. Note that any expression that can be generated normally can also exist inside a Matrix: >>> from sympy import Matrix, MatrixSymbol >>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)]) >>> A = MatrixSymbol('A', 3, 1) >>> print(rcode(mat, A)) A[0] = x^2; A[1] = ifelse(x > 0,x + 1,x); A[2] = sin(x); """
/usr/src/app/target_test_cases/failed_tests_rcode.txt
def rcode(expr, assign_to=None, **settings): """Converts an expr to a string of r code Parameters ========== expr : Expr A SymPy expression to be converted. assign_to : optional When given, the argument is used as the name of the variable to which the expression is assigned. Can be a string, ``Symbol``, ``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of line-wrapping, or for expressions that generate multi-line statements. precision : integer, optional The precision for numbers such as pi [default=15]. user_functions : dict, optional A dictionary where the keys are string representations of either ``FunctionClass`` or ``UndefinedFunction`` instances and the values are their desired R string representations. Alternatively, the dictionary value can be a list of tuples i.e. [(argument_test, rfunction_string)] or [(argument_test, rfunction_formater)]. See below for examples. human : bool, optional If True, the result is a single string that may contain some constant declarations for the number symbols. If False, the same information is returned in a tuple of (symbols_to_declare, not_supported_functions, code_text). [default=True]. contract: bool, optional If True, ``Indexed`` instances are assumed to obey tensor contraction rules and the corresponding nested loops over indices are generated. Setting contract=False will not generate loops, instead the user is responsible to provide values for the indices in the code. [default=True]. Examples ======== >>> from sympy import rcode, symbols, Rational, sin, ceiling, Abs, Function >>> x, tau = symbols("x, tau") >>> rcode((2*tau)**Rational(7, 2)) '8*sqrt(2)*tau^(7.0/2.0)' >>> rcode(sin(x), assign_to="s") 's = sin(x);' Simple custom printing can be defined for certain types by passing a dictionary of {"type" : "function"} to the ``user_functions`` kwarg. Alternatively, the dictionary value can be a list of tuples i.e. [(argument_test, cfunction_string)]. >>> custom_functions = { ... "ceiling": "CEIL", ... "Abs": [(lambda x: not x.is_integer, "fabs"), ... (lambda x: x.is_integer, "ABS")], ... "func": "f" ... } >>> func = Function('func') >>> rcode(func(Abs(x) + ceiling(x)), user_functions=custom_functions) 'f(fabs(x) + CEIL(x))' or if the R-function takes a subset of the original arguments: >>> rcode(2**x + 3**x, user_functions={'Pow': [ ... (lambda b, e: b == 2, lambda b, e: 'exp2(%s)' % e), ... (lambda b, e: b != 2, 'pow')]}) 'exp2(x) + pow(3, x)' ``Piecewise`` expressions are converted into conditionals. If an ``assign_to`` variable is provided an if statement is created, otherwise the ternary operator is used. Note that if the ``Piecewise`` lacks a default term, represented by ``(expr, True)`` then an error will be thrown. This is to prevent generating an expression that may not evaluate to anything. >>> from sympy import Piecewise >>> expr = Piecewise((x + 1, x > 0), (x, True)) >>> print(rcode(expr, assign_to=tau)) tau = ifelse(x > 0,x + 1,x); Support for loops is provided through ``Indexed`` types. 
With ``contract=True`` these expressions will be turned into loops, whereas ``contract=False`` will just print the assignment expression that should be looped over: >>> from sympy import Eq, IndexedBase, Idx >>> len_y = 5 >>> y = IndexedBase('y', shape=(len_y,)) >>> t = IndexedBase('t', shape=(len_y,)) >>> Dy = IndexedBase('Dy', shape=(len_y-1,)) >>> i = Idx('i', len_y-1) >>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i])) >>> rcode(e.rhs, assign_to=e.lhs, contract=False) 'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);' Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions must be provided to ``assign_to``. Note that any expression that can be generated normally can also exist inside a Matrix: >>> from sympy import Matrix, MatrixSymbol >>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)]) >>> A = MatrixSymbol('A', 3, 1) >>> print(rcode(mat, A)) A[0] = x^2; A[1] = ifelse(x > 0,x + 1,x); A[2] = sin(x); """ return RCodePrinter(settings).doprint(expr, assign_to)
rcode
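A minimal sketch of the rcode entry above, using the calls and strings documented in its doctests:

from sympy import rcode, symbols, Rational, sin

x, tau = symbols('x tau')
print(rcode((2*tau)**Rational(7, 2)))   # 8*sqrt(2)*tau^(7.0/2.0)
print(rcode(sin(x), assign_to='s'))     # s = sin(x);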
sympy
85
sympy/physics/optics/utils.py
def refraction_angle(incident, medium1, medium2, normal=None, plane=None): """ This function calculates transmitted vector after refraction at planar surface. ``medium1`` and ``medium2`` can be ``Medium`` or any sympifiable object. If ``incident`` is a number then treated as angle of incidence (in radians) in which case refraction angle is returned. If ``incident`` is an object of `Ray3D`, `normal` also has to be an instance of `Ray3D` in order to get the output as a `Ray3D`. Please note that if plane of separation is not provided and normal is an instance of `Ray3D`, ``normal`` will be assumed to be intersecting incident ray at the plane of separation. This will not be the case when `normal` is a `Matrix` or any other sequence. If ``incident`` is an instance of `Ray3D` and `plane` has not been provided and ``normal`` is not `Ray3D`, output will be a `Matrix`. Parameters ========== incident : Matrix, Ray3D, sequence or a number Incident vector or angle of incidence medium1 : sympy.physics.optics.medium.Medium or sympifiable Medium 1 or its refractive index medium2 : sympy.physics.optics.medium.Medium or sympifiable Medium 2 or its refractive index normal : Matrix, Ray3D, or sequence Normal vector plane : Plane Plane of separation of the two media. Returns ======= Returns an angle of refraction or a refracted ray depending on inputs. Examples ======== >>> from sympy.physics.optics import refraction_angle >>> from sympy.geometry import Point3D, Ray3D, Plane >>> from sympy.matrices import Matrix >>> from sympy import symbols, pi >>> n = Matrix([0, 0, 1]) >>> P = Plane(Point3D(0, 0, 0), normal_vector=[0, 0, 1]) >>> r1 = Ray3D(Point3D(-1, -1, 1), Point3D(0, 0, 0)) >>> refraction_angle(r1, 1, 1, n) Matrix([ [ 1], [ 1], [-1]]) >>> refraction_angle(r1, 1, 1, plane=P) Ray3D(Point3D(0, 0, 0), Point3D(1, 1, -1)) With different index of refraction of the two media >>> n1, n2 = symbols('n1, n2') >>> refraction_angle(r1, n1, n2, n) Matrix([ [ n1/n2], [ n1/n2], [-sqrt(3)*sqrt(-2*n1**2/(3*n2**2) + 1)]]) >>> refraction_angle(r1, n1, n2, plane=P) Ray3D(Point3D(0, 0, 0), Point3D(n1/n2, n1/n2, -sqrt(3)*sqrt(-2*n1**2/(3*n2**2) + 1))) >>> round(refraction_angle(pi/6, 1.2, 1.5), 5) 0.41152 """
/usr/src/app/target_test_cases/failed_tests_refraction_angle.txt
def refraction_angle(incident, medium1, medium2, normal=None, plane=None): """ This function calculates transmitted vector after refraction at planar surface. ``medium1`` and ``medium2`` can be ``Medium`` or any sympifiable object. If ``incident`` is a number then treated as angle of incidence (in radians) in which case refraction angle is returned. If ``incident`` is an object of `Ray3D`, `normal` also has to be an instance of `Ray3D` in order to get the output as a `Ray3D`. Please note that if plane of separation is not provided and normal is an instance of `Ray3D`, ``normal`` will be assumed to be intersecting incident ray at the plane of separation. This will not be the case when `normal` is a `Matrix` or any other sequence. If ``incident`` is an instance of `Ray3D` and `plane` has not been provided and ``normal`` is not `Ray3D`, output will be a `Matrix`. Parameters ========== incident : Matrix, Ray3D, sequence or a number Incident vector or angle of incidence medium1 : sympy.physics.optics.medium.Medium or sympifiable Medium 1 or its refractive index medium2 : sympy.physics.optics.medium.Medium or sympifiable Medium 2 or its refractive index normal : Matrix, Ray3D, or sequence Normal vector plane : Plane Plane of separation of the two media. Returns ======= Returns an angle of refraction or a refracted ray depending on inputs. Examples ======== >>> from sympy.physics.optics import refraction_angle >>> from sympy.geometry import Point3D, Ray3D, Plane >>> from sympy.matrices import Matrix >>> from sympy import symbols, pi >>> n = Matrix([0, 0, 1]) >>> P = Plane(Point3D(0, 0, 0), normal_vector=[0, 0, 1]) >>> r1 = Ray3D(Point3D(-1, -1, 1), Point3D(0, 0, 0)) >>> refraction_angle(r1, 1, 1, n) Matrix([ [ 1], [ 1], [-1]]) >>> refraction_angle(r1, 1, 1, plane=P) Ray3D(Point3D(0, 0, 0), Point3D(1, 1, -1)) With different index of refraction of the two media >>> n1, n2 = symbols('n1, n2') >>> refraction_angle(r1, n1, n2, n) Matrix([ [ n1/n2], [ n1/n2], [-sqrt(3)*sqrt(-2*n1**2/(3*n2**2) + 1)]]) >>> refraction_angle(r1, n1, n2, plane=P) Ray3D(Point3D(0, 0, 0), Point3D(n1/n2, n1/n2, -sqrt(3)*sqrt(-2*n1**2/(3*n2**2) + 1))) >>> round(refraction_angle(pi/6, 1.2, 1.5), 5) 0.41152 """ n1 = refractive_index_of_medium(medium1) n2 = refractive_index_of_medium(medium2) # check if an incidence angle was supplied instead of a ray try: angle_of_incidence = float(incident) except TypeError: angle_of_incidence = None try: critical_angle_ = critical_angle(medium1, medium2) except (ValueError, TypeError): critical_angle_ = None if angle_of_incidence is not None: if normal is not None or plane is not None: raise ValueError('Normal/plane not allowed if incident is an angle') if not 0.0 <= angle_of_incidence < pi*0.5: raise ValueError('Angle of incidence not in range [0:pi/2)') if critical_angle_ and angle_of_incidence > critical_angle_: raise ValueError('Ray undergoes total internal reflection') return asin(n1*sin(angle_of_incidence)/n2) # Treat the incident as ray below # A flag to check whether to return Ray3D or not return_ray = False if plane is not None and normal is not None: raise ValueError("Either plane or normal is acceptable.") if not isinstance(incident, Matrix): if is_sequence(incident): _incident = Matrix(incident) elif isinstance(incident, Ray3D): _incident = Matrix(incident.direction_ratio) else: raise TypeError( "incident should be a Matrix, Ray3D, or sequence") else: _incident = incident # If plane is provided, get direction ratios of the normal # to the plane from the plane else go with `normal` 
param. if plane is not None: if not isinstance(plane, Plane): raise TypeError("plane should be an instance of geometry.plane.Plane") # If we have the plane, we can get the intersection # point of incident ray and the plane and thus return # an instance of Ray3D. if isinstance(incident, Ray3D): return_ray = True intersection_pt = plane.intersection(incident)[0] _normal = Matrix(plane.normal_vector) else: if not isinstance(normal, Matrix): if is_sequence(normal): _normal = Matrix(normal) elif isinstance(normal, Ray3D): _normal = Matrix(normal.direction_ratio) if isinstance(incident, Ray3D): intersection_pt = intersection(incident, normal) if len(intersection_pt) == 0: raise ValueError( "Normal isn't concurrent with the incident ray.") else: return_ray = True intersection_pt = intersection_pt[0] else: raise TypeError( "Normal should be a Matrix, Ray3D, or sequence") else: _normal = normal eta = n1/n2 # Relative index of refraction # Calculating magnitude of the vectors mag_incident = sqrt(sum(i**2 for i in _incident)) mag_normal = sqrt(sum(i**2 for i in _normal)) # Converting vectors to unit vectors by dividing # them with their magnitudes _incident /= mag_incident _normal /= mag_normal c1 = -_incident.dot(_normal) # cos(angle_of_incidence) cs2 = 1 - eta**2*(1 - c1**2) # cos(angle_of_refraction)**2 if cs2.is_negative: # This is the case of total internal reflection(TIR). return S.Zero drs = eta*_incident + (eta*c1 - sqrt(cs2))*_normal # Multiplying unit vector by its magnitude drs = drs*mag_incident if not return_ray: return drs else: return Ray3D(intersection_pt, direction_ratio=drs)
refraction_angle
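A brief usage sketch for refraction_angle based on the doctests quoted above, covering both the vector form (ray plus surface normal) and the scalar Snell's-law form:

from sympy import Matrix, pi
from sympy.geometry import Point3D, Ray3D
from sympy.physics.optics import refraction_angle

n = Matrix([0, 0, 1])                                # surface normal
r1 = Ray3D(Point3D(-1, -1, 1), Point3D(0, 0, 0))     # incident ray
print(refraction_angle(r1, 1, 1, n))                 # Matrix([[1], [1], [-1]])
print(round(refraction_angle(pi/6, 1.2, 1.5), 5))    # 0.41152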
sympy
86
sympy/printing/codeprinter.py
def rust_code(expr, assign_to=None, **settings): """Converts an expr to a string of Rust code Parameters ========== expr : Expr A SymPy expression to be converted. assign_to : optional When given, the argument is used as the name of the variable to which the expression is assigned. Can be a string, ``Symbol``, ``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of line-wrapping, or for expressions that generate multi-line statements. precision : integer, optional The precision for numbers such as pi [default=15]. user_functions : dict, optional A dictionary where the keys are string representations of either ``FunctionClass`` or ``UndefinedFunction`` instances and the values are their desired C string representations. Alternatively, the dictionary value can be a list of tuples i.e. [(argument_test, cfunction_string)]. See below for examples. dereference : iterable, optional An iterable of symbols that should be dereferenced in the printed code expression. These would be values passed by address to the function. For example, if ``dereference=[a]``, the resulting code would print ``(*a)`` instead of ``a``. human : bool, optional If True, the result is a single string that may contain some constant declarations for the number symbols. If False, the same information is returned in a tuple of (symbols_to_declare, not_supported_functions, code_text). [default=True]. contract: bool, optional If True, ``Indexed`` instances are assumed to obey tensor contraction rules and the corresponding nested loops over indices are generated. Setting contract=False will not generate loops, instead the user is responsible to provide values for the indices in the code. [default=True]. Examples ======== >>> from sympy import rust_code, symbols, Rational, sin, ceiling, Abs, Function >>> x, tau = symbols("x, tau") >>> rust_code((2*tau)**Rational(7, 2)) '8.0*1.4142135623731*tau.powf(7_f64/2.0)' >>> rust_code(sin(x), assign_to="s") 's = x.sin();' Simple custom printing can be defined for certain types by passing a dictionary of {"type" : "function"} to the ``user_functions`` kwarg. Alternatively, the dictionary value can be a list of tuples i.e. [(argument_test, cfunction_string)]. >>> custom_functions = { ... "ceiling": "CEIL", ... "Abs": [(lambda x: not x.is_integer, "fabs", 4), ... (lambda x: x.is_integer, "ABS", 4)], ... "func": "f" ... } >>> func = Function('func') >>> rust_code(func(Abs(x) + ceiling(x)), user_functions=custom_functions) '(fabs(x) + x.ceil()).f()' ``Piecewise`` expressions are converted into conditionals. If an ``assign_to`` variable is provided an if statement is created, otherwise the ternary operator is used. Note that if the ``Piecewise`` lacks a default term, represented by ``(expr, True)`` then an error will be thrown. This is to prevent generating an expression that may not evaluate to anything. >>> from sympy import Piecewise >>> expr = Piecewise((x + 1, x > 0), (x, True)) >>> print(rust_code(expr, tau)) tau = if (x > 0.0) { x + 1 } else { x }; Support for loops is provided through ``Indexed`` types. 
With ``contract=True`` these expressions will be turned into loops, whereas ``contract=False`` will just print the assignment expression that should be looped over: >>> from sympy import Eq, IndexedBase, Idx >>> len_y = 5 >>> y = IndexedBase('y', shape=(len_y,)) >>> t = IndexedBase('t', shape=(len_y,)) >>> Dy = IndexedBase('Dy', shape=(len_y-1,)) >>> i = Idx('i', len_y-1) >>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i])) >>> rust_code(e.rhs, assign_to=e.lhs, contract=False) 'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);' Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions must be provided to ``assign_to``. Note that any expression that can be generated normally can also exist inside a Matrix: >>> from sympy import Matrix, MatrixSymbol >>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)]) >>> A = MatrixSymbol('A', 3, 1) >>> print(rust_code(mat, A)) A = [x.powi(2), if (x > 0.0) { x + 1 } else { x }, x.sin()]; """
/usr/src/app/target_test_cases/failed_tests_rust_code.txt
def rust_code(expr, assign_to=None, **settings): """Converts an expr to a string of Rust code Parameters ========== expr : Expr A SymPy expression to be converted. assign_to : optional When given, the argument is used as the name of the variable to which the expression is assigned. Can be a string, ``Symbol``, ``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of line-wrapping, or for expressions that generate multi-line statements. precision : integer, optional The precision for numbers such as pi [default=15]. user_functions : dict, optional A dictionary where the keys are string representations of either ``FunctionClass`` or ``UndefinedFunction`` instances and the values are their desired C string representations. Alternatively, the dictionary value can be a list of tuples i.e. [(argument_test, cfunction_string)]. See below for examples. dereference : iterable, optional An iterable of symbols that should be dereferenced in the printed code expression. These would be values passed by address to the function. For example, if ``dereference=[a]``, the resulting code would print ``(*a)`` instead of ``a``. human : bool, optional If True, the result is a single string that may contain some constant declarations for the number symbols. If False, the same information is returned in a tuple of (symbols_to_declare, not_supported_functions, code_text). [default=True]. contract: bool, optional If True, ``Indexed`` instances are assumed to obey tensor contraction rules and the corresponding nested loops over indices are generated. Setting contract=False will not generate loops, instead the user is responsible to provide values for the indices in the code. [default=True]. Examples ======== >>> from sympy import rust_code, symbols, Rational, sin, ceiling, Abs, Function >>> x, tau = symbols("x, tau") >>> rust_code((2*tau)**Rational(7, 2)) '8.0*1.4142135623731*tau.powf(7_f64/2.0)' >>> rust_code(sin(x), assign_to="s") 's = x.sin();' Simple custom printing can be defined for certain types by passing a dictionary of {"type" : "function"} to the ``user_functions`` kwarg. Alternatively, the dictionary value can be a list of tuples i.e. [(argument_test, cfunction_string)]. >>> custom_functions = { ... "ceiling": "CEIL", ... "Abs": [(lambda x: not x.is_integer, "fabs", 4), ... (lambda x: x.is_integer, "ABS", 4)], ... "func": "f" ... } >>> func = Function('func') >>> rust_code(func(Abs(x) + ceiling(x)), user_functions=custom_functions) '(fabs(x) + x.ceil()).f()' ``Piecewise`` expressions are converted into conditionals. If an ``assign_to`` variable is provided an if statement is created, otherwise the ternary operator is used. Note that if the ``Piecewise`` lacks a default term, represented by ``(expr, True)`` then an error will be thrown. This is to prevent generating an expression that may not evaluate to anything. >>> from sympy import Piecewise >>> expr = Piecewise((x + 1, x > 0), (x, True)) >>> print(rust_code(expr, tau)) tau = if (x > 0.0) { x + 1 } else { x }; Support for loops is provided through ``Indexed`` types. 
With ``contract=True`` these expressions will be turned into loops, whereas ``contract=False`` will just print the assignment expression that should be looped over: >>> from sympy import Eq, IndexedBase, Idx >>> len_y = 5 >>> y = IndexedBase('y', shape=(len_y,)) >>> t = IndexedBase('t', shape=(len_y,)) >>> Dy = IndexedBase('Dy', shape=(len_y-1,)) >>> i = Idx('i', len_y-1) >>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i])) >>> rust_code(e.rhs, assign_to=e.lhs, contract=False) 'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);' Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions must be provided to ``assign_to``. Note that any expression that can be generated normally can also exist inside a Matrix: >>> from sympy import Matrix, MatrixSymbol >>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)]) >>> A = MatrixSymbol('A', 3, 1) >>> print(rust_code(mat, A)) A = [x.powi(2), if (x > 0.0) { x + 1 } else { x }, x.sin()]; """ from sympy.printing.rust import RustCodePrinter printer = RustCodePrinter(settings) expr = printer._rewrite_known_functions(expr) if isinstance(expr, Expr): for src_func, dst_func in printer.function_overrides.values(): expr = expr.replace(src_func, dst_func) return printer.doprint(expr, assign_to)
rust_code
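A minimal sketch for rust_code, repeating two of the docstring's own examples (a plain assignment and a Piecewise lowered to an if/else expression):

from sympy import rust_code, symbols, sin, Piecewise

x, tau = symbols('x tau')
print(rust_code(sin(x), assign_to='s'))      # s = x.sin();
expr = Piecewise((x + 1, x > 0), (x, True))
print(rust_code(expr, tau))                  # tau = if (x > 0.0) { x + 1 } else { x };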
sympy
87
sympy/polys/matrices/sdm.py
def sdm_irref(A): """RREF and pivots of a sparse matrix *A*. Compute the reduced row echelon form (RREF) of the matrix *A* and return a list of the pivot columns. This routine does not work in place and leaves the original matrix *A* unmodified. The domain of the matrix must be a field. Examples ======== This routine works with a dict of dicts sparse representation of a matrix: >>> from sympy import QQ >>> from sympy.polys.matrices.sdm import sdm_irref >>> A = {0: {0: QQ(1), 1: QQ(2)}, 1: {0: QQ(3), 1: QQ(4)}} >>> Arref, pivots, _ = sdm_irref(A) >>> Arref {0: {0: 1}, 1: {1: 1}} >>> pivots [0, 1] The analogous calculation with :py:class:`~.MutableDenseMatrix` would be >>> from sympy import Matrix >>> M = Matrix([[1, 2], [3, 4]]) >>> Mrref, pivots = M.rref() >>> Mrref Matrix([ [1, 0], [0, 1]]) >>> pivots (0, 1) Notes ===== The cost of this algorithm is determined purely by the nonzero elements of the matrix. No part of the cost of any step in this algorithm depends on the number of rows or columns in the matrix. No step depends even on the number of nonzero rows apart from the primary loop over those rows. The implementation is much faster than ddm_rref for sparse matrices. In fact at the time of writing it is also (slightly) faster than the dense implementation even if the input is a fully dense matrix so it seems to be faster in all cases. The elements of the matrix should support exact division with ``/``. For example elements of any domain that is a field (e.g. ``QQ``) should be fine. No attempt is made to handle inexact arithmetic. See Also ======== sympy.polys.matrices.domainmatrix.DomainMatrix.rref The higher-level function that would normally be used to call this routine. sympy.polys.matrices.dense.ddm_irref The dense equivalent of this routine. sdm_rref_den Fraction-free version of this routine. """
/usr/src/app/target_test_cases/failed_tests_sdm_irref.txt
def sdm_irref(A): """RREF and pivots of a sparse matrix *A*. Compute the reduced row echelon form (RREF) of the matrix *A* and return a list of the pivot columns. This routine does not work in place and leaves the original matrix *A* unmodified. The domain of the matrix must be a field. Examples ======== This routine works with a dict of dicts sparse representation of a matrix: >>> from sympy import QQ >>> from sympy.polys.matrices.sdm import sdm_irref >>> A = {0: {0: QQ(1), 1: QQ(2)}, 1: {0: QQ(3), 1: QQ(4)}} >>> Arref, pivots, _ = sdm_irref(A) >>> Arref {0: {0: 1}, 1: {1: 1}} >>> pivots [0, 1] The analogous calculation with :py:class:`~.MutableDenseMatrix` would be >>> from sympy import Matrix >>> M = Matrix([[1, 2], [3, 4]]) >>> Mrref, pivots = M.rref() >>> Mrref Matrix([ [1, 0], [0, 1]]) >>> pivots (0, 1) Notes ===== The cost of this algorithm is determined purely by the nonzero elements of the matrix. No part of the cost of any step in this algorithm depends on the number of rows or columns in the matrix. No step depends even on the number of nonzero rows apart from the primary loop over those rows. The implementation is much faster than ddm_rref for sparse matrices. In fact at the time of writing it is also (slightly) faster than the dense implementation even if the input is a fully dense matrix so it seems to be faster in all cases. The elements of the matrix should support exact division with ``/``. For example elements of any domain that is a field (e.g. ``QQ``) should be fine. No attempt is made to handle inexact arithmetic. See Also ======== sympy.polys.matrices.domainmatrix.DomainMatrix.rref The higher-level function that would normally be used to call this routine. sympy.polys.matrices.dense.ddm_irref The dense equivalent of this routine. sdm_rref_den Fraction-free version of this routine. """ # # Any zeros in the matrix are not stored at all so an element is zero if # its row dict has no index at that key. A row is entirely zero if its # row index is not in the outer dict. Since rref reorders the rows and # removes zero rows we can completely discard the row indices. The first # step then copies the row dicts into a list sorted by the index of the # first nonzero column in each row. # # The algorithm then processes each row Ai one at a time. Previously seen # rows are used to cancel their pivot columns from Ai. Then a pivot from # Ai is chosen and is cancelled from all previously seen rows. At this # point Ai joins the previously seen rows. Once all rows are seen all # elimination has occurred and the rows are sorted by pivot column index. # # The previously seen rows are stored in two separate groups. The reduced # group consists of all rows that have been reduced to a single nonzero # element (the pivot). There is no need to attempt any further reduction # with these. Rows that still have other nonzeros need to be considered # when Ai is cancelled from the previously seen rows. # # A dict nonzerocolumns is used to map from a column index to a set of # previously seen rows that still have a nonzero element in that column. # This means that we can cancel the pivot from Ai into the previously seen # rows without needing to loop over each row that might have a zero in # that column. # # Row dicts sorted by index of first nonzero column # (Maybe sorting is not needed/useful.) Arows = sorted((Ai.copy() for Ai in A.values()), key=min) # Each processed row has an associated pivot column. # pivot_row_map maps from the pivot column index to the row dict. 
# This means that we can represent a set of rows purely as a set of their # pivot indices. pivot_row_map = {} # Set of pivot indices for rows that are fully reduced to a single nonzero. reduced_pivots = set() # Set of pivot indices for rows not fully reduced nonreduced_pivots = set() # Map from column index to a set of pivot indices representing the rows # that have a nonzero at that column. nonzero_columns = defaultdict(set) while Arows: # Select pivot element and row Ai = Arows.pop() # Nonzero columns from fully reduced pivot rows can be removed Ai = {j: Aij for j, Aij in Ai.items() if j not in reduced_pivots} # Others require full row cancellation for j in nonreduced_pivots & set(Ai): Aj = pivot_row_map[j] Aij = Ai[j] Ainz = set(Ai) Ajnz = set(Aj) for k in Ajnz - Ainz: Ai[k] = - Aij * Aj[k] Ai.pop(j) Ainz.remove(j) for k in Ajnz & Ainz: Aik = Ai[k] - Aij * Aj[k] if Aik: Ai[k] = Aik else: Ai.pop(k) # We have now cancelled previously seen pivots from Ai. # If it is zero then discard it. if not Ai: continue # Choose a pivot from Ai: j = min(Ai) Aij = Ai[j] pivot_row_map[j] = Ai Ainz = set(Ai) # Normalise the pivot row to make the pivot 1. # # This approach is slow for some domains. Cross cancellation might be # better for e.g. QQ(x) with division delayed to the final steps. Aijinv = Aij**-1 for l in Ai: Ai[l] *= Aijinv # Use Aij to cancel column j from all previously seen rows for k in nonzero_columns.pop(j, ()): Ak = pivot_row_map[k] Akj = Ak[j] Aknz = set(Ak) for l in Ainz - Aknz: Ak[l] = - Akj * Ai[l] nonzero_columns[l].add(k) Ak.pop(j) Aknz.remove(j) for l in Ainz & Aknz: Akl = Ak[l] - Akj * Ai[l] if Akl: Ak[l] = Akl else: # Drop nonzero elements Ak.pop(l) if l != j: nonzero_columns[l].remove(k) if len(Ak) == 1: reduced_pivots.add(k) nonreduced_pivots.remove(k) if len(Ai) == 1: reduced_pivots.add(j) else: nonreduced_pivots.add(j) for l in Ai: if l != j: nonzero_columns[l].add(j) # All done! pivots = sorted(reduced_pivots | nonreduced_pivots) pivot2row = {p: n for n, p in enumerate(pivots)} nonzero_columns = {c: {pivot2row[p] for p in s} for c, s in nonzero_columns.items()} rows = [pivot_row_map[i] for i in pivots] rref = dict(enumerate(rows)) return rref, pivots, nonzero_columns
sdm_irref
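A short sketch of sdm_irref on the dict-of-dicts sparse representation, taken from the doctest above; it returns the RREF, the pivot columns, and a map of nonzero columns:

from sympy import QQ
from sympy.polys.matrices.sdm import sdm_irref

A = {0: {0: QQ(1), 1: QQ(2)}, 1: {0: QQ(3), 1: QQ(4)}}
rref, pivots, nonzero_cols = sdm_irref(A)
print(rref)    # {0: {0: 1}, 1: {1: 1}}
print(pivots)  # [0, 1]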
sympy
88
sympy/calculus/singularities.py
def singularities(expression, symbol, domain=None): """ Find singularities of a given function. Parameters ========== expression : Expr The target function in which singularities need to be found. symbol : Symbol The symbol over the values of which the singularity in expression is being searched for. Returns ======= Set A set of values for ``symbol`` for which ``expression`` has a singularity. An ``EmptySet`` is returned if ``expression`` has no singularities for any given value of ``Symbol``. Raises ====== NotImplementedError Methods for determining the singularities of this function have not been developed. Notes ===== This function does not find non-isolated singularities nor does it find branch points of the expression. Currently supported functions are: - univariate continuous (real or complex) functions References ========== .. [1] https://en.wikipedia.org/wiki/Mathematical_singularity Examples ======== >>> from sympy import singularities, Symbol, log >>> x = Symbol('x', real=True) >>> y = Symbol('y', real=False) >>> singularities(x**2 + x + 1, x) EmptySet >>> singularities(1/(x + 1), x) {-1} >>> singularities(1/(y**2 + 1), y) {-I, I} >>> singularities(1/(y**3 + 1), y) {-1, 1/2 - sqrt(3)*I/2, 1/2 + sqrt(3)*I/2} >>> singularities(log(x), x) {0} """
/usr/src/app/target_test_cases/failed_tests_singularities.txt
def singularities(expression, symbol, domain=None): """ Find singularities of a given function. Parameters ========== expression : Expr The target function in which singularities need to be found. symbol : Symbol The symbol over the values of which the singularity in expression in being searched for. Returns ======= Set A set of values for ``symbol`` for which ``expression`` has a singularity. An ``EmptySet`` is returned if ``expression`` has no singularities for any given value of ``Symbol``. Raises ====== NotImplementedError Methods for determining the singularities of this function have not been developed. Notes ===== This function does not find non-isolated singularities nor does it find branch points of the expression. Currently supported functions are: - univariate continuous (real or complex) functions References ========== .. [1] https://en.wikipedia.org/wiki/Mathematical_singularity Examples ======== >>> from sympy import singularities, Symbol, log >>> x = Symbol('x', real=True) >>> y = Symbol('y', real=False) >>> singularities(x**2 + x + 1, x) EmptySet >>> singularities(1/(x + 1), x) {-1} >>> singularities(1/(y**2 + 1), y) {-I, I} >>> singularities(1/(y**3 + 1), y) {-1, 1/2 - sqrt(3)*I/2, 1/2 + sqrt(3)*I/2} >>> singularities(log(x), x) {0} """ from sympy.solvers.solveset import solveset if domain is None: domain = S.Reals if symbol.is_real else S.Complexes try: sings = S.EmptySet e = expression.rewrite([sec, csc, cot, tan], cos) e = e.rewrite([sech, csch, coth, tanh], cosh) for i in e.atoms(Pow): if i.exp.is_infinite: raise NotImplementedError if i.exp.is_negative: # XXX: exponent of varying sign not handled sings += solveset(i.base, symbol, domain) for i in expression.atoms(log, asech, acsch): sings += solveset(i.args[0], symbol, domain) for i in expression.atoms(atanh, acoth): sings += solveset(i.args[0] - 1, symbol, domain) sings += solveset(i.args[0] + 1, symbol, domain) return sings except NotImplementedError: raise NotImplementedError(filldedent(''' Methods for determining the singularities of this function have not been developed.'''))
singularities
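A short sketch of singularities on a rational function, assuming only the public names already used in the docstring above; the expression is illustrative.

from sympy import singularities, Symbol

x = Symbol('x', real=True)

# Poles come from the zeros of the denominator factors.
print(singularities(1/((x - 1)*(x + 2)), x))  # expected: {-2, 1}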
sympy
89
sympy/ntheory/factor_.py
def smoothness_p(n, m=-1, power=0, visual=None): """ Return a list of [m, (p, (M, sm(p + m), psm(p + m)))...] where: 1. p**M is the base-p divisor of n 2. sm(p + m) is the smoothness of p + m (m = -1 by default) 3. psm(p + m) is the power smoothness of p + m The list is sorted according to smoothness (default) or by power smoothness if power=1. The smoothness of the numbers to the left (m = -1) or right (m = 1) of a factor govern the results that are obtained from the p +/- 1 type factoring methods. >>> from sympy.ntheory.factor_ import smoothness_p, factorint >>> smoothness_p(10431, m=1) (1, [(3, (2, 2, 4)), (19, (1, 5, 5)), (61, (1, 31, 31))]) >>> smoothness_p(10431) (-1, [(3, (2, 2, 2)), (19, (1, 3, 9)), (61, (1, 5, 5))]) >>> smoothness_p(10431, power=1) (-1, [(3, (2, 2, 2)), (61, (1, 5, 5)), (19, (1, 3, 9))]) If visual=True then an annotated string will be returned: >>> print(smoothness_p(21477639576571, visual=1)) p**i=4410317**1 has p-1 B=1787, B-pow=1787 p**i=4869863**1 has p-1 B=2434931, B-pow=2434931 This string can also be generated directly from a factorization dictionary and vice versa: >>> factorint(17*9) {3: 2, 17: 1} >>> smoothness_p(_) 'p**i=3**2 has p-1 B=2, B-pow=2\\np**i=17**1 has p-1 B=2, B-pow=16' >>> smoothness_p(_) {3: 2, 17: 1} The table of the output logic is: ====== ====== ======= ======= | Visual ------ ---------------------- Input True False other ====== ====== ======= ======= dict str tuple str str str tuple dict tuple str tuple str n str tuple tuple mul str tuple tuple ====== ====== ======= ======= See Also ======== factorint, smoothness """
/usr/src/app/target_test_cases/failed_tests_smoothness_p.txt
def smoothness_p(n, m=-1, power=0, visual=None): """ Return a list of [m, (p, (M, sm(p + m), psm(p + m)))...] where: 1. p**M is the base-p divisor of n 2. sm(p + m) is the smoothness of p + m (m = -1 by default) 3. psm(p + m) is the power smoothness of p + m The list is sorted according to smoothness (default) or by power smoothness if power=1. The smoothness of the numbers to the left (m = -1) or right (m = 1) of a factor govern the results that are obtained from the p +/- 1 type factoring methods. >>> from sympy.ntheory.factor_ import smoothness_p, factorint >>> smoothness_p(10431, m=1) (1, [(3, (2, 2, 4)), (19, (1, 5, 5)), (61, (1, 31, 31))]) >>> smoothness_p(10431) (-1, [(3, (2, 2, 2)), (19, (1, 3, 9)), (61, (1, 5, 5))]) >>> smoothness_p(10431, power=1) (-1, [(3, (2, 2, 2)), (61, (1, 5, 5)), (19, (1, 3, 9))]) If visual=True then an annotated string will be returned: >>> print(smoothness_p(21477639576571, visual=1)) p**i=4410317**1 has p-1 B=1787, B-pow=1787 p**i=4869863**1 has p-1 B=2434931, B-pow=2434931 This string can also be generated directly from a factorization dictionary and vice versa: >>> factorint(17*9) {3: 2, 17: 1} >>> smoothness_p(_) 'p**i=3**2 has p-1 B=2, B-pow=2\\np**i=17**1 has p-1 B=2, B-pow=16' >>> smoothness_p(_) {3: 2, 17: 1} The table of the output logic is: ====== ====== ======= ======= | Visual ------ ---------------------- Input True False other ====== ====== ======= ======= dict str tuple str str str tuple dict tuple str tuple str n str tuple tuple mul str tuple tuple ====== ====== ======= ======= See Also ======== factorint, smoothness """ # visual must be True, False or other (stored as None) if visual in (1, 0): visual = bool(visual) elif visual not in (True, False): visual = None if isinstance(n, str): if visual: return n d = {} for li in n.splitlines(): k, v = [int(i) for i in li.split('has')[0].split('=')[1].split('**')] d[k] = v if visual is not True and visual is not False: return d return smoothness_p(d, visual=False) elif not isinstance(n, tuple): facs = factorint(n, visual=False) if power: k = -1 else: k = 1 if isinstance(n, tuple): rv = n else: rv = (m, sorted([(f, tuple([M] + list(smoothness(f + m)))) for f, M in list(facs.items())], key=lambda x: (x[1][k], x[0]))) if visual is False or (visual is not True) and (type(n) in [int, Mul]): return rv lines = [] for dat in rv[1]: dat = flatten(dat) dat.insert(2, m) lines.append('p**i=%i**%i has p%+i B=%i, B-pow=%i' % tuple(dat)) return '\n'.join(lines)
smoothness_p
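A brief sketch showing how the tuple returned by smoothness_p can be unpacked, reusing the 10431 example from the docstring above.

from sympy.ntheory.factor_ import smoothness_p

# For each prime power p**M dividing n, report smoothness data for p + m (m = -1 by default).
m, data = smoothness_p(10431)
for p, (M, sm, psm) in data:
    print(p, M, sm, psm)

# The visual form renders the same information as an annotated string.
print(smoothness_p(10431, visual=True))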
sympy
90
sympy/solvers/polysys.py
def solve_generic(polys, opt, strict=False): """ Solve a generic system of polynomial equations. Returns all possible solutions over C[x_1, x_2, ..., x_m] of a set F = { f_1, f_2, ..., f_n } of polynomial equations, using Groebner basis approach. For now only zero-dimensional systems are supported, which means F can have at most a finite number of solutions. If the basis contains only the ground, None is returned. The algorithm works by the fact that, supposing G is the basis of F with respect to an elimination order (here lexicographic order is used), G and F generate the same ideal, they have the same set of solutions. By the elimination property, if G is a reduced, zero-dimensional Groebner basis, then there exists an univariate polynomial in G (in its last variable). This can be solved by computing its roots. Substituting all computed roots for the last (eliminated) variable in other elements of G, new polynomial system is generated. Applying the above procedure recursively, a finite number of solutions can be found. The ability of finding all solutions by this procedure depends on the root finding algorithms. If no solutions were found, it means only that roots() failed, but the system is solvable. To overcome this difficulty use numerical algorithms instead. Parameters ========== polys: a list/tuple/set Listing all the polynomial equations that are needed to be solved opt: an Options object For specifying keyword arguments and generators strict: a boolean If strict is True, NotImplementedError will be raised if the solution is known to be incomplete Returns ======= List[Tuple] a list of tuples with elements being solutions for the symbols in the order they were passed as gens None None is returned when the computed basis contains only the ground. References ========== .. [Buchberger01] B. Buchberger, Groebner Bases: A Short Introduction for Systems Theorists, In: R. Moreno-Diaz, B. Buchberger, J.L. Freire, Proceedings of EUROCAST'01, February, 2001 .. [Cox97] D. Cox, J. Little, D. O'Shea, Ideals, Varieties and Algorithms, Springer, Second Edition, 1997, pp. 112 Raises ======== NotImplementedError If the system is not zero-dimensional (does not have a finite number of solutions) UnsolvableFactorError If ``strict`` is True and not all solution components are expressible in radicals Examples ======== >>> from sympy import Poly, Options >>> from sympy.solvers.polysys import solve_generic >>> from sympy.abc import x, y >>> NewOption = Options((x, y), {'domain': 'ZZ'}) >>> a = Poly(x - y + 5, x, y, domain='ZZ') >>> b = Poly(x + y - 3, x, y, domain='ZZ') >>> solve_generic([a, b], NewOption) [(-1, 4)] >>> a = Poly(x - 2*y + 5, x, y, domain='ZZ') >>> b = Poly(2*x - y - 3, x, y, domain='ZZ') >>> solve_generic([a, b], NewOption) [(11/3, 13/3)] >>> a = Poly(x**2 + y, x, y, domain='ZZ') >>> b = Poly(x + y*4, x, y, domain='ZZ') >>> solve_generic([a, b], NewOption) [(0, 0), (1/4, -1/16)] >>> a = Poly(x**5 - x + y**3, x, y, domain='ZZ') >>> b = Poly(y**2 - 1, x, y, domain='ZZ') >>> solve_generic([a, b], NewOption, strict=True) Traceback (most recent call last): ... UnsolvableFactorError """
/usr/src/app/target_test_cases/failed_tests_solve_generic.txt
def solve_generic(polys, opt, strict=False): """ Solve a generic system of polynomial equations. Returns all possible solutions over C[x_1, x_2, ..., x_m] of a set F = { f_1, f_2, ..., f_n } of polynomial equations, using Groebner basis approach. For now only zero-dimensional systems are supported, which means F can have at most a finite number of solutions. If the basis contains only the ground, None is returned. The algorithm works by the fact that, supposing G is the basis of F with respect to an elimination order (here lexicographic order is used), G and F generate the same ideal, they have the same set of solutions. By the elimination property, if G is a reduced, zero-dimensional Groebner basis, then there exists an univariate polynomial in G (in its last variable). This can be solved by computing its roots. Substituting all computed roots for the last (eliminated) variable in other elements of G, new polynomial system is generated. Applying the above procedure recursively, a finite number of solutions can be found. The ability of finding all solutions by this procedure depends on the root finding algorithms. If no solutions were found, it means only that roots() failed, but the system is solvable. To overcome this difficulty use numerical algorithms instead. Parameters ========== polys: a list/tuple/set Listing all the polynomial equations that are needed to be solved opt: an Options object For specifying keyword arguments and generators strict: a boolean If strict is True, NotImplementedError will be raised if the solution is known to be incomplete Returns ======= List[Tuple] a list of tuples with elements being solutions for the symbols in the order they were passed as gens None None is returned when the computed basis contains only the ground. References ========== .. [Buchberger01] B. Buchberger, Groebner Bases: A Short Introduction for Systems Theorists, In: R. Moreno-Diaz, B. Buchberger, J.L. Freire, Proceedings of EUROCAST'01, February, 2001 .. [Cox97] D. Cox, J. Little, D. O'Shea, Ideals, Varieties and Algorithms, Springer, Second Edition, 1997, pp. 112 Raises ======== NotImplementedError If the system is not zero-dimensional (does not have a finite number of solutions) UnsolvableFactorError If ``strict`` is True and not all solution components are expressible in radicals Examples ======== >>> from sympy import Poly, Options >>> from sympy.solvers.polysys import solve_generic >>> from sympy.abc import x, y >>> NewOption = Options((x, y), {'domain': 'ZZ'}) >>> a = Poly(x - y + 5, x, y, domain='ZZ') >>> b = Poly(x + y - 3, x, y, domain='ZZ') >>> solve_generic([a, b], NewOption) [(-1, 4)] >>> a = Poly(x - 2*y + 5, x, y, domain='ZZ') >>> b = Poly(2*x - y - 3, x, y, domain='ZZ') >>> solve_generic([a, b], NewOption) [(11/3, 13/3)] >>> a = Poly(x**2 + y, x, y, domain='ZZ') >>> b = Poly(x + y*4, x, y, domain='ZZ') >>> solve_generic([a, b], NewOption) [(0, 0), (1/4, -1/16)] >>> a = Poly(x**5 - x + y**3, x, y, domain='ZZ') >>> b = Poly(y**2 - 1, x, y, domain='ZZ') >>> solve_generic([a, b], NewOption, strict=True) Traceback (most recent call last): ... UnsolvableFactorError """ def _is_univariate(f): """Returns True if 'f' is univariate in its last variable. """ for monom in f.monoms(): if any(monom[:-1]): return False return True def _subs_root(f, gen, zero): """Replace generator with a root so that the result is nice. 
""" p = f.as_expr({gen: zero}) if f.degree(gen) >= 2: p = p.expand(deep=False) return p def _solve_reduced_system(system, gens, entry=False): """Recursively solves reduced polynomial systems. """ if len(system) == len(gens) == 1: # the below line will produce UnsolvableFactorError if # strict=True and the solution from `roots` is incomplete zeros = list(roots(system[0], gens[-1], strict=strict).keys()) return [(zero,) for zero in zeros] basis = groebner(system, gens, polys=True) if len(basis) == 1 and basis[0].is_ground: if not entry: return [] else: return None univariate = list(filter(_is_univariate, basis)) if len(basis) < len(gens): raise NotImplementedError(filldedent(''' only zero-dimensional systems supported (finite number of solutions) ''')) if len(univariate) == 1: f = univariate.pop() else: raise NotImplementedError(filldedent(''' only zero-dimensional systems supported (finite number of solutions) ''')) gens = f.gens gen = gens[-1] # the below line will produce UnsolvableFactorError if # strict=True and the solution from `roots` is incomplete zeros = list(roots(f.ltrim(gen), strict=strict).keys()) if not zeros: return [] if len(basis) == 1: return [(zero,) for zero in zeros] solutions = [] for zero in zeros: new_system = [] new_gens = gens[:-1] for b in basis[:-1]: eq = _subs_root(b, gen, zero) if eq is not S.Zero: new_system.append(eq) for solution in _solve_reduced_system(new_system, new_gens): solutions.append(solution + (zero,)) if solutions and len(solutions[0]) != len(gens): raise NotImplementedError(filldedent(''' only zero-dimensional systems supported (finite number of solutions) ''')) return solutions try: result = _solve_reduced_system(polys, opt.gens, entry=True) except CoercionFailed: raise NotImplementedError if result is not None: return sorted(result, key=default_sort_key)
solve_generic
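solve_generic is the internal Groebner-basis solver; a minimal sketch of the equivalent computation through the public wrapper solve_poly_system, which typically dispatches to it, is shown below (the system itself is illustrative).

from sympy.abc import x, y
from sympy.solvers.polysys import solve_poly_system

# A zero-dimensional system: finitely many solutions.
print(solve_poly_system([x + y - 2, x - y], x, y))  # expected: [(1, 1)]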
sympy
91
sympy/polys/solvers.py
def solve_lin_sys(eqs, ring, _raw=True): """Solve a system of linear equations from a PolynomialRing Explanation =========== Solves a system of linear equations given as PolyElement instances of a PolynomialRing. The basic arithmetic is carried out using instance of DomainElement which is more efficient than :class:`~sympy.core.expr.Expr` for the most common inputs. While this is a public function it is intended primarily for internal use so its interface is not necessarily convenient. Users are suggested to use the :func:`sympy.solvers.solveset.linsolve` function (which uses this function internally) instead. Parameters ========== eqs: list[PolyElement] The linear equations to be solved as elements of a PolynomialRing (assumed equal to zero). ring: PolynomialRing The polynomial ring from which eqs are drawn. The generators of this ring are the unknowns to be solved for and the domain of the ring is the domain of the coefficients of the system of equations. _raw: bool If *_raw* is False, the keys and values in the returned dictionary will be of type Expr (and the unit of the field will be removed from the keys) otherwise the low-level polys types will be returned, e.g. PolyElement: PythonRational. Returns ======= ``None`` if the system has no solution. dict[Symbol, Expr] if _raw=False dict[Symbol, DomainElement] if _raw=True. Examples ======== >>> from sympy import symbols >>> from sympy.polys.solvers import solve_lin_sys, sympy_eqs_to_ring >>> x, y = symbols('x, y') >>> eqs = [x - y, x + y - 2] >>> eqs_ring, ring = sympy_eqs_to_ring(eqs, [x, y]) >>> solve_lin_sys(eqs_ring, ring) {y: 1, x: 1} Passing ``_raw=False`` returns the same result except that the keys are ``Expr`` rather than low-level poly types. >>> solve_lin_sys(eqs_ring, ring, _raw=False) {x: 1, y: 1} See also ======== sympy_eqs_to_ring: prepares the inputs to ``solve_lin_sys``. linsolve: ``linsolve`` uses ``solve_lin_sys`` internally. sympy.solvers.solvers.solve: ``solve`` uses ``solve_lin_sys`` internally. """
/usr/src/app/target_test_cases/failed_tests_solve_lin_sys.txt
def solve_lin_sys(eqs, ring, _raw=True): """Solve a system of linear equations from a PolynomialRing Explanation =========== Solves a system of linear equations given as PolyElement instances of a PolynomialRing. The basic arithmetic is carried out using instance of DomainElement which is more efficient than :class:`~sympy.core.expr.Expr` for the most common inputs. While this is a public function it is intended primarily for internal use so its interface is not necessarily convenient. Users are suggested to use the :func:`sympy.solvers.solveset.linsolve` function (which uses this function internally) instead. Parameters ========== eqs: list[PolyElement] The linear equations to be solved as elements of a PolynomialRing (assumed equal to zero). ring: PolynomialRing The polynomial ring from which eqs are drawn. The generators of this ring are the unknowns to be solved for and the domain of the ring is the domain of the coefficients of the system of equations. _raw: bool If *_raw* is False, the keys and values in the returned dictionary will be of type Expr (and the unit of the field will be removed from the keys) otherwise the low-level polys types will be returned, e.g. PolyElement: PythonRational. Returns ======= ``None`` if the system has no solution. dict[Symbol, Expr] if _raw=False dict[Symbol, DomainElement] if _raw=True. Examples ======== >>> from sympy import symbols >>> from sympy.polys.solvers import solve_lin_sys, sympy_eqs_to_ring >>> x, y = symbols('x, y') >>> eqs = [x - y, x + y - 2] >>> eqs_ring, ring = sympy_eqs_to_ring(eqs, [x, y]) >>> solve_lin_sys(eqs_ring, ring) {y: 1, x: 1} Passing ``_raw=False`` returns the same result except that the keys are ``Expr`` rather than low-level poly types. >>> solve_lin_sys(eqs_ring, ring, _raw=False) {x: 1, y: 1} See also ======== sympy_eqs_to_ring: prepares the inputs to ``solve_lin_sys``. linsolve: ``linsolve`` uses ``solve_lin_sys`` internally. sympy.solvers.solvers.solve: ``solve`` uses ``solve_lin_sys`` internally. """ as_expr = not _raw assert ring.domain.is_Field eqs_dict = [dict(eq) for eq in eqs] one_monom = ring.one.monoms()[0] zero = ring.domain.zero eqs_rhs = [] eqs_coeffs = [] for eq_dict in eqs_dict: eq_rhs = eq_dict.pop(one_monom, zero) eq_coeffs = {} for monom, coeff in eq_dict.items(): if sum(monom) != 1: msg = "Nonlinear term encountered in solve_lin_sys" raise PolyNonlinearError(msg) eq_coeffs[ring.gens[monom.index(1)]] = coeff if not eq_coeffs: if not eq_rhs: continue else: return None eqs_rhs.append(eq_rhs) eqs_coeffs.append(eq_coeffs) result = _solve_lin_sys(eqs_coeffs, eqs_rhs, ring) if result is not None and as_expr: def to_sympy(x): as_expr = getattr(x, 'as_expr', None) if as_expr: return as_expr() else: return ring.domain.to_sympy(x) tresult = {to_sympy(sym): to_sympy(val) for sym, val in result.items()} # Remove 1.0x result = {} for k, v in tresult.items(): if k.is_Mul: c, s = k.as_coeff_Mul() result[s] = v/c else: result[k] = v return result
solve_lin_sys
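A compact sketch of the solve_lin_sys workflow from the docstring above, with a different (illustrative) pair of equations.

from sympy import symbols
from sympy.polys.solvers import solve_lin_sys, sympy_eqs_to_ring

x, y = symbols('x y')

# Equations are expressions assumed equal to zero.
eqs = [x + 2*y - 3, 3*x - y - 2]
eqs_ring, ring = sympy_eqs_to_ring(eqs, [x, y])
print(solve_lin_sys(eqs_ring, ring, _raw=False))  # expected: {x: 1, y: 1}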
sympy
92
sympy/physics/quantum/operatorset.py
def state_to_operators(state, **options): """ Returns the operator or set of operators corresponding to the given eigenstate A global function for mapping state classes to their associated operators or sets of operators. It takes either a state class or instance. This function can handle both instances of a given state or just the class itself (i.e. both XKet() and XKet) There are multiple use cases to consider: 1) A state class is passed: In this case, we first try instantiating a default instance of the class. If this succeeds, then we try to call state._state_to_operators on that instance. If the creation of the default instance or if the calling of _state_to_operators fails, then either an operator class or set of operator classes is returned. Otherwise, the appropriate operator instances are returned. 2) A state instance is returned: Here, state._state_to_operators is called for the instance. If this fails, then a class or set of operator classes is returned. Otherwise, the instances are returned. In either case, if the state's class does not exist in state_mapping, None is returned. Parameters ========== arg: StateBase class or instance (or subclasses) The class or instance of the state to be mapped to an operator or set of operators Examples ======== >>> from sympy.physics.quantum.cartesian import XKet, PxKet, XBra, PxBra >>> from sympy.physics.quantum.operatorset import state_to_operators >>> from sympy.physics.quantum.state import Ket, Bra >>> state_to_operators(XKet) X >>> state_to_operators(XKet()) X >>> state_to_operators(PxKet) Px >>> state_to_operators(PxKet()) Px >>> state_to_operators(PxBra) Px >>> state_to_operators(XBra) X >>> state_to_operators(Ket) O >>> state_to_operators(Bra) O """
/usr/src/app/target_test_cases/failed_tests_state_to_operators.txt
def state_to_operators(state, **options): """ Returns the operator or set of operators corresponding to the given eigenstate A global function for mapping state classes to their associated operators or sets of operators. It takes either a state class or instance. This function can handle both instances of a given state or just the class itself (i.e. both XKet() and XKet) There are multiple use cases to consider: 1) A state class is passed: In this case, we first try instantiating a default instance of the class. If this succeeds, then we try to call state._state_to_operators on that instance. If the creation of the default instance or if the calling of _state_to_operators fails, then either an operator class or set of operator classes is returned. Otherwise, the appropriate operator instances are returned. 2) A state instance is returned: Here, state._state_to_operators is called for the instance. If this fails, then a class or set of operator classes is returned. Otherwise, the instances are returned. In either case, if the state's class does not exist in state_mapping, None is returned. Parameters ========== arg: StateBase class or instance (or subclasses) The class or instance of the state to be mapped to an operator or set of operators Examples ======== >>> from sympy.physics.quantum.cartesian import XKet, PxKet, XBra, PxBra >>> from sympy.physics.quantum.operatorset import state_to_operators >>> from sympy.physics.quantum.state import Ket, Bra >>> state_to_operators(XKet) X >>> state_to_operators(XKet()) X >>> state_to_operators(PxKet) Px >>> state_to_operators(PxKet()) Px >>> state_to_operators(PxBra) Px >>> state_to_operators(XBra) X >>> state_to_operators(Ket) O >>> state_to_operators(Bra) O """ if not (isinstance(state, StateBase) or issubclass(state, StateBase)): raise NotImplementedError("Argument is not a state!") if state in state_mapping: # state is a class state_inst = _make_default(state) try: ret = _get_ops(state_inst, _make_set(state_mapping[state]), **options) except (NotImplementedError, TypeError): ret = state_mapping[state] elif type(state) in state_mapping: ret = _get_ops(state, _make_set(state_mapping[type(state)]), **options) elif isinstance(state, BraBase) and state.dual_class() in state_mapping: ret = _get_ops(state, _make_set(state_mapping[state.dual_class()])) elif issubclass(state, BraBase) and state.dual_class() in state_mapping: state_inst = _make_default(state) try: ret = _get_ops(state_inst, _make_set(state_mapping[state.dual_class()])) except (NotImplementedError, TypeError): ret = state_mapping[state.dual_class()] else: ret = None return _make_set(ret)
state_to_operators
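A minimal sketch of state_to_operators on both a class and an instance, using only objects named in the docstring above.

from sympy.physics.quantum.cartesian import XKet, PxKet
from sympy.physics.quantum.operatorset import state_to_operators

# Passing the class or an instance yields the corresponding position/momentum operator.
print(state_to_operators(XKet))     # expected: X
print(state_to_operators(PxKet()))  # expected: Px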
sympy
93
sympy/physics/secondquant.py
def substitute_dummies(expr, new_indices=False, pretty_indices={}): """ Collect terms by substitution of dummy variables. Explanation =========== This routine allows simplification of Add expressions containing terms which differ only due to dummy variables. The idea is to substitute all dummy variables consistently depending on the structure of the term. For each term, we obtain a sequence of all dummy variables, where the order is determined by the index range, what factors the index belongs to and its position in each factor. See _get_ordered_dummies() for more information about the sorting of dummies. The index sequence is then substituted consistently in each term. Examples ======== >>> from sympy import symbols, Function, Dummy >>> from sympy.physics.secondquant import substitute_dummies >>> a,b,c,d = symbols('a b c d', above_fermi=True, cls=Dummy) >>> i,j = symbols('i j', below_fermi=True, cls=Dummy) >>> f = Function('f') >>> expr = f(a,b) + f(c,d); expr f(_a, _b) + f(_c, _d) Since a, b, c and d are equivalent summation indices, the expression can be simplified to a single term (for which the dummy indices are still summed over) >>> substitute_dummies(expr) 2*f(_a, _b) Controlling output: By default the dummy symbols that are already present in the expression will be reused in a different permutation. However, if new_indices=True, new dummies will be generated and inserted. The keyword 'pretty_indices' can be used to control this generation of new symbols. By default the new dummies will be generated on the form i_1, i_2, a_1, etc. If you supply a dictionary with key:value pairs in the form: { index_group: string_of_letters } The letters will be used as labels for the new dummy symbols. The index_groups must be one of 'above', 'below' or 'general'. >>> expr = f(a,b,i,j) >>> my_dummies = { 'above':'st', 'below':'uv' } >>> substitute_dummies(expr, new_indices=True, pretty_indices=my_dummies) f(_s, _t, _u, _v) If we run out of letters, or if there is no keyword for some index_group the default dummy generator will be used as a fallback: >>> p,q = symbols('p q', cls=Dummy) # general indices >>> expr = f(p,q) >>> substitute_dummies(expr, new_indices=True, pretty_indices=my_dummies) f(_p_0, _p_1) """
/usr/src/app/target_test_cases/failed_tests_substitute_dummies.txt
def substitute_dummies(expr, new_indices=False, pretty_indices={}): """ Collect terms by substitution of dummy variables. Explanation =========== This routine allows simplification of Add expressions containing terms which differ only due to dummy variables. The idea is to substitute all dummy variables consistently depending on the structure of the term. For each term, we obtain a sequence of all dummy variables, where the order is determined by the index range, what factors the index belongs to and its position in each factor. See _get_ordered_dummies() for more information about the sorting of dummies. The index sequence is then substituted consistently in each term. Examples ======== >>> from sympy import symbols, Function, Dummy >>> from sympy.physics.secondquant import substitute_dummies >>> a,b,c,d = symbols('a b c d', above_fermi=True, cls=Dummy) >>> i,j = symbols('i j', below_fermi=True, cls=Dummy) >>> f = Function('f') >>> expr = f(a,b) + f(c,d); expr f(_a, _b) + f(_c, _d) Since a, b, c and d are equivalent summation indices, the expression can be simplified to a single term (for which the dummy indices are still summed over) >>> substitute_dummies(expr) 2*f(_a, _b) Controlling output: By default the dummy symbols that are already present in the expression will be reused in a different permutation. However, if new_indices=True, new dummies will be generated and inserted. The keyword 'pretty_indices' can be used to control this generation of new symbols. By default the new dummies will be generated on the form i_1, i_2, a_1, etc. If you supply a dictionary with key:value pairs in the form: { index_group: string_of_letters } The letters will be used as labels for the new dummy symbols. The index_groups must be one of 'above', 'below' or 'general'. 
>>> expr = f(a,b,i,j) >>> my_dummies = { 'above':'st', 'below':'uv' } >>> substitute_dummies(expr, new_indices=True, pretty_indices=my_dummies) f(_s, _t, _u, _v) If we run out of letters, or if there is no keyword for some index_group the default dummy generator will be used as a fallback: >>> p,q = symbols('p q', cls=Dummy) # general indices >>> expr = f(p,q) >>> substitute_dummies(expr, new_indices=True, pretty_indices=my_dummies) f(_p_0, _p_1) """ # setup the replacing dummies if new_indices: letters_above = pretty_indices.get('above', "") letters_below = pretty_indices.get('below', "") letters_general = pretty_indices.get('general', "") len_above = len(letters_above) len_below = len(letters_below) len_general = len(letters_general) def _i(number): try: return letters_below[number] except IndexError: return 'i_' + str(number - len_below) def _a(number): try: return letters_above[number] except IndexError: return 'a_' + str(number - len_above) def _p(number): try: return letters_general[number] except IndexError: return 'p_' + str(number - len_general) aboves = [] belows = [] generals = [] dummies = expr.atoms(Dummy) if not new_indices: dummies = sorted(dummies, key=default_sort_key) # generate lists with the dummies we will insert a = i = p = 0 for d in dummies: assum = d.assumptions0 if assum.get("above_fermi"): if new_indices: sym = _a(a) a += 1 l1 = aboves elif assum.get("below_fermi"): if new_indices: sym = _i(i) i += 1 l1 = belows else: if new_indices: sym = _p(p) p += 1 l1 = generals if new_indices: l1.append(Dummy(sym, **assum)) else: l1.append(d) expr = expr.expand() terms = Add.make_args(expr) new_terms = [] for term in terms: i = iter(belows) a = iter(aboves) p = iter(generals) ordered = _get_ordered_dummies(term) subsdict = {} for d in ordered: if d.assumptions0.get('below_fermi'): subsdict[d] = next(i) elif d.assumptions0.get('above_fermi'): subsdict[d] = next(a) else: subsdict[d] = next(p) subslist = [] final_subs = [] for k, v in subsdict.items(): if k == v: continue if v in subsdict: # We check if the sequence of substitutions end quickly. In # that case, we can avoid temporary symbols if we ensure the # correct substitution order. if subsdict[v] in subsdict: # (x, y) -> (y, x), we need a temporary variable x = Dummy('x') subslist.append((k, x)) final_subs.append((x, v)) else: # (x, y) -> (y, a), x->y must be done last # but before temporary variables are resolved final_subs.insert(0, (k, v)) else: subslist.append((k, v)) subslist.extend(final_subs) new_terms.append(term.subs(subslist)) return Add(*new_terms)
substitute_dummies
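A minimal sketch of the dummy-collapsing behaviour, lifted from the equivalent-indices example in the docstring above.

from sympy import symbols, Function, Dummy
from sympy.physics.secondquant import substitute_dummies

a, b, c, d = symbols('a b c d', above_fermi=True, cls=Dummy)
f = Function('f')

# Terms that differ only by equivalent dummy summation indices collapse into one.
expr = f(a, b) + f(c, d)
print(substitute_dummies(expr))  # expected: 2*f(_a, _b)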
sympy
94
sympy/core/sympify.py
def sympify(a, locals=None, convert_xor=True, strict=False, rational=False, evaluate=None): """ Converts an arbitrary expression to a type that can be used inside SymPy. Explanation =========== It will convert Python ints into instances of :class:`~.Integer`, floats into instances of :class:`~.Float`, etc. It is also able to coerce symbolic expressions which inherit from :class:`~.Basic`. This can be useful in cooperation with SAGE. .. warning:: Note that this function uses ``eval``, and thus shouldn't be used on unsanitized input. If the argument is already a type that SymPy understands, it will do nothing but return that value. This can be used at the beginning of a function to ensure you are working with the correct type. Examples ======== >>> from sympy import sympify >>> sympify(2).is_integer True >>> sympify(2).is_real True >>> sympify(2.0).is_real True >>> sympify("2.0").is_real True >>> sympify("2e-45").is_real True If the expression could not be converted, a SympifyError is raised. >>> sympify("x***2") Traceback (most recent call last): ... SympifyError: SympifyError: "could not parse 'x***2'" When attempting to parse non-Python syntax using ``sympify``, it raises a ``SympifyError``: >>> sympify("2x+1") Traceback (most recent call last): ... SympifyError: Sympify of expression 'could not parse '2x+1'' failed To parse non-Python syntax, use ``parse_expr`` from ``sympy.parsing.sympy_parser``. >>> from sympy.parsing.sympy_parser import parse_expr >>> parse_expr("2x+1", transformations="all") 2*x + 1 For more details about ``transformations``: see :func:`~sympy.parsing.sympy_parser.parse_expr` Locals ------ The sympification happens with access to everything that is loaded by ``from sympy import *``; anything used in a string that is not defined by that import will be converted to a symbol. In the following, the ``bitcount`` function is treated as a symbol and the ``O`` is interpreted as the :class:`~.Order` object (used with series) and it raises an error when used improperly: >>> s = 'bitcount(42)' >>> sympify(s) bitcount(42) >>> sympify("O(x)") O(x) >>> sympify("O + 1") Traceback (most recent call last): ... TypeError: unbound method... In order to have ``bitcount`` be recognized it can be imported into a namespace dictionary and passed as locals: >>> ns = {} >>> exec('from sympy.core.evalf import bitcount', ns) >>> sympify(s, locals=ns) 6 In order to have the ``O`` interpreted as a Symbol, identify it as such in the namespace dictionary. This can be done in a variety of ways; all three of the following are possibilities: >>> from sympy import Symbol >>> ns["O"] = Symbol("O") # method 1 >>> exec('from sympy.abc import O', ns) # method 2 >>> ns.update(dict(O=Symbol("O"))) # method 3 >>> sympify("O + 1", locals=ns) O + 1 If you want *all* single-letter and Greek-letter variables to be symbols then you can use the clashing-symbols dictionaries that have been defined there as private variables: ``_clash1`` (single-letter variables), ``_clash2`` (the multi-letter Greek names) or ``_clash`` (both single and multi-letter names that are defined in ``abc``). >>> from sympy.abc import _clash1 >>> set(_clash1) # if this fails, see issue #23903 {'E', 'I', 'N', 'O', 'Q', 'S'} >>> sympify('I & Q', _clash1) I & Q Strict ------ If the option ``strict`` is set to ``True``, only the types for which an explicit conversion has been defined are converted. In the other cases, a SympifyError is raised. >>> print(sympify(None)) None >>> sympify(None, strict=True) Traceback (most recent call last): ... 
SympifyError: SympifyError: None .. deprecated:: 1.6 ``sympify(obj)`` automatically falls back to ``str(obj)`` when all other conversion methods fail, but this is deprecated. ``strict=True`` will disable this deprecated behavior. See :ref:`deprecated-sympify-string-fallback`. Evaluation ---------- If the option ``evaluate`` is set to ``False``, then arithmetic and operators will be converted into their SymPy equivalents and the ``evaluate=False`` option will be added. Nested ``Add`` or ``Mul`` will be denested first. This is done via an AST transformation that replaces operators with their SymPy equivalents, so if an operand redefines any of those operations, the redefined operators will not be used. If argument a is not a string, the mathematical expression is evaluated before being passed to sympify, so adding ``evaluate=False`` will still return the evaluated result of expression. >>> sympify('2**2 / 3 + 5') 19/3 >>> sympify('2**2 / 3 + 5', evaluate=False) 2**2/3 + 5 >>> sympify('4/2+7', evaluate=True) 9 >>> sympify('4/2+7', evaluate=False) 4/2 + 7 >>> sympify(4/2+7, evaluate=False) 9.00000000000000 Extending --------- To extend ``sympify`` to convert custom objects (not derived from ``Basic``), just define a ``_sympy_`` method to your class. You can do that even to classes that you do not own by subclassing or adding the method at runtime. >>> from sympy import Matrix >>> class MyList1(object): ... def __iter__(self): ... yield 1 ... yield 2 ... return ... def __getitem__(self, i): return list(self)[i] ... def _sympy_(self): return Matrix(self) >>> sympify(MyList1()) Matrix([ [1], [2]]) If you do not have control over the class definition you could also use the ``converter`` global dictionary. The key is the class and the value is a function that takes a single argument and returns the desired SymPy object, e.g. ``converter[MyList] = lambda x: Matrix(x)``. >>> class MyList2(object): # XXX Do not do this if you control the class! ... def __iter__(self): # Use _sympy_! ... yield 1 ... yield 2 ... return ... def __getitem__(self, i): return list(self)[i] >>> from sympy.core.sympify import converter >>> converter[MyList2] = lambda x: Matrix(x) >>> sympify(MyList2()) Matrix([ [1], [2]]) Notes ===== The keywords ``rational`` and ``convert_xor`` are only used when the input is a string. convert_xor ----------- >>> sympify('x^y',convert_xor=True) x**y >>> sympify('x^y',convert_xor=False) x ^ y rational -------- >>> sympify('0.1',rational=False) 0.1 >>> sympify('0.1',rational=True) 1/10 Sometimes autosimplification during sympification results in expressions that are very different in structure than what was entered. Until such autosimplification is no longer done, the ``kernS`` function might be of some use. In the example below you can see how an expression reduces to $-1$ by autosimplification, but does not do so when ``kernS`` is used. >>> from sympy.core.sympify import kernS >>> from sympy.abc import x >>> -2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1 -1 >>> s = '-2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1' >>> sympify(s) -1 >>> kernS(s) -2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1 Parameters ========== a : - any object defined in SymPy - standard numeric Python types: ``int``, ``long``, ``float``, ``Decimal`` - strings (like ``"0.09"``, ``"2e-19"`` or ``'sin(x)'``) - booleans, including ``None`` (will leave ``None`` unchanged) - dicts, lists, sets or tuples containing any of the above convert_xor : bool, optional If true, treats ``^`` as exponentiation. 
If False, treats ``^`` as XOR itself. Used only when input is a string. locals : any object defined in SymPy, optional In order to have strings be recognized it can be imported into a namespace dictionary and passed as locals. strict : bool, optional If the option strict is set to ``True``, only the types for which an explicit conversion has been defined are converted. In the other cases, a SympifyError is raised. rational : bool, optional If ``True``, converts floats into :class:`~.Rational`. If ``False``, it lets floats remain as it is. Used only when input is a string. evaluate : bool, optional If False, then arithmetic and operators will be converted into their SymPy equivalents. If True the expression will be evaluated and the result will be returned. """
/usr/src/app/target_test_cases/failed_tests_sympify.txt
def sympify(a, locals=None, convert_xor=True, strict=False, rational=False, evaluate=None): """ Converts an arbitrary expression to a type that can be used inside SymPy. Explanation =========== It will convert Python ints into instances of :class:`~.Integer`, floats into instances of :class:`~.Float`, etc. It is also able to coerce symbolic expressions which inherit from :class:`~.Basic`. This can be useful in cooperation with SAGE. .. warning:: Note that this function uses ``eval``, and thus shouldn't be used on unsanitized input. If the argument is already a type that SymPy understands, it will do nothing but return that value. This can be used at the beginning of a function to ensure you are working with the correct type. Examples ======== >>> from sympy import sympify >>> sympify(2).is_integer True >>> sympify(2).is_real True >>> sympify(2.0).is_real True >>> sympify("2.0").is_real True >>> sympify("2e-45").is_real True If the expression could not be converted, a SympifyError is raised. >>> sympify("x***2") Traceback (most recent call last): ... SympifyError: SympifyError: "could not parse 'x***2'" When attempting to parse non-Python syntax using ``sympify``, it raises a ``SympifyError``: >>> sympify("2x+1") Traceback (most recent call last): ... SympifyError: Sympify of expression 'could not parse '2x+1'' failed To parse non-Python syntax, use ``parse_expr`` from ``sympy.parsing.sympy_parser``. >>> from sympy.parsing.sympy_parser import parse_expr >>> parse_expr("2x+1", transformations="all") 2*x + 1 For more details about ``transformations``: see :func:`~sympy.parsing.sympy_parser.parse_expr` Locals ------ The sympification happens with access to everything that is loaded by ``from sympy import *``; anything used in a string that is not defined by that import will be converted to a symbol. In the following, the ``bitcount`` function is treated as a symbol and the ``O`` is interpreted as the :class:`~.Order` object (used with series) and it raises an error when used improperly: >>> s = 'bitcount(42)' >>> sympify(s) bitcount(42) >>> sympify("O(x)") O(x) >>> sympify("O + 1") Traceback (most recent call last): ... TypeError: unbound method... In order to have ``bitcount`` be recognized it can be imported into a namespace dictionary and passed as locals: >>> ns = {} >>> exec('from sympy.core.evalf import bitcount', ns) >>> sympify(s, locals=ns) 6 In order to have the ``O`` interpreted as a Symbol, identify it as such in the namespace dictionary. This can be done in a variety of ways; all three of the following are possibilities: >>> from sympy import Symbol >>> ns["O"] = Symbol("O") # method 1 >>> exec('from sympy.abc import O', ns) # method 2 >>> ns.update(dict(O=Symbol("O"))) # method 3 >>> sympify("O + 1", locals=ns) O + 1 If you want *all* single-letter and Greek-letter variables to be symbols then you can use the clashing-symbols dictionaries that have been defined there as private variables: ``_clash1`` (single-letter variables), ``_clash2`` (the multi-letter Greek names) or ``_clash`` (both single and multi-letter names that are defined in ``abc``). >>> from sympy.abc import _clash1 >>> set(_clash1) # if this fails, see issue #23903 {'E', 'I', 'N', 'O', 'Q', 'S'} >>> sympify('I & Q', _clash1) I & Q Strict ------ If the option ``strict`` is set to ``True``, only the types for which an explicit conversion has been defined are converted. In the other cases, a SympifyError is raised. >>> print(sympify(None)) None >>> sympify(None, strict=True) Traceback (most recent call last): ... 
SympifyError: SympifyError: None .. deprecated:: 1.6 ``sympify(obj)`` automatically falls back to ``str(obj)`` when all other conversion methods fail, but this is deprecated. ``strict=True`` will disable this deprecated behavior. See :ref:`deprecated-sympify-string-fallback`. Evaluation ---------- If the option ``evaluate`` is set to ``False``, then arithmetic and operators will be converted into their SymPy equivalents and the ``evaluate=False`` option will be added. Nested ``Add`` or ``Mul`` will be denested first. This is done via an AST transformation that replaces operators with their SymPy equivalents, so if an operand redefines any of those operations, the redefined operators will not be used. If argument a is not a string, the mathematical expression is evaluated before being passed to sympify, so adding ``evaluate=False`` will still return the evaluated result of expression. >>> sympify('2**2 / 3 + 5') 19/3 >>> sympify('2**2 / 3 + 5', evaluate=False) 2**2/3 + 5 >>> sympify('4/2+7', evaluate=True) 9 >>> sympify('4/2+7', evaluate=False) 4/2 + 7 >>> sympify(4/2+7, evaluate=False) 9.00000000000000 Extending --------- To extend ``sympify`` to convert custom objects (not derived from ``Basic``), just define a ``_sympy_`` method to your class. You can do that even to classes that you do not own by subclassing or adding the method at runtime. >>> from sympy import Matrix >>> class MyList1(object): ... def __iter__(self): ... yield 1 ... yield 2 ... return ... def __getitem__(self, i): return list(self)[i] ... def _sympy_(self): return Matrix(self) >>> sympify(MyList1()) Matrix([ [1], [2]]) If you do not have control over the class definition you could also use the ``converter`` global dictionary. The key is the class and the value is a function that takes a single argument and returns the desired SymPy object, e.g. ``converter[MyList] = lambda x: Matrix(x)``. >>> class MyList2(object): # XXX Do not do this if you control the class! ... def __iter__(self): # Use _sympy_! ... yield 1 ... yield 2 ... return ... def __getitem__(self, i): return list(self)[i] >>> from sympy.core.sympify import converter >>> converter[MyList2] = lambda x: Matrix(x) >>> sympify(MyList2()) Matrix([ [1], [2]]) Notes ===== The keywords ``rational`` and ``convert_xor`` are only used when the input is a string. convert_xor ----------- >>> sympify('x^y',convert_xor=True) x**y >>> sympify('x^y',convert_xor=False) x ^ y rational -------- >>> sympify('0.1',rational=False) 0.1 >>> sympify('0.1',rational=True) 1/10 Sometimes autosimplification during sympification results in expressions that are very different in structure than what was entered. Until such autosimplification is no longer done, the ``kernS`` function might be of some use. In the example below you can see how an expression reduces to $-1$ by autosimplification, but does not do so when ``kernS`` is used. >>> from sympy.core.sympify import kernS >>> from sympy.abc import x >>> -2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1 -1 >>> s = '-2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1' >>> sympify(s) -1 >>> kernS(s) -2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1 Parameters ========== a : - any object defined in SymPy - standard numeric Python types: ``int``, ``long``, ``float``, ``Decimal`` - strings (like ``"0.09"``, ``"2e-19"`` or ``'sin(x)'``) - booleans, including ``None`` (will leave ``None`` unchanged) - dicts, lists, sets or tuples containing any of the above convert_xor : bool, optional If true, treats ``^`` as exponentiation. 
If False, treats ``^`` as XOR itself. Used only when input is a string. locals : any object defined in SymPy, optional In order to have strings be recognized it can be imported into a namespace dictionary and passed as locals. strict : bool, optional If the option strict is set to ``True``, only the types for which an explicit conversion has been defined are converted. In the other cases, a SympifyError is raised. rational : bool, optional If ``True``, converts floats into :class:`~.Rational`. If ``False``, it lets floats remain as it is. Used only when input is a string. evaluate : bool, optional If False, then arithmetic and operators will be converted into their SymPy equivalents. If True the expression will be evaluated and the result will be returned. """ # XXX: If a is a Basic subclass rather than instance (e.g. sin rather than # sin(x)) then a.__sympy__ will be the property. Only on the instance will # a.__sympy__ give the *value* of the property (True). Since sympify(sin) # was used for a long time we allow it to pass. However if strict=True as # is the case in internal calls to _sympify then we only allow # is_sympy=True. # # https://github.com/sympy/sympy/issues/20124 is_sympy = getattr(a, '__sympy__', None) if is_sympy is True: return a elif is_sympy is not None: if not strict: return a else: raise SympifyError(a) if isinstance(a, CantSympify): raise SympifyError(a) cls = getattr(a, "__class__", None) #Check if there exists a converter for any of the types in the mro for superclass in getmro(cls): #First check for user defined converters conv = _external_converter.get(superclass) if conv is None: #if none exists, check for SymPy defined converters conv = _sympy_converter.get(superclass) if conv is not None: return conv(a) if cls is type(None): if strict: raise SympifyError(a) else: return a if evaluate is None: evaluate = global_parameters.evaluate # Support for basic numpy datatypes if _is_numpy_instance(a): import numpy as np if np.isscalar(a): return _convert_numpy_types(a, locals=locals, convert_xor=convert_xor, strict=strict, rational=rational, evaluate=evaluate) _sympy_ = getattr(a, "_sympy_", None) if _sympy_ is not None: return a._sympy_() if not strict: # Put numpy array conversion _before_ float/int, see # <https://github.com/sympy/sympy/issues/13924>. flat = getattr(a, "flat", None) if flat is not None: shape = getattr(a, "shape", None) if shape is not None: from sympy.tensor.array import Array return Array(a.flat, a.shape) # works with e.g. NumPy arrays if not isinstance(a, str): if _is_numpy_instance(a): import numpy as np assert not isinstance(a, np.number) if isinstance(a, np.ndarray): # Scalar arrays (those with zero dimensions) have sympify # called on the scalar element. if a.ndim == 0: try: return sympify(a.item(), locals=locals, convert_xor=convert_xor, strict=strict, rational=rational, evaluate=evaluate) except SympifyError: pass elif hasattr(a, '__float__'): # float and int can coerce size-one numpy arrays to their lone # element. See issue https://github.com/numpy/numpy/issues/10404. return sympify(float(a)) elif hasattr(a, '__int__'): return sympify(int(a)) if strict: raise SympifyError(a) if iterable(a): try: return type(a)([sympify(x, locals=locals, convert_xor=convert_xor, rational=rational, evaluate=evaluate) for x in a]) except TypeError: # Not all iterables are rebuildable with their type. 
pass if not isinstance(a, str): raise SympifyError('cannot sympify object of type %r' % type(a)) from sympy.parsing.sympy_parser import (parse_expr, TokenError, standard_transformations) from sympy.parsing.sympy_parser import convert_xor as t_convert_xor from sympy.parsing.sympy_parser import rationalize as t_rationalize transformations = standard_transformations if rational: transformations += (t_rationalize,) if convert_xor: transformations += (t_convert_xor,) try: a = a.replace('\n', '') expr = parse_expr(a, local_dict=locals, transformations=transformations, evaluate=evaluate) except (TokenError, SyntaxError) as exc: raise SympifyError('could not parse %r' % a, exc) return expr
sympify
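A few representative sympify calls, assuming nothing beyond the behaviour documented above; results are noted in comments.

from sympy import sympify
from sympy.core.sympify import SympifyError

print(sympify("3/4"))                 # Rational 3/4, not a Python float
print(sympify("0.5", rational=True))  # expected: 1/2

# strict=True refuses objects with no explicit converter.
try:
    sympify(None, strict=True)
except SympifyError as exc:
    print("rejected:", exc)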
sympy
95
sympy/physics/quantum/spin.py
def uncouple(expr, jn=None, jcoupling_list=None): """ Uncouple a coupled spin state Gives the uncoupled representation of a coupled spin state. Arguments must be either a spin state that is a subclass of CoupledSpinState or a spin state that is a subclass of SpinState and an array giving the j values of the spaces that are to be coupled Parameters ========== expr : Expr The expression containing states that are to be coupled. If the states are a subclass of SpinState, the ``jn`` and ``jcoupling`` parameters must be defined. If the states are a subclass of CoupledSpinState, ``jn`` and ``jcoupling`` will be taken from the state. jn : list or tuple The list of the j-values that are coupled. If state is a CoupledSpinState, this parameter is ignored. This must be defined if state is not a subclass of CoupledSpinState. The syntax of this parameter is the same as the ``jn`` parameter of JzKetCoupled. jcoupling_list : list or tuple The list defining how the j-values are coupled together. If state is a CoupledSpinState, this parameter is ignored. This must be defined if state is not a subclass of CoupledSpinState. The syntax of this parameter is the same as the ``jcoupling`` parameter of JzKetCoupled. Examples ======== Uncouple a numerical state using a CoupledSpinState state: >>> from sympy.physics.quantum.spin import JzKetCoupled, uncouple >>> from sympy import S >>> uncouple(JzKetCoupled(1, 0, (S(1)/2, S(1)/2))) sqrt(2)*|1/2,-1/2>x|1/2,1/2>/2 + sqrt(2)*|1/2,1/2>x|1/2,-1/2>/2 Perform the same calculation using a SpinState state: >>> from sympy.physics.quantum.spin import JzKet >>> uncouple(JzKet(1, 0), (S(1)/2, S(1)/2)) sqrt(2)*|1/2,-1/2>x|1/2,1/2>/2 + sqrt(2)*|1/2,1/2>x|1/2,-1/2>/2 Uncouple a numerical state of three coupled spaces using a CoupledSpinState state: >>> uncouple(JzKetCoupled(1, 1, (1, 1, 1), ((1,3,1),(1,2,1)) )) |1,-1>x|1,1>x|1,1>/2 - |1,0>x|1,0>x|1,1>/2 + |1,1>x|1,0>x|1,0>/2 - |1,1>x|1,1>x|1,-1>/2 Perform the same calculation using a SpinState state: >>> uncouple(JzKet(1, 1), (1, 1, 1), ((1,3,1),(1,2,1)) ) |1,-1>x|1,1>x|1,1>/2 - |1,0>x|1,0>x|1,1>/2 + |1,1>x|1,0>x|1,0>/2 - |1,1>x|1,1>x|1,-1>/2 Uncouple a symbolic state using a CoupledSpinState state: >>> from sympy import symbols >>> j,m,j1,j2 = symbols('j m j1 j2') >>> uncouple(JzKetCoupled(j, m, (j1, j2))) Sum(CG(j1, m1, j2, m2, j, m)*|j1,m1>x|j2,m2>, (m1, -j1, j1), (m2, -j2, j2)) Perform the same calculation using a SpinState state >>> uncouple(JzKet(j, m), (j1, j2)) Sum(CG(j1, m1, j2, m2, j, m)*|j1,m1>x|j2,m2>, (m1, -j1, j1), (m2, -j2, j2)) """
/usr/src/app/target_test_cases/failed_tests_uncouple.txt
def uncouple(expr, jn=None, jcoupling_list=None): """ Uncouple a coupled spin state Gives the uncoupled representation of a coupled spin state. Arguments must be either a spin state that is a subclass of CoupledSpinState or a spin state that is a subclass of SpinState and an array giving the j values of the spaces that are to be coupled Parameters ========== expr : Expr The expression containing states that are to be coupled. If the states are a subclass of SpinState, the ``jn`` and ``jcoupling`` parameters must be defined. If the states are a subclass of CoupledSpinState, ``jn`` and ``jcoupling`` will be taken from the state. jn : list or tuple The list of the j-values that are coupled. If state is a CoupledSpinState, this parameter is ignored. This must be defined if state is not a subclass of CoupledSpinState. The syntax of this parameter is the same as the ``jn`` parameter of JzKetCoupled. jcoupling_list : list or tuple The list defining how the j-values are coupled together. If state is a CoupledSpinState, this parameter is ignored. This must be defined if state is not a subclass of CoupledSpinState. The syntax of this parameter is the same as the ``jcoupling`` parameter of JzKetCoupled. Examples ======== Uncouple a numerical state using a CoupledSpinState state: >>> from sympy.physics.quantum.spin import JzKetCoupled, uncouple >>> from sympy import S >>> uncouple(JzKetCoupled(1, 0, (S(1)/2, S(1)/2))) sqrt(2)*|1/2,-1/2>x|1/2,1/2>/2 + sqrt(2)*|1/2,1/2>x|1/2,-1/2>/2 Perform the same calculation using a SpinState state: >>> from sympy.physics.quantum.spin import JzKet >>> uncouple(JzKet(1, 0), (S(1)/2, S(1)/2)) sqrt(2)*|1/2,-1/2>x|1/2,1/2>/2 + sqrt(2)*|1/2,1/2>x|1/2,-1/2>/2 Uncouple a numerical state of three coupled spaces using a CoupledSpinState state: >>> uncouple(JzKetCoupled(1, 1, (1, 1, 1), ((1,3,1),(1,2,1)) )) |1,-1>x|1,1>x|1,1>/2 - |1,0>x|1,0>x|1,1>/2 + |1,1>x|1,0>x|1,0>/2 - |1,1>x|1,1>x|1,-1>/2 Perform the same calculation using a SpinState state: >>> uncouple(JzKet(1, 1), (1, 1, 1), ((1,3,1),(1,2,1)) ) |1,-1>x|1,1>x|1,1>/2 - |1,0>x|1,0>x|1,1>/2 + |1,1>x|1,0>x|1,0>/2 - |1,1>x|1,1>x|1,-1>/2 Uncouple a symbolic state using a CoupledSpinState state: >>> from sympy import symbols >>> j,m,j1,j2 = symbols('j m j1 j2') >>> uncouple(JzKetCoupled(j, m, (j1, j2))) Sum(CG(j1, m1, j2, m2, j, m)*|j1,m1>x|j2,m2>, (m1, -j1, j1), (m2, -j2, j2)) Perform the same calculation using a SpinState state >>> uncouple(JzKet(j, m), (j1, j2)) Sum(CG(j1, m1, j2, m2, j, m)*|j1,m1>x|j2,m2>, (m1, -j1, j1), (m2, -j2, j2)) """ a = expr.atoms(SpinState) for state in a: expr = expr.subs(state, _uncouple(state, jn, jcoupling_list)) return expr
uncouple
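A one-line uncoupling sketch for the stretched two-spin state, assuming the same imports as the docstring above.

from sympy import S
from sympy.physics.quantum.spin import JzKetCoupled, uncouple

# |j=1, m=1> built from two spin-1/2 spaces uncouples to the single product ket |up>|up>.
print(uncouple(JzKetCoupled(1, 1, (S(1)/2, S(1)/2))))  # expected: |1/2,1/2>x|1/2,1/2>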
sympy
96
sympy/solvers/solvers.py
def unrad(eq, *syms, **flags): """ Remove radicals with symbolic arguments and return (eq, cov), None, or raise an error. Explanation =========== None is returned if there are no radicals to remove. NotImplementedError is raised if there are radicals and they cannot be removed or if the relationship between the original symbols and the change of variable needed to rewrite the system as a polynomial cannot be solved. Otherwise the tuple, ``(eq, cov)``, is returned where: *eq*, ``cov`` *eq* is an equation without radicals (in the symbol(s) of interest) whose solutions are a superset of the solutions to the original expression. *eq* might be rewritten in terms of a new variable; the relationship to the original variables is given by ``cov`` which is a list containing ``v`` and ``v**p - b`` where ``p`` is the power needed to clear the radical and ``b`` is the radical now expressed as a polynomial in the symbols of interest. For example, for sqrt(2 - x) the tuple would be ``(c, c**2 - 2 + x)``. The solutions of *eq* will contain solutions to the original equation (if there are any). *syms* An iterable of symbols which, if provided, will limit the focus of radical removal: only radicals with one or more of the symbols of interest will be cleared. All free symbols are used if *syms* is not set. *flags* are used internally for communication during recursive calls. Two options are also recognized: ``take``, when defined, is interpreted as a single-argument function that returns True if a given Pow should be handled. Radicals can be removed from an expression if: * All bases of the radicals are the same; a change of variables is done in this case. * If all radicals appear in one term of the expression. * There are only four terms with sqrt() factors or there are less than four terms having sqrt() factors. * There are only two terms with radicals. Examples ======== >>> from sympy.solvers.solvers import unrad >>> from sympy.abc import x >>> from sympy import sqrt, Rational, root >>> unrad(sqrt(x)*x**Rational(1, 3) + 2) (x**5 - 64, []) >>> unrad(sqrt(x) + root(x + 1, 3)) (-x**3 + x**2 + 2*x + 1, []) >>> eq = sqrt(x) + root(x, 3) - 2 >>> unrad(eq) (_p**3 + _p**2 - 2, [_p, _p**6 - x]) """
/usr/src/app/target_test_cases/failed_tests_unrad.txt
def unrad(eq, *syms, **flags):
    """
    Remove radicals with symbolic arguments and return (eq, cov), None, or
    raise an error.

    Explanation
    ===========

    None is returned if there are no radicals to remove.

    NotImplementedError is raised if there are radicals and they cannot be
    removed or if the relationship between the original symbols and the
    change of variable needed to rewrite the system as a polynomial cannot
    be solved.

    Otherwise the tuple, ``(eq, cov)``, is returned where:

        *eq*, ``cov``
            *eq* is an equation without radicals (in the symbol(s) of
            interest) whose solutions are a superset of the solutions to the
            original expression. *eq* might be rewritten in terms of a new
            variable; the relationship to the original variables is given by
            ``cov`` which is a list containing ``v`` and ``v**p - b`` where
            ``p`` is the power needed to clear the radical and ``b`` is the
            radical now expressed as a polynomial in the symbols of interest.
            For example, for sqrt(2 - x) the tuple would be
            ``(c, c**2 - 2 + x)``. The solutions of *eq* will contain
            solutions to the original equation (if there are any).

    *syms*
        An iterable of symbols which, if provided, will limit the focus of
        radical removal: only radicals with one or more of the symbols of
        interest will be cleared. All free symbols are used if *syms* is not
        set.

    *flags* are used internally for communication during recursive calls.
    The following option is also recognized:

        ``take``, when defined, is interpreted as a single-argument function
        that returns True if a given Pow should be handled.

    Radicals can be removed from an expression if:

        *   All bases of the radicals are the same; a change of variables is
            done in this case.
        *   All radicals appear in one term of the expression.
        *   There are only four terms with sqrt() factors, or there are
            fewer than four terms having sqrt() factors.
        *   There are only two terms with radicals.

    Examples
    ========

    >>> from sympy.solvers.solvers import unrad
    >>> from sympy.abc import x
    >>> from sympy import sqrt, Rational, root

    >>> unrad(sqrt(x)*x**Rational(1, 3) + 2)
    (x**5 - 64, [])
    >>> unrad(sqrt(x) + root(x + 1, 3))
    (-x**3 + x**2 + 2*x + 1, [])
    >>> eq = sqrt(x) + root(x, 3) - 2
    >>> unrad(eq)
    (_p**3 + _p**2 - 2, [_p, _p**6 - x])

    """
    uflags = {"check": False, "simplify": False}

    def _cov(p, e):
        if cov:
            # XXX - uncovered
            oldp, olde = cov
            if Poly(e, p).degree(p) in (1, 2):
                cov[:] = [p, olde.subs(oldp, _vsolve(e, p, **uflags)[0])]
            else:
                raise NotImplementedError
        else:
            cov[:] = [p, e]

    def _canonical(eq, cov):
        if cov:
            # change symbol to vanilla so no solutions are eliminated
            p, e = cov
            rep = {p: Dummy(p.name)}
            eq = eq.xreplace(rep)
            cov = [p.xreplace(rep), e.xreplace(rep)]

        # remove constants and powers of factors since these don't change
        # the location of the root; XXX should factor or factor_terms be used?
        eq = factor_terms(_mexpand(eq.as_numer_denom()[0], recursive=True),
            clear=True)
        if eq.is_Mul:
            args = []
            for f in eq.args:
                if f.is_number:
                    continue
                if f.is_Pow:
                    args.append(f.base)
                else:
                    args.append(f)
            eq = Mul(*args)  # leave as Mul for more efficient solving

        # make the sign canonical
        margs = list(Mul.make_args(eq))
        changed = False
        for i, m in enumerate(margs):
            if m.could_extract_minus_sign():
                margs[i] = -m
                changed = True
        if changed:
            eq = Mul(*margs, evaluate=False)

        return eq, cov

    def _Q(pow):
        # return leading Rational of denominator of Pow's exponent
        c = pow.as_base_exp()[1].as_coeff_Mul()[0]
        if not c.is_Rational:
            return S.One
        return c.q

    # define the _take method that will determine whether a term is of interest
    def _take(d):
        # return True if coefficient of any factor's exponent's den is not 1
        for pow in Mul.make_args(d):
            if not pow.is_Pow:
                continue
            if _Q(pow) == 1:
                continue
            if pow.free_symbols & syms:
                return True
        return False
    _take = flags.setdefault('_take', _take)

    if isinstance(eq, Eq):
        eq = eq.lhs - eq.rhs  # XXX legacy Eq as Eqn support
    elif not isinstance(eq, Expr):
        return

    cov, nwas, rpt = [flags.setdefault(k, v) for k, v in
        sorted({"cov": [], "n": None, "rpt": 0}.items())]

    # preconditioning
    eq = powdenest(factor_terms(eq, radical=True, clear=True))
    eq = eq.as_numer_denom()[0]
    eq = _mexpand(eq, recursive=True)
    if eq.is_number:
        return

    # see if there are radicals in symbols of interest
    syms = set(syms) or eq.free_symbols  # _take uses this
    poly = eq.as_poly()
    gens = [g for g in poly.gens if _take(g)]
    if not gens:
        return

    # recast poly in terms of eigen-gens
    poly = eq.as_poly(*gens)

    # not a polynomial e.g. 1 + sqrt(x)*exp(sqrt(x)) with gen sqrt(x)
    if poly is None:
        return

    # - an exponent has a symbol of interest (don't handle)
    if any(g.exp.has(*syms) for g in gens):
        return

    def _rads_bases_lcm(poly):
        # if all the bases are the same or all the radicals are in one
        # term, `lcm` will be the lcm of the denominators of the
        # exponents of the radicals
        lcm = 1
        rads = set()
        bases = set()
        for g in poly.gens:
            q = _Q(g)
            if q != 1:
                rads.add(g)
                lcm = ilcm(lcm, q)
                bases.add(g.base)
        return rads, bases, lcm
    rads, bases, lcm = _rads_bases_lcm(poly)

    covsym = Dummy('p', nonnegative=True)

    # only keep in syms symbols that actually appear in radicals;
    # and update gens
    newsyms = set()
    for r in rads:
        newsyms.update(syms & r.free_symbols)
    if newsyms != syms:
        syms = newsyms

    # get terms together that have common generators
    drad = dict(zip(rads, range(len(rads))))
    rterms = {(): []}
    args = Add.make_args(poly.as_expr())
    for t in args:
        if _take(t):
            common = set(t.as_poly().gens).intersection(rads)
            key = tuple(sorted([drad[i] for i in common]))
        else:
            key = ()
        rterms.setdefault(key, []).append(t)
    others = Add(*rterms.pop(()))
    rterms = [Add(*rterms[k]) for k in rterms.keys()]

    # the output will depend on the order terms are processed, so
    # make it canonical quickly
    rterms = list(reversed(list(ordered(rterms))))

    ok = False  # we don't have a solution yet
    depth = sqrt_depth(eq)

    if len(rterms) == 1 and not (rterms[0].is_Add and lcm > 2):
        eq = rterms[0]**lcm - ((-others)**lcm)
        ok = True
    else:
        if len(rterms) == 1 and rterms[0].is_Add:
            rterms = list(rterms[0].args)
        if len(bases) == 1:
            b = bases.pop()
            if len(syms) > 1:
                x = b.free_symbols
            else:
                x = syms
            x = list(ordered(x))[0]
            try:
                inv = _vsolve(covsym**lcm - b, x, **uflags)
                if not inv:
                    raise NotImplementedError
                eq = poly.as_expr().subs(b, covsym**lcm).subs(x, inv[0])
                _cov(covsym, covsym**lcm - b)
                return _canonical(eq, cov)
            except NotImplementedError:
                pass

        if len(rterms) == 2:
            if not others:
                eq = rterms[0]**lcm - (-rterms[1])**lcm
                ok = True
            elif not log(lcm, 2).is_Integer:
                # the lcm-is-power-of-two case is handled below
                r0, r1 = rterms
                if flags.get('_reverse', False):
                    r1, r0 = r0, r1
                i0 = _rads0, _bases0, lcm0 = _rads_bases_lcm(r0.as_poly())
                i1 = _rads1, _bases1, lcm1 = _rads_bases_lcm(r1.as_poly())
                for reverse in range(2):
                    if reverse:
                        i0, i1 = i1, i0
                        r0, r1 = r1, r0
                    _rads1, _, lcm1 = i1
                    _rads1 = Mul(*_rads1)
                    t1 = _rads1**lcm1
                    c = covsym**lcm1 - t1
                    for x in syms:
                        try:
                            sol = _vsolve(c, x, **uflags)
                            if not sol:
                                raise NotImplementedError
                            neweq = r0.subs(x, sol[0]) + covsym*r1/_rads1 + \
                                others
                            tmp = unrad(neweq, covsym)
                            if tmp:
                                eq, newcov = tmp
                                if newcov:
                                    newp, newc = newcov
                                    _cov(newp, c.subs(covsym,
                                        _vsolve(newc, covsym, **uflags)[0]))
                                else:
                                    _cov(covsym, c)
                            else:
                                eq = neweq
                                _cov(covsym, c)
                            ok = True
                            break
                        except NotImplementedError:
                            if reverse:
                                raise NotImplementedError(
                                    'no successful change of variable found')
                            else:
                                pass
                    if ok:
                        break
        elif len(rterms) == 3:
            # two cube roots and another with order less than 5
            # (so an analytical solution can be found) or a base
            # that matches one of the cube root bases
            info = [_rads_bases_lcm(i.as_poly()) for i in rterms]
            RAD = 0
            BASES = 1
            LCM = 2
            if info[0][LCM] != 3:
                info.append(info.pop(0))
                rterms.append(rterms.pop(0))
            elif info[1][LCM] != 3:
                info.append(info.pop(1))
                rterms.append(rterms.pop(1))
            if info[0][LCM] == info[1][LCM] == 3:
                if info[1][BASES] != info[2][BASES]:
                    info[0], info[1] = info[1], info[0]
                    rterms[0], rterms[1] = rterms[1], rterms[0]
                if info[1][BASES] == info[2][BASES]:
                    eq = rterms[0]**3 + (rterms[1] + rterms[2] + others)**3
                    ok = True
                elif info[2][LCM] < 5:
                    # a*root(A, 3) + b*root(B, 3) + others = c
                    a, b, c, d, A, B = [Dummy(i) for i in 'abcdAB']
                    # zz represents the unraded expression into which the
                    # specifics for this case are substituted
                    zz = (c - d)*(A**3*a**9 + 3*A**2*B*a**6*b**3 -
                        3*A**2*a**6*c**3 + 9*A**2*a**6*c**2*d -
                        9*A**2*a**6*c*d**2 + 3*A**2*a**6*d**3 +
                        3*A*B**2*a**3*b**6 + 21*A*B*a**3*b**3*c**3 -
                        63*A*B*a**3*b**3*c**2*d + 63*A*B*a**3*b**3*c*d**2 -
                        21*A*B*a**3*b**3*d**3 + 3*A*a**3*c**6 -
                        18*A*a**3*c**5*d + 45*A*a**3*c**4*d**2 -
                        60*A*a**3*c**3*d**3 + 45*A*a**3*c**2*d**4 -
                        18*A*a**3*c*d**5 + 3*A*a**3*d**6 + B**3*b**9 -
                        3*B**2*b**6*c**3 + 9*B**2*b**6*c**2*d -
                        9*B**2*b**6*c*d**2 + 3*B**2*b**6*d**3 +
                        3*B*b**3*c**6 - 18*B*b**3*c**5*d +
                        45*B*b**3*c**4*d**2 - 60*B*b**3*c**3*d**3 +
                        45*B*b**3*c**2*d**4 - 18*B*b**3*c*d**5 +
                        3*B*b**3*d**6 - c**9 + 9*c**8*d - 36*c**7*d**2 +
                        84*c**6*d**3 - 126*c**5*d**4 + 126*c**4*d**5 -
                        84*c**3*d**6 + 36*c**2*d**7 - 9*c*d**8 + d**9)

                    def _t(i):
                        b = Mul(*info[i][RAD])
                        return cancel(rterms[i]/b), Mul(*info[i][BASES])
                    aa, AA = _t(0)
                    bb, BB = _t(1)
                    cc = -rterms[2]
                    dd = others
                    eq = zz.xreplace(dict(zip(
                        (a, A, b, B, c, d),
                        (aa, AA, bb, BB, cc, dd))))
                    ok = True
        # handle power-of-2 cases
        if not ok:
            if log(lcm, 2).is_Integer and (not others and
                    len(rterms) == 4 or len(rterms) < 4):
                def _norm2(a, b):
                    return a**2 + b**2 + 2*a*b

                if len(rterms) == 4:
                    # (r0+r1)**2 - (r2+r3)**2
                    r0, r1, r2, r3 = rterms
                    eq = _norm2(r0, r1) - _norm2(r2, r3)
                    ok = True
                elif len(rterms) == 3:
                    # (r1+r2)**2 - (r0+others)**2
                    r0, r1, r2 = rterms
                    eq = _norm2(r1, r2) - _norm2(r0, others)
                    ok = True
                elif len(rterms) == 2:
                    # r0**2 - (r1+others)**2
                    r0, r1 = rterms
                    eq = r0**2 - _norm2(r1, others)
                    ok = True

    new_depth = sqrt_depth(eq) if ok else depth
    rpt += 1  # XXX how many repeats with others unchanging is enough?
    if not ok or (
                nwas is not None and len(rterms) == nwas and
                new_depth is not None and
                new_depth == depth and
                rpt > 3):
        raise NotImplementedError('Cannot remove all radicals')

    flags.update({"cov": cov, "n": len(rterms), "rpt": rpt})
    neq = unrad(eq, *syms, **flags)
    if neq:
        eq, cov = neq
    eq, cov = _canonical(eq, cov)
    return eq, cov
unrad
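The ``cov`` list returned alongside the radical-free equation is what ties its roots back to the original variable. Below is a minimal usage sketch, assuming a standard SymPy installation; the names ``poly_eq``, ``rel``, ``xvals`` and ``candidates`` are illustrative helpers, not part of the unrad API, and the inline expected outputs are only what the docstring's own example suggests.

from sympy import Symbol, sqrt, root, solve
from sympy.solvers.solvers import unrad

x = Symbol('x')
eq = sqrt(x) + root(x, 3) - 2

poly_eq, cov = unrad(eq)      # e.g. (_p**3 + _p**2 - 2, [_p, _p**6 - x])

if cov:
    p, rel = cov              # rel relates the cover variable to x, here _p**6 - x
    # roots of the radical-free polynomial, mapped back to x via the cover relation
    xvals = [xv for pv in solve(poly_eq, p)
             for xv in solve(rel.subs(p, pv), x)]
else:
    xvals = solve(poly_eq, x)

# unrad only guarantees a superset of solutions, so each candidate is checked
candidates = [xv for xv in xvals if eq.subs(x, xv).equals(0)]
print(candidates)             # [1]

The explicit check at the end reflects the docstring's caveat that the solutions of the returned equation are a superset of the solutions of the original expression.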