Dataset schema (extraction residue): column "Description" holds strings of 18 to 161k characters; column "Code" holds strings of 15 to 300k characters.
the mandelbrot set is the set of complex numbers c for which the series zn1 zn zn c does not diverge i e remains bounded thus a complex number c is a member of the mandelbrot set if when starting with z0 0 and applying the iteration repeatedly the absolute value of zn remains bounded for all n 0 complex numbers can be written as a bi a is the real component usually drawn on the xaxis and bi is the imaginary component usually drawn on the yaxis most visualizations of the mandelbrot set use a colorcoding to indicate after how many steps in the series the numbers outside the set diverge images of the mandelbrot set exhibit an elaborate and infinitely complicated boundary that reveals progressively everfiner recursive detail at increasing magnifications making the boundary of the mandelbrot set a fractal curve description adapted from https en wikipedia orgwikimandelbrotset see also https en wikipedia orgwikiplottingalgorithmsforthemandelbrotset return the relative distance stepmaxstep after which the complex number constituted by this xypair diverges members of the mandelbrot set do not diverge so their distance is 1 getdistance0 0 50 1 0 getdistance0 5 0 5 50 0 061224489795918366 getdistance2 0 50 0 0 divergence happens for all complex number with an absolute value greater than 4 blackwhite colorcoding that ignores the relative distance the mandelbrot set is black everything else is white getblackandwhitergb0 255 255 255 getblackandwhitergb0 5 255 255 255 getblackandwhitergb1 0 0 0 colorcoding taking the relative distance into account the mandelbrot set is black getcolorcodedrgb0 255 0 0 getcolorcodedrgb0 5 0 255 255 getcolorcodedrgb1 0 0 0 function to generate the image of the mandelbrot set two types of coordinates are used imagecoordinates that refer to the pixels and figurecoordinates that refer to the complex numbers inside and outside the mandelbrot set the figurecoordinates in the arguments of this function determine which section of the mandelbrot set is 
viewed the main area of the mandelbrot set is roughly between 1 5 x 0 5 and 1 y 1 in the figurecoordinates commenting out tests that slow down pytest 13 35s call fractalsmandelbrot py mandelbrot getimage getimage load0 0 255 0 0 getimageusedistancecolorcoding false load0 0 255 255 255 loop through the imagecoordinates determine the figurecoordinates based on the imagecoordinates color the corresponding pixel based on the selected coloringfunction colored version full figure uncomment for colored version different section zoomed in img getimagefigurecenterx 0 6 figurecentery 0 4 figurewidth 0 8 uncomment for black and white version full figure img getimageusedistancecolorcoding false uncomment to save the image img savemandelbrot png type ignore return the relative distance step max_step after which the complex number constituted by this x y pair diverges members of the mandelbrot set do not diverge so their distance is 1 get_distance 0 0 50 1 0 get_distance 0 5 0 5 50 0 061224489795918366 get_distance 2 0 50 0 0 noqa b007 divergence happens for all complex number with an absolute value greater than 4 black white color coding that ignores the relative distance the mandelbrot set is black everything else is white get_black_and_white_rgb 0 255 255 255 get_black_and_white_rgb 0 5 255 255 255 get_black_and_white_rgb 1 0 0 0 color coding taking the relative distance into account the mandelbrot set is black get_color_coded_rgb 0 255 0 0 get_color_coded_rgb 0 5 0 255 255 get_color_coded_rgb 1 0 0 0 function to generate the image of the mandelbrot set two types of coordinates are used image coordinates that refer to the pixels and figure coordinates that refer to the complex numbers inside and outside the mandelbrot set the figure coordinates in the arguments of this function determine which section of the mandelbrot set is viewed the main area of the mandelbrot set is roughly between 1 5 x 0 5 and 1 y 1 in the figure coordinates commenting out tests that slow down pytest 
13 35s call fractals mandelbrot py mandelbrot get_image get_image load 0 0 255 0 0 get_image use_distance_color_coding false load 0 0 255 255 255 loop through the image coordinates determine the figure coordinates based on the image coordinates color the corresponding pixel based on the selected coloring function colored version full figure uncomment for colored version different section zoomed in img get_image figure_center_x 0 6 figure_center_y 0 4 figure_width 0 8 uncomment for black and white version full figure img get_image use_distance_color_coding false uncomment to save the image img save mandelbrot png
import colorsys from PIL import Image def get_distance(x: float, y: float, max_step: int) -> float: a = x b = y for step in range(max_step): a_new = a * a - b * b + x b = 2 * a * b + y a = a_new if a * a + b * b > 4: break return step / (max_step - 1) def get_black_and_white_rgb(distance: float) -> tuple: if distance == 1: return (0, 0, 0) else: return (255, 255, 255) def get_color_coded_rgb(distance: float) -> tuple: if distance == 1: return (0, 0, 0) else: return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1)) def get_image( image_width: int = 800, image_height: int = 600, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50, use_distance_color_coding: bool = True, ) -> Image.Image: img = Image.new("RGB", (image_width, image_height)) pixels = img.load() for image_x in range(image_width): for image_y in range(image_height): figure_height = figure_width / image_width * image_height figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height distance = get_distance(figure_x, figure_y, max_step) if use_distance_color_coding: pixels[image_x, image_y] = get_color_coded_rgb(distance) else: pixels[image_x, image_y] = get_black_and_white_rgb(distance) return img if __name__ == "__main__": import doctest doctest.testmod() img = get_image() img.show()
anurag kumar anuragkumarak95gmail com gitanuragkumarak95 simple example of fractal generation using recursion what is the sierpiski triangle the sierpiski triangle sometimes spelled sierpinski also called the sierpiski gasket or sierpiski sieve is a fractal attractive fixed set with the overall shape of an equilateral triangle subdivided recursively into smaller equilateral triangles originally constructed as a curve this is one of the basic examples of selfsimilar setsthat is it is a mathematically generated pattern that is reproducible at any magnification or reduction it is named after the polish mathematician wacaw sierpiski but appeared as a decorative pattern many centuries before the work of sierpiski usage python sierpinskitriangle py int depthforfractal credits the above description is taken from https en wikipedia orgwikisierpic584skitriangle this code was written by editing the code from https www riannetrujillo comblogpythonfractal find the midpoint of two points getmid0 0 2 2 1 0 1 0 getmid3 3 3 3 0 0 0 0 getmid1 0 3 2 2 0 1 0 getmid0 0 1 1 0 5 0 5 getmid0 0 0 0 0 0 0 0 recursively draw the sierpinski triangle given the vertices of the triangle and the recursion depth find the midpoint of two points get_mid 0 0 2 2 1 0 1 0 get_mid 3 3 3 3 0 0 0 0 get_mid 1 0 3 2 2 0 1 0 get_mid 0 0 1 1 0 5 0 5 get_mid 0 0 0 0 0 0 0 0 recursively draw the sierpinski triangle given the vertices of the triangle and the recursion depth vertices of triangle
"""Draw the Sierpinski triangle fractal with turtle graphics.

Usage: python sierpinski_triangle.py <int:depth_for_fractal>
"""

import sys
import turtle


def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """
    Find the midpoint of two points.

    >>> get_mid((0, 0), (2, 2))
    (1.0, 1.0)
    >>> get_mid((-3, -3), (3, 3))
    (0.0, 0.0)
    >>> get_mid((1, 0), (3, 2))
    (2.0, 1.0)
    >>> get_mid((0, 0), (1, 1))
    (0.5, 0.5)
    """
    mid_x, mid_y = ((coord_a + coord_b) / 2 for coord_a, coord_b in zip(p1, p2))
    return mid_x, mid_y


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    """Recursively draw the Sierpinski triangle given the three vertices and
    the remaining recursion depth."""
    # Trace this triangle's outline: lift the pen, jump to the first corner,
    # then draw the three edges back around to the start.
    my_pen.up()
    my_pen.goto(*vertex1)
    my_pen.down()
    for corner in (vertex2, vertex3, vertex1):
        my_pen.goto(*corner)

    if depth == 0:
        return

    # Recurse into the three corner sub-triangles formed by the edge midpoints.
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    # Vertices of the outermost triangle.
    vertices = [(-175, -125), (0, 175), (175, -125)]
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
    turtle.Screen().exitonclick()
by shreya123714 https en wikipedia orgwikifuzzyset a class for representing and manipulating triangular fuzzy sets attributes name the name or label of the fuzzy set leftboundary the left boundary of the fuzzy set peak the peak central value of the fuzzy set rightboundary the right boundary of the fuzzy set methods membershipx calculate the membership value of an input x in the fuzzy set unionother calculate the union of this fuzzy set with another fuzzy set intersectionother calculate the intersection of this fuzzy set with another complement calculate the complement negation of this fuzzy set plot plot the membership function of the fuzzy set sheru fuzzysetsheru 0 4 1 0 6 sheru fuzzysetname sheru leftboundary0 4 peak1 rightboundary0 6 strsheru sheru 0 4 1 0 6 siya fuzzysetsiya 0 5 1 0 7 siya fuzzysetname siya leftboundary0 5 peak1 rightboundary0 7 complement operation sheru complement fuzzysetname sheru leftboundary0 4 peak0 6 rightboundary0 siya complement doctest normalizewhitespace fuzzysetname siya leftboundary0 30000000000000004 peak0 5 rightboundary0 intersection operation siya intersectionsheru fuzzysetname siya sheru leftboundary0 5 peak0 6 rightboundary1 0 membership operation sheru membership0 5 0 16666666666666663 sheru membership0 6 0 0 union operations siya unionsheru fuzzysetname siya sheru leftboundary0 4 peak0 7 rightboundary1 0 fuzzysetfuzzyset 0 1 0 2 0 3 fuzzysetname fuzzyset leftboundary0 1 peak0 2 rightboundary0 3 calculate the complement negation of this fuzzy set returns fuzzyset a new fuzzy set representing the complement fuzzysetfuzzyset 0 1 0 2 0 3 complement fuzzysetname fuzzyset leftboundary0 7 peak0 9 rightboundary0 8 calculate the intersection of this fuzzy set with another fuzzy set args other another fuzzy set to intersect with returns a new fuzzy set representing the intersection fuzzyseta 0 1 0 2 0 3 intersectionfuzzysetb 0 4 0 5 0 6 fuzzysetname a b leftboundary0 4 peak0 3 rightboundary0 35 calculate the membership value of an 
input x in the fuzzy set returns the membership value of x in the fuzzy set a fuzzyseta 0 1 0 2 0 3 a membership0 09 0 0 a membership0 1 0 0 a membership0 11 0 09999999999999995 a membership0 4 0 0 fuzzyseta 0 0 5 1 membership0 1 0 2 fuzzysetb 0 2 0 7 1 membership0 6 0 8 calculate the union of this fuzzy set with another fuzzy set args other fuzzyset another fuzzy set to union with returns fuzzyset a new fuzzy set representing the union fuzzyseta 0 1 0 2 0 3 unionfuzzysetb 0 4 0 5 0 6 fuzzysetname a b leftboundary0 1 peak0 6 rightboundary0 35 plot the membership function of the fuzzy set a class for representing and manipulating triangular fuzzy sets attributes name the name or label of the fuzzy set left_boundary the left boundary of the fuzzy set peak the peak central value of the fuzzy set right_boundary the right boundary of the fuzzy set methods membership x calculate the membership value of an input x in the fuzzy set union other calculate the union of this fuzzy set with another fuzzy set intersection other calculate the intersection of this fuzzy set with another complement calculate the complement negation of this fuzzy set plot plot the membership function of the fuzzy set sheru fuzzyset sheru 0 4 1 0 6 sheru fuzzyset name sheru left_boundary 0 4 peak 1 right_boundary 0 6 str sheru sheru 0 4 1 0 6 siya fuzzyset siya 0 5 1 0 7 siya fuzzyset name siya left_boundary 0 5 peak 1 right_boundary 0 7 complement operation sheru complement fuzzyset name sheru left_boundary 0 4 peak 0 6 right_boundary 0 siya complement doctest normalize_whitespace fuzzyset name siya left_boundary 0 30000000000000004 peak 0 5 right_boundary 0 intersection operation siya intersection sheru fuzzyset name siya sheru left_boundary 0 5 peak 0 6 right_boundary 1 0 membership operation sheru membership 0 5 0 16666666666666663 sheru membership 0 6 0 0 union operations siya union sheru fuzzyset name siya sheru left_boundary 0 4 peak 0 7 right_boundary 1 0 fuzzyset fuzzy_set 0 1 0 2 0 3 
fuzzyset name fuzzy_set left_boundary 0 1 peak 0 2 right_boundary 0 3 calculate the complement negation of this fuzzy set returns fuzzyset a new fuzzy set representing the complement fuzzyset fuzzy_set 0 1 0 2 0 3 complement fuzzyset name fuzzy_set left_boundary 0 7 peak 0 9 right_boundary 0 8 calculate the intersection of this fuzzy set with another fuzzy set args other another fuzzy set to intersect with returns a new fuzzy set representing the intersection fuzzyset a 0 1 0 2 0 3 intersection fuzzyset b 0 4 0 5 0 6 fuzzyset name a b left_boundary 0 4 peak 0 3 right_boundary 0 35 calculate the membership value of an input x in the fuzzy set returns the membership value of x in the fuzzy set a fuzzyset a 0 1 0 2 0 3 a membership 0 09 0 0 a membership 0 1 0 0 a membership 0 11 0 09999999999999995 a membership 0 4 0 0 fuzzyset a 0 0 5 1 membership 0 1 0 2 fuzzyset b 0 2 0 7 1 membership 0 6 0 8 calculate the union of this fuzzy set with another fuzzy set args other fuzzyset another fuzzy set to union with returns fuzzyset a new fuzzy set representing the union fuzzyset a 0 1 0 2 0 3 union fuzzyset b 0 4 0 5 0 6 fuzzyset name a b left_boundary 0 1 peak 0 6 right_boundary 0 35 plot the membership function of the fuzzy set
"""Triangular fuzzy sets: membership, union, intersection, complement, plot.

See https://en.wikipedia.org/wiki/Fuzzy_set
"""

from __future__ import annotations

from dataclasses import dataclass


@dataclass
class FuzzySet:
    """
    A class for representing and manipulating triangular fuzzy sets.

    Attributes:
        name: the name or label of the fuzzy set
        left_boundary: the left boundary of the fuzzy set
        peak: the peak (central) value of the fuzzy set
        right_boundary: the right boundary of the fuzzy set

    >>> sheru = FuzzySet("Sheru", 0.4, 1, 0.6)
    >>> sheru
    FuzzySet(name='Sheru', left_boundary=0.4, peak=1, right_boundary=0.6)
    >>> str(sheru)
    'Sheru: [0.4, 1, 0.6]'
    """

    name: str
    left_boundary: float
    peak: float
    right_boundary: float

    def __str__(self) -> str:
        """Return "name: [left, peak, right]"."""
        return (
            f"{self.name}: [{self.left_boundary}, {self.peak}, {self.right_boundary}]"
        )

    def complement(self) -> FuzzySet:
        """
        Calculate the complement (negation) of this fuzzy set.

        Returns:
            FuzzySet: a new fuzzy set representing the complement.

        >>> FuzzySet("fuzzy_set", 0.1, 0.2, 0.3).complement()
        FuzzySet(name='¬fuzzy_set', left_boundary=0.7, peak=0.9, right_boundary=0.8)
        """
        # NOTE(review): the field mapping below (peak = 1 - left_boundary,
        # right_boundary = 1 - peak) mirrors the module's original doctests;
        # confirm before "fixing" it.
        return FuzzySet(
            name=f"¬{self.name}",
            left_boundary=1 - self.right_boundary,
            peak=1 - self.left_boundary,
            right_boundary=1 - self.peak,
        )

    def intersection(self, other: FuzzySet) -> FuzzySet:
        """
        Calculate the intersection of this fuzzy set with another.

        Args:
            other: another fuzzy set to intersect with.

        Returns:
            A new fuzzy set representing the intersection.

        >>> FuzzySet("a", 0.1, 0.2, 0.3).intersection(FuzzySet("b", 0.4, 0.5, 0.6))
        FuzzySet(name='a ∩ b', left_boundary=0.4, peak=0.3, right_boundary=0.35)
        """
        # NOTE(review): per the original doctests, peak is the min of the
        # right boundaries and right_boundary is the mean of the peaks.
        return FuzzySet(
            name=f"{self.name} ∩ {other.name}",
            left_boundary=max(self.left_boundary, other.left_boundary),
            peak=min(self.right_boundary, other.right_boundary),
            right_boundary=(self.peak + other.peak) / 2,
        )

    def membership(self, x: float) -> float:
        """
        Calculate the membership value of an input x in the fuzzy set.

        Returns:
            The membership value of x in the fuzzy set (0.0 outside the
            support, rising linearly to 1 at the peak, then falling).

        >>> a = FuzzySet("a", 0.1, 0.2, 0.3)
        >>> a.membership(0.09)
        0.0
        >>> a.membership(0.1)
        0.0
        >>> a.membership(0.11)
        0.09999999999999995
        >>> a.membership(0.4)
        0.0
        >>> FuzzySet("A", 0, 0.5, 1).membership(0.1)
        0.2
        """
        if x <= self.left_boundary or x >= self.right_boundary:
            return 0.0
        elif self.left_boundary < x <= self.peak:
            # Rising edge of the triangle.
            return (x - self.left_boundary) / (self.peak - self.left_boundary)
        elif self.peak < x < self.right_boundary:
            # Falling edge of the triangle.
            return (self.right_boundary - x) / (self.right_boundary - self.peak)
        msg = f"Invalid value {x} for fuzzy set {self}"
        raise ValueError(msg)

    def union(self, other: FuzzySet) -> FuzzySet:
        """
        Calculate the union of this fuzzy set with another.

        Args:
            other (FuzzySet): another fuzzy set to union with.

        Returns:
            FuzzySet: a new fuzzy set representing the union.

        >>> FuzzySet("a", 0.1, 0.2, 0.3).union(FuzzySet("b", 0.4, 0.5, 0.6))
        FuzzySet(name='a ∪ b', left_boundary=0.1, peak=0.6, right_boundary=0.35)
        """
        # NOTE(review): per the original doctests, peak is the max of the
        # right boundaries and right_boundary is the mean of the peaks.
        return FuzzySet(
            name=f"{self.name} ∪ {other.name}",
            left_boundary=min(self.left_boundary, other.left_boundary),
            peak=max(self.right_boundary, other.right_boundary),
            right_boundary=(self.peak + other.peak) / 2,
        )

    def plot(self):
        """Plot the membership function of the fuzzy set."""
        # Plotting dependencies are deferred so the core fuzzy-set math works
        # without matplotlib/numpy installed.
        import numpy as np
        from matplotlib import pyplot as plt

        x = np.linspace(0, 1, 1000)
        y = [self.membership(xi) for xi in x]
        plt.plot(x, y, label=self.name)


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    import matplotlib.pyplot as plt

    a = FuzzySet("A", 0, 0.5, 1)
    b = FuzzySet("B", 0.2, 0.7, 1)
    a.plot()
    b.plot()
    plt.xlabel("x")
    plt.ylabel("Membership")
    plt.legend()
    plt.show()

    union_ab = a.union(b)
    intersection_ab = a.intersection(b)
    complement_a = a.complement()
    union_ab.plot()
    intersection_ab.plot()
    complement_a.plot()
    plt.xlabel("x")
    plt.ylabel("Membership")
    plt.legend()
    plt.show()
calculate great circle distance between two points in a sphere given longitudes and latitudes https en wikipedia orgwikihaversineformula we know that the globe is sort of spherical so a path between two points isn t exactly a straight line we need to account for the earth s curvature when calculating distance from point a to b this effect is negligible for small distances but adds up as distance increases the haversine method treats the earth as a sphere which allows us to project the two points a and b onto the surface of that sphere and approximate the spherical distance between them since the earth is not a perfect sphere other methods which model the earth s ellipsoidal nature are more accurate but a quick and modifiable computation like haversine can be handy for shorter range distances args lat1 lon1 latitude and longitude of coordinate 1 lat2 lon2 latitude and longitude of coordinate 2 returns geographical distance between two points in metres from collections import namedtuple point2d namedtuplepoint2d lat lon sanfrancisco point2d37 774856 122 424227 yosemite point2d37 864742 119 537521 fhaversinedistancesanfrancisco yosemite 0 0f meters 254 352 meters constants per wgs84 https en wikipedia orgwikiworldgeodeticsystem distance in metresm equation parameters equation https en wikipedia orgwikihaversineformulaformulation equation square both values calculate great circle distance between two points in a sphere given longitudes and latitudes https en wikipedia org wiki haversine_formula we know that the globe is sort of spherical so a path between two points isn t exactly a straight line we need to account for the earth s curvature when calculating distance from point a to b this effect is negligible for small distances but adds up as distance increases the haversine method treats the earth as a sphere which allows us to project the two points a and b onto the surface of that sphere and approximate the spherical distance between them since the earth is not a 
perfect sphere other methods which model the earth s ellipsoidal nature are more accurate but a quick and modifiable computation like haversine can be handy for shorter range distances args lat1 lon1 latitude and longitude of coordinate 1 lat2 lon2 latitude and longitude of coordinate 2 returns geographical distance between two points in metres from collections import namedtuple point_2d namedtuple point_2d lat lon san_francisco point_2d 37 774856 122 424227 yosemite point_2d 37 864742 119 537521 f haversine_distance san_francisco yosemite 0 0f meters 254 352 meters constants per wgs84 https en wikipedia org wiki world_geodetic_system distance in metres m equation parameters equation https en wikipedia org wiki haversine_formula formulation equation square both values
"""Great-circle distance between two points on a sphere, given longitudes and
latitudes, via the haversine formula.

https://en.wikipedia.org/wiki/Haversine_formula
"""

from math import asin, atan, cos, radians, sin, sqrt, tan

# Constants per WGS84: https://en.wikipedia.org/wiki/World_Geodetic_System
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137  # distance in metres (m)


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Return the great-circle distance in metres between two coordinates.

    The haversine method treats the earth as a sphere, projecting both points
    onto it and approximating the spherical distance between them — handy for
    shorter-range distances, though less accurate than ellipsoidal methods.

    Args:
        lat1, lon1: latitude and longitude of coordinate 1
        lat2, lon2: latitude and longitude of coordinate 2
    Returns:
        geographical distance between the two points in metres

    >>> from collections import namedtuple
    >>> point_2d = namedtuple("point_2d", "lat lon")
    >>> san_francisco = point_2d(37.774856, -122.424227)
    >>> yosemite = point_2d(37.864742, -119.537521)
    >>> f"{haversine_distance(*san_francisco, *yosemite):0,.0f} meters"
    '254,352 meters'
    """
    # Equation parameters:
    # https://en.wikipedia.org/wiki/Haversine_formula#Formulation
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Reduced latitudes account for the WGS84 flattening.
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)

    # hav(theta) = sin^2(theta / 2) for each angular difference.
    hav_phi = sin((phi_2 - phi_1) / 2) ** 2
    hav_lambda = sin((lambda_2 - lambda_1) / 2) ** 2

    root = sqrt(hav_phi + cos(phi_1) * cos(phi_2) * hav_lambda)
    return 2 * RADIUS * asin(root)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
calculate the shortest distance along the surface of an ellipsoid between two points on the surface of earth given longitudes and latitudes https en wikipedia orgwikigeographicaldistancelambert sformulaforlonglines note this algorithm uses geodesyhaversinedistance py to compute central angle sigma representing the earth as an ellipsoid allows us to approximate distances between points on the surface much better than a sphere ellipsoidal formulas treat the earth as an oblate ellipsoid which means accounting for the flattening that happens at the north and south poles lambert s formulae provide accuracy on the order of 10 meteres over thousands of kilometeres other methods can provide millimeterlevel accuracy but this is a simpler method to calculate long range distances without increasing computational intensity args lat1 lon1 latitude and longitude of coordinate 1 lat2 lon2 latitude and longitude of coordinate 2 returns geographical distance between two points in metres from collections import namedtuple point2d namedtuplepoint2d lat lon sanfrancisco point2d37 774856 122 424227 yosemite point2d37 864742 119 537521 newyork point2d40 713019 74 012647 venice point2d45 443012 12 313071 flambertsellipsoidaldistancesanfrancisco yosemite 0 0f meters 254 351 meters flambertsellipsoidaldistancesanfrancisco newyork 0 0f meters 4 138 992 meters flambertsellipsoidaldistancesanfrancisco venice 0 0f meters 9 737 326 meters constants per wgs84 https en wikipedia orgwikiworldgeodeticsystem distance in metresm equation parameters https en wikipedia orgwikigeographicaldistancelambert sformulaforlonglines parametric latitudes https en wikipedia orgwikilatitudeparametricorreducedlatitude compute central angle between two points using haversine theta sigma haversinedistance equatorial radius intermediate p and q values intermediate x value x sigma sinsigma sin2pcos2q cos2sigma2 intermediate y value y sigma sinsigma cos2psin2q sin2sigma2 calculate the shortest distance along the surface 
of an ellipsoid between two points on the surface of earth given longitudes and latitudes https en wikipedia org wiki geographical_distance lambert s_formula_for_long_lines note this algorithm uses geodesy haversine_distance py to compute central angle sigma representing the earth as an ellipsoid allows us to approximate distances between points on the surface much better than a sphere ellipsoidal formulas treat the earth as an oblate ellipsoid which means accounting for the flattening that happens at the north and south poles lambert s formulae provide accuracy on the order of 10 meteres over thousands of kilometeres other methods can provide millimeter level accuracy but this is a simpler method to calculate long range distances without increasing computational intensity args lat1 lon1 latitude and longitude of coordinate 1 lat2 lon2 latitude and longitude of coordinate 2 returns geographical distance between two points in metres from collections import namedtuple point_2d namedtuple point_2d lat lon san_francisco point_2d 37 774856 122 424227 yosemite point_2d 37 864742 119 537521 new_york point_2d 40 713019 74 012647 venice point_2d 45 443012 12 313071 f lamberts_ellipsoidal_distance san_francisco yosemite 0 0f meters 254 351 meters f lamberts_ellipsoidal_distance san_francisco new_york 0 0f meters 4 138 992 meters f lamberts_ellipsoidal_distance san_francisco venice 0 0f meters 9 737 326 meters constants per wgs84 https en wikipedia org wiki world_geodetic_system distance in metres m equation parameters https en wikipedia org wiki geographical_distance lambert s_formula_for_long_lines parametric latitudes https en wikipedia org wiki latitude parametric_ or_reduced _latitude compute central angle between two points using haversine theta sigma haversine_distance equatorial radius intermediate p and q values intermediate x value x sigma sin sigma sin 2pcos 2q cos 2 sigma 2 intermediate y value y sigma sin sigma cos 2psin 2q sin 2 sigma 2
"""Shortest distance along the surface of an ellipsoid between two points on
the surface of the earth, via Lambert's formulae for long lines.

https://en.wikipedia.org/wiki/Geographical_distance#Lambert's_formula_for_long_lines
NOTE: uses geodesy/haversine_distance.py to compute the central angle sigma.
"""

from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance

# Constants per WGS84: https://en.wikipedia.org/wiki/World_Geodetic_System
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137  # distance in metres (m)


def lamberts_ellipsoidal_distance(
    lat1: float, lon1: float, lat2: float, lon2: float
) -> float:
    """
    Return the ellipsoidal surface distance in metres between two coordinates.

    Lambert's formulae provide accuracy on the order of 10 metres over
    thousands of kilometres — a simpler long-range method than
    millimetre-accurate alternatives.

    Args:
        lat1, lon1: latitude and longitude of coordinate 1
        lat2, lon2: latitude and longitude of coordinate 2
    Returns:
        geographical distance between the two points in metres

    >>> from collections import namedtuple
    >>> point_2d = namedtuple("point_2d", "lat lon")
    >>> san_francisco = point_2d(37.774856, -122.424227)
    >>> yosemite = point_2d(37.864742, -119.537521)
    >>> f"{lamberts_ellipsoidal_distance(*san_francisco, *yosemite):0,.0f} meters"
    '254,351 meters'
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A

    # Parametric (reduced) latitudes:
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    beta_1 = atan((1 - flattening) * tan(radians(lat1)))
    beta_2 = atan((1 - flattening) * tan(radians(lat2)))

    # Central angle between the two points, from the haversine great-circle
    # distance divided by the equatorial radius: theta = sigma.
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values.
    p_value = (beta_1 + beta_2) / 2
    q_value = (beta_2 - beta_1) / 2

    # Intermediate X value:
    # X = (sigma - sin(sigma)) * sin^2(P)cos^2(Q) / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value:
    # Y = (sigma + sin(sigma)) * cos^2(P)sin^2(Q) / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
building block classes an angle in degrees unit of measurement angle angledegrees90 angle45 5 angledegrees45 5 angle1 traceback most recent call last typeerror degrees must be a numeric value between 0 and 360 angle361 traceback most recent call last typeerror degrees must be a numeric value between 0 and 360 a side of a two dimensional shape such as polygon etc adjacentsides a list of sides which are adjacent to the current side angle the angle in degrees between each adjacent side length the length of the current side in meters side5 sidelength5 angleangledegrees90 nextsidenone side5 angle45 6 sidelength5 angleangledegrees45 6 nextsidenone side5 angle45 6 side1 angle2 doctest ellipsis sidelength5 angleangledegrees45 6 nextsidesidelength1 angleangled a geometric ellipse on a 2d surface ellipse5 10 ellipsemajorradius5 minorradius10 ellipse5 10 is ellipse5 10 false ellipse5 10 ellipse5 10 true ellipse5 10 area 157 07963267948966 ellipse5 10 perimeter 47 12388980384689 a geometric circle on a 2d surface circle5 circleradius5 circle5 is circle5 false circle5 circle5 true circle5 area 78 53981633974483 circle5 perimeter 31 41592653589793 circle5 diameter 10 return the maximum number of parts that circle can be divided into if cut numcuts times circle circle5 circle maxparts0 1 0 circle maxparts7 29 0 circle maxparts54 1486 0 circle maxparts22 5 265 375 circle maxparts222 traceback most recent call last typeerror numcuts must be a positive numeric value circle maxparts222 traceback most recent call last typeerror numcuts must be a positive numeric value an abstract class which represents polygon on a 2d surface polygon polygonsides polygon addsideside5 polygonsidessidelength5 angleangledegrees90 nextsidenone polygon getside0 traceback most recent call last indexerror list index out of range polygon addsideside5 getside1 sidelength5 angleangledegrees90 nextsidenone polygon setside0 side5 traceback most recent call last indexerror list assignment index out of range 
polygon addsideside5 setside0 side10 polygonsidessidelength10 angleangledegrees90 nextsidenone a geometric rectangle on a 2d surface rectangleone rectangle5 10 rectangleone perimeter 30 rectangleone area 50 rectangle5 10 doctest normalizewhitespace rectanglesidessidelength5 angleangledegrees90 nextsidenone sidelength10 angleangledegrees90 nextsidenone a structure which represents a geometrical square on a 2d surface squareone square5 squareone perimeter 20 squareone area 25 building block classes an angle in degrees unit of measurement angle angle degrees 90 angle 45 5 angle degrees 45 5 angle 1 traceback most recent call last typeerror degrees must be a numeric value between 0 and 360 angle 361 traceback most recent call last typeerror degrees must be a numeric value between 0 and 360 a side of a two dimensional shape such as polygon etc adjacent_sides a list of sides which are adjacent to the current side angle the angle in degrees between each adjacent side length the length of the current side in meters side 5 side length 5 angle angle degrees 90 next_side none side 5 angle 45 6 side length 5 angle angle degrees 45 6 next_side none side 5 angle 45 6 side 1 angle 2 doctest ellipsis side length 5 angle angle degrees 45 6 next_side side length 1 angle angle d a geometric ellipse on a 2d surface ellipse 5 10 ellipse major_radius 5 minor_radius 10 ellipse 5 10 is ellipse 5 10 false ellipse 5 10 ellipse 5 10 true ellipse 5 10 area 157 07963267948966 ellipse 5 10 perimeter 47 12388980384689 a geometric circle on a 2d surface circle 5 circle radius 5 circle 5 is circle 5 false circle 5 circle 5 true circle 5 area 78 53981633974483 circle 5 perimeter 31 41592653589793 circle 5 diameter 10 return the maximum number of parts that circle can be divided into if cut num_cuts times circle circle 5 circle max_parts 0 1 0 circle max_parts 7 29 0 circle max_parts 54 1486 0 circle max_parts 22 5 265 375 circle max_parts 222 traceback most recent call last typeerror num_cuts must 
be a positive numeric value circle max_parts 222 traceback most recent call last typeerror num_cuts must be a positive numeric value an abstract class which represents polygon on a 2d surface polygon polygon sides polygon add_side side 5 polygon sides side length 5 angle angle degrees 90 next_side none polygon get_side 0 traceback most recent call last indexerror list index out of range polygon add_side side 5 get_side 1 side length 5 angle angle degrees 90 next_side none polygon set_side 0 side 5 traceback most recent call last indexerror list assignment index out of range polygon add_side side 5 set_side 0 side 10 polygon sides side length 10 angle angle degrees 90 next_side none a geometric rectangle on a 2d surface rectangle_one rectangle 5 10 rectangle_one perimeter 30 rectangle_one area 50 rectangle 5 10 doctest normalize_whitespace rectangle sides side length 5 angle angle degrees 90 next_side none side length 10 angle angle degrees 90 next_side none a structure which represents a geometrical square on a 2d surface square_one square 5 square_one perimeter 20 square_one area 25
"""Building-block classes for simple two-dimensional geometric shapes."""

from __future__ import annotations

import math
from dataclasses import dataclass, field
from types import NoneType
from typing import Self


@dataclass
class Angle:
    """An angle in degrees (unit of measurement); must lie in [0, 360]."""

    degrees: float = 90

    def __post_init__(self) -> None:
        # Reject non-numeric values and anything outside [0, 360].
        if not isinstance(self.degrees, (int, float)) or not 0 <= self.degrees <= 360:
            raise TypeError("degrees must be a numeric value between 0 and 360.")


@dataclass
class Side:
    """A side of a two-dimensional shape (such as a polygon): its length in
    meters, the angle to the adjacent side, and an optional link to it."""

    length: float
    angle: Angle = field(default_factory=Angle)  # defaults to a 90-degree angle
    next_side: Side | None = None  # optional link to the adjacent side

    def __post_init__(self) -> None:
        # Validate every field eagerly so an invalid Side cannot exist.
        if not isinstance(self.length, (int, float)) or self.length <= 0:
            raise TypeError("length must be a positive numeric value.")
        if not isinstance(self.angle, Angle):
            raise TypeError("angle must be an Angle object.")
        if not isinstance(self.next_side, (Side, NoneType)):
            raise TypeError("next_side must be a Side or None.")


@dataclass
class Ellipse:
    """A geometric ellipse on a 2D surface, defined by its two radii."""

    major_radius: float
    minor_radius: float

    @property
    def area(self) -> float:
        # Exact ellipse area: pi * a * b.
        return math.pi * self.major_radius * self.minor_radius

    @property
    def perimeter(self) -> float:
        # NOTE(review): pi * (a + b) is a crude approximation of an ellipse's
        # perimeter (exact only when a == b); kept for doctest compatibility.
        return math.pi * (self.major_radius + self.minor_radius)


class Circle(Ellipse):
    """A geometric circle on a 2D surface: an ellipse with equal radii."""

    def __init__(self, radius: float) -> None:
        super().__init__(radius, radius)
        self.radius = radius

    def __repr__(self) -> str:
        # Override the Ellipse (dataclass) repr to show only the radius.
        return f"Circle(radius={self.radius})"

    @property
    def diameter(self) -> float:
        return self.radius * 2

    def max_parts(self, num_cuts: float) -> float:
        """Return the maximum number of parts the circle can be divided into
        with num_cuts straight cuts: (n^2 + n + 2) / 2 (lazy caterer's
        sequence for integer n)."""
        if not isinstance(num_cuts, (int, float)) or num_cuts < 0:
            raise TypeError("num_cuts must be a positive numeric value.")
        return (num_cuts + 2 + num_cuts**2) * 0.5


@dataclass
class Polygon:
    """An abstract class which represents a polygon on a 2D surface as a
    list of Side objects."""

    sides: list[Side] = field(default_factory=list)

    def add_side(self, side: Side) -> Self:
        # Returns self so calls can be chained fluently.
        self.sides.append(side)
        return self

    def get_side(self, index: int) -> Side:
        # Raises IndexError for an out-of-range index (list semantics).
        return self.sides[index]

    def set_side(self, index: int, side: Side) -> Self:
        # Raises IndexError for an out-of-range index; returns self for chaining.
        self.sides[index] = side
        return self


class Rectangle(Polygon):
    """A geometric rectangle on a 2D surface, stored as two Side objects."""

    def __init__(self, short_side_length: float, long_side_length: float) -> None:
        super().__init__()
        self.short_side_length = short_side_length
        self.long_side_length = long_side_length
        # Not a dataclass, so this hook is invoked by hand (hence the name
        # post_init, not __post_init__).
        self.post_init()

    def post_init(self) -> None:
        # Build the two Side objects and register them on the polygon.
        self.short_side = Side(self.short_side_length)
        self.long_side = Side(self.long_side_length)
        super().add_side(self.short_side)
        super().add_side(self.long_side)

    def perimeter(self) -> float:
        return (self.short_side.length + self.long_side.length) * 2

    def area(self) -> float:
        return self.short_side.length * self.long_side.length


@dataclass
class Square(Rectangle):
    """A structure which represents a geometrical square on a 2D surface.

    NOTE(review): @dataclass here supplies __repr__/__eq__ based on Polygon's
    `sides` field; it does not replace the hand-written __init__ below, since
    the dataclass decorator skips methods already defined in the class body.
    """

    def __init__(self, side_length: float) -> None:
        # A square is a rectangle whose two side lengths coincide.
        super().__init__(side_length, side_length)

    def perimeter(self) -> float:
        return super().perimeter()

    def area(self) -> float:
        return super().area()


if __name__ == "__main__":
    __import__("doctest").testmod()
https en wikipedia orgwikibc3a9ziercurve https www tutorialspoint comcomputergraphicscomputergraphicscurves htm bezier curve is a weighted sum of a set of control points generate bezier curves from a given set of control points this implementation works only for 2d coordinates in the xy plane listofpoints control points in the xy plane on which to interpolate these points control the behavior shape of the bezier curve degree determines the flexibility of the curve degree 1 will produce a straight line the basis function determines the weight of each control point at time t t time value between 0 and 1 inclusive at which to evaluate the basis of the curve returns the x y values of basis function at time t curve beziercurve1 1 1 2 curve basisfunction0 1 0 0 0 curve basisfunction1 0 0 1 0 basis function for each i the basis must sum up to 1 for it to produce a valid bezier curve the function to produce the values of the bezier curve at time t t the value of time t at which to evaluate the bezier function returns the x y coordinates of the bezier curve at time t the first point in the curve is when t 0 the last point in the curve is when t 1 curve beziercurve1 1 1 2 curve beziercurvefunction0 1 0 1 0 curve beziercurvefunction1 1 0 2 0 for all points sum up the product of ith basis function and ith point plots the bezier curve using matplotlib plotting capabilities stepsize defines the steps at which to evaluate the bezier curve the smaller the step size the finer the curve produced https en wikipedia org wiki b c3 a9zier_curve https www tutorialspoint com computer_graphics computer_graphics_curves htm type ignore bezier curve is a weighted sum of a set of control points generate bezier curves from a given set of control points this implementation works only for 2d coordinates in the xy plane list_of_points control points in the xy plane on which to interpolate these points control the behavior shape of the bezier curve degree determines the flexibility of the curve 
degree 1 will produce a straight line the basis function determines the weight of each control point at time t t time value between 0 and 1 inclusive at which to evaluate the basis of the curve returns the x y values of basis function at time t curve beziercurve 1 1 1 2 curve basis_function 0 1 0 0 0 curve basis_function 1 0 0 1 0 basis function for each i the basis must sum up to 1 for it to produce a valid bezier curve the function to produce the values of the bezier curve at time t t the value of time t at which to evaluate the bezier function returns the x y coordinates of the bezier curve at time t the first point in the curve is when t 0 the last point in the curve is when t 1 curve beziercurve 1 1 1 2 curve bezier_curve_function 0 1 0 1 0 curve bezier_curve_function 1 1 0 2 0 for all points sum up the product of i th basis function and i th point plots the bezier curve using matplotlib plotting capabilities step_size defines the step s at which to evaluate the bezier curve the smaller the step size the finer the curve produced type ignore x coordinates of points to plot y coordinates of points to plot degree 1 degree 2 degree 3
from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    """
    Bezier curve is a weighted sum of a set of control points.
    Generate Bezier curves from a given set of control points.
    This implementation works only for 2d coordinates in the xy plane.
    """

    def __init__(self, list_of_points: list[tuple[float, float]]):
        """
        list_of_points: Control points in the xy plane on which to interpolate.
            These points control the behavior (shape) of the Bezier curve.
        """
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """
        The basis function determines the weight of each control point at time t.
        t: time value between 0 and 1 inclusive at which to evaluate the basis
           of the curve.
        returns the x, y values of basis function at time t

        >>> curve = BezierCurve([(1, 1), (1, 2)])
        >>> curve.basis_function(0)
        [1.0, 0.0]
        >>> curve.basis_function(1)
        [0.0, 1.0]
        """
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # Bernstein polynomial: C(degree, i) * (1 - t)^(degree - i) * t^i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # The basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """
        The function to produce the values of the Bezier curve at time t.
        t: the value of time t at which to evaluate the Bezier function
        Returns the x, y coordinates of the Bezier curve at time t.
            The first point in the curve is when t = 0.
            The last point in the curve is when t = 1.

        >>> curve = BezierCurve([(1, 1), (1, 2)])
        >>> curve.bezier_curve_function(0)
        (1.0, 1.0)
        >>> curve.bezier_curve_function(1)
        (1.0, 2.0)
        """
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        # For all points, sum up the product of i-th basis function and i-th point.
        for i in range(len(self.list_of_points)):
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        """
        Plots the Bezier curve using matplotlib plotting capabilities.
        step_size: defines the step(s) at which to evaluate the Bezier curve.
            The smaller the step size, the finer the curve produced.
        """
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        # Bug fix: the original accumulated ``t += step_size`` in floating
        # point, which can overshoot 1.0 and skip the final sample, so the
        # plotted curve never reached the last control point.  Counting whole
        # steps and clamping guarantees t = 1 is always evaluated.
        num_steps = max(1, round(1 / step_size))
        for step in range(num_steps + 1):
            t = min(step * step_size, 1.0)
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
render 3d points for 2d surfaces converts 3d point to a 2d drawable point convertto2d1 0 2 0 3 0 10 0 10 0 7 6923076923076925 15 384615384615385 convertto2d1 2 3 10 10 7 6923076923076925 15 384615384615385 convertto2d1 2 3 10 10 1 is str traceback most recent call last typeerror input values must either be float or int 1 2 3 10 10 rotate a point around a certain axis with a certain angle angle can be any integer between 1 360 and axis can be any one of x y z rotate1 0 2 0 3 0 y 90 0 3 130524675073759 2 0 0 4470070007889556 rotate1 2 3 z 180 0 999736015495891 2 0001319704760485 3 rotate 1 2 3 z 90 0 1 is str traceback most recent call last typeerror input values except axis must either be float or int 1 2 3 90 0 rotate1 2 3 n 90 n is not a valid axis traceback most recent call last valueerror not a valid axis choose one of x y z rotate1 2 3 x 90 1 2 5049096187183877 2 5933429780983657 rotate1 2 3 x 450 450 wrap around to 90 1 3 5776792428178217 0 44744970165427644 converts 3d point to a 2d drawable point convert_to_2d 1 0 2 0 3 0 10 0 10 0 7 6923076923076925 15 384615384615385 convert_to_2d 1 2 3 10 10 7 6923076923076925 15 384615384615385 convert_to_2d 1 2 3 10 10 1 is str traceback most recent call last typeerror input values must either be float or int 1 2 3 10 10 rotate a point around a certain axis with a certain angle angle can be any integer between 1 360 and axis can be any one of x y z rotate 1 0 2 0 3 0 y 90 0 3 130524675073759 2 0 0 4470070007889556 rotate 1 2 3 z 180 0 999736015495891 2 0001319704760485 3 rotate 1 2 3 z 90 0 1 is str traceback most recent call last typeerror input values except axis must either be float or int 1 2 3 90 0 rotate 1 2 3 n 90 n is not a valid axis traceback most recent call last valueerror not a valid axis choose one of x y z rotate 1 2 3 x 90 1 2 5049096187183877 2 5933429780983657 rotate 1 2 3 x 450 450 wrap around to 90 1 3 5776792428178217 0 44744970165427644
from __future__ import annotations import math __version__ = "2020.9.26" __author__ = "xcodz-dot, cclaus, dhruvmanila" def convert_to_2d( x: float, y: float, z: float, scale: float, distance: float ) -> tuple[float, float]: if not all(isinstance(val, (float, int)) for val in locals().values()): msg = f"Input values must either be float or int: {list(locals().values())}" raise TypeError(msg) projected_x = ((x * distance) / (z + distance)) * scale projected_y = ((y * distance) / (z + distance)) * scale return projected_x, projected_y def rotate( x: float, y: float, z: float, axis: str, angle: float ) -> tuple[float, float, float]: if not isinstance(axis, str): raise TypeError("Axis must be a str") input_variables = locals() del input_variables["axis"] if not all(isinstance(val, (float, int)) for val in input_variables.values()): msg = ( "Input values except axis must either be float or int: " f"{list(input_variables.values())}" ) raise TypeError(msg) angle = (angle % 360) / 450 * 180 / math.pi if axis == "z": new_x = x * math.cos(angle) - y * math.sin(angle) new_y = y * math.cos(angle) + x * math.sin(angle) new_z = z elif axis == "x": new_y = y * math.cos(angle) - z * math.sin(angle) new_z = z * math.cos(angle) + y * math.sin(angle) new_x = x elif axis == "y": new_x = x * math.cos(angle) - z * math.sin(angle) new_z = z * math.cos(angle) + x * math.sin(angle) new_y = y else: raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'") return new_x, new_y, new_z if __name__ == "__main__": import doctest doctest.testmod() print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }") print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
function to search the path search for a path on a grid avoiding obstacles grid 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 1 0 0 0 0 0 1 0 init 0 0 goal lengrid 1 lengrid0 1 cost 1 heuristic 0 lengrid0 for in rangelengrid heuristic 0 for row in rangelengrid0 for col in rangelengrid for i in rangelengrid for j in rangelengrid0 heuristicij absi goal0 absj goal1 if gridij 1 heuristicij 99 path action searchgrid init goal cost heuristic path doctest normalizewhitespace 0 0 1 0 2 0 3 0 4 0 4 1 4 2 4 3 3 3 2 3 2 4 2 5 3 5 4 5 action doctest normalizewhitespace 0 0 0 0 0 0 2 0 0 0 0 0 2 0 0 0 3 3 2 0 0 0 0 2 2 3 3 3 0 2 all coordinates are given in format y x the cost map which pushes the path closer to the goal added extra penalty in the heuristic map left down right up function to search the path search for a path on a grid avoiding obstacles grid 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 1 0 0 0 0 0 1 0 init 0 0 goal len grid 1 len grid 0 1 cost 1 heuristic 0 len grid 0 for _ in range len grid heuristic 0 for row in range len grid 0 for col in range len grid for i in range len grid for j in range len grid 0 heuristic i j abs i goal 0 abs j goal 1 if grid i j 1 heuristic i j 99 path action search grid init goal cost heuristic path doctest normalize_whitespace 0 0 1 0 2 0 3 0 4 0 4 1 4 2 4 3 3 3 2 3 2 4 2 5 3 5 4 5 action doctest normalize_whitespace 0 0 0 0 0 0 2 0 0 0 0 0 2 0 0 0 3 3 2 0 0 0 0 2 2 3 3 3 0 2 the reference grid the action grid cost from starting cell to destination cell flag that is set when search is complete flag set if we can t find expand to choose the least costliest action so as to move closer to the goal to try out different valid actions we get the reverse path from here 0 are free path whereas 1 s are obstacles all coordinates are given in format y x the cost map which pushes the path closer to the goal added extra penalty in the heuristic map
from __future__ import annotations

# Candidate moves in (row, col) form.
DIRECTIONS = [
    [-1, 0],  # up
    [0, -1],  # left
    [1, 0],  # down
    [0, 1],  # right
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    """
    Search for a path on a grid avoiding obstacles.

    Returns the path from ``init`` to ``goal`` together with the action map
    (for every reached cell, the index of the move that first reached it).
    Raises ValueError when no path exists.

    >>> grid = [[0, 1, 0, 0, 0, 0],
    ...         [0, 1, 0, 0, 0, 0],
    ...         [0, 1, 0, 0, 0, 0],
    ...         [0, 1, 0, 0, 1, 0],
    ...         [0, 0, 0, 0, 1, 0]]
    >>> init = [0, 0]
    >>> goal = [len(grid) - 1, len(grid[0]) - 1]
    >>> heuristic = [[0] * len(grid[0]) for _ in range(len(grid))]
    >>> for i in range(len(grid)):
    ...     for j in range(len(grid[0])):
    ...         heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
    ...         if grid[i][j] == 1:
    ...             heuristic[i][j] = 99
    >>> path, action = search(grid, init, goal, 1, heuristic)
    >>> path  # doctest: +NORMALIZE_WHITESPACE
    [[0, 0], [1, 0], [2, 0], [3, 0], [4, 0], [4, 1], [4, 2], [4, 3],
     [3, 3], [2, 3], [2, 4], [2, 5], [3, 5], [4, 5]]
    """
    rows, cols = len(grid), len(grid[0])
    # closed marks cells already queued/expanded; action remembers, for every
    # cell, which move first reached it.
    closed = [[0] * cols for _ in range(rows)]
    closed[init[0]][init[1]] = 1
    action = [[0] * cols for _ in range(rows)]

    x, y = init[0], init[1]
    g = 0
    # Open list of candidate cells as [f, g, x, y] records.
    cell = [[g + heuristic[x][y], g, x, y]]

    found = False
    while not found:
        if not cell:
            raise ValueError("Algorithm is unable to find solution")
        # Expand the cheapest candidate (smallest f; ties broken by g, x, y).
        cell.sort(reverse=True)
        _, g, x, y = cell.pop()
        if x == goal[0] and y == goal[1]:
            found = True
            continue
        # Try out the valid moves from the current cell.
        for move_index, (dx, dy) in enumerate(DIRECTIONS):
            x2, y2 = x + dx, y + dy
            inside = 0 <= x2 < rows and 0 <= y2 < cols
            if inside and closed[x2][y2] == 0 and grid[x2][y2] == 0:
                g2 = g + cost
                cell.append([g2 + heuristic[x2][y2], g2, x2, y2])
                closed[x2][y2] = 1
                action[x2][y2] = move_index

    # Walk backwards from the goal using the recorded actions,
    # then reverse to get the forward path.
    invpath = [[goal[0], goal[1]]]
    x, y = goal[0], goal[1]
    while x != init[0] or y != init[1]:
        move = DIRECTIONS[action[x][y]]
        x, y = x - move[0], y - move[1]
        invpath.append([x, y])

    path = invpath[::-1]
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    # All coordinates are given in format [y, x].
    init = [0, 0]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # The cost map, which pushes the path closer to the goal.
    heuristic = [[0] * len(grid[0]) for _ in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # Added extra penalty in the heuristic map for obstacles.
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for row in action:
        print(row)

    for point in path:
        print(point)
use an ant colony optimization algorithm to solve the travelling salesman problem tsp which asks the following question given a list of cities and the distances between each pair of cities what is the shortest possible route that visits each city exactly once and returns to the origin city https en wikipedia orgwikiantcolonyoptimizationalgorithms https en wikipedia orgwikitravellingsalesmanproblem clark ant colony algorithm main function maincitiescities antsnum10 iterationsnum20 pheromoneevaporation0 7 alpha1 0 beta5 0 q10 0 1 2 3 4 5 6 7 0 37 909778143828696 maincities0 0 0 1 2 2 antsnum5 iterationsnum5 pheromoneevaporation0 7 alpha1 0 beta5 0 q10 0 1 0 5 656854249492381 maincities0 0 0 1 2 2 4 4 4 antsnum5 iterationsnum5 pheromoneevaporation0 7 alpha1 0 beta5 0 q10 traceback most recent call last indexerror list index out of range maincities antsnum5 iterationsnum5 pheromoneevaporation0 7 alpha1 0 beta5 0 q10 traceback most recent call last stopiteration maincities0 0 0 1 2 2 antsnum0 iterationsnum5 pheromoneevaporation0 7 alpha1 0 beta5 0 q10 inf maincities0 0 0 1 2 2 antsnum5 iterationsnum0 pheromoneevaporation0 7 alpha1 0 beta5 0 q10 inf maincities0 0 0 1 2 2 antsnum5 iterationsnum5 pheromoneevaporation1 alpha1 0 beta5 0 q10 0 1 0 5 656854249492381 maincities0 0 0 1 2 2 antsnum5 iterationsnum5 pheromoneevaporation0 alpha1 0 beta5 0 q10 0 1 0 5 656854249492381 initialize the pheromone matrix calculate the distance between two coordinate points distance0 0 3 4 5 0 distance0 0 3 4 5 0 distance0 0 3 4 5 0 update pheromones on the route and update the best route pheromoneupdatepheromone1 0 1 0 1 0 1 0 cities0 0 0 1 2 2 pheromoneevaporation0 7 antsroute0 1 0 q10 bestpath bestdistancefloatinf 0 7 4 235533905932737 4 235533905932737 0 7 0 1 0 5 656854249492381 pheromoneupdatepheromone cities0 0 0 1 2 2 pheromoneevaporation0 7 antsroute0 1 0 q10 bestpath bestdistancefloatinf traceback most recent call last indexerror list index out of range pheromoneupdatepheromone1 0 
1 0 1 0 1 0 cities pheromoneevaporation0 7 antsroute0 1 0 q10 bestpath bestdistancefloatinf traceback most recent call last keyerror 0 choose the next city for ants cityselectpheromone1 0 1 0 1 0 1 0 currentcity0 0 0 unvisitedcities1 2 2 alpha1 0 beta5 0 1 2 2 cityselectpheromone currentcity0 0 0 unvisitedcities1 2 2 alpha1 0 beta5 0 traceback most recent call last indexerror list index out of range cityselectpheromone1 0 1 0 1 0 1 0 currentcity unvisitedcities1 2 2 alpha1 0 beta5 0 traceback most recent call last stopiteration cityselectpheromone1 0 1 0 1 0 1 0 currentcity0 0 0 unvisitedcities alpha1 0 beta5 0 traceback most recent call last indexerror list index out of range pheromone system parameters q which is a constant ant colony algorithm main function main cities cities ants_num 10 iterations_num 20 pheromone_evaporation 0 7 alpha 1 0 beta 5 0 q 10 0 1 2 3 4 5 6 7 0 37 909778143828696 main cities 0 0 0 1 2 2 ants_num 5 iterations_num 5 pheromone_evaporation 0 7 alpha 1 0 beta 5 0 q 10 0 1 0 5 656854249492381 main cities 0 0 0 1 2 2 4 4 4 ants_num 5 iterations_num 5 pheromone_evaporation 0 7 alpha 1 0 beta 5 0 q 10 traceback most recent call last indexerror list index out of range main cities ants_num 5 iterations_num 5 pheromone_evaporation 0 7 alpha 1 0 beta 5 0 q 10 traceback most recent call last stopiteration main cities 0 0 0 1 2 2 ants_num 0 iterations_num 5 pheromone_evaporation 0 7 alpha 1 0 beta 5 0 q 10 inf main cities 0 0 0 1 2 2 ants_num 5 iterations_num 0 pheromone_evaporation 0 7 alpha 1 0 beta 5 0 q 10 inf main cities 0 0 0 1 2 2 ants_num 5 iterations_num 5 pheromone_evaporation 1 alpha 1 0 beta 5 0 q 10 0 1 0 5 656854249492381 main cities 0 0 0 1 2 2 ants_num 5 iterations_num 5 pheromone_evaporation 0 alpha 1 0 beta 5 0 q 10 0 1 0 5 656854249492381 initialize the pheromone matrix calculate the distance between two coordinate points distance 0 0 3 4 5 0 distance 0 0 3 4 5 0 distance 0 0 3 4 5 0 pheromone system parameters q which is a 
constant update pheromones on the route and update the best route pheromone_update pheromone 1 0 1 0 1 0 1 0 cities 0 0 0 1 2 2 pheromone_evaporation 0 7 ants_route 0 1 0 q 10 best_path best_distance float inf 0 7 4 235533905932737 4 235533905932737 0 7 0 1 0 5 656854249492381 pheromone_update pheromone cities 0 0 0 1 2 2 pheromone_evaporation 0 7 ants_route 0 1 0 q 10 best_path best_distance float inf traceback most recent call last indexerror list index out of range pheromone_update pheromone 1 0 1 0 1 0 1 0 cities pheromone_evaporation 0 7 ants_route 0 1 0 q 10 best_path best_distance float inf traceback most recent call last keyerror 0 update the volatilization of pheromone on all routes calculate total distance update pheromones choose the next city for ants city_select pheromone 1 0 1 0 1 0 1 0 current_city 0 0 0 unvisited_cities 1 2 2 alpha 1 0 beta 5 0 1 2 2 city_select pheromone current_city 0 0 0 unvisited_cities 1 2 2 alpha 1 0 beta 5 0 traceback most recent call last indexerror list index out of range city_select pheromone 1 0 1 0 1 0 1 0 current_city unvisited_cities 1 2 2 alpha 1 0 beta 5 0 traceback most recent call last stopiteration city_select pheromone 1 0 1 0 1 0 1 0 current_city 0 0 0 unvisited_cities alpha 1 0 beta 5 0 traceback most recent call last indexerror list index out of range
"""
Use an ant colony optimization algorithm to solve the travelling salesman
problem (TSP) which asks the following question: "Given a list of cities and
the distances between each pair of cities, what is the shortest possible
route that visits each city exactly once and returns to the origin city?"

https://en.wikipedia.org/wiki/Ant_colony_optimization_algorithms
https://en.wikipedia.org/wiki/Travelling_salesman_problem
"""

import copy
import random

# All coordinates are given in the format {city_id: [x, y]}.
cities = {
    0: [0, 0],
    1: [0, 5],
    2: [3, 8],
    3: [8, 10],
    4: [12, 8],
    5: [12, 4],
    6: [8, 0],
    7: [6, 2],
}


def main(
    cities: dict[int, list[int]],
    ants_num: int,
    iterations_num: int,
    pheromone_evaporation: float,
    alpha: float,
    beta: float,
    q: float,  # pheromone system parameter Q, which is a constant
) -> tuple[list[int], float]:
    """
    Ant colony algorithm main function.

    >>> main(cities={0: [0, 0], 1: [2, 2]}, ants_num=5, iterations_num=5,
    ...      pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10)
    ([0, 1, 0], 5.656854249492381)
    >>> main(cities={0: [0, 0], 1: [2, 2]}, ants_num=0, iterations_num=5,
    ...      pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10)
    ([], inf)
    """
    cities_num = len(cities)
    # Initialize the pheromone matrix.  Bug fix: the original used
    # ``[[1.0] * cities_num] * cities_num``, which makes every row an alias of
    # one shared list, so depositing pheromone on edge (a, b) silently updated
    # column b of *every* row.  A comprehension builds independent rows.
    pheromone = [[1.0] * cities_num for _ in range(cities_num)]

    best_path: list[int] = []
    best_distance = float("inf")
    for _ in range(iterations_num):
        ants_route = []
        for _ in range(ants_num):
            unvisited_cities = copy.deepcopy(cities)
            # Every ant starts its tour at the first city of the dict.
            current_city = {next(iter(cities.keys())): next(iter(cities.values()))}
            del unvisited_cities[next(iter(current_city.keys()))]
            ant_route = [next(iter(current_city.keys()))]
            while unvisited_cities:
                current_city, unvisited_cities = city_select(
                    pheromone, current_city, unvisited_cities, alpha, beta
                )
                ant_route.append(next(iter(current_city.keys())))
            ant_route.append(0)  # return to the origin city
            ants_route.append(ant_route)

        pheromone, best_path, best_distance = pheromone_update(
            pheromone,
            cities,
            pheromone_evaporation,
            ants_route,
            q,
            best_path,
            best_distance,
        )
    return best_path, best_distance


def distance(city1: list[int], city2: list[int]) -> float:
    """
    Calculate the Euclidean distance between two coordinate points.

    >>> distance([0, 0], [3, 4])
    5.0
    >>> distance([0, 0], [-3, 4])
    5.0
    >>> distance([0, 0], [-3, -4])
    5.0
    """
    return (((city1[0] - city2[0]) ** 2) + ((city1[1] - city2[1]) ** 2)) ** 0.5


def pheromone_update(
    pheromone: list[list[float]],
    cities: dict[int, list[int]],
    pheromone_evaporation: float,
    ants_route: list[list[int]],
    q: float,  # pheromone system parameter Q, which is a constant
    best_path: list[int],
    best_distance: float,
) -> tuple[list[list[float]], list[int], float]:
    """
    Update pheromones on the routes and update the best route.

    >>> pheromone_update(pheromone=[[1.0, 1.0], [1.0, 1.0]],
    ...                  cities={0: [0, 0], 1: [2, 2]}, pheromone_evaporation=0.7,
    ...                  ants_route=[[0, 1, 0]], q=10, best_path=[],
    ...                  best_distance=float("inf"))
    ([[0.7, 4.235533905932737], [4.235533905932737, 0.7]], [0, 1, 0], 5.656854249492381)
    """
    # Update the volatilization of pheromone on all routes (evaporation).
    for a in range(len(cities)):
        for b in range(len(cities)):
            pheromone[a][b] *= pheromone_evaporation
    for ant_route in ants_route:
        # Calculate total distance of this ant's tour.
        total_distance = 0.0
        for i in range(len(ant_route) - 1):
            total_distance += distance(cities[ant_route[i]], cities[ant_route[i + 1]])
        delta_pheromone = q / total_distance
        # Deposit pheromone symmetrically on every edge of the tour.
        for i in range(len(ant_route) - 1):
            pheromone[ant_route[i]][ant_route[i + 1]] += delta_pheromone
            pheromone[ant_route[i + 1]][ant_route[i]] = pheromone[ant_route[i]][
                ant_route[i + 1]
            ]

        if total_distance < best_distance:
            best_path = ant_route
            best_distance = total_distance

    return pheromone, best_path, best_distance


def city_select(
    pheromone: list[list[float]],
    current_city: dict[int, list[int]],
    unvisited_cities: dict[int, list[int]],
    alpha: float,
    beta: float,
) -> tuple[dict[int, list[int]], dict[int, list[int]]]:
    """
    Choose the next city for the ant, weighted by pheromone strength (alpha)
    and inverse distance (beta).

    >>> random.seed(0)
    >>> city_select(pheromone=[[1.0, 1.0], [1.0, 1.0]], current_city={0: [0, 0]},
    ...             unvisited_cities={1: [2, 2]}, alpha=1.0, beta=5.0)
    ({1: [2, 2]}, {})
    """
    probabilities = []
    for city in unvisited_cities:
        city_distance = distance(
            unvisited_cities[city], next(iter(current_city.values()))
        )
        probability = (pheromone[city][next(iter(current_city.keys()))] ** alpha) * (
            (1 / city_distance) ** beta
        )
        probabilities.append(probability)

    chosen_city_i = random.choices(
        list(unvisited_cities.keys()), weights=probabilities
    )[0]
    chosen_city = {chosen_city_i: unvisited_cities[chosen_city_i]}
    del unvisited_cities[next(iter(chosen_city.keys()))]
    return chosen_city, unvisited_cities


if __name__ == "__main__":
    best_path, best_distance = main(
        cities=cities,
        ants_num=10,
        iterations_num=20,
        pheromone_evaporation=0.7,
        alpha=1.0,
        beta=5.0,
        q=10,
    )

    print(f"{best_path = }")
    print(f"{best_distance = }")
Finding articulation points in an undirected graph: an articulation point can be found either via a bridge or via a cycle, and the graph is supplied as an adjacency list (the single-letter parameter name is kept, silencing lint rule E741 via noqa).
def compute_ap(l):  # noqa: E741
    """Print every articulation point of the undirected graph ``l``.

    ``l`` maps each vertex 0..n-1 to its adjacency list.  Vertices are used
    as their own discovery labels; an articulation point is detected either
    via a bridge or via a cycle, with the DFS root handled separately by its
    outgoing tree-edge count.
    """
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        # Count tree edges leaving the root: the root is an articulation
        # point iff its DFS tree has more than one child.
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                continue
            if visited[to]:
                # Back edge: take the neighbour's label as a low-link bound.
                low[at] = min(low[at], to)
            else:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # Articulation point found via bridge (<) or via cycle (==).
                if at <= low[to]:
                    is_art[at] = True
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = dfs(i, i, -1, 0)
            is_art[i] = out_edge_count > 1

    for vertex, is_articulation in enumerate(is_art):
        if is_articulation is True:
            print(vertex)


# Adjacency list of the example graph.
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
depth first search args g dictionary of edges s starting node vars vis set of visited nodes s traversal stack breadth first search args g dictionary of edges s starting node vars vis set of visited nodes q traversal stack dijkstra s shortest path algorithm args g dictionary of edges s starting node vars dist dictionary storing shortest distance from s to every other node known set of knows nodes path preceding node in path topological sort reading an adjacency matrix n intinput strip a for in rangen a appendtuplemapint input strip split return a n def floyaandn a n aandn dist lista path 0 n for i in rangen for k in rangen for i in rangen for j in rangen if distij distik distkj distij distik distkj pathik k printdist def primg s dist known path s 0 set s 0 while true if lenknown leng 1 break mini 100000 for i in dist if i not in known and disti mini mini disti u i known addu for v in gu if v0 not in known and v1 dist getv0 100000 distv0 v1 pathv0 u return dist def edglist r get the edges and number of edges from the user parameters none returns tuple a tuple containing a list of edges and number of edges example simulate user input for 3 edges and 4 vertices 1 2 2 3 3 4 inputdata 4 3n1 2n2 3n3 4n import sys io originalinput sys stdin sys stdin io stringioinputdata redirect stdin for testing edglist 1 2 2 3 3 4 4 sys stdin originalinput restore original stdin kruskal s mst algorithm args e edge list n number of nodes vars s set of all nodes as unique disjoint sets initially sort edges on the basis of distance find the isolated node in the graph parameters graph dict a dictionary representing a graph returns list a list of isolated nodes examples graph1 1 2 3 2 1 3 3 1 2 4 findisolatednodesgraph1 4 graph2 a b c b a c a d findisolatednodesgraph2 d graph3 x y z findisolatednodesgraph3 x y z graph4 1 2 3 2 1 3 3 1 2 findisolatednodesgraph4 graph5 findisolatednodesgraph5 depth first search args g dictionary of edges s starting node vars vis set of visited nodes s 
traversal stack breadth first search args g dictionary of edges s starting node vars vis set of visited nodes q traversal stack dijkstra s shortest path algorithm args g dictionary of edges s starting node vars dist dictionary storing shortest distance from s to every other node known set of knows nodes path preceding node in path topological sort since oth index is ignored reading an adjacency matrix reading an adjacency matrix parameters none returns tuple a tuple containing a list of edges and number of edges example simulate user input for 3 nodes input_data 4 n0 1 0 1 n1 0 1 0 n0 1 0 1 n1 0 1 0 n import sys io original_input sys stdin sys stdin io stringio input_data redirect stdin for testing adjm 0 1 0 1 1 0 1 0 0 1 0 1 1 0 1 0 4 sys stdin original_input restore original stdin floyd warshall s algorithm args g dictionary of edges s starting node vars dist dictionary storing shortest distance from s to every other node known set of knows nodes path preceding node in path prim s mst algorithm args g dictionary of edges s starting node vars dist dictionary storing shortest distance from s to nearest node known set of knows nodes path preceding node in path accepting edge list vars n number of nodes m number of edges returns l edge list n number of nodes get the edges and number of edges from the user parameters none returns tuple a tuple containing a list of edges and number of edges example simulate user input for 3 edges and 4 vertices 1 2 2 3 3 4 input_data 4 3 n1 2 n2 3 n3 4 n import sys io original_input sys stdin sys stdin io stringio input_data redirect stdin for testing edglist 1 2 2 3 3 4 4 sys stdin original_input restore original stdin kruskal s mst algorithm args e edge list n number of nodes vars s set of all nodes as unique disjoint sets initially sort edges on the basis of distance find the isolated node in the graph parameters graph dict a dictionary representing a graph returns list a list of isolated nodes examples graph1 1 2 3 2 1 3 3 1 2 4 
find_isolated_nodes graph1 4 graph2 a b c b a c a d find_isolated_nodes graph2 d graph3 x y z find_isolated_nodes graph3 x y z graph4 1 2 3 2 1 3 3 1 2 find_isolated_nodes graph4 graph5 find_isolated_nodes graph5
from collections import deque


def _input(message):
    """Prompt the user and return the reply split on single spaces."""
    return input(message).strip().split(" ")


def initialize_unweighted_directed_graph(
    node_count: int, edge_count: int
) -> dict[int, list[int]]:
    """Read ``edge_count`` directed edges from stdin into an adjacency list.

    Nodes are numbered 1..node_count; each prompt expects "<node1> <node2>".
    """
    graph: dict[int, list[int]] = {}
    for i in range(node_count):
        graph[i + 1] = []

    for e in range(edge_count):
        x, y = (int(i) for i in _input(f"Edge {e + 1}: <node1> <node2> "))
        graph[x].append(y)
    return graph


def initialize_unweighted_undirected_graph(
    node_count: int, edge_count: int
) -> dict[int, list[int]]:
    """Read edges from stdin into an undirected (symmetric) adjacency list."""
    graph: dict[int, list[int]] = {}
    for i in range(node_count):
        graph[i + 1] = []

    for e in range(edge_count):
        x, y = (int(i) for i in _input(f"Edge {e + 1}: <node1> <node2> "))
        graph[x].append(y)
        graph[y].append(x)
    return graph


def initialize_weighted_undirected_graph(
    node_count: int, edge_count: int
) -> dict[int, list[tuple[int, int]]]:
    """Read weighted undirected edges; adjacency lists hold (node, weight)."""
    graph: dict[int, list[tuple[int, int]]] = {}
    for i in range(node_count):
        graph[i + 1] = []

    for e in range(edge_count):
        x, y, w = (int(i) for i in _input(f"Edge {e + 1}: <node1> <node2> <weight> "))
        graph[x].append((y, w))
        graph[y].append((x, w))
    return graph


if __name__ == "__main__":
    n, m = (int(i) for i in _input("Number of nodes and edges: "))

    graph_choice = int(
        _input(
            "Press 1 or 2 or 3 \n"
            "1. Unweighted directed \n"
            "2. Unweighted undirected \n"
            "3. Weighted undirected \n"
        )[0]
    )

    g = {
        1: initialize_unweighted_directed_graph,
        2: initialize_unweighted_undirected_graph,
        3: initialize_weighted_undirected_graph,
    }[graph_choice](n, m)


def dfs(g, s):
    """Depth-first search.

    Args:
        g: dictionary of edges (adjacency lists)
        s: starting node
    Prints each node the first time it is reached.
    """
    vis, _s = {s}, [s]
    print(s)
    while _s:
        flag = 0  # set when an unvisited neighbour is found
        for i in g[_s[-1]]:
            if i not in vis:
                _s.append(i)
                vis.add(i)
                flag = 1
                print(i)
                break
        if not flag:
            _s.pop()


def bfs(g, s):
    """Breadth-first search.

    Args:
        g: dictionary of edges (adjacency lists)
        s: starting node
    Prints each node the first time it is reached.
    """
    vis, q = {s}, deque([s])
    print(s)
    while q:
        u = q.popleft()
        for v in g[u]:
            if v not in vis:
                vis.add(v)
                q.append(v)
                print(v)


def dijk(g, s):
    """Dijkstra's shortest path algorithm.

    Args:
        g: weighted adjacency lists {node: [(neighbour, weight), ...]}
        s: starting node
    Prints the shortest distance from ``s`` to every other settled node.
    """
    dist, known, path = {s: 0}, set(), {s: 0}
    while True:
        if len(known) == len(g) - 1:
            break
        mini = 100000  # sentinel "infinity" used throughout this module
        # Pick the closest tentative node that has not been settled yet.
        for i in dist:
            if i not in known and dist[i] < mini:
                mini = dist[i]
                u = i
        known.add(u)
        for v in g[u]:
            if v[0] not in known and dist[u] + v[1] < dist.get(v[0], 100000):
                dist[v[0]] = dist[u] + v[1]
                path[v[0]] = u
    for i in dist:
        if i != s:
            print(dist[i])


def topo(g, ind=None, q=None):
    """Topological sort (recursive Kahn's algorithm); prints the order.

    On the first call the in-degree table ``ind`` (index 0 ignored, nodes are
    numbered from 1) and the queue of zero in-degree nodes are built;
    recursive calls pass both along.
    """
    if q is None:
        q = [1]
    if ind is None:
        ind = [0] * (len(g) + 1)  # since 0th index is ignored
        for u in g:
            for v in g[u]:
                ind[v] += 1
        q = deque()
        for i in g:
            if ind[i] == 0:
                q.append(i)
    if len(q) == 0:
        return
    v = q.popleft()
    print(v)
    for w in g[v]:
        ind[w] -= 1
        if ind[w] == 0:
            q.append(w)
    topo(g, ind, q)


def adjm():
    """Read an adjacency matrix from stdin.

    First line is the node count n, followed by n whitespace-separated rows.

    Returns:
        (matrix, n): the matrix as a list of row tuples and its size.
    """
    n = int(input().strip())
    a = []
    for _ in range(n):
        a.append(tuple(map(int, input().strip().split())))
    return a, n


def floy(a_and_n):
    """Floyd-Warshall all-pairs shortest paths; prints the distance matrix.

    Args:
        a_and_n: tuple (adjacency matrix, number of nodes)
    """
    (a, n) = a_and_n
    # Bug fix: the original ``dist = list(a)`` copied only the outer list, so
    # the relaxation step wrote straight into the caller's rows (and raised
    # TypeError on the tuple rows produced by adjm()).  Copy each row into a
    # fresh list so the input matrix is never mutated.
    dist = [list(row) for row in a]
    path = [[0] * n for i in range(n)]
    for k in range(n):
        for i in range(n):
            for j in range(n):
                if dist[i][j] > dist[i][k] + dist[k][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]
                    path[i][k] = k
    print(dist)


def prim(g, s):
    """Prim's MST algorithm.

    Args:
        g: weighted adjacency lists {node: [(neighbour, weight), ...]}
        s: starting node
    Returns:
        dict mapping each node to the weight of the MST edge that connects it
        (0 for the start node).
    """
    dist, known, path = {s: 0}, set(), {s: 0}
    while True:
        if len(known) == len(g) - 1:
            break
        mini = 100000
        for i in dist:
            if i not in known and dist[i] < mini:
                mini = dist[i]
                u = i
        known.add(u)
        for v in g[u]:
            if v[0] not in known and v[1] < dist.get(v[0], 100000):
                dist[v[0]] = v[1]
                path[v[0]] = u
    return dist


def edglist():
    """Read an edge list from stdin.

    First line is "<nodes> <edges>", followed by one edge per line.

    Returns:
        (edges, n): the list of edge tuples and the number of nodes.
    """
    n, m = tuple(map(int, input().split(" ")))
    edges = []
    for _ in range(m):
        edges.append(tuple(map(int, input().split(" "))))
    return edges, n


def krusk(e_and_n):
    """Kruskal's MST algorithm.

    Args:
        e_and_n: tuple (edge list of (u, v, weight) tuples, number of nodes)
    Prints the evolving disjoint sets.  NOTE: if the graph is disconnected
    the edge list runs out before the sets merge and e.pop() raises
    IndexError (pre-existing behavior, kept as-is).
    """
    (e, n) = e_and_n
    # Sort edges on the basis of distance, descending, so that pop() always
    # yields the cheapest remaining edge.
    e.sort(reverse=True, key=lambda x: x[2])
    # s: set of all nodes as unique disjoint sets (initially).
    s = [{i} for i in range(1, n + 1)]
    while True:
        if len(s) == 1:
            break
        print(s)
        x = e.pop()
        for i in range(len(s)):
            if x[0] in s[i]:
                break
        for j in range(len(s)):
            if x[1] in s[j]:
                if i == j:
                    # Both endpoints already in the same set: skip the edge.
                    break
                s[j].update(s[i])
                s.pop(i)
                break


def find_isolated_nodes(graph):
    """Find nodes with no incident edges in the graph.

    >>> find_isolated_nodes({1: [2, 3], 2: [1, 3], 3: [1, 2], 4: []})
    [4]
    >>> find_isolated_nodes({})
    []
    """
    isolated = []
    for node in graph:
        if not graph[node]:
            isolated.append(node)
    return isolated
returns shortest paths from a vertex src to all other vertices edges 2 1 10 3 2 3 0 3 5 0 1 4 g src s dst d weight w for s d w in edges bellmanfordg 4 4 0 0 0 2 0 8 0 5 0 g src s dst d weight w for s d w in edges 1 3 5 bellmanfordg 4 5 0 traceback most recent call last exception negative cycle found returns shortest paths from a vertex src to all other vertices edges 2 1 10 3 2 3 0 3 5 0 1 4 g src s dst d weight w for s d w in edges bellman_ford g 4 4 0 0 0 2 0 8 0 5 0 g src s dst d weight w for s d w in edges 1 3 5 bellman_ford g 4 5 0 traceback most recent call last exception negative cycle found
from __future__ import annotations


def print_distance(distance: list[float], src):
    """Pretty-print the distance of every vertex from source vertex ``src``."""
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(
    graph: list[dict[str, int]], distance: list[float], edge_count: int
):
    """Return True if one more relaxation round would still shorten a path,
    which (after |V| - 1 rounds) proves a negative-weight cycle exists."""
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(
    graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int
) -> list[float]:
    """
    Returns shortest paths from a vertex src to all other vertices.

    >>> edges = [(2, 1, -10), (3, 2, 3), (0, 3, 5), (0, 1, 4)]
    >>> g = [{"src": s, "dst": d, "weight": w} for s, d, w in edges]
    >>> bellman_ford(g, 4, 4, 0)
    [0.0, -2.0, 8.0, 5.0]
    >>> g = [{"src": s, "dst": d, "weight": w} for s, d, w in edges + [(1, 3, 5)]]
    >>> bellman_ford(g, 4, 5, 0)
    Traceback (most recent call last):
     ...
    Exception: Negative cycle found
    """
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    # |V| - 1 relaxation rounds suffice for any graph without negative cycles.
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])

            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        # Kept as a bare Exception: callers (and the doctest above) rely on
        # this exact exception type and message.
        raise Exception("Negative cycle found")

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    # Bug fix: report distances from the chosen source vertex; the original
    # passed a hard-coded 0 here, mislabeling the output for any other source.
    print_distance(shortest_distance, source)
bidirectional dijkstra s algorithm a bidirectional approach is an efficient and less time consuming optimization for dijkstra s searching algorithm reference shorturl atexhm7 swayam singh https github compractice404 bidirectional dijkstra s algorithm returns shortestpathdistance int length of the shortest path warnings if the destination is not reachable function returns 1 bidirectionaldije f graphfwd graphbwd 3 swayam singh https github com practice404 bi directional dijkstra s algorithm returns shortest_path_distance int length of the shortest path warnings if the destination is not reachable function returns 1 bidirectional_dij e f graph_fwd graph_bwd 3
"""
Bi-directional Dijkstra's algorithm.

A bi-directional approach is an efficient and less time consuming
optimization for Dijkstra's searching algorithm.

Author: Swayam Singh (https://github.com/practice404)
"""

from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    """Relax every edge leaving ``v`` in one search direction and return the
    (possibly improved) best meeting-point distance seen so far."""
    for neighbour, edge_cost in graph[v]:
        if neighbour in visited_forward:
            continue
        relaxed_cost = cst_fwd[v] + edge_cost
        if relaxed_cost < cst_fwd.get(neighbour, np.inf):
            queue.put((relaxed_cost, neighbour))
            cst_fwd[neighbour] = relaxed_cost
            parent[neighbour] = v
        # If the opposite search already settled this neighbour, the two
        # frontiers meet here; keep the cheaper total.
        if (
            neighbour in visited_backward
            and cst_fwd[v] + edge_cost + cst_bwd[neighbour] < shortest_distance
        ):
            shortest_distance = cst_fwd[v] + edge_cost + cst_bwd[neighbour]
    return shortest_distance


def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    """
    Bi-directional Dijkstra's algorithm.

    Returns:
        shortest_path_distance (int): length of the shortest path.

    Warnings:
        If the destination is not reachable, function returns -1

    >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
    3
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        # Settle the cheapest node on each frontier.
        _, settled_fwd = queue_forward.get()
        visited_forward.add(settled_fwd)

        _, settled_bwd = queue_backward.get()
        visited_backward.add(settled_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            settled_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            settled_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # Termination: once the combined settled costs can no longer beat the
        # best meeting-point distance, the answer is final.
        if cst_fwd[settled_fwd] + cst_bwd[settled_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
https en wikipedia orgwikibidirectionalsearch 1 for manhattan 0 for euclidean k node0 0 4 3 0 none k calculateheuristic 5 0 n node1 4 3 4 2 none n calculateheuristic 2 0 l k n n l0 false l sort n l0 true heuristic for the a astar astar0 0 lengrid 1 lengrid0 1 astar start posy delta30 astar start posx delta31 0 1 x pos for x in astar getsuccessorsastar start 1 0 0 1 astar start posy delta20 astar start posx delta21 1 0 astar retracepathastar start 0 0 astar search doctest normalizewhitespace 0 0 1 0 2 0 2 1 2 2 2 3 3 3 4 3 4 4 5 4 5 5 6 5 6 6 open nodes are sorted using lt retrieve the best current path returns a list of successors both in the grid and free spaces retrace the path from parents to parents until start node bdastar bidirectionalastar0 0 lengrid 1 lengrid0 1 bdastar fwdastar start pos bdastar bwdastar target pos true bdastar retracebidirectionalpathbdastar fwdastar start bdastar bwdastar start 0 0 bdastar search doctest normalizewhitespace 0 0 0 1 0 2 1 2 1 3 2 3 2 4 2 5 3 5 4 5 5 5 5 6 6 6 retrieve the best current path all coordinates are given in format y x 1 for manhattan 0 for euclidean 0 are free path whereas 1 s are obstacles up left down right k node 0 0 4 3 0 none k calculate_heuristic 5 0 n node 1 4 3 4 2 none n calculate_heuristic 2 0 l k n n l 0 false l sort n l 0 true heuristic for the a astar astar 0 0 len grid 1 len grid 0 1 astar start pos_y delta 3 0 astar start pos_x delta 3 1 0 1 x pos for x in astar get_successors astar start 1 0 0 1 astar start pos_y delta 2 0 astar start pos_x delta 2 1 1 0 astar retrace_path astar start 0 0 astar search doctest normalize_whitespace 0 0 1 0 2 0 2 1 2 2 2 3 3 3 4 3 4 4 5 4 5 5 6 5 6 6 open nodes are sorted using __lt__ retrieve the best current path returns a list of successors both in the grid and free spaces retrace the path from parents to parents until start node bd_astar bidirectionalastar 0 0 len grid 1 len grid 0 1 bd_astar fwd_astar start pos bd_astar bwd_astar target pos true bd_astar 
retrace_bidirectional_path bd_astar fwd_astar start bd_astar bwd_astar start 0 0 bd_astar search doctest normalize_whitespace 0 0 0 1 0 2 1 2 1 3 2 3 2 4 2 5 3 5 4 5 5 5 5 6 6 6 retrieve the best current path all coordinates are given in format y x
from __future__ import annotations import time from math import sqrt HEURISTIC = 0 grid = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] delta = [[-1, 0], [0, -1], [1, 0], [0, 1]] TPosition = tuple[int, int] class Node: def __init__( self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: int, parent: Node | None, ) -> None: self.pos_x = pos_x self.pos_y = pos_y self.pos = (pos_y, pos_x) self.goal_x = goal_x self.goal_y = goal_y self.g_cost = g_cost self.parent = parent self.h_cost = self.calculate_heuristic() self.f_cost = self.g_cost + self.h_cost def calculate_heuristic(self) -> float: dy = self.pos_x - self.goal_x dx = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(dx) + abs(dy) else: return sqrt(dy**2 + dx**2) def __lt__(self, other: Node) -> bool: return self.f_cost < other.f_cost class AStar: def __init__(self, start: TPosition, goal: TPosition): self.start = Node(start[1], start[0], goal[1], goal[0], 0, None) self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None) self.open_nodes = [self.start] self.closed_nodes: list[Node] = [] self.reached = False def search(self) -> list[TPosition]: while self.open_nodes: self.open_nodes.sort() current_node = self.open_nodes.pop(0) if current_node.pos == self.target.pos: return self.retrace_path(current_node) self.closed_nodes.append(current_node) successors = self.get_successors(current_node) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(child_node) else: better_node = self.open_nodes.pop(self.open_nodes.index(child_node)) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(child_node) else: self.open_nodes.append(better_node) return [self.start.pos] def get_successors(self, parent: Node) -> list[Node]: successors = [] for action in delta: pos_x = parent.pos_x + action[1] 
pos_y = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent, ) ) return successors def retrace_path(self, node: Node | None) -> list[TPosition]: current_node = node path = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x)) current_node = current_node.parent path.reverse() return path class BidirectionalAStar: def __init__(self, start: TPosition, goal: TPosition) -> None: self.fwd_astar = AStar(start, goal) self.bwd_astar = AStar(goal, start) self.reached = False def search(self) -> list[TPosition]: while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() current_fwd_node = self.fwd_astar.open_nodes.pop(0) current_bwd_node = self.bwd_astar.open_nodes.pop(0) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( current_fwd_node, current_bwd_node ) self.fwd_astar.closed_nodes.append(current_fwd_node) self.bwd_astar.closed_nodes.append(current_bwd_node) self.fwd_astar.target = current_bwd_node self.bwd_astar.target = current_fwd_node successors = { self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node), self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(child_node) else: better_node = astar.open_nodes.pop( astar.open_nodes.index(child_node) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(child_node) else: astar.open_nodes.append(better_node) return [self.fwd_astar.start.pos] def retrace_bidirectional_path( self, fwd_node: Node, bwd_node: Node ) -> list[TPosition]: fwd_path = 
self.fwd_astar.retrace_path(fwd_node) bwd_path = self.bwd_astar.retrace_path(bwd_node) bwd_path.pop() bwd_path.reverse() path = fwd_path + bwd_path return path if __name__ == "__main__": init = (0, 0) goal = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) start_time = time.time() a_star = AStar(init, goal) path = a_star.search() end_time = time.time() - start_time print(f"AStar execution time = {end_time:f} seconds") bd_start_time = time.time() bidir_astar = BidirectionalAStar(init, goal) bd_end_time = time.time() - bd_start_time print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
https en wikipedia orgwikibidirectionalsearch comment out slow pytests 9 15s call graphsbidirectionalbreadthfirstsearch py graphs bidirectionalbreadthfirstsearch breadthfirstsearch bfs breadthfirstsearch0 0 lengrid 1 lengrid0 1 bfs start posy delta30 bfs start posx delta31 0 1 x pos for x in bfs getsuccessorsbfs start 1 0 0 1 bfs start posy delta20 bfs start posx delta21 1 0 bfs retracepathbfs start 0 0 bfs search doctest normalizewhitespace 0 0 1 0 2 0 3 0 3 1 4 1 5 1 5 2 5 3 5 4 5 5 6 5 6 6 returns a list of successors both in the grid and free spaces retrace the path from parents to parents until start node bdbfs bidirectionalbreadthfirstsearch0 0 lengrid 1 lengrid0 1 bdbfs fwdbfs start pos bdbfs bwdbfs target pos true bdbfs retracebidirectionalpathbdbfs fwdbfs start bdbfs bwdbfs start 0 0 bdbfs search doctest normalizewhitespace 0 0 0 1 0 2 1 2 2 2 2 3 2 4 3 4 3 5 3 6 4 6 5 6 6 6 all coordinates are given in format y x 0 are free path whereas 1 s are obstacles up left down right comment out slow pytests 9 15s call graphs bidirectional_breadth_first_search py graphs bidirectional_breadth_first_search breadthfirstsearch bfs breadthfirstsearch 0 0 len grid 1 len grid 0 1 bfs start pos_y delta 3 0 bfs start pos_x delta 3 1 0 1 x pos for x in bfs get_successors bfs start 1 0 0 1 bfs start pos_y delta 2 0 bfs start pos_x delta 2 1 1 0 bfs retrace_path bfs start 0 0 bfs search doctest normalize_whitespace 0 0 1 0 2 0 3 0 3 1 4 1 5 1 5 2 5 3 5 4 5 5 6 5 6 6 returns a list of successors both in the grid and free spaces retrace the path from parents to parents until start node bd_bfs bidirectionalbreadthfirstsearch 0 0 len grid 1 len grid 0 1 bd_bfs fwd_bfs start pos bd_bfs bwd_bfs target pos true bd_bfs retrace_bidirectional_path bd_bfs fwd_bfs start bd_bfs bwd_bfs start 0 0 bd_bfs search doctest normalize_whitespace 0 0 0 1 0 2 1 2 2 2 2 3 2 4 3 4 3 5 3 6 4 6 5 6 6 6 all coordinates are given in format y x
from __future__ import annotations import time Path = list[tuple[int, int]] grid = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] delta = [[-1, 0], [0, -1], [1, 0], [0, 1]] class Node: def __init__( self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None ): self.pos_x = pos_x self.pos_y = pos_y self.pos = (pos_y, pos_x) self.goal_x = goal_x self.goal_y = goal_y self.parent = parent class BreadthFirstSearch: def __init__(self, start: tuple[int, int], goal: tuple[int, int]): self.start = Node(start[1], start[0], goal[1], goal[0], None) self.target = Node(goal[1], goal[0], goal[1], goal[0], None) self.node_queue = [self.start] self.reached = False def search(self) -> Path | None: while self.node_queue: current_node = self.node_queue.pop(0) if current_node.pos == self.target.pos: self.reached = True return self.retrace_path(current_node) successors = self.get_successors(current_node) for node in successors: self.node_queue.append(node) if not self.reached: return [self.start.pos] return None def get_successors(self, parent: Node) -> list[Node]: successors = [] for action in delta: pos_x = parent.pos_x + action[1] pos_y = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent) ) return successors def retrace_path(self, node: Node | None) -> Path: current_node = node path = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x)) current_node = current_node.parent path.reverse() return path class BidirectionalBreadthFirstSearch: def __init__(self, start, goal): self.fwd_bfs = BreadthFirstSearch(start, goal) self.bwd_bfs = BreadthFirstSearch(goal, start) self.reached = False def search(self) -> Path | None: while self.fwd_bfs.node_queue 
or self.bwd_bfs.node_queue: current_fwd_node = self.fwd_bfs.node_queue.pop(0) current_bwd_node = self.bwd_bfs.node_queue.pop(0) if current_bwd_node.pos == current_fwd_node.pos: self.reached = True return self.retrace_bidirectional_path( current_fwd_node, current_bwd_node ) self.fwd_bfs.target = current_bwd_node self.bwd_bfs.target = current_fwd_node successors = { self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node), self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node), } for bfs in [self.fwd_bfs, self.bwd_bfs]: for node in successors[bfs]: bfs.node_queue.append(node) if not self.reached: return [self.fwd_bfs.start.pos] return None def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path: fwd_path = self.fwd_bfs.retrace_path(fwd_node) bwd_path = self.bwd_bfs.retrace_path(bwd_node) bwd_path.pop() bwd_path.reverse() path = fwd_path + bwd_path return path if __name__ == "__main__": import doctest doctest.testmod() init = (0, 0) goal = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) start_bfs_time = time.time() bfs = BreadthFirstSearch(init, goal) path = bfs.search() bfs_time = time.time() - start_bfs_time print("Unidirectional BFS computation time : ", bfs_time) start_bd_bfs_time = time.time() bd_bfs = BidirectionalBreadthFirstSearch(init, goal) bd_path = bd_bfs.search() bd_bfs_time = time.time() - start_bd_bfs_time print("Bidirectional BFS computation time : ", bd_bfs_time)
usrbinpython omkar pathak from future import annotations from queue import queue class graph def initself none self vertices dictint listint def printgraphself none for i in self vertices printi joinstrj for j in self verticesi def addedgeself fromvertex int tovertex int none if fromvertex in self vertices self verticesfromvertex appendtovertex else self verticesfromvertex tovertex def bfsself startvertex int setint initialize set for storing already visited vertices visited set create a first in first out queue to store all the vertices for bfs queue queue queue mark the source node as visited and enqueue it visited addstartvertex queue putstartvertex while not queue empty vertex queue get loop through all adjacent vertex and enqueue it if not yet visited for adjacentvertex in self verticesvertex if adjacentvertex not in visited queue putadjacentvertex visited addadjacentvertex return visited if name main from doctest import testmod testmodverbosetrue g graph g addedge0 1 g addedge0 2 g addedge1 2 g addedge2 0 g addedge2 3 g addedge3 3 g printgraph 0 1 2 1 2 2 0 3 3 3 assert sortedg bfs2 0 1 2 3 usr bin python omkar pathak prints adjacency list representation of graaph g graph g print_graph g add_edge 0 1 g print_graph 0 1 adding the edge between two vertices g graph g print_graph g add_edge 0 1 g print_graph 0 1 g graph g add_edge 0 1 g add_edge 0 1 g add_edge 0 2 g add_edge 1 2 g add_edge 2 0 g add_edge 2 3 g add_edge 3 3 sorted g bfs 2 0 1 2 3 initialize set for storing already visited vertices create a first in first out queue to store all the vertices for bfs mark the source node as visited and enqueue it loop through all adjacent vertex and enqueue it if not yet visited 0 1 2 1 2 2 0 3 3 3
from __future__ import annotations from queue import Queue class Graph: def __init__(self) -> None: self.vertices: dict[int, list[int]] = {} def print_graph(self) -> None: for i in self.vertices: print(i, " : ", " -> ".join([str(j) for j in self.vertices[i]])) def add_edge(self, from_vertex: int, to_vertex: int) -> None: if from_vertex in self.vertices: self.vertices[from_vertex].append(to_vertex) else: self.vertices[from_vertex] = [to_vertex] def bfs(self, start_vertex: int) -> set[int]: visited = set() queue: Queue = Queue() visited.add(start_vertex) queue.put(start_vertex) while not queue.empty(): vertex = queue.get() for adjacent_vertex in self.vertices[vertex]: if adjacent_vertex not in visited: queue.put(adjacent_vertex) visited.add(adjacent_vertex) return visited if __name__ == "__main__": from doctest import testmod testmod(verbose=True) g = Graph() g.add_edge(0, 1) g.add_edge(0, 2) g.add_edge(1, 2) g.add_edge(2, 0) g.add_edge(2, 3) g.add_edge(3, 3) g.print_graph() assert sorted(g.bfs(2)) == [0, 1, 2, 3]
https en wikipedia orgwikibreadthfirstsearch pseudocode breadthfirstsearchgraph g start vertex s all nodes initially unexplored mark s as explored let q queue data structure initialized with s while q is nonempty remove the first node of q call it v for each edgev w for w in graphv if w unexplored mark w as explored add w to q at the end implementation of breadth first search using queue queue joinbreadthfirstsearchg a abcdef implementation of breadth first search using collection queue joinbreadthfirstsearchwithdequeg a abcdef breadthfirstsearch finished 10000 runs in 0 20999 seconds breadthfirstsearchwithdeque finished 10000 runs in 0 01421 seconds implementation of breadth first search using queue queue join breadth_first_search g a abcdef implementation of breadth first search using collection queue join breadth_first_search_with_deque g a abcdef breadth_first_search finished 10000 runs in 0 20999 seconds breadth_first_search_with_deque finished 10000 runs in 0 01421 seconds
from __future__ import annotations from collections import deque from queue import Queue from timeit import timeit G = { "A": ["B", "C"], "B": ["A", "D", "E"], "C": ["A", "F"], "D": ["B"], "E": ["B", "F"], "F": ["C", "E"], } def breadth_first_search(graph: dict, start: str) -> list[str]: explored = {start} result = [start] queue: Queue = Queue() queue.put(start) while not queue.empty(): v = queue.get() for w in graph[v]: if w not in explored: explored.add(w) result.append(w) queue.put(w) return result def breadth_first_search_with_deque(graph: dict, start: str) -> list[str]: visited = {start} result = [start] queue = deque([start]) while queue: v = queue.popleft() for child in graph[v]: if child not in visited: visited.add(child) result.append(child) queue.append(child) return result def benchmark_function(name: str) -> None: setup = f"from __main__ import G, {name}" number = 10000 res = timeit(f"{name}(G, 'A')", setup=setup, number=number) print(f"{name:<35} finished {number} runs in {res:.5f} seconds") if __name__ == "__main__": import doctest doctest.testmod() benchmark_function("breadth_first_search") benchmark_function("breadth_first_search_with_deque")
breath first search bfs can be used when finding the shortest path from a given source node to a target node in an unweighted graph graph is implemented as dictionary of adjacency lists also source vertex have to be defined upon initialization mapping node to its parent in resulting breadth first tree this function is a helper for running breath first search on this graph g graphgraph g g breathfirstsearch g parent g none c g a c f c b a e a d b this shortest path function returns a string describing the result 1 no path is found the string is a human readable message to indicate this 2 the shortest path is found the string is in the form v1v2v3 vn where v1 is the source vertex and vn is the target vertex if it exists separately g graphgraph g g breathfirstsearch case 1 no path is found g shortestpathfoo traceback most recent call last valueerror no path from vertex g to vertex foo case 2 the path is found g shortestpathd gcabd g shortestpathg g graph is implemented as dictionary of adjacency lists also source vertex have to be defined upon initialization mapping node to its parent in resulting breadth first tree this function is a helper for running breath first search on this graph g graph graph g g breath_first_search g parent g none c g a c f c b a e a d b first in first out queue this shortest path function returns a string describing the result 1 no path is found the string is a human readable message to indicate this 2 the shortest path is found the string is in the form v1 v2 v3 vn where v1 is the source vertex and vn is the target vertex if it exists separately g graph graph g g breath_first_search case 1 no path is found g shortest_path foo traceback most recent call last valueerror no path from vertex g to vertex foo case 2 the path is found g shortest_path d g c a b d g shortest_path g g
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    """BFS shortest paths from a fixed source over an adjacency-list graph.

    The graph is a dict mapping each vertex to its list of neighbours;
    the source vertex is fixed at construction time.
    """

    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # Maps each discovered vertex to its parent in the resulting
        # breadth-first tree (the source itself maps to None).
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Run BFS from the source, filling ``self.parent`` for every
        reachable vertex.

        (Method name kept for backward compatibility; "breath" is an
        upstream typo for "breadth".)
        """
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first-in-first-out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the path from the source to ``target_vertex`` as
        "v1->v2->...->vn".

        Raises:
            ValueError: if BFS found no path to ``target_vertex``.
        """
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    # Bug fix: the unreachable vertex used to crash the demo with an
    # uncaught ValueError; report the error instead.
    try:
        print(g.shortest_path("Foo"))
    except ValueError as error:
        print(error)
breadthfirst search shortest path implementations doctest python m doctest v bfsshortestpath py manual test python bfsshortestpath py find shortest path between start and goal nodes args graph dict nodelist of neighboring nodes keyvalue pairs start start node goal target node returns shortest path between start and goal nodes as a string of nodes not found string if no path found example bfsshortestpathdemograph g d g c a b d bfsshortestpathdemograph g g g bfsshortestpathdemograph g unknown keep track of explored nodes keep track of all the paths to be checked return path if start is goal keeps looping until all possible paths have been checked pop the first path from the queue get the last node from the path go through all neighbour nodes construct a new path and push it into the queue return path if neighbour is goal mark node as explored in case there s no path between the 2 nodes find shortest path distance between start and target nodes args graph nodelist of neighboring nodes keyvalue pairs start node to start search from target node to search for returns number of edges in shortest path between start and target nodes 1 if no path exists example bfsshortestpathdistancedemograph g d 4 bfsshortestpathdistancedemograph a a 0 bfsshortestpathdistancedemograph a unknown 1 keep tab on distances from start node find shortest path between start and goal nodes args graph dict node list of neighboring nodes key value pairs start start node goal target node returns shortest path between start and goal nodes as a string of nodes not found string if no path found example bfs_shortest_path demo_graph g d g c a b d bfs_shortest_path demo_graph g g g bfs_shortest_path demo_graph g unknown keep track of explored nodes keep track of all the paths to be checked return path if start is goal keeps looping until all possible paths have been checked pop the first path from the queue get the last node from the path go through all neighbour nodes construct a new path and push it into 
the queue return path if neighbour is goal mark node as explored in case there s no path between the 2 nodes find shortest path distance between start and target nodes args graph node list of neighboring nodes key value pairs start node to start search from target node to search for returns number of edges in shortest path between start and target nodes 1 if no path exists example bfs_shortest_path_distance demo_graph g d 4 bfs_shortest_path_distance demo_graph a a 0 bfs_shortest_path_distance demo_graph a unknown 1 keep tab on distances from start node returns g c a b d returns 4
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between ``start`` and ``goal`` nodes.

    Args:
        graph (dict): node/list of neighboring nodes key/value pairs.
        start: start node.
        goal: target node.

    Returns:
        The shortest path as a list of nodes, or an empty list if no path
        exists.

    Example:
        >>> bfs_shortest_path(demo_graph, "G", "D")
        ['G', 'C', 'A', 'B', 'D']
    """
    # Keep track of explored nodes.
    explored = set()
    # Keep track of all the paths to be checked.
    queue = [[start]]

    # Return path if start is goal.
    if start == goal:
        return [start]

    # Keep looping until all possible paths have been checked.
    while queue:
        # Pop the first path from the queue.
        path = queue.pop(0)
        # Get the last node from the path.
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # Go through all neighbour nodes, construct a new path and push
            # it into the queue.
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # Return path if neighbour is goal.
                if neighbour == goal:
                    return new_path

            # Mark node as explored.
            explored.add(node)

    # In case there's no path between the 2 nodes.
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest-path distance between ``start`` and ``target`` nodes.

    Args:
        graph: node/list of neighboring nodes key/value pairs.
        start: node to start search from.
        target: node to search for.

    Returns:
        Number of edges in the shortest path between start and target nodes,
        or -1 if no path exists.

    Example:
        >>> bfs_shortest_path_distance(demo_graph, "G", "D")
        4
        >>> bfs_shortest_path_distance(demo_graph, "A", "A")
        0
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    # Bug fix: ``set(start)`` built a set of the *characters* of the start
    # node, which wrongly skipped any neighbour equal to one of them.
    visited = {start}
    # Keep tab on distances from the start node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
finding the shortest path in 01graph in oe v which is faster than dijkstra 01graph is the weighted graph with the weights equal to 0 or 1 link https codeforces comblogentry22276 weighted directed graph edge destinationvertex int weight int class adjacencylist get all the vertices adjacent to the given one return iterself graphvertex property def sizeself return self size def addedgeself fromvertex int tovertex int weight int if weight not in 0 1 raise valueerroredge weight must be either 0 or 1 if tovertex 0 or tovertex self size raise valueerrorvertex indexes must be in 0 size self graphfromvertex appendedgetovertex weight def getshortestpathself startvertex int finishvertex int int none queue dequestartvertex distances listint none none self size distancesstartvertex 0 while queue currentvertex queue popleft currentdistance distancescurrentvertex if currentdistance is none continue for edge in selfcurrentvertex newdistance currentdistance edge weight destvertexdistance distancesedge destinationvertex if isinstancedestvertexdistance int and newdistance destvertexdistance continue distancesedge destinationvertex newdistance if edge weight 0 queue appendleftedge destinationvertex else queue appendedge destinationvertex if distancesfinishvertex is none raise valueerrorno path from startvertex to finishvertex return distancesfinishvertex if name main import doctest doctest testmod weighted directed graph edge graph adjacency list get all the vertices adjacent to the given one g adjacencylist 2 g add_edge 0 1 0 g add_edge 1 0 1 list g 0 edge destination_vertex 1 weight 0 list g 1 edge destination_vertex 0 weight 1 g add_edge 0 1 2 traceback most recent call last valueerror edge weight must be either 0 or 1 g add_edge 0 2 1 traceback most recent call last valueerror vertex indexes must be in 0 size return the shortest distance from start_vertex to finish_vertex in 0 1 graph 1 1 1 0 3 6 7 8 1 0 v 0 0 1 9 10 1 v 0 1 2 4 5 0 1 1 g adjacencylist 11 g add_edge 0 1 0 g 
add_edge 0 3 1 g add_edge 1 2 0 g add_edge 2 3 0 g add_edge 4 2 1 g add_edge 4 5 1 g add_edge 4 6 1 g add_edge 5 9 0 g add_edge 6 7 1 g add_edge 7 8 1 g add_edge 8 10 1 g add_edge 9 7 0 g add_edge 9 10 1 g add_edge 1 2 2 traceback most recent call last valueerror edge weight must be either 0 or 1 g get_shortest_path 0 3 0 g get_shortest_path 0 4 traceback most recent call last valueerror no path from start_vertex to finish_vertex g get_shortest_path 4 10 2 g get_shortest_path 4 8 2 g get_shortest_path 0 1 0 g get_shortest_path 1 0 traceback most recent call last valueerror no path from start_vertex to finish_vertex
from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class Edge: destination_vertex: int weight: int class AdjacencyList: def __init__(self, size: int): self._graph: list[list[Edge]] = [[] for _ in range(size)] self._size = size def __getitem__(self, vertex: int) -> Iterator[Edge]: return iter(self._graph[vertex]) @property def size(self): return self._size def add_edge(self, from_vertex: int, to_vertex: int, weight: int): if weight not in (0, 1): raise ValueError("Edge weight must be either 0 or 1.") if to_vertex < 0 or to_vertex >= self.size: raise ValueError("Vertex indexes must be in [0; size).") self._graph[from_vertex].append(Edge(to_vertex, weight)) def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None: queue = deque([start_vertex]) distances: list[int | None] = [None] * self.size distances[start_vertex] = 0 while queue: current_vertex = queue.popleft() current_distance = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: new_distance = current_distance + edge.weight dest_vertex_distance = distances[edge.destination_vertex] if ( isinstance(dest_vertex_distance, int) and new_distance >= dest_vertex_distance ): continue distances[edge.destination_vertex] = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex) else: queue.append(edge.destination_vertex) if distances[finish_vertex] is None: raise ValueError("No path from start_vertex to finish_vertex.") return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
check if a graph is bipartite using depthfirst search dfs args graph adjacency list representing the graph returns true if bipartite false otherwise checks if the graph can be divided into two sets of vertices such that no two vertices within the same set are connected by an edge examples fixme this test should pass isbipartitedfsdefaultdictlist 0 1 2 1 0 3 2 0 4 traceback most recent call last runtimeerror dictionary changed size during iteration isbipartitedfsdefaultdictlist 0 1 2 1 0 3 2 0 1 false isbipartitedfs true isbipartitedfs0 1 3 1 0 2 2 1 3 3 0 2 true isbipartitedfs0 1 2 3 1 0 2 2 0 1 3 3 0 2 false isbipartitedfs0 4 1 2 4 3 4 4 0 2 3 true isbipartitedfs0 1 3 1 0 2 2 1 3 3 0 2 4 0 false isbipartitedfs7 1 3 1 0 2 2 1 3 3 0 2 4 0 traceback most recent call last keyerror 0 fixme this test should fails with keyerror 4 isbipartitedfs0 1 3 1 0 2 2 1 3 3 0 2 9 0 false isbipartitedfs0 1 3 1 0 2 traceback most recent call last keyerror 1 isbipartitedfs1 0 2 0 1 1 1 0 2 2 1 1 true isbipartitedfs0 9 1 3 1 0 2 2 1 3 3 0 2 traceback most recent call last keyerror 0 fixme this test should fails with typeerror list indices must be integers or isbipartitedfs0 1 0 3 0 1 0 0 2 0 2 0 1 0 3 0 3 0 0 2 0 true isbipartitedfsa 1 3 b 0 2 c 1 3 d 0 2 traceback most recent call last keyerror 1 isbipartitedfs0 b d 1 a c 2 b d 3 a c traceback most recent call last keyerror b perform depthfirst search dfs on the graph starting from a node args node the current node being visited color the color assigned to the current node returns true if the graph is bipartite starting from the current node false otherwise check if a graph is bipartite using a breadthfirst search bfs args graph adjacency list representing the graph returns true if bipartite false otherwise check if the graph can be divided into two sets of vertices such that no two vertices within the same set are connected by an edge examples fixme this test should pass isbipartitebfsdefaultdictlist 0 1 2 1 0 3 2 0 4 traceback most 
recent call last runtimeerror dictionary changed size during iteration isbipartitebfsdefaultdictlist 0 1 2 1 0 2 2 0 1 false isbipartitebfs true isbipartitebfs0 1 3 1 0 2 2 1 3 3 0 2 true isbipartitebfs0 1 2 3 1 0 2 2 0 1 3 3 0 2 false isbipartitebfs0 4 1 2 4 3 4 4 0 2 3 true isbipartitebfs0 1 3 1 0 2 2 1 3 3 0 2 4 0 false isbipartitebfs7 1 3 1 0 2 2 1 3 3 0 2 4 0 traceback most recent call last keyerror 0 fixme this test should fails with keyerror 4 isbipartitebfs0 1 3 1 0 2 2 1 3 3 0 2 9 0 false isbipartitebfs0 1 3 1 0 2 traceback most recent call last keyerror 1 isbipartitebfs1 0 2 0 1 1 1 0 2 2 1 1 true isbipartitebfs0 9 1 3 1 0 2 2 1 3 3 0 2 traceback most recent call last keyerror 0 fixme this test should fails with typeerror list indices must be integers or isbipartitebfs0 1 0 3 0 1 0 0 2 0 2 0 1 0 3 0 3 0 0 2 0 true isbipartitebfsa 1 3 b 0 2 c 1 3 d 0 2 traceback most recent call last keyerror 1 isbipartitebfs0 b d 1 a c 2 b d 3 a c traceback most recent call last keyerror b check if a graph is bipartite using depth first search dfs args graph adjacency list representing the graph returns true if bipartite false otherwise checks if the graph can be divided into two sets of vertices such that no two vertices within the same set are connected by an edge examples fixme this test should pass is_bipartite_dfs defaultdict list 0 1 2 1 0 3 2 0 4 traceback most recent call last runtimeerror dictionary changed size during iteration is_bipartite_dfs defaultdict list 0 1 2 1 0 3 2 0 1 false is_bipartite_dfs true is_bipartite_dfs 0 1 3 1 0 2 2 1 3 3 0 2 true is_bipartite_dfs 0 1 2 3 1 0 2 2 0 1 3 3 0 2 false is_bipartite_dfs 0 4 1 2 4 3 4 4 0 2 3 true is_bipartite_dfs 0 1 3 1 0 2 2 1 3 3 0 2 4 0 false is_bipartite_dfs 7 1 3 1 0 2 2 1 3 3 0 2 4 0 traceback most recent call last keyerror 0 fixme this test should fails with keyerror 4 is_bipartite_dfs 0 1 3 1 0 2 2 1 3 3 0 2 9 0 false is_bipartite_dfs 0 1 3 1 0 2 traceback most recent call last keyerror 1 is_bipartite_dfs 
1 0 2 0 1 1 1 0 2 2 1 1 true is_bipartite_dfs 0 9 1 3 1 0 2 2 1 3 3 0 2 traceback most recent call last keyerror 0 fixme this test should fails with typeerror list indices must be integers or is_bipartite_dfs 0 1 0 3 0 1 0 0 2 0 2 0 1 0 3 0 3 0 0 2 0 true is_bipartite_dfs a 1 3 b 0 2 c 1 3 d 0 2 traceback most recent call last keyerror 1 is_bipartite_dfs 0 b d 1 a c 2 b d 3 a c traceback most recent call last keyerror b perform depth first search dfs on the graph starting from a node args node the current node being visited color the color assigned to the current node returns true if the graph is bipartite starting from the current node false otherwise check if a graph is bipartite using a breadth first search bfs args graph adjacency list representing the graph returns true if bipartite false otherwise check if the graph can be divided into two sets of vertices such that no two vertices within the same set are connected by an edge examples fixme this test should pass is_bipartite_bfs defaultdict list 0 1 2 1 0 3 2 0 4 traceback most recent call last runtimeerror dictionary changed size during iteration is_bipartite_bfs defaultdict list 0 1 2 1 0 2 2 0 1 false is_bipartite_bfs true is_bipartite_bfs 0 1 3 1 0 2 2 1 3 3 0 2 true is_bipartite_bfs 0 1 2 3 1 0 2 2 0 1 3 3 0 2 false is_bipartite_bfs 0 4 1 2 4 3 4 4 0 2 3 true is_bipartite_bfs 0 1 3 1 0 2 2 1 3 3 0 2 4 0 false is_bipartite_bfs 7 1 3 1 0 2 2 1 3 3 0 2 4 0 traceback most recent call last keyerror 0 fixme this test should fails with keyerror 4 is_bipartite_bfs 0 1 3 1 0 2 2 1 3 3 0 2 9 0 false is_bipartite_bfs 0 1 3 1 0 2 traceback most recent call last keyerror 1 is_bipartite_bfs 1 0 2 0 1 1 1 0 2 2 1 1 true is_bipartite_bfs 0 9 1 3 1 0 2 2 1 3 3 0 2 traceback most recent call last keyerror 0 fixme this test should fails with typeerror list indices must be integers or is_bipartite_bfs 0 1 0 3 0 1 0 0 2 0 2 0 1 0 3 0 3 0 0 2 0 true is_bipartite_bfs a 1 3 b 0 2 c 1 3 d 0 2 traceback most recent call last 
keyerror 1 is_bipartite_bfs 0 b d 1 a c 2 b d 3 a c traceback most recent call last keyerror b
from collections import defaultdict, deque


def is_bipartite_dfs(graph: defaultdict[int, list[int]]) -> bool:
    """Check if a graph is bipartite using depth-first search (DFS).

    Args:
        graph: Adjacency list representing the graph.

    Returns:
        True if bipartite, False otherwise.
    """

    def depth_first_search(node: int, color: int) -> bool:
        """Color ``node`` and recurse into its neighbors with the opposite color.

        Returns True if the subgraph reachable from ``node`` is consistently
        2-colorable starting from the current assignment.
        """
        if visited[node] == -1:
            visited[node] = color
            for neighbor in graph[node]:
                if not depth_first_search(neighbor, 1 - color):
                    return False
        # An already-colored node must match the expected color;
        # a mismatch means an odd cycle was found.
        return visited[node] == color

    # -1 marks "uncolored"; colors are 0 and 1.
    visited: defaultdict[int, int] = defaultdict(lambda: -1)
    # Start a DFS from every uncolored node so that disconnected
    # components are covered as well.
    for node in graph:
        if visited[node] == -1 and not depth_first_search(node, 0):
            return False
    return True


def is_bipartite_bfs(graph: defaultdict[int, list[int]]) -> bool:
    """Check if a graph is bipartite using breadth-first search (BFS).

    Args:
        graph: Adjacency list representing the graph.

    Returns:
        True if bipartite, False otherwise.
    """
    # -1 marks "uncolored"; colors are 0 and 1.
    visited: defaultdict[int, int] = defaultdict(lambda: -1)
    for node in graph:
        if visited[node] == -1:
            queue: deque[int] = deque()
            queue.append(node)
            visited[node] = 0
            while queue:
                curr_node = queue.popleft()
                for neighbor in graph[curr_node]:
                    if visited[neighbor] == -1:
                        # Assign the opposite color to a newly seen neighbor.
                        visited[neighbor] = 1 - visited[curr_node]
                        queue.append(neighbor)
                    elif visited[neighbor] == visited[curr_node]:
                        # Two adjacent nodes share a color -> not bipartite.
                        return False
    return True


# BUG FIX: the guard previously compared against "__main" (missing trailing
# underscores), so the doctest runner below could never execute.
if __name__ == "__main__":
    import doctest

    result = doctest.testmod()
    if result.failed:
        print(f"{result.failed} test(s) failed.")
    else:
        print("All tests passed!")
program to check if a cycle is present in a given graph returns true if graph is cyclic else false checkcyclegraph0 1 0 3 2 0 4 3 5 4 5 5 false checkcyclegraph0 1 2 1 2 2 0 3 3 3 true keep track of visited nodes to detect a back edge keep track of vertices currently in the recursion stack recur for all neighbours if any neighbour is visited and in recstk then graph is cyclic graph 0 1 0 3 2 0 4 3 5 4 5 5 vertex visited recstk 0 set set depthfirstsearchgraph vertex visited recstk false mark current node as visited and add to recursion stack the node needs to be removed from recursion stack before function ends returns true if graph is cyclic else false check_cycle graph 0 1 0 3 2 0 4 3 5 4 5 5 false check_cycle graph 0 1 2 1 2 2 0 3 3 3 true keep track of visited nodes to detect a back edge keep track of vertices currently in the recursion stack recur for all neighbours if any neighbour is visited and in rec_stk then graph is cyclic graph 0 1 0 3 2 0 4 3 5 4 5 5 vertex visited rec_stk 0 set set depth_first_search graph vertex visited rec_stk false mark current node as visited and add to recursion stack the node needs to be removed from recursion stack before function ends
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph contains a cycle, else False.

    >>> check_cycle(graph={0: [], 1: [0, 3], 2: [0, 4], 3: [5], 4: [5], 5: []})
    False
    >>> check_cycle(graph={0: [1, 2], 1: [2], 2: [0, 3], 3: [3]})
    True
    """
    # Vertices whose exploration has already started (avoids re-doing work)
    # and vertices currently on the active recursion path (detect back edges).
    visited: set[int] = set()
    rec_stk: set[int] = set()
    for start in graph:
        if start in visited:
            continue
        if depth_first_search(graph, start, visited, rec_stk):
            return True
    return False


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """Recursively explore from ``vertex``; return True when a back edge is found."""
    # Mark the vertex as visited and push it onto the recursion path.
    visited.add(vertex)
    rec_stk.add(vertex)
    for successor in graph[vertex]:
        if successor in visited:
            # An edge back into the active recursion path closes a cycle.
            if successor in rec_stk:
                return True
        elif depth_first_search(graph, successor, visited, rec_stk):
            return True
    # Pop the vertex from the recursion path before returning.
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
https en wikipedia orgwikicomponentgraphtheory finding connected components in graph use depth first search to find all vertices being in the same component as initial vertex dfstestgraph1 0 5 false 0 1 3 2 dfstestgraph2 0 6 false 0 1 3 2 this function takes graph as a parameter and then returns the list of connected components connectedcomponentstestgraph1 0 1 3 2 4 5 6 connectedcomponentstestgraph2 0 1 3 2 4 5 use depth first search to find all vertices being in the same component as initial vertex dfs test_graph_1 0 5 false 0 1 3 2 dfs test_graph_2 0 6 false 0 1 3 2 this function takes graph as a parameter and then returns the list of connected components connected_components test_graph_1 0 1 3 2 4 5 6 connected_components test_graph_2 0 1 3 2 4 5
test_graph_1 = {0: [1, 2], 1: [0, 3], 2: [0], 3: [1], 4: [5, 6], 5: [4, 6], 6: [4, 5]}
test_graph_2 = {0: [1, 2, 3], 1: [0, 3], 2: [0], 3: [0, 1], 4: [], 5: []}


def dfs(graph: dict, vert: int, visited: list) -> list:
    """Collect every vertex in the same component as ``vert`` via DFS.

    >>> dfs(test_graph_1, 0, 5 * [False])
    [0, 1, 3, 2]
    >>> dfs(test_graph_2, 0, 6 * [False])
    [0, 1, 3, 2]
    """
    visited[vert] = True
    # The component starts with this vertex, followed by everything
    # reachable through each not-yet-visited neighbor, in order.
    component = [vert]
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            component.extend(dfs(graph, neighbour, visited))
    return component


def connected_components(graph: dict) -> list:
    """Return the list of connected components of ``graph``.

    >>> connected_components(test_graph_1)
    [[0, 1, 3, 2], [4, 5, 6]]
    >>> connected_components(test_graph_2)
    [[0, 1, 3, 2], [4], [5]]
    """
    seen = [False] * len(graph)
    # The ``if`` re-reads ``seen`` on every iteration, so vertices swallowed
    # by an earlier component's DFS are skipped.
    return [dfs(graph, vertex, seen) for vertex in range(len(graph)) if not seen[vertex]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
leetcode 133 clone graph https leetcode comproblemsclonegraph given a reference of a node in a connected undirected graph return a deep copy clone of the graph each node in the graph contains a value int and a list listnode of its neighbors node3 neighbors hashnode3 0 true this function returns a clone of a connected undirected graph clonegraphnode1 nodevalue1 neighbors clonegraphnode1 node2 nodevalue1 neighborsnodevalue2 neighbors clonegraphnone is none true node 3 neighbors hash node 3 0 true this function returns a clone of a connected undirected graph clone_graph node 1 node value 1 neighbors clone_graph node 1 node 2 node value 1 neighbors node value 2 neighbors clone_graph none is none true map nodes to clones
from dataclasses import dataclass @dataclass class Node: value: int = 0 neighbors: list["Node"] | None = None def __post_init__(self) -> None: self.neighbors = self.neighbors or [] def __hash__(self) -> int: return id(self) def clone_graph(node: Node | None) -> Node | None: if not node: return None originals_to_clones = {} stack = [node] while stack: original = stack.pop() if original in originals_to_clones: continue originals_to_clones[original] = Node(original.value) stack.extend(original.neighbors or []) for original, clone in originals_to_clones.items(): for neighbor in original.neighbors or []: cloned_neighbor = originals_to_clones[neighbor] if not clone.neighbors: clone.neighbors = [] clone.neighbors.append(cloned_neighbor) return originals_to_clones[node] if __name__ == "__main__": import doctest doctest.testmod()
non recursive implementation of a dfs algorithm from future import annotations def depthfirstsearchgraph dict start str setstr explored stack setstart start while stack v stack pop explored addv differences from bfs 1 pop last element instead of first one 2 add adjacent elements to stack without exploring them for adj in reversedgraphv if adj not in explored stack appendadj return explored g a b c d b a d e c a f d b d e b f f c e g g f if name main import doctest doctest testmod printdepthfirstsearchg a depth first search on graph param graph directed graph in dictionary format param start starting vertex as a string returns the trace of the search input_g a b c d b a d e c a f d b d e b f f c e g g f output_g list a b c d e f g all x in output_g for x in list depth_first_search input_g a true all x in output_g for x in list depth_first_search input_g g true differences from bfs 1 pop last element instead of first one 2 add adjacent elements to stack without exploring them
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative (non-recursive) depth-first search.

    :param graph: directed graph as a mapping from a vertex to its neighbors
    :param start: the starting vertex
    :return: the set of every vertex reached from ``start``

    >>> input_G = {"A": ["B", "C", "D"], "B": ["A", "D", "E"], "C": ["A", "F"],
    ...            "D": ["B", "D"], "E": ["B", "F"], "F": ["C", "E", "G"],
    ...            "G": ["F"]}
    >>> sorted(depth_first_search(input_G, "A"))
    ['A', 'B', 'C', 'D', 'E', 'F', 'G']
    """
    # BUG FIX: set(start) iterated the start vertex as a string, so a
    # multi-character name such as "AB" was split into {"A", "B"} and the
    # search then popped vertices that do not exist in the graph.
    explored, stack = {start}, [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop the LAST element instead of the first one;
        # 2) push adjacent vertices without exploring them yet.
        # Reversal makes neighbors get popped in their listed order.
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
usrbinpython omkar pathak class graph def initself self vertex for printing the graph vertices def printgraphself none printself vertex for i in self vertex printi joinstrj for j in self vertexi for adding the edge between two vertices def addedgeself fromvertex int tovertex int none check if vertex is already present if fromvertex in self vertex self vertexfromvertex appendtovertex else else make a new vertex self vertexfromvertex tovertex def dfsself none visited array for storing already visited nodes visited false lenself vertex call the recursive helper function for i in rangelenself vertex if not visitedi self dfsrecursivei visited def dfsrecursiveself startvertex int visited list none mark start vertex as visited visitedstartvertex true printstartvertex end recur for all the vertices that are adjacent to this node for i in self vertex if not visitedi print end self dfsrecursivei visited if name main import doctest doctest testmod g graph g addedge0 1 g addedge0 2 g addedge1 2 g addedge2 0 g addedge2 3 g addedge3 3 g printgraph printdfs g dfs usr bin python omkar pathak for printing the graph vertices print the graph vertices example g graph g add_edge 0 1 g add_edge 0 2 g add_edge 1 2 g add_edge 2 0 g add_edge 2 3 g add_edge 3 3 g print_graph 0 1 2 1 2 2 0 3 3 3 0 1 2 1 2 2 0 3 3 3 for adding the edge between two vertices add an edge between two vertices param from_vertex the source vertex param to_vertex the destination vertex example g graph g add_edge 0 1 g add_edge 0 2 g print_graph 0 1 2 0 1 2 check if vertex is already present else make a new vertex perform depth first search dfs traversal on the graph and print the visited vertices example g graph g add_edge 0 1 g add_edge 0 2 g add_edge 1 2 g add_edge 2 0 g add_edge 2 3 g add_edge 3 3 g dfs 0 1 2 3 visited array for storing already visited nodes call the recursive helper function perform a recursive depth first search dfs traversal on the graph param start_vertex the starting vertex for the traversal 
param visited a list to track visited vertices example g graph g add_edge 0 1 g add_edge 0 2 g add_edge 1 2 g add_edge 2 0 g add_edge 2 3 g add_edge 3 3 visited false len g vertex g dfs_recursive 0 visited 0 1 2 3 mark start vertex as visited recur for all the vertices that are adjacent to this node
class Graph:
    """Directed graph stored as an adjacency dict, with a print-based DFS."""

    def __init__(self):
        # Maps each source vertex to the list of its destination vertices.
        # NOTE(review): dfs() assumes the keys are exactly 0..n-1 — confirm
        # with callers before adding non-contiguous vertex labels.
        self.vertex = {}

    def print_graph(self) -> None:
        """Print the raw adjacency dict, then one "u -> v -> ..." line per vertex."""
        print(self.vertex)
        for source in self.vertex:
            targets = " -> ".join(str(dest) for dest in self.vertex[source])
            print(source, " -> ", targets)

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        """Record a directed edge from ``from_vertex`` to ``to_vertex``."""
        # Creates the adjacency list on first use of the source vertex.
        self.vertex.setdefault(from_vertex, []).append(to_vertex)

    def dfs(self) -> None:
        """Print every vertex in depth-first order, covering all components."""
        seen = [False] * len(self.vertex)
        for start in range(len(self.vertex)):
            if not seen[start]:
                self.dfs_recursive(start, seen)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        """Visit ``start_vertex``, then recurse into every still-unvisited vertex.

        NOTE: the loop walks every vertex key — not just the neighbors of
        ``start_vertex`` — which is the original traversal behavior.
        """
        visited[start_vertex] = True
        print(start_vertex, end="")
        for candidate in self.vertex:
            if not visited[candidate]:
                print(" ", end="")
                self.dfs_recursive(candidate, visited)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)
    g.print_graph()
    print("DFS:")
    g.dfs()
pseudocode dijkstragraph g start vertex s destination vertex d all nodes initially unexplored 1 let h min heap data structure initialized with 0 and s here 0 indicates the distance from start vertex s 2 while h is nonempty 3 remove the first node and cost of h call it u and cost 4 if u has been previously explored 5 go to the while loop line 2 once a node is explored there is no need to make it again 6 mark u as explored 7 if u is d 8 return cost total cost from start to destination vertex 9 for each edgeu v ccost of edgeu v for v in graphu 10 if v explored 11 go to next v in line 9 12 totalcost cost c 13 add totalcost v to h you can think at cost as a distance where dijkstra finds the shortest distance between vertices s and v in a graph g the use of a min heap as h guarantees that if a vertex has already been explored there will be no other path with shortest distance that happens because heapq heappop will always return the next vertex with the shortest distance considering that the heap stores not only the distance between previous vertex and current vertex but the entire distance between each vertex that makes up the path from start vertex to target vertex return the cost of the shortest path between vertices start and end dijkstrag e c 6 dijkstrag2 e f 3 dijkstrag3 e f 3 g2 b c 1 c d 1 d f 1 e b 1 f 3 f r layout of g3 e 1 b 1 c 1 d 1 f 2 g 1 return the cost of the shortest path between vertices start and end dijkstra g e c 6 dijkstra g2 e f 3 dijkstra g3 e f 3 cost from start node end node layout of g2 e 1 b 1 c 1 d 1 f 3 layout of g3 e 1 b 1 c 1 d 1 f 2 g 1 e 3 f 3 c 6 e 3 f 3 e 2 g 1 f 3
import heapq


def dijkstra(graph, start, end):
    """Return the cost of the cheapest path between vertices ``start`` and ``end``.

    A min-heap of (cost-so-far, vertex) pairs guarantees that the first time a
    vertex is popped, the popped cost is the shortest distance to it.
    Returns -1 when ``end`` is unreachable.

    >>> dijkstra(G, "E", "C")
    6
    >>> dijkstra(G2, "E", "F")
    3
    >>> dijkstra(G3, "E", "F")
    3
    """
    frontier = [(0, start)]
    settled = set()
    while frontier:
        cost, vertex = heapq.heappop(frontier)
        # A vertex may appear several times in the heap; only the first
        # (cheapest) occurrence is processed.
        if vertex in settled:
            continue
        settled.add(vertex)
        if vertex == end:
            return cost
        for successor, weight in graph[vertex]:
            if successor not in settled:
                heapq.heappush(frontier, (cost + weight, successor))
    return -1


G = {
    "A": [["B", 2], ["C", 5]],
    "B": [["A", 2], ["D", 3], ["E", 1], ["F", 1]],
    "C": [["A", 5], ["F", 3]],
    "D": [["B", 3]],
    "E": [["B", 4], ["F", 3]],
    "F": [["C", 3], ["E", 3]],
}

# Layout of G2:  E -1-> B -1-> C -1-> D -1-> F,  and a direct E -3-> F edge.
G2 = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["F", 3]],
    "F": [],
}

# Layout of G3:  E -1-> B -1-> C -1-> D -1-> F,  plus E -2-> G -1-> F.
G3 = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}

short_distance = dijkstra(G, "E", "C")
print(short_distance)

short_distance = dijkstra(G2, "E", "F")
print(short_distance)

short_distance = dijkstra(G3, "E", "F")
print(short_distance)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
title dijkstra s algorithm for finding single source shortest path from scratch shubham malik references https en wikipedia orgwikidijkstra27salgorithm for storing the vertex set to retrieve node with the lowest distance based on min heap priority queue class constructor method examples priorityqueuetest priorityqueue priorityqueuetest cursize 0 priorityqueuetest array priorityqueuetest pos conditional boolean method to determine if the priority queue is empty or not examples priorityqueuetest priorityqueue priorityqueuetest isempty true priorityqueuetest insert2 a priorityqueuetest isempty false sorts the queue array so that the minimum element is root examples priorityqueuetest priorityqueue priorityqueuetest cursize 3 priorityqueuetest pos a 0 b 1 c 2 priorityqueuetest array 5 a 10 b 15 c priorityqueuetest minheapify0 traceback most recent call last typeerror list object is not callable priorityqueuetest array 5 a 10 b 15 c priorityqueuetest array 10 a 5 b 15 c priorityqueuetest minheapify0 traceback most recent call last typeerror list object is not callable priorityqueuetest array 10 a 5 b 15 c priorityqueuetest array 10 a 15 b 5 c priorityqueuetest minheapify0 traceback most recent call last typeerror list object is not callable priorityqueuetest array 10 a 15 b 5 c priorityqueuetest array 10 a 5 b priorityqueuetest cursize lenpriorityqueuetest array priorityqueuetest pos a 0 b 1 priorityqueuetest minheapify0 traceback most recent call last typeerror list object is not callable priorityqueuetest array 10 a 5 b inserts a node into the priority queue examples priorityqueuetest priorityqueue priorityqueuetest insert10 a priorityqueuetest array 10 a priorityqueuetest insert15 b priorityqueuetest array 10 a 15 b priorityqueuetest insert5 c priorityqueuetest array 5 c 10 a 15 b removes and returns the min element at top of priority queue examples priorityqueuetest priorityqueue priorityqueuetest array 10 a 15 b priorityqueuetest cursize lenpriorityqueuetest array 
priorityqueuetest pos a 0 b 1 priorityqueuetest insert5 c priorityqueuetest extractmin c priorityqueuetest array0 15 b returns the index of left child examples priorityqueuetest priorityqueue priorityqueuetest left0 1 priorityqueuetest left1 3 returns the index of right child examples priorityqueuetest priorityqueue priorityqueuetest right0 2 priorityqueuetest right1 4 returns the index of parent examples priorityqueuetest priorityqueue priorityqueuetest par1 0 priorityqueuetest par2 1 priorityqueuetest par4 2 swaps array elements at indices i and j update the pos examples priorityqueuetest priorityqueue priorityqueuetest array 10 a 15 b priorityqueuetest cursize lenpriorityqueuetest array priorityqueuetest pos a 0 b 1 priorityqueuetest swap0 1 priorityqueuetest array 15 b 10 a priorityqueuetest pos a 1 b 0 decrease the key value for a given tuple assuming the newd is at most oldd examples priorityqueuetest priorityqueue priorityqueuetest array 10 a 15 b priorityqueuetest cursize lenpriorityqueuetest array priorityqueuetest pos a 0 b 1 priorityqueuetest decreasekey10 a 5 priorityqueuetest array 5 a 15 b assuming the newd is atmost oldd graph class constructor examples graphtest graph1 graphtest numnodes 1 graphtest dist 0 graphtest par 1 graphtest adjlist to store the distance from source vertex add edge going from node u to v and v to u with weight w u w v v w u examples graphtest graph1 graphtest addedge1 2 1 graphtest addedge2 3 2 graphtest adjlist 1 2 1 2 1 1 3 2 3 2 2 check if u already in graph assuming undirected graph show the graph u vw examples graphtest graph1 graphtest addedge1 2 1 graphtest showgraph 1 21 2 11 graphtest addedge2 3 2 graphtest showgraph 1 21 2 11 32 3 22 dijkstra algorithm examples graphtest graph3 graphtest addedge0 1 2 graphtest addedge1 2 2 graphtest dijkstra0 distance from node 0 node 0 has distance 0 node 1 has distance 2 node 2 has distance 4 graphtest dist 0 2 4 graphtest graph2 graphtest addedge0 1 2 graphtest dijkstra0 distance 
from node 0 node 0 has distance 0 node 1 has distance 2 graphtest dist 0 2 graphtest graph3 graphtest addedge0 1 2 graphtest dijkstra0 distance from node 0 node 0 has distance 0 node 1 has distance 2 node 2 has distance 0 graphtest dist 0 2 0 graphtest graph3 graphtest addedge0 1 2 graphtest addedge1 2 2 graphtest addedge0 2 1 graphtest dijkstra0 distance from node 0 node 0 has distance 0 node 1 has distance 2 node 2 has distance 1 graphtest dist 0 2 1 graphtest graph4 graphtest addedge0 1 4 graphtest addedge1 2 2 graphtest addedge2 3 1 graphtest addedge0 2 3 graphtest dijkstra0 distance from node 0 node 0 has distance 0 node 1 has distance 4 node 2 has distance 3 node 3 has distance 4 graphtest dist 0 4 3 4 graphtest graph4 graphtest addedge0 1 4 graphtest addedge1 2 2 graphtest addedge2 3 1 graphtest addedge0 2 7 graphtest dijkstra0 distance from node 0 node 0 has distance 0 node 1 has distance 4 node 2 has distance 6 node 3 has distance 7 graphtest dist 0 4 6 7 flush old junk values in par src is the source node update the distance of all the neighbours of u and if their prev dist was infinity then push them in q show the shortest distances from src show the distances from src to all other nodes in a graph examples graphtest graph1 graphtest showdistances0 distance from node 0 node 0 has distance 0 shows the shortest path from src to dest warning use it after calling dijkstra examples graphtest graph4 graphtest addedge0 1 1 graphtest addedge1 2 2 graphtest addedge2 3 3 graphtest dijkstra0 distance from node 0 node 0 has distance 0 node 1 has distance 1 node 2 has distance 3 node 3 has distance 6 graphtest showpath0 3 doctest normalizewhitespace path to reach 3 from 0 0 1 2 3 total cost of path 6 backtracking from dest to src output 0 14 78 1 04 28 711 7 08 111 61 87 2 18 37 82 54 3 27 49 514 8 22 66 77 5 24 314 410 62 4 39 510 6 52 71 86 distance from node 0 node 0 has distance 0 node 1 has distance 4 node 2 has distance 12 node 3 has distance 19 node 4 has 
distance 21 node 5 has distance 11 node 6 has distance 9 node 7 has distance 8 node 8 has distance 14 path to reach 4 from 0 0 7 6 5 4 total cost of path 21 title dijkstra s algorithm for finding single source shortest path from scratch shubham malik references https en wikipedia org wiki dijkstra 27s_algorithm for storing the vertex set to retrieve node with the lowest distance based on min heap priority queue class constructor method examples priority_queue_test priorityqueue priority_queue_test cur_size 0 priority_queue_test array priority_queue_test pos to store the pos of node in array conditional boolean method to determine if the priority queue is empty or not examples priority_queue_test priorityqueue priority_queue_test is_empty true priority_queue_test insert 2 a priority_queue_test is_empty false sorts the queue array so that the minimum element is root examples priority_queue_test priorityqueue priority_queue_test cur_size 3 priority_queue_test pos a 0 b 1 c 2 priority_queue_test array 5 a 10 b 15 c priority_queue_test min_heapify 0 traceback most recent call last typeerror list object is not callable priority_queue_test array 5 a 10 b 15 c priority_queue_test array 10 a 5 b 15 c priority_queue_test min_heapify 0 traceback most recent call last typeerror list object is not callable priority_queue_test array 10 a 5 b 15 c priority_queue_test array 10 a 15 b 5 c priority_queue_test min_heapify 0 traceback most recent call last typeerror list object is not callable priority_queue_test array 10 a 15 b 5 c priority_queue_test array 10 a 5 b priority_queue_test cur_size len priority_queue_test array priority_queue_test pos a 0 b 1 priority_queue_test min_heapify 0 traceback most recent call last typeerror list object is not callable priority_queue_test array 10 a 5 b inserts a node into the priority queue examples priority_queue_test priorityqueue priority_queue_test insert 10 a priority_queue_test array 10 a priority_queue_test insert 15 b 
priority_queue_test array 10 a 15 b priority_queue_test insert 5 c priority_queue_test array 5 c 10 a 15 b removes and returns the min element at top of priority queue examples priority_queue_test priorityqueue priority_queue_test array 10 a 15 b priority_queue_test cur_size len priority_queue_test array priority_queue_test pos a 0 b 1 priority_queue_test insert 5 c priority_queue_test extract_min c priority_queue_test array 0 15 b returns the index of left child examples priority_queue_test priorityqueue priority_queue_test left 0 1 priority_queue_test left 1 3 returns the index of right child examples priority_queue_test priorityqueue priority_queue_test right 0 2 priority_queue_test right 1 4 returns the index of parent examples priority_queue_test priorityqueue priority_queue_test par 1 0 priority_queue_test par 2 1 priority_queue_test par 4 2 swaps array elements at indices i and j update the pos examples priority_queue_test priorityqueue priority_queue_test array 10 a 15 b priority_queue_test cur_size len priority_queue_test array priority_queue_test pos a 0 b 1 priority_queue_test swap 0 1 priority_queue_test array 15 b 10 a priority_queue_test pos a 1 b 0 decrease the key value for a given tuple assuming the new_d is at most old_d examples priority_queue_test priorityqueue priority_queue_test array 10 a 15 b priority_queue_test cur_size len priority_queue_test array priority_queue_test pos a 0 b 1 priority_queue_test decrease_key 10 a 5 priority_queue_test array 5 a 15 b assuming the new_d is atmost old_d graph class constructor examples graph_test graph 1 graph_test num_nodes 1 graph_test dist 0 graph_test par 1 graph_test adjlist to store graph u v w number of nodes in graph to store the distance from source vertex to store the path add edge going from node u to v and v to u with weight w u w v v w u examples graph_test graph 1 graph_test add_edge 1 2 1 graph_test add_edge 2 3 2 graph_test adjlist 1 2 1 2 1 1 3 2 3 2 2 check if u already in graph assuming 
undirected graph show the graph u v w examples graph_test graph 1 graph_test add_edge 1 2 1 graph_test show_graph 1 2 1 2 1 1 graph_test add_edge 2 3 2 graph_test show_graph 1 2 1 2 1 1 3 2 3 2 2 dijkstra algorithm examples graph_test graph 3 graph_test add_edge 0 1 2 graph_test add_edge 1 2 2 graph_test dijkstra 0 distance from node 0 node 0 has distance 0 node 1 has distance 2 node 2 has distance 4 graph_test dist 0 2 4 graph_test graph 2 graph_test add_edge 0 1 2 graph_test dijkstra 0 distance from node 0 node 0 has distance 0 node 1 has distance 2 graph_test dist 0 2 graph_test graph 3 graph_test add_edge 0 1 2 graph_test dijkstra 0 distance from node 0 node 0 has distance 0 node 1 has distance 2 node 2 has distance 0 graph_test dist 0 2 0 graph_test graph 3 graph_test add_edge 0 1 2 graph_test add_edge 1 2 2 graph_test add_edge 0 2 1 graph_test dijkstra 0 distance from node 0 node 0 has distance 0 node 1 has distance 2 node 2 has distance 1 graph_test dist 0 2 1 graph_test graph 4 graph_test add_edge 0 1 4 graph_test add_edge 1 2 2 graph_test add_edge 2 3 1 graph_test add_edge 0 2 3 graph_test dijkstra 0 distance from node 0 node 0 has distance 0 node 1 has distance 4 node 2 has distance 3 node 3 has distance 4 graph_test dist 0 4 3 4 graph_test graph 4 graph_test add_edge 0 1 4 graph_test add_edge 1 2 2 graph_test add_edge 2 3 1 graph_test add_edge 0 2 7 graph_test dijkstra 0 distance from node 0 node 0 has distance 0 node 1 has distance 4 node 2 has distance 6 node 3 has distance 7 graph_test dist 0 4 6 7 flush old junk values in par src is the source node dist from src node infinity returns node with the min dist from source update the distance of all the neighbours of u and if their prev dist was infinity then push them in q show the shortest distances from src show the distances from src to all other nodes in a graph examples graph_test graph 1 graph_test show_distances 0 distance from node 0 node 0 has distance 0 shows the shortest path from src to dest 
warning use it after calling dijkstra examples graph_test graph 4 graph_test add_edge 0 1 1 graph_test add_edge 1 2 2 graph_test add_edge 2 3 3 graph_test dijkstra 0 distance from node 0 node 0 has distance 0 node 1 has distance 1 node 2 has distance 3 node 3 has distance 6 graph_test show_path 0 3 doctest normalize_whitespace path to reach 3 from 0 0 1 2 3 total cost of path 6 backtracking from dest to src output 0 1 4 7 8 1 0 4 2 8 7 11 7 0 8 1 11 6 1 8 7 2 1 8 3 7 8 2 5 4 3 2 7 4 9 5 14 8 2 2 6 6 7 7 5 2 4 3 14 4 10 6 2 4 3 9 5 10 6 5 2 7 1 8 6 distance from node 0 node 0 has distance 0 node 1 has distance 4 node 2 has distance 12 node 3 has distance 19 node 4 has distance 21 node 5 has distance 11 node 6 has distance 9 node 7 has distance 8 node 8 has distance 14 path to reach 4 from 0 0 7 6 5 4 total cost of path 21
import math
import sys


class PriorityQueue:
    """Min-heap priority queue of (distance, node) tuples.

    ``pos`` maps each node to its current index in ``array`` so that
    ``decrease_key`` can locate it in O(1).
    """

    def __init__(self):
        self.cur_size = 0  # number of elements currently in the heap
        self.array = []  # heap of (distance, node) tuples
        self.pos = {}  # node -> index of that node in ``array``

    def is_empty(self):
        """Return True when the queue holds no elements."""
        return self.cur_size == 0

    def min_heapify(self, idx):
        """Sift the element at ``idx`` down until the min-heap property holds."""
        lc = self.left(idx)
        rc = self.right(idx)
        # BUG FIX: the list was being *called* (self.array(lc)) instead of
        # indexed, which raised TypeError whenever a child slot existed.
        if lc < self.cur_size and self.array[lc][0] < self.array[idx][0]:
            smallest = lc
        else:
            smallest = idx
        if rc < self.cur_size and self.array[rc][0] < self.array[smallest][0]:
            smallest = rc
        if smallest != idx:
            self.swap(idx, smallest)
            self.min_heapify(smallest)

    def insert(self, tup):
        """Insert a (distance, node) tuple into the queue."""
        # Append a sentinel key, then lower it to the real key so the
        # bubble-up in decrease_key places it correctly.
        self.pos[tup[1]] = self.cur_size
        self.cur_size += 1
        self.array.append((sys.maxsize, tup[1]))
        self.decrease_key((sys.maxsize, tup[1]), tup[0])

    def extract_min(self):
        """Remove and return the node with the smallest distance."""
        min_node = self.array[0][1]
        # BUG FIX: pop the last slot instead of leaving a stale tail entry,
        # so ``array`` and ``cur_size`` stay aligned for later inserts.
        last = self.array.pop()
        self.cur_size -= 1
        if self.cur_size > 0:
            self.array[0] = last
            # BUG FIX: refresh ``pos`` for the element moved to the root,
            # otherwise a later decrease_key would update the wrong slot.
            self.pos[last[1]] = 0
            # BUG FIX: restore the heap property from the root (index 0);
            # the original heapified from index 1 and could leave a
            # non-minimal element on top.
            self.min_heapify(0)
        del self.pos[min_node]
        return min_node

    def left(self, i):
        """Return the index of the left child of ``i``."""
        return 2 * i + 1

    def right(self, i):
        """Return the index of the right child of ``i``."""
        return 2 * i + 2

    def par(self, i):
        """Return the index of the parent of ``i``.

        BUG FIX: for a 0-indexed heap the parent is (i - 1) // 2;
        math.floor(i / 2) was wrong for every even index (e.g. par(2) must
        be 0, not 1), corrupting the bubble-up in decrease_key.
        """
        return (i - 1) // 2

    def swap(self, i, j):
        """Swap the elements at indices ``i`` and ``j`` and update ``pos``."""
        self.pos[self.array[i][1]] = j
        self.pos[self.array[j][1]] = i
        self.array[i], self.array[j] = self.array[j], self.array[i]

    def decrease_key(self, tup, new_d):
        """Lower the key of ``tup``'s node to ``new_d`` (assumed <= old key)."""
        idx = self.pos[tup[1]]
        self.array[idx] = (new_d, tup[1])
        # Bubble the updated entry up while it is smaller than its parent.
        while idx > 0 and self.array[self.par(idx)][0] > self.array[idx][0]:
            self.swap(idx, self.par(idx))
            idx = self.par(idx)


class Graph:
    """Undirected weighted graph with Dijkstra single-source shortest paths."""

    def __init__(self, num):
        self.adjList = {}  # u -> list of (v, w) pairs
        self.num_nodes = num  # number of nodes in the graph
        self.dist = [0] * self.num_nodes  # shortest distance from the source
        self.par = [-1] * self.num_nodes  # parent of each node on the path

    def add_edge(self, u, v, w):
        """Add an undirected edge of weight ``w``: u -w-> v and v -w-> u."""
        if u in self.adjList:
            self.adjList[u].append((v, w))
        else:
            self.adjList[u] = [(v, w)]
        # Assuming an undirected graph, mirror the edge.
        if v in self.adjList:
            self.adjList[v].append((u, w))
        else:
            self.adjList[v] = [(u, w)]

    def show_graph(self):
        """Print each vertex followed by its "v(w)" neighbor list."""
        for u in self.adjList:
            print(u, "->", " -> ".join(f"{v}({w})" for v, w in self.adjList[u]))

    def dijkstra(self, src):
        """Compute shortest distances from ``src`` into ``self.dist`` and print them."""
        # Flush old junk values in par.
        self.par = [-1] * self.num_nodes
        # src is the source node.
        self.dist[src] = 0
        q = PriorityQueue()
        q.insert((0, src))
        for u in self.adjList:
            if u != src:
                self.dist[u] = sys.maxsize  # distance from src: infinity
                self.par[u] = -1
        while not q.is_empty():
            # Returns the node with the minimum distance from the source.
            u = q.extract_min()
            # Relax every neighbor of u; nodes seen for the first time
            # (previous distance infinity) are inserted into the queue.
            for v, w in self.adjList[u]:
                new_dist = self.dist[u] + w
                if self.dist[v] > new_dist:
                    if self.dist[v] == sys.maxsize:
                        q.insert((new_dist, v))
                    else:
                        q.decrease_key((self.dist[v], v), new_dist)
                    self.dist[v] = new_dist
                    self.par[v] = u
        # Show the shortest distances from src.
        self.show_distances(src)

    def show_distances(self, src):
        """Print the distance from ``src`` to every node."""
        print(f"Distance from node: {src}")
        for u in range(self.num_nodes):
            print(f"Node {u} has distance: {self.dist[u]}")

    def show_path(self, src, dest):
        """Print the shortest path from ``src`` to ``dest``.

        WARNING: only valid after calling ``dijkstra``.
        """
        path = []
        cost = 0
        temp = dest
        # Backtrack from dest to src via the parent array.
        while self.par[temp] != -1:
            path.append(temp)
            if temp != src:
                for v, w in self.adjList[temp]:
                    if v == self.par[temp]:
                        cost += w
                        break
            temp = self.par[temp]
        path.append(src)
        path.reverse()
        print(f"----Path to reach {dest} from {src}----")
        for u in path:
            print(f"{u}", end=" ")
            if u != dest:
                print("-> ", end="")
        print("\nTotal cost of path: ", cost)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    graph = Graph(9)
    graph.add_edge(0, 1, 4)
    graph.add_edge(0, 7, 8)
    graph.add_edge(1, 2, 8)
    graph.add_edge(1, 7, 11)
    graph.add_edge(2, 3, 7)
    graph.add_edge(2, 8, 2)
    graph.add_edge(2, 5, 4)
    graph.add_edge(3, 4, 9)
    graph.add_edge(3, 5, 14)
    graph.add_edge(4, 5, 10)
    graph.add_edge(5, 6, 2)
    graph.add_edge(6, 7, 1)
    graph.add_edge(6, 8, 6)
    graph.add_edge(7, 8, 7)
    graph.show_graph()
    graph.dijkstra(0)
    graph.show_path(0, 4)
graph graph2 graph vertices 2 lengraph graph 2 lengraph graph0 2 graph0 printsolution doctest normalizewhitespace vertex distance from source a utility function to find the vertex with minimum distance value from the set of vertices not yet included in shortest path tree graph3 minimumdistance1 2 3 false false true 0 initialize minimum distance for next node search not nearest vertex not in the shortest path tree function that implements dijkstra s single source shortest path algorithm for a graph represented using adjacency matrix representation graph4 dijkstra1 doctest normalizewhitespace vertex distance from source 0 10000000 1 0 2 10000000 3 10000000 update dist value of the adjacent vertices of the picked vertex only if the current distance is greater than new distance and the vertex in not in the shortest path tree graph graph 2 graph vertices 2 len graph graph 2 len graph graph 0 2 graph 0 print_solution doctest normalize_whitespace vertex distance from source a utility function to find the vertex with minimum distance value from the set of vertices not yet included in shortest path tree graph 3 minimum_distance 1 2 3 false false true 0 initialize minimum distance for next node search not nearest vertex not in the shortest path tree function that implements dijkstra s single source shortest path algorithm for a graph represented using adjacency matrix representation graph 4 dijkstra 1 doctest normalize_whitespace vertex distance from source 0 10000000 1 0 2 10000000 3 10000000 distances from the source update dist value of the adjacent vertices of the picked vertex only if the current distance is greater than new distance and the vertex in not in the shortest path tree
from __future__ import annotations


class Graph:
    """Dijkstra's single-source shortest paths on an adjacency-matrix graph.

    ``self.graph[u][v]`` holds the weight of edge ``u -> v``; 0 means "no
    edge".  A distance of 10000000 (``int(1e7)``) represents "infinity" for
    unreachable vertices, matching the printed output format.
    """

    def __init__(self, vertices: int) -> None:
        self.vertices = vertices  # number of vertices
        self.graph = [[0] * vertices for _ in range(vertices)]  # adjacency matrix

    def print_solution(self, distances_from_source: list[int]) -> None:
        """Print every vertex together with its distance from the source.

        >>> Graph(0).print_solution([])  # doctest: +NORMALIZE_WHITESPACE
        Vertex   Distance from Source
        """
        print("Vertex \t Distance from Source")
        for vertex in range(self.vertices):
            print(vertex, "\t\t", distances_from_source[vertex])

    def minimum_distance(
        self, distances_from_source: list[int], visited: list[bool]
    ) -> int:
        """Return the not-yet-visited vertex with the smallest tentative distance.

        >>> Graph(3).minimum_distance([1, 2, 3], [False, False, True])
        0
        """
        # float("inf") (rather than a fixed 1e7 sentinel) keeps the selection
        # correct even when every remaining distance is >= 1e7.
        minimum = float("inf")
        min_index = 0
        for vertex in range(self.vertices):
            if distances_from_source[vertex] < minimum and not visited[vertex]:
                minimum = distances_from_source[vertex]
                min_index = vertex
        return min_index

    def dijkstra(self, source: int) -> None:
        """Compute and print shortest distances from ``source`` to all vertices.

        >>> Graph(4).dijkstra(1)  # doctest: +NORMALIZE_WHITESPACE
        Vertex   Distance from Source
        0        10000000
        1        0
        2        10000000
        3        10000000
        """
        distances = [int(1e7)] * self.vertices  # "infinity" everywhere
        distances[source] = 0
        visited = [False] * self.vertices

        for _ in range(self.vertices):
            u = self.minimum_distance(distances, visited)
            visited[u] = True

            # Relax every edge out of u towards vertices not yet finalised,
            # only when it yields a strictly shorter distance.
            for v in range(self.vertices):
                if (
                    self.graph[u][v] > 0
                    and not visited[v]
                    and distances[v] > distances[u] + self.graph[u][v]
                ):
                    distances[v] = distances[u] + self.graph[u][v]

        self.print_solution(distances)


if __name__ == "__main__":
    graph = Graph(9)
    graph.graph = [
        [0, 4, 0, 0, 0, 0, 0, 8, 0],
        [4, 0, 8, 0, 0, 0, 0, 11, 0],
        [0, 8, 0, 7, 0, 4, 0, 0, 2],
        [0, 0, 7, 0, 9, 14, 0, 0, 0],
        [0, 0, 0, 9, 0, 10, 0, 0, 0],
        [0, 0, 4, 14, 10, 0, 2, 0, 0],
        [0, 0, 0, 0, 0, 2, 0, 1, 6],
        [8, 11, 0, 0, 0, 0, 1, 0, 7],
        [0, 0, 2, 0, 0, 0, 6, 7, 0],
    ]
    graph.dijkstra(0)
this script implements the dijkstra algorithm on a binary grid the grid consists of 0s and 1s where 1 represents a walkable node and 0 represents an obstacle the algorithm finds the shortest path from a start node to a destination node diagonal movement can be allowed or disallowed implements dijkstra s algorithm on a binary grid args grid np ndarray a 2d numpy array representing the grid 1 represents a walkable node and 0 represents an obstacle source tupleint int a tuple representing the start node destination tupleint int a tuple representing the destination node allowdiagonal bool a boolean determining whether diagonal movements are allowed returns tupleunionfloat int listtupleint int the shortest distance from the start node to the destination node and the shortest path as a list of nodes dijkstranp array1 1 1 0 1 0 0 1 1 0 0 2 2 false 4 0 0 0 0 1 1 1 2 1 2 2 dijkstranp array1 1 1 0 1 0 0 1 1 0 0 2 2 true 2 0 0 0 1 1 2 2 dijkstranp array1 1 1 0 0 1 0 1 1 0 0 2 2 false 4 0 0 0 0 1 0 2 1 2 2 2 implements dijkstra s algorithm on a binary grid args grid np ndarray a 2d numpy array representing the grid 1 represents a walkable node and 0 represents an obstacle source tuple int int a tuple representing the start node destination tuple int int a tuple representing the destination node allow_diagonal bool a boolean determining whether diagonal movements are allowed returns tuple union float int list tuple int int the shortest distance from the start node to the destination node and the shortest path as a list of nodes dijkstra np array 1 1 1 0 1 0 0 1 1 0 0 2 2 false 4 0 0 0 0 1 1 1 2 1 2 2 dijkstra np array 1 1 1 0 1 0 0 1 1 0 0 2 2 true 2 0 0 0 1 1 2 2 dijkstra np array 1 1 1 0 0 1 0 1 1 0 0 2 2 false 4 0 0 0 0 1 0 2 1 2 2 2 add the source manually
from heapq import heappop, heappush import numpy as np def dijkstra( grid: np.ndarray, source: tuple[int, int], destination: tuple[int, int], allow_diagonal: bool, ) -> tuple[float | int, list[tuple[int, int]]]: rows, cols = grid.shape dx = [-1, 1, 0, 0] dy = [0, 0, -1, 1] if allow_diagonal: dx += [-1, -1, 1, 1] dy += [-1, 1, -1, 1] queue, visited = [(0, source)], set() matrix = np.full((rows, cols), np.inf) matrix[source] = 0 predecessors = np.empty((rows, cols), dtype=object) predecessors[source] = None while queue: (dist, (x, y)) = heappop(queue) if (x, y) in visited: continue visited.add((x, y)) if (x, y) == destination: path = [] while (x, y) != source: path.append((x, y)) x, y = predecessors[x, y] path.append(source) path.reverse() return matrix[destination], path for i in range(len(dx)): nx, ny = x + dx[i], y + dy[i] if 0 <= nx < rows and 0 <= ny < cols: next_node = grid[nx][ny] if next_node == 1 and matrix[nx, ny] > dist + 1: heappush(queue, (dist + 1, (nx, ny))) matrix[nx, ny] = dist + 1 predecessors[nx, ny] = (x, y) return np.inf, [] if __name__ == "__main__": import doctest doctest.testmod()
here we will add our edges containing with the following parameters vertex closest to source vertex closest to sink and flow capacity through that edge this is a sample depth first search to be used at maxflow here we calculate the flow that reaches the sink example to use will be a bipartite graph than it has the vertices near the source4 and the vertices near the sink4 here we make a graphs with 10 vertexsource and sink includes now we add the vertices next to the font in the font with 1 capacity in this edge source source vertices we will do the same thing for the vertices near the sink but from vertex to sink sink vertices sink finally we add the verices near the sink to the vertices near the source source vertices sink vertices now we can know that is the maximum flowsource sink here we will add our edges containing with the following parameters vertex closest to source vertex closest to sink and flow capacity through that edge this is a sample depth first search to be used at max_flow here we calculate the flow that reaches the sink noqa e741 l 30 maybe faster for random data example to use will be a bipartite graph than it has the vertices near the source 4 and the vertices near the sink 4 here we make a graphs with 10 vertex source and sink includes now we add the vertices next to the font in the font with 1 capacity in this edge source source vertices we will do the same thing for the vertices near the sink but from vertex to sink sink vertices sink finally we add the verices near the sink to the vertices near the source source vertices sink vertices now we can know that is the maximum flow source sink
INF = float("inf") class Dinic: def __init__(self, n): self.lvl = [0] * n self.ptr = [0] * n self.q = [0] * n self.adj = [[] for _ in range(n)] def add_edge(self, a, b, c, rcap=0): self.adj[a].append([b, len(self.adj[b]), c, 0]) self.adj[b].append([a, len(self.adj[a]) - 1, rcap, 0]) def depth_first_search(self, vertex, sink, flow): if vertex == sink or not flow: return flow for i in range(self.ptr[vertex], len(self.adj[vertex])): e = self.adj[vertex][i] if self.lvl[e[0]] == self.lvl[vertex] + 1: p = self.depth_first_search(e[0], sink, min(flow, e[2] - e[3])) if p: self.adj[vertex][i][3] += p self.adj[e[0]][e[1]][3] -= p return p self.ptr[vertex] = self.ptr[vertex] + 1 return 0 def max_flow(self, source, sink): flow, self.q[0] = 0, source for l in range(31): while True: self.lvl, self.ptr = [0] * len(self.q), [0] * len(self.q) qi, qe, self.lvl[source] = 0, 1, 1 while qi < qe and not self.lvl[sink]: v = self.q[qi] qi += 1 for e in self.adj[v]: if not self.lvl[e[0]] and (e[2] - e[3]) >> (30 - l): self.q[qe] = e[0] qe += 1 self.lvl[e[0]] = self.lvl[v] + 1 p = self.depth_first_search(source, sink, INF) while p: flow += p p = self.depth_first_search(source, sink, INF) if not self.lvl[sink]: break return flow graph = Dinic(10) source = 0 sink = 9 for vertex in range(1, 5): graph.add_edge(source, vertex, 1) for vertex in range(5, 9): graph.add_edge(vertex, sink, 1) for vertex in range(1, 5): graph.add_edge(vertex, vertex + 4, 1) print(graph.max_flow(source, sink))
the default weight is 1 if not assigned but all the implementation is weighted adding vertices and edges adding the weight is optional handles repetition handles if the input does not exist if no destination is meant the default value is 1 check if there is any non isolated nodes check if all the children are visited check if se have reached the starting point c is the count of nodes you want and if you leave it or pass 1 to the function the count will be random from 10 to 10000 every vertex has max 100 edges check if there is any non isolated nodes check if all the children are visited check if se have reached the starting point check if there is any non isolated nodes check if all the children are visited check if se have reached the starting point check if there is any non isolated nodes check if all the children are visited check if se have reached the starting point adding vertices and edges adding the weight is optional handles repetition check if the u exists if there already is a edge if u does not exist add the other way if there already is a edge if u does not exist handles if the input does not exist the other way round if no destination is meant the default value is 1 check if there is any non isolated nodes check if all the children are visited check if se have reached the starting point c is the count of nodes you want and if you leave it or pass 1 to the function the count will be random from 10 to 10000 every vertex has max 100 edges check if there is any non isolated nodes check if all the children are visited check if se have reached the starting point check if there is any non isolated nodes check if all the children are visited check if se have reached the starting point the default weight is 1 if not assigned but all the implementation is weighted adding vertices and edges adding the weight is optional handles repetition handles if the input does not exist if no destination is meant the default value is 1 check if there is any non isolated 
nodes check if all the children are visited check if se have reached the starting point c is the count of nodes you want and if you leave it or pass 1 to the function the count will be random from 10 to 10000 every vertex has max 100 edges check if there is any non isolated nodes check if all the children are visited check if se have reached the starting point check if there is any non isolated nodes check if all the children are visited check if se have reached the starting point check if there is any non isolated nodes check if all the children are visited check if se have reached the starting point adding vertices and edges adding the weight is optional handles repetition check if the u exists if there already is a edge if u does not exist add the other way if there already is a edge if u does not exist handles if the input does not exist the other way round if no destination is meant the default value is 1 check if there is any non isolated nodes check if all the children are visited check if se have reached the starting point c is the count of nodes you want and if you leave it or pass 1 to the function the count will be random from 10 to 10000 every vertex has max 100 edges check if there is any non isolated nodes check if all the children are visited check if se have reached the starting point check if there is any non isolated nodes check if all the children are visited check if se have reached the starting point
from collections import deque
from math import floor
from random import random
from time import time


class DirectedGraph:
    """Weighted directed graph stored as ``{node: [[weight, neighbour], ...]}``.

    The default edge weight is 1 when not supplied, but the whole
    implementation is weighted.
    """

    def __init__(self):
        # adjacency map: node -> list of [weight, destination] pairs
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add edge u -> v with weight w (default 1); duplicates are ignored."""
        if self.graph.get(u):
            # u already exists: append only if this exact [w, v] edge is new
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # u does not exist yet
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            # make sure the destination is known even with no outgoing edges
            self.graph[v] = []

    def all_nodes(self):
        """Return every node of the graph as a list."""
        return list(self.graph)

    def remove_pair(self, u, v):
        """Remove every u -> v edge; silently does nothing if absent."""
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    def dfs(self, s=-2, d=-1):
        """Iterative depth-first search from ``s`` (default: first node).

        Returns the visit order; stops early once destination ``d`` is
        reached (when ``d`` is given).
        """
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = next(iter(self.graph))
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # descend into an unvisited child, if any
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # all children visited: backtrack
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # back at the starting point with an empty stack: done
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with random edges.

        ``c`` is the number of nodes; leaving it at -1 picks a random count
        between 10 and about 10000.  Each vertex gets roughly up to 100
        outgoing edges.
        """
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first search from ``s`` (default: first node); returns the
        visit order."""
        d = deque()
        visited = []
        if s == -2:
            s = next(iter(self.graph))
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        """Return the number of edges pointing into ``u``."""
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        """Return the number of edges leaving ``u``."""
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        """DFS-based topological sort starting at ``s`` (default: first node).

        NOTE(review): assumes the graph is acyclic; behaviour on cyclic
        input is not defined — verify before relying on it.
        """
        stack = []
        visited = []
        if s == -2:
            s = next(iter(self.graph))
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []
        while True:
            # descend into an unvisited child, if any
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # all children visited: emit the node while backtracking
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        """Return nodes that appear to participate in a cycle.

        NOTE(review): ad-hoc iterative detection based on back edges seen
        while not backtracking; confirm results against known cycles before
        relying on completeness.
        """
        stack = []
        visited = []
        s = next(iter(self.graph))
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    # a visited non-parent ancestor while moving forward
                    # indicates a back edge: collect the nodes on the cycle
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # all children visited: backtrack
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True if a cycle is detected, False otherwise.

        NOTE(review): same ad-hoc scheme as ``cycle_nodes``; verify on known
        inputs before trusting edge cases.
        """
        stack = []
        visited = []
        s = next(iter(self.graph))
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    # a back edge while moving forward means there is a cycle
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # all children visited: backtrack
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        """Return the wall-clock time taken by ``dfs(s, e)`` in seconds."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Return the wall-clock time taken by ``bfs(s)`` in seconds."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin


class Graph:
    """Weighted undirected graph; every edge is stored in both directions."""

    def __init__(self):
        # adjacency map: node -> list of [weight, neighbour] pairs
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add undirected edge u <-> v with weight w; duplicates are ignored."""
        # check whether u already exists
        if self.graph.get(u):
            # only add if there is not already an identical edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # u does not exist yet
            self.graph[u] = [[w, v]]
        # add the edge the other way as well
        if self.graph.get(v):
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        """Remove edge u <-> v in both directions; no-op if absent."""
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # and the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    def dfs(self, s=-2, d=-1):
        """Iterative depth-first search from ``s`` (default: first node).

        Returns the visit order; stops early once destination ``d`` is
        reached (when ``d`` is given).
        """
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = next(iter(self.graph))
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # descend into an unvisited neighbour, if any
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # all neighbours visited: backtrack
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with random edges.

        ``c`` is the number of nodes; leaving it at -1 picks a random count
        between 10 and about 10000.  Each vertex gets roughly up to 100
        edges.
        """
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first search from ``s`` (default: first node); returns the
        visit order."""
        d = deque()
        visited = []
        if s == -2:
            s = next(iter(self.graph))
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        """Return the number of edges incident to ``u``."""
        return len(self.graph[u])

    def cycle_nodes(self):
        """Return nodes that appear to participate in a cycle.

        NOTE(review): ad-hoc iterative detection (same scheme as
        ``DirectedGraph.cycle_nodes``); verify on known inputs.
        """
        stack = []
        visited = []
        s = next(iter(self.graph))
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    # a visited non-parent ancestor while moving forward
                    # indicates a back edge: collect the nodes on the cycle
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # all neighbours visited: backtrack
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True if a cycle is detected, False otherwise.

        NOTE(review): same ad-hoc scheme as ``cycle_nodes``; verify edge
        cases before relying on it.
        """
        stack = []
        visited = []
        s = next(iter(self.graph))
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    # a back edge while moving forward means there is a cycle
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # all neighbours visited: backtrack
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            if len(stack) == 0:
                return False

    def all_nodes(self):
        """Return every node of the graph as a list."""
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        """Return the wall-clock time taken by ``dfs(s, e)`` in seconds."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Return the wall-clock time taken by ``bfs(s)`` in seconds."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
make only one source and one sink make fake vertex if there are more than one source or sink it s just a reference so you shouldn t change it in your algorithms use deep copy before doing that you should override it use this to save your result push some substance to graph relabeltofront selection rule move through list if it was relabeled swap elements and start from 0 index if it s neighbour and current vertex is higher graph 0 0 4 6 0 0 0 0 5 2 0 0 0 0 0 0 4 4 0 0 0 0 6 6 0 0 0 0 0 0 0 0 0 0 0 0 prepare our network set algorithm and calculate make only one source and one sink make fake vertex if there are more than one source or sink it s just a reference so you shouldn t change it in your algorithms use deep copy before doing that you should override it use this to save your result push some substance to graph relabel to front selection rule move through list if it was relabeled swap elements and start from 0 index if it s neighbour and current vertex is higher graph 0 0 4 6 0 0 0 0 5 2 0 0 0 0 0 0 4 4 0 0 0 0 6 6 0 0 0 0 0 0 0 0 0 0 0 0 prepare our network set algorithm and calculate
class FlowNetwork:
    """A flow network over an adjacency-matrix ``graph`` of capacities.

    Multiple sources/sinks are folded into a single fake source and a single
    fake sink so that maximum-flow algorithms only ever see one of each.
    """

    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph
        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    def _normalize_graph(self, sources, sinks):
        """Reduce the network to exactly one source and one sink."""
        # fixed: ``sources is int`` was an identity check against the type
        # object and therefore always False; isinstance is the correct test
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make a fake source/sink vertex if there is more than one of either
        if len(sources) > 1 or len(sinks) > 1:
            # the fake edges must be able to carry all possible input flow
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            # prepend a fake source (index 0) feeding every real source
            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            # append a fake sink fed by every real sink
            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        """Run the configured algorithm and return the maximum flow.

        Raises if no algorithm has been set; returns 0 for a degenerate
        network without a source or sink.
        """
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        # fixed: the executor's method is get_maximum_flow; the previous
        # getMaximumFlow() call raised AttributeError
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        """Instantiate ``algorithm`` (an executor class) over this network."""
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    """Base class: runs an algorithm over a FlowNetwork exactly once."""

    def __init__(self, flow_network):
        self.flow_network = flow_network
        # fixed: these previously read camelCase attributes (verticesCount,
        # sourceIndex, sinkIndex) that FlowNetwork never defines, so every
        # executor crashed on construction
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # this is just a reference, so algorithms must not mutate it;
        # take a deep copy first if mutation is needed
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        """Run the algorithm once; subsequent calls are no-ops."""
        if not self.executed:
            self._algorithm()
            self.executed = True

    def _algorithm(self):
        """Override in subclasses with the actual algorithm."""


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    """Base class for algorithms whose result is a maximum-flow value."""

    def __init__(self, flow_network):
        super().__init__(flow_network)
        # subclasses store their result here
        self.maximum_flow = -1

    def get_maximum_flow(self):
        """Return the computed maximum flow (after ``execute``)."""
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    """Push-relabel maximum flow with the relabel-to-front selection rule."""

    def __init__(self, flow_network):
        super().__init__(flow_network)
        # preflow[u][v]: flow currently pushed on edge u -> v
        self.preflow = [[0] * self.vertices_count for _ in range(self.vertices_count)]
        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count

    def _algorithm(self):
        self.heights[self.source_index] = self.vertices_count

        # saturate every edge leaving the source
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # relabel-to-front: move through the list; on a relabel, move the
        # vertex to the front and restart from index 0
        vertices_list = [
            i
            for i in range(self.vertices_count)
            if i not in {self.source_index, self.sink_index}
        ]
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        """Discharge ``vertex_index``: push while possible, else relabel."""
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # push only along residual edges into strictly lower vertices
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)
            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        """Push as much excess as the residual capacity allows."""
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        """Lift the vertex just above its lowest residual neighbour."""
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1


if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network, set the algorithm, and calculate
    flow_network = FlowNetwork(graph, entrances, exits)
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
An Eulerian path is a path in a graph that visits every edge exactly once. An Eulerian circuit is an Eulerian path which starts and ends on the same vertex. Time complexity is O(V + E); space complexity is O(VE). We use DFS to find the Eulerian path, after a preliminary traversal that checks whether the graph has an Euler path or circuit; a graph in which every vertex has degree zero has neither.
def dfs(u, graph, visited_edge, path=None):
    """Extend ``path`` with an Euler trail found by DFS starting at ``u``.

    ``visited_edge`` is a symmetric boolean matrix marking consumed edges.
    """
    path = (path or []) + [u]
    for v in graph[u]:
        if not visited_edge[u][v]:
            visited_edge[u][v] = visited_edge[v][u] = True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    """Classify the graph by its odd-degree vertex count.

    Returns ``(1, odd_node)`` for an Euler circuit (no odd vertices),
    ``(2, odd_node)`` for an Euler path (exactly two), and ``(3, odd_node)``
    otherwise; ``odd_node`` is the last odd-degree vertex seen, or -1.
    """
    odd_node = -1
    odd_count = 0
    for candidate in range(max_node):
        if candidate in graph and len(graph[candidate]) % 2 == 1:
            odd_count += 1
            odd_node = candidate
    if odd_count == 0:
        return 1, odd_node
    return (2, odd_node) if odd_count == 2 else (3, odd_node)


def check_euler(graph, max_node):
    """Report whether the graph has an Euler path/cycle and print one trail."""
    size = max_node + 1
    visited_edge = [[False] * size for _ in range(size)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    # An Euler path must start at an odd-degree vertex; a circuit may start
    # anywhere, so we default to node 1.
    start_node = odd_node if check == 2 else 1
    if check == 2:
        print("graph has a Euler path")
    else:
        print("graph has a Euler cycle")
    print(dfs(start_node, graph, visited_edge))


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
    }
    max_node = 10
    for sample in (g1, g2, g3, g4, g5):
        check_euler(sample, max_node)


if __name__ == "__main__":
    main()
you are given a treea simple connected graph with no cycles the tree has n nodes numbered from 1 to n and is rooted at node 1 find the maximum number of edges you can remove from the tree to get a forest such that each connected component of the forest contains an even number of nodes constraints 2 2 100 note the tree input will be such that it can always be decomposed into components containing an even number of nodes pylint disableinvalidname dfs traversal pylint disableredefinedoutername ret 1 visitedstart true for v in treestart if v not in visited ret dfsv if ret 2 0 cuts appendstart return ret def eventree dfs1 if name main n m 10 9 tree defaultdictlist visited dictint bool cuts listint count 0 edges 2 1 3 1 4 3 5 2 6 1 7 2 8 6 9 8 10 8 for u v in edges treeu appendv treev appendu eventree printlencuts 1 pylint disable invalid name dfs traversal pylint disable redefined outer name 2 1 3 1 4 3 5 2 6 1 7 2 8 6 9 8 10 8 on removing edges 1 3 and 1 6 we can get the desired result 2
from collections import defaultdict


def count_even_subtrees(graph, start, visited, cuts):
    """Return the size of the subtree rooted at ``start``.

    Performs a DFS over ``graph`` (an adjacency map), marking nodes in
    ``visited`` and appending to ``cuts`` every node whose subtree has an
    even number of nodes — the edge above such a node can be removed while
    keeping all components even.
    """
    size = 1
    visited[start] = True
    for neighbour in graph[start]:
        if neighbour not in visited:
            size += count_even_subtrees(graph, neighbour, visited, cuts)
    if size % 2 == 0:
        cuts.append(start)
    return size


def dfs(start: int) -> int:
    """DFS from ``start`` over the module-level ``tree``/``visited``/``cuts``.

    Kept for backward compatibility; delegates to ``count_even_subtrees``.
    """
    return count_even_subtrees(tree, start, visited, cuts)


def even_tree():
    """Count removable edges for the sample tree rooted at node 1.

    For the sample edges below, removing edges (1, 3) and (1, 6) gives a
    forest whose components all have an even number of nodes, so the
    printed answer is 2 (``len(cuts) - 1``; the root itself always lands in
    ``cuts`` because n is even, hence the -1).
    """
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
an edge is a bridge if after removing it count of connected components in graph will be increased by one bridges represent vulnerabilities in a connected network and are useful for designing reliable networks for example in a wired computer network an articulation point indicates the critical computers and a bridge indicates the critical wires or connections for more details refer this article https www geeksforgeeks orgbridgeinagraph return the list of undirected graph bridges a1 b1 ak bk ai bi computebridgesgetdemograph0 3 4 2 3 2 5 computebridgesgetdemograph1 6 7 0 6 1 9 3 4 2 4 2 5 computebridgesgetdemograph2 1 6 4 6 0 4 computebridgesgetdemograph3 computebridges this edge is a back edge and cannot be a bridge return the list of undirected graph bridges a1 b1 ak bk ai bi compute_bridges __get_demo_graph 0 3 4 2 3 2 5 compute_bridges __get_demo_graph 1 6 7 0 6 1 9 3 4 2 4 2 5 compute_bridges __get_demo_graph 2 1 6 4 6 0 4 compute_bridges __get_demo_graph 3 compute_bridges no of vertices in graph this edge is a back edge and cannot be a bridge
def __get_demo_graph(index):
    """Return one of four canned adjacency lists used by the doctests."""
    demo_graphs = [
        {
            0: [1, 2],
            1: [0, 2],
            2: [0, 1, 3, 5],
            3: [2, 4],
            4: [3],
            5: [2, 6, 8],
            6: [5, 7],
            7: [6, 8],
            8: [5, 7],
        },
        {
            0: [6],
            1: [9],
            2: [4, 5],
            3: [4],
            4: [2, 3],
            5: [2],
            6: [0, 7],
            7: [6],
            8: [],
            9: [1],
        },
        {
            0: [4],
            1: [6],
            2: [],
            3: [5, 6, 7],
            4: [0, 6],
            5: [3, 8, 9],
            6: [1, 3, 4, 7],
            7: [3, 6, 8, 9],
            8: [5, 7],
            9: [5, 7],
        },
        {
            0: [1, 3],
            1: [0, 2, 4],
            2: [1, 3, 4],
            3: [0, 2, 4],
            4: [1, 2, 3],
        },
    ]
    return demo_graphs[index]


def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    """Return the bridges of an undirected graph as (smaller, larger) pairs.

    A bridge is an edge whose removal increases the number of connected
    components — a critical link in the network.

    >>> compute_bridges(__get_demo_graph(0))
    [(3, 4), (2, 3), (2, 5)]
    >>> compute_bridges(__get_demo_graph(1))
    [(6, 7), (0, 6), (1, 9), (3, 4), (2, 4), (2, 5)]
    >>> compute_bridges(__get_demo_graph(2))
    [(1, 6), (4, 6), (0, 4)]
    >>> compute_bridges(__get_demo_graph(3))
    []
    >>> compute_bridges({})
    []
    """
    vertex_count = len(graph)
    low_link = [0] * vertex_count
    seen = [False] * vertex_count

    def explore(node, parent, bridges, depth):
        seen[node] = True
        low_link[node] = depth
        depth += 1
        for neighbor in graph[node]:
            if neighbor == parent:
                # Skip the tree edge we arrived through.
                continue
            if seen[neighbor]:
                # Back edge: it may lower this node's low-link value,
                # so the edge to ``neighbor`` cannot be a bridge.
                low_link[node] = min(low_link[node], low_link[neighbor])
            else:
                explore(neighbor, node, bridges, depth)
                low_link[node] = min(low_link[node], low_link[neighbor])
                if depth <= low_link[neighbor]:
                    # Nothing in the subtree reaches back above this edge.
                    pair = (node, neighbor) if node < neighbor else (neighbor, node)
                    bridges.append(pair)

    bridges: list[tuple[int, int]] = []
    for root in range(vertex_count):
        if not seen[root]:
            explore(root, -1, bridges, 0)
    return bridges


if __name__ == "__main__":
    import doctest

    doctest.testmod()
fpgraphminer a fast frequent pattern mining algorithm for network graphs a novel frequent pattern graph mining algorithm fpgraphminer that compactly represents a set of network graphs as a frequent pattern graph or fpgraph this graph can be used to efficiently mine frequent subgraphs including maximal frequent subgraphs and maximum common subgraphs url https www researchgate netpublication235255851 fmt off fmt on return distinct edges from edge array of multiple graphs sortedgetdistinctedgeedgearray a b c d e f g h return bitcode of distinctedge returns frequency table print bit bit bt joinbit store distinct edge wtbitcode bitcode in descending order returns nodes format nodesbitcode edges that represent the bitcode getnodes ab 5 11111 ac 5 11111 df 5 11111 bd 5 11111 bc 5 11111 11111 ab ac df bd bc returns cluster format cluster wtbitcode nodes with same wt returns support getsupport5 11111 ab ac df bd bc 4 11101 ef eg de fg 11011 cd 3 11001 ad 10101 dg 2 10010 dh bh 11000 be 10100 gh 10001 ce 1 00100 fh eh 10000 hi 100 0 80 0 60 0 40 0 20 0 create edge between the nodes creates edge only if the condition satisfies find different dfs walk from given node to header node find edges of multiple frequent subgraphs returns edge list for frequent subgraphs preprocess the edge array preprocess abe1 ace3 ade5 bce4 bde2 bee6 bhe12 cde2 cee4 dee1 dfe8 dge5 dhe10 efe3 ege2 fge6 ghe6 hie3 fmt off fmt on return distinct edges from edge array of multiple graphs sorted get_distinct_edge edge_array a b c d e f g h return bitcode of distinct_edge returns frequency table print bit bit bt join bit store distinct edge wt bitcode bitcode in descending order returns nodes format nodes bitcode edges that represent the bitcode get_nodes ab 5 11111 ac 5 11111 df 5 11111 bd 5 11111 bc 5 11111 11111 ab ac df bd bc returns cluster format cluster wt bitcode nodes with same wt returns support get_support 5 11111 ab ac df bd bc 4 11101 ef eg de fg 11011 cd 3 11001 ad 10101 dg 2 10010 dh bh 
11000 be 10100 gh 10001 ce 1 00100 fh eh 10000 hi 100 0 80 0 60 0 40 0 20 0 create edge between the nodes creates edge only if the condition satisfies find different dfs walk from given node to header node find edges of multiple frequent subgraphs returns edge list for frequent subgraphs preprocess the edge array preprocess ab e1 ac e3 ad e5 bc e4 bd e2 be e6 bh e12 cd e2 ce e4 de e1 df e8 dg e5 dh e10 ef e3 eg e2 fg e6 gh e6 hi e3
# FP-GraphMiner: mine frequent subgraphs from a collection of network graphs
# by compactly representing them as a single Frequent Pattern graph (FP-graph).
# Each entry below is "<edge-name>-<edge-id>", e.g. "ab-e1" is edge "ab".

# fmt: off
edge_array = [
    ['ab-e1', 'ac-e3', 'ad-e5', 'bc-e4', 'bd-e2', 'be-e6', 'bh-e12', 'cd-e2',
     'ce-e4', 'de-e1', 'df-e8', 'dg-e5', 'dh-e10', 'ef-e3', 'eg-e2', 'fg-e6',
     'gh-e6', 'hi-e3'],
    ['ab-e1', 'ac-e3', 'ad-e5', 'bc-e4', 'bd-e2', 'be-e6', 'cd-e2', 'de-e1',
     'df-e8', 'ef-e3', 'eg-e2', 'fg-e6'],
    ['ab-e1', 'ac-e3', 'bc-e4', 'bd-e2', 'de-e1', 'df-e8', 'dg-e5', 'ef-e3',
     'eg-e2', 'eh-e12', 'fg-e6', 'fh-e10', 'gh-e6'],
    ['ab-e1', 'ac-e3', 'bc-e4', 'bd-e2', 'bh-e12', 'cd-e2', 'df-e8', 'dh-e10'],
    ['ab-e1', 'ac-e3', 'ad-e5', 'bc-e4', 'bd-e2', 'cd-e2', 'ce-e4', 'de-e1',
     'df-e8', 'dg-e5', 'ef-e3', 'eg-e2', 'fg-e6']
]
# fmt: on

# DFS walks recorded by my_dfs(). BUGFIX: this must exist at module level
# *before* find_freq_subgraph_given_support() runs; previously it was bound
# inside the __main__ block only after the walks had already been attempted,
# so my_dfs() crashed with NameError.
paths: list = []


def get_distinct_edge(edge_array):
    """Return the distinct edge labels found in any of the graphs.

    Each row entry's first element (``item[0]``) is used as the label —
    after preprocess() that is the edge name, e.g. "ab".
    """
    distinct_edge = set()
    for row in edge_array:
        for item in row:
            distinct_edge.add(item[0])
    return list(distinct_edge)


def get_bitcode(edge_array, distinct_edge):
    """Return a bitstring with one bit per graph: "1" if the edge occurs there."""
    bitcode = ["0"] * len(edge_array)
    for i, row in enumerate(edge_array):
        for item in row:
            if distinct_edge in item[0]:
                bitcode[i] = "1"
                break
    return "".join(bitcode)


def get_frequency_table(edge_array):
    """Return [edge, frequency, bitcode] rows sorted by frequency, descending."""
    distinct_edge = get_distinct_edge(edge_array)
    frequency_table = {}

    for item in distinct_edge:
        bit = get_bitcode(edge_array, item)
        s = bit.count("1")  # number of graphs containing this edge
        frequency_table[item] = [s, bit]

    # Store [distinct edge, weight, bitcode] in descending order of weight.
    sorted_frequency_table = [
        [k, v[0], v[1]]
        for k, v in sorted(frequency_table.items(), key=lambda v: v[1][0], reverse=True)
    ]
    return sorted_frequency_table


def get_nodes(frequency_table):
    """Group edges by bitcode: {bitcode: [edges that share that bitcode]}."""
    nodes = {}
    for _, item in enumerate(frequency_table):
        nodes.setdefault(item[2], []).append(item[0])
    return nodes


def get_cluster(nodes):
    """Group nodes by weight (count of 1-bits): {weight: {bitcode: edges}}."""
    cluster = {}
    for key, value in nodes.items():
        cluster.setdefault(key.count("1"), {})[key] = value
    return cluster


def get_support(cluster):
    """Return each weight level expressed as a percentage of the level count."""
    return [i * 100 / len(cluster) for i in cluster]


def print_all() -> None:
    """Dump nodes, support values, clusters, the FP-graph and the edge lists."""
    print("\nNodes\n")
    for key, value in nodes.items():
        print(key, value)
    print("\nSupport\n")
    print(support)
    print("\n Cluster \n")
    for key, value in sorted(cluster.items(), reverse=True):
        print(key, value)
    print("\n Graph\n")
    for key, value in graph.items():
        print(key, value)
    print("\n Edge List of Frequent subgraphs \n")
    for edge_list in freq_subgraph_edge_list:
        print(edge_list)


def create_edge(nodes, graph, cluster, c1):
    """Link each level-c1 node to the nearest higher level containing a superset."""
    for i in cluster[c1]:
        count = 0
        c2 = c1 + 1
        while c2 < max(cluster.keys()):
            for j in cluster[c2]:
                # Create the edge only if bitcode i is a subset of bitcode j.
                if int(i, 2) & int(j, 2) == int(i, 2):
                    if tuple(nodes[i]) in graph:
                        graph[tuple(nodes[i])].append(nodes[j])
                    else:
                        graph[tuple(nodes[i])] = [nodes[j]]
                    count += 1
            if count == 0:
                c2 = c2 + 1
            else:
                break


def construct_graph(cluster, nodes):
    """Build the FP-graph: a "Header" node above the top cluster, plus
    subset edges between the remaining cluster levels."""
    x = cluster[max(cluster.keys())]
    cluster[max(cluster.keys()) + 1] = "Header"
    graph = {}
    # BUGFIX: dict keys must be hashable — use the tuple ("Header",) instead
    # of (["Header"],), a tuple containing a list, which raised TypeError.
    for i in x:
        if ("Header",) in graph:
            graph[("Header",)].append(x[i])
        else:
            graph[("Header",)] = [x[i]]
    for i in x:
        # BUGFIX: convert the node list to a tuple so it can be a dict key.
        graph[tuple(x[i])] = [["Header"]]
    i = 1
    while i < max(cluster) - 1:
        create_edge(nodes, graph, cluster, i)
        i = i + 1
    return graph


def my_dfs(graph, start, end, path=None):
    """Record in ``paths`` every DFS walk from ``start`` to ``end``."""
    path = (path or []) + [start]
    if start == end:
        paths.append(path)
    for node in graph[start]:
        if tuple(node) not in path:
            my_dfs(graph, tuple(node), end, path)


def find_freq_subgraph_given_support(s, cluster, graph):
    """Collect walks to the header node for every node at support ``s`` percent."""
    k = int(s / 100 * (len(cluster) - 1))
    for i in cluster[k]:
        # BUGFIX: the header node is the hashable tuple ("Header",).
        my_dfs(graph, tuple(cluster[k][i]), ("Header",))


def freq_subgraphs_edge_list(paths):
    """Return, for each recorded walk, its list of (u, v) endpoint pairs."""
    freq_sub_el = []
    for edges in paths:
        el = []
        for j in range(len(edges) - 1):
            temp = list(edges[j])
            for e in temp:
                edge = (e[0], e[1])
                el.append(edge)
        freq_sub_el.append(el)
    return freq_sub_el


def preprocess(edge_array):
    """Split every "xy-eN" entry in place into the pair ["xy", "eN"]."""
    for i in range(len(edge_array)):
        for j in range(len(edge_array[i])):
            t = edge_array[i][j].split("-")
            edge_array[i][j] = t


if __name__ == "__main__":
    preprocess(edge_array)
    frequency_table = get_frequency_table(edge_array)
    nodes = get_nodes(frequency_table)
    cluster = get_cluster(nodes)
    support = get_support(cluster)
    graph = construct_graph(cluster, nodes)
    # ``paths`` is initialized at module level before these walks run.
    find_freq_subgraph_given_support(60, cluster, graph)
    freq_subgraph_edge_list = freq_subgraphs_edge_list(paths)
    print_all()
Author: Phyllipe Bezerra (https://github.com/pmba)
# Topological sort of a dressing-order dependency graph (DFS based).
clothes = {
    0: "underwear",
    1: "pants",
    2: "belt",
    3: "suit",
    4: "shoe",
    5: "socks",
    6: "shirt",
    7: "tie",
    8: "watch",
}

# graph[i] lists the garments that may only be put on *after* garment i.
graph = [[1, 4], [2, 4], [3], [], [], [4], [2, 7], [3], []]

visited = [0] * len(graph)
stack = []


def print_stack(stack, clothes):
    """Pop every garment id off the stack, printing the dressing order."""
    position = 1
    while stack:
        garment = stack.pop()
        print(position, clothes[garment])
        position += 1


def depth_first_search(u, visited, graph):
    """Post-order DFS from u: push u only after all its successors are done."""
    visited[u] = 1
    for successor in graph[u]:
        if visited[successor]:
            continue
        depth_first_search(successor, visited, graph)
    stack.append(u)


def topological_sort(graph, visited):
    """Run DFS from every unvisited vertex, filling the global stack."""
    for vertex in range(len(graph)):
        if not visited[vertex]:
            depth_first_search(vertex, visited, graph)


if __name__ == "__main__":
    topological_sort(graph, visited)
    print(stack)
    print_stack(stack, clothes)
Finds the stable match in any bipartite graph, i.e. a pairing where no two objects prefer each other over their current partners. The function accepts the preferences of organ donors and recipients (both assigned numbers from 0 to n-1) and returns a list where the index position corresponds to the donor and the value at that index is the matched organ recipient. To better understand the algorithm, see https://github.com/akashvshroff/Gale_Shapley_Stable_Matching (README) and https://www.youtube.com/watch?v=Qcv1IqHWAzg&t=13s (Numberphile, YouTube). Example: with donor_pref = [[0, 1, 3, 2], [0, 2, 3, 1], [1, 0, 2, 3], [0, 3, 1, 2]] and recipient_pref = [[3, 1, 2, 0], [3, 1, 0, 2], [0, 3, 1, 2], [1, 0, 3, 2]], stable_matching(donor_pref, recipient_pref) returns [1, 2, 3, 0].
from __future__ import annotations


def stable_matching(
    donor_pref: list[list[int]], recipient_pref: list[list[int]]
) -> list[int]:
    """Return the donor-optimal stable pairing via the Gale-Shapley algorithm.

    Index ``i`` of the result is donor ``i``; the value at that index is the
    recipient matched to that donor. In a stable pairing no donor/recipient
    pair prefer each other over their assigned partners.

    >>> donor_pref = [[0, 1, 3, 2], [0, 2, 3, 1], [1, 0, 2, 3], [0, 3, 1, 2]]
    >>> recipient_pref = [[3, 1, 2, 0], [3, 1, 0, 2], [0, 3, 1, 2], [1, 0, 3, 2]]
    >>> stable_matching(donor_pref, recipient_pref)
    [1, 2, 3, 0]
    """
    assert len(donor_pref) == len(recipient_pref)

    pair_count = len(donor_pref)
    free_donors = list(range(pair_count))
    donor_match = [-1] * pair_count      # donor -> recipient donated to
    recipient_match = [-1] * pair_count  # recipient -> donor received from
    proposals_made = [0] * pair_count    # next preference index per donor

    while free_donors:
        donor = free_donors[0]
        # Propose to the donor's most-preferred recipient not yet tried.
        recipient = donor_pref[donor][proposals_made[donor]]
        proposals_made[donor] += 1

        ranking = recipient_pref[recipient]
        rival = recipient_match[recipient]
        # Accept when the recipient is free, or prefers this donor to the
        # rival currently matched with them (lower index = more preferred).
        if rival == -1 or ranking.index(rival) > ranking.index(donor):
            recipient_match[recipient] = donor
            donor_match[donor] = recipient
            free_donors.remove(donor)
            if rival != -1:
                # The displaced rival becomes free again.
                free_donors.append(rival)

    return donor_match
usrbinenv python3 vikram nithyanandam description the following implementation is a robust unweighted graph data structure implemented using an adjacency list this vertices and edges of this graph can be effectively initialized and modified while storing your chosen generic value in each vertex adjacency list https en wikipedia orgwikiadjacencylist potential future ideas add a flag to set edge weights on and set edge weights make edge weights and vertex values customizable to store whatever the client wants support multigraph functionality if the client wants it parameters vertices listt the list of vertex names the client wants to pass in default is empty edges listlistt the list of edges the client wants to pass in each edge is a 2element list default is empty directed bool indicates if graph is directed or undirected default is true falsey checks adds a vertex to the graph if the given vertex already exists a valueerror will be thrown creates an edge from source vertex to destination vertex if any given vertex doesn t exist or the edge already exists a valueerror will be thrown add the destination vertex to the list associated with the source vertex and vice versa if not directed removes the given vertex from the graph and deletes all incoming and outgoing edges from the given vertex as well if the given vertex does not exist a valueerror will be thrown if not directed find all neighboring vertices and delete all references of edges connecting to the given vertex if directed search all neighbors of all vertices and delete all references of edges connecting to the given vertex finally delete the given vertex and all of its outgoing edge references removes the edge between the two vertices if any given vertex doesn t exist or the edge does not exist a valueerror will be thrown remove the destination vertex from the list associated with the source vertex and vice versa if not directed returns true if the graph contains the vertex false otherwise returns true if the 
graph contains the edge from the sourcevertex to the destinationvertex false otherwise if any given vertex doesn t exist a valueerror will be thrown clears all vertices and edges generate graph input build graphs test graph initialization with vertices and edges build graphs without edges test containsvertex build empty graphs run addvertex test addvertex worked build graphs without edges test removevertex worked build graphs without edges test adding and removing vertices remove all vertices generate graphs and graph input generate all possible edges for testing test containsedge function since this edge exists for undirected but the reverse may not exist for directed generate graph input build graphs without edges run and test addedge generate graph input and graphs run and test removeedge make some more edge options usr bin env python3 vikram nithyanandam description the following implementation is a robust unweighted graph data structure implemented using an adjacency list this vertices and edges of this graph can be effectively initialized and modified while storing your chosen generic value in each vertex adjacency list https en wikipedia org wiki adjacency_list potential future ideas add a flag to set edge weights on and set edge weights make edge weights and vertex values customizable to store whatever the client wants support multigraph functionality if the client wants it parameters vertices list t the list of vertex names the client wants to pass in default is empty edges list list t the list of edges the client wants to pass in each edge is a 2 element list default is empty directed bool indicates if graph is directed or undirected default is true dictionary of lists of t falsey checks adds a vertex to the graph if the given vertex already exists a valueerror will be thrown creates an edge from source vertex to destination vertex if any given vertex doesn t exist or the edge already exists a valueerror will be thrown add the destination vertex to the 
list associated with the source vertex and vice versa if not directed removes the given vertex from the graph and deletes all incoming and outgoing edges from the given vertex as well if the given vertex does not exist a valueerror will be thrown if not directed find all neighboring vertices and delete all references of edges connecting to the given vertex if directed search all neighbors of all vertices and delete all references of edges connecting to the given vertex finally delete the given vertex and all of its outgoing edge references removes the edge between the two vertices if any given vertex doesn t exist or the edge does not exist a valueerror will be thrown remove the destination vertex from the list associated with the source vertex and vice versa if not directed returns true if the graph contains the vertex false otherwise returns true if the graph contains the edge from the source_vertex to the destination_vertex false otherwise if any given vertex doesn t exist a valueerror will be thrown clears all vertices and edges generate graph input build graphs test graph initialization with vertices and edges build graphs without edges test contains_vertex build empty graphs run add_vertex test add_vertex worked build graphs without edges test remove_vertex worked build graphs without edges test adding and removing vertices remove all vertices generate graphs and graph input generate all possible edges for testing test contains_edge function since this edge exists for undirected but the reverse may not exist for directed generate graph input build graphs without edges run and test add_edge generate graph input and graphs run and test remove_edge make some more edge options
from __future__ import annotations

import random
import unittest
from pprint import pformat
from typing import Generic, TypeVar

import pytest

T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    """Unweighted graph (directed or undirected) backed by an adjacency list.

    Each vertex maps to the list of vertices it has an edge to; for an
    undirected graph every edge is mirrored in both adjacency lists.
    """

    def __init__(
        self, vertices: list[T], edges: list[list[T]], directed: bool = True
    ) -> None:
        """Build a graph from ``vertices`` and two-element ``edges``.

        :param vertices: initial vertex names (may be empty or None)
        :param edges: initial edges, each a 2-element list (may be empty/None)
        :param directed: True for a directed graph, False for undirected
        :raises ValueError: if an edge is not exactly 2 elements long
        """
        self.adj_list: dict[T, list[T]] = {}  # vertex -> outgoing neighbors
        self.directed = directed
        # Falsey checks: normalize None to empty lists.
        edges = edges or []
        vertices = vertices or []
        for vertex in vertices:
            self.add_vertex(vertex)

        for edge in edges:
            if len(edge) != 2:
                msg = f"Invalid input: {edge} is the wrong length."
                raise ValueError(msg)
            self.add_edge(edge[0], edge[1])

    def add_vertex(self, vertex: T) -> None:
        """Add ``vertex`` to the graph.

        :raises ValueError: if the vertex already exists
        """
        if self.contains_vertex(vertex):
            msg = f"Incorrect input: {vertex} is already in the graph."
            raise ValueError(msg)
        self.adj_list[vertex] = []

    def add_edge(self, source_vertex: T, destination_vertex: T) -> None:
        """Create an edge from source to destination (both ways if undirected).

        :raises ValueError: if either vertex is missing or the edge exists
        """
        if not (
            self.contains_vertex(source_vertex)
            and self.contains_vertex(destination_vertex)
        ):
            msg = (
                f"Incorrect input: Either {source_vertex} or "
                f"{destination_vertex} does not exist"
            )
            raise ValueError(msg)
        if self.contains_edge(source_vertex, destination_vertex):
            msg = (
                "Incorrect input: The edge already exists between "
                f"{source_vertex} and {destination_vertex}"
            )
            raise ValueError(msg)

        # Add the destination to the source's list, and vice versa if the
        # graph is undirected.
        self.adj_list[source_vertex].append(destination_vertex)
        if not self.directed:
            self.adj_list[destination_vertex].append(source_vertex)

    def remove_vertex(self, vertex: T) -> None:
        """Delete ``vertex`` and every incoming/outgoing edge touching it.

        :raises ValueError: if the vertex does not exist
        """
        if not self.contains_vertex(vertex):
            msg = f"Incorrect input: {vertex} does not exist in this graph."
            raise ValueError(msg)

        if not self.directed:
            # Undirected: only this vertex's own neighbors can reference it.
            for neighbor in self.adj_list[vertex]:
                self.adj_list[neighbor].remove(vertex)
        else:
            # Directed: any vertex may hold an incoming edge to ``vertex``.
            for edge_list in self.adj_list.values():
                if vertex in edge_list:
                    edge_list.remove(vertex)

        # Finally drop the vertex itself with all its outgoing references.
        self.adj_list.pop(vertex)

    def remove_edge(self, source_vertex: T, destination_vertex: T) -> None:
        """Remove the edge between the two vertices (both ways if undirected).

        :raises ValueError: if either vertex or the edge does not exist
        """
        if not (
            self.contains_vertex(source_vertex)
            and self.contains_vertex(destination_vertex)
        ):
            msg = (
                f"Incorrect input: Either {source_vertex} or "
                f"{destination_vertex} does not exist"
            )
            raise ValueError(msg)
        if not self.contains_edge(source_vertex, destination_vertex):
            msg = (
                "Incorrect input: The edge does NOT exist between "
                f"{source_vertex} and {destination_vertex}"
            )
            raise ValueError(msg)

        self.adj_list[source_vertex].remove(destination_vertex)
        if not self.directed:
            self.adj_list[destination_vertex].remove(source_vertex)

    def contains_vertex(self, vertex: T) -> bool:
        """Return True iff ``vertex`` is in the graph."""
        return vertex in self.adj_list

    def contains_edge(self, source_vertex: T, destination_vertex: T) -> bool:
        """Return True iff the edge source -> destination exists.

        :raises ValueError: if either vertex does not exist
        """
        if not (
            self.contains_vertex(source_vertex)
            and self.contains_vertex(destination_vertex)
        ):
            msg = (
                f"Incorrect input: Either {source_vertex} "
                f"or {destination_vertex} does not exist."
            )
            raise ValueError(msg)

        return destination_vertex in self.adj_list[source_vertex]

    def clear_graph(self) -> None:
        """Remove all vertices and edges."""
        self.adj_list = {}

    def __repr__(self) -> str:
        return pformat(self.adj_list)


class TestGraphAdjacencyList(unittest.TestCase):
    """Exercises GraphAdjacencyList in both directed and undirected modes."""

    def __assert_graph_edge_exists_check(
        self,
        undirected_graph: GraphAdjacencyList,
        directed_graph: GraphAdjacencyList,
        edge: list[int],
    ) -> None:
        """Assert the edge is present (both directions for undirected)."""
        assert undirected_graph.contains_edge(edge[0], edge[1])
        assert undirected_graph.contains_edge(edge[1], edge[0])
        assert directed_graph.contains_edge(edge[0], edge[1])

    def __assert_graph_edge_does_not_exist_check(
        self,
        undirected_graph: GraphAdjacencyList,
        directed_graph: GraphAdjacencyList,
        edge: list[int],
    ) -> None:
        """Assert the edge is absent from both graphs."""
        assert not undirected_graph.contains_edge(edge[0], edge[1])
        assert not undirected_graph.contains_edge(edge[1], edge[0])
        assert not directed_graph.contains_edge(edge[0], edge[1])

    def __assert_graph_vertex_exists_check(
        self,
        undirected_graph: GraphAdjacencyList,
        directed_graph: GraphAdjacencyList,
        vertex: int,
    ) -> None:
        """Assert the vertex is present in both graphs."""
        assert undirected_graph.contains_vertex(vertex)
        assert directed_graph.contains_vertex(vertex)

    def __assert_graph_vertex_does_not_exist_check(
        self,
        undirected_graph: GraphAdjacencyList,
        directed_graph: GraphAdjacencyList,
        vertex: int,
    ) -> None:
        """Assert the vertex is absent from both graphs."""
        assert not undirected_graph.contains_vertex(vertex)
        assert not directed_graph.contains_vertex(vertex)

    def __generate_random_edges(
        self, vertices: list[int], edge_pick_count: int
    ) -> list[list[int]]:
        """Build edges pairing sources from the first half of ``vertices``
        with destinations from the second half."""
        assert edge_pick_count <= len(vertices)

        random_source_vertices: list[int] = random.sample(
            vertices[0 : int(len(vertices) / 2)], edge_pick_count
        )
        random_destination_vertices: list[int] = random.sample(
            vertices[int(len(vertices) / 2) :], edge_pick_count
        )
        random_edges: list[list[int]] = []

        for source in random_source_vertices:
            for dest in random_destination_vertices:
                random_edges.append([source, dest])

        return random_edges

    def __generate_graphs(
        self, vertex_count: int, min_val: int, max_val: int, edge_pick_count: int
    ) -> tuple[GraphAdjacencyList, GraphAdjacencyList, list[int], list[list[int]]]:
        """Create matching undirected/directed graphs over random vertices
        in [min_val, max_val] and return them with the raw inputs."""
        if max_val - min_val + 1 < vertex_count:
            raise ValueError(
                "Will result in duplicate vertices. Either increase range "
                "between min_val and max_val or decrease vertex count."
            )

        # Generate graph input.
        random_vertices: list[int] = random.sample(
            range(min_val, max_val + 1), vertex_count
        )
        random_edges: list[list[int]] = self.__generate_random_edges(
            random_vertices, edge_pick_count
        )

        # Build graphs.
        undirected_graph = GraphAdjacencyList(
            vertices=random_vertices, edges=random_edges, directed=False
        )
        directed_graph = GraphAdjacencyList(
            vertices=random_vertices, edges=random_edges, directed=True
        )

        return undirected_graph, directed_graph, random_vertices, random_edges

    def test_init_check(self) -> None:
        """Graphs initialized with vertices and edges contain all of them."""
        (
            undirected_graph,
            directed_graph,
            random_vertices,
            random_edges,
        ) = self.__generate_graphs(20, 0, 100, 4)

        # Test graph initialization with vertices and edges.
        for num in random_vertices:
            self.__assert_graph_vertex_exists_check(
                undirected_graph, directed_graph, num
            )
        for edge in random_edges:
            self.__assert_graph_edge_exists_check(
                undirected_graph, directed_graph, edge
            )
        assert not undirected_graph.directed
        assert directed_graph.directed

    def test_contains_vertex(self) -> None:
        """contains_vertex() agrees with plain membership in the input list."""
        random_vertices: list[int] = random.sample(range(101), 20)

        # Build graphs without edges.
        undirected_graph = GraphAdjacencyList(
            vertices=random_vertices, edges=[], directed=False
        )
        directed_graph = GraphAdjacencyList(
            vertices=random_vertices, edges=[], directed=True
        )

        # Test contains_vertex.
        for num in range(101):
            assert (num in random_vertices) == undirected_graph.contains_vertex(num)
            assert (num in random_vertices) == directed_graph.contains_vertex(num)

    def test_add_vertices(self) -> None:
        """Vertices added to empty graphs become queryable."""
        random_vertices: list[int] = random.sample(range(101), 20)

        # Build empty graphs.
        undirected_graph: GraphAdjacencyList = GraphAdjacencyList(
            vertices=[], edges=[], directed=False
        )
        directed_graph: GraphAdjacencyList = GraphAdjacencyList(
            vertices=[], edges=[], directed=True
        )

        # Run add_vertex.
        for num in random_vertices:
            undirected_graph.add_vertex(num)

        for num in random_vertices:
            directed_graph.add_vertex(num)

        # Test that add_vertex worked.
        for num in random_vertices:
            self.__assert_graph_vertex_exists_check(
                undirected_graph, directed_graph, num
            )

    def test_remove_vertices(self) -> None:
        """Removed vertices disappear from both graphs."""
        random_vertices: list[int] = random.sample(range(101), 20)

        # Build graphs without edges.
        undirected_graph = GraphAdjacencyList(
            vertices=random_vertices, edges=[], directed=False
        )
        directed_graph = GraphAdjacencyList(
            vertices=random_vertices, edges=[], directed=True
        )

        # Test that remove_vertex worked.
        for num in random_vertices:
            self.__assert_graph_vertex_exists_check(
                undirected_graph, directed_graph, num
            )

            undirected_graph.remove_vertex(num)
            directed_graph.remove_vertex(num)

            self.__assert_graph_vertex_does_not_exist_check(
                undirected_graph, directed_graph, num
            )

    def test_add_and_remove_vertices_repeatedly(self) -> None:
        """Interleaved adds/removes keep the vertex sets consistent."""
        random_vertices1: list[int] = random.sample(range(51), 20)
        random_vertices2: list[int] = random.sample(range(51, 101), 20)

        # Build graphs without edges.
        undirected_graph = GraphAdjacencyList(
            vertices=random_vertices1, edges=[], directed=False
        )
        directed_graph = GraphAdjacencyList(
            vertices=random_vertices1, edges=[], directed=True
        )

        # Test adding and removing vertices.
        for i, _ in enumerate(random_vertices1):
            undirected_graph.add_vertex(random_vertices2[i])
            directed_graph.add_vertex(random_vertices2[i])

            self.__assert_graph_vertex_exists_check(
                undirected_graph, directed_graph, random_vertices2[i]
            )

            undirected_graph.remove_vertex(random_vertices1[i])
            directed_graph.remove_vertex(random_vertices1[i])

            self.__assert_graph_vertex_does_not_exist_check(
                undirected_graph, directed_graph, random_vertices1[i]
            )

        # Remove all vertices.
        for i, _ in enumerate(random_vertices1):
            undirected_graph.remove_vertex(random_vertices2[i])
            directed_graph.remove_vertex(random_vertices2[i])

            self.__assert_graph_vertex_does_not_exist_check(
                undirected_graph, directed_graph, random_vertices2[i]
            )

    def test_contains_edge(self) -> None:
        """contains_edge() is true exactly for the generated edges."""
        # Generate graphs and graph input.
        vertex_count = 20
        (
            undirected_graph,
            directed_graph,
            random_vertices,
            random_edges,
        ) = self.__generate_graphs(vertex_count, 0, 100, 4)

        # Generate all possible edges for testing.
        all_possible_edges: list[list[int]] = []
        for i in range(vertex_count - 1):
            for j in range(i + 1, vertex_count):
                all_possible_edges.append([random_vertices[i], random_vertices[j]])
                all_possible_edges.append([random_vertices[j], random_vertices[i]])

        # Test the contains_edge function.
        for edge in all_possible_edges:
            if edge in random_edges:
                self.__assert_graph_edge_exists_check(
                    undirected_graph, directed_graph, edge
                )
            elif [edge[1], edge[0]] in random_edges:
                # Since this edge exists for undirected but the reverse may
                # not exist for directed.
                self.__assert_graph_edge_exists_check(
                    undirected_graph, directed_graph, [edge[1], edge[0]]
                )
            else:
                self.__assert_graph_edge_does_not_exist_check(
                    undirected_graph, directed_graph, edge
                )

    def test_add_edge(self) -> None:
        """Edges added to edgeless graphs become queryable."""
        # Generate graph input.
        random_vertices: list[int] = random.sample(range(101), 15)
        random_edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4)

        # Build graphs without edges.
        undirected_graph = GraphAdjacencyList(
            vertices=random_vertices, edges=[], directed=False
        )
        directed_graph = GraphAdjacencyList(
            vertices=random_vertices, edges=[], directed=True
        )

        # Run and test add_edge.
        for edge in random_edges:
            undirected_graph.add_edge(edge[0], edge[1])
            directed_graph.add_edge(edge[0], edge[1])
            self.__assert_graph_edge_exists_check(
                undirected_graph, directed_graph, edge
            )

    def test_remove_edge(self) -> None:
        """Removed edges disappear from both graphs."""
        # Generate graph input and graphs.
        (
            undirected_graph,
            directed_graph,
            random_vertices,
            random_edges,
        ) = self.__generate_graphs(20, 0, 100, 4)

        # Run and test remove_edge.
        for edge in random_edges:
            self.__assert_graph_edge_exists_check(
                undirected_graph, directed_graph, edge
            )
            undirected_graph.remove_edge(edge[0], edge[1])
            directed_graph.remove_edge(edge[0], edge[1])
            self.__assert_graph_edge_does_not_exist_check(
                undirected_graph, directed_graph, edge
            )

    def test_add_and_remove_edges_repeatedly(self) -> None:
        """Interleaved edge adds/removes keep the edge sets consistent."""
        (
            undirected_graph,
            directed_graph,
            random_vertices,
            random_edges,
        ) = self.__generate_graphs(20, 0, 100, 4)

        # Make some more edge options distinct from the existing ones.
        more_random_edges: list[list[int]] = []

        while len(more_random_edges) != len(random_edges):
            edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4)
            for edge in edges:
                if len(more_random_edges) == len(random_edges):
                    break
                elif edge not in more_random_edges and edge not in random_edges:
                    more_random_edges.append(edge)

        for i, _ in enumerate(random_edges):
            undirected_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1])
            directed_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1])

            self.__assert_graph_edge_exists_check(
                undirected_graph, directed_graph, more_random_edges[i]
            )

            undirected_graph.remove_edge(random_edges[i][0], random_edges[i][1])
            directed_graph.remove_edge(random_edges[i][0], random_edges[i][1])

            self.__assert_graph_edge_does_not_exist_check(
                undirected_graph, directed_graph, random_edges[i]
            )

    def test_add_vertex_exception_check(self) -> None:
        """Adding an existing vertex raises ValueError."""
        (
            undirected_graph,
            directed_graph,
            random_vertices,
            random_edges,
        ) = self.__generate_graphs(20, 0, 100, 4)

        for vertex in random_vertices:
            with pytest.raises(ValueError):
                undirected_graph.add_vertex(vertex)
            with pytest.raises(ValueError):
                directed_graph.add_vertex(vertex)

    def test_remove_vertex_exception_check(self) -> None:
        """Removing a missing vertex raises ValueError."""
        (
            undirected_graph,
            directed_graph,
            random_vertices,
            random_edges,
        ) = self.__generate_graphs(20, 0, 100, 4)

        for i in range(101):
            if i not in random_vertices:
                with pytest.raises(ValueError):
                    undirected_graph.remove_vertex(i)
                with pytest.raises(ValueError):
                    directed_graph.remove_vertex(i)

    def test_add_edge_exception_check(self) -> None:
        """Adding an already-present edge raises ValueError."""
        (
            undirected_graph,
            directed_graph,
            random_vertices,
            random_edges,
        ) = self.__generate_graphs(20, 0, 100, 4)

        for edge in random_edges:
            with pytest.raises(ValueError):
                undirected_graph.add_edge(edge[0], edge[1])
            with pytest.raises(ValueError):
                directed_graph.add_edge(edge[0], edge[1])

    def test_remove_edge_exception_check(self) -> None:
        """Removing a non-existent edge raises ValueError."""
        (
            undirected_graph,
            directed_graph,
            random_vertices,
            random_edges,
        ) = self.__generate_graphs(20, 0, 100, 4)

        # Make some more edge options guaranteed not to be in the graphs.
        more_random_edges: list[list[int]] = []

        while len(more_random_edges) != len(random_edges):
            edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4)
            for edge in edges:
                if len(more_random_edges) == len(random_edges):
                    break
                elif edge not in more_random_edges and edge not in random_edges:
                    more_random_edges.append(edge)

        for edge in more_random_edges:
            with pytest.raises(ValueError):
                undirected_graph.remove_edge(edge[0], edge[1])
            with pytest.raises(ValueError):
                directed_graph.remove_edge(edge[0], edge[1])

    def test_contains_edge_exception_check(self) -> None:
        """Querying an edge with a missing endpoint raises ValueError."""
        (
            undirected_graph,
            directed_graph,
            random_vertices,
            random_edges,
        ) = self.__generate_graphs(20, 0, 100, 4)

        for vertex in random_vertices:
            with pytest.raises(ValueError):
                undirected_graph.contains_edge(vertex, 102)
            with pytest.raises(ValueError):
                directed_graph.contains_edge(vertex, 102)

        with pytest.raises(ValueError):
            undirected_graph.contains_edge(103, 102)
        with pytest.raises(ValueError):
            directed_graph.contains_edge(103, 102)


if __name__ == "__main__":
    unittest.main()
usrbinenv python3 vikram nithyanandam description the following implementation is a robust unweighted graph data structure implemented using an adjacency matrix this vertices and edges of this graph can be effectively initialized and modified while storing your chosen generic value in each vertex adjacency matrix https mathworld wolfram comadjacencymatrix html potential future ideas add a flag to set edge weights on and set edge weights make edge weights and vertex values customizable to store whatever the client wants support multigraph functionality if the client wants it parameters vertices listt the list of vertex names the client wants to pass in default is empty edges listlistt the list of edges the client wants to pass in each edge is a 2element list default is empty directed bool indicates if graph is directed or undirected default is true falsey checks creates an edge from source vertex to destination vertex if any given vertex doesn t exist or the edge already exists a valueerror will be thrown get the indices of the corresponding vertices and set their edge value to 1 removes the edge between the two vertices if any given vertex doesn t exist or the edge does not exist a valueerror will be thrown get the indices of the corresponding vertices and set their edge value to 0 adds a vertex to the graph if the given vertex already exists a valueerror will be thrown build column for vertex build row for vertex and update other data structures removes the given vertex from the graph and deletes all incoming and outgoing edges from the given vertex as well if the given vertex does not exist a valueerror will be thrown first slide up the rows by deleting the row corresponding to the vertex being deleted next slide the columns to the left by deleting the values in the column corresponding to the vertex being deleted final clean up decrement indices for vertices shifted by the deleted vertex in the adj matrix returns true if the graph contains the vertex false 
otherwise returns true if the graph contains the edge from the sourcevertex to the destinationvertex false otherwise if any given vertex doesn t exist a valueerror will be thrown clears all vertices and edges generate graph input build graphs test graph initialization with vertices and edges build graphs without edges test containsvertex build empty graphs run addvertex test addvertex worked build graphs without edges test removevertex worked build graphs without edges test adding and removing vertices remove all vertices generate graphs and graph input generate all possible edges for testing test containsedge function since this edge exists for undirected but the reverse may not exist for directed generate graph input build graphs without edges run and test addedge generate graph input and graphs run and test removeedge make some more edge options usr bin env python3 vikram nithyanandam description the following implementation is a robust unweighted graph data structure implemented using an adjacency matrix this vertices and edges of this graph can be effectively initialized and modified while storing your chosen generic value in each vertex adjacency matrix https mathworld wolfram com adjacencymatrix html potential future ideas add a flag to set edge weights on and set edge weights make edge weights and vertex values customizable to store whatever the client wants support multigraph functionality if the client wants it parameters vertices list t the list of vertex names the client wants to pass in default is empty edges list list t the list of edges the client wants to pass in each edge is a 2 element list default is empty directed bool indicates if graph is directed or undirected default is true falsey checks creates an edge from source vertex to destination vertex if any given vertex doesn t exist or the edge already exists a valueerror will be thrown get the indices of the corresponding vertices and set their edge value to 1 removes the edge between the two 
vertices if any given vertex doesn t exist or the edge does not exist a valueerror will be thrown get the indices of the corresponding vertices and set their edge value to 0 adds a vertex to the graph if the given vertex already exists a valueerror will be thrown build column for vertex build row for vertex and update other data structures removes the given vertex from the graph and deletes all incoming and outgoing edges from the given vertex as well if the given vertex does not exist a valueerror will be thrown first slide up the rows by deleting the row corresponding to the vertex being deleted next slide the columns to the left by deleting the values in the column corresponding to the vertex being deleted final clean up decrement indices for vertices shifted by the deleted vertex in the adj matrix returns true if the graph contains the vertex false otherwise returns true if the graph contains the edge from the source_vertex to the destination_vertex false otherwise if any given vertex doesn t exist a valueerror will be thrown clears all vertices and edges generate graph input build graphs test graph initialization with vertices and edges build graphs without edges test contains_vertex build empty graphs run add_vertex test add_vertex worked build graphs without edges test remove_vertex worked build graphs without edges test adding and removing vertices remove all vertices generate graphs and graph input generate all possible edges for testing test contains_edge function since this edge exists for undirected but the reverse may not exist for directed generate graph input build graphs without edges run and test add_edge generate graph input and graphs run and test remove_edge make some more edge options
from __future__ import annotations

import random
import unittest
from pprint import pformat
from typing import Generic, TypeVar

import pytest

T = TypeVar("T")


class GraphAdjacencyMatrix(Generic[T]):
    """Unweighted graph (directed or undirected) backed by an adjacency matrix.

    Vertices carry a generic, hashable value of type ``T``; edges are stored
    as 0/1 entries in ``adj_matrix`` indexed through ``vertex_to_index``.
    """

    def __init__(
        self, vertices: list[T], edges: list[list[T]], directed: bool = True
    ) -> None:
        """Build the graph from an initial vertex list and 2-element edge lists.

        :param vertices: initial vertex values (default empty via falsy check)
        :param edges: initial edges; each must be a 2-element list
        :param directed: True for a directed graph, False for undirected
        :raises ValueError: if an edge does not have exactly 2 elements
        """
        self.directed = directed
        self.vertex_to_index: dict[T, int] = {}
        self.adj_matrix: list[list[int]] = []

        # Falsy checks: allow None to be passed in place of empty lists.
        edges = edges or []
        vertices = vertices or []

        for vertex in vertices:
            self.add_vertex(vertex)

        for edge in edges:
            if len(edge) != 2:
                msg = f"Invalid input: {edge} must have length 2."
                raise ValueError(msg)
            self.add_edge(edge[0], edge[1])

    def add_edge(self, source_vertex: T, destination_vertex: T) -> None:
        """Create an edge from source to destination (both ways if undirected).

        :raises ValueError: if either vertex is missing or the edge exists
        """
        if not (
            self.contains_vertex(source_vertex)
            and self.contains_vertex(destination_vertex)
        ):
            msg = (
                f"Incorrect input: Either {source_vertex} or "
                f"{destination_vertex} does not exist"
            )
            raise ValueError(msg)
        if self.contains_edge(source_vertex, destination_vertex):
            msg = (
                "Incorrect input: The edge already exists between "
                f"{source_vertex} and {destination_vertex}"
            )
            raise ValueError(msg)

        # Get the indices of the corresponding vertices and set their edge
        # value to 1.
        u: int = self.vertex_to_index[source_vertex]
        v: int = self.vertex_to_index[destination_vertex]
        self.adj_matrix[u][v] = 1
        if not self.directed:
            self.adj_matrix[v][u] = 1

    def remove_edge(self, source_vertex: T, destination_vertex: T) -> None:
        """Remove the edge between the two vertices.

        :raises ValueError: if either vertex or the edge does not exist
        """
        if not (
            self.contains_vertex(source_vertex)
            and self.contains_vertex(destination_vertex)
        ):
            msg = (
                f"Incorrect input: Either {source_vertex} or "
                f"{destination_vertex} does not exist"
            )
            raise ValueError(msg)
        if not self.contains_edge(source_vertex, destination_vertex):
            msg = (
                "Incorrect input: The edge does NOT exist between "
                f"{source_vertex} and {destination_vertex}"
            )
            raise ValueError(msg)

        # Get the indices of the corresponding vertices and set their edge
        # value to 0.
        u: int = self.vertex_to_index[source_vertex]
        v: int = self.vertex_to_index[destination_vertex]
        self.adj_matrix[u][v] = 0
        if not self.directed:
            self.adj_matrix[v][u] = 0

    def add_vertex(self, vertex: T) -> None:
        """Add a vertex to the graph.

        :raises ValueError: if the vertex already exists
        """
        if self.contains_vertex(vertex):
            msg = f"Incorrect input: {vertex} already exists in this graph."
            raise ValueError(msg)

        # Build column for the new vertex in every existing row.
        for row in self.adj_matrix:
            row.append(0)

        # Build row for the new vertex and update the index mapping.
        self.adj_matrix.append([0] * (len(self.adj_matrix) + 1))
        self.vertex_to_index[vertex] = len(self.adj_matrix) - 1

    def remove_vertex(self, vertex: T) -> None:
        """Remove the vertex along with all its incoming and outgoing edges.

        :raises ValueError: if the vertex does not exist
        """
        if not self.contains_vertex(vertex):
            msg = f"Incorrect input: {vertex} does not exist in this graph."
            raise ValueError(msg)

        # First slide up the rows by deleting the row corresponding to the
        # vertex being deleted.
        start_index = self.vertex_to_index[vertex]
        self.adj_matrix.pop(start_index)

        # Next slide the columns to the left by deleting the values in the
        # column corresponding to the vertex being deleted.
        for lst in self.adj_matrix:
            lst.pop(start_index)

        # Final clean up: drop the mapping entry and decrement indices for
        # vertices shifted by the deleted vertex in the adjacency matrix.
        # (Loop variable renamed: it previously shadowed the `vertex` param.)
        self.vertex_to_index.pop(vertex)
        for remaining_vertex in self.vertex_to_index:
            if self.vertex_to_index[remaining_vertex] >= start_index:
                self.vertex_to_index[remaining_vertex] -= 1

    def contains_vertex(self, vertex: T) -> bool:
        """Return True if the graph contains the vertex, False otherwise."""
        return vertex in self.vertex_to_index

    def contains_edge(self, source_vertex: T, destination_vertex: T) -> bool:
        """Return True if the edge source -> destination exists.

        :raises ValueError: if either vertex does not exist
        """
        if not (
            self.contains_vertex(source_vertex)
            and self.contains_vertex(destination_vertex)
        ):
            msg = (
                f"Incorrect input: Either {source_vertex} "
                f"or {destination_vertex} does not exist."
            )
            raise ValueError(msg)

        u = self.vertex_to_index[source_vertex]
        v = self.vertex_to_index[destination_vertex]
        return self.adj_matrix[u][v] == 1

    def clear_graph(self) -> None:
        """Clear all vertices and edges."""
        self.vertex_to_index = {}
        self.adj_matrix = []

    def __repr__(self) -> str:
        first = "Adj Matrix:\n" + pformat(self.adj_matrix)
        second = "\nVertex to index mapping:\n" + pformat(self.vertex_to_index)
        return first + second


class TestGraphMatrix(unittest.TestCase):
    """Randomized test-suite exercising both directed and undirected graphs."""

    def __assert_graph_edge_exists_check(
        self,
        undirected_graph: GraphAdjacencyMatrix,
        directed_graph: GraphAdjacencyMatrix,
        edge: list[int],
    ) -> None:
        assert undirected_graph.contains_edge(edge[0], edge[1])
        assert undirected_graph.contains_edge(edge[1], edge[0])
        assert directed_graph.contains_edge(edge[0], edge[1])

    def __assert_graph_edge_does_not_exist_check(
        self,
        undirected_graph: GraphAdjacencyMatrix,
        directed_graph: GraphAdjacencyMatrix,
        edge: list[int],
    ) -> None:
        assert not undirected_graph.contains_edge(edge[0], edge[1])
        assert not undirected_graph.contains_edge(edge[1], edge[0])
        assert not directed_graph.contains_edge(edge[0], edge[1])

    def __assert_graph_vertex_exists_check(
        self,
        undirected_graph: GraphAdjacencyMatrix,
        directed_graph: GraphAdjacencyMatrix,
        vertex: int,
    ) -> None:
        assert undirected_graph.contains_vertex(vertex)
        assert directed_graph.contains_vertex(vertex)

    def __assert_graph_vertex_does_not_exist_check(
        self,
        undirected_graph: GraphAdjacencyMatrix,
        directed_graph: GraphAdjacencyMatrix,
        vertex: int,
    ) -> None:
        assert not undirected_graph.contains_vertex(vertex)
        assert not directed_graph.contains_vertex(vertex)

    def __generate_random_edges(
        self, vertices: list[int], edge_pick_count: int
    ) -> list[list[int]]:
        """Pick sources from the first half and destinations from the second."""
        assert edge_pick_count <= len(vertices)

        random_source_vertices: list[int] = random.sample(
            vertices[0 : int(len(vertices) / 2)], edge_pick_count
        )
        random_destination_vertices: list[int] = random.sample(
            vertices[int(len(vertices) / 2) :], edge_pick_count
        )
        random_edges: list[list[int]] = []

        for source in random_source_vertices:
            for dest in random_destination_vertices:
                random_edges.append([source, dest])

        return random_edges

    def __generate_graphs(
        self, vertex_count: int, min_val: int, max_val: int, edge_pick_count: int
    ) -> tuple[GraphAdjacencyMatrix, GraphAdjacencyMatrix, list[int], list[list[int]]]:
        """Generate matching undirected/directed graphs plus their inputs."""
        if max_val - min_val + 1 < vertex_count:
            raise ValueError(
                "Will result in duplicate vertices. Either increase "
                "range between min_val and max_val or decrease vertex count"
            )

        # Generate graph input.
        random_vertices: list[int] = random.sample(
            range(min_val, max_val + 1), vertex_count
        )
        random_edges: list[list[int]] = self.__generate_random_edges(
            random_vertices, edge_pick_count
        )

        # Build graphs.
        undirected_graph = GraphAdjacencyMatrix(
            vertices=random_vertices, edges=random_edges, directed=False
        )
        directed_graph = GraphAdjacencyMatrix(
            vertices=random_vertices, edges=random_edges, directed=True
        )

        return undirected_graph, directed_graph, random_vertices, random_edges

    def test_init_check(self) -> None:
        """Test graph initialization with vertices and edges."""
        (
            undirected_graph,
            directed_graph,
            random_vertices,
            random_edges,
        ) = self.__generate_graphs(20, 0, 100, 4)

        for num in random_vertices:
            self.__assert_graph_vertex_exists_check(
                undirected_graph, directed_graph, num
            )

        for edge in random_edges:
            self.__assert_graph_edge_exists_check(
                undirected_graph, directed_graph, edge
            )

        assert not undirected_graph.directed
        assert directed_graph.directed

    def test_contains_vertex(self) -> None:
        random_vertices: list[int] = random.sample(range(101), 20)

        # Build graphs without edges.
        undirected_graph = GraphAdjacencyMatrix(
            vertices=random_vertices, edges=[], directed=False
        )
        directed_graph = GraphAdjacencyMatrix(
            vertices=random_vertices, edges=[], directed=True
        )

        # Test contains_vertex.
        for num in range(101):
            assert (num in random_vertices) == undirected_graph.contains_vertex(num)
            assert (num in random_vertices) == directed_graph.contains_vertex(num)

    def test_add_vertices(self) -> None:
        random_vertices: list[int] = random.sample(range(101), 20)

        # Build empty graphs.
        undirected_graph: GraphAdjacencyMatrix = GraphAdjacencyMatrix(
            vertices=[], edges=[], directed=False
        )
        directed_graph: GraphAdjacencyMatrix = GraphAdjacencyMatrix(
            vertices=[], edges=[], directed=True
        )

        # Run add_vertex.
        for num in random_vertices:
            undirected_graph.add_vertex(num)

        for num in random_vertices:
            directed_graph.add_vertex(num)

        # Test add_vertex worked.
        for num in random_vertices:
            self.__assert_graph_vertex_exists_check(
                undirected_graph, directed_graph, num
            )

    def test_remove_vertices(self) -> None:
        random_vertices: list[int] = random.sample(range(101), 20)

        # Build graphs without edges.
        undirected_graph = GraphAdjacencyMatrix(
            vertices=random_vertices, edges=[], directed=False
        )
        directed_graph = GraphAdjacencyMatrix(
            vertices=random_vertices, edges=[], directed=True
        )

        # Test remove_vertex worked.
        for num in random_vertices:
            self.__assert_graph_vertex_exists_check(
                undirected_graph, directed_graph, num
            )

            undirected_graph.remove_vertex(num)
            directed_graph.remove_vertex(num)

            self.__assert_graph_vertex_does_not_exist_check(
                undirected_graph, directed_graph, num
            )

    def test_add_and_remove_vertices_repeatedly(self) -> None:
        random_vertices1: list[int] = random.sample(range(51), 20)
        random_vertices2: list[int] = random.sample(range(51, 101), 20)

        # Build graphs without edges.
        undirected_graph = GraphAdjacencyMatrix(
            vertices=random_vertices1, edges=[], directed=False
        )
        directed_graph = GraphAdjacencyMatrix(
            vertices=random_vertices1, edges=[], directed=True
        )

        # Test adding and removing vertices.
        for i, _ in enumerate(random_vertices1):
            undirected_graph.add_vertex(random_vertices2[i])
            directed_graph.add_vertex(random_vertices2[i])

            self.__assert_graph_vertex_exists_check(
                undirected_graph, directed_graph, random_vertices2[i]
            )

            undirected_graph.remove_vertex(random_vertices1[i])
            directed_graph.remove_vertex(random_vertices1[i])

            self.__assert_graph_vertex_does_not_exist_check(
                undirected_graph, directed_graph, random_vertices1[i]
            )

        # Remove all vertices.
        for i, _ in enumerate(random_vertices1):
            undirected_graph.remove_vertex(random_vertices2[i])
            directed_graph.remove_vertex(random_vertices2[i])

            self.__assert_graph_vertex_does_not_exist_check(
                undirected_graph, directed_graph, random_vertices2[i]
            )

    def test_contains_edge(self) -> None:
        # Generate graphs and graph input.
        vertex_count = 20
        (
            undirected_graph,
            directed_graph,
            random_vertices,
            random_edges,
        ) = self.__generate_graphs(vertex_count, 0, 100, 4)

        # Generate all possible edges for testing.
        all_possible_edges: list[list[int]] = []
        for i in range(vertex_count - 1):
            for j in range(i + 1, vertex_count):
                all_possible_edges.append([random_vertices[i], random_vertices[j]])
                all_possible_edges.append([random_vertices[j], random_vertices[i]])

        # Test contains_edge function.
        for edge in all_possible_edges:
            if edge in random_edges:
                self.__assert_graph_edge_exists_check(
                    undirected_graph, directed_graph, edge
                )
            elif [edge[1], edge[0]] in random_edges:
                # Since this edge exists for undirected but the reverse may
                # not exist for directed.
                self.__assert_graph_edge_exists_check(
                    undirected_graph, directed_graph, [edge[1], edge[0]]
                )
            else:
                self.__assert_graph_edge_does_not_exist_check(
                    undirected_graph, directed_graph, edge
                )

    def test_add_edge(self) -> None:
        # Generate graph input.
        random_vertices: list[int] = random.sample(range(101), 15)
        random_edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4)

        # Build graphs without edges.
        undirected_graph = GraphAdjacencyMatrix(
            vertices=random_vertices, edges=[], directed=False
        )
        directed_graph = GraphAdjacencyMatrix(
            vertices=random_vertices, edges=[], directed=True
        )

        # Run and test add_edge.
        for edge in random_edges:
            undirected_graph.add_edge(edge[0], edge[1])
            directed_graph.add_edge(edge[0], edge[1])
            self.__assert_graph_edge_exists_check(
                undirected_graph, directed_graph, edge
            )

    def test_remove_edge(self) -> None:
        # Generate graph input and graphs.
        (
            undirected_graph,
            directed_graph,
            random_vertices,
            random_edges,
        ) = self.__generate_graphs(20, 0, 100, 4)

        # Run and test remove_edge.
        for edge in random_edges:
            self.__assert_graph_edge_exists_check(
                undirected_graph, directed_graph, edge
            )
            undirected_graph.remove_edge(edge[0], edge[1])
            directed_graph.remove_edge(edge[0], edge[1])
            self.__assert_graph_edge_does_not_exist_check(
                undirected_graph, directed_graph, edge
            )

    def test_add_and_remove_edges_repeatedly(self) -> None:
        (
            undirected_graph,
            directed_graph,
            random_vertices,
            random_edges,
        ) = self.__generate_graphs(20, 0, 100, 4)

        # Make some more edge options.
        more_random_edges: list[list[int]] = []

        while len(more_random_edges) != len(random_edges):
            edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4)
            for edge in edges:
                if len(more_random_edges) == len(random_edges):
                    break
                elif edge not in more_random_edges and edge not in random_edges:
                    more_random_edges.append(edge)

        for i, _ in enumerate(random_edges):
            undirected_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1])
            directed_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1])

            self.__assert_graph_edge_exists_check(
                undirected_graph, directed_graph, more_random_edges[i]
            )

            undirected_graph.remove_edge(random_edges[i][0], random_edges[i][1])
            directed_graph.remove_edge(random_edges[i][0], random_edges[i][1])

            self.__assert_graph_edge_does_not_exist_check(
                undirected_graph, directed_graph, random_edges[i]
            )

    def test_add_vertex_exception_check(self) -> None:
        (
            undirected_graph,
            directed_graph,
            random_vertices,
            random_edges,
        ) = self.__generate_graphs(20, 0, 100, 4)

        for vertex in random_vertices:
            with pytest.raises(ValueError):
                undirected_graph.add_vertex(vertex)
            with pytest.raises(ValueError):
                directed_graph.add_vertex(vertex)

    def test_remove_vertex_exception_check(self) -> None:
        (
            undirected_graph,
            directed_graph,
            random_vertices,
            random_edges,
        ) = self.__generate_graphs(20, 0, 100, 4)

        for i in range(101):
            if i not in random_vertices:
                with pytest.raises(ValueError):
                    undirected_graph.remove_vertex(i)
                with pytest.raises(ValueError):
                    directed_graph.remove_vertex(i)

    def test_add_edge_exception_check(self) -> None:
        (
            undirected_graph,
            directed_graph,
            random_vertices,
            random_edges,
        ) = self.__generate_graphs(20, 0, 100, 4)

        for edge in random_edges:
            with pytest.raises(ValueError):
                undirected_graph.add_edge(edge[0], edge[1])
            with pytest.raises(ValueError):
                directed_graph.add_edge(edge[0], edge[1])

    def test_remove_edge_exception_check(self) -> None:
        (
            undirected_graph,
            directed_graph,
            random_vertices,
            random_edges,
        ) = self.__generate_graphs(20, 0, 100, 4)

        # Make some more edge options.
        more_random_edges: list[list[int]] = []

        while len(more_random_edges) != len(random_edges):
            edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4)
            for edge in edges:
                if len(more_random_edges) == len(random_edges):
                    break
                elif edge not in more_random_edges and edge not in random_edges:
                    more_random_edges.append(edge)

        for edge in more_random_edges:
            with pytest.raises(ValueError):
                undirected_graph.remove_edge(edge[0], edge[1])
            with pytest.raises(ValueError):
                directed_graph.remove_edge(edge[0], edge[1])

    def test_contains_edge_exception_check(self) -> None:
        (
            undirected_graph,
            directed_graph,
            random_vertices,
            random_edges,
        ) = self.__generate_graphs(20, 0, 100, 4)

        for vertex in random_vertices:
            with pytest.raises(ValueError):
                undirected_graph.contains_edge(vertex, 102)
            with pytest.raises(ValueError):
                directed_graph.contains_edge(vertex, 102)

        with pytest.raises(ValueError):
            undirected_graph.contains_edge(103, 102)
        with pytest.raises(ValueError):
            directed_graph.contains_edge(103, 102)


if __name__ == "__main__":
    unittest.main()
usrbinenv python3 omkar pathak nwachukwu chidiebere use a python dictionary to construct the graph adjacency list type graph data structure that accounts for directed and undirected graphs initialize graph object indicating whether it s directed or undirected directed graph example dgraph graphadjacencylist printdgraph dgraph addedge0 1 0 1 1 dgraph addedge1 2 addedge1 4 addedge1 5 0 1 1 2 4 5 2 4 5 dgraph addedge2 0 addedge2 6 addedge2 7 0 1 1 2 4 5 2 0 6 7 4 5 6 7 dgraph 0 1 1 2 4 5 2 0 6 7 4 5 6 7 printreprdgraph 0 1 1 2 4 5 2 0 6 7 4 5 6 7 undirected graph example ugraph graphadjacencylistdirectedfalse ugraph addedge0 1 0 1 1 0 ugraph addedge1 2 addedge1 4 addedge1 5 0 1 1 0 2 4 5 2 1 4 1 5 1 ugraph addedge2 0 addedge2 6 addedge2 7 0 1 2 1 0 2 4 5 2 1 0 6 7 4 1 5 1 6 2 7 2 ugraph addedge4 5 0 1 2 1 0 2 4 5 2 1 0 6 7 4 1 5 5 1 4 6 2 7 2 printugraph 0 1 2 1 0 2 4 5 2 1 0 6 7 4 1 5 5 1 4 6 2 7 2 printreprugraph 0 1 2 1 0 2 4 5 2 1 0 6 7 4 1 5 5 1 4 6 2 7 2 chargraph graphadjacencylistdirectedfalse chargraph addedge a b a b b a chargraph addedge b c addedge b e addedge b f a b b a c e f c b e b f b chargraph a b b a c e f c b e b f b parameters directed bool indicates if graph is directed or undirected default is true connects vertices together creates and edge from source vertex to destination vertex vertices will be created if not found in graph if both source vertex and destination vertex are both present in the adjacency list add destination vertex to source vertex list of adjacent vertices and add source vertex to destination vertex list of adjacent vertices if only source vertex is present in adjacency list add destination vertex to source vertex list of adjacent vertices then create a new vertex with destination vertex as key and assign a list containing the source vertex as it s first adjacent vertex if only destination vertex is present in adjacency list add source vertex to destination vertex list of adjacent vertices then create a new vertex with source 
vertex as key and assign a list containing the source vertex as it s first adjacent vertex if both source vertex and destination vertex are not present in adjacency list create a new vertex with source vertex as key and assign a list containing the destination vertex as it s first adjacent vertex also create a new vertex with destination vertex as key and assign a list containing the source vertex as it s first adjacent vertex if both source vertex and destination vertex are present in adjacency list add destination vertex to source vertex list of adjacent vertices if only source vertex is present in adjacency list add destination vertex to source vertex list of adjacent vertices and create a new vertex with destination vertex as key which has no adjacent vertex if only destination vertex is present in adjacency list create a new vertex with source vertex as key and assign a list containing destination vertex as first adjacent vertex if both source vertex and destination vertex are not present in adjacency list create a new vertex with source vertex as key and a list containing destination vertex as it s first adjacent vertex then create a new vertex with destination vertex as key which has no adjacent vertex usr bin env python3 omkar pathak nwachukwu chidiebere use a python dictionary to construct the graph adjacency list type graph data structure that accounts for directed and undirected graphs initialize graph object indicating whether it s directed or undirected directed graph example d_graph graphadjacencylist print d_graph d_graph add_edge 0 1 0 1 1 d_graph add_edge 1 2 add_edge 1 4 add_edge 1 5 0 1 1 2 4 5 2 4 5 d_graph add_edge 2 0 add_edge 2 6 add_edge 2 7 0 1 1 2 4 5 2 0 6 7 4 5 6 7 d_graph 0 1 1 2 4 5 2 0 6 7 4 5 6 7 print repr d_graph 0 1 1 2 4 5 2 0 6 7 4 5 6 7 undirected graph example u_graph graphadjacencylist directed false u_graph add_edge 0 1 0 1 1 0 u_graph add_edge 1 2 add_edge 1 4 add_edge 1 5 0 1 1 0 2 4 5 2 1 4 1 5 1 u_graph add_edge 2 0 
add_edge 2 6 add_edge 2 7 0 1 2 1 0 2 4 5 2 1 0 6 7 4 1 5 1 6 2 7 2 u_graph add_edge 4 5 0 1 2 1 0 2 4 5 2 1 0 6 7 4 1 5 5 1 4 6 2 7 2 print u_graph 0 1 2 1 0 2 4 5 2 1 0 6 7 4 1 5 5 1 4 6 2 7 2 print repr u_graph 0 1 2 1 0 2 4 5 2 1 0 6 7 4 1 5 5 1 4 6 2 7 2 char_graph graphadjacencylist directed false char_graph add_edge a b a b b a char_graph add_edge b c add_edge b e add_edge b f a b b a c e f c b e b f b char_graph a b b a c e f c b e b f b parameters directed bool indicates if graph is directed or undirected default is true dictionary of lists connects vertices together creates and edge from source vertex to destination vertex vertices will be created if not found in graph for undirected graphs if both source vertex and destination vertex are both present in the adjacency list add destination vertex to source vertex list of adjacent vertices and add source vertex to destination vertex list of adjacent vertices if only source vertex is present in adjacency list add destination vertex to source vertex list of adjacent vertices then create a new vertex with destination vertex as key and assign a list containing the source vertex as it s first adjacent vertex if only destination vertex is present in adjacency list add source vertex to destination vertex list of adjacent vertices then create a new vertex with source vertex as key and assign a list containing the source vertex as it s first adjacent vertex if both source vertex and destination vertex are not present in adjacency list create a new vertex with source vertex as key and assign a list containing the destination vertex as it s first adjacent vertex also create a new vertex with destination vertex as key and assign a list containing the source vertex as it s first adjacent vertex for directed graphs if both source vertex and destination vertex are present in adjacency list add destination vertex to source vertex list of adjacent vertices if only source vertex is present in adjacency list add destination 
vertex to source vertex list of adjacent vertices and create a new vertex with destination vertex as key which has no adjacent vertex if only destination vertex is present in adjacency list create a new vertex with source vertex as key and assign a list containing destination vertex as first adjacent vertex if both source vertex and destination vertex are not present in adjacency list create a new vertex with source vertex as key and a list containing destination vertex as it s first adjacent vertex then create a new vertex with destination vertex as key which has no adjacent vertex
from __future__ import annotations

from pprint import pformat
from typing import Generic, TypeVar

T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    """Adjacency-list graph that supports directed and undirected edges.

    Vertices are created on demand by ``add_edge``; ``adj_list`` maps every
    known vertex to the list of vertices adjacent to it.

    Parameters:
        directed: True for a directed graph, False for undirected
                  (default is True).
    """

    def __init__(self, directed: bool = True) -> None:
        # Dictionary of lists: vertex -> adjacent vertices.
        self.adj_list: dict[T, list[T]] = {}
        self.directed = directed

    def add_edge(
        self, source_vertex: T, destination_vertex: T
    ) -> GraphAdjacencyList[T]:
        """Connect two vertices with an edge, creating missing vertices.

        For undirected graphs the adjacency is recorded in both directions;
        for directed graphs only the source's adjacency list grows.
        Returns ``self`` so calls can be chained.
        """
        source_known = source_vertex in self.adj_list
        destination_known = destination_vertex in self.adj_list

        if not self.directed:
            if source_known and destination_known:
                # Both endpoints already exist: record adjacency both ways.
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            elif source_known:
                # Only the source exists: link it, then create the
                # destination with the source as its first neighbour.
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            elif destination_known:
                # Only the destination exists: mirror of the case above.
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            else:
                # Neither endpoint exists: create both, each pointing at
                # the other.
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        elif source_known and destination_known:
            # Directed edge between two existing vertices.
            self.adj_list[source_vertex].append(destination_vertex)
        elif source_known:
            # Create the destination as an isolated vertex.
            self.adj_list[source_vertex].append(destination_vertex)
            self.adj_list[destination_vertex] = []
        elif destination_known:
            # Create the source with the destination as first neighbour.
            self.adj_list[source_vertex] = [destination_vertex]
        else:
            # Create both: the source pointing at the destination, the
            # destination with no neighbours yet.
            self.adj_list[source_vertex] = [destination_vertex]
            self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
floydwarshall py the problem is to find the shortest distance between all pairs of vertices in a weighted directed graph that can have negative edge weights param graph 2d array calculated from weightedgei j type graph listlistfloat param v number of vertices type v int return shortest distance between all vertex pairs distanceuv will contain the shortest distance from vertex u to v 1 for all edges from v to n distanceij weightedgei j 3 the algorithm then performs distanceij mindistanceij distanceik distancekj for each possible pair i j of vertices 4 the above is repeated for each vertex k in the graph 5 whenever distanceij is given a new minimum value next vertexij is updated to the next vertexik check vertex k against all other vertices i j looping through rows of graph array looping through columns of graph array src and dst are indices that must be within the array size graphev failure to follow this will result in an error example input enter number of vertices 3 enter number of edges 2 generated graph from vertex and edge inputs inf inf inf inf inf inf inf inf inf 0 0 inf inf inf 0 0 inf inf inf 0 0 specify source destination and weight for edge 1 edge 1 enter source 1 enter destination 2 enter weight 2 specify source destination and weight for edge 2 edge 2 enter source 2 enter destination 1 enter weight 1 expected output from the vertice edge and src dst weight inputs 0 inf inf inf 0 2 inf 1 0 floyd_warshall py the problem is to find the shortest distance between all pairs of vertices in a weighted directed graph that can have negative edge weights param graph 2d array calculated from weight edge i j type graph list list float param v number of vertices type v int return shortest distance between all vertex pairs distance u v will contain the shortest distance from vertex u to v 1 for all edges from v to n distance i j weight edge i j 3 the algorithm then performs distance i j min distance i j distance i k distance k j for each possible pair i j of vertices 
4 the above is repeated for each vertex k in the graph 5 whenever distance i j is given a new minimum value next vertex i j is updated to the next vertex i k check vertex k against all other vertices i j looping through rows of graph array looping through columns of graph array src and dst are indices that must be within the array size graph e v failure to follow this will result in an error example input enter number of vertices 3 enter number of edges 2 generated graph from vertex and edge inputs inf inf inf inf inf inf inf inf inf 0 0 inf inf inf 0 0 inf inf inf 0 0 specify source destination and weight for edge 1 edge 1 enter source 1 enter destination 2 enter weight 2 specify source destination and weight for edge 2 edge 2 enter source 2 enter destination 1 enter weight 1 expected output from the vertice edge and src dst weight inputs 0 inf inf inf 0 2 inf 1 0
def _print_dist(dist, v):
    """Pretty-print the v*v distance matrix, showing INF for unreachable pairs."""
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for row in range(v):
        line = ""
        for col in range(v):
            cell = dist[row][col]
            # Finite distances are truncated to int for display.
            line += ("INF" if cell == float("inf") else str(int(cell))) + "\t"
        print(line)


def floyd_warshall(graph, v):
    """Compute shortest distances between all vertex pairs.

    :param graph: 2D array of edge weights, graph[i][j] = weight(edge(i, j))
                  with float('inf') meaning no direct edge
    :type graph: List[List[float]]
    :param v: number of vertices
    :type v: int
    :return: tuple (dist, v) where dist[u][v] holds the shortest distance
             from vertex u to vertex v
    """
    inf = float("inf")

    # dist starts as a copy of the direct edge weights.
    dist = [[graph[row][col] for col in range(v)] for row in range(v)]

    # Check each intermediate vertex against all (src, dst) pairs and relax.
    for via in range(v):
        for src in range(v):
            for dst in range(v):
                # Skip when no path passes through `via` (avoids inf + -inf).
                if dist[src][via] == inf or dist[via][dst] == inf:
                    continue
                candidate = dist[src][via] + dist[via][dst]
                if candidate < dist[src][dst]:
                    dist[src][dst] = candidate

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    # No edge yet: every off-diagonal entry is infinity, diagonal is zero.
    graph = [[float("inf") for _ in range(v)] for _ in range(v)]
    for node in range(v):
        graph[node][node] = 0.0

    # src and dst are indices that must be within the array size graph[e][v];
    # failure to follow this will result in an error.
    for edge in range(e):
        print("\nEdge ", edge + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
https en wikipedia orgwikibestfirstsearchgreedybfs 0 s are free path whereas 1 s are obstacles k node0 0 4 5 0 none k calculateheuristic 9 n node1 4 3 4 2 none n calculateheuristic 2 l k n n l0 false l sort n l0 true the heuristic here is the manhattan distance could elaborate to offer more than one choice grid testgrids2 gbf greedybestfirstgrid 0 0 lengrid 1 lengrid0 1 x pos for x in gbf getsuccessorsgbf start 1 0 0 1 gbf start posy delta30 gbf start posx delta31 0 1 gbf start posy delta20 gbf start posx delta21 1 0 gbf retracepathgbf start 0 0 gbf search doctest normalizewhitespace 0 0 1 0 2 0 2 1 3 1 4 1 4 2 4 3 4 4 search for the path if a path is not found only the starting position is returned open nodes are sorted using lt returns a list of successors both in the grid and free spaces retrace the path from parents to parents until start node 0 s are free path whereas 1 s are obstacles up left down right k node 0 0 4 5 0 none k calculate_heuristic 9 n node 1 4 3 4 2 none n calculate_heuristic 2 l k n n l 0 false l sort n l 0 true the heuristic here is the manhattan distance could elaborate to offer more than one choice grid test_grids 2 gbf greedybestfirst grid 0 0 len grid 1 len grid 0 1 x pos for x in gbf get_successors gbf start 1 0 0 1 gbf start pos_y delta 3 0 gbf start pos_x delta 3 1 0 1 gbf start pos_y delta 2 0 gbf start pos_x delta 2 1 1 0 gbf retrace_path gbf start 0 0 gbf search doctest normalize_whitespace 0 0 1 0 2 0 2 1 3 1 4 1 4 2 4 3 4 4 search for the path if a path is not found only the starting position is returned open nodes are sorted using __lt__ returns a list of successors both in the grid and free spaces retrace the path from parents to parents until start node
from __future__ import annotations

# A path is a sequence of (y, x) grid positions.
Path = list[tuple[int, int]]

# 0's are free path whereas 1's are obstacles.
TEST_GRIDS = [
    [
        [0, 0, 0, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 1, 0, 0, 0, 0],
        [1, 0, 1, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 1, 0, 0],
    ],
    [
        [0, 0, 0, 1, 1, 0, 0],
        [0, 0, 0, 0, 1, 0, 1],
        [0, 0, 0, 1, 1, 0, 0],
        [0, 1, 0, 0, 1, 0, 0],
        [1, 0, 0, 1, 1, 0, 1],
        [0, 0, 0, 0, 0, 0, 0],
    ],
    [
        [0, 0, 1, 0, 0],
        [0, 1, 0, 0, 0],
        [0, 0, 1, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
    ],
]

# Candidate moves: up, left, down, right (as (dy, dx) offsets).
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])


class Node:
    """A grid cell in the search, carrying its cost and parent link."""

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        # Greedy best-first orders nodes purely by the heuristic.
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """
        The heuristic here is the Manhattan Distance.
        Could elaborate to offer more than one choice.

        >>> k = Node(0, 0, 4, 5, 0, None)
        >>> k.calculate_heuristic()
        9
        >>> n = Node(1, 4, 3, 4, 2, None)
        >>> n.calculate_heuristic()
        2
        >>> l = [k, n]
        >>> n == l[0]
        False
        >>> l.sort()
        >>> n == l[0]
        True
        """
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        # Used by list.sort() so the open list acts as a priority queue.
        return self.f_cost < other.f_cost

    def __eq__(self, other) -> bool:
        # Two nodes are "the same" when they sit on the same grid cell.
        return self.pos == other.pos


class GreedyBestFirst:
    """
    Greedy best-first pathfinder over a 0/1 grid.

    >>> grid = TEST_GRIDS[2]
    >>> gbf = GreedyBestFirst(grid, (0, 0), (len(grid) - 1, len(grid[0]) - 1))
    >>> [x.pos for x in gbf.get_successors(gbf.start)]
    [(1, 0), (0, 1)]
    >>> gbf.retrace_path(gbf.start)
    [(0, 0)]
    """

    def __init__(
        self, grid: list[list[int]], start: tuple[int, int], goal: tuple[int, int]
    ):
        self.grid = grid
        # start/goal are given as (y, x); Node takes (x, y) first.
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        """
        Search for the path.

        If a path is not found, only the starting position is returned.

        >>> grid = TEST_GRIDS[2]
        >>> gbf = GreedyBestFirst(grid, (0, 0), (len(grid) - 1, len(grid[0]) - 1))
        >>> gbf.search()  # doctest: +NORMALIZE_WHITESPACE
        [(0, 0), (1, 0), (2, 0), (2, 1), (3, 1), (4, 1),
         (4, 2), (4, 3), (4, 4)]
        """
        while self.open_nodes:
            # Open nodes are sorted using __lt__ (smallest heuristic first).
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of successors (both in the grid and free spaces)."""
        return [
            Node(
                pos_x,
                pos_y,
                self.target.pos_x,
                self.target.pos_y,
                parent.g_cost + 1,
                parent,
            )
            for action in delta
            # Walrus bindings keep the bounds checks and the Node args in sync.
            if (
                0 <= (pos_x := parent.pos_x + action[1]) < len(self.grid[0])
                and 0 <= (pos_y := parent.pos_y + action[0]) < len(self.grid)
                and self.grid[pos_y][pos_x] == 0
            )
        ]

    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from parents to parents until start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    for idx, grid in enumerate(TEST_GRIDS):
        print(f"==grid-{idx + 1}==")

        init = (0, 0)
        goal = (len(grid) - 1, len(grid[0]) - 1)
        for elem in grid:
            print(elem)

        print("------")

        greedy_bf = GreedyBestFirst(grid, init, goal)
        path = greedy_bf.search()
        if path:
            # Mark the found path with 2's before printing the grid again.
            for pos_x, pos_y in path:
                grid[pos_x][pos_y] = 2

            for elem in grid:
                print(elem)
manuel di lullo https github commanueldilullo description approximization algorithm for minimum vertex cover problem greedy approach uses graphs represented with an adjacency list url https mathworld wolfram comminimumvertexcover html url https cs stackexchange comquestions129017greedyalgorithmforvertexcover greedy apx algorithm for min vertex cover input graph graph stored in an adjacency list where each vertex is represented with an integer example graph 0 1 3 1 0 3 2 0 3 4 3 0 1 2 4 2 3 greedyminvertexcovergraph 0 1 2 4 queue used to store nodes and their rank for each node and his adjacency list add them and the rank of the node to queue using heapq module the queue will be filled like a priority queue heapq works with a min priority queue so i used 1lenv to build it ologn chosenvertices set of chosen vertices while queue isn t empty and there are still edges queue00 is the rank of the node with max rank extract vertex with max rank from queue and add it to chosenvertices remove all arcs adjacent to argmax if v haven t adjacent node skip if argmax is reachable from elem remove argmax from elem s adjacent list and update his rank reorder the queue greedy apx algorithm for min vertex cover input graph graph stored in an adjacency list where each vertex is represented with an integer example graph 0 1 3 1 0 3 2 0 3 4 3 0 1 2 4 2 3 greedy_min_vertex_cover graph 0 1 2 4 queue used to store nodes and their rank for each node and his adjacency list add them and the rank of the node to queue using heapq module the queue will be filled like a priority queue heapq works with a min priority queue so i used 1 len v to build it o log n chosen_vertices set of chosen vertices while queue isn t empty and there are still edges queue 0 0 is the rank of the node with max rank extract vertex with max rank from queue and add it to chosen_vertices remove all arcs adjacent to argmax if v haven t adjacent node skip if argmax is reachable from elem remove argmax from elem s adjacent 
list and update his rank re order the queue
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy approximation algorithm for the minimum vertex cover problem.

    Repeatedly picks the vertex of highest remaining degree, adds it to the
    cover, and deletes all edges incident to it, until no edges remain.

    URL: https://mathworld.wolfram.com/MinimumVertexCover.html

    :param graph: adjacency list, each vertex keyed by an integer
    :return: the set of vertices chosen for the cover

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    # Priority queue of [negated degree, (vertex, adjacency list)]; heapq is a
    # min-heap, so the negated degree puts the highest-degree vertex on top.
    pq: list[list] = []
    for vertex, neighbours in graph.items():
        heapq.heappush(pq, [-len(neighbours), (vertex, neighbours)])

    cover: set[int] = set()

    # Keep choosing vertices while the queue is non-empty and edges remain
    # (a root rank of 0 means every remaining vertex is isolated).
    while pq and pq[0][0] != 0:
        top = heapq.heappop(pq)[1][0]
        cover.add(top)

        # Remove every edge incident to the chosen vertex and repair the
        # (negated) rank of each affected entry.
        for entry in pq:
            if entry[0] == 0:
                continue
            adjacent = entry[1][1]
            if top in adjacent:
                adjacent.remove(top)
                entry[0] += 1
        heapq.heapify(pq)
    return cover


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
Finding the longest distance in a directed acyclic graph using Kahn's algorithm, with the graph given as an adjacency list.
from collections import deque


def longest_distance(graph):
    """
    Print the number of vertices on the longest path in a directed acyclic
    graph, computed with Kahn's algorithm (BFS over a topological ordering).

    :param graph: adjacency list mapping each vertex ``0..n-1`` to the list
        of its successors
    """
    indegree = [0] * len(graph)
    queue = deque()  # deque: O(1) popleft instead of list.pop(0)'s O(n)
    long_dist = [1] * len(graph)  # a path contains at least its own vertex

    # Count incoming edges for every vertex.
    for values in graph.values():
        for i in values:
            indegree[i] += 1

    # Seed the queue with every source vertex (in-degree zero).
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.popleft()
        for x in graph[vertex]:
            indegree[x] -= 1
            # Relax: reaching x through vertex yields a longer path.
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


if __name__ == "__main__":
    graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
    longest_distance(graph)
Kahn's algorithm finds a topological ordering of a directed acyclic graph using BFS, with the graph given as an adjacency list.
from collections import deque


def topological_sort(graph):
    """
    Print a topological ordering of a directed acyclic graph using Kahn's
    algorithm (BFS), or ``Cycle exists`` if the graph is not acyclic.

    :param graph: adjacency list mapping each vertex ``0..n-1`` to the list
        of its successors
    """
    indegree = [0] * len(graph)
    queue = deque()  # deque: O(1) popleft instead of list.pop(0)'s O(n)
    topo = []  # vertices in topological order
    cnt = 0  # number of vertices processed; < len(graph) implies a cycle

    # Count incoming edges for every vertex.
    for values in graph.values():
        for i in values:
            indegree[i] += 1

    # Seed the queue with every source vertex (in-degree zero).
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.popleft()
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


if __name__ == "__main__":
    graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
    topological_sort(graph)
from __future__ import annotations

import random

# Adjacency-list representation of this graph:
# https://en.wikipedia.org/wiki/File:Single_run_of_Karger%E2%80%99s_Mincut_algorithm.svg
TEST_GRAPH = {
    "1": ["2", "3", "4", "5"],
    "2": ["1", "3", "4", "5"],
    "3": ["1", "2", "4", "5", "10"],
    "4": ["1", "2", "3", "5", "6"],
    "5": ["1", "2", "3", "4", "7"],
    "6": ["7", "8", "9", "10", "4"],
    "7": ["6", "8", "9", "10", "5"],
    "8": ["6", "7", "9", "10"],
    "9": ["6", "7", "8", "10"],
    "10": ["6", "7", "8", "9", "3"],
}


def partition_graph(graph: dict[str, list[str]]) -> set[tuple[str, str]]:
    """
    Partitions a graph using Karger's algorithm.

    Implemented from pseudocode found here:
    https://en.wikipedia.org/wiki/Karger%27s_algorithm

    This function involves random choices, meaning it will not give
    consistent outputs.

    Args:
        graph: A dictionary containing adjacency lists for the graph.
            Nodes must be strings.

    Returns:
        The cutset of the cut found by Karger's algorithm.

    >>> graph = {'0': ['1'], '1': ['0']}
    >>> partition_graph(graph)
    {('0', '1')}
    """
    # Dict that maps each contracted node to the set of original nodes it
    # contains.
    contracted_nodes = {node: {node} for node in graph}

    # Work on a copy of the adjacency lists so *graph* stays untouched.
    graph_copy = {node: graph[node][:] for node in graph}

    while len(graph_copy) > 2:
        # Choose a random edge (u, v).
        u = random.choice(list(graph_copy.keys()))
        v = random.choice(graph_copy[u])

        # Contract edge (u, v) to a new node uv.
        uv = u + v
        uv_neighbors = list(set(graph_copy[u] + graph_copy[v]))
        uv_neighbors.remove(u)
        uv_neighbors.remove(v)
        graph_copy[uv] = uv_neighbors
        for neighbor in uv_neighbors:
            graph_copy[neighbor].append(uv)

        contracted_nodes[uv] = set(contracted_nodes[u].union(contracted_nodes[v]))

        # Remove nodes u and v (and every arc that pointed at them).
        del graph_copy[u]
        del graph_copy[v]
        for neighbor in uv_neighbors:
            if u in graph_copy[neighbor]:
                graph_copy[neighbor].remove(u)
            if v in graph_copy[neighbor]:
                graph_copy[neighbor].remove(v)

    # Find cutset: every original edge crossing the two remaining groups.
    groups = [contracted_nodes[node] for node in graph_copy]
    return {
        (node, neighbor)
        for node in groups[0]
        for neighbor in graph[node]
        if neighbor in groups[1]
    }


if __name__ == "__main__":
    print(partition_graph(TEST_GRAPH))
undirected unweighted graph for running markov chain algorithm running markov chain algorithm and calculating the number of times each node is visited transitions a a 0 9 a b 0 075 a c 0 025 b a 0 15 b b 0 8 b c 0 05 c a 0 25 c b 0 25 c c 0 5 result gettransitions a transitions 5000 result a result b result c true undirected unweighted graph for running markov chain algorithm running markov chain algorithm and calculating the number of times each node is visited transitions a a 0 9 a b 0 075 a c 0 025 b a 0 15 b b 0 8 b c 0 05 c a 0 25 c b 0 25 c c 0 5 result get_transitions a transitions 5000 result a result b result c true
from __future__ import annotations from collections import Counter from random import random class MarkovChainGraphUndirectedUnweighted: def __init__(self): self.connections = {} def add_node(self, node: str) -> None: self.connections[node] = {} def add_transition_probability( self, node1: str, node2: str, probability: float ) -> None: if node1 not in self.connections: self.add_node(node1) if node2 not in self.connections: self.add_node(node2) self.connections[node1][node2] = probability def get_nodes(self) -> list[str]: return list(self.connections) def transition(self, node: str) -> str: current_probability = 0 random_value = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def get_transitions( start: str, transitions: list[tuple[str, str, float]], steps: int ) -> dict[str, int]: graph = MarkovChainGraphUndirectedUnweighted() for node1, node2, probability in transitions: graph.add_transition_probability(node1, node2, probability) visited = Counter(graph.get_nodes()) node = start for _ in range(steps): node = graph.transition(node) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
def matching_min_vertex_cover(graph: dict) -> set:
    """APX algorithm for the minimum vertex cover problem using a
    maximal-matching approach.

    URL: https://mathworld.wolfram.com/MinimumVertexCover.html

    :param graph: adjacency list, each vertex keyed by an integer
    :return: the set of vertices chosen for the cover

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> matching_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    cover: set = set()
    remaining = get_edges(graph)

    # Repeatedly take an arbitrary remaining edge, add BOTH endpoints to the
    # cover, then drop every edge touching either endpoint.
    while remaining:
        tail, head = remaining.pop()
        cover.update((tail, head))
        for arc in list(remaining):
            if tail in arc or head in arc:
                remaining.discard(arc)
    return cover


def get_edges(graph: dict) -> set:
    """Return every directed pair (u, v) present in the adjacency list.

    :param graph: adjacency list, each vertex keyed by an integer

    >>> sorted(get_edges({0: [1], 1: [0]}))
    [(0, 1), (1, 0)]
    """
    return {
        (src, dst)
        for src, neighbours in graph.items()
        for dst in neighbours
    }


if __name__ == "__main__":
    import doctest

    doctest.testmod()
def min_path_sum(grid: list) -> int:
    """
    Find the path from top left to bottom right of array of numbers
    with the lowest possible sum and return the sum along this path.

    The caller's grid is left unmodified: the DP runs on an internal copy
    (previously the input rows were overwritten in place).

    >>> min_path_sum([
    ...     [1, 3, 1],
    ...     [1, 5, 1],
    ...     [4, 2, 1],
    ... ])
    7
    >>> min_path_sum([
    ...     [1, 0, 5, 6, 7],
    ...     [8, 9, 0, 4, 2],
    ...     [4, 4, 4, 5, 1],
    ...     [9, 6, 3, 1, 0],
    ...     [8, 4, 3, 2, 7],
    ... ])
    20
    >>> min_path_sum(None)
    Traceback (most recent call last):
        ...
    TypeError: The grid does not contain the appropriate information
    >>> min_path_sum([[]])
    Traceback (most recent call last):
        ...
    TypeError: The grid does not contain the appropriate information
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    # Work on a shallow row-wise copy so the caller's grid is not clobbered.
    grid = [row[:] for row in grid]

    # First row can only be reached by moving right: prefix sums.
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    # Each later cell costs its value plus the cheaper of (left, above).
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    """
    Accumulate, in place, the minimal path sums for one row given the
    already-filled row above it, and return the mutated row.

    >>> fill_row([2, 2, 2], [1, 2, 3])
    [3, 4, 5]
    """
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
class Graph:
    """Data structure to store graphs (based on adjacency lists)."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        # vertex -> {neighbour: edge weight}
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph (no-op if already present)."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected edge to the graph; self-loops are ignored."""
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """
        For Boruvka's algorithm the weights should be distinct.
        Converts the weights to be distinct.
        """
        edges = self.get_edges()
        # get_edges() reports every edge twice (once per direction); removing
        # the reverse of each visited edge leaves one entry per edge.
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))

        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        # Bump duplicate weights so the sequence is strictly increasing.
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1

        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        """Returns string representation of the graph."""
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges in the graph as (tail, head, weight) tuples;
        every undirected edge appears once per direction."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Returns all vertices in the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """
        Builds a graph from the given set of vertices and edges.

        :param vertices: iterable of vertex identifiers (optional)
        :param edges: iterable of (head, tail, weight) triples (optional)
        """
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            # BUGFIX: was `edge = []`, which left `edges` as None and made the
            # loop below raise TypeError whenever `edges` was omitted.
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint set union-and-find for Boruvka's algorithm."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            """Create a singleton set for *item* (idempotent)."""
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            """Return the set representative, compressing the path."""
            if item not in self.parent:
                return self.make_set(item)

            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            """Merge two disjoint sets (union by rank); returns the root."""
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """
        Implementation of Boruvka's algorithm.

        >>> g = Graph()
        >>> g = Graph.build([0, 1, 2, 3], [[0, 1, 1], [0, 2, 1], [2, 3, 1]])
        >>> g.distinct_weight()
        >>> bg = Graph.boruvka_mst(g)
        >>> print(bg)
        1 -> 0 == 1
        2 -> 0 == 2
        0 -> 1 == 1
        0 -> 2 == 2
        3 -> 2 == 3
        2 -> 3 == 3
        """
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            # cheap_edge maps each component root to its cheapest outgoing
            # edge found so far (-1 = none yet).
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            # Drop the duplicate reversed copy of every edge.
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            # Merge each component with the endpoint of its cheapest edge.
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)

                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
Examples: kruskal(4, [(0, 1, 3), (1, 2, 5), (2, 3, 1)]) == [(2, 3, 1), (0, 1, 3), (1, 2, 5)]; kruskal(4, [(0, 1, 3), (1, 2, 5), (2, 3, 1), (0, 2, 1), (0, 3, 2)]) == [(2, 3, 1), (0, 2, 1), (0, 1, 3)]; kruskal(4, [(0, 1, 3), (1, 2, 5), (2, 3, 1), (0, 2, 1), (0, 3, 2), (2, 1, 1)]) == [(2, 3, 1), (0, 2, 1), (2, 1, 1)]. (The interactive entry point below is excluded from coverage.)
def kruskal(
    num_nodes: int, edges: list[tuple[int, int, int]]
) -> list[tuple[int, int, int]]:
    """Return the edges of a minimum spanning forest, cheapest first.

    :param num_nodes: number of vertices, labelled 0..num_nodes-1
    :param edges: (node_a, node_b, cost) triples

    >>> kruskal(4, [(0, 1, 3), (1, 2, 5), (2, 3, 1)])
    [(2, 3, 1), (0, 1, 3), (1, 2, 5)]
    >>> kruskal(4, [(0, 1, 3), (1, 2, 5), (2, 3, 1), (0, 2, 1), (0, 3, 2)])
    [(2, 3, 1), (0, 2, 1), (0, 1, 3)]
    """
    parent = list(range(num_nodes))

    def find_parent(i):
        # Union-find lookup with path compression.
        if i != parent[i]:
            parent[i] = find_parent(parent[i])
        return parent[i]

    mst = []
    total_cost = 0  # running weight of the forest

    # Greedily take each cheapest edge that joins two distinct components.
    for node_a, node_b, cost in sorted(edges, key=lambda edge: edge[2]):
        root_a = find_parent(node_a)
        root_b = find_parent(node_b)
        if root_a != root_b:
            total_cost += cost
            mst.append((node_a, node_b, cost))
            parent[root_a] = root_b

    return mst


if __name__ == "__main__":
    num_nodes, num_edges = list(map(int, input().strip().split()))

    edges = []

    for _ in range(num_edges):
        node1, node2, cost = (int(x) for x in input().strip().split())
        edges.append((node1, node2, cost))

    kruskal(num_nodes, edges)
disjoint set node to store the parent and rank disjoint set datastructure map from node name to the node object create a new set with x as its member find the set x belongs to with pathcompression helper function for union operation merge 2 disjoint sets connections map from the node to the neighbouring nodes with weights add a node only if its not present in the graph add an edge with the given weight kruskal s algorithm to generate a minimum spanning tree mst of a graph details https en wikipedia orgwikikruskal27salgorithm example g1 graphundirectedweightedint g1 addedge1 2 1 g1 addedge2 3 2 g1 addedge3 4 1 g1 addedge3 5 100 removed in mst g1 addedge4 5 5 assert 5 in g1 connections3 mst g1 kruskal assert 5 not in mst connections3 g2 graphundirectedweightedstr g2 addedge a b 1 g2 addedge b c 2 g2 addedge c d 1 g2 addedge c e 100 removed in mst g2 addedge d e 5 assert e in g2 connectionsc mst g2 kruskal assert e not in mst connections c getting the edges in ascending order of weights creating the disjoint set mst generation disjoint set node to store the parent and rank disjoint set datastructure map from node name to the node object create a new set with x as its member find the set x belongs to with path compression helper function for union operation merge 2 disjoint sets connections map from the node to the neighbouring nodes with weights add a node only if its not present in the graph add an edge with the given weight kruskal s algorithm to generate a minimum spanning tree mst of a graph details https en wikipedia org wiki kruskal 27s_algorithm example g1 graphundirectedweighted int g1 add_edge 1 2 1 g1 add_edge 2 3 2 g1 add_edge 3 4 1 g1 add_edge 3 5 100 removed in mst g1 add_edge 4 5 5 assert 5 in g1 connections 3 mst g1 kruskal assert 5 not in mst connections 3 g2 graphundirectedweighted str g2 add_edge a b 1 g2 add_edge b c 2 g2 add_edge c d 1 g2 add_edge c e 100 removed in mst g2 add_edge d e 5 assert e in g2 connections c mst g2 kruskal assert e not in 
mst connections c getting the edges in ascending order of weights creating the disjoint set mst generation
from __future__ import annotations from typing import Generic, TypeVar T = TypeVar("T") class DisjointSetTreeNode(Generic[T]): def __init__(self, data: T) -> None: self.data = data self.parent = self self.rank = 0 class DisjointSetTree(Generic[T]): def __init__(self) -> None: self.map: dict[T, DisjointSetTreeNode[T]] = {} def make_set(self, data: T) -> None: self.map[data] = DisjointSetTreeNode(data) def find_set(self, data: T) -> DisjointSetTreeNode[T]: elem_ref = self.map[data] if elem_ref != elem_ref.parent: elem_ref.parent = self.find_set(elem_ref.parent.data) return elem_ref.parent def link( self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T] ) -> None: if node1.rank > node2.rank: node2.parent = node1 else: node1.parent = node2 if node1.rank == node2.rank: node2.rank += 1 def union(self, data1: T, data2: T) -> None: self.link(self.find_set(data1), self.find_set(data2)) class GraphUndirectedWeighted(Generic[T]): def __init__(self) -> None: self.connections: dict[T, dict[T, int]] = {} def add_node(self, node: T) -> None: if node not in self.connections: self.connections[node] = {} def add_edge(self, node1: T, node2: T, weight: int) -> None: self.add_node(node1) self.add_node(node2) self.connections[node1][node2] = weight self.connections[node2][node1] = weight def kruskal(self) -> GraphUndirectedWeighted[T]: edges = [] seen = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start)) edges.append((start, end, self.connections[start][end])) edges.sort(key=lambda x: x[2]) disjoint_set = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(node) num_edges = 0 index = 0 graph = GraphUndirectedWeighted[T]() while num_edges < len(self.connections) - 1: u, v, w = edges[index] index += 1 parent_u = disjoint_set.find_set(u) parent_v = disjoint_set.find_set(v) if parent_u != parent_v: num_edges += 1 graph.add_edge(u, v, w) disjoint_set.union(u, v) return graph
import sys
from collections import defaultdict


class Heap:
    """Min-heap that also tracks every vertex's index inside the heap array,
    so that a vertex's key can be decreased in place (needed by Prim's)."""

    def __init__(self):
        # node_position[vertex] == current index of vertex inside the heap
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        # Sift the element at index *start* downwards until the min-heap
        # property holds again.
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                # Swap keys and vertex labels, then fix the position map.
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        # Update function: sift *val* upwards from *index* after the value of
        # a node in the min-heap has decreased.
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                # Shift the parent down one level and keep climbing.
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                # Found the final slot for val.
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            # Reached the root without breaking: val becomes the new minimum.
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        # Build a min-heap in place from an arbitrary array.
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        # Pop the root vertex; its key is replaced by +infinity and sifted
        # down so it sinks to the bottom.
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    """
    Prim's algorithm: return the edges of a minimum spanning tree of the
    graph given as adjacency_list[vertex] = [[neighbour, weight], ...].

    >>> adjacency_list = {0: [[1, 1], [3, 3]],
    ...                   1: [[0, 1], [2, 6], [3, 5], [4, 1]],
    ...                   2: [[1, 6], [4, 5], [5, 2]],
    ...                   3: [[0, 3], [1, 5], [4, 1]],
    ...                   4: [[1, 1], [2, 5], [3, 1], [5, 4]],
    ...                   5: [[2, 2], [4, 4]]}
    >>> prisms_algorithm(adjacency_list)
    [(0, 1), (1, 4), (4, 3), (4, 5), (5, 2)]
    """
    heap = Heap()

    visited = [0] * len(adjacency_list)
    # nbr_tv[v] = neighbouring tree vertex through which v is cheapest to reach
    nbr_tv = [-1] * len(adjacency_list)

    # Minimum distance of explored vertex with neighboring vertex of partial
    # tree formed in graph.
    distance_tv = []  # heap of distances of vertices from their neighbouring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    # Seed the heap with the distances of vertex 0's neighbours.
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                # Decrease-key when a cheaper connection to neighbor appears.
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
prim s also known as jarnk s algorithm is a greedy algorithm that finds a minimum spanning tree for a weighted undirected graph this means it finds a subset of the edges that forms a tree that includes every vertex where the total weight of all the edges in the tree is minimized the algorithm operates by building this tree one vertex at a time from an arbitrary starting vertex at each step adding the cheapest possible connection from the tree to another vertex heap helper function get the position of the parent of the current node getparentposition1 0 getparentposition2 0 heap helper function get the position of the left child of the current node getchildleftposition0 1 heap helper function get the position of the right child of the current node getchildrightposition0 2 minimum priority queue class functions isempty function to check if the priority queue is empty push function to add an element with given priority to the queue extractmin function to remove and return the element with lowest weight highest priority updatekey function to update the weight of the given key bubbleup helper function to place a node at the proper position upward movement bubbledown helper function to place a node at the proper position downward movement swapnodes helper function to swap the nodes at the given positions queue minpriorityqueue queue push1 1000 queue push2 100 queue push3 4000 queue push4 3000 queue extractmin 2 queue updatekey4 50 queue extractmin 4 queue extractmin 1 queue extractmin 3 check if the priority queue is empty add an element with given priority to the queue remove and return the element with lowest weight highest priority update the weight of the given key place a node at the proper position upward movement to be used internally only place a node at the proper position downward movement to be used internally only swap the nodes at the given positions graph undirected weighted class functions addnode function to add a node in the graph addedge function to add 
an edge between 2 nodes in the graph add a node in the graph if it is not in the graph add an edge between 2 nodes in the graph graph graphundirectedweighted graph addedgea b 3 graph addedgeb c 10 graph addedgec d 5 graph addedgea c 15 graph addedgeb d 100 dist parent primsalgograph absdista distb 3 absdistd distb 15 absdista distc 13 prim s algorithm for minimum spanning tree initialization running prim s algorithm heap helper function get the position of the parent of the current node get_parent_position 1 0 get_parent_position 2 0 heap helper function get the position of the left child of the current node get_child_left_position 0 1 heap helper function get the position of the right child of the current node get_child_right_position 0 2 minimum priority queue class functions is_empty function to check if the priority queue is empty push function to add an element with given priority to the queue extract_min function to remove and return the element with lowest weight highest priority update_key function to update the weight of the given key _bubble_up helper function to place a node at the proper position upward movement _bubble_down helper function to place a node at the proper position downward movement _swap_nodes helper function to swap the nodes at the given positions queue minpriorityqueue queue push 1 1000 queue push 2 100 queue push 3 4000 queue push 4 3000 queue extract_min 2 queue update_key 4 50 queue extract_min 4 queue extract_min 1 queue extract_min 3 check if the priority queue is empty add an element with given priority to the queue remove and return the element with lowest weight highest priority update the weight of the given key place a node at the proper position upward movement to be used internally only place a node at the proper position downward movement to be used internally only swap the nodes at the given positions graph undirected weighted class functions add_node function to add a node in the graph add_edge function to add an edge 
between 2 nodes in the graph add a node in the graph if it is not in the graph add an edge between 2 nodes in the graph graph graphundirectedweighted graph add_edge a b 3 graph add_edge b c 10 graph add_edge c d 5 graph add_edge a c 15 graph add_edge b d 100 dist parent prims_algo graph abs dist a dist b 3 abs dist d dist b 15 abs dist a dist c 13 prim s algorithm for minimum spanning tree initialization running prim s algorithm
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

# Generic element type stored in the priority queue / used as graph node.
T = TypeVar("T")


def get_parent_position(position: int) -> int:
    """Heap helper: index of the parent of the node at ``position``."""
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    """Heap helper: index of the left child of the node at ``position``."""
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    """Heap helper: index of the right child of the node at ``position``."""
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    """Array-backed binary min-heap keyed by integer weight.

    ``position_map`` tracks each element's index inside ``heap`` so that
    ``update_key`` can locate an element in O(1) and fix the heap in
    O(log n) instead of scanning the whole array.
    """

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []  # (element, weight) pairs
        self.position_map: dict[T, int] = {}  # element -> index in ``heap``
        self.elements: int = 0  # number of stored elements

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        """Return True if the queue holds no elements."""
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        """Insert ``elem`` with priority ``weight``."""
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        """Remove and return the element with the smallest weight."""
        if self.elements > 1:
            # Move the last element to the root so popping removes the min.
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            # Restore the heap invariant from the new root downwards.
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        """Change the weight of ``elem`` and restore the heap invariant."""
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            # The root can only move downwards.
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Move ``elem`` upwards while it is lighter than its parent.
        # To be used internally only.
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Move ``elem`` downwards while a child is lighter than it.
        # To be used internally only.
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            # Prefer the right child only when it is strictly the lighter one;
            # otherwise fall through to the left-child check below.
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the heap entries at the given positions and keep
        # ``position_map`` in sync.  To be used internally only.
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as node -> {neighbour: weight}."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}  # adjacency mapping
        self.nodes: int = 0  # number of distinct nodes

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        """Add ``node`` to the graph if it is not present already."""
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        """Add an undirected edge of the given ``weight`` between two nodes."""
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    """Prim's algorithm for a minimum spanning tree.

    Returns ``(dist, parent)`` where ``dist`` maps each node to the weight
    of the edge attaching it to the tree and ``parent`` maps each node to
    its predecessor in the tree (``None`` for the starting node).

    >>> graph = GraphUndirectedWeighted()
    >>> graph.add_edge("a", "b", 3)
    >>> graph.add_edge("b", "c", 10)
    >>> graph.add_edge("c", "d", 5)
    >>> graph.add_edge("a", "c", 15)
    >>> graph.add_edge("b", "d", 100)
    >>> dist, parent = prims_algo(graph)
    >>> abs(dist["a"] - dist["b"])
    3
    """
    # Initialization: every node starts "infinitely" far from the tree.
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # Running Prim's algorithm: seed the tree with an arbitrary first node.
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
https://github.com/bhushanborole — the input graph for the PageRank algorithm is given as an adjacency matrix (row i, column j is 1 when there is a directed edge from node i to node j): A B C | A: 0 1 1 | B: 0 0 1 | C: 1 0 0
graph = [[0, 1, 1], [0, 0, 1], [1, 0, 0]] class Node: def __init__(self, name): self.name = name self.inbound = [] self.outbound = [] def add_inbound(self, node): self.inbound.append(node) def add_outbound(self, node): self.outbound.append(node) def __repr__(self): return f"<node={self.name} inbound={self.inbound} outbound={self.outbound}>" def page_rank(nodes, limit=3, d=0.85): ranks = {} for node in nodes: ranks[node.name] = 1 outbounds = {} for node in nodes: outbounds[node.name] = len(node.outbound) for i in range(limit): print(f"======= Iteration {i + 1} =======") for _, node in enumerate(nodes): ranks[node.name] = (1 - d) + d * sum( ranks[ib] / outbounds[ib] for ib in node.inbound ) print(ranks) def main(): names = list(input("Enter Names of the Nodes: ").split()) nodes = [Node(name) for name in names] for ri, row in enumerate(graph): for ci, col in enumerate(row): if col == 1: nodes[ci].add_inbound(names[ri]) nodes[ri].add_outbound(names[ci]) print("======= Nodes =======") for node in nodes: print(node) page_rank(nodes) if __name__ == "__main__": main()
manuel di lullo https github commanueldilullo description random graphs generator uses graphs represented with an adjacency list url https en wikipedia orgwikirandomgraph generate a random graph input verticesnumber number of vertices probability probability that a generic edge u v exists directed if true graph will be a directed graph otherwise it will be an undirected graph examples random seed1 randomgraph4 0 5 0 1 1 0 2 3 2 1 3 3 1 2 random seed1 randomgraph4 0 5 true 0 1 1 2 3 2 3 3 if probability is greater or equal than 1 then generate a complete graph if probability is lower or equal than 0 then return a graph without edges for each couple of nodes add an edge from u to v if the number randomly generated is greater than probability probability if the graph is undirected add an edge in from j to i either generate a complete graph with verticesnumber vertices input verticesnumber number of vertices directed false if the graph is undirected true otherwise example completegraph3 0 1 2 1 0 2 2 0 1 generate a random graph input vertices_number number of vertices probability probability that a generic edge u v exists directed if true graph will be a directed graph otherwise it will be an undirected graph examples random seed 1 random_graph 4 0 5 0 1 1 0 2 3 2 1 3 3 1 2 random seed 1 random_graph 4 0 5 true 0 1 1 2 3 2 3 3 if probability is greater or equal than 1 then generate a complete graph if probability is lower or equal than 0 then return a graph without edges for each couple of nodes add an edge from u to v if the number randomly generated is greater than probability probability if the graph is undirected add an edge in from j to i either generate a complete graph with vertices_number vertices input vertices_number number of vertices directed false if the graph is undirected true otherwise example complete_graph 3 0 1 2 1 0 2 2 0 1
import random def random_graph( vertices_number: int, probability: float, directed: bool = False ) -> dict: graph: dict = {i: [] for i in range(vertices_number)} if probability >= 1: return complete_graph(vertices_number) if probability <= 0: return graph for i in range(vertices_number): for j in range(i + 1, vertices_number): if random.random() < probability: graph[i].append(j) if not directed: graph[j].append(i) return graph def complete_graph(vertices_number: int) -> dict: return { i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number) } if __name__ == "__main__": import doctest doctest.testmod()
Kosaraju's algorithm. Input format: first line holds n (number of nodes) and m (number of edges); the next m lines each hold one directed edge. The script builds both the input graph and its reversed graph to extract the strongly connected components.
from __future__ import annotations def dfs(u): global graph, reversed_graph, scc, component, visit, stack if visit[u]: return visit[u] = True for v in graph[u]: dfs(v) stack.append(u) def dfs2(u): global graph, reversed_graph, scc, component, visit, stack if visit[u]: return visit[u] = True component.append(u) for v in reversed_graph[u]: dfs2(v) def kosaraju(): global graph, reversed_graph, scc, component, visit, stack for i in range(n): dfs(i) visit = [False] * n for i in stack[::-1]: if visit[i]: continue component = [] dfs2(i) scc.append(component) return scc if __name__ == "__main__": n, m = list(map(int, input().strip().split())) graph: list[list[int]] = [[] for _ in range(n)] reversed_graph: list[list[int]] = [[] for i in range(n)] for _ in range(m): u, v = list(map(int, input().strip().split())) graph[u].append(v) reversed_graph[v].append(u) stack: list[int] = [] visit: list[bool] = [False] * n scc: list[int] = [] component: list[int] = [] print(kosaraju())
https en wikipedia orgwikistronglyconnectedcomponent finding strongly connected components in directed graph use depth first search to sort graph at this time graph is the same as input topologysorttestgraph1 0 5 false 1 2 4 3 0 topologysorttestgraph2 0 6 false 2 1 5 4 3 0 use depth first search to find strongliy connected vertices now graph is reversed findcomponents0 1 1 2 2 0 0 5 false 0 1 2 findcomponents0 2 1 0 2 0 1 0 6 false 0 2 1 this function takes graph as a parameter and then returns the list of strongly connected components stronglyconnectedcomponentstestgraph1 0 1 2 3 4 stronglyconnectedcomponentstestgraph2 0 2 1 3 5 4 use depth first search to sort graph at this time graph is the same as input topology_sort test_graph_1 0 5 false 1 2 4 3 0 topology_sort test_graph_2 0 6 false 2 1 5 4 3 0 use depth first search to find strongliy connected vertices now graph is reversed find_components 0 1 1 2 2 0 0 5 false 0 1 2 find_components 0 2 1 0 2 0 1 0 6 false 0 2 1 this function takes graph as a parameter and then returns the list of strongly connected components strongly_connected_components test_graph_1 0 1 2 3 4 strongly_connected_components test_graph_2 0 2 1 3 5 4
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def topology_sort( graph: dict[int, list[int]], vert: int, visited: list[bool] ) -> list[int]: visited[vert] = True order = [] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(graph, neighbour, visited) order.append(vert) return order def find_components( reversed_graph: dict[int, list[int]], vert: int, visited: list[bool] ) -> list[int]: visited[vert] = True component = [vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(reversed_graph, neighbour, visited) return component def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]: visited = len(graph) * [False] reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(vert) order = [] for i, was_visited in enumerate(visited): if not was_visited: order += topology_sort(graph, i, visited) components_list = [] visited = len(graph) * [False] for i in range(len(graph)): vert = order[len(graph) - i - 1] if not visited[vert]: component = find_components(reversed_graph, vert, visited) components_list.append(component) return components_list
tarjan s algo for finding strongly connected components in a directed graph uses two main attributes of each node to track reachability the index of that node within a componentindex and the lowest index reachable from that nodelowlink we then perform a dfs of the each component making sure to update these parameters for each node and saving the nodes we visit on the way if ever we find that the lowest reachable node from a current node is equal to the index of the current node then it must be the root of a strongly connected component and so we save it and it s equireachable vertices as a strongly connected component complexity strongconnect is called at most once for each node and has a complexity of oe as it is dfs therefore this has complexity ov e for a graph g v e tarjan2 3 4 2 3 4 0 1 3 0 1 2 1 4 3 1 2 0 tarjan 0 1 2 3 a 0 1 2 3 4 5 4 b 1 0 3 2 5 4 0 n 7 sortedtarjancreategraphn listzipa b sorted tarjancreategraphn listzipa 1 b 1 true a 0 1 2 3 4 5 6 b 0 1 2 3 4 5 6 sortedtarjancreategraphn listzipa b 0 1 2 3 4 5 6 n 7 source 0 0 1 2 3 3 4 4 6 target 1 3 2 0 1 4 5 6 5 edges listzipsource target creategraphn edges 1 3 2 0 1 4 5 6 5 test tarjan s algo for finding strongly connected components in a directed graph uses two main attributes of each node to track reachability the index of that node within a component index and the lowest index reachable from that node lowlink we then perform a dfs of the each component making sure to update these parameters for each node and saving the nodes we visit on the way if ever we find that the lowest reachable node from a current node is equal to the index of the current node then it must be the root of a strongly connected component and so we save it and it s equireachable vertices as a strongly connected component complexity strong_connect is called at most once for each node and has a complexity of o e as it is dfs therefore this has complexity o v e for a graph g v e tarjan 2 3 4 2 3 4 0 1 3 0 1 2 1 4 3 1 2 0 tarjan 0 
1 2 3 a 0 1 2 3 4 5 4 b 1 0 3 2 5 4 0 n 7 sorted tarjan create_graph n list zip a b sorted tarjan create_graph n list zip a 1 b 1 true a 0 1 2 3 4 5 6 b 0 1 2 3 4 5 6 sorted tarjan create_graph n list zip a b 0 1 2 3 4 5 6 the number when this node is seen lowest rank node reachable from here n 7 source 0 0 1 2 3 3 4 4 6 target 1 3 2 0 1 4 5 6 5 edges list zip source target create_graph n edges 1 3 2 0 1 4 5 6 5 test
from collections import deque def tarjan(g: list[list[int]]) -> list[list[int]]: n = len(g) stack: deque[int] = deque() on_stack = [False for _ in range(n)] index_of = [-1 for _ in range(n)] lowlink_of = index_of[:] def strong_connect(v: int, index: int, components: list[list[int]]) -> int: index_of[v] = index lowlink_of[v] = index index += 1 stack.append(v) on_stack[v] = True for w in g[v]: if index_of[w] == -1: index = strong_connect(w, index, components) lowlink_of[v] = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) elif on_stack[w]: lowlink_of[v] = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) if lowlink_of[v] == index_of[v]: component = [] w = stack.pop() on_stack[w] = False component.append(w) while w != v: w = stack.pop() on_stack[w] = False component.append(w) components.append(component) return index components: list[list[int]] = [] for v in range(n): if index_of[v] == -1: strong_connect(v, 0, components) return components def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]: g: list[list[int]] = [[] for _ in range(n)] for u, v in edges: g[u].append(v) return g if __name__ == "__main__": n_vertices = 7 source = [0, 0, 1, 2, 3, 3, 4, 4, 6] target = [1, 3, 2, 0, 1, 4, 5, 6, 5] edges = list(zip(source, target)) g = create_graph(n_vertices, edges) assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
given a list of stock prices calculate the maximum profit that can be made from a single buy and sell of one share of stock we only allowed to complete one buy transaction and one sell transaction but must buy before we sell example prices 7 1 5 3 6 4 maxprofit will return 5 which is by buying at price 1 and selling at price 6 this problem can be solved using the concept of greedy algorithm we iterate over the price array once keeping track of the lowest price point buy and the maximum profit we can get at each point the greedy choice at each point is to either buy at the current price if it s less than our current buying price or sell at the current price if the profit is more than our current maximum profit maxprofit7 1 5 3 6 4 5 maxprofit7 6 4 3 1 0 max_profit 7 1 5 3 6 4 5 max_profit 7 6 4 3 1 0
def max_profit(prices: list[int]) -> int: if not prices: return 0 min_price = prices[0] max_profit: int = 0 for price in prices: min_price = min(price, min_price) max_profit = max(price - min_price, max_profit) return max_profit if __name__ == "__main__": import doctest doctest.testmod() print(max_profit([7, 1, 5, 3, 6, 4]))
https en wikipedia orgwikisetcoverproblem return the valuetoweight ratio for the item returns float the valuetoweight ratio for the item examples item10 65 ratio 6 5 item20 100 ratio 5 0 item30 120 ratio 4 0 solve the fractional cover problem args items a list of items where each item has weight and value attributes capacity the maximum weight capacity of the knapsack returns the maximum value that can be obtained by selecting fractions of items to cover the knapsack s capacity raises valueerror if capacity is negative examples fractionalcoveritem10 60 item20 100 item30 120 capacity50 240 0 fractionalcoveritem20 100 item30 120 item10 60 capacity25 135 0 fractionalcoveritem10 60 item20 100 item30 120 capacity60 280 0 fractionalcoveritemsitem5 30 item10 60 item15 90 capacity30 180 0 fractionalcoveritems capacity50 0 0 fractionalcoveritemsitem10 60 capacity5 30 0 fractionalcoveritemsitem10 60 capacity1 6 0 fractionalcoveritemsitem10 60 capacity0 0 0 fractionalcoveritemsitem10 60 capacity1 traceback most recent call last valueerror capacity cannot be negative sort the items by their valuetoweight ratio in descending order https en wikipedia org wiki set_cover_problem return the value to weight ratio for the item returns float the value to weight ratio for the item examples item 10 65 ratio 6 5 item 20 100 ratio 5 0 item 30 120 ratio 4 0 solve the fractional cover problem args items a list of items where each item has weight and value attributes capacity the maximum weight capacity of the knapsack returns the maximum value that can be obtained by selecting fractions of items to cover the knapsack s capacity raises valueerror if capacity is negative examples fractional_cover item 10 60 item 20 100 item 30 120 capacity 50 240 0 fractional_cover item 20 100 item 30 120 item 10 60 capacity 25 135 0 fractional_cover item 10 60 item 20 100 item 30 120 capacity 60 280 0 fractional_cover items item 5 30 item 10 60 item 15 90 capacity 30 180 0 fractional_cover items capacity 50 
0 0 fractional_cover items item 10 60 capacity 5 30 0 fractional_cover items item 10 60 capacity 1 6 0 fractional_cover items item 10 60 capacity 0 0 0 fractional_cover items item 10 60 capacity 1 traceback most recent call last valueerror capacity cannot be negative sort the items by their value to weight ratio in descending order
from dataclasses import dataclass from operator import attrgetter @dataclass class Item: weight: int value: int @property def ratio(self) -> float: return self.value / self.weight def fractional_cover(items: list[Item], capacity: int) -> float: if capacity < 0: raise ValueError("Capacity cannot be negative") total_value = 0.0 remaining_capacity = capacity for item in sorted(items, key=attrgetter("ratio"), reverse=True): if remaining_capacity == 0: break weight_taken = min(item.weight, remaining_capacity) total_value += weight_taken * item.ratio remaining_capacity -= weight_taken return total_value if __name__ == "__main__": import doctest if result := doctest.testmod().failed: print(f"{result} test(s) failed") else: print("All tests passed")
fracknapsack60 100 120 10 20 30 50 3 240 0 fracknapsack10 40 30 50 5 4 6 3 10 4 105 0 fracknapsack10 40 30 50 5 4 6 3 8 4 95 0 fracknapsack10 40 30 50 5 4 6 8 4 60 0 fracknapsack10 40 30 5 4 6 3 8 4 60 0 fracknapsack10 40 30 50 5 4 6 3 0 4 0 fracknapsack10 40 30 50 5 4 6 3 8 0 95 0 fracknapsack10 40 30 50 5 4 6 3 8 4 0 fracknapsack10 40 30 50 5 4 6 3 8 4 95 0 fracknapsack10 40 30 50 5 4 6 3 800 4 130 fracknapsack10 40 30 50 5 4 6 3 8 400 95 0 fracknapsackabcd 5 4 6 3 8 400 traceback most recent call last typeerror unsupported operand types for str and int frac_knapsack 60 100 120 10 20 30 50 3 240 0 frac_knapsack 10 40 30 50 5 4 6 3 10 4 105 0 frac_knapsack 10 40 30 50 5 4 6 3 8 4 95 0 frac_knapsack 10 40 30 50 5 4 6 8 4 60 0 frac_knapsack 10 40 30 5 4 6 3 8 4 60 0 frac_knapsack 10 40 30 50 5 4 6 3 0 4 0 frac_knapsack 10 40 30 50 5 4 6 3 8 0 95 0 frac_knapsack 10 40 30 50 5 4 6 3 8 4 0 frac_knapsack 10 40 30 50 5 4 6 3 8 4 95 0 frac_knapsack 10 40 30 50 5 4 6 3 800 4 130 frac_knapsack 10 40 30 50 5 4 6 3 8 400 95 0 frac_knapsack abcd 5 4 6 3 8 400 traceback most recent call last typeerror unsupported operand type s for str and int
from bisect import bisect from itertools import accumulate def frac_knapsack(vl, wt, w, n): r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True) vl, wt = [i[0] for i in r], [i[1] for i in r] acc = list(accumulate(wt)) k = bisect(acc, w) return ( 0 if k == 0 else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k]) ) if __name__ == "__main__": import doctest doctest.testmod()
https en wikipedia orgwikicontinuousknapsackproblem https www guru99 comfractionalknapsackproblemgreedy html https medium comwalkinthecodegreedyalgorithmfractionalknapsackproblem9aba1daecc93 value 1 3 5 7 9 weight 0 9 0 7 0 5 0 3 0 1 fractionalknapsackvalue weight 5 25 1 1 1 1 1 fractionalknapsackvalue weight 15 25 1 1 1 1 1 fractionalknapsackvalue weight 25 25 1 1 1 1 1 fractionalknapsackvalue weight 26 25 1 1 1 1 1 fractionalknapsackvalue weight 1 90 0 0 0 0 0 10 0 fractionalknapsack1 3 5 7 weight 30 16 1 1 1 1 fractionalknapsackvalue 0 9 0 7 0 5 0 3 0 1 30 25 1 1 1 1 1 fractionalknapsack 30 0 https en wikipedia org wiki continuous_knapsack_problem https www guru99 com fractional knapsack problem greedy html https medium com walkinthecode greedy algorithm fractional knapsack problem 9aba1daecc93 value 1 3 5 7 9 weight 0 9 0 7 0 5 0 3 0 1 fractional_knapsack value weight 5 25 1 1 1 1 1 fractional_knapsack value weight 15 25 1 1 1 1 1 fractional_knapsack value weight 25 25 1 1 1 1 1 fractional_knapsack value weight 26 25 1 1 1 1 1 fractional_knapsack value weight 1 90 0 0 0 0 0 10 0 fractional_knapsack 1 3 5 7 weight 30 16 1 1 1 1 fractional_knapsack value 0 9 0 7 0 5 0 3 0 1 30 25 1 1 1 1 1 fractional_knapsack 30 0
from __future__ import annotations def fractional_knapsack( value: list[int], weight: list[int], capacity: int ) -> tuple[float, list[float]]: index = list(range(len(value))) ratio = [v / w for v, w in zip(value, weight)] index.sort(key=lambda i: ratio[i], reverse=True) max_value: float = 0 fractions: list[float] = [0] * len(value) for i in index: if weight[i] <= capacity: fractions[i] = 1 max_value += value[i] capacity -= weight[i] else: fractions[i] = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
task there are n gas stations along a circular route where the amount of gas at the ith station is gasquantitiesi you have a car with an unlimited gas tank and it costs costsi of gas to travel from the ith station to its next i 1th station you begin the journey with an empty tank at one of the gas stations given two integer arrays gasquantities and costs return the starting gas station s index if you can travel around the circuit once in the clockwise direction otherwise return 1 if there exists a solution it is guaranteed to be unique reference https leetcode comproblemsgasstationdescription implementation notes first check whether the total gas is enough to complete the journey if not return 1 however if there is enough gas it is guaranteed that there is a valid starting index to reach the end of the journey greedily calculate the net gain gasquantity cost at each station if the net gain ever goes below 0 while iterating through the stations start checking from the next station this function returns a tuple of gas stations args gasquantities amount of gas available at each station costs the cost of gas required to move from one station to the next returns a tuple of gas stations gasstations getgasstations1 2 3 4 5 3 4 5 1 2 lengasstations 5 gasstations0 gasstationgasquantity1 cost3 gasstations1 gasstationgasquantity5 cost2 this function returns the index from which to start the journey in order to reach the end args gasquantities list amount of gas available at each station cost list the cost of gas required to move from one station to the next returns start int start index needed to complete the journey examples cancompletejourneygetgasstations1 2 3 4 5 3 4 5 1 2 3 cancompletejourneygetgasstations2 3 4 3 4 3 1 this function returns a tuple of gas stations args gas_quantities amount of gas available at each station costs the cost of gas required to move from one station to the next returns a tuple of gas stations gas_stations get_gas_stations 1 2 3 4 5 3 4 5 1 2 
len gas_stations 5 gas_stations 0 gasstation gas_quantity 1 cost 3 gas_stations 1 gasstation gas_quantity 5 cost 2 this function returns the index from which to start the journey in order to reach the end args gas_quantities list amount of gas available at each station cost list the cost of gas required to move from one station to the next returns start int start index needed to complete the journey examples can_complete_journey get_gas_stations 1 2 3 4 5 3 4 5 1 2 3 can_complete_journey get_gas_stations 2 3 4 3 4 3 1
from dataclasses import dataclass @dataclass class GasStation: gas_quantity: int cost: int def get_gas_stations( gas_quantities: list[int], costs: list[int] ) -> tuple[GasStation, ...]: return tuple( GasStation(quantity, cost) for quantity, cost in zip(gas_quantities, costs) ) def can_complete_journey(gas_stations: tuple[GasStation, ...]) -> int: total_gas = sum(gas_station.gas_quantity for gas_station in gas_stations) total_cost = sum(gas_station.cost for gas_station in gas_stations) if total_gas < total_cost: return -1 start = 0 net = 0 for i, gas_station in enumerate(gas_stations): net += gas_station.gas_quantity - gas_station.cost if net < 0: start = i + 1 net = 0 return start if __name__ == "__main__": import doctest doctest.testmod()
test cases do you want to enter your denominations yn n enter the change you want to make in indian currency 987 following is minimal change for 987 500 100 100 100 100 50 20 10 5 2 do you want to enter your denominations yn y enter number of denomination 10 1 5 10 20 50 100 200 500 1000 2000 enter the change you want to make 18745 following is minimal change for 18745 2000 2000 2000 2000 2000 2000 2000 2000 2000 500 200 20 20 5 do you want to enter your denominations yn n enter the change you want to make 0 the total value cannot be zero or negative do you want to enter your denominations yn n enter the change you want to make 98 the total value cannot be zero or negative do you want to enter your denominations yn y enter number of denomination 5 1 5 100 500 1000 enter the change you want to make 456 following is minimal change for 456 100 100 100 100 5 5 5 5 5 5 5 5 5 5 5 1 find the minimum change from the given denominations and value findminimumchange1 5 10 20 50 100 200 500 1000 2000 18745 2000 2000 2000 2000 2000 2000 2000 2000 2000 500 200 20 20 5 findminimumchange1 2 5 10 20 50 100 500 2000 987 500 100 100 100 100 50 20 10 5 2 findminimumchange1 2 5 10 20 50 100 500 2000 0 findminimumchange1 2 5 10 20 50 100 500 2000 98 findminimumchange1 5 100 500 1000 456 100 100 100 100 5 5 5 5 5 5 5 5 5 5 5 1 initialize result traverse through all denomination find denominations driver code all denominations of indian currency if user does not enter print result find the minimum change from the given denominations and value find_minimum_change 1 5 10 20 50 100 200 500 1000 2000 18745 2000 2000 2000 2000 2000 2000 2000 2000 2000 500 200 20 20 5 find_minimum_change 1 2 5 10 20 50 100 500 2000 987 500 100 100 100 100 50 20 10 5 2 find_minimum_change 1 2 5 10 20 50 100 500 2000 0 find_minimum_change 1 2 5 10 20 50 100 500 2000 98 find_minimum_change 1 5 100 500 1000 456 100 100 100 100 5 5 5 5 5 5 5 5 5 5 5 1 initialize result traverse through all denomination find 
denominations append the answers array driver code all denominations of indian currency if user does not enter print result
def find_minimum_change(denominations: list[int], value: str) -> list[int]: total_value = int(value) answer = [] for denomination in reversed(denominations): while int(total_value) >= int(denomination): total_value -= int(denomination) answer.append(denomination) return answer if __name__ == "__main__": denominations = [] value = "0" if ( input("Do you want to enter your denominations ? (yY/n): ").strip().lower() == "y" ): n = int(input("Enter the number of denominations you want to add: ").strip()) for i in range(n): denominations.append(int(input(f"Denomination {i}: ").strip())) value = input("Enter the change you want to make in Indian Currency: ").strip() else: denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000] value = input("Enter the change you want to make: ").strip() if int(value) == 0 or int(value) < 0: print("The total value cannot be zero or negative.") else: print(f"Following is minimal change for {value}: ") answer = find_minimum_change(denominations, value) for i in range(len(answer)): print(answer[i], end=" ")
calculate the minimum waiting time using a greedy algorithm reference https www youtube comwatch vsf3eio12ejs for doctests run following command python m doctest v minimumwaitingtime py the minimumwaitingtime function uses a greedy algorithm to calculate the minimum time for queries to complete it sorts the list in nondecreasing order calculates the waiting time for each query by multiplying its position in the list with the sum of all remaining query times and returns the total waiting time a doctest ensures that the function produces the correct output this function takes a list of query times and returns the minimum waiting time for all queries to be completed args queries a list of queries measured in picoseconds returns totalwaitingtime minimum waiting time measured in picoseconds examples minimumwaitingtime3 2 1 2 6 17 minimumwaitingtime3 2 1 4 minimumwaitingtime1 2 3 4 10 minimumwaitingtime5 5 5 5 30 minimumwaitingtime 0 this function takes a list of query times and returns the minimum waiting time for all queries to be completed args queries a list of queries measured in picoseconds returns total_waiting_time minimum waiting time measured in picoseconds examples minimum_waiting_time 3 2 1 2 6 17 minimum_waiting_time 3 2 1 4 minimum_waiting_time 1 2 3 4 10 minimum_waiting_time 5 5 5 5 30 minimum_waiting_time 0
def minimum_waiting_time(queries: list[int]) -> int: n = len(queries) if n in (0, 1): return 0 return sum(query * (n - i - 1) for i, query in enumerate(sorted(queries))) if __name__ == "__main__": import doctest doctest.testmod()
this is a pure python implementation of the greedymergesort algorithm reference https www geeksforgeeks orgoptimalfilemergepatterns for doctests run following command python3 m doctest v greedymergesort py objective merge a set of sorted files of different length into a single sorted file we need to find an optimal solution where the resultant file will be generated in minimum time approach if the number of sorted files are given there are many ways to merge them into a single sorted file this merge can be performed pair wise to merge a mrecord file and a nrecord file requires possibly mn record moves the optimal choice being merge the two smallest files together at each step greedy approach function to merge all the files with optimum cost args files list a list of sizes of different files to be merged returns optimalmergecost int optimal cost to merge all those files examples optimalmergepattern2 3 4 14 optimalmergepattern5 10 20 30 30 205 optimalmergepattern8 8 8 8 8 96 consider two files with minimum cost to be merged function to merge all the files with optimum cost args files list a list of sizes of different files to be merged returns optimal_merge_cost int optimal cost to merge all those files examples optimal_merge_pattern 2 3 4 14 optimal_merge_pattern 5 10 20 30 30 205 optimal_merge_pattern 8 8 8 8 8 96 consider two files with minimum cost to be merged
def optimal_merge_pattern(files: list) -> float:
    """Merge a set of sorted files into one with minimum total cost.

    Greedy strategy: repeatedly merge the two smallest files; merging
    files of sizes m and n costs m + n record moves.

    Fixes two defects of the previous implementation: it mutated the
    caller's list (popping every element and leaving the merged total
    behind), and it rescanned the list with ``files.index(min(files))``
    on every step, making the whole run O(n^2).  A min-heap built on a
    copy keeps the input intact and makes each step O(log n).

    Args:
        files: sizes of the files to be merged

    Returns:
        The minimal total cost to merge all the files.

    >>> optimal_merge_pattern([2, 3, 4])
    14
    >>> optimal_merge_pattern([5, 10, 20, 30, 30])
    205
    >>> optimal_merge_pattern([8, 8, 8, 8, 8])
    96
    """
    from heapq import heapify, heappop, heappush

    heap = list(files)  # copy: do not mutate the caller's list
    heapify(heap)
    optimal_merge_cost = 0
    while len(heap) > 1:
        # Merge the two smallest remaining files; the merge costs
        # the sum of their sizes and produces one file of that size.
        merged_size = heappop(heap) + heappop(heap)
        optimal_merge_cost += merged_size
        heappush(heap, merged_size)
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
smallestrange function takes a list of sorted integer lists and finds the smallest range that includes at least one number from each list using a min heap for efficiency find the smallest range from each list in nums uses min heap for efficiency the range includes at least one number from each list args nums list of k sorted integer lists returns list smallest range as a twoelement list examples smallestrange4 10 15 24 26 0 9 12 20 5 18 22 30 20 24 smallestrange1 2 3 1 2 3 1 2 3 1 1 smallestrange1 2 3 1 2 3 1 2 3 1 1 smallestrange3 2 1 0 0 0 1 2 3 1 1 smallestrange1 2 3 4 5 6 7 8 9 3 7 smallestrange0 0 0 0 0 0 0 0 0 0 0 smallestrange traceback most recent call last indexerror list index out of range initialize smallestrange with large integer values find the smallest range from each list in nums uses min heap for efficiency the range includes at least one number from each list args nums list of k sorted integer lists returns list smallest range as a two element list examples smallest_range 4 10 15 24 26 0 9 12 20 5 18 22 30 20 24 smallest_range 1 2 3 1 2 3 1 2 3 1 1 smallest_range 1 2 3 1 2 3 1 2 3 1 1 smallest_range 3 2 1 0 0 0 1 2 3 1 1 smallest_range 1 2 3 4 5 6 7 8 9 3 7 smallest_range 0 0 0 0 0 0 0 0 0 0 0 smallest_range traceback most recent call last indexerror list index out of range initialize smallest_range with large integer values output 1 1
from heapq import heappop, heappush
from sys import maxsize


def smallest_range(nums: list[list[int]]) -> list[int]:
    """Find the smallest range that includes at least one number from
    each of the k sorted lists in ``nums``.

    A min-heap tracks the current smallest element across the lists
    while ``running_max`` tracks the largest element currently covered;
    together they bound the candidate range.  The search stops when any
    list is exhausted, since the range must cover every list.

    Args:
        nums: list of k sorted integer lists (each must be non-empty)

    Returns:
        The smallest range as a two-element list ``[low, high]``.

    >>> smallest_range([[4, 10, 15, 24, 26], [0, 9, 12, 20], [5, 18, 22, 30]])
    [20, 24]
    >>> smallest_range([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
    [1, 1]
    >>> smallest_range([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    [3, 7]
    """
    heap: list[tuple[int, int, int]] = []
    running_max = -maxsize - 1
    # Seed the heap with the first element of every list.
    for list_idx, sequence in enumerate(nums):
        heappush(heap, (sequence[0], list_idx, 0))
        running_max = max(running_max, sequence[0])

    best = [-maxsize - 1, maxsize]  # widest possible starting range
    while heap:
        value, list_idx, pos = heappop(heap)
        if running_max - value < best[1] - best[0]:
            best = [value, running_max]
        if pos == len(nums[list_idx]) - 1:
            # This list is exhausted; no further range can cover it.
            break
        successor = nums[list_idx][pos + 1]
        heappush(heap, (successor, list_idx, pos + 1))
        running_max = max(running_max, successor)
    return best


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{smallest_range([[1, 2, 3], [1, 2, 3], [1, 2, 3]])}")
Adler-32 is a checksum algorithm invented by Mark Adler in 1995. Compared to a cyclic redundancy check of the same length, it trades reliability for speed, preferring the latter. Adler-32 is more reliable than Fletcher-16 and slightly less reliable than Fletcher-32. Source: https://en.wikipedia.org/wiki/Adler-32. The adler32 function implements the Adler-32 hash: it iterates over the input and updates the two running sums for each character. Examples: adler32('Algorithms') == 363791387; adler32('go adler em all') == 708642122.
MOD_ADLER = 65521  # largest prime below 2**16, per the Adler-32 definition


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of ``plain_text``.

    Two running sums are maintained modulo ``MOD_ADLER`` and combined
    into one 32-bit value: the second sum fills the high 16 bits, the
    first sum the low 16 bits.

    >>> adler32('Algorithms')
    363791387
    >>> adler32('go adler em all')
    708642122
    """
    low = 1  # running sum of character codes, seeded with 1 per the spec
    high = 0  # running sum of the intermediate ``low`` values
    for character in plain_text:
        low = (low + ord(character)) % MOD_ADLER
        high = (high + low) % MOD_ADLER
    return (high << 16) | low
This algorithm (k=33) was first reported by Dan Bernstein many years ago in comp.lang.c. Another version of this algorithm (now favored by Bernstein) uses xor: hash(i) = hash(i - 1) * 33 ^ str[i]. First magic constant, 33: it has never been adequately explained; it's magic because it works better than many other constants, prime or not. Second magic constant, 5381: (1) an odd number, (2) a prime number, (3) a deficient number, (4) 001 010 100 000 101 in binary. Source: http://www.cse.yorku.ca/~oz/hash.html. The djb2 function implements the djb2 hash algorithm, popular because of its magic constants. Examples: djb2('Algorithms') == 3782405311; djb2('scramble bits') == 1609059040.
def djb2(s: str) -> int:
    """Return the djb2 hash of ``s`` as an unsigned 32-bit integer.

    Bernstein's k=33 hash: each step multiplies the running hash by 33
    (the same operation as ``(h << 5) + h``) and adds the next
    character code; the result is reduced to 32 bits at the end.

    >>> djb2('Algorithms')
    3782405311
    >>> djb2('scramble bits')
    1609059040
    """
    hash_value = 5381  # magic initial value
    for character in s:
        hash_value = hash_value * 33 + ord(character)
    return hash_value & 0xFFFFFFFF
Implementation of the ElfHash algorithm, a variant of the PJW hash function. Example: elf_hash('lorem ipsum') == 253956621.
def elf_hash(data: str) -> int:
    """Return the ElfHash (a variant of the PJW hash function) of ``data``.

    Each character is shifted into the low nibble of the running hash;
    whenever bits reach the top nibble they are folded back into the
    middle of the hash and then cleared, keeping the value bounded.

    >>> elf_hash('lorem ipsum')
    253956621
    """
    hash_value = 0
    for character in data:
        hash_value = (hash_value << 4) + ord(character)
        high_nibble = hash_value & 0xF0000000  # bits that reached the top nibble
        if high_nibble != 0:
            hash_value ^= high_nibble >> 24  # fold the overflow back in
        hash_value &= ~high_nibble  # clear the top bits (no-op when zero)
    return hash_value


if __name__ == "__main__":
    import doctest

    doctest.testmod()
The Fletcher checksum is an algorithm for computing a position-dependent checksum devised by John G. Fletcher (1934-2012) at Lawrence Livermore Labs in the late 1970s. The objective of the Fletcher checksum was to provide error-detection properties approaching those of a cyclic redundancy check, but with the lower computational effort associated with summation techniques. Source: https://en.wikipedia.org/wiki/Fletcher%27s_checksum. The fletcher16 function loops through every character in the data and adds it to two running sums. Examples: fletcher16('hello world') == 6752; fletcher16('onethousandfourhundredthirtyfour') == 28347.
def fletcher16(text: str) -> int:
    """Return the Fletcher-16 checksum of ``text``.

    The text is encoded as ASCII and folded into two running sums
    modulo 255; the result packs the second sum into the high byte and
    the first sum into the low byte.

    >>> fletcher16('hello world')
    6752
    >>> fletcher16('onethousandfourhundredthirtyfour')
    28347
    """
    sum1 = sum2 = 0
    for byte in text.encode("ascii"):
        sum1 = (sum1 + byte) % 255
        sum2 = (sum2 + sum1) % 255
    return (sum2 << 8) | sum1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
joo gustavo a amorim gabriel kunz email joaogustavoamorimgmail com and gabrielkunzuergs edu br coding date apr 2019 black true this code implement the hamming code https en wikipedia orgwikihammingcode in telecommunication hamming codes are a family of linear errorcorrecting codes hamming codes can detect up to twobit errors or correct onebit errors without detection of uncorrected errors by contrast the simple parity code cannot correct errors and can detect only an odd number of bits in error hamming codes are perfect codes that is they achieve the highest possible rate for codes with their block length and minimum distance of three the implemented code consists of a function responsible for encoding the message emitterconverter return the encoded message a function responsible for decoding the message receptorconverter return the decoded message and a ack of data integrity how to use to be used you must declare how many parity bits sizepari you want to include in the message it is desired for test purposes to select a bit to be set as an error this serves to check whether the code is working correctly lastly the variable of the messageword that must be desired to be encoded text how this work declaration of variables sizepari be text converts the messageword text to binary using the texttobits function encodes the message using the rules of hamming encoding decodes the message using the rules of hamming encoding print the original message the encoded message and the decoded message forces an error in the coded text variable decodes the message that was forced the error print the original message the encoded message the bit changed message and the decoded message imports functions of binary conversion texttobitsmsg 011011010111001101100111 textfrombits 011011010111001101100111 msg functions of hamming code param sizepar how many parity bits the message must have param data information bits return message to be transmitted by unreliable medium bits of information 
merged with parity bits emitterconverter4 101010111111 1 1 1 1 0 1 0 0 1 0 1 1 1 1 1 1 emitterconverter5 101010111111 traceback most recent call last valueerror size of parity don t match with size of data sorted information data for the size of the output data data position template parity parity bit counter counter position of data bits performs a template of bit positions who should be given and who should be parity sorts the data to the new output size calculates parity bit counter one for a given parity counter to control the loop reading mount the message receptorconverter4 1111010010111111 1 0 1 0 1 0 1 1 1 1 1 1 true data position template parity parity bit counter counter p data bit reading list of parity received performs a template of bit positions who should be given and who should be parity sorts the data to the new output size calculates the parity with the data sorted information data for the size of the output data data position feedback parity parity bit counter counter p data bit reading performs a template position of bits who should be given and who should be parity sorts the data to the new output size calculates parity bit counter one for a certain parity counter to control loop reading mount the message example how to use number of parity bits sizepari 4 location of the bit that will be forced an error be 2 messageword to be encoded and decoded with hamming text inputenter the word to be read text message01 convert the message to binary binarytext texttobitstext prints the binary of the string printtext input in binary is binarytext total transmitted bits totalbits lenbinarytext sizepari printsize of data is strtotalbits printn message exchange printdata to send binarytext dataout emitterconvertersizepari binarytext printdata converted joindataout datareceiv ack receptorconvertersizepari dataout print data receive joindatareceiv tt data integrity strack printn force error printdata to send binarytext dataout emitterconvertersizepari 
binarytext printdata converted joindataout forces error dataoutbe 1 dataoutbe 0 0 dataoutbe 1 printdata after transmission joindataout datareceiv ack receptorconvertersizepari dataout print data receive joindatareceiv tt data integrity strack joão gustavo a amorim gabriel kunz email joaogustavoamorim gmail com and gabriel kunz uergs edu br coding date apr 2019 black true this code implement the hamming code https en wikipedia org wiki hamming_code in telecommunication hamming codes are a family of linear error correcting codes hamming codes can detect up to two bit errors or correct one bit errors without detection of uncorrected errors by contrast the simple parity code cannot correct errors and can detect only an odd number of bits in error hamming codes are perfect codes that is they achieve the highest possible rate for codes with their block length and minimum distance of three the implemented code consists of a function responsible for encoding the message emitterconverter return the encoded message a function responsible for decoding the message receptorconverter return the decoded message and a ack of data integrity how to use to be used you must declare how many parity bits sizepari you want to include in the message it is desired for test purposes to select a bit to be set as an error this serves to check whether the code is working correctly lastly the variable of the message word that must be desired to be encoded text how this work declaration of variables sizepari be text converts the message word text to binary using the text_to_bits function encodes the message using the rules of hamming encoding decodes the message using the rules of hamming encoding print the original message the encoded message and the decoded message forces an error in the coded text variable decodes the message that was forced the error print the original message the encoded message the bit changed message and the decoded message imports functions of binary conversion 
text_to_bits msg 011011010111001101100111 text_from_bits 011011010111001101100111 msg functions of hamming code param size_par how many parity bits the message must have param data information bits return message to be transmitted by unreliable medium bits of information merged with parity bits emitter_converter 4 101010111111 1 1 1 1 0 1 0 0 1 0 1 1 1 1 1 1 emitter_converter 5 101010111111 traceback most recent call last valueerror size of parity don t match with size of data sorted information data for the size of the output data data position template parity parity bit counter counter position of data bits performs a template of bit positions who should be given and who should be parity sorts the data to the new output size calculates parity parity bit counter bit counter one for a given parity counter to control the loop reading mount the message parity bit counter receptor_converter 4 1111010010111111 1 0 1 0 1 0 1 1 1 1 1 1 true data position template parity parity bit counter counter p data bit reading list of parity received performs a template of bit positions who should be given and who should be parity sorts the data to the new output size calculates the parity with the data sorted information data for the size of the output data data position feedback parity parity bit counter counter p data bit reading performs a template position of bits who should be given and who should be parity sorts the data to the new output size calculates parity parity bit counter bit counter one for a certain parity counter to control loop reading mount the message parity bit counter example how to use number of parity bits sizepari 4 location of the bit that will be forced an error be 2 message word to be encoded and decoded with hamming text input enter the word to be read text message01 convert the message to binary binarytext text_to_bits text prints the binary of the string print text input in binary is binarytext total transmitted bits totalbits len binarytext sizepari 
print size of data is str totalbits print n message exchange print data to send binarytext dataout emitterconverter sizepari binarytext print data converted join dataout datareceiv ack receptorconverter sizepari dataout print data receive join datareceiv t t data integrity str ack print n force error print data to send binarytext dataout emitterconverter sizepari binarytext print data converted join dataout forces error dataout be 1 dataout be 0 0 dataout be 1 print data after transmission join dataout datareceiv ack receptorconverter sizepari dataout print data receive join datareceiv t t data integrity str ack
import numpy as np


def text_to_bits(text, encoding="utf-8", errors="surrogatepass"):
    """Convert a text string to its binary (bit-string) representation.

    >>> text_to_bits("msg")
    '011011010111001101100111'
    """
    # Encode the text, interpret the bytes as one big integer, and render
    # it in binary; zfill pads to a whole number of 8-bit characters.
    bits = bin(int.from_bytes(text.encode(encoding, errors), "big"))[2:]
    return bits.zfill(8 * ((len(bits) + 7) // 8))


def text_from_bits(bits, encoding="utf-8", errors="surrogatepass"):
    """Convert a bit string produced by ``text_to_bits`` back to text.

    >>> text_from_bits('011011010111001101100111')
    'msg'
    """
    n = int(bits, 2)
    # An all-zero bit string decodes to the NUL character.
    return n.to_bytes((n.bit_length() + 7) // 8, "big").decode(encoding, errors) or "\0"


def text_to_bits msg 011011010111001101100111 text_from_bits 011011010111001101100111 msg functions of hamming code param size_par how many parity bits the message must have param data information bits return message to be transmitted by unreliable medium bits of information merged with parity bits emitter_converter 4 101010111111 1 1 1 1 0 1 0 0 1 0 1 1 1 1 1 1 emitter_converter 5 101010111111 traceback most recent call last valueerror size of parity don t match with size of data sorted information data for the size of the output data data position template parity parity bit counter counter position of data bits performs a template of bit positions who should be given and who should be parity sorts the data to the new output size calculates parity parity bit counter bit counter one for a given parity counter to control the loop reading mount the message parity bit counter receptor_converter 4 1111010010111111 1 0 1 0 1 0 1 1 1 1 1 1 true data position template parity parity bit counter counter p data bit reading list of parity received performs a template of bit positions who should be given and who should be parity sorts the data to the new output size calculates the parity with the data sorted information data for the size of the output data data position feedback parity parity bit counter counter p data bit reading performs a template position of bits who should be given and who should be parity sorts the data to the new output size calculates parity parity bit counter bit counter one for a certain parity counter to control loop reading mount the message parity bit counter example how to use number of parity bits sizepari 4 location of the bit that will be forced an error be 2 message word to be encoded and decoded with hamming text input enter the word to be read text message01 convert the message to binary binarytext text_to_bits text prints the binary of the string print text input in binary is binarytext total transmitted bits totalbits len binarytext sizepari
luhn algorithm from future import annotations def isluhnstring str bool checkdigit int vector liststr liststring vector checkdigit vector 1 intvector1 vector listint intdigit for digit in vector vector reverse for i digit in enumeratevector if i 1 0 doubled int digit 2 if doubled 9 doubled 9 checkdigit doubled else checkdigit digit return checkdigit 10 0 if name main import doctest doctest testmod assert isluhn79927398713 assert not isluhn79927398714 perform luhn validation on an input string algorithm double every other digit starting from 2nd last digit subtract 9 if number is greater than 9 sum the numbers test_cases 79927398710 79927398711 79927398712 79927398713 79927398714 79927398715 79927398716 79927398717 79927398718 79927398719 is_luhn str test_case for test_case in test_cases false false false true false false false false false false
from __future__ import annotations


def is_luhn(string: str) -> bool:
    """Perform Luhn validation on an input string of digits.

    Algorithm: starting from the second-to-last digit and moving left,
    double every other digit (subtracting 9 from any doubled value
    greater than 9), sum everything together with the check digit, and
    accept the string when the sum is a multiple of 10.

    >>> is_luhn("79927398713")
    True
    >>> is_luhn("79927398714")
    False
    """
    digits = [int(char) for char in string]
    check_digit = digits.pop()  # last digit is the check digit
    total = check_digit
    for position, digit in enumerate(reversed(digits)):
        if position % 2 == 0:
            # Every other digit (starting from the 2nd-to-last) is doubled.
            doubled = digit * 2
            total += doubled - 9 if doubled > 9 else doubled
        else:
            total += digit
    return total % 10 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    assert is_luhn("79927398713")
    assert not is_luhn("79927398714")
the md5 algorithm is a hash function that s commonly used as a checksum to detect data corruption the algorithm works by processing a given message in blocks of 512 bits padding the message as needed it uses the blocks to operate a 128bit state and performs a total of 64 such operations note that all values are littleendian so inputs are converted as needed although md5 was used as a cryptographic hash function in the past it s since been cracked so it shouldn t be used for security purposes for more info see https en wikipedia orgwikimd5 converts the given string to littleendian in groups of 8 chars arguments string32 string 32char string raises valueerror input is not 32 char returns 32char littleendian string tolittleendianb 1234567890abcdfghijklmnopqrstuvw b pqrstuvwhijklmno90abcdfg12345678 tolittleendianb 1234567890 traceback most recent call last valueerror input must be of length 32 converts the given nonnegative integer to hex string example suppose the input is the following i 1234 the input is 0x000004d2 in hex so the littleendian hex string is d2040000 arguments i int integer raises valueerror input is negative returns 8char littleendian hex string reformathex1234 b d2040000 reformathex666 b 9a020000 reformathex0 b 00000000 reformathex1234567890 b d2029649 reformathex1234567890987654321 b b11c6cb1 reformathex1 traceback most recent call last valueerror input must be nonnegative preprocesses the message string convert message to bit string pad bit string to a multiple of 512 chars append a 1 append 0 s until length 448 mod 512 append length of original message 64 chars example suppose the input is the following message a the message bit string is 01100001 which is 8 bits long thus the bit string needs 439 bits of padding so that bitstring 1 padding 448 mod 512 the message length is 000010000 0 in 64bit littleendian binary the combined bit string is then 512 bits long arguments message string message string returns processed bit string padded to a multiple 
of 512 chars preprocessba b01100001 b1 b0 439 b00001000 b0 56 true preprocessb b1 b0 447 b0 64 true pad bitstring to a multiple of 512 chars splits bit string into blocks of 512 chars and yields each block as a list of 32bit words example suppose the input is the following bitstring 000000000 0 0x00 32 bits padded to the right 000000010 0 0x01 32 bits padded to the right 000000100 0 0x02 32 bits padded to the right 000000110 0 0x03 32 bits padded to the right 000011110 0 0x0a 32 bits padded to the right then lenbitstring 512 so there ll be 1 block the block is split into 32bit words and each word is converted to little endian the first word is interpreted as 0 in decimal the second word is interpreted as 1 in decimal etc thus blockwords 0 1 2 3 15 arguments bitstring string bit string with multiple of 512 as length raises valueerror length of bit string isn t multiple of 512 yields a list of 16 32bit words teststring joinformatn 24 032b for n in range16 encodeutf8 listgetblockwordsteststring 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 listgetblockwordsteststring 4 listrange16 4 true listgetblockwordsb1 512 4294967295 16 true listgetblockwordsb listgetblockwordsb1111 traceback most recent call last valueerror input must have length that s a multiple of 512 perform bitwise not on given int arguments i int given int raises valueerror input is negative returns result of bitwise not on i not3234 4294967261 not321234 4294966061 not324294966061 1234 not320 4294967295 not321 4294967294 not321 traceback most recent call last valueerror input must be nonnegative add two numbers as 32bit ints arguments a int first given int b int second given int returns a b as an unsigned 32bit int sum321 1 2 sum322 3 5 sum320 0 0 sum321 1 4294967294 sum324294967295 1 0 rotate the bits of a given int left by a given amount arguments i int given int shift int shift amount raises valueerror either given int or shift is negative returns i rotated to the left by shift bits leftrotate321234 1 2468 
leftrotate321111 4 17776 leftrotate322147483648 1 1 leftrotate322147483648 3 4 leftrotate324294967295 4 4294967295 leftrotate321234 0 1234 leftrotate320 0 0 leftrotate321 0 traceback most recent call last valueerror input must be nonnegative leftrotate320 1 traceback most recent call last valueerror shift must be nonnegative returns the 32char md5 hash of a given message reference https en wikipedia orgwikimd5algorithm arguments message string message returns 32char md5 hash string md5meb b d41d8cd98f00b204e9800998ecf8427e md5mebthe quick brown fox jumps over the lazy dog b 9e107d9d372bb6826bd81d3542a419d6 md5mebthe quick brown fox jumps over the lazy dog b e4d909c290d0fb1ca068ffaddf22cbd0 import hashlib from string import asciiletters msgs b asciiletters encodeutf8 encodeutf8 bthe quick brown fox jumps over the lazy dog allmd5memsg hashlib md5msg hexdigest encodeutf8 for msg in msgs true convert to bit string add padding and append message length starting states process bit string in chunks each with 16 32char words hash current chunk f b c not32b d alternate definition for f f d b not32d c alternate definition for f add hashed chunk to running total converts the given string to little endian in groups of 8 chars arguments string_32 string 32 char string raises valueerror input is not 32 char returns 32 char little endian string to_little_endian b 1234567890abcdfghijklmnopqrstuvw b pqrstuvwhijklmno90abcdfg12345678 to_little_endian b 1234567890 traceback most recent call last valueerror input must be of length 32 converts the given non negative integer to hex string example suppose the input is the following i 1234 the input is 0x000004d2 in hex so the little endian hex string is d2040000 arguments i int integer raises valueerror input is negative returns 8 char little endian hex string reformat_hex 1234 b d2040000 reformat_hex 666 b 9a020000 reformat_hex 0 b 00000000 reformat_hex 1234567890 b d2029649 reformat_hex 1234567890987654321 b b11c6cb1 reformat_hex 1 
traceback most recent call last valueerror input must be non negative preprocesses the message string convert message to bit string pad bit string to a multiple of 512 chars append a 1 append 0 s until length 448 mod 512 append length of original message 64 chars example suppose the input is the following message a the message bit string is 01100001 which is 8 bits long thus the bit string needs 439 bits of padding so that bit_string 1 padding 448 mod 512 the message length is 000010000 0 in 64 bit little endian binary the combined bit string is then 512 bits long arguments message string message string returns processed bit string padded to a multiple of 512 chars preprocess b a b 01100001 b 1 b 0 439 b 00001000 b 0 56 true preprocess b b 1 b 0 447 b 0 64 true pad bit_string to a multiple of 512 chars splits bit string into blocks of 512 chars and yields each block as a list of 32 bit words example suppose the input is the following bit_string 000000000 0 0x00 32 bits padded to the right 000000010 0 0x01 32 bits padded to the right 000000100 0 0x02 32 bits padded to the right 000000110 0 0x03 32 bits padded to the right 000011110 0 0x0a 32 bits padded to the right then len bit_string 512 so there ll be 1 block the block is split into 32 bit words and each word is converted to little endian the first word is interpreted as 0 in decimal the second word is interpreted as 1 in decimal etc thus block_words 0 1 2 3 15 arguments bit_string string bit string with multiple of 512 as length raises valueerror length of bit string isn t multiple of 512 yields a list of 16 32 bit words test_string join format n 24 032b for n in range 16 encode utf 8 list get_block_words test_string 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 list get_block_words test_string 4 list range 16 4 true list get_block_words b 1 512 4294967295 16 true list get_block_words b list get_block_words b 1111 traceback most recent call last valueerror input must have length that s a multiple of 512 perform bitwise 
not on given int arguments i int given int raises valueerror input is negative returns result of bitwise not on i not_32 34 4294967261 not_32 1234 4294966061 not_32 4294966061 1234 not_32 0 4294967295 not_32 1 4294967294 not_32 1 traceback most recent call last valueerror input must be non negative add two numbers as 32 bit ints arguments a int first given int b int second given int returns a b as an unsigned 32 bit int sum_32 1 1 2 sum_32 2 3 5 sum_32 0 0 0 sum_32 1 1 4294967294 sum_32 4294967295 1 0 rotate the bits of a given int left by a given amount arguments i int given int shift int shift amount raises valueerror either given int or shift is negative returns i rotated to the left by shift bits left_rotate_32 1234 1 2468 left_rotate_32 1111 4 17776 left_rotate_32 2147483648 1 1 left_rotate_32 2147483648 3 4 left_rotate_32 4294967295 4 4294967295 left_rotate_32 1234 0 1234 left_rotate_32 0 0 0 left_rotate_32 1 0 traceback most recent call last valueerror input must be non negative left_rotate_32 0 1 traceback most recent call last valueerror shift must be non negative returns the 32 char md5 hash of a given message reference https en wikipedia org wiki md5 algorithm arguments message string message returns 32 char md5 hash string md5_me b b d41d8cd98f00b204e9800998ecf8427e md5_me b the quick brown fox jumps over the lazy dog b 9e107d9d372bb6826bd81d3542a419d6 md5_me b the quick brown fox jumps over the lazy dog b e4d909c290d0fb1ca068ffaddf22cbd0 import hashlib from string import ascii_letters msgs b ascii_letters encode utf 8 üñîçø é encode utf 8 b the quick brown fox jumps over the lazy dog all md5_me msg hashlib md5 msg hexdigest encode utf 8 for msg in msgs true convert to bit string add padding and append message length starting states process bit string in chunks each with 16 32 char words hash current chunk f b c not_32 b d alternate definition for f f d b not_32 d c alternate definition for f add hashed chunk to running total
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-char bit/hex string to little-endian in groups of 8 chars.

    Raises ValueError if the input is not exactly 32 characters long.

    >>> to_little_endian(b"1234567890abcdfghijklmnopqrstuvw")
    b'pqrstuvwhijklmno90abcdfg12345678'
    """
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    # Emit the four 8-char groups in reverse order.
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Convert a non-negative integer to an 8-char little-endian hex string.

    Raises ValueError if the input is negative.

    >>> reformat_hex(1234)
    b'd2040000'
    """
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]  # low 32 bits as 8 hex chars
    little_endian_hex = b""
    # Emit the four hex byte-pairs in reverse order.
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad the message per MD5: append a 1 bit, zero-fill to 448 mod 512,
    then append the original bit length as a 64-bit little-endian value.

    Returns a bit string whose length is a multiple of 512.
    """
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")
    # Pad bit_string to a multiple of 512 chars.
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    # Append the 64-bit length, low word first (little-endian).
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split a bit string into 512-bit blocks, yielding each block as a
    list of sixteen 32-bit little-endian words.

    Raises ValueError if the length is not a multiple of 512.
    """
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Return the bitwise NOT of ``i`` as an unsigned 32-bit integer.

    Raises ValueError if the input is negative.

    >>> not_32(34)
    4294967261
    """
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    # Flip every bit of the 32-bit representation.
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Add two numbers as unsigned 32-bit ints (wrap on overflow).

    >>> sum_32(4294967295, 1)
    0
    """
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the bits of ``i`` left by ``shift`` within 32 bits.

    Raises ValueError if either argument is negative.

    >>> left_rotate_32(1234, 1)
    2468
    """
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the 32-char MD5 hash of the given message.

    Reference: https://en.wikipedia.org/wiki/MD5#Algorithm

    >>> md5_me(b"")
    b'd41d8cd98f00b204e9800998ecf8427e'
    >>> md5_me(b"The quick brown fox jumps over the lazy dog")
    b'9e107d9d372bb6826bd81d3542a419d6'
    """
    # Convert to bit string, add padding and append message length.
    bit_string = preprocess(message)
    # Per-round additive constants: K[i] = floor(2^32 * |sin(i + 1)|).
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
    # Starting states (the standard MD5 initialization vector).
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476
    # Per-operation left-rotation amounts, grouped by round.
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words.
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0
        # Hash current chunk: 64 operations, 4 rounds of 16.
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) -- alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) -- alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))
        # Add hashed chunk to running total.
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)
    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
This algorithm was created for sdbm, a public-domain reimplementation of the ndbm database library. It was found to do well in scrambling bits, causing a better distribution of the keys and fewer splits. It also happens to be a good general hashing function with good distribution. The actual function (pseudo-code) is: for i in 0..len(str): hash(i) = hash(i - 1) * 65599 + str[i]. What is included below is the faster version used in gawk (there is even a faster duff-device version). The magic constant 65599 was picked out of thin air while experimenting with different constants; it turns out to be a prime. This is one of the algorithms used in Berkeley DB (see sleepycat) and elsewhere. Source: http://www.cse.yorku.ca/~oz/hash.html. The sdbm function implements the sdbm hash: easy to use, great for bit scrambling; it iterates over each character in the given string and applies the function to each of them. Examples: sdbm('Algorithms') == 1462174910723540325254304520539387479031000036; sdbm('scramble bits') == 730247649148944819640658295400555317318720608290373040936089.
def sdbm(plain_text: str) -> int:
    """
    Compute the SDBM hash of a string.

    Implements the classic recurrence hash = hash * 65599 + c for every
    character, expressed with shifts (65599 == 2**6 + 2**16 - 1), which is
    how the original gawk implementation writes it.

    >>> sdbm('Algorithms')
    1462174910723540325254304520539387479031000036
    >>> sdbm('scramble bits')
    730247649148944819640658295400555317318720608290373040936089
    """
    digest = 0
    for code_point in map(ord, plain_text):
        # hash * 65599 + c, written as shifts like the reference C version.
        digest = code_point + (digest << 6) + (digest << 16) - digest
    return digest
implementation of the sha1 hash function and gives utilities to find hash of string or hash of text from a file also contains a test class to verify that the generated hash matches what is returned by the hashlib library usage python sha1 py string hello world python sha1 py file helloworld txt when run without any arguments it prints the hash of the string hello world welcome to cryptography sha1 hash or sha1 sum of a string is a cryptographic function which means it is easy to calculate forwards but extremely difficult to calculate backwards what this means is you can easily calculate the hash of a string but it is extremely difficult to know the original string if you have its hash this property is useful for communicating securely send encrypted messages and is very useful in payment systems blockchain and cryptocurrency etc the algorithm as described in the reference first we start with a message the message is padded and the length of the message is added to the end it is then split into blocks of 512 bits or 64 bytes the blocks are then processed one at a time each block must be expanded and compressed the value after each compression is added to a 160bit buffer called the current hash state after the last block is processed the current hash state is returned as the final hash reference https deadhacker com20060221sha1illustrated class to contain the entire pipeline for sha1 hashing algorithm sha1hashbytes allan utf8 finalhash 872af2d8ac3d8695387e7c804bf0e02c18df9e6e initiates the variables data and h h is a list of 5 8digit hexadecimal numbers corresponding to 1732584193 4023233417 2562383102 271733878 3285377520 respectively we will start with this as a message digest 0x is how you write hexadecimal numbers in python static method to be used inside other methods left rotates n by b sha1hash rotate12 2 48 pads the input message with zeros so that paddeddata has 64 bytes or 512 bits returns a list of bytestrings each of length 64 staticmethod takes a 
bytestringblock of length 64 unpacks it to a list of integers and returns a list of 80 integers after some bit operations calls all the other methods to process the input pads the data then splits into blocks and then does a series of operations for each block including expansion for each block the variable h that was initialized is copied to a b c d e and these 5 variables a b c d e undergo several changes after all the blocks are processed these 5 variables are pairwise added to h ie a to h0 b to h1 and so on this h becomes our final hash which is returned provides option string or file to take input and prints the calculated sha1 hash unittest main has been commented out because we probably don t want to run the test each time unittest main in any case hash input should be a bytestring hashlib is only used inside the test class class to contain the entire pipeline for sha1 hashing algorithm sha1hash bytes allan utf 8 final_hash 872af2d8ac3d8695387e7c804bf0e02c18df9e6e initiates the variables data and h h is a list of 5 8 digit hexadecimal numbers corresponding to 1732584193 4023233417 2562383102 271733878 3285377520 respectively we will start with this as a message digest 0x is how you write hexadecimal numbers in python static method to be used inside other methods left rotates n by b sha1hash rotate 12 2 48 pads the input message with zeros so that padded_data has 64 bytes or 512 bits returns a list of bytestrings each of length 64 staticmethod takes a bytestring block of length 64 unpacks it to a list of integers and returns a list of 80 integers after some bit operations calls all the other methods to process the input pads the data then splits into blocks and then does a series of operations for each block including expansion for each block the variable h that was initialized is copied to a b c d e and these 5 variables a b c d e undergo several changes after all the blocks are processed these 5 variables are pairwise added to h ie a to h 0 b to h 1 and so 
on this h becomes our final hash which is returned noqa s324 provides option string or file to take input and prints the calculated sha1 hash unittest main has been commented out because we probably don t want to run the test each time unittest main in any case hash input should be a bytestring
import argparse
import hashlib
import struct


class SHA1Hash:
    """
    Class to contain the entire pipeline for the SHA-1 hashing algorithm.

    >>> SHA1Hash(bytes('Allan', 'utf-8')).final_hash()
    '872af2d8ac3d8695387e7c804bf0e02c18df9e6e'
    """

    def __init__(self, data):
        """
        Store the message and the initial digest state.  self.h holds the
        five 32-bit words (1732584193, 4023233417, 2562383102, 271733878,
        3285377520) that SHA-1 starts from; 0x is how you write hexadecimal
        numbers in Python.
        """
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """
        Left-rotate the 32-bit integer n by b bits.

        >>> SHA1Hash('').rotate(12, 2)
        48
        """
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """
        Return self.data followed by a 0x80 byte, zero padding and the
        64-bit big-endian bit length, so the result is a multiple of 64
        bytes (512 bits).
        """
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        return self.data + padding + struct.pack(">Q", 8 * len(self.data))

    def split_blocks(self):
        """Return the padded message as a list of 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """
        Unpack a 64-byte block into sixteen 32-bit words and expand them to
        the eighty words used by the compression function.
        """
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """
        Pad the data, split it into 64-byte blocks and run the compression
        function over each one, returning the 40-character hex digest.

        BUGFIX: the digest state is now accumulated in a local copy of
        self.h.  The previous version overwrote self.h with the final words
        (as a tuple), so calling final_hash() a second time produced a
        wrong result.
        """
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        h = list(self.h)  # work on a copy; self.h keeps the initial state
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = h
            for i in range(80):
                # Round-dependent mixing function f and constant k.
                if i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                else:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            h = [
                h[0] + a & 0xFFFFFFFF,
                h[1] + b & 0xFFFFFFFF,
                h[2] + c & 0xFFFFFFFF,
                h[3] + d & 0xFFFFFFFF,
                h[4] + e & 0xFFFFFFFF,
            ]
        return ("{:08x}" * 5).format(*h)


def test_sha1_hash():
    """Compare this implementation against hashlib for one message."""
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    """Provide the option --string or --file to take input and print the SHA-1 hash."""
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring.
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    # Run the doctests only when executed as a script, never on import.
    import doctest

    doctest.testmod()
m yathurshan black formatter true implementation of sha256 hash function in a python class and provides utilities to find hash of string or hash of text from a file usage python sha256 py string hello world python sha256 py file helloworld txt when run without any arguments it prints the hash of the string hello world welcome to cryptography references https qvault iocryptographyhowsha2worksstepbystepsha256 https en wikipedia orgwikisha2 class to contain the entire pipeline for sha1 hashing algorithm sha256b python hash 18885f27b5af9012df19e496460f9294d5ab76128824c6f993787004f6d9a7db sha256b hello world hash b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9 initialize hash values initialize round constants convert into blocks of 64 bytes convert the given block into a list of 4 byte integers add 48 0ed integers modify the zeroed indexes at the end of the array compression modify final values right rotate a given unsigned number by a certain amount of rotations test class for the sha256 class inherits the testcase class from unittest provides option string or file to take input and prints the calculated sha256 hash unittest main hash input should be a bytestring m yathurshan black formatter true implementation of sha256 hash function in a python class and provides utilities to find hash of string or hash of text from a file usage python sha256 py string hello world python sha256 py file hello_world txt when run without any arguments it prints the hash of the string hello world welcome to cryptography references https qvault io cryptography how sha 2 works step by step sha 256 https en wikipedia org wiki sha 2 class to contain the entire pipeline for sha1 hashing algorithm sha256 b python hash 18885f27b5af9012df19e496460f9294d5ab76128824c6f993787004f6d9a7db sha256 b hello world hash b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9 initialize hash values initialize round constants convert into blocks of 64 bytes convert the given block 
into a list of 4 byte integers add 48 0 ed integers modify the zero ed indexes at the end of the array compression modify final values right rotate a given unsigned number by a certain amount of rotations test class for the sha256 class inherits the testcase class from unittest provides option string or file to take input and prints the calculated sha 256 hash unittest main hash input should be a bytestring
import argparse
import struct
import unittest


class SHA256:
    """
    Class to contain the entire pipeline for the SHA-256 hashing algorithm.

    >>> SHA256(b"Python").hash
    '18885f27b5af9012df19e496460f9294d5ab76128824c6f993787004f6d9a7db'

    >>> SHA256(b"hello world").hash
    'b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9'
    """

    # Initial hash values.  Kept as a class-level constant so final_hash()
    # can always restart from a clean state: the previous version
    # accumulated into self.hashes, so a second call to final_hash()
    # produced a corrupted digest.
    initial_hashes = [
        0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
        0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
    ]

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Running hash state; holds the final digest words after hashing.
        self.hashes = list(self.initial_hashes)

        # Initialize round constants.
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
            0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
            0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
            0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
            0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
            0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
            0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
            0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
            0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]

        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """
        Pad the message with a 0x80 byte, zeros and the 64-bit big-endian
        bit length so its total length is a multiple of 64 bytes.
        """
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        """
        Run the SHA-256 compression function over every 64-byte block and
        store the hexadecimal digest in self.hash (final words in
        self.hashes).

        BUGFIX: the state is rebuilt from the initial constants on every
        call, so repeated calls keep producing the correct digest
        (previously a second call started from the already-final values).
        """
        # Convert into blocks of 64 bytes.
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        hashes = list(self.initial_hashes)
        for block in self.blocks:
            # Convert the given block into a list of 4-byte integers and
            # add 48 zeroed words for the message schedule.
            words = list(struct.unpack(">16L", block))
            words += [0] * 48

            a, b, c, d, e, f, g, h = hashes

            for index in range(64):
                if index > 15:
                    # Modify the zeroed indexes at the end of the array.
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression.
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & (0xFFFFFFFF)) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            # Modify final values.
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(hashes)
            ]

        self.hashes = hashes
        self.hash = "".join(hex(value)[2:].zfill(8) for value in hashes)

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit unsigned value by the given number of bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    """Test class for the SHA256 class.  Inherits unittest.TestCase."""

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        assert SHA256(msg).hash == hashlib.sha256(msg).hexdigest()


def main() -> None:
    """Provide the option -s/--string or -f/--file and print the SHA-256 hash."""
    # Run the class doctests before handling command-line input.
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()
    input_string = args.input_string
    # Hash input should be a bytestring.
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
to get an insight into greedy algorithm through the knapsack problem a shopkeeper has bags of wheat that each have different weights and different profits eg profit 5 8 7 1 12 3 4 weight 2 7 1 6 4 2 5 maxweight 100 constraints maxweight 0 profiti 0 weighti 0 calculate the maximum profit that the shopkeeper can make given maxmum weight that can be carried function description is as follows param profit take a list of profits param weight take a list of weight if bags corresponding to the profits param maxweight maximum weight that could be carried return maximum expected gain calcprofit1 2 3 3 4 5 15 6 calcprofit10 9 8 3 4 5 25 27 list created to store profit gained for the 1kg in case of each weight respectively calculate and append profitweight for each element creating a copy of the list and sorting profitweight in ascending order declaring useful variables loop till the total weight do not reach max limit e g 15 kg and till ilength flag value for encountered greatest element in sortedprofitbyweight calculate the index of the biggestprofitbyweight in profitbyweight list this will give the index of the first encountered element which is same as of biggestprofitbyweight there may be one or more values same as that of biggestprofitbyweight but index always encounter the very first element only to curb this alter the values in profitbyweight once they are used here it is done to 1 because neither profit nor weight can be in negative check if the weight encountered is less than the total weight encountered before adding profit gained for the given weight 1 weightindexweightindex since the weight encountered is greater than limit therefore take the required number of remaining kgs and calculate profit for it weight remaining weightindex function call to get an insight into greedy algorithm through the knapsack problem a shopkeeper has bags of wheat that each have different weights and different profits eg profit 5 8 7 1 12 3 4 weight 2 7 1 6 4 2 5 max_weight 100 
constraints max_weight 0 profit i 0 weight i 0 calculate the maximum profit that the shopkeeper can make given maxmum weight that can be carried function description is as follows param profit take a list of profits param weight take a list of weight if bags corresponding to the profits param max_weight maximum weight that could be carried return maximum expected gain calc_profit 1 2 3 3 4 5 15 6 calc_profit 10 9 8 3 4 5 25 27 list created to store profit gained for the 1kg in case of each weight respectively calculate and append profit weight for each element creating a copy of the list and sorting profit weight in ascending order declaring useful variables loop till the total weight do not reach max limit e g 15 kg and till i length flag value for encountered greatest element in sorted_profit_by_weight calculate the index of the biggest_profit_by_weight in profit_by_weight list this will give the index of the first encountered element which is same as of biggest_profit_by_weight there may be one or more values same as that of biggest_profit_by_weight but index always encounter the very first element only to curb this alter the values in profit_by_weight once they are used here it is done to 1 because neither profit nor weight can be in negative check if the weight encountered is less than the total weight encountered before adding profit gained for the given weight 1 weight index weight index since the weight encountered is greater than limit therefore take the required number of remaining kgs and calculate profit for it weight remaining weight index function call
def calc_profit(profit: list, weight: list, max_weight: int) -> int:
    """
    Calculate the maximum profit the shopkeeper can make for a given
    maximum carriable weight (greedy / fractional knapsack).

    :param profit: list of profits
    :param weight: list of bag weights corresponding to the profits
    :param max_weight: maximum weight that can be carried
    :return: maximum expected gain

    >>> calc_profit([1, 2, 3], [3, 4, 5], 15)
    6
    >>> calc_profit([10, 9, 8], [3, 4, 5], 25)
    27
    """
    if len(profit) != len(weight):
        # IndexError (not ValueError) is what the documentation and the
        # accompanying test-suite specify for mismatched list lengths.
        raise IndexError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # Profit gained for 1 kg in case of each weight respectively.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # A sorted copy, ascending, so the best ratio sits at the end.
    sorted_profit_by_weight = sorted(profit_by_weight)

    length = len(sorted_profit_by_weight)
    limit = 0  # total weight carried so far
    gain = 0  # profit accumulated so far
    i = 0
    # Greedily take the most profitable (per kg) bag still available until
    # the weight limit is reached or every bag has been considered.
    while limit <= max_weight and i < length:
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        # list.index() finds the FIRST occurrence, so entries already used
        # are overwritten with -1 (neither profit nor weight can be
        # negative) to avoid picking the same bag twice.
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1

        if max_weight - limit >= weight[index]:
            # The whole bag fits; take all of it.
            limit += weight[index]
            gain += 1 * profit[index]
        else:
            # Only part of the bag fits; take the remaining kgs pro rata.
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function call
    calc_profit(profit, weight, max_weight)
a naive recursive implementation of 01 knapsack problem https en wikipedia orgwikiknapsackproblem returns the maximum value that can be put in a knapsack of a capacity cap whereby each weight w has a specific value val cap 50 val 60 100 120 w 10 20 30 c lenval knapsackcap w val c 220 the result is 220 cause the values of 100 and 120 got the weight of 50 which is the limit of the capacity base case if weight of the nth item is more than knapsack of capacity then this item cannot be included in the optimal solution else return the maximum of two cases 1 nth item included 2 not included returns the maximum value that can be put in a knapsack of a capacity cap whereby each weight w has a specific value val cap 50 val 60 100 120 w 10 20 30 c len val knapsack cap w val c 220 the result is 220 cause the values of 100 and 120 got the weight of 50 which is the limit of the capacity base case if weight of the nth item is more than knapsack of capacity then this item cannot be included in the optimal solution else return the maximum of two cases 1 nth item included 2 not included
from __future__ import annotations


def knapsack(capacity: int, weights: list[int], values: list[int], counter: int) -> int:
    """
    Return the maximum value that fits in a knapsack of the given capacity,
    where each weight has a matching value (naive recursive 0/1 knapsack).

    >>> cap = 50
    >>> val = [60, 100, 120]
    >>> w = [10, 20, 30]
    >>> knapsack(cap, w, val, len(val))
    220

    220 is reached by taking the values 100 and 120, whose combined weight
    of 50 exactly hits the capacity limit.
    """
    # Base case: no items left to consider or no remaining capacity.
    if counter == 0 or capacity == 0:
        return 0

    item = counter - 1
    # If the item is too heavy for the remaining capacity it can only be
    # skipped.
    if weights[item] > capacity:
        return knapsack(capacity, weights, values, counter - 1)

    # Otherwise the answer is the better of taking the item or leaving it.
    taken = values[item] + knapsack(
        capacity - weights[item], weights, values, counter - 1
    )
    skipped = knapsack(capacity, weights, values, counter - 1)
    return max(taken, skipped)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
to get an insight into naive recursive way to solve the knapsack problem a shopkeeper has bags of wheat that each have different weights and different profits eg noofitems 4 profit 5 4 8 6 weight 1 2 4 5 maxweight 5 constraints maxweight 0 profiti 0 weighti 0 calculate the maximum profit that the shopkeeper can make given maxmum weight that can be carried function description is as follows param weights take a list of weights param values take a list of profits corresponding to the weights param numberofitems number of items available to pick from param maxweight maximum weight that could be carried param index the element we are looking at return maximum expected gain knapsack1 2 4 5 5 4 8 6 4 5 0 13 knapsack3 4 5 10 9 8 3 25 0 27 to get an insight into naive recursive way to solve the knapsack problem a shopkeeper has bags of wheat that each have different weights and different profits eg no_of_items 4 profit 5 4 8 6 weight 1 2 4 5 max_weight 5 constraints max_weight 0 profit i 0 weight i 0 calculate the maximum profit that the shopkeeper can make given maxmum weight that can be carried function description is as follows param weights take a list of weights param values take a list of profits corresponding to the weights param number_of_items number of items available to pick from param max_weight maximum weight that could be carried param index the element we are looking at return maximum expected gain knapsack 1 2 4 5 5 4 8 6 4 5 0 13 knapsack 3 4 5 10 9 8 3 25 0 27
def knapsack(
    weights: list, values: list, number_of_items: int, max_weight: int, index: int
) -> int:
    """
    Naive recursive solution to the 0/1 knapsack problem.

    :param weights: list of bag weights
    :param values: list of profits corresponding to the weights
    :param number_of_items: number of items available to pick from
    :param max_weight: maximum weight that can still be carried
    :param index: the element currently being considered
    :return: maximum expected gain

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    >>> knapsack([3, 4, 5], [10, 9, 8], 3, 25, 0)
    27
    """
    # All items have been considered; nothing more can be added.
    if index == number_of_items:
        return 0

    # Option 1: skip the current item entirely.
    best_without = knapsack(weights, values, number_of_items, max_weight, index + 1)

    # Option 2: take the current item, provided it still fits.
    best_with = 0
    if weights[index] <= max_weight:
        best_with = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )

    return max(best_without, best_with)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
test cases for knapsack kp calcprofit takes the required argument profit weight maxweight and returns whether the answer matches to the expected ones returns valueerror for any negative maxweight value return valueerror profit 10 20 30 40 50 60 weight 2 4 6 8 10 12 maxweight 15 returns valueerror for any negative profit value in the list return valueerror profit 10 20 30 40 50 60 weight 2 4 6 8 10 12 maxweight 15 returns valueerror for any negative weight value in the list return valueerror profit 10 20 30 40 50 60 weight 2 4 6 8 10 12 maxweight 15 returns valueerror for any zero maxweight value return valueerror profit 10 20 30 40 50 60 weight 2 4 6 8 10 12 maxweight null returns indexerror if length of lists profit and weight are unequal return indexerror profit 10 20 30 40 50 weight 2 4 6 8 10 12 maxweight 100 test cases for knapsack kp calc_profit takes the required argument profit weight max_weight and returns whether the answer matches to the expected ones returns valueerror for any negative max_weight value return valueerror profit 10 20 30 40 50 60 weight 2 4 6 8 10 12 max_weight 15 returns valueerror for any negative profit value in the list return valueerror profit 10 20 30 40 50 60 weight 2 4 6 8 10 12 max_weight 15 returns valueerror for any negative weight value in the list return valueerror profit 10 20 30 40 50 60 weight 2 4 6 8 10 12 max_weight 15 returns valueerror for any zero max_weight value return valueerror profit 10 20 30 40 50 60 weight 2 4 6 8 10 12 max_weight null returns indexerror if length of lists profit and weight are unequal return indexerror profit 10 20 30 40 50 weight 2 4 6 8 10 12 max_weight 100
import unittest

import pytest

from knapsack import greedy_knapsack as kp


class TestClass(unittest.TestCase):
    """
    Test cases for the greedy knapsack
    (knapsack.greedy_knapsack.calc_profit).
    """

    def test_sorted(self):
        """calc_profit returns the maximum achievable profit."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        assert kp.calc_profit(profit, weight, max_weight) == 210

    def test_negative_max_weight(self):
        """Raises ValueError for a negative max_weight value."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        # The previous version called pytest.raises without invoking
        # calc_profit, so nothing was verified; the context-manager form
        # below exercises the validation for real.
        with pytest.raises(ValueError, match="max_weight must greater than zero."):
            kp.calc_profit(profit, weight, -15)

    def test_negative_profit_value(self):
        """Raises ValueError for a negative profit value in the list."""
        profit = [10, -20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        # NOTE: the profit/weight messages were previously swapped between
        # this test and test_negative_weight_value.
        with pytest.raises(ValueError, match="Profit can not be negative."):
            kp.calc_profit(profit, weight, 15)

    def test_negative_weight_value(self):
        """Raises ValueError for a negative weight value in the list."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, -4, 6, 8, 10, 12]
        with pytest.raises(ValueError, match="Weight can not be negative."):
            kp.calc_profit(profit, weight, 15)

    def test_null_max_weight(self):
        """Raises ValueError for a zero max_weight value."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        with pytest.raises(ValueError, match="max_weight must greater than zero."):
            kp.calc_profit(profit, weight, 0)

    def test_unequal_list_length(self):
        """Raises IndexError if profit and weight lengths differ."""
        profit = [10, 20, 30, 40, 50]
        weight = [2, 4, 6, 8, 10, 12]
        with pytest.raises(
            IndexError, match="The length of profit and weight must be same."
        ):
            kp.calc_profit(profit, weight, 100)


if __name__ == "__main__":
    unittest.main()
created on fri oct 16 09 31 07 2020 dr tobias schrder license mitlicense this file contains the testsuite for the knapsack problem test for the base case test for the base case test for the knapsack test for the base case test for the base case test for the knapsack
import unittest

from knapsack import knapsack as k


class Test(unittest.TestCase):
    """Test suite for the recursive 0/1 knapsack implementation."""

    def test_base_case(self):
        """A zero-capacity knapsack can never hold any value."""
        cap = 0
        values = [0]
        weights = [0]
        assert k.knapsack(cap, weights, values, len(values)) == 0

        values = [60]
        weights = [10]
        assert k.knapsack(cap, weights, values, len(values)) == 0

    def test_easy_case(self):
        """Small instance: the optimum combines the two lightest items."""
        assert k.knapsack(3, [3, 2, 1], [1, 2, 3], 3) == 5

    def test_knapsack(self):
        """Classic instance with known optimum 220."""
        assert k.knapsack(50, [10, 20, 30], [60, 100, 120], 3) == 220


if __name__ == "__main__":
    unittest.main()
gaussian elimination method for solving a system of linear equations gaussian elimination https en wikipedia orgwikigaussianelimination this function performs a retroactive linear system resolution for triangular matrix examples 2x1 2x2 1x3 5 2x1 2x2 1 0x1 2x2 1x3 7 0x1 2x2 1 0x1 0x2 5x3 15 gaussianelimination2 2 1 0 2 1 0 0 5 5 7 15 array2 2 3 gaussianelimination2 2 0 2 1 1 array1 0 5 this function performs gaussian elimination method examples 1x1 4x2 2x3 2 1x1 2x2 5 5x1 2x2 2x3 3 5x1 2x2 5 1x1 1x2 0x3 4 gaussianelimination1 4 2 5 2 2 1 1 0 2 3 4 array 2 3 1 7 5 55 gaussianelimination1 2 5 2 5 5 array0 2 5 coefficients must to be a square matrix so we need to check first augmented matrix scale the matrix leaving it triangular this function performs a retroactive linear system resolution for triangular matrix examples 2x1 2x2 1x3 5 2x1 2x2 1 0x1 2x2 1x3 7 0x1 2x2 1 0x1 0x2 5x3 15 gaussian_elimination 2 2 1 0 2 1 0 0 5 5 7 15 array 2 2 3 gaussian_elimination 2 2 0 2 1 1 array 1 0 5 this function performs gaussian elimination method examples 1x1 4x2 2x3 2 1x1 2x2 5 5x1 2x2 2x3 3 5x1 2x2 5 1x1 1x2 0x3 4 gaussian_elimination 1 4 2 5 2 2 1 1 0 2 3 4 array 2 3 1 7 5 55 gaussian_elimination 1 2 5 2 5 5 array 0 2 5 coefficients must to be a square matrix so we need to check first augmented matrix scale the matrix leaving it triangular
import numpy as np
from numpy import float64
from numpy.typing import NDArray


def retroactive_resolution(
    coefficients: NDArray[float64], vector: NDArray[float64]
) -> NDArray[float64]:
    """
    Solve an upper-triangular linear system by back-substitution.

    ``coefficients`` must already be upper triangular; the unknowns are
    recovered from the last equation upwards, reusing every value solved
    so far.
    """
    rows = np.shape(coefficients)[0]

    solution: NDArray[float64] = np.zeros((rows, 1), dtype=float)
    for row in range(rows - 1, -1, -1):
        # Everything to the right of the diagonal is already solved.
        known = np.dot(coefficients[row, row + 1 :], solution[row + 1 :])
        solution[row, 0] = (vector[row][0] - known[0]) / coefficients[row, row]

    return solution


def gaussian_elimination(
    coefficients: NDArray[float64], vector: NDArray[float64]
) -> NDArray[float64]:
    """
    Solve a square linear system via Gaussian elimination followed by
    back-substitution.  A non-square coefficient matrix yields an empty
    array.
    """
    rows, columns = np.shape(coefficients)
    if rows != columns:
        # Coefficients must form a square matrix; bail out otherwise.
        return np.array((), dtype=float)

    # Build the augmented matrix [A | b] and work in floating point.
    augmented: NDArray[float64] = np.concatenate((coefficients, vector), axis=1)
    augmented = augmented.astype("float64")

    # Forward elimination: zero every entry below the diagonal, leaving the
    # matrix triangular.
    for pivot_row in range(rows - 1):
        pivot = augmented[pivot_row, pivot_row]
        for target in range(pivot_row + 1, columns):
            ratio = augmented[target, pivot_row] / pivot
            augmented[target, :] -= ratio * augmented[pivot_row, :]

    return retroactive_resolution(
        augmented[:, 0:columns], augmented[:, columns : columns + 1]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
jacobi iteration method https en wikipedia orgwikijacobimethod method to find solution of system of linear equations jacobi iteration method an iterative algorithm to determine the solutions of strictly diagonally dominant system of linear equations 4x1 x2 x3 2 x1 5x2 2x3 6 x1 2x2 4x3 4 xinit 0 5 0 5 0 5 examples coefficient np array4 1 1 1 5 2 1 2 4 constant np array2 6 4 initval 0 5 0 5 0 5 iterations 3 jacobiiterationmethodcoefficient constant initval iterations 0 909375 1 14375 0 7484375 coefficient np array4 1 1 1 5 2 constant np array2 6 4 initval 0 5 0 5 0 5 iterations 3 jacobiiterationmethodcoefficient constant initval iterations traceback most recent call last valueerror coefficient matrix dimensions must be nxn but received 2x3 coefficient np array4 1 1 1 5 2 1 2 4 constant np array2 6 initval 0 5 0 5 0 5 iterations 3 jacobiiterationmethod coefficient constant initval iterations doctest normalizewhitespace traceback most recent call last valueerror coefficient and constant matrices dimensions must be nxn and nx1 but received 3x3 and 2x1 coefficient np array4 1 1 1 5 2 1 2 4 constant np array2 6 4 initval 0 5 0 5 iterations 3 jacobiiterationmethod coefficient constant initval iterations doctest normalizewhitespace traceback most recent call last valueerror number of initial values must be equal to number of rows in coefficient matrix but received 2 and 3 coefficient np array4 1 1 1 5 2 1 2 4 constant np array2 6 4 initval 0 5 0 5 0 5 iterations 0 jacobiiterationmethodcoefficient constant initval iterations traceback most recent call last valueerror iterations must be at least 1 iterates the whole matrix for given number of times for in rangeiterations newval for row in rangerows temp 0 for col in rangecols if col row denom tablerowcol elif col cols 1 val tablerowcol else temp 1 tablerowcol initvalcol temp temp val denom newval appendtemp initval newval denominator a list of values along the diagonal vallast values of the last column of the table array 
masks boolean mask of all strings without diagonal elements array coefficientmatrix nodiagonals coefficientmatrix array values without diagonal elements here we get icol these are the column numbers for each row without diagonal elements except for the last column icol is converted to a twodimensional list ind which will be used to make selections from initval arr array see below iterates the whole matrix for given number of times checks if the given matrix is strictly diagonally dominant table np array4 1 1 2 1 5 2 6 1 2 4 4 strictlydiagonallydominanttable true table np array4 1 1 2 1 5 2 6 1 2 3 4 strictlydiagonallydominanttable traceback most recent call last valueerror coefficient matrix is not strictly diagonally dominant test cases method to find solution of system of linear equations jacobi iteration method an iterative algorithm to determine the solutions of strictly diagonally dominant system of linear equations 4x1 x2 x3 2 x1 5x2 2x3 6 x1 2x2 4x3 4 x_init 0 5 0 5 0 5 examples coefficient np array 4 1 1 1 5 2 1 2 4 constant np array 2 6 4 init_val 0 5 0 5 0 5 iterations 3 jacobi_iteration_method coefficient constant init_val iterations 0 909375 1 14375 0 7484375 coefficient np array 4 1 1 1 5 2 constant np array 2 6 4 init_val 0 5 0 5 0 5 iterations 3 jacobi_iteration_method coefficient constant init_val iterations traceback most recent call last valueerror coefficient matrix dimensions must be nxn but received 2x3 coefficient np array 4 1 1 1 5 2 1 2 4 constant np array 2 6 init_val 0 5 0 5 0 5 iterations 3 jacobi_iteration_method coefficient constant init_val iterations doctest normalize_whitespace traceback most recent call last valueerror coefficient and constant matrices dimensions must be nxn and nx1 but received 3x3 and 2x1 coefficient np array 4 1 1 1 5 2 1 2 4 constant np array 2 6 4 init_val 0 5 0 5 iterations 3 jacobi_iteration_method coefficient constant init_val iterations doctest normalize_whitespace traceback most recent call last valueerror 
number of initial values must be equal to number of rows in coefficient matrix but received 2 and 3 coefficient np array 4 1 1 1 5 2 1 2 4 constant np array 2 6 4 init_val 0 5 0 5 0 5 iterations 0 jacobi_iteration_method coefficient constant init_val iterations traceback most recent call last valueerror iterations must be at least 1 iterates the whole matrix for given number of times for _ in range iterations new_val for row in range rows temp 0 for col in range cols if col row denom table row col elif col cols 1 val table row col else temp 1 table row col init_val col temp temp val denom new_val append temp init_val new_val denominator a list of values along the diagonal val_last values of the last column of the table array masks boolean mask of all strings without diagonal elements array coefficient_matrix no_diagonals coefficient_matrix array values without diagonal elements here we get i_col these are the column numbers for each row without diagonal elements except for the last column i_col is converted to a two dimensional list ind which will be used to make selections from init_val arr array see below iterates the whole matrix for given number of times checks if the given matrix is strictly diagonally dominant table np array 4 1 1 2 1 5 2 6 1 2 4 4 strictly_diagonally_dominant table true table np array 4 1 1 2 1 5 2 6 1 2 3 4 strictly_diagonally_dominant table traceback most recent call last valueerror coefficient matrix is not strictly diagonally dominant test cases
from __future__ import annotations import numpy as np from numpy import float64 from numpy.typing import NDArray def jacobi_iteration_method( coefficient_matrix: NDArray[float64], constant_matrix: NDArray[float64], init_val: list[float], iterations: int, ) -> list[float]: rows1, cols1 = coefficient_matrix.shape rows2, cols2 = constant_matrix.shape if rows1 != cols1: msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}" raise ValueError(msg) if cols2 != 1: msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}" raise ValueError(msg) if rows1 != rows2: msg = ( "Coefficient and constant matrices dimensions must be nxn and nx1 but " f"received {rows1}x{cols1} and {rows2}x{cols2}" ) raise ValueError(msg) if len(init_val) != rows1: msg = ( "Number of initial values must be equal to number of rows in coefficient " f"matrix but received {len(init_val)} and {rows1}" ) raise ValueError(msg) if iterations <= 0: raise ValueError("Iterations must be at least 1") table: NDArray[float64] = np.concatenate( (coefficient_matrix, constant_matrix), axis=1 ) rows, cols = table.shape strictly_diagonally_dominant(table) denominator = np.diag(coefficient_matrix) val_last = table[:, -1] masks = ~np.eye(coefficient_matrix.shape[0], dtype=bool) no_diagonals = coefficient_matrix[masks].reshape(-1, rows - 1) i_row, i_col = np.where(masks) ind = i_col.reshape(-1, rows - 1) for _ in range(iterations): arr = np.take(init_val, ind) sum_product_rows = np.sum((-1) * no_diagonals * arr, axis=1) new_val = (sum_product_rows + val_last) / denominator init_val = new_val return new_val.tolist() def strictly_diagonally_dominant(table: NDArray[float64]) -> bool: rows, cols = table.shape is_diagonally_dominant = True for i in range(rows): total = 0 for j in range(cols - 1): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError("Coefficient matrix is not strictly diagonally dominant") return is_diagonally_dominant if __name__ == 
"__main__": import doctest doctest.testmod()
lowerupper lu decomposition factors a matrix as a product of a lower triangular matrix and an upper triangular matrix a square matrix has an lu decomposition under the following conditions if the matrix is invertible then it has an lu decomposition if and only if all of its leading principal minors are nonzero see https en wikipedia orgwikiminorlinearalgebra for an explanation of leading principal minors of a matrix if the matrix is singular i e not invertible and it has a rank of k i e it has k linearly independent columns then it has an lu decomposition if its first k leading principal minors are nonzero this algorithm will simply attempt to perform lu decomposition on any square matrix and raise an error if no such decomposition exists reference https en wikipedia orgwikiludecomposition perform lu decomposition on a given matrix and raises an error if the matrix isn t square or if no such decomposition exists matrix np array2 2 1 0 1 2 5 3 1 lowermat uppermat lowerupperdecompositionmatrix lowermat array1 0 0 0 1 0 2 5 8 1 uppermat array 2 2 1 0 1 2 0 0 17 5 matrix np array4 3 6 3 lowermat uppermat lowerupperdecompositionmatrix lowermat array1 0 1 5 1 uppermat array 4 3 0 1 5 matrix is not square matrix np array2 2 1 0 1 2 lowermat uppermat lowerupperdecompositionmatrix traceback most recent call last valueerror table has to be of square shaped array but got a 2x3 array 2 2 1 0 1 2 matrix is invertible but its first leading principal minor is 0 matrix np array0 1 1 0 lowermat uppermat lowerupperdecompositionmatrix traceback most recent call last arithmeticerror no lu decomposition exists matrix is singular but its first leading principal minor is 1 matrix np array1 0 1 0 lowermat uppermat lowerupperdecompositionmatrix lowermat array1 0 1 1 uppermat array1 0 0 0 matrix is singular but its first leading principal minor is 0 matrix np array0 1 0 1 lowermat uppermat lowerupperdecompositionmatrix traceback most recent call last arithmeticerror no lu decomposition 
exists ensure that table is a square array in total the necessary data is extracted through slices and the sum of the products is obtained perform lu decomposition on a given matrix and raises an error if the matrix isn t square or if no such decomposition exists matrix np array 2 2 1 0 1 2 5 3 1 lower_mat upper_mat lower_upper_decomposition matrix lower_mat array 1 0 0 0 1 0 2 5 8 1 upper_mat array 2 2 1 0 1 2 0 0 17 5 matrix np array 4 3 6 3 lower_mat upper_mat lower_upper_decomposition matrix lower_mat array 1 0 1 5 1 upper_mat array 4 3 0 1 5 matrix is not square matrix np array 2 2 1 0 1 2 lower_mat upper_mat lower_upper_decomposition matrix traceback most recent call last valueerror table has to be of square shaped array but got a 2x3 array 2 2 1 0 1 2 matrix is invertible but its first leading principal minor is 0 matrix np array 0 1 1 0 lower_mat upper_mat lower_upper_decomposition matrix traceback most recent call last arithmeticerror no lu decomposition exists matrix is singular but its first leading principal minor is 1 matrix np array 1 0 1 0 lower_mat upper_mat lower_upper_decomposition matrix lower_mat array 1 0 1 1 upper_mat array 1 0 0 0 matrix is singular but its first leading principal minor is 0 matrix np array 0 1 0 1 lower_mat upper_mat lower_upper_decomposition matrix traceback most recent call last arithmeticerror no lu decomposition exists ensure that table is a square array in total the necessary data is extracted through slices and the sum of the products is obtained
from __future__ import annotations import numpy as np def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]: rows, columns = np.shape(table) if rows != columns: msg = ( "'table' has to be of square shaped array but got a " f"{rows}x{columns} array:\n{table}" ) raise ValueError(msg) lower = np.zeros((rows, columns)) upper = np.zeros((rows, columns)) for i in range(columns): for j in range(i): total = np.sum(lower[i, :i] * upper[:i, j]) if upper[j][j] == 0: raise ArithmeticError("No LU decomposition exists") lower[i][j] = (table[i][j] - total) / upper[j][j] lower[i][i] = 1 for j in range(i, columns): total = np.sum(lower[i, :i] * upper[:i, j]) upper[i][j] = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
resources https en wikipedia orgwikiconjugategradientmethod https en wikipedia orgwikidefinitesymmetricmatrix returns true if input matrix is symmetric positive definite returns false otherwise for a matrix to be spd all eigenvalues must be positive import numpy as np matrix np array 4 12401784 5 01453636 0 63865857 5 01453636 12 33347422 3 40493586 0 63865857 3 40493586 5 78591885 ismatrixspdmatrix true matrix np array 0 34634879 1 96165514 2 18277744 0 74074469 1 19648894 1 34223498 0 7687067 0 06018373 1 16315631 ismatrixspdmatrix false ensure matrix is square if matrix not symmetric exit right away get eigenvalues and eignevectors for a symmetric matrix check sign of all eigenvalues np all returns a value of type np bool returns a symmetric positive definite matrix given a dimension input dimension gives the square matrix dimension output spdmatrix is an diminesion x dimensions symmetric positive definite spd matrix import numpy as np dimension 3 spdmatrix createspdmatrixdimension ismatrixspdspdmatrix true returns solution to the linear system np dotspdmatrix x b input spdmatrix is an nxn symmetric positive definite spd matrix loadvector is an nx1 vector output x is an nx1 vector that is the solution vector import numpy as np spdmatrix np array 8 73256573 5 02034289 2 68709226 5 02034289 3 78188322 0 91980451 2 68709226 0 91980451 1 94746467 b np array 5 80872761 3 23807431 1 95381422 conjugategradientspdmatrix b array0 63114139 0 01561498 0 13979294 ensure proper dimensionality initialize solution guess residual search direction set initial errors in solution guess and residual set iteration counter to threshold number of iterations save this value so we only calculate the matrixvector product once the main algorithm update search direction magnitude update solution guess calculate new residual calculate new krylov subspace scale calculate new a conjuage search direction calculate errors update variables update number of iterations testconjugategradient self 
running tests create linear system with spd matrix and known solution xtrue numpy solution our implementation ensure both solutions are close to xtrue and therefore one another returns true if input matrix is symmetric positive definite returns false otherwise for a matrix to be spd all eigenvalues must be positive import numpy as np matrix np array 4 12401784 5 01453636 0 63865857 5 01453636 12 33347422 3 40493586 0 63865857 3 40493586 5 78591885 _is_matrix_spd matrix true matrix np array 0 34634879 1 96165514 2 18277744 0 74074469 1 19648894 1 34223498 0 7687067 0 06018373 1 16315631 _is_matrix_spd matrix false ensure matrix is square if matrix not symmetric exit right away get eigenvalues and eignevectors for a symmetric matrix check sign of all eigenvalues np all returns a value of type np bool_ returns a symmetric positive definite matrix given a dimension input dimension gives the square matrix dimension output spd_matrix is an diminesion x dimensions symmetric positive definite spd matrix import numpy as np dimension 3 spd_matrix _create_spd_matrix dimension _is_matrix_spd spd_matrix true returns solution to the linear system np dot spd_matrix x b input spd_matrix is an nxn symmetric positive definite spd matrix load_vector is an nx1 vector output x is an nx1 vector that is the solution vector import numpy as np spd_matrix np array 8 73256573 5 02034289 2 68709226 5 02034289 3 78188322 0 91980451 2 68709226 0 91980451 1 94746467 b np array 5 80872761 3 23807431 1 95381422 conjugate_gradient spd_matrix b array 0 63114139 0 01561498 0 13979294 ensure proper dimensionality initialize solution guess residual search direction set initial errors in solution guess and residual set iteration counter to threshold number of iterations save this value so we only calculate the matrix vector product once the main algorithm update search direction magnitude update solution guess calculate new residual calculate new krylov subspace scale calculate new a conjuage search 
direction calculate errors update variables update number of iterations test_conjugate_gradient self running tests create linear system with spd matrix and known solution x_true numpy solution our implementation ensure both solutions are close to x_true and therefore one another
from typing import Any import numpy as np def _is_matrix_spd(matrix: np.ndarray) -> bool: assert np.shape(matrix)[0] == np.shape(matrix)[1] if np.allclose(matrix, matrix.T) is False: return False eigen_values, _ = np.linalg.eigh(matrix) return bool(np.all(eigen_values > 0)) def _create_spd_matrix(dimension: int) -> Any: random_matrix = np.random.randn(dimension, dimension) spd_matrix = np.dot(random_matrix, random_matrix.T) assert _is_matrix_spd(spd_matrix) return spd_matrix def conjugate_gradient( spd_matrix: np.ndarray, load_vector: np.ndarray, max_iterations: int = 1000, tol: float = 1e-8, ) -> Any: assert np.shape(spd_matrix)[0] == np.shape(spd_matrix)[1] assert np.shape(load_vector)[0] == np.shape(spd_matrix)[0] assert _is_matrix_spd(spd_matrix) x0 = np.zeros((np.shape(load_vector)[0], 1)) r0 = np.copy(load_vector) p0 = np.copy(r0) error_residual = 1e9 error_x_solution = 1e9 error = 1e9 iterations = 0 while error > tol: w = np.dot(spd_matrix, p0) alpha = np.dot(r0.T, r0) / np.dot(p0.T, w) x = x0 + alpha * p0 r = r0 - alpha * w beta = np.dot(r.T, r) / np.dot(r0.T, r0) p = r + beta * p0 error_residual = np.linalg.norm(r - r0) error_x_solution = np.linalg.norm(x - x0) error = np.maximum(error_residual, error_x_solution) x0 = np.copy(x) r0 = np.copy(r) p0 = np.copy(p) iterations += 1 if iterations > max_iterations: break return x def test_conjugate_gradient() -> None: dimension = 3 spd_matrix = _create_spd_matrix(dimension) x_true = np.random.randn(dimension, 1) b = np.dot(spd_matrix, x_true) x_numpy = np.linalg.solve(spd_matrix, b) x_conjugate_gradient = conjugate_gradient(spd_matrix, b) assert np.linalg.norm(x_numpy - x_true) <= 1e-6 assert np.linalg.norm(x_conjugate_gradient - x_true) <= 1e-6 if __name__ == "__main__": import doctest doctest.testmod() test_conjugate_gradient()
solve a linear system of equations using gaussian elimination with partial pivoting args matrix coefficient matrix with the last column representing the constants returns solution vector raises valueerror if the matrix is not correct i e singular https courses engr illinois educs357su2013lect htm lecture 7 example a np array2 1 1 3 1 2 2 1 2 dtypefloat b np array8 11 3 dtypefloat solution solvelinearsystemnp columnstacka b np allclosesolution np array2 3 1 true solvelinearsystemnp array0 0 0 0 dtypefloat arraynan nan lead element search upper triangular matrix find x vector back substitution return the solution vector example usage solve a linear system of equations using gaussian elimination with partial pivoting args matrix coefficient matrix with the last column representing the constants returns solution vector raises valueerror if the matrix is not correct i e singular https courses engr illinois edu cs357 su2013 lect htm lecture 7 example a np array 2 1 1 3 1 2 2 1 2 dtype float b np array 8 11 3 dtype float solution solve_linear_system np column_stack a b np allclose solution np array 2 3 1 true solve_linear_system np array 0 0 0 0 dtype float array nan nan lead element search upper triangular matrix find x vector back substitution return the solution vector example usage
import numpy as np matrix = np.array( [ [5.0, -5.0, -3.0, 4.0, -11.0], [1.0, -4.0, 6.0, -4.0, -10.0], [-2.0, -5.0, 4.0, -5.0, -12.0], [-3.0, -3.0, 5.0, -5.0, 8.0], ], dtype=float, ) def solve_linear_system(matrix: np.ndarray) -> np.ndarray: ab = np.copy(matrix) num_of_rows = ab.shape[0] num_of_columns = ab.shape[1] - 1 x_lst: list[float] = [] for column_num in range(num_of_rows): for i in range(column_num, num_of_columns): if abs(ab[i][column_num]) > abs(ab[column_num][column_num]): ab[[column_num, i]] = ab[[i, column_num]] if ab[column_num, column_num] == 0.0: raise ValueError("Matrix is not correct") else: pass if column_num != 0: for i in range(column_num, num_of_rows): ab[i, :] -= ( ab[i, column_num - 1] / ab[column_num - 1, column_num - 1] * ab[column_num - 1, :] ) for column_num in range(num_of_rows): for i in range(column_num, num_of_columns): if abs(ab[i][column_num]) > abs(ab[column_num][column_num]): ab[[column_num, i]] = ab[[i, column_num]] if ab[column_num, column_num] == 0.0: raise ValueError("Matrix is not correct") else: pass if column_num != 0: for i in range(column_num, num_of_rows): ab[i, :] -= ( ab[i, column_num - 1] / ab[column_num - 1, column_num - 1] * ab[column_num - 1, :] ) for column_num in range(num_of_rows - 1, -1, -1): x = ab[column_num, -1] / ab[column_num, column_num] x_lst.insert(0, x) for i in range(column_num - 1, -1, -1): ab[i, -1] -= ab[i, column_num] * x return np.asarray(x_lst) if __name__ == "__main__": from doctest import testmod from pathlib import Path testmod() file_path = Path(__file__).parent / "matrix.txt" try: matrix = np.loadtxt(file_path) except FileNotFoundError: print(f"Error: {file_path} not found. Using default matrix instead.") print(f"Matrix:\n{matrix}") print(f"{solve_linear_system(matrix) = }")
created on mon feb 26 14 29 11 2018 christian bender license mitlicense this module contains some useful classes and functions for dealing with linear algebra in python overview class vector function zerovectordimension function unitbasisvectordimension pos function axpyscalar vector1 vector2 function randomvectorn a b class matrix function squarezeromatrixn function randommatrixw h a b this class represents a vector of arbitrary size you need to give the vector components overview of the methods initcomponents collectionfloat none init the vector len gets the size of the vector number of components str returns a string representation addother vector vector addition subother vector vector subtraction mulother float scalar multiplication mulother vector dot product copy copies this vector and returns it componenti gets the ith component 0indexed changecomponentpos int value float changes specified component euclideanlength returns the euclidean length of the vector angleother vector deg bool returns the angle between two vectors todo compareoperator input components or nothing simple constructor for init the vector returns the size of the vector returns a string representation of the vector input other vector assumes other vector has the same size returns a new vector that represents the sum input other vector assumes other vector has the same size returns a new vector that represents the difference mul implements the scalar multiplication and the dotproduct copies this vector and returns it input index 0indexed output the ith component of the vector input an index pos and a value changes the specified component pos with the value precondition returns the euclidean length of the vector vector2 3 4 euclideanlength 5 385164807134504 vector1 euclideanlength 1 0 vector0 1 2 3 4 5 6 euclideanlength 9 539392014169456 vector euclideanlength traceback most recent call last exception vector is empty find angle between two vector self vector vector3 4 1 anglevector2 1 1 1 
4906464636572374 vector3 4 1 anglevector2 1 1 deg true 85 40775111366095 vector3 4 1 anglevector2 1 traceback most recent call last exception invalid operand returns a zerovector of size dimension precondition returns a unit basis vector with a one at index pos indexing at 0 precondition input a scalar and two vectors x and y output a vector computes the axpy operation precondition input size n of the vector random range a b output returns a random vector of size n with random integer components between a and b class matrix this class represents an arbitrary matrix overview of the methods init str returns a string representation addother matrix matrix addition subother matrix matrix subtraction mulother float scalar multiplication mulother vector vector multiplication height returns height width returns width componentx int y int returns specified component changecomponentx int y int value float changes specified component minorx int y int returns minor along x y cofactorx int y int returns cofactor along x y determinant returns determinant simple constructor for initializing the matrix with components returns a string representation of this matrix implements matrix addition implements matrix subtraction implements the matrixvector multiplication implements the matrixscalar multiplication getter for the height getter for the width returns the specified x y component changes the xy component of this matrix returns the minor along x y returns the cofactor signed minor along x y returns the determinant of an nxn matrix using laplace expansion returns a square zeromatrix of dimension nxn returns a random matrix wxh with integer components between a and b this class represents a vector of arbitrary size you need to give the vector components overview of the methods __init__ components collection float none init the vector __len__ gets the size of the vector number of components __str__ returns a string representation __add__ other vector vector addition __sub__ other 
vector vector subtraction __mul__ other float scalar multiplication __mul__ other vector dot product copy copies this vector and returns it component i gets the i th component 0 indexed change_component pos int value float changes specified component euclidean_length returns the euclidean length of the vector angle other vector deg bool returns the angle between two vectors todo compare operator input components or nothing simple constructor for init the vector returns the size of the vector returns a string representation of the vector input other vector assumes other vector has the same size returns a new vector that represents the sum input other vector assumes other vector has the same size returns a new vector that represents the difference error case mul implements the scalar multiplication and the dot product error case copies this vector and returns it input index 0 indexed output the i th component of the vector input an index pos and a value changes the specified component pos with the value precondition returns the euclidean length of the vector vector 2 3 4 euclidean_length 5 385164807134504 vector 1 euclidean_length 1 0 vector 0 1 2 3 4 5 6 euclidean_length 9 539392014169456 vector euclidean_length traceback most recent call last exception vector is empty find angle between two vector self vector vector 3 4 1 angle vector 2 1 1 1 4906464636572374 vector 3 4 1 angle vector 2 1 1 deg true 85 40775111366095 vector 3 4 1 angle vector 2 1 traceback most recent call last exception invalid operand returns a zero vector of size dimension precondition returns a unit basis vector with a one at index pos indexing at 0 precondition input a scalar and two vectors x and y output a vector computes the axpy operation precondition input size n of the vector random range a b output returns a random vector of size n with random integer components between a and b class matrix this class represents an arbitrary matrix overview of the methods __init__ __str__ returns a 
string representation __add__ other matrix matrix addition __sub__ other matrix matrix subtraction __mul__ other float scalar multiplication __mul__ other vector vector multiplication height returns height width returns width component x int y int returns specified component change_component x int y int value float changes specified component minor x int y int returns minor along x y cofactor x int y int returns cofactor along x y determinant returns determinant simple constructor for initializing the matrix with components returns a string representation of this matrix implements matrix addition implements matrix subtraction implements the matrix vector multiplication implements the matrix scalar multiplication matrix vector matrix scalar getter for the height getter for the width returns the specified x y component changes the x y component of this matrix returns the minor along x y returns the cofactor signed minor along x y returns the determinant of an nxn matrix using laplace expansion returns a square zero matrix of dimension nxn returns a random matrix wxh with integer components between a and b
from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class Vector: def __init__(self, components: Collection[float] | None = None) -> None: if components is None: components = [] self.__components = list(components) def __len__(self) -> int: return len(self.__components) def __str__(self) -> str: return "(" + ",".join(map(str, self.__components)) + ")" def __add__(self, other: Vector) -> Vector: size = len(self) if size == len(other): result = [self.__components[i] + other.component(i) for i in range(size)] return Vector(result) else: raise Exception("must have the same size") def __sub__(self, other: Vector) -> Vector: size = len(self) if size == len(other): result = [self.__components[i] - other.component(i) for i in range(size)] return Vector(result) else: raise Exception("must have the same size") @overload def __mul__(self, other: float) -> Vector: ... @overload def __mul__(self, other: Vector) -> float: ... 
def __mul__(self, other: float | Vector) -> float | Vector: if isinstance(other, (float, int)): ans = [c * other for c in self.__components] return Vector(ans) elif isinstance(other, Vector) and len(self) == len(other): size = len(self) prods = [self.__components[i] * other.component(i) for i in range(size)] return sum(prods) else: raise Exception("invalid operand!") def copy(self) -> Vector: return Vector(self.__components) def component(self, i: int) -> float: if isinstance(i, int) and -len(self.__components) <= i < len(self.__components): return self.__components[i] else: raise Exception("index out of range") def change_component(self, pos: int, value: float) -> None: assert -len(self.__components) <= pos < len(self.__components) self.__components[pos] = value def euclidean_length(self) -> float: if len(self.__components) == 0: raise Exception("Vector is empty") squares = [c**2 for c in self.__components] return math.sqrt(sum(squares)) def angle(self, other: Vector, deg: bool = False) -> float: num = self * other den = self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den)) else: return math.acos(num / den) def zero_vector(dimension: int) -> Vector: assert isinstance(dimension, int) return Vector([0] * dimension) def unit_basis_vector(dimension: int, pos: int) -> Vector: assert isinstance(dimension, int) assert isinstance(pos, int) ans = [0] * dimension ans[pos] = 1 return Vector(ans) def axpy(scalar: float, x: Vector, y: Vector) -> Vector: assert isinstance(x, Vector) assert isinstance(y, Vector) assert isinstance(scalar, (int, float)) return x * scalar + y def random_vector(n: int, a: int, b: int) -> Vector: random.seed(None) ans = [random.randint(a, b) for _ in range(n)] return Vector(ans) class Matrix: def __init__(self, matrix: list[list[float]], w: int, h: int) -> None: self.__matrix = matrix self.__width = w self.__height = h def __str__(self) -> str: ans = "" for i in range(self.__height): ans += "|" for j in 
range(self.__width): if j < self.__width - 1: ans += str(self.__matrix[i][j]) + "," else: ans += str(self.__matrix[i][j]) + "|\n" return ans def __add__(self, other: Matrix) -> Matrix: if self.__width == other.width() and self.__height == other.height(): matrix = [] for i in range(self.__height): row = [ self.__matrix[i][j] + other.component(i, j) for j in range(self.__width) ] matrix.append(row) return Matrix(matrix, self.__width, self.__height) else: raise Exception("matrix must have the same dimension!") def __sub__(self, other: Matrix) -> Matrix: if self.__width == other.width() and self.__height == other.height(): matrix = [] for i in range(self.__height): row = [ self.__matrix[i][j] - other.component(i, j) for j in range(self.__width) ] matrix.append(row) return Matrix(matrix, self.__width, self.__height) else: raise Exception("matrices must have the same dimension!") @overload def __mul__(self, other: float) -> Matrix: ... @overload def __mul__(self, other: Vector) -> Vector: ... def __mul__(self, other: float | Vector) -> Vector | Matrix: if isinstance(other, Vector): if len(other) == self.__width: ans = zero_vector(self.__height) for i in range(self.__height): prods = [ self.__matrix[i][j] * other.component(j) for j in range(self.__width) ] ans.change_component(i, sum(prods)) return ans else: raise Exception( "vector must have the same size as the " "number of columns of the matrix!" 
) elif isinstance(other, (int, float)): matrix = [ [self.__matrix[i][j] * other for j in range(self.__width)] for i in range(self.__height) ] return Matrix(matrix, self.__width, self.__height) return None def height(self) -> int: return self.__height def width(self) -> int: return self.__width def component(self, x: int, y: int) -> float: if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception("change_component: indices out of bounds") def change_component(self, x: int, y: int, value: float) -> None: if 0 <= x < self.__height and 0 <= y < self.__width: self.__matrix[x][y] = value else: raise Exception("change_component: indices out of bounds") def minor(self, x: int, y: int) -> float: if self.__height != self.__width: raise Exception("Matrix is not square") minor = self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(minor)): minor[i] = minor[i][:y] + minor[i][y + 1 :] return Matrix(minor, self.__width - 1, self.__height - 1).determinant() def cofactor(self, x: int, y: int) -> float: if self.__height != self.__width: raise Exception("Matrix is not square") if 0 <= x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(x, y) else: raise Exception("Indices out of bounds") def determinant(self) -> float: if self.__height != self.__width: raise Exception("Matrix is not square") if self.__height < 1: raise Exception("Matrix has no element") elif self.__height == 1: return self.__matrix[0][0] elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: cofactor_prods = [ self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width) ] return sum(cofactor_prods) def square_zero_matrix(n: int) -> Matrix: ans: list[list[float]] = [[0] * n for _ in range(n)] return Matrix(ans, n, n) def random_matrix(width: int, height: int, a: int, b: int) -> Matrix: random.seed(None) matrix: list[list[float]] = [ [random.randint(a, b) 
for _ in range(width)] for _ in range(height) ] return Matrix(matrix, width, height)
coordinates is a two dimensional matrix x y x y number of points you want to use printpointstopolynomial traceback most recent call last valueerror the program cannot work out a fitting polynomial printpointstopolynomial traceback most recent call last valueerror the program cannot work out a fitting polynomial printpointstopolynomial1 0 2 0 3 0 fxx20 0x10 0x00 0 printpointstopolynomial1 1 2 1 3 1 fxx20 0x10 0x01 0 printpointstopolynomial1 3 2 3 3 3 fxx20 0x10 0x03 0 printpointstopolynomial1 1 2 2 3 3 fxx20 0x11 0x00 0 printpointstopolynomial1 1 2 4 3 9 fxx21 0x10 0x00 0 printpointstopolynomial1 3 2 6 3 11 fxx21 0x10 0x02 0 printpointstopolynomial1 3 2 6 3 11 fxx21 0x10 0x02 0 printpointstopolynomial1 5 2 2 3 9 fxx25 0x118 0x018 0 put the x and x to the power values in a matrix put the y values into a vector manipulating all the values in the matrix manipulating the values in the vector make solutions coordinates is a two dimensional matrix x y x y number of points you want to use print points_to_polynomial traceback most recent call last valueerror the program cannot work out a fitting polynomial print points_to_polynomial traceback most recent call last valueerror the program cannot work out a fitting polynomial print points_to_polynomial 1 0 2 0 3 0 f x x 2 0 0 x 1 0 0 x 0 0 0 print points_to_polynomial 1 1 2 1 3 1 f x x 2 0 0 x 1 0 0 x 0 1 0 print points_to_polynomial 1 3 2 3 3 3 f x x 2 0 0 x 1 0 0 x 0 3 0 print points_to_polynomial 1 1 2 2 3 3 f x x 2 0 0 x 1 1 0 x 0 0 0 print points_to_polynomial 1 1 2 4 3 9 f x x 2 1 0 x 1 0 0 x 0 0 0 print points_to_polynomial 1 3 2 6 3 11 f x x 2 1 0 x 1 0 0 x 0 2 0 print points_to_polynomial 1 3 2 6 3 11 f x x 2 1 0 x 1 0 0 x 0 2 0 print points_to_polynomial 1 5 2 2 3 9 f x x 2 5 0 x 1 18 0 x 0 18 0 put the x and x to the power values in a matrix put the y values into a vector manipulating all the values in the matrix manipulating the values in the vector make solutions
def points_to_polynomial(coordinates: list[list[int]]) -> str: if len(coordinates) == 0 or not all(len(pair) == 2 for pair in coordinates): raise ValueError("The program cannot work out a fitting polynomial.") if len({tuple(pair) for pair in coordinates}) != len(coordinates): raise ValueError("The program cannot work out a fitting polynomial.") set_x = {x for x, _ in coordinates} if len(set_x) == 1: return f"x={coordinates[0][0]}" if len(set_x) != len(coordinates): raise ValueError("The program cannot work out a fitting polynomial.") x = len(coordinates) matrix: list[list[float]] = [ [ coordinates[count_of_line][0] ** (x - (count_in_line + 1)) for count_in_line in range(x) ] for count_of_line in range(x) ] vector: list[float] = [coordinates[count_of_line][1] for count_of_line in range(x)] for count in range(x): for number in range(x): if count == number: continue fraction = matrix[number][count] / matrix[count][count] for counting_columns, item in enumerate(matrix[count]): matrix[number][counting_columns] -= item * fraction vector[number] -= vector[count] * fraction solution: list[str] = [ str(vector[count] / matrix[count][count]) for count in range(x) ] solved = "f(x)=" for count in range(x): remove_e: list[str] = solution[count].split("E") if len(remove_e) > 1: solution[count] = f"{remove_e[0]}*10^{remove_e[1]}" solved += f"x^{x - (count + 1)}*{solution[count]}" if count + 1 != x: solved += "+" return solved if __name__ == "__main__": print(points_to_polynomial([])) print(points_to_polynomial([[]])) print(points_to_polynomial([[1, 0], [2, 0], [3, 0]])) print(points_to_polynomial([[1, 1], [2, 1], [3, 1]])) print(points_to_polynomial([[1, 3], [2, 3], [3, 3]])) print(points_to_polynomial([[1, 1], [2, 2], [3, 3]])) print(points_to_polynomial([[1, 1], [2, 4], [3, 9]])) print(points_to_polynomial([[1, 3], [2, 6], [3, 11]])) print(points_to_polynomial([[1, -3], [2, -6], [3, -11]])) print(points_to_polynomial([[1, 5], [2, 2], [3, 9]]))
power iteration find the largest eigenvalue and corresponding eigenvector of matrix inputmatrix given a random vector in the same space will work so long as vector has component of largest eigenvector inputmatrix must be either real or hermitian input inputmatrix input matrix whose largest eigenvalue we will find numpy array np shapeinputmatrix n n vector random initial vector in same space as matrix numpy array np shapevector n or n 1 output largesteigenvalue largest eigenvalue of the matrix inputmatrix float scalar largesteigenvector eigenvector corresponding to largesteigenvalue numpy array np shapelargesteigenvector n or n 1 import numpy as np inputmatrix np array 41 4 20 4 26 30 20 30 50 vector np array41 4 20 poweriterationinputmatrix vector 79 66086378788381 array0 44472726 0 46209842 0 76725662 ensure matrix is square ensure proper dimensionality ensure inputs are either both complex or both real ensure complex inputmatrix is hermitian set convergence to false will define convergence when we exceed maxiterations or when we have small changes from one iteration to next multiple matrix by the vector normalize the resulting output vector find rayleigh quotient faster than usual bc we know vector is normalized already check convergence testpoweriteration self running tests our implementation numpy implementation get eigenvalues and eigenvectors using builtin numpy eigh eigh used for symmetric or hermetian matrices last eigenvalue is the maximum one last column in this matrix is eigenvector corresponding to largest eigenvalue check our implementation and numpy gives close answers take absolute values element wise of each eigenvector as they are only unique to a minus sign power iteration find the largest eigenvalue and corresponding eigenvector of matrix input_matrix given a random vector in the same space will work so long as vector has component of largest eigenvector input_matrix must be either real or hermitian input input_matrix input matrix whose largest 
eigenvalue we will find numpy array np shape input_matrix n n vector random initial vector in same space as matrix numpy array np shape vector n or n 1 output largest_eigenvalue largest eigenvalue of the matrix input_matrix float scalar largest_eigenvector eigenvector corresponding to largest_eigenvalue numpy array np shape largest_eigenvector n or n 1 import numpy as np input_matrix np array 41 4 20 4 26 30 20 30 50 vector np array 41 4 20 power_iteration input_matrix vector 79 66086378788381 array 0 44472726 0 46209842 0 76725662 ensure matrix is square ensure proper dimensionality ensure inputs are either both complex or both real ensure complex input_matrix is hermitian set convergence to false will define convergence when we exceed max_iterations or when we have small changes from one iteration to next multiple matrix by the vector normalize the resulting output vector find rayleigh quotient faster than usual b c we know vector is normalized already check convergence test_power_iteration self running tests our implementation numpy implementation get eigenvalues and eigenvectors using built in numpy eigh eigh used for symmetric or hermetian matrices last eigenvalue is the maximum one last column in this matrix is eigenvector corresponding to largest eigenvalue check our implementation and numpy gives close answers take absolute values element wise of each eigenvector as they are only unique to a minus sign
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Approximate the dominant eigenpair of ``input_matrix`` by power iteration.

    Repeatedly multiplies ``vector`` by the matrix and renormalises; works as
    long as the starting vector has a component along the dominant eigenvector.
    ``input_matrix`` must be real, or complex and Hermitian.

    Args:
        input_matrix: Square matrix of shape (n, n).
        vector: Starting vector of shape (n,) or (n, 1), same dtype family
            (real/complex) as the matrix.
        error_tol: Relative change in the eigenvalue estimate at which the
            iteration is considered converged.
        max_iterations: Hard cap on the number of iterations.

    Returns:
        Tuple of (largest eigenvalue, corresponding unit eigenvector).

    Example: for the symmetric matrix [[41, 4, 20], [4, 26, 30], [20, 30, 50]]
    and start vector [41, 4, 20] the eigenvalue is ~79.66086378788381.
    """
    # Matrix must be square and the vector must live in the same space.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Inputs must be either both complex or both real.
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # A complex matrix must be Hermitian for a real dominant eigenvalue.
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    lambda_previous = 0.0
    iterations = 0
    while True:
        # Multiply matrix by the vector, then normalise the result.
        product = np.dot(input_matrix, vector)
        vector = product / np.linalg.norm(product)

        # Rayleigh quotient — no denominator needed since vector is normalised.
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Relative change between successive eigenvalue estimates.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            break
        lambda_previous = lambda_

    if is_complex:
        # Hermitian matrices have real eigenvalues; drop the zero imag part.
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    """Compare power_iteration against numpy.linalg.eigh on real and complex input."""
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])

    # Build a Hermitian complex matrix by adding an antisymmetric imaginary part.
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for input_matrix, vector in (
        (real_input_matrix, real_vector),
        (complex_input_matrix, complex_vector),
    ):
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy reference: eigh handles symmetric / Hermitian matrices and
        # returns eigenvalues in ascending order, so the last one is largest.
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        eigen_value_max = eigen_values[-1]
        eigen_vector_max = eigen_vectors[:, -1]

        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Eigenvectors are unique only up to sign; compare magnitudes.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
calculate the rank of a matrix see https en wikipedia orgwikiranklinearalgebra finds the rank of a matrix args matrix the matrix as a list of lists returns the rank of the matrix example matrix1 1 2 3 4 5 6 7 8 9 rankofmatrixmatrix1 2 matrix2 1 0 0 0 1 0 0 0 0 rankofmatrixmatrix2 2 matrix3 1 2 3 4 5 6 7 8 9 10 11 12 rankofmatrixmatrix3 2 rankofmatrix2 3 1 1 1 1 2 4 3 1 3 2 6 3 0 7 4 rankofmatrix2 1 3 6 3 3 1 2 1 1 1 2 3 rankofmatrix2 1 0 1 3 4 4 1 3 3 rankofmatrix3 2 1 6 4 2 1 rankofmatrix 0 rankofmatrix1 1 rankofmatrix 0 check if diagonal element is not zero eliminate all the elements below the diagonal find a nonzero diagonal element to swap rows reduce the row pointer by one to stay on the same row finds the rank of a matrix args matrix the matrix as a list of lists returns the rank of the matrix example matrix1 1 2 3 4 5 6 7 8 9 rank_of_matrix matrix1 2 matrix2 1 0 0 0 1 0 0 0 0 rank_of_matrix matrix2 2 matrix3 1 2 3 4 5 6 7 8 9 10 11 12 rank_of_matrix matrix3 2 rank_of_matrix 2 3 1 1 1 1 2 4 3 1 3 2 6 3 0 7 4 rank_of_matrix 2 1 3 6 3 3 1 2 1 1 1 2 3 rank_of_matrix 2 1 0 1 3 4 4 1 3 3 rank_of_matrix 3 2 1 6 4 2 1 rank_of_matrix 0 rank_of_matrix 1 1 rank_of_matrix 0 check if diagonal element is not zero eliminate all the elements below the diagonal find a non zero diagonal element to swap rows reduce the row pointer by one to stay on the same row
def rank_of_matrix(matrix: list[list[int | float]]) -> int: rows = len(matrix) columns = len(matrix[0]) rank = min(rows, columns) for row in range(rank): if matrix[row][row] != 0: for col in range(row + 1, rows): multiplier = matrix[col][row] / matrix[row][row] for i in range(row, columns): matrix[col][i] -= multiplier * matrix[row][i] else: reduce = True for i in range(row + 1, rows): if matrix[i][row] != 0: matrix[row], matrix[i] = matrix[i], matrix[row] reduce = False break if reduce: rank -= 1 for i in range(rows): matrix[i][row] = matrix[i][rank] row -= 1 return rank if __name__ == "__main__": import doctest doctest.testmod()
https://en.wikipedia.org/wiki/Rayleigh_quotient — this module checks whether a matrix is Hermitian (equal to its own conjugate transpose) and computes the Rayleigh quotient R(A, v) = (v* A v) / (v* v) of a Hermitian matrix A and a vector v. Examples: for A = [[2, 2+1j, 4], [2-1j, 3, 1j], [4, -1j, 1]], is_hermitian(A) is True; dropping the Hermitian symmetry (e.g. changing the top-right entry) makes it False. For the real symmetric A = [[1, 2, 4], [2, 3, -1], [4, -1, 1]] and v = (1, 2, 3)^T, rayleigh_quotient(A, v) equals 3.
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Return True when *matrix* equals its own conjugate transpose.

    >>> a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    >>> is_hermitian(a)
    True
    >>> a = np.array([[2, 2 + 1j, 4 + 1j], [2 - 1j, 3, 1j], [4, -1j, 1]])
    >>> is_hermitian(a)
    False
    """
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient (v* A v) / (v* v) of matrix ``a`` and vector ``v``.

    See https://en.wikipedia.org/wiki/Rayleigh_quotient
    """
    v_conjugate = v.conjugate().T
    numerator = v_conjugate.dot(a)
    assert isinstance(numerator, np.ndarray)
    return numerator.dot(v) / v_conjugate.dot(v)


def tests() -> None:
    """Smoke tests: a Hermitian complex matrix and a symmetric real matrix."""
    complex_matrix = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    column = np.array([[1], [2], [3]])
    assert is_hermitian(complex_matrix), f"{complex_matrix} is not hermitian."
    print(rayleigh_quotient(complex_matrix, column))

    real_matrix = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(real_matrix), f"{real_matrix} is not hermitian."
    # For this matrix and vector the Rayleigh quotient is exactly 3.
    assert rayleigh_quotient(real_matrix, column) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
Schur complement of a symmetric matrix X given as a 2x2 block matrix X = [[A, B], [B^T, C]]: S = C - B^T A^{-1} B. Matrix A must be square and non-singular; in case A is singular, a pseudo-inverse may be provided using the pseudo_inv argument. Link to wiki: https://en.wikipedia.org/wiki/Schur_complement. See also Convex Optimization (Boyd and Vandenberghe), Appendix A.5.5. Example: A = [[1, 2], [2, 1]], B = [[0, 3], [3, 0]], C = [[2, 1], [6, 3]] gives S = [[5, -5], [0, 6]].
import unittest import numpy as np import pytest def schur_complement( mat_a: np.ndarray, mat_b: np.ndarray, mat_c: np.ndarray, pseudo_inv: np.ndarray | None = None, ) -> np.ndarray: shape_a = np.shape(mat_a) shape_b = np.shape(mat_b) shape_c = np.shape(mat_c) if shape_a[0] != shape_b[0]: msg = ( "Expected the same number of rows for A and B. " f"Instead found A of size {shape_a} and B of size {shape_b}" ) raise ValueError(msg) if shape_b[1] != shape_c[1]: msg = ( "Expected the same number of columns for B and C. " f"Instead found B of size {shape_b} and C of size {shape_c}" ) raise ValueError(msg) a_inv = pseudo_inv if a_inv is None: try: a_inv = np.linalg.inv(mat_a) except np.linalg.LinAlgError: raise ValueError( "Input matrix A is not invertible. Cannot compute Schur complement." ) return mat_c - mat_b.T @ a_inv @ mat_b class TestSchurComplement(unittest.TestCase): def test_schur_complement(self) -> None: a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]]) b = np.array([[0, 3], [3, 0], [2, 3]]) c = np.array([[2, 1], [6, 3]]) s = schur_complement(a, b, c) input_matrix = np.block([[a, b], [b.T, c]]) det_x = np.linalg.det(input_matrix) det_a = np.linalg.det(a) det_s = np.linalg.det(s) assert np.is_close(det_x, det_a * det_s) def test_improper_a_b_dimensions(self) -> None: a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]]) b = np.array([[0, 3], [3, 0], [2, 3]]) c = np.array([[2, 1], [6, 3]]) with pytest.raises(ValueError): schur_complement(a, b, c) def test_improper_b_c_dimensions(self) -> None: a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]]) b = np.array([[0, 3], [3, 0], [2, 3]]) c = np.array([[2, 1, 3], [6, 3, 5]]) with pytest.raises(ValueError): schur_complement(a, b, c) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
created on mon feb 26 15 40 07 2018 christian bender license mitlicense this file contains the testsuite for the linear algebra library test for method component test for method tostring test for method size test for method euclideanlength test for operator test for operator test for operator test for global function zerovector test for global function unitbasisvector test for global function axpy operation test for method copy test for method changecomponent test for matrix method str test for matrix method minor test for matrix method cofactor test for matrix method determinant test for matrix operator test for matrix method changecomponent test for matrix method component test for matrix operator test for matrix operator test for global function squarezeromatrix test for method component test for method tostring test for method size test for method euclidean_length test for operator test for operator test for operator for test of dot product test for global function zero_vector test for global function unit_basis_vector test for global function axpy operation test for method copy test for method change_component test for matrix method str test for matrix method minor test for matrix method cofactor test for matrix method determinant test for matrix operator test for matrix method change_component test for matrix method component test for matrix operator test for matrix operator test for global function square_zero_matrix
import unittest

import pytest

from .lib import (
    Matrix,
    Vector,
    axpy,
    square_zero_matrix,
    unit_basis_vector,
    zero_vector,
)


class Test(unittest.TestCase):
    """Test suite for the Vector and Matrix classes in .lib."""

    def test_component(self) -> None:
        """test for method component()"""
        x = Vector([1, 2, 3])
        assert x.component(0) == 1
        assert x.component(2) == 3
        # Constructing an empty vector must not raise.
        _ = Vector()

    def test_str(self) -> None:
        """test for method __str__()"""
        x = Vector([0, 0, 0, 0, 0, 1])
        assert str(x) == "(0,0,0,0,0,1)"

    def test_size(self) -> None:
        """test for method __len__()"""
        x = Vector([1, 2, 3, 4])
        assert len(x) == 4

    def test_euclidean_length(self) -> None:
        """test for method euclidean_length()"""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        assert x.euclidean_length() == pytest.approx(2.236, abs=1e-3)
        assert y.euclidean_length() == pytest.approx(7.416, abs=1e-3)
        assert z.euclidean_length() == 0
        assert w.euclidean_length() == pytest.approx(7.616, abs=1e-3)

    def test_add(self) -> None:
        """test for operator +"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        assert (x + y).component(0) == 2
        assert (x + y).component(1) == 3
        assert (x + y).component(2) == 4

    def test_sub(self) -> None:
        """test for operator -"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        assert (x - y).component(0) == 0
        assert (x - y).component(1) == 1
        assert (x - y).component(2) == 2

    def test_mul(self) -> None:
        """test for operator * (scalar product and dot product)"""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        assert str(x * 3.0) == "(3.0,6.0,9.0)"
        assert a * b == 0

    def test_zero_vector(self) -> None:
        """test for global function zero_vector()"""
        assert str(zero_vector(10)).count("0") == 10

    def test_unit_basis_vector(self) -> None:
        """test for global function unit_basis_vector()"""
        assert str(unit_basis_vector(3, 1)) == "(0,1,0)"

    def test_axpy(self) -> None:
        """test for global function axpy() (operation)"""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        assert str(axpy(2, x, y)) == "(3,4,7)"

    def test_copy(self) -> None:
        """test for method copy()"""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        assert str(x) == str(y)

    def test_change_component(self) -> None:
        """test for method change_component()"""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        assert str(x) == "(0,1,0)"

    def test_str_matrix(self) -> None:
        """test for Matrix method __str__()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        assert str(a) == "|1,2,3|\n|2,4,5|\n|6,7,8|\n"

    def test_minor(self) -> None:
        """test for Matrix method minor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                assert minors[x][y] == a.minor(x, y)

    def test_cofactor(self) -> None:
        """test for Matrix method cofactor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                assert cofactors[x][y] == a.cofactor(x, y)

    def test_determinant(self) -> None:
        """test for Matrix method determinant()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        assert a.determinant() == -5

    def test__mul__matrix(self) -> None:
        """test for Matrix operator * (matrix-vector and matrix-scalar)"""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        assert str(a * x) == "(14,32,50)"
        assert str(a * 2) == "|2,4,6|\n|8,10,12|\n|14,16,18|\n"

    def test_change_component_matrix(self) -> None:
        """test for Matrix method change_component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        assert str(a) == "|1,2,5|\n|2,4,5|\n|6,7,8|\n"

    def test_component_matrix(self) -> None:
        """test for Matrix method component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        # Bug fix: was "assert a.component(2, 1) == 7, 0.01" — the trailing
        # 0.01 was only an assert *message* (a leftover tolerance from
        # unittest.assertAlmostEqual), not a comparison tolerance.
        assert a.component(2, 1) == 7

    def test__add__matrix(self) -> None:
        """test for Matrix operator +"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        assert str(a + b) == "|2,4,10|\n|4,8,10|\n|12,14,18|\n"

    def test__sub__matrix(self) -> None:
        """test for Matrix operator -"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        assert str(a - b) == "|0,0,-4|\n|0,0,0|\n|0,0,-2|\n"

    def test_square_zero_matrix(self) -> None:
        """test for global function square_zero_matrix()"""
        assert str(square_zero_matrix(5)) == (
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n"
        )


if __name__ == "__main__":
    unittest.main()
2d transformations are regularly used in linear algebra i have added the codes for reflection projection scaling and rotation 2d matrices scaling5 5 0 0 0 0 0 5 0 rotation45 0 5253219888177297 0 8509035245341184 0 8509035245341184 0 5253219888177297 projection45 0 27596319193541496 0 446998331800279 0 446998331800279 0 7240368080645851 reflection45 0 05064397763545947 0 893996663600558 0 893996663600558 0 7018070490682369 scaling5 5 0 0 0 0 0 5 0 rotation45 doctest normalizewhitespace 0 5253219888177297 0 8509035245341184 0 8509035245341184 0 5253219888177297 projection45 doctest normalizewhitespace 0 27596319193541496 0 446998331800279 0 446998331800279 0 7240368080645851 reflection45 doctest normalizewhitespace 0 05064397763545947 0 893996663600558 0 893996663600558 0 7018070490682369 scaling 5 5 0 0 0 0 0 5 0 rotation 45 doctest normalize_whitespace 0 5253219888177297 0 8509035245341184 0 8509035245341184 0 5253219888177297 projection 45 doctest normalize_whitespace 0 27596319193541496 0 446998331800279 0 446998331800279 0 7240368080645851 reflection 45 doctest normalize_whitespace 0 05064397763545947 0 893996663600558 0 893996663600558 0 7018070490682369
from math import cos, sin


def scaling(scaling_factor: float) -> list[list[float]]:
    """Return the 2x2 matrix scaling the plane by ``scaling_factor``.

    >>> scaling(5)
    [[5.0, 0.0], [0.0, 5.0]]
    """
    factor = float(scaling_factor)
    return [[factor if row == col else 0.0 for col in range(2)] for row in range(2)]


def rotation(angle: float) -> list[list[float]]:
    """Return the 2x2 rotation matrix for ``angle`` (in radians)."""
    cosine, sine = cos(angle), sin(angle)
    return [[cosine, -sine], [sine, cosine]]


def projection(angle: float) -> list[list[float]]:
    """Return the 2x2 matrix projecting onto the line at ``angle`` radians."""
    cosine, sine = cos(angle), sin(angle)
    cross = cosine * sine
    return [[cosine * cosine, cross], [cross, sine * sine]]


def reflection(angle: float) -> list[list[float]]:
    """Return this module's reflection matrix for ``angle`` radians.

    NOTE(review): a textbook reflection about the line at angle ``a`` uses
    ``2*cos(a)**2 - 1`` and ``2*sin(a)**2 - 1`` on the diagonal; this
    implementation uses ``2*cos(a) - 1`` / ``2*sin(a) - 1``. Preserved as-is
    to keep the module's documented output stable — confirm intent upstream.
    """
    cosine, sine = cos(angle), sin(angle)
    cross = cosine * sine
    return [[2 * cosine - 1, 2 * cross], [2 * cross, 2 * sine - 1]]


print(f" {scaling(5) = }")
print(f" {rotation(45) = }")
print(f"{projection(45) = }")
print(f"{reflection(45) = }")
python implementation of the simplex algorithm for solving linear programs in tabular form with and constraints and each variable x1 x2 0 see https gist github comimengusf9619a568f7da5bc74eaf20169a24d98 for how to convert linear programs to simplex tableaus and the steps taken in the simplex algorithm resources https en wikipedia orgwikisimplexalgorithm https tinyurl comsimplex4beginners operate on simplex tableaus tableaunp array1 1 0 0 1 1 3 1 0 4 3 1 0 1 4 2 2 traceback most recent call last typeerror tableau must have type float64 tableaunp array1 1 0 0 1 1 3 1 0 4 3 1 0 1 4 2 2 traceback most recent call last valueerror rhs must be 0 tableaunp array1 1 0 0 1 1 3 1 0 4 3 1 0 1 4 2 2 traceback most recent call last valueerror number of artificial variables must be a natural number max iteration number to prevent cycling check if rhs is negative number of decision variables x1 x2 x3 2 if there are or constraints nonstandard 1 otherwise std number of slack variables added to make inequalities into equalities objectives for each stage in two stage simplex first minimise then maximise index of current pivot row and column does objective row only contain nonnegative values generate column titles for tableau of specific dimensions tableaunp array1 1 0 0 1 1 3 1 0 4 3 1 0 1 4 2 0 generatecoltitles x1 x2 s1 s2 rhs tableaunp array1 1 0 0 1 1 3 1 0 4 3 1 0 1 4 2 2 generatecoltitles x1 x2 rhs decision slack finds the pivot row and column tableaunp array2 1 0 0 0 3 1 1 0 6 1 2 0 1 7 2 0 findpivot 1 0 find entries of highest magnitude in objective rows choice is only valid if below 0 for maximise and above for minimise pivot row is chosen as having the lowest quotient when elements of the pivot column divide the righthand side slice excluding the objective rows rhs elements of pivot column within slice array filled with nans if element in pivot column is greater than zero return quotient or nan otherwise arg of minimum quotient excluding the nan values nstages is added to 
compensate for earlier exclusion of objective columns pivots on value on the intersection of pivot row and column tableaunp array2 3 0 0 0 1 3 1 0 4 3 1 0 1 4 2 2 pivot1 0 tolist doctest normalizewhitespace 0 0 3 0 2 0 0 0 8 0 1 0 3 0 1 0 0 0 4 0 0 0 8 0 3 0 1 0 8 0 avoid changes to original tableau entry becomes 1 variable in pivot column becomes basic ie the only nonzero entry exits first phase of the twostage method by deleting artificial rows and columns or completes the algorithm if exiting the standard case tableaunp array 3 3 1 1 0 0 4 2 1 0 0 0 0 0 1 2 1 0 1 0 2 2 1 0 1 0 1 2 2 2 changestage tolist doctest normalizewhitespace 2 0 1 0 0 0 0 0 0 0 1 0 2 0 1 0 0 0 2 0 2 0 1 0 0 0 1 0 2 0 objective of original objective row remains slice containing ids for artificial columns delete the artificial variable columns delete the objective row of the first stage operate on tableau until objective function cannot be improved further standard linear program max x1 x2 st x1 3x2 4 3x1 x2 4 tableaunp array1 1 0 0 0 1 3 1 0 4 3 1 0 1 4 2 0 runsimplex p 2 0 x1 1 0 x2 1 0 standard linear program with 3 variables max 3x1 x2 3x3 st 2x1 x2 x3 2 x1 2x2 3x3 5 2x1 2x2 x3 6 tableaunp array 3 1 3 0 0 0 0 2 1 1 1 0 0 2 1 2 3 0 1 0 5 2 2 1 0 0 1 6 3 0 runsimplex doctest ellipsis p 5 4 x1 0 199 x3 1 6 optimal tableau input tableaunp array 0 0 0 25 0 25 2 0 1 0 375 0 125 1 1 0 0 125 0 375 1 2 0 runsimplex p 2 0 x1 1 0 x2 1 0 nonstandard constraints max 2x1 3x2 x3 st x1 x2 x3 40 2x1 x2 x3 10 x2 x3 10 tableaunp array 2 0 0 0 1 1 0 0 20 2 3 1 0 0 0 0 0 0 1 1 1 1 0 0 0 0 40 2 1 1 0 1 0 1 0 10 0 1 1 0 0 1 0 1 10 3 2 runsimplex p 70 0 x1 10 0 x2 10 0 x3 20 0 non standard minimisation and equalities min x1 x2 st 2x1 x2 12 6x1 5x2 40 tableaunp array 8 6 0 0 52 1 1 0 0 0 2 1 1 0 12 6 5 0 1 40 2 2 runsimplex p 7 0 x1 5 0 x2 2 0 pivot on slack variables max 8x1 6x2 st x1 3x2 33 4x1 2x2 48 2x1 4x2 48 x1 x2 10 x1 2 tableaunp array 2 1 0 0 0 1 1 0 0 12 0 8 6 0 0 0 0 0 0 0 0 0 1 3 1 0 0 0 0 0 0 33 0 4 
2 0 1 0 0 0 0 0 60 0 2 4 0 0 1 0 0 0 0 48 0 1 1 0 0 0 1 0 1 0 10 0 1 0 0 0 0 0 1 0 1 2 0 2 2 runsimplex doctest ellipsis p 132 0 x1 12 000 x2 5 999 stop simplex algorithm from cycling completion of each stage removes an objective if both stages are complete then no objectives are left find the values of each variable at optimal solution if there are no more negative values in objective row delete artificial variable columns and rows update attributes given the final tableau add the corresponding values of the basic decision variables to the outputdict tableaunp array 0 0 0 875 0 375 5 0 1 0 375 0 125 1 1 0 0 125 0 375 1 2 0 interprettableau p 5 0 x1 1 0 x2 1 0 p rhs of final tableau gives indices of nonzero entries in the ith column first entry in the nonzero indices if there is only one nonzero value in column which is one operate on simplex tableaus tableau np array 1 1 0 0 1 1 3 1 0 4 3 1 0 1 4 2 2 traceback most recent call last typeerror tableau must have type float64 tableau np array 1 1 0 0 1 1 3 1 0 4 3 1 0 1 4 2 2 traceback most recent call last valueerror rhs must be 0 tableau np array 1 1 0 0 1 1 3 1 0 4 3 1 0 1 4 2 2 traceback most recent call last valueerror number of artificial variables must be a natural number max iteration number to prevent cycling check if rhs is negative number of decision variables x1 x2 x3 2 if there are or constraints nonstandard 1 otherwise std number of slack variables added to make inequalities into equalities objectives for each stage in two stage simplex first minimise then maximise index of current pivot row and column does objective row only contain non negative values generate column titles for tableau of specific dimensions tableau np array 1 1 0 0 1 1 3 1 0 4 3 1 0 1 4 2 0 generate_col_titles x1 x2 s1 s2 rhs tableau np array 1 1 0 0 1 1 3 1 0 4 3 1 0 1 4 2 2 generate_col_titles x1 x2 rhs decision slack finds the pivot row and column tableau np array 2 1 0 0 0 3 1 1 0 6 1 2 0 1 7 2 0 find_pivot 1 0 find entries of 
highest magnitude in objective rows choice is only valid if below 0 for maximise and above for minimise pivot row is chosen as having the lowest quotient when elements of the pivot column divide the right hand side slice excluding the objective rows rhs elements of pivot column within slice array filled with nans if element in pivot column is greater than zero return quotient or nan otherwise arg of minimum quotient excluding the nan values n_stages is added to compensate for earlier exclusion of objective columns pivots on value on the intersection of pivot row and column tableau np array 2 3 0 0 0 1 3 1 0 4 3 1 0 1 4 2 2 pivot 1 0 tolist doctest normalize_whitespace 0 0 3 0 2 0 0 0 8 0 1 0 3 0 1 0 0 0 4 0 0 0 8 0 3 0 1 0 8 0 avoid changes to original tableau entry becomes 1 variable in pivot column becomes basic ie the only non zero entry exits first phase of the two stage method by deleting artificial rows and columns or completes the algorithm if exiting the standard case tableau np array 3 3 1 1 0 0 4 2 1 0 0 0 0 0 1 2 1 0 1 0 2 2 1 0 1 0 1 2 2 2 change_stage tolist doctest normalize_whitespace 2 0 1 0 0 0 0 0 0 0 1 0 2 0 1 0 0 0 2 0 2 0 1 0 0 0 1 0 2 0 objective of original objective row remains slice containing ids for artificial columns delete the artificial variable columns delete the objective row of the first stage operate on tableau until objective function cannot be improved further standard linear program max x1 x2 st x1 3x2 4 3x1 x2 4 tableau np array 1 1 0 0 0 1 3 1 0 4 3 1 0 1 4 2 0 run_simplex p 2 0 x1 1 0 x2 1 0 standard linear program with 3 variables max 3x1 x2 3x3 st 2x1 x2 x3 2 x1 2x2 3x3 5 2x1 2x2 x3 6 tableau np array 3 1 3 0 0 0 0 2 1 1 1 0 0 2 1 2 3 0 1 0 5 2 2 1 0 0 1 6 3 0 run_simplex doctest ellipsis p 5 4 x1 0 199 x3 1 6 optimal tableau input tableau np array 0 0 0 25 0 25 2 0 1 0 375 0 125 1 1 0 0 125 0 375 1 2 0 run_simplex p 2 0 x1 1 0 x2 1 0 non standard constraints max 2x1 3x2 x3 st x1 x2 x3 40 2x1 x2 x3 10 x2 x3 10 tableau np 
array 2 0 0 0 1 1 0 0 20 2 3 1 0 0 0 0 0 0 1 1 1 1 0 0 0 0 40 2 1 1 0 1 0 1 0 10 0 1 1 0 0 1 0 1 10 3 2 run_simplex p 70 0 x1 10 0 x2 10 0 x3 20 0 non standard minimisation and equalities min x1 x2 st 2x1 x2 12 6x1 5x2 40 tableau np array 8 6 0 0 52 1 1 0 0 0 2 1 1 0 12 6 5 0 1 40 2 2 run_simplex p 7 0 x1 5 0 x2 2 0 pivot on slack variables max 8x1 6x2 st x1 3x2 33 4x1 2x2 48 2x1 4x2 48 x1 x2 10 x1 2 tableau np array 2 1 0 0 0 1 1 0 0 12 0 8 6 0 0 0 0 0 0 0 0 0 1 3 1 0 0 0 0 0 0 33 0 4 2 0 1 0 0 0 0 0 60 0 2 4 0 0 1 0 0 0 0 48 0 1 1 0 0 0 1 0 1 0 10 0 1 0 0 0 0 0 1 0 1 2 0 2 2 run_simplex doctest ellipsis p 132 0 x1 12 000 x2 5 999 stop simplex algorithm from cycling completion of each stage removes an objective if both stages are complete then no objectives are left find the values of each variable at optimal solution if there are no more negative values in objective row delete artificial variable columns and rows update attributes given the final tableau add the corresponding values of the basic decision variables to the output_dict tableau np array 0 0 0 875 0 375 5 0 1 0 375 0 125 1 1 0 0 125 0 375 1 2 0 interpret_tableau p 5 0 x1 1 0 x2 1 0 p rhs of final tableau gives indices of nonzero entries in the ith column first entry in the nonzero indices if there is only one nonzero value in column which is one
from typing import Any

import numpy as np


class Tableau:
    """Operate on simplex tableaus for solving linear programs.

    The tableau stores objective row(s) first, then one row per constraint,
    with the right-hand side (RHS) in the last column. Programs with >= or ==
    constraints are solved with the two-stage method using artificial
    variables; standard programs use a single stage.
    """

    # Max iteration number to prevent cycling.
    maxiter = 100

    def __init__(
        self, tableau: np.ndarray, n_vars: int, n_artificial_vars: int
    ) -> None:
        """Validate the tableau and precompute bookkeeping attributes.

        Raises:
            TypeError: if ``tableau`` is not of dtype float64.
            ValueError: if any RHS entry is negative, or the variable counts
                are implausible (fewer than 2 decision variables, or a
                negative artificial-variable count).
        """
        if tableau.dtype != "float64":
            raise TypeError("Tableau must have type float64")

        # Check if RHS is negative.
        if not (tableau[:, -1] >= 0).all():
            raise ValueError("RHS must be > 0")

        if n_vars < 2 or n_artificial_vars < 0:
            raise ValueError(
                "number of (artificial) variables must be a natural number"
            )

        self.tableau = tableau
        self.n_rows, n_cols = tableau.shape

        # Number of decision variables x1, x2, x3...
        self.n_vars, self.n_artificial_vars = n_vars, n_artificial_vars

        # 2 if there are >= or == constraints (nonstandard), 1 otherwise (std).
        self.n_stages = (self.n_artificial_vars > 0) + 1

        # Number of slack variables added to make inequalities into equalities.
        self.n_slack = n_cols - self.n_vars - self.n_artificial_vars - 1

        # Objectives for each stage in two-stage simplex:
        # first minimise (drive out artificial variables), then maximise.
        self.objectives = ["max"]

        if self.n_artificial_vars:
            self.objectives.append("min")

        self.col_titles = self.generate_col_titles()

        # Index of current pivot row and column.
        self.row_idx = None
        self.col_idx = None

        # Does objective row only contain non-negative values?
        self.stop_iter = False

    def generate_col_titles(self) -> list[str]:
        """Generate column titles for a tableau of these dimensions:
        decision variables "x1..xn", slack variables "s1..sm", then "RHS"."""
        args = (self.n_vars, self.n_slack)

        # decision | slack
        string_starts = ["x", "s"]
        titles = []
        for i in range(2):
            for j in range(args[i]):
                titles.append(string_starts[i] + str(j + 1))
        titles.append("RHS")
        return titles

    def find_pivot(self) -> tuple[Any, Any]:
        """Find the pivot row and column.

        Returns (0, 0) and sets ``stop_iter`` when no valid pivot column
        exists (the current objective cannot be improved further).
        """
        objective = self.objectives[-1]

        # Find entries of highest magnitude in objective rows;
        # ``sign`` flips the comparison for "min" vs "max".
        sign = (objective == "min") - (objective == "max")
        col_idx = np.argmax(sign * self.tableau[0, :-1])

        # Choice is only valid if below 0 for maximise, and above for minimise.
        if sign * self.tableau[0, col_idx] <= 0:
            self.stop_iter = True
            return 0, 0

        # Pivot row is chosen as having the lowest quotient when elements of
        # the pivot column divide the right-hand side.

        # Slice excluding the objective rows.
        s = slice(self.n_stages, self.n_rows)

        # RHS
        dividend = self.tableau[s, -1]

        # Elements of pivot column within slice.
        divisor = self.tableau[s, col_idx]

        # Array filled with NaNs.
        nans = np.full(self.n_rows - self.n_stages, np.nan)

        # If element in pivot column is greater than zero, return
        # quotient, or NaN otherwise.
        quotients = np.divide(dividend, divisor, out=nans, where=divisor > 0)

        # Arg of minimum quotient excluding the NaN values. ``n_stages`` is
        # added to compensate for earlier exclusion of objective columns.
        row_idx = np.nanargmin(quotients) + self.n_stages
        return row_idx, col_idx

    def pivot(self, row_idx: int, col_idx: int) -> np.ndarray:
        """Pivot on the value at the intersection of pivot row and column."""
        # Avoid changes to original tableau.
        piv_row = self.tableau[row_idx].copy()

        piv_val = piv_row[col_idx]

        # Entry becomes 1.
        piv_row *= 1 / piv_val

        # Variable in pivot column becomes basic, i.e. the only non-zero entry.
        for idx, coeff in enumerate(self.tableau[:, col_idx]):
            self.tableau[idx] += -coeff * piv_row
        self.tableau[row_idx] = piv_row
        return self.tableau

    def change_stage(self) -> np.ndarray:
        """Exit the first phase of the two-stage method by deleting artificial
        rows and columns, or complete the algorithm if exiting the standard
        case."""
        # Objective of original objective row remains.
        self.objectives.pop()

        if not self.objectives:
            return self.tableau

        # Slice containing ids for artificial columns.
        s = slice(-self.n_artificial_vars - 1, -1)

        # Delete the artificial variable columns.
        self.tableau = np.delete(self.tableau, s, axis=1)

        # Delete the objective row of the first stage.
        self.tableau = np.delete(self.tableau, 0, axis=0)

        self.n_stages = 1
        self.n_rows -= 1
        self.n_artificial_vars = 0
        self.stop_iter = False
        return self.tableau

    def run_simplex(self) -> dict[Any, Any]:
        """Operate on tableau until objective function cannot be improved
        further.

        Returns a dict mapping "P" and the basic decision variables to their
        optimal values, or an empty dict if ``maxiter`` is exhausted.
        """
        # Stop simplex algorithm from cycling.
        for _ in range(Tableau.maxiter):
            # Completion of each stage removes an objective. If both stages
            # are complete, then no objectives are left.
            if not self.objectives:
                # Find the values of each variable at optimal solution.
                return self.interpret_tableau()

            row_idx, col_idx = self.find_pivot()

            # If there are no more negative values in objective row.
            if self.stop_iter:
                # Delete artificial variable columns and rows; update
                # attributes.
                self.tableau = self.change_stage()
            else:
                self.tableau = self.pivot(row_idx, col_idx)
        return {}

    def interpret_tableau(self) -> dict[str, float]:
        """Given the final tableau, add the corresponding values of the basic
        decision variables to the output dict."""
        # P = RHS of final tableau.
        output_dict = {"P": abs(self.tableau[0, -1])}

        for i in range(self.n_vars):
            # Gives indices of nonzero entries in the ith column.
            nonzero = np.nonzero(self.tableau[:, i])
            n_nonzero = len(nonzero[0])

            # First entry in the nonzero indices.
            # NOTE(review): assumes each decision column has at least one
            # nonzero entry; an all-zero column would raise IndexError here —
            # confirm whether that can occur for valid inputs.
            nonzero_rowidx = nonzero[0][0]
            nonzero_val = self.tableau[nonzero_rowidx, i]

            # If there is only one nonzero value in column, which is one:
            # the variable is basic and its value is the row's RHS.
            if n_nonzero == 1 and nonzero_val == 1:
                rhs_val = self.tableau[nonzero_rowidx, -1]
                output_dict[self.col_titles[i]] = rhs_val
        return output_dict


if __name__ == "__main__":
    import doctest

    doctest.testmod()