import gradio as gr
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeClassifier, plot_tree
from sklearn.cluster import KMeans


def plot_neural_network():
    """Draw a schematic feed-forward network with layers of 4, 5, 3 and 1 neurons."""
    fig, ax = plt.subplots(figsize=(10, 6))
    ax.set_title("Simple Neural Network")
    layer_sizes = [4, 5, 3, 1]
    layer_positions = [1, 2, 3, 4]
    for i, layer_size in enumerate(layer_sizes):
        for j in range(layer_size):
            # Draw the neuron as an unfilled circle.
            circle = plt.Circle((layer_positions[i], j * 2), 0.2, fill=False)
            ax.add_artist(circle)
            # Connect this neuron to every neuron in the next layer.
            if i < len(layer_sizes) - 1:
                for k in range(layer_sizes[i + 1]):
                    ax.plot([layer_positions[i], layer_positions[i + 1]],
                            [j * 2, k * 2], 'gray', alpha=0.5)
    ax.set_xlim(0, 5)
    ax.set_ylim(-1, 9)
    ax.axis('off')
    return fig


def plot_linear_regression():
    """Fit a line to noisy synthetic data and plot the points with the fitted line."""
    x = np.linspace(0, 10, 100).reshape(-1, 1)
    y = 2 * x + 1 + np.random.randn(100, 1) * 2
    model = LinearRegression()
    model.fit(x, y)
    fig, ax = plt.subplots(figsize=(10, 6))
    ax.scatter(x, y, color='blue', alpha=0.5)
    ax.plot(x, model.predict(x), color='red', linewidth=2)
    ax.set_title("Linear Regression")
    ax.set_xlabel("X")
    ax.set_ylabel("Y")
    return fig


def plot_kmeans():
    """Cluster three synthetic Gaussian blobs with K-Means and plot the result."""
    X = np.random.randn(300, 2) * 0.5
    X[:100] += np.array([2, 2])
    X[100:200] += np.array([-2, 2])
    X[200:] += np.array([0, -2])
    # n_init and random_state are set explicitly so the clustering is reproducible
    # across runs and scikit-learn versions.
    kmeans = KMeans(n_clusters=3, n_init=10, random_state=42)
    kmeans.fit(X)
    fig, ax = plt.subplots(figsize=(10, 6))
    ax.scatter(X[:, 0], X[:, 1], c=kmeans.labels_, cmap='viridis')
    ax.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1],
               marker='x', s=200, linewidths=3, color='r')
    ax.set_title("K-Means Clustering")
    ax.set_xlabel("Feature 1")
    ax.set_ylabel("Feature 2")
    return fig


def plot_decision_tree():
    """Train a shallow decision tree on synthetic data and plot its structure."""
    X = np.random.randn(100, 2)
    y = (X[:, 0] + X[:, 1] > 0).astype(int)
    clf = DecisionTreeClassifier(max_depth=3, random_state=42)
    clf.fit(X, y)
    fig, ax = plt.subplots(figsize=(12, 8))
    plot_tree(clf, filled=True, feature_names=['Feature 1', 'Feature 2'],
              class_names=['Class 0', 'Class 1'], ax=ax)
    ax.set_title("Decision Tree")
    return fig


def visualize_algorithm(algorithm):
    """Return a matplotlib figure and a short description for the selected algorithm."""
    if algorithm == "Neural Network":
        fig = plot_neural_network()
        description = (
            "A neural network is a series of algorithms that endeavors to recognize "
            "underlying relationships in a set of data through a process that mimics "
            "the way the human brain operates. This visualization shows a simple "
            "neural network with an input layer (4 neurons), two hidden layers "
            "(5 and 3 neurons), and an output layer (1 neuron)."
        )
    elif algorithm == "Linear Regression":
        fig = plot_linear_regression()
        description = (
            "Linear regression is a linear approach to modeling the relationship "
            "between a scalar response and one or more explanatory variables. This "
            "plot shows a scatter plot of data points and the best-fit line found "
            "by linear regression."
        )
    elif algorithm == "K-Means Clustering":
        fig = plot_kmeans()
        description = (
            "K-Means clustering is an unsupervised learning algorithm that attempts "
            "to group similar data points into K clusters. This visualization shows "
            "the result of K-Means clustering on a 2D dataset with 3 clusters. The "
            "'x' markers represent the cluster centers."
        )
    elif algorithm == "Decision Tree":
        fig = plot_decision_tree()
        description = (
            "A decision tree is a flowchart-like structure in which each internal "
            "node represents a test on an attribute, each branch represents the "
            "outcome of the test, and each leaf node represents a class label. This "
            "visualization shows a decision tree with a maximum depth of 3. The "
            "color intensity of each node represents the majority class at that node."
        )
    else:
        fig = plt.figure()
        description = "Invalid algorithm selected."
    return fig, description


iface = gr.Interface(
    fn=visualize_algorithm,
    inputs=gr.Dropdown(
        ["Neural Network", "Linear Regression", "K-Means Clustering", "Decision Tree"],
        label="Choose an algorithm",
    ),
    outputs=[
        gr.Plot(label="Visualization"),
        gr.Textbox(label="Description"),
    ],
    title="Machine Learning Algorithm Visualizer",
    description="Select an algorithm to visualize and learn about:",
    theme="huggingface",
    allow_flagging="never",
)

if __name__ == "__main__":
    iface.launch()
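

# Optional usage sketch (not part of the app itself): visualize_algorithm can also be
# called directly to save a figure without launching the Gradio UI. The module name
# "app" below is an assumption; adjust it to this file's actual name.
#
#     from app import visualize_algorithm
#     fig, description = visualize_algorithm("K-Means Clustering")
#     fig.savefig("kmeans.png", dpi=150, bbox_inches="tight")
#     print(description)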