import streamlit as st import torch import io import sys # Function to execute the input code and capture print statements def execute_code(code): # Redirect stdout to capture print statements old_stdout = sys.stdout sys.stdout = mystdout = io.StringIO() global_vars = {"torch": torch} local_vars = {} try: exec(code, global_vars, local_vars) output = mystdout.getvalue() except Exception as e: output = str(e) finally: # Reset redirect. sys.stdout = old_stdout return output, local_vars # Dictionary with exercise details exercises = { "Exercise 1: Create and Manipulate Tensors": { "description": "Tensors are the core data structure in PyTorch, similar to arrays in NumPy but with additional capabilities for GPU acceleration. This exercise introduces how to create tensors from various data sources such as lists and NumPy arrays. It also covers basic tensor operations like addition, subtraction, and element-wise multiplication, which are fundamental for manipulating data in PyTorch.", "code": '''import torch import numpy as np # Creating tensors from Python lists # This creates a 1D tensor from the list [1, 2, 3] tensor_from_list = torch.tensor([1, 2, 3]) print("Tensor from list:", tensor_from_list) # Creating tensors from NumPy arrays # This converts a NumPy array to a tensor numpy_array = np.array([4, 5, 6]) tensor_from_numpy = torch.tensor(numpy_array) print("Tensor from NumPy array:", tensor_from_numpy) # Performing basic tensor operations tensor1 = torch.tensor([1, 2, 3]) tensor2 = torch.tensor([4, 5, 6]) # Addition addition = tensor1 + tensor2 print("Addition:", addition) # Subtraction subtraction = tensor1 - tensor2 print("Subtraction:", subtraction) # Element-wise multiplication elementwise_multiplication = tensor1 * tensor2 print("Element-wise Multiplication:", elementwise_multiplication) ''' }, "Exercise 2: Tensor Indexing and Slicing": { "description": "Indexing and slicing allow you to access and manipulate specific elements and sub-tensors. This is crucial for tasks such as data preprocessing and manipulation in machine learning workflows. This exercise demonstrates how to index and slice tensors to extract and modify elements efficiently.", "code": '''import torch # Creating a 2D tensor (matrix) tensor = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) # Indexing elements # Accessing the element at the 2nd row and 3rd column (indexing starts at 0) element = tensor[1, 2] print("Element at index [1, 2]:", element) # Slicing sub-tensors # Extracting the entire second row row = tensor[1, :] print("Second row:", row) # Extracting the entire third column column = tensor[:, 2] print("Third column:", column) # Modifying elements # Changing the first element of the tensor to 10 tensor[0, 0] = 10 print("Modified tensor:", tensor) ''' }, "Exercise 3: Reshaping and Transposing Tensors": { "description": "Reshaping and transposing tensors are common operations in machine learning workflows, especially when preparing data for model training. This exercise covers how to reshape tensors using view, squeeze, and unsqueeze, as well as how to transpose tensors.", "code": '''import torch # Creating a 2D tensor tensor = torch.tensor([[1, 2, 3], [4, 5, 6]]) # Reshaping a tensor reshaped_tensor = tensor.view(3, 2) print("Reshaped tensor:", reshaped_tensor) # Squeezing a tensor (removing dimensions of size 1) squeezed_tensor = torch.tensor([[1], [2], [3]]).squeeze() print("Squeezed tensor:", squeezed_tensor) # Unsqueezing a tensor (adding dimensions of size 1) unsqueezed_tensor = squeezed_tensor.unsqueeze(1) print("Unsqueezed tensor:", unsqueezed_tensor) # Transposing a tensor transposed_tensor = tensor.t() print("Transposed tensor:", transposed_tensor) ''' }, "Exercise 4: Tensor Operations for Deep Learning": { "description": "Deep learning requires various tensor operations such as matrix multiplication and element-wise operations. This exercise demonstrates how to perform matrix multiplication, calculate the dot product, and transpose tensors, which are essential for building neural networks.", "code": '''import torch # Creating tensors for matrix multiplication a = torch.tensor([[1, 2], [3, 4]]) b = torch.tensor([[5, 6], [7, 8]]) # Matrix multiplication matrix_multiplication = torch.matmul(a, b) print("Matrix multiplication result:", matrix_multiplication) # Transposing a tensor transposed_a = a.t() print("Transposed tensor:", transposed_a) # Calculating the dot product dot_product = torch.dot(torch.tensor([1, 2]), torch.tensor([3, 4])) print("Dot product result:", dot_product) ''' }, "Exercise 5: Tensors and Gradients": { "description": "Gradients are essential for optimizing neural networks during training. This exercise introduces the autograd feature in PyTorch, showing how to compute gradients using backpropagation.", "code": '''import torch # Creating a tensor with gradient tracking enabled x = torch.tensor([2.0, 3.0], requires_grad=True) # Performing operations on the tensor y = x * 2 z = y.mean() # Backpropagation to compute gradients z.backward() # Printing the gradients print("Gradients of x:", x.grad) # Disabling gradient tracking with torch.no_grad(): y = x * 2 print("Result with no gradient tracking:", y) ''' }, "Exercise 6: Practical Tensor Exercises - Custom Layers": { "description": "Implementing custom layers and activation functions is crucial for creating neural networks tailored to specific tasks. This exercise guides you through creating a simple linear layer and a ReLU activation function.", "code": '''import torch # Implementing a custom linear layer class LinearLayer: def __init__(self, input_dim, output_dim): self.weights = torch.randn(input_dim, output_dim, requires_grad=True) self.bias = torch.randn(output_dim, requires_grad=True) def forward(self, x): return torch.matmul(x, self.weights) + self.bias # Creating an instance of the custom linear layer layer = LinearLayer(2, 1) # Passing a tensor through the layer input_tensor = torch.tensor([[1.0, 2.0]]) output_tensor = layer.forward(input_tensor) print("Output of the custom linear layer:", output_tensor) # Implementing a custom ReLU activation function def relu(x): return torch.max(torch.tensor(0.0), x) # Applying the ReLU activation function relu_output = relu(torch.tensor([-1.0, 2.0, -0.5, 3.0])) print("Output of the custom ReLU function:", relu_output) ''' }, "Exercise 7: Data Normalization with Tensors": { "description": "Data normalization is a key preprocessing step in machine learning. This exercise demonstrates how to normalize data using Min-Max normalization, which scales the data to a specific range.", "code": '''import torch # Function for Min-Max normalization def min_max_normalize(tensor): min_val = tensor.min() max_val = tensor.max() return (tensor - min_val) / (max_val - min_val) # Creating a tensor with sample data data = torch.tensor([10, 20, 30, 40, 50]) # Applying Min-Max normalization normalized_data = min_max_normalize(data) print("Normalized data:", normalized_data) ''' }, "Final Project: Image Classification with a Simple CNN": { "description": "In this project, you will build and train a simple Convolutional Neural Network (CNN) for image classification using the CIFAR-10 dataset. This involves loading the dataset, defining the CNN model, and training the model to classify images into one of the 10 classes.", "code": '''import torch import torch.nn as nn import torch.optim as optim import torchvision import torchvision.transforms as transforms # Define the transformation for the dataset transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5))]) # Load the CIFAR-10 dataset trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=32, shuffle=True, num_workers=2) testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform) testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False, num_workers=2) # Define the CNN model class SimpleCNN(nn.Module): def __init__(self): super(SimpleCNN, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x): x = self.pool(nn.functional.relu(self.conv1(x))) x = self.pool(nn.functional.relu(self.conv2(x))) x = x.view(-1, 16 * 5 * 5) x = nn.functional.relu(self.fc1(x)) x = nn.functional.relu(self.fc2(x)) x = self.fc3(x) return x # Instantiate the model, loss function, and optimizer net = SimpleCNN() criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9) # Training loop for epoch in range(5): # loop over the dataset multiple times running_loss = 0.0 for i, data in enumerate(trainloader, 0): inputs, labels = data optimizer.zero_grad() outputs = net(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() running_loss += loss.item() if i % 200 == 199: # print every 200 mini-batches print(f'[{epoch + 1}, {i + 1}] loss: {running_loss / 200:.3f}') running_loss = 0.0 print('Finished Training') # Save the trained model torch.save(net.state_dict(), 'simple_cnn.pth') # Testing the model correct = 0 total = 0 with torch.no_grad(): for data in testloader: images, labels = data outputs = net(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print(f'Accuracy of the network on the 10000 test images: {100 * correct / total}%') ''' }, } st.title('PyTorch Code Runner') # Side menu for exercises exercise_choice = st.sidebar.radio("Choose an exercise", list(exercises.keys())) # Display the chosen exercise description st.subheader(exercise_choice) st.write(exercises[exercise_choice]["description"]) # Text area for inputting the PyTorch code code_input = st.text_area("Enter your PyTorch code here", height=300, value=exercises[exercise_choice]["code"]) # Button to execute the code if st.button("Run Code"): # Prepend the import statement code_to_run = "import torch\n" + code_input # Execute the code and capture the output output, variables = execute_code(code_to_run) # Display the output st.subheader('Output') st.text(output) # Display returned variables if variables: st.subheader('Variables') for key, value in variables.items(): st.text(f"{key}: {value}")