Du Mingzhe committed on
Commit
859ae4c
1 Parent(s): 534fea8
Files changed (1) hide show
  1. app.py +23 -1
app.py CHANGED
@@ -3,6 +3,28 @@ import streamlit as st
3
 
4
 st.title("GCP Resource Allocator")
5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
  st.subheader("Configuration")
7
 
8
  # GPU Type
@@ -119,9 +141,9 @@ ssd_disk_price = serivces_mapping['SSD'] * ssd_disk_size
119
  duration_total_price = core_price + memory_price + gpu_price + balanced_disk_price + ssd_disk_price
120
  total_price = duration_total_price * hours
121
 
122
- st.divider()
123
 
124
  st.subheader("Hourly estimate")
 
125
  st.write(f"Core: SGD :blue[{core_price:.3f}]")
126
  st.write(f"Memory: SGD :blue[{memory_price:.3f}]")
127
  st.write(f"GPU: SGD :blue[{gpu_price:.3f}]")
 
3
 
4
 st.title("GCP Resource Allocator")
5
 
6
+ st.subheader("Readme")
7
+
8
+ st.write("Compute Engine provides NVIDIA GPUs for your VMs in passthrough mode so that your VMs have direct control over the GPUs and their associated memory.")
9
+ st.write("* To run NVIDIA H100 80GB GPUs, you must use an A3 accelerator-optimized machine type.")
10
+ st.write("* To run NVIDIA A100 GPUs, you must use the A2 accelerator-optimized machine type.")
11
+ st.write("* To run NVIDIA L4 GPUs, you must use a G2 accelerator-optimized machine type.")
12
+ st.write("* Each A3/A2/G2 machine type has a fixed GPU count, vCPU count, and memory size.")
13
+
14
+ st.markdown("""
15
+ | GPU | Memory | FP64 | FP32 | Price | Interconnect | Best used for |
16
+ | --------- | ------------------------- | --------- | ----------| --------- | ----------------------------- | ------------- |
17
+ | H100 80GB | 80 GB HBM3 @ 3.35 TBps | 34 | 67 | 12.11 | NVLink Full Mesh @ 900 GBps | Large models with massive data tables for ML Training, Inference, HPC, BERT, DLRM |
18
+ | A100 80GB | 80 GB HBM2e @ 1.9 TBps | 9.7 | 19.5 | 2.61 | NVLink Full Mesh @ 600 GBps | Large models with massive data tables for ML Training, Inference, HPC, BERT, DLRM |
19
+ | A100 40GB | 40 GB HBM2 @ 1.6 TBps | 9.7 | 19.5 | 1.67 | NVLink Full Mesh @ 600 GBps | ML Training, Inference, HPC |
20
+ | L4 | 24 GB GDDR6 @ 300 GBps | 0.5 | 30.3 | 0.28 | N/A | ML Inference, Training, Remote Visualization Workstations, Video Transcoding, HPC |
21
+ | T4 | 16 GB GDDR6 @ 320 GBps | 0.25 | 8.1 | 0.15 | N/A | ML Inference, Training, Remote Visualization Workstations, Video Transcoding |
22
+ | V100 | 16 GB HBM2 @ 900 GBps | 7.8 | 15.7 | 0.99 | NVLink Ring @ 300 GBps | ML Training, Inference, HPC |
23
+ | P4 | 8 GB GDDR5 @ 192 GBps | 0.2 | 5.5 | 0.30 | N/A | Remote Visualization Workstations, ML Inference, and Video Transcoding |
24
+ | P100 | 16 GB HBM2 @ 732 GBps | 4.7 | 9.3 | 0.58 | N/A | ML Training, Inference, HPC, Remote Visualization Workstations |
25
+ """)
26
+
27
+
28
  st.subheader("Configuration")
29
 
30
  # GPU Type
 
141
  duration_total_price = core_price + memory_price + gpu_price + balanced_disk_price + ssd_disk_price
142
  total_price = duration_total_price * hours
143
 
 
144
 
145
  st.subheader("Hourly estimate")
146
+
147
  st.write(f"Core: SGD :blue[{core_price:.3f}]")
148
  st.write(f"Memory: SGD :blue[{memory_price:.3f}]")
149
  st.write(f"GPU: SGD :blue[{gpu_price:.3f}]")