Updated plot layout
app.py
CHANGED
@@ -67,7 +67,8 @@ def plot_lda_pca():
     )
     plt.legend(loc="best", shadow=False, scatterpoints=1)
     axes[1].legend(loc="lower right")
-    axes[1].set_title("LDA of IRIS dataset")
+    axes[1].set_title("LDA of IRIS dataset")
+    plt.tight_layout()


     return fig
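For context, this hunk only shows the tail of plot_lda_pca(). A minimal sketch of what the full function plausibly looks like, modelled on the sklearn example linked further down, is below; everything above the legend/title lines (data loading, the PCA/LDA fits, the scatter loop, figure size, colours) is an assumption and not taken from this diff:

import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

def plot_lda_pca():
    # Iris: 150 samples, 4 features, 3 classes.
    iris = datasets.load_iris()
    X, y, target_names = iris.data, iris.target, iris.target_names

    # Project onto the first two principal components / linear discriminants.
    X_pca = PCA(n_components=2).fit_transform(X)
    X_lda = LinearDiscriminantAnalysis(n_components=2).fit(X, y).transform(X)

    fig, axes = plt.subplots(1, 2, figsize=(10, 4))
    for color, i, name in zip(["navy", "turquoise", "darkorange"], [0, 1, 2], target_names):
        axes[0].scatter(X_pca[y == i, 0], X_pca[y == i, 1], color=color, alpha=0.8, label=name)
        axes[1].scatter(X_lda[y == i, 0], X_lda[y == i, 1], color=color, alpha=0.8, label=name)
    axes[0].set_title("PCA of IRIS dataset")
    # The remaining lines mirror the context and added lines of the hunk above.
    plt.legend(loc="best", shadow=False, scatterpoints=1)
    axes[1].legend(loc="lower right")
    axes[1].set_title("LDA of IRIS dataset")
    plt.tight_layout()  # added in this commit to tidy the subplot layout
    return fig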
@@ -76,19 +77,16 @@ def plot_lda_pca():
 title = "2-D projection of Iris dataset using LDA and PCA"
 with gr.Blocks(title=title) as demo:
     gr.Markdown(f"# {title}")
-    gr.Markdown(" This example shows how one can use Prinicipal Components Analysis (PCA) and
-    "
-    "
-
+    gr.Markdown(" This example shows how one can use Prinicipal Components Analysis (PCA) and Linear Discriminant Analysis (LDA) to cluster the Iris dataset based on provided features. <br>"
+                " PCA applied to this data identifies the combination of attributes (principal components, or directions in the feature space) that account for the most variance in the data. Here we plot the different samples on the 2 first principal components. <br>"
+                " <br>"
+
                 " For further details please see the sklearn docs:"
                 )

     gr.Markdown(" **[Demo is based on sklearn docs found here](https://scikit-learn.org/stable/auto_examples/decomposition/plot_pca_vs_lda.html#sphx-glr-auto-examples-decomposition-plot-pca-vs-lda-py)** <br>")

-    gr.Markdown(" **Dataset** :
-    gr.Markdown(" Different number of features and number of components affect how well the low rank space is recovered. <br>"
-                " Larger Depth trying to overfit and learn even the finner details of the data.<br>"
-                )
+    gr.Markdown(" **Dataset** : The Iris dataset represents 3 kind of Iris flowers (Setosa, Versicolour and Virginica) with 4 attributes: sepal length, sepal width, petal length and petal width. . <br>")

     # with gr.Row():
     #     n_samples = gr.Slider(value=100, minimum=10, maximum=1000, step=10, label="n_samples")
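The added description says that the first two principal components account for most of the variance in the Iris data. A small standalone check of that claim (not part of app.py, shown only for illustration; it uses the same sklearn estimators) is:

from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

iris = datasets.load_iris()
X, y = iris.data, iris.target

# Fraction of total variance captured by each of the first two principal components.
pca = PCA(n_components=2).fit(X)
print("PCA explained variance ratio:", pca.explained_variance_ratio_)

# LDA exposes the analogous ratio for its discriminant axes.
lda = LinearDiscriminantAnalysis(n_components=2).fit(X, y)
print("LDA explained variance ratio:", lda.explained_variance_ratio_)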
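Not shown in this diff is how the figure returned by plot_lda_pca() reaches the page. A sketch of one way to wire it into the Blocks UI above, reusing plot_lda_pca from the earlier sketch (gr.Plot and the demo.load hookup are assumptions here, not taken from app.py):

import gradio as gr

title = "2-D projection of Iris dataset using LDA and PCA"
with gr.Blocks(title=title) as demo:
    gr.Markdown(f"# {title}")
    plot = gr.Plot(label="PCA vs. LDA projections of Iris")
    # Render the figure once on page load; a control such as the commented-out
    # n_samples slider could later be passed via inputs= to re-draw the plot.
    demo.load(fn=plot_lda_pca, inputs=[], outputs=plot)

demo.launch()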