diff --git a/Plot_anomaly_comparison_3D.ipynb b/Plot_anomaly_comparison_3D.ipynb index 07b067849ddd8..ba60ea8b464d6 100644 --- a/Plot_anomaly_comparison_3D.ipynb +++ b/Plot_anomaly_comparison_3D.ipynb @@ -270,7 +270,6 @@ } ], "source": [ - "\n", "# Plot\n", "plt.figure(figsize=((len(anomaly_algorithms) + 1) * 2.5 + 1, len(datasets3D) * 2.5 + 1))\n", "plt.subplots_adjust(\n", @@ -421,10 +420,17 @@ "metadata": {}, "source": [ "## Result and disscussion \n", - "The figures show the outlier detection performance and visualization. Each row represent different datasets. The first four columns compare each algorithm the computation time (``.__ s``) and outlier prediction accuracy (``acc``). The number and name of each outlier detection algorithm are on the top of the column. The last column plots all four algorithms in the ROC curve compare AUC score. The number label on AUC score matches the number in front of the algorithm names. The ``x`` in the ROC curves indicate the thresholds where algorithms start to classify data as outliers.\n", + "The figures show the outlier detection performance and visualization. Each row represents different datasets. The first four columns compare each algorithm the computation time (``.__ s``) and outlier prediction accuracy (``acc``). The number and name of each outlier detection algorithm are on the top of the column. The last column plots all four algorithms in the ROC curve compare AUC score. The number label on AUC score matches the number in front of the algorithm names. The ``x`` in the ROC curves indicate the thresholds where algorithms start to classify data as outliers.\n", "\n", - "From the plots, ``sklearn.covariance.EllipticEnvelope`` shows best result in high dimensional noise ``d_noise = 10``. However, since robust covariance create a ellptical envelope for inliers, we need more test on an inlier data that is not in a elliptical shape." 
+ "From the plots, ``sklearn.covariance.EllipticEnvelope`` shows best result in high dimensional noise ``d_noise = 10``. However, since robust covariance creates an elliptical envelope for inliers, we need more test on an inlier data that is not in a elliptical shape." ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { diff --git a/plot_anomaly_comparison-3D.py b/plot_anomaly_comparison-3D.py index 73d34f9a277ea..03cf518b0a847 100644 --- a/plot_anomaly_comparison-3D.py +++ b/plot_anomaly_comparison-3D.py @@ -76,18 +76,18 @@ Result and disscussion --------------------------------- The figures show the outlier detection performance and visualization. -Each row represent different datasets. The first four columns compare +Each row represents different datasets. The first four columns compare each algorithm the computation time (``.__ s``) and outlier prediction accuracy (``acc``). The number and name of each outlier detection algorithm are on -the top of the columns. The last column plots all four algorithms in +the top of the column. The last column plots all four algorithms in the ROC curve compare AUC score. The number label on AUC score matches the number in front of the algorithm names. The ``x`` in the ROC curves indicate the thresholds where algorithms start to classify data as outliers. From the plots, ``sklearn.covariance.EllipticEnvelope`` shows best result in high dimensional noise ``d_noise = 10``. -However, since robust covariance create a ellptical envelope for inliers, +However, since robust covariance creates an elliptical envelope for inliers, we need more test on an inlier data that is not in a elliptical shape. """ @@ -381,4 +381,4 @@ def misaligned_blobs(samples=3, sd=0.0): ax.yaxis.set_ticklabels([]) plot_num += 1 print("d_noise = ", str(d_noise)) -plt.show() +plt.show()