diff --git a/DESCRIPTION b/DESCRIPTION index 6d5fcbb6..1b6e2990 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,6 +1,6 @@ Package: SLmetrics Title: Machine Learning Performance Evaluation on Steroids -Version: 0.2-0 +Version: 0.3-0 Authors@R: c( person( given = "Serkan", diff --git a/Makefile b/Makefile index 5b0b82c4..c4a14ab3 100644 --- a/Makefile +++ b/Makefile @@ -9,21 +9,6 @@ PKGNAME = SLmetrics VERSION = $(shell grep "^Version:" DESCRIPTION | sed "s/Version: //") TARBALL = $(PKGNAME)_$(VERSION).tar.gz -py-setup: - @echo "Setting up Python environment" - @echo "=============================" - @python -m venv .venv - @echo "Activating virtual environment" - @pip cache purge - @python -m pip install --upgrade pip - @pip install numpy scipy torch torchmetrics scikit-learn imbalanced-learn mkl mkl-service mkl_fft mkl_random - @echo "Done!" - -py-check: - @echo "Checking installed python modules" - @echo "=================================" - @pip list - document: clear @echo "Documenting {$(PKGNAME)}" @@ -31,16 +16,20 @@ document: build: document @echo "Installing {$(PKGNAME)}" + rm -f src/*.o src/*.so R CMD build . R CMD INSTALL $(TARBALL) rm -f $(TARBALL) + rm -f src/*.o src/*.so check: document @echo "Checking {$(PKGNAME)}" + rm -f src/*.o src/*.so R CMD build . R CMD check $(TARBALL) rm -f $(TARBALL) rm -rf $(PKGNAME).Rcheck + rm -f src/*.o src/*.so build-site: @echo "Building {pkgdown}" diff --git a/NAMESPACE b/NAMESPACE index f83adb5c..d9478ede 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -13,6 +13,7 @@ S3method(csi,cmatrix) S3method(csi,factor) S3method(dor,cmatrix) S3method(dor,factor) +S3method(entropy,factor) S3method(fallout,cmatrix) S3method(fallout,factor) S3method(fbeta,cmatrix) @@ -28,6 +29,7 @@ S3method(fpr,factor) S3method(huberloss,numeric) S3method(jaccard,cmatrix) S3method(jaccard,factor) +S3method(logloss,factor) S3method(mae,numeric) S3method(mape,numeric) S3method(mcc,cmatrix) @@ -61,6 +63,7 @@ S3method(recall,cmatrix) S3method(recall,factor) S3method(rmse,numeric) S3method(rmsle,numeric) +S3method(rrmse,numeric) S3method(rrse,numeric) S3method(rsq,numeric) S3method(selectivity,cmatrix) @@ -79,12 +82,15 @@ S3method(tpr,cmatrix) S3method(tpr,factor) S3method(tscore,cmatrix) S3method(tscore,factor) +S3method(weighted.ROC,factor) S3method(weighted.accuracy,factor) S3method(weighted.baccuracy,factor) S3method(weighted.ccc,numeric) S3method(weighted.ckappa,factor) +S3method(weighted.cmatrix,factor) S3method(weighted.csi,factor) S3method(weighted.dor,factor) +S3method(weighted.entropy,factor) S3method(weighted.fallout,factor) S3method(weighted.fbeta,factor) S3method(weighted.fdr,factor) @@ -92,6 +98,7 @@ S3method(weighted.fer,factor) S3method(weighted.fpr,factor) S3method(weighted.huberloss,numeric) S3method(weighted.jaccard,factor) +S3method(weighted.logloss,factor) S3method(weighted.mae,numeric) S3method(weighted.mape,numeric) S3method(weighted.mcc,factor) @@ -103,11 +110,13 @@ S3method(weighted.phi,factor) S3method(weighted.pinball,numeric) S3method(weighted.plr,factor) S3method(weighted.ppv,factor) +S3method(weighted.prROC,factor) S3method(weighted.precision,factor) S3method(weighted.rae,numeric) S3method(weighted.recall,factor) S3method(weighted.rmse,numeric) S3method(weighted.rmsle,numeric) +S3method(weighted.rrmse,numeric) S3method(weighted.rrse,numeric) S3method(weighted.rsq,numeric) S3method(weighted.selectivity,factor) @@ -128,6 +137,7 @@ export(ckappa) export(cmatrix) export(csi) export(dor) +export(entropy) export(fallout) export(fbeta) export(fdr) @@ -136,6 
+146,7 @@ export(fmi)
 export(fpr)
 export(huberloss)
 export(jaccard)
+export(logloss)
 export(mae)
 export(mape)
 export(mcc)
@@ -153,6 +164,7 @@ export(rae)
 export(recall)
 export(rmse)
 export(rmsle)
+export(rrmse)
 export(rrse)
 export(rsq)
 export(selectivity)
@@ -162,12 +174,15 @@ export(specificity)
 export(tnr)
 export(tpr)
 export(tscore)
+export(weighted.ROC)
 export(weighted.accuracy)
 export(weighted.baccuracy)
 export(weighted.ccc)
 export(weighted.ckappa)
+export(weighted.cmatrix)
 export(weighted.csi)
 export(weighted.dor)
+export(weighted.entropy)
 export(weighted.fallout)
 export(weighted.fbeta)
 export(weighted.fdr)
@@ -175,6 +190,7 @@ export(weighted.fer)
 export(weighted.fpr)
 export(weighted.huberloss)
 export(weighted.jaccard)
+export(weighted.logloss)
 export(weighted.mae)
 export(weighted.mape)
 export(weighted.mcc)
@@ -186,11 +202,13 @@ export(weighted.phi)
 export(weighted.pinball)
 export(weighted.plr)
 export(weighted.ppv)
+export(weighted.prROC)
 export(weighted.precision)
 export(weighted.rae)
 export(weighted.recall)
 export(weighted.rmse)
 export(weighted.rmsle)
+export(weighted.rrmse)
 export(weighted.rrse)
 export(weighted.rsq)
 export(weighted.selectivity)
diff --git a/NEWS.Rmd b/NEWS.Rmd
index e4ab95b2..a1c1bdad 100644
--- a/NEWS.Rmd
+++ b/NEWS.Rmd
@@ -15,14 +15,120 @@ knitr::opts_chunk$set(
 set.seed(1903)
 ```

-# Version 0.2-0
+# Version 0.3-0

-> Version 0.2-0 is considered pre-release of {SLmetrics}. We do not
+> Version 0.3-0 is considered pre-release of {SLmetrics}. We do not
> expect any breaking changes, unless a major bug/issue is reported and its nature
> forces breaking changes.

## Improvements

+## New Features
+
+* **Relative Root Mean Squared Error:** The function normalizes the Root Mean Squared Error by a factor. There is no single official way of normalizing the RMSE; in {SLmetrics} it can be normalized using three options: mean-, range- and IQR-normalization. It can be used as follows,
+
+```{r}
+# 1) define values
+actual <- rnorm(1e3)
+predicted <- actual + rnorm(1e3)
+
+# 2) calculate Relative Root Mean Squared Error
+cat(
+  "Mean Relative Root Mean Squared Error", SLmetrics::rrmse(
+    actual = actual,
+    predicted = predicted,
+    normalization = 0
+  ),
+  "Range Relative Root Mean Squared Error", SLmetrics::rrmse(
+    actual = actual,
+    predicted = predicted,
+    normalization = 1
+  ),
+  "IQR Relative Root Mean Squared Error", SLmetrics::rrmse(
+    actual = actual,
+    predicted = predicted,
+    normalization = 2
+  ),
+  sep = "\n"
+)
+```
+
+* **Cross Entropy:** Weighted and unweighted Cross Entropy, with and without normalization. The function can be used as follows,
+
+```{r}
+# Create factors and response probabilities
+actual <- factor(c("Class A", "Class B", "Class A"))
+weights <- c(0.3,0.9,1)
+response <- matrix(c(
+  0.2, 0.8,
+  0.8, 0.2,
+  0.7, 0.3
+), nrow = 3, ncol = 2)
+
+cat(
+  "Unweighted Cross Entropy:",
+  SLmetrics::entropy(
+    actual,
+    response
+  ),
+  "Weighted Cross Entropy:",
+  SLmetrics::weighted.entropy(
+    actual = actual,
+    response = response,
+    w = weights
+  ),
+  sep = "\n"
+)
+```
+
+* **Weighted Receiver Operator Characteristics:** `weighted.ROC()`, the function calculates the weighted True Positive and False Positive Rates for each threshold.
+
+* **Weighted Precision-Recall Curve:** `weighted.prROC()`, the function calculates the weighted Recall and Precision for each threshold.
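Neither curve bullet ships with a usage example, so here is a minimal, hedged sketch. It assumes `response` is a numeric vector of predicted probabilities (as in `ROC()` and `prROC()`) and that the weighted variants follow the signatures registered in `R/RcppExports.R` later in this diff (`actual`, `response`, `w`, optional `thresholds`):

```r
# sketch: weighted ROC and Precision-Recall curves
set.seed(1903)
actual   <- factor(sample(c("a", "b"), size = 100, replace = TRUE))
response <- runif(100)
weights  <- runif(100)

roc <- SLmetrics::weighted.ROC(actual = actual, response = response, w = weights)
pr  <- SLmetrics::weighted.prROC(actual = actual, response = response, w = weights)

# per the documented return form: data.frames of rates by class and threshold
head(roc)
head(pr)
```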
+
+## Breaking Changes
+
+* **Weighted Confusion Matrix:** The `w`-argument in `cmatrix()` has been removed in favor of the more verbose weighted confusion matrix call `weighted.cmatrix()`-function. See below,
+
+Prior to version `0.3-0` the weighted confusion matrix was part of the `cmatrix()`-function and was called as follows,
+
+```{r, eval = FALSE}
+SLmetrics::cmatrix(
+  actual = actual,
+  predicted = predicted,
+  w = weights
+)
+```
+
+This solution, although simple, was inconsistent with the remaining implementation of weighted metrics in {SLmetrics}. To regain consistency and simplicity the weighted confusion matrix is now retrieved as follows,
+
+```{r}
+# 1) define factors
+actual <- factor(sample(letters[1:3], 100, replace = TRUE))
+predicted <- factor(sample(letters[1:3], 100, replace = TRUE))
+weights <- runif(length(actual))
+
+# 2) without weights
+SLmetrics::cmatrix(
+  actual = actual,
+  predicted = predicted
+)
+
+# 3) with weights
+SLmetrics::weighted.cmatrix(
+  actual = actual,
+  predicted = predicted,
+  w = weights
+)
+```
+
+## Bug-fixes
+
+* **Return named vectors:** The classification metrics when `micro == NULL` were not returning named vectors. This has been fixed.
+
+# Version 0.2-0
+
+## Improvements
+
 * **documentation:** The documentation has gotten some extra love, and now all functions have their formulas embedded, the details section have been freed from a general description of [factor] creation. This will make room for future expansions on the various functions where more details are required.

 * **weighted classification metrics:** The `cmatrix()`-function now accepts the argument `w` which is the sample weights; if passed the respective method will return the weighted metric. Below is an example using sample weights for the confusion matrix,

@@ -40,7 +146,7 @@ SLmetrics::cmatrix(
 )

 # 2) with weights
-SLmetrics::cmatrix(
+SLmetrics::weighted.cmatrix(
   actual = actual,
   predicted = predicted,
   w = weights
diff --git a/NEWS.md b/NEWS.md
index c270ae2f..1e3c0fdb 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -1,12 +1,157 @@
-# Version 0.2-0
+# Version 0.3-0

-> Version 0.2-0 is considered pre-release of {SLmetrics}. We do not
+> Version 0.3-0 is considered pre-release of {SLmetrics}. We do not
> expect any breaking changes, unless a major bug/issue is reported and
> its nature forces breaking changes.

## Improvements

+## New Features
+
+- **Relative Root Mean Squared Error:** The function normalizes the Root
+  Mean Squared Error by a factor. There is no single official way of
+  normalizing the RMSE; in {SLmetrics} it can be normalized using three
+  options: mean-, range- and IQR-normalization. It can be used as
+  follows,
+
+``` r
+# 1) define values
+actual <- rnorm(1e3)
+predicted <- actual + rnorm(1e3)
+
+# 2) calculate Relative Root Mean Squared Error
+cat(
+  "Mean Relative Root Mean Squared Error", SLmetrics::rrmse(
+    actual = actual,
+    predicted = predicted,
+    normalization = 0
+  ),
+  "Range Relative Root Mean Squared Error", SLmetrics::rrmse(
+    actual = actual,
+    predicted = predicted,
+    normalization = 1
+  ),
+  "IQR Relative Root Mean Squared Error", SLmetrics::rrmse(
+    actual = actual,
+    predicted = predicted,
+    normalization = 2
+  ),
+  sep = "\n"
+)
+```
+
+    #> Mean Relative Root Mean Squared Error
+    #> 2751.381
+    #> Range Relative Root Mean Squared Error
+    #> 0.1564043
+    #> IQR Relative Root Mean Squared Error
+    #> 0.7323898
+
+- **Cross Entropy:** Weighted and unweighted Cross Entropy, with and
+  without normalization.
The function can be used as follows,
+
+``` r
+# Create factors and response probabilities
+actual <- factor(c("Class A", "Class B", "Class A"))
+weights <- c(0.3,0.9,1)
+response <- matrix(c(
+  0.2, 0.8,
+  0.8, 0.2,
+  0.7, 0.3
+), nrow = 3, ncol = 2)
+
+cat(
+  "Unweighted Cross Entropy:",
+  SLmetrics::entropy(
+    actual,
+    response
+  ),
+  "Weighted Cross Entropy:",
+  SLmetrics::weighted.entropy(
+    actual = actual,
+    response = response,
+    w = weights
+  ),
+  sep = "\n"
+)
+```
+
+    #> Unweighted Cross Entropy:
+    #> 0.7297521
+    #> Weighted Cross Entropy:
+    #> 0.4668102
+
+- **Weighted Receiver Operator Characteristics:** `weighted.ROC()`, the
+  function calculates the weighted True Positive and False Positive
+  Rates for each threshold.
+
+- **Weighted Precision-Recall Curve:** `weighted.prROC()`, the function
+  calculates the weighted Recall and Precision for each threshold.
+
+## Breaking Changes
+
+- **Weighted Confusion Matrix:** The `w`-argument in `cmatrix()` has been
+  removed in favor of the more verbose weighted confusion matrix call
+  `weighted.cmatrix()`-function. See below,
+
+Prior to version `0.3-0` the weighted confusion matrix was part of
+the `cmatrix()`-function and was called as follows,
+
+``` r
+SLmetrics::cmatrix(
+  actual = actual,
+  predicted = predicted,
+  w = weights
+)
+```
+
+This solution, although simple, was inconsistent with the remaining
+implementation of weighted metrics in {SLmetrics}. To regain consistency
+and simplicity the weighted confusion matrix is now retrieved as
+follows,
+
+``` r
+# 1) define factors
+actual <- factor(sample(letters[1:3], 100, replace = TRUE))
+predicted <- factor(sample(letters[1:3], 100, replace = TRUE))
+weights <- runif(length(actual))
+
+# 2) without weights
+SLmetrics::cmatrix(
+  actual = actual,
+  predicted = predicted
+)
+```
+
+    #>    a  b  c
+    #> a  7  8 18
+    #> b  6 13 15
+    #> c 15 14  4
+
+``` r
+# 3) with weights
+SLmetrics::weighted.cmatrix(
+  actual = actual,
+  predicted = predicted,
+  w = weights
+)
+```
+
+    #>          a        b        c
+    #> a 3.627355 4.443065 7.164199
+    #> b 3.506631 5.426818 8.358687
+    #> c 6.615661 6.390454 2.233511
+
+## Bug-fixes
+
+- **Return named vectors:** The classification metrics when
+  `micro == NULL` were not returning named vectors. This has been fixed.
+
+# Version 0.2-0
+
+## Improvements
+
 - **documentation:** The documentation has gotten some extra love, and
   now all functions have their formulas embedded, the details section
   have been freed from a general description of \[factor\] creation.
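As a follow-up to the breaking change above, a small sketch, reusing the `actual`, `predicted` and `weights` objects from the example and assuming `weighted.cmatrix()` returns the same `cmatrix` class as `cmatrix()`: the weighted matrix should plug straight into the existing `foo.cmatrix()` methods and agree with the directly weighted metric,

```r
# sketch: a weighted confusion matrix composed with a cmatrix method
wcm <- SLmetrics::weighted.cmatrix(
  actual    = actual,
  predicted = predicted,
  w         = weights
)

# the two calls are expected to agree
SLmetrics::accuracy(wcm)
SLmetrics::weighted.accuracy(actual = actual, predicted = predicted, w = weights)
```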
@@ -32,13 +177,13 @@ SLmetrics::cmatrix( ``` #> a b c - #> a 16 6 8 - #> b 14 10 11 - #> c 5 15 15 + #> a 15 10 4 + #> b 11 18 10 + #> c 10 8 14 ``` r # 2) with weights -SLmetrics::cmatrix( +SLmetrics::weighted.cmatrix( actual = actual, predicted = predicted, w = weights @@ -46,9 +191,9 @@ SLmetrics::cmatrix( ``` #> a b c - #> a 8.796270 3.581817 3.422532 - #> b 6.471277 4.873632 5.732148 - #> c 0.908202 8.319738 8.484611 + #> a 7.578554 4.232749 2.170964 + #> b 3.818030 9.816465 4.838924 + #> c 6.280916 3.577268 6.219229 Calculating weighted metrics manually or by using `foo.cmatrix()`-method, @@ -69,7 +214,7 @@ SLmetrics::accuracy( ) ``` - #> [1] 0.4379208 + #> [1] 0.47 ``` r # 3) calculate the weighted @@ -81,7 +226,7 @@ SLmetrics::weighted.accuracy( ) ``` - #> [1] 0.4379208 + #> [1] 0.4865597 Please note, however, that it is not possible to pass `cmatix()`-into `weighted.accurracy()`, @@ -157,14 +302,14 @@ w <- runif(n = 1e3) SLmetrics::rmse(actual, predicted) ``` - #> [1] 0.9613081 + #> [1] 1.008854 ``` r # 3) weighted metrics SLmetrics::weighted.rmse(actual, predicted, w = w) ``` - #> [1] 0.957806 + #> [1] 0.9904359 - The `rrmse()`-function have been removed in favor of the `rrse()`-function. This function was incorrectly specified and @@ -249,7 +394,7 @@ plot( ) ``` -![](NEWS_files/figure-gfm/unnamed-chunk-6-1.png) +![](NEWS_files/figure-gfm/unnamed-chunk-10-1.png) ``` r plot( @@ -258,7 +403,7 @@ plot( ) ``` -![](NEWS_files/figure-gfm/unnamed-chunk-6-2.png) +![](NEWS_files/figure-gfm/unnamed-chunk-10-2.png) # Version 0.1-0 @@ -281,7 +426,7 @@ print( ) ``` - #> [1] b b a c a a c b a b + #> [1] c c c a b c c b b b #> Levels: a b c ``` r @@ -293,7 +438,7 @@ print( ) ``` - #> [1] a b b c c a a a c a + #> [1] a a c b b c b b b a #> Levels: a b c ``` r @@ -311,16 +456,16 @@ summary( #> Confusion Matrix (3 x 3) #> ================================================================================ #> a b c - #> a 1 1 2 - #> b 3 1 0 - #> c 1 0 1 + #> a 0 1 0 + #> b 1 3 0 + #> c 2 1 2 #> ================================================================================ #> Overall Statistics (micro average) - #> - Accuracy: 0.30 - #> - Balanced Accuracy: 0.33 - #> - Sensitivity: 0.30 - #> - Specificity: 0.65 - #> - Precision: 0.30 + #> - Accuracy: 0.50 + #> - Balanced Accuracy: 0.38 + #> - Sensitivity: 0.50 + #> - Specificity: 0.75 + #> - Precision: 0.50 ``` r # 2) calculate false positive @@ -331,7 +476,7 @@ SLmetrics::fpr( ``` #> a b c - #> 0.6666667 0.1666667 0.2500000 + #> 0.3333333 0.3333333 0.0000000 ### Supervised regression metrics @@ -352,4 +497,4 @@ SLmetrics::huberloss( ) ``` - #> [1] 0.5326572 + #> [1] 0.4698688 diff --git a/NEWS_files/figure-gfm/unnamed-chunk-10-1.png b/NEWS_files/figure-gfm/unnamed-chunk-10-1.png new file mode 100644 index 00000000..0df2beeb Binary files /dev/null and b/NEWS_files/figure-gfm/unnamed-chunk-10-1.png differ diff --git a/NEWS_files/figure-gfm/unnamed-chunk-10-2.png b/NEWS_files/figure-gfm/unnamed-chunk-10-2.png new file mode 100644 index 00000000..0525d3cd Binary files /dev/null and b/NEWS_files/figure-gfm/unnamed-chunk-10-2.png differ diff --git a/NEWS_files/figure-gfm/unnamed-chunk-9-1.png b/NEWS_files/figure-gfm/unnamed-chunk-9-1.png new file mode 100644 index 00000000..4cc9765a Binary files /dev/null and b/NEWS_files/figure-gfm/unnamed-chunk-9-1.png differ diff --git a/NEWS_files/figure-gfm/unnamed-chunk-9-2.png b/NEWS_files/figure-gfm/unnamed-chunk-9-2.png new file mode 100644 index 00000000..126d3180 Binary files /dev/null and 
b/NEWS_files/figure-gfm/unnamed-chunk-9-2.png differ diff --git a/R/RcppExports.R b/R/RcppExports.R index 9697f220..6bc851d3 100644 --- a/R/RcppExports.R +++ b/R/RcppExports.R @@ -5,441 +5,483 @@ #' @method accuracy factor #' @export accuracy.factor <- function(actual, predicted, ...) { - .Call(`_SLmetrics_accuracy`, actual, predicted) + .Call(`_SLmetrics_Accuracy`, actual, predicted) } #' @rdname accuracy #' @method weighted.accuracy factor #' @export weighted.accuracy.factor <- function(actual, predicted, w, ...) { - .Call(`_SLmetrics_weighted_accuracy`, actual, predicted, w) + .Call(`_SLmetrics_weighted_Accuracy`, actual, predicted, w) } #' @rdname accuracy #' @method accuracy cmatrix #' @export accuracy.cmatrix <- function(x, ...) { - .Call(`_SLmetrics_accuracy_cmatrix`, x) + .Call(`_SLmetrics_cmatrix_Accuracy`, x) } #' @rdname baccuracy #' @method baccuracy factor #' @export baccuracy.factor <- function(actual, predicted, adjust = FALSE, na.rm = TRUE, ...) { - .Call(`_SLmetrics_baccuracy`, actual, predicted, adjust, na_rm = na.rm) + .Call(`_SLmetrics_BalancedAccuracy`, actual, predicted, adjust, na_rm = na.rm) } #' @rdname baccuracy #' @method weighted.baccuracy factor #' @export weighted.baccuracy.factor <- function(actual, predicted, w, adjust = FALSE, na.rm = TRUE, ...) { - .Call(`_SLmetrics_weighted_baccuracy`, actual, predicted, w, adjust, na_rm = na.rm) + .Call(`_SLmetrics_weighted_BalancedAccuracy`, actual, predicted, w, adjust, na_rm = na.rm) } #' @rdname baccuracy #' @method baccuracy cmatrix #' @export baccuracy.cmatrix <- function(x, adjust = FALSE, na.rm = TRUE, ...) { - .Call(`_SLmetrics_baccuracy_cmatrix`, x, adjust, na_rm = na.rm) + .Call(`_SLmetrics_cmatrix_BalancedAccuracy`, x, adjust, na_rm = na.rm) } #' @rdname ckappa #' @method ckappa factor #' @export ckappa.factor <- function(actual, predicted, beta = 0.0, ...) { - .Call(`_SLmetrics_ckappa`, actual, predicted, beta) + .Call(`_SLmetrics_CohensKappa`, actual, predicted, beta) } #' @rdname ckappa #' @method weighted.ckappa factor #' @export weighted.ckappa.factor <- function(actual, predicted, w, beta = 0.0, ...) { - .Call(`_SLmetrics_weighted_ckappa`, actual, predicted, w, beta) + .Call(`_SLmetrics_weighted_CohensKappa`, actual, predicted, w, beta) } #' @rdname ckappa #' @method ckappa cmatrix #' @export ckappa.cmatrix <- function(x, beta = 0.0, ...) { - .Call(`_SLmetrics_ckappa_cmatrix`, x, beta) + .Call(`_SLmetrics_cmatrix_CohensKappa`, x, beta) } #' @rdname cmatrix #' @method cmatrix factor #' @export -cmatrix.factor <- function(actual, predicted, w = NULL, ...) { - .Call(`_SLmetrics_cmatrix`, actual, predicted, w) +cmatrix.factor <- function(actual, predicted, ...) { + .Call(`_SLmetrics_UnweightedConfusionMatrix`, actual, predicted) +} + +#' @rdname cmatrix +#' @method weighted.cmatrix factor +#' @export +weighted.cmatrix.factor <- function(actual, predicted, w, ...) { + .Call(`_SLmetrics_WeightedConfusionMatrix`, actual, predicted, w) +} + +#' @rdname entropy +#' @method entropy factor +#' @export +entropy.factor <- function(actual, response, normalize = TRUE, ...) { + .Call(`_SLmetrics_CrossEntropy`, actual, response, normalize) +} + +#' @rdname weighted.entropy +#' @method weighted.entropy factor +#' @export +weighted.entropy.factor <- function(actual, response, w, normalize = TRUE, ...) { + .Call(`_SLmetrics_weighted_CrossEntropy`, actual, response, w, normalize) +} + +#' @rdname entropy +#' @method logloss factor +#' @export +logloss.factor <- function(actual, response, normalize = TRUE, ...) 
{ + .Call(`_SLmetrics_LogLoss`, actual, response, normalize) +} + +#' @rdname weighted.entropy +#' @method weighted.logloss factor +#' @export +weighted.logloss.factor <- function(actual, response, w, normalize = TRUE, ...) { + .Call(`_SLmetrics_weighted_LogLoss`, actual, response, w, normalize) } #' @rdname dor #' @method dor factor #' @export -dor.factor <- function(actual, predicted, micro = NULL, ...) { - .Call(`_SLmetrics_dor`, actual, predicted, micro) +dor.factor <- function(actual, predicted, ...) { + .Call(`_SLmetrics_DiagnosticOddsRatio`, actual, predicted) } #' @rdname dor #' @method weighted.dor factor #' @export -weighted.dor.factor <- function(actual, predicted, w, micro = NULL, ...) { - .Call(`_SLmetrics_weighted_dor`, actual, predicted, w, micro) +weighted.dor.factor <- function(actual, predicted, w, ...) { + .Call(`_SLmetrics_weighted_DiagnosticOddsRatio`, actual, predicted, w) } #' @rdname dor #' @method dor cmatrix #' @export -dor.cmatrix <- function(x, micro = NULL, ...) { - .Call(`_SLmetrics_dor_cmatrix`, x, micro) +dor.cmatrix <- function(x, ...) { + .Call(`_SLmetrics_cmatrix_DiagnosticOddsRatio`, x) } #' @rdname fbeta #' @method fbeta factor #' @export fbeta.factor <- function(actual, predicted, beta = 1.0, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_fbeta`, actual, predicted, beta, micro, na_rm = na.rm) + .Call(`_SLmetrics_FBetaScore`, actual, predicted, beta, micro, na_rm = na.rm) } #' @rdname fbeta #' @method weighted.fbeta factor #' @export weighted.fbeta.factor <- function(actual, predicted, w, beta = 1.0, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_weighted_fbeta`, actual, predicted, w, beta, micro, na_rm = na.rm) + .Call(`_SLmetrics_weighted_FBetaScore`, actual, predicted, w, beta, micro, na_rm = na.rm) } #' @rdname fbeta #' @method fbeta cmatrix #' @export fbeta.cmatrix <- function(x, beta = 1.0, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_fbeta_cmatrix`, x, beta, micro, na_rm = na.rm) + .Call(`_SLmetrics_cmatrix_FBetaScore`, x, beta, micro, na_rm = na.rm) } #' @rdname fdr #' @method fdr factor #' @export fdr.factor <- function(actual, predicted, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_fdr`, actual, predicted, micro, na_rm = na.rm) + .Call(`_SLmetrics_FalseDiscoveryRate`, actual, predicted, micro, na_rm = na.rm) } #' @rdname fdr #' @method weighted.fdr factor #' @export weighted.fdr.factor <- function(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_weighted_fdr`, actual, predicted, w, micro, na_rm = na.rm) + .Call(`_SLmetrics_weighted_FalseDiscoveryRate`, actual, predicted, w, micro, na_rm = na.rm) } #' @rdname fdr #' @method fdr cmatrix #' @export fdr.cmatrix <- function(x, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_fdr_cmatrix`, x, micro, na_rm = na.rm) + .Call(`_SLmetrics_cmatrix_FalseDiscoveryRate`, x, micro, na_rm = na.rm) } #' @rdname fer #' @method fer factor #' @export fer.factor <- function(actual, predicted, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_fer`, actual, predicted, micro, na_rm = na.rm) + .Call(`_SLmetrics_FalseOmissionRate`, actual, predicted, micro, na_rm = na.rm) } #' @rdname fer #' @method weighted.fer factor #' @export weighted.fer.factor <- function(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) 
{ - .Call(`_SLmetrics_weighted_fer`, actual, predicted, w, micro, na_rm = na.rm) + .Call(`_SLmetrics_weighted_FalseOmissionRate`, actual, predicted, w, micro, na_rm = na.rm) } #' @rdname fer #' @method fer cmatrix #' @export fer.cmatrix <- function(x, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_fer_cmatrix`, x, micro, na_rm = na.rm) + .Call(`_SLmetrics_cmatrix_FalseOmissionRate`, x, micro, na_rm = na.rm) } #' @rdname fpr #' @method fpr factor #' @export fpr.factor <- function(actual, predicted, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_fpr`, actual, predicted, micro, na_rm = na.rm) + .Call(`_SLmetrics_FalsePositiveRate`, actual, predicted, micro, na_rm = na.rm) } #' @rdname fpr #' @method weighted.fpr factor #' @export weighted.fpr.factor <- function(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_weighted_fpr`, actual, predicted, w, micro, na_rm = na.rm) + .Call(`_SLmetrics_weighted_FalsePositiveRate`, actual, predicted, w, micro, na_rm = na.rm) } #' @rdname fpr #' @method fpr cmatrix #' @export fpr.cmatrix <- function(x, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_fpr_cmatrix`, x, micro, na_rm = na.rm) + .Call(`_SLmetrics_cmatrix_FalsePositiveRate`, x, micro, na_rm = na.rm) } #' @rdname fpr #' @method fallout factor #' @export fallout.factor <- function(actual, predicted, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_fallout`, actual, predicted, micro, na_rm = na.rm) + .Call(`_SLmetrics_Fallout`, actual, predicted, micro, na_rm = na.rm) } #' @rdname fpr #' @method weighted.fallout factor #' @export weighted.fallout.factor <- function(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_weighted_fallout`, actual, predicted, w, micro, na_rm = na.rm) + .Call(`_SLmetrics_weighted_Fallout`, actual, predicted, w, micro, na_rm = na.rm) } #' @rdname fpr #' @method fallout cmatrix #' @export fallout.cmatrix <- function(x, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_fallout_cmatrix`, x, micro, na_rm = na.rm) + .Call(`_SLmetrics_cmatrix_Fallout`, x, micro, na_rm = na.rm) } #' @rdname fmi #' @method fmi factor #' @export fmi.factor <- function(actual, predicted, ...) { - .Call(`_SLmetrics_fmi`, actual, predicted) + .Call(`_SLmetrics_FowlkesMallowsIndex`, actual, predicted) } #' @rdname fmi #' @method fmi cmatrix #' @export fmi.cmatrix <- function(x, ...) { - .Call(`_SLmetrics_fmi_cmatrix`, x) + .Call(`_SLmetrics_cmatrix_FowlkesMallowsIndexClass`, x) } #' @rdname jaccard #' @method jaccard factor #' @export jaccard.factor <- function(actual, predicted, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_jaccard`, actual, predicted, micro, na_rm = na.rm) + .Call(`_SLmetrics_JaccardIndex`, actual, predicted, micro, na_rm = na.rm) } #' @rdname jaccard #' @method weighted.jaccard factor #' @export weighted.jaccard.factor <- function(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_weighted_jaccard`, actual, predicted, w, micro, na_rm = na.rm) + .Call(`_SLmetrics_weighted_JaccardIndex`, actual, predicted, w, micro, na_rm = na.rm) } #' @rdname jaccard #' @method jaccard cmatrix #' @export jaccard.cmatrix <- function(x, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_jaccard_cmatrix`, x, micro, na_rm = na.rm) + .Call(`_SLmetrics_cmatrix_JaccardIndex`, x, micro, na_rm = na.rm) } #' @rdname jaccard #' @method csi factor #' @export csi.factor <- function(actual, predicted, micro = NULL, na.rm = TRUE, ...) 
{ - .Call(`_SLmetrics_csi`, actual, predicted, micro, na_rm = na.rm) + .Call(`_SLmetrics_CriticalSuccessIndex`, actual, predicted, micro, na_rm = na.rm) } #' @rdname jaccard #' @method weighted.csi factor #' @export weighted.csi.factor <- function(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_weighted_csi`, actual, predicted, w, micro, na_rm = na.rm) + .Call(`_SLmetrics_weighted_CriticalSuccessIndex`, actual, predicted, w, micro, na_rm = na.rm) } #' @rdname jaccard #' @method csi cmatrix #' @export csi.cmatrix <- function(x, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_csi_cmatrix`, x, micro, na_rm = na.rm) + .Call(`_SLmetrics_cmatrix_CriticalSuccessIndex`, x, micro, na_rm = na.rm) } #' @rdname jaccard #' @method tscore factor #' @export tscore.factor <- function(actual, predicted, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_tscore`, actual, predicted, micro, na_rm = na.rm) + .Call(`_SLmetrics_ThreatScore`, actual, predicted, micro, na_rm = na.rm) } #' @rdname jaccard #' @method weighted.tscore factor #' @export weighted.tscore.factor <- function(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_weighted_tscore`, actual, predicted, w, micro, na_rm = na.rm) + .Call(`_SLmetrics_weighted_ThreatScore`, actual, predicted, w, micro, na_rm = na.rm) } #' @rdname jaccard #' @method tscore cmatrix #' @export tscore.cmatrix <- function(x, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_tscore_cmatrix`, x, micro, na_rm = na.rm) + .Call(`_SLmetrics_cmatrix_ThreatScore`, x, micro, na_rm = na.rm) } #' @rdname mcc #' @method mcc factor #' @export mcc.factor <- function(actual, predicted, ...) { - .Call(`_SLmetrics_mcc`, actual, predicted) + .Call(`_SLmetrics_MatthewsCorrelationCoefficient`, actual, predicted) } #' @rdname mcc #' @method weighted.mcc factor #' @export weighted.mcc.factor <- function(actual, predicted, w, ...) { - .Call(`_SLmetrics_weigthed_mcc`, actual, predicted, w) + .Call(`_SLmetrics_weigthed_MatthewsCorrelationCoefficient`, actual, predicted, w) } #' @rdname mcc #' @method mcc cmatrix #' @export mcc.cmatrix <- function(x, ...) { - .Call(`_SLmetrics_mcc_cmatrix`, x) + .Call(`_SLmetrics_cmatrix_MatthewsCorrelationCoefficient`, x) } #' @rdname mcc #' @method phi factor #' @export phi.factor <- function(actual, predicted, ...) { - .Call(`_SLmetrics_phi`, actual, predicted) + .Call(`_SLmetrics_PhiCoefficient`, actual, predicted) } #' @rdname mcc #' @method weighted.phi factor #' @export weighted.phi.factor <- function(actual, predicted, w, ...) { - .Call(`_SLmetrics_weighted_phi`, actual, predicted, w) + .Call(`_SLmetrics_weighted_PhiCoefficient`, actual, predicted, w) } #' @rdname mcc #' @method phi cmatrix #' @export phi.cmatrix <- function(x, ...) { - .Call(`_SLmetrics_phi_cmatrix`, x) + .Call(`_SLmetrics_cmatrix_PhiCoefficient`, x) } #' @rdname nlr #' @method nlr factor #' @export -nlr.factor <- function(actual, predicted, micro = NULL, ...) { - .Call(`_SLmetrics_nlr`, actual, predicted, micro) +nlr.factor <- function(actual, predicted, ...) { + .Call(`_SLmetrics_NegativeLikelihoodRatio`, actual, predicted) } #' @rdname nlr #' @method weighted.nlr factor #' @export -weighted.nlr.factor <- function(actual, predicted, w, micro = NULL, ...) { - .Call(`_SLmetrics_weighted_nlr`, actual, predicted, w, micro) +weighted.nlr.factor <- function(actual, predicted, w, ...) 
{ + .Call(`_SLmetrics_weighted_NegativeLikelihoodRatio`, actual, predicted, w) } #' @rdname nlr #' @method nlr cmatrix #' @export -nlr.cmatrix <- function(x, micro = NULL, ...) { - .Call(`_SLmetrics_nlr_cmatrix`, x, micro) +nlr.cmatrix <- function(x, ...) { + .Call(`_SLmetrics_cmatrix_NegativeLikelihoodRatio`, x) } #' @rdname npv #' @method npv factor #' @export npv.factor <- function(actual, predicted, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_npv`, actual, predicted, micro, na_rm = na.rm) + .Call(`_SLmetrics_NegativePredictitveValue`, actual, predicted, micro, na_rm = na.rm) } #' @rdname npv #' @method weighted.npv factor #' @export weighted.npv.factor <- function(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_weighted_npv`, actual, predicted, w, micro, na_rm = na.rm) + .Call(`_SLmetrics_weighted_NegativePredictitveValue`, actual, predicted, w, micro, na_rm = na.rm) } #' @rdname npv #' @method npv cmatrix #' @export npv.cmatrix <- function(x, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_npv_cmatrix`, x, micro, na_rm = na.rm) + .Call(`_SLmetrics_cmatrix_NegativePredictitveValue`, x, micro, na_rm = na.rm) } #' @rdname plr #' @method plr factor #' @export -plr.factor <- function(actual, predicted, micro = NULL, ...) { - .Call(`_SLmetrics_plr`, actual, predicted, micro) +plr.factor <- function(actual, predicted, ...) { + .Call(`_SLmetrics_PositiveLikelihoodRatio`, actual, predicted) } #' @rdname plr #' @method weighted.plr factor #' @export -weighted.plr.factor <- function(actual, predicted, w, micro = NULL, ...) { - .Call(`_SLmetrics_weighted_plr`, actual, predicted, w, micro) +weighted.plr.factor <- function(actual, predicted, w, ...) { + .Call(`_SLmetrics_weighted_PositiveLikelihoodRatio`, actual, predicted, w) } #' @rdname plr #' @method plr cmatrix #' @export -plr.cmatrix <- function(x, micro = NULL, ...) { - .Call(`_SLmetrics_plr_cmatrix`, x, micro) +plr.cmatrix <- function(x, ...) { + .Call(`_SLmetrics_cmatrix_PositiveLikelihoodRatio`, x) } #' @rdname precision #' @method precision factor #' @export precision.factor <- function(actual, predicted, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_precision`, actual, predicted, micro, na_rm = na.rm) + .Call(`_SLmetrics_Precision`, actual, predicted, micro, na_rm = na.rm) } #' @rdname precision #' @method weighted.precision factor #' @export weighted.precision.factor <- function(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_weighted_precision`, actual, predicted, w, micro, na_rm = na.rm) + .Call(`_SLmetrics_weighted_Precision`, actual, predicted, w, micro, na_rm = na.rm) } #' @rdname precision #' @method precision cmatrix #' @export precision.cmatrix <- function(x, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_precision_cmatrix`, x, micro, na_rm = na.rm) + .Call(`_SLmetrics_cmatrix_Precision`, x, micro, na_rm = na.rm) } #' @rdname precision #' @method ppv factor #' @export ppv.factor <- function(actual, predicted, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_ppv`, actual, predicted, micro, na_rm = na.rm) + .Call(`_SLmetrics_PositivePredictiveValue`, actual, predicted, micro, na_rm = na.rm) } #' @rdname precision #' @method weighted.ppv factor #' @export weighted.ppv.factor <- function(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) 
{ - .Call(`_SLmetrics_weighted_ppv`, actual, predicted, w, micro, na_rm = na.rm) + .Call(`_SLmetrics_weighted_PositivePredictiveValue`, actual, predicted, w, micro, na_rm = na.rm) } #' @rdname precision #' @method ppv cmatrix #' @export ppv.cmatrix <- function(x, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_ppv_cmatrix`, x, micro, na_rm = na.rm) + .Call(`_SLmetrics_cmatrix_PositivePredictiveValue`, x, micro, na_rm = na.rm) } #' @rdname prROC #' @method prROC factor #' @export -prROC.factor <- function(actual, response, micro = NULL, thresholds = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_prROC`, actual, response, micro, thresholds, na_rm = na.rm) +prROC.factor <- function(actual, response, thresholds = NULL, ...) { + .Call(`_SLmetrics_PrecisionRecallCurve`, actual, response, thresholds) +} + +#' @rdname prROC +#' @method weighted.prROC factor +#' @export +weighted.prROC.factor <- function(actual, response, w, thresholds = NULL, ...) { + .Call(`_SLmetrics_weighted_PrecisionRecallCurve`, actual, response, w, thresholds) } #' @rdname recall #' @method recall factor #' @export recall.factor <- function(actual, predicted, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_recall`, actual, predicted, micro, na_rm = na.rm) + .Call(`_SLmetrics_Recall`, actual, predicted, micro, na_rm = na.rm) } #' @rdname recall #' @method weighted.recall factor #' @export weighted.recall.factor <- function(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_weighted_recall`, actual, predicted, w, micro, na_rm = na.rm) + .Call(`_SLmetrics_weighted_Recall`, actual, predicted, w, micro, na_rm = na.rm) } #' @rdname recall @@ -447,7 +489,7 @@ weighted.recall.factor <- function(actual, predicted, w, micro = NULL, na.rm = T #' @method recall cmatrix #' @export recall.cmatrix <- function(x, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_recall_cmatrix`, x, micro, na_rm = na.rm) + .Call(`_SLmetrics_cmatrix_Recall`, x, micro, na_rm = na.rm) } #' @rdname recall @@ -455,14 +497,14 @@ recall.cmatrix <- function(x, micro = NULL, na.rm = TRUE, ...) { #' #' @export sensitivity.factor <- function(actual, predicted, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_sensitivity`, actual, predicted, micro, na_rm = na.rm) + .Call(`_SLmetrics_Sensitivity`, actual, predicted, micro, na_rm = na.rm) } #' @rdname recall #' @method weighted.sensitivity factor #' @export weighted.sensitivity.factor <- function(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_weighted_sensitivity`, actual, predicted, w, micro, na_rm = na.rm) + .Call(`_SLmetrics_weighted_Sensitivity`, actual, predicted, w, micro, na_rm = na.rm) } #' @rdname recall @@ -470,7 +512,7 @@ weighted.sensitivity.factor <- function(actual, predicted, w, micro = NULL, na.r #' @method sensitivity cmatrix #' @export sensitivity.cmatrix <- function(x, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_sensitivity_cmatrix`, x, micro, na_rm = na.rm) + .Call(`_SLmetrics_cmatrix_Sensitivity`, x, micro, na_rm = na.rm) } #' @rdname recall @@ -478,14 +520,14 @@ sensitivity.cmatrix <- function(x, micro = NULL, na.rm = TRUE, ...) { #' @method tpr factor #' @export tpr.factor <- function(actual, predicted, micro = NULL, na.rm = TRUE, ...) 
{ - .Call(`_SLmetrics_tpr`, actual, predicted, micro, na_rm = na.rm) + .Call(`_SLmetrics_TruePositiveRate`, actual, predicted, micro, na_rm = na.rm) } #' @rdname recall #' @method weighted.tpr factor #' @export weighted.tpr.factor <- function(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_weighted_tpr`, actual, predicted, w, micro, na_rm = na.rm) + .Call(`_SLmetrics_weighted_TruePositiveRate`, actual, predicted, w, micro, na_rm = na.rm) } #' @rdname recall @@ -493,7 +535,7 @@ weighted.tpr.factor <- function(actual, predicted, w, micro = NULL, na.rm = TRUE #' @method tpr cmatrix #' @export tpr.cmatrix <- function(x, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_tpr_cmatrix`, x, micro, na_rm = na.rm) + .Call(`_SLmetrics_cmatrix_TruePositiveRate`, x, micro, na_rm = na.rm) } auc <- function(y, x, method = 0L) { @@ -503,92 +545,99 @@ auc <- function(y, x, method = 0L) { #' @rdname ROC #' @method ROC factor #' @export -ROC.factor <- function(actual, response, micro = NULL, thresholds = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_ROC`, actual, response, micro, thresholds, na_rm = na.rm) +ROC.factor <- function(actual, response, thresholds = NULL, ...) { + .Call(`_SLmetrics_RecieverOperatorCharacteristics`, actual, response, thresholds) +} + +#' @rdname ROC +#' @method weighted.ROC factor +#' @export +weighted.ROC.factor <- function(actual, response, w, thresholds = NULL, ...) { + .Call(`_SLmetrics_weighted_RecieverOperatorCharacteristics`, actual, response, w, thresholds) } #' @rdname specificity #' @method specificity factor #' @export specificity.factor <- function(actual, predicted, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_specificity`, actual, predicted, micro, na_rm = na.rm) + .Call(`_SLmetrics_Specificity`, actual, predicted, micro, na_rm = na.rm) } #' @rdname specificity #' @method weighted.specificity factor #' @export weighted.specificity.factor <- function(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_weighted_specificity`, actual, predicted, w, micro, na_rm = na.rm) + .Call(`_SLmetrics_weighted_Specificity`, actual, predicted, w, micro, na_rm = na.rm) } #' @rdname specificity #' @method specificity cmatrix #' @export specificity.cmatrix <- function(x, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_specificity_cmatrix`, x, micro, na_rm = na.rm) + .Call(`_SLmetrics_cmatrix_Specificity`, x, micro, na_rm = na.rm) } #' @rdname specificity #' @method tnr factor #' @export tnr.factor <- function(actual, predicted, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_tnr`, actual, predicted, micro, na_rm = na.rm) + .Call(`_SLmetrics_TrueNegativeRate`, actual, predicted, micro, na_rm = na.rm) } #' @rdname specificity #' @method weighted.tnr factor #' @export weighted.tnr.factor <- function(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_weighted_tnr`, actual, predicted, w, micro, na_rm = na.rm) + .Call(`_SLmetrics_weighted_TrueNegativeRate`, actual, predicted, w, micro, na_rm = na.rm) } #' @rdname specificity #' @method tnr cmatrix #' @export tnr.cmatrix <- function(x, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_tnr_cmatrix`, x, micro, na_rm = na.rm) + .Call(`_SLmetrics_cmatrix_TrueNegativeRate`, x, micro, na_rm = na.rm) } #' @rdname specificity #' @method selectivity factor #' @export selectivity.factor <- function(actual, predicted, micro = NULL, na.rm = TRUE, ...) 
{ - .Call(`_SLmetrics_selectivity`, actual, predicted, micro, na_rm = na.rm) + .Call(`_SLmetrics_Selectivity`, actual, predicted, micro, na_rm = na.rm) } #' @rdname specificity #' @method weighted.selectivity factor #' @export weighted.selectivity.factor <- function(actual, predicted, w, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_weighted_selectivity`, actual, predicted, w, micro, na_rm = na.rm) + .Call(`_SLmetrics_weighted_Selectivity`, actual, predicted, w, micro, na_rm = na.rm) } #' @rdname specificity #' @method selectivity cmatrix #' @export selectivity.cmatrix <- function(x, micro = NULL, na.rm = TRUE, ...) { - .Call(`_SLmetrics_selectivity_cmatrix`, x, micro, na_rm = na.rm) + .Call(`_SLmetrics_cmatrix_Selectivity`, x, micro, na_rm = na.rm) } #' @rdname zerooneloss #' @method zerooneloss factor #' @export zerooneloss.factor <- function(actual, predicted, ...) { - .Call(`_SLmetrics_zerooneloss`, actual, predicted) + .Call(`_SLmetrics_ZeroOneLoss`, actual, predicted) } #' @rdname zerooneloss #' @method weighted.zerooneloss factor #' @export weighted.zerooneloss.factor <- function(actual, predicted, w, ...) { - .Call(`_SLmetrics_weighted_zerooneloss`, actual, predicted, w) + .Call(`_SLmetrics_weighted_ZeroOneLoss`, actual, predicted, w) } #' @rdname zerooneloss #' @method zerooneloss cmatrix #' @export zerooneloss.cmatrix <- function(x, ...) { - .Call(`_SLmetrics_zerooneloss_cmatrix`, x) + .Call(`_SLmetrics_cmatrix_ZeroOneLoss`, x) } #' @rdname rsq @@ -717,6 +766,20 @@ weighted.rae.numeric <- function(actual, predicted, w, ...) { .Call(`_SLmetrics_weighted_rae`, actual, predicted, w) } +#' @rdname rrmse +#' @method rrmse numeric +#' @export +rrmse.numeric <- function(actual, predicted, normalization = 1L, ...) { + .Call(`_SLmetrics_RelativeRootMeanSquaredError`, actual, predicted, normalization) +} + +#' @rdname rrmse +#' @method weighted.rrmse numeric +#' @export +weighted.rrmse.numeric <- function(actual, predicted, w, normalization = 1L, ...) { + .Call(`_SLmetrics_weighted_RelativeRootMeanSquaredError`, actual, predicted, w, normalization) +} + #' @rdname rmse #' @method rmse numeric #' @export diff --git a/R/S3-CrossEntropyLoss.R b/R/S3-CrossEntropyLoss.R new file mode 100644 index 00000000..da8719d2 --- /dev/null +++ b/R/S3-CrossEntropyLoss.R @@ -0,0 +1,67 @@ +#' Compute the \eqn{\text{Cross}} \eqn{\text{Entropy}} \eqn{\text{Loss}} +#' +#' @description +#' The [entropy()] function computes the **Cross-Entropy Loss** — often called **Log Loss** — between observed classes (as a <[factor]>) and their predicted probability distributions (a <[numeric]> matrix). +#' The [weighted.entropy()] function is the weighted version, applying observation-specific weights. +#' +#' @inherit accuracy +#' @param response A \eqn{N \times k} <[numeric]>-matrix of predicted probabilities. +#' The \eqn{i}-th row should sum to 1 (i.e., a valid probability distribution +#' over the \eqn{k} classes). The first column corresponds to the first factor +#' level in \code{actual}, the second column to the second factor level, and so on. +#' @param normalize A <[logical]>-value (default: [TRUE]). If [TRUE], +#' the mean cross-entropy across all observations is returned; otherwise, the +#' sum of cross-entropies is returned. 
+#' +#' @section Calculation: +#' +#' Let \eqn{y_{i,k}} be the one-hot encoding of the actual class label for the \eqn{i}-th observation (that is, \eqn{y_{i,k} = 1} if observation \eqn{i} belongs to class \eqn{k}, and 0 otherwise), and let \eqn{\hat{p}_{i,k}} be the predicted probability of class \eqn{k} for observation \eqn{i}. +#' The cross-entropy loss \eqn{L} is: +#' +#' \deqn{ +#' L = -\sum_{i=1}^N \sum_{k=1}^K y_{i,k}\,\log(\hat{p}_{i,k}). +#' } +#' +#' If \code{normalize = TRUE}, this sum is divided by \eqn{N} (the number of observations). When weights \eqn{w_i} are supplied, each term is multiplied by \eqn{w_i}, and if \code{normalize = TRUE}, the final sum is divided by \eqn{\sum_i w_i}. +#' +#' @example man/examples/scr_CrossEntropyLoss.R +#' +#' @family Classification +#' @family Supervised Learning +#' +#' @aliases entropy logloss +#' +#' @export +entropy <- function(...) { + UseMethod( + generic = "entropy", + object = ..1 + ) +} + +#' @rdname entropy +#' @export +weighted.entropy <- function(...) { + UseMethod( + generic = "weighted.entropy", + object = ..1 + ) +} + +#' @rdname entropy +#' @export +logloss <- function(...) { + UseMethod( + generic = "logloss", + object = ..1 + ) +} + +#' @rdname entropy +#' @export +weighted.logloss <- function(...) { + UseMethod( + generic = "weighted.logloss", + object = ..1 + ) +} \ No newline at end of file diff --git a/R/S3_Accuracy.R b/R/S3_Accuracy.R index e97afc8c..6e42cf5c 100644 --- a/R/S3_Accuracy.R +++ b/R/S3_Accuracy.R @@ -15,7 +15,6 @@ #' @param predicted A vector of <[factor]>-vector of [length] \eqn{n}, and \eqn{k} levels #' @param w A <[numeric]>-vector of [length] \eqn{n}. [NULL] by default #' @param x A confusion matrix created [cmatrix()] -#' @param ... Arguments passed into other methods #' #' @inherit specificity #' diff --git a/R/S3_ConfusionMatrix.R b/R/S3_ConfusionMatrix.R index fa4666c3..8d94871d 100644 --- a/R/S3_ConfusionMatrix.R +++ b/R/S3_ConfusionMatrix.R @@ -50,6 +50,15 @@ cmatrix <- function(...) { ) } +#' @rdname cmatrix +#' @export +weighted.cmatrix <- function(...) { + UseMethod( + generic = "weighted.cmatrix", + object = ..1 + ) +} + #' @export print.cmatrix <- function( diff --git a/R/S3_PrecisionRecallCurve.R b/R/S3_PrecisionRecallCurve.R index eae4678e..13b56b6d 100644 --- a/R/S3_PrecisionRecallCurve.R +++ b/R/S3_PrecisionRecallCurve.R @@ -20,7 +20,7 @@ #' #' @returns A [data.frame] on the following form, #' -#' \item{thresholds}{<[numeric]> Thresholds used to determine [recall()] and [precision()]} +#' \item{threshold}{<[numeric]> Thresholds used to determine [recall()] and [precision()]} #' \item{level}{<[character]> The level of the actual <[factor]>} #' \item{label}{<[character]> The levels of the actual <[factor]>} #' \item{recall}{<[numeric]> The recall} @@ -34,6 +34,15 @@ prROC <- function(...) { ) } +#' @rdname prROC +#' @export +weighted.prROC <- function(...) { + UseMethod( + generic = "weighted.prROC", + object = ..1 + ) +} + #' @export print.prROC <- function(x, ...) { @@ -146,7 +155,7 @@ plot.prROC <- function( xlab = xlab, ylab = ylab, main = main, - DT = x, + DT = x[is.finite(x$threshold), ], add_poly = panels, ... 
) diff --git a/R/S3_RecieverOperatorCharacteristics.R b/R/S3_RecieverOperatorCharacteristics.R index 17ece4f1..107cbf26 100644 --- a/R/S3_RecieverOperatorCharacteristics.R +++ b/R/S3_RecieverOperatorCharacteristics.R @@ -22,7 +22,7 @@ #' #' @returns A [data.frame] on the following form, #' -#' \item{thresholds}{<[numeric]> Thresholds used to determine [tpr()] and [fpr()]} +#' \item{threshold}{<[numeric]> Thresholds used to determine [tpr()] and [fpr()]} #' \item{level}{<[character]> The level of the actual <[factor]>} #' \item{label}{<[character]> The levels of the actual <[factor]>} #' \item{fpr}{<[numeric]> The false positive rate} @@ -39,6 +39,15 @@ ROC <- function(...) { ) } +#' @rdname ROC +#' @export +weighted.ROC <- function(...) { + UseMethod( + generic = "weighted.ROC", + object = ..1 + ) +} + #' @export print.ROC <- function(x, ...) { @@ -155,7 +164,7 @@ plot.ROC <- function( xlab = xlab, ylab = ylab, main = main, - DT = x, + DT = x[is.finite(x$threshold), ], add_poly = panels, ... ) diff --git a/R/S3_RelativeRootMeanSquaredError.R b/R/S3_RelativeRootMeanSquaredError.R new file mode 100644 index 00000000..031a845c --- /dev/null +++ b/R/S3_RelativeRootMeanSquaredError.R @@ -0,0 +1,46 @@ +# script: Relative Root Mean Squared Error +# date: 2024-12-27 +# author: Serkan Korkmaz, serkor1@duck.com +# objective: Generate Errors +# script start; + +#' Compute the \eqn{\text{relative}} \eqn{\text{root}} \eqn{\text{mean}} \eqn{\text{squared}} \eqn{\text{error}} +#' +#' The [rrmse()]-function computes the [Relative Root Mean Squared Error](https://en.wikipedia.org/wiki/Root-mean-square_deviation) between +#' the observed and predicted <[numeric]> vectors. The [weighted.rrmse()] function computes the weighted Relative Root Mean Squared Error. +#' +#' @inherit huberloss +#' @param normalization A <[numeric]>-value of [length] \eqn{1} (default: \eqn{1}). \eqn{0}: [mean]-normalization, \eqn{1}: [range]-normalization, \eqn{2}: [IQR]-normalization. +#' +#' @example man/examples/scr_RelativeRootMeanSquaredError.R +#' +#' @section Calculation: +#' +#' The metric is calculated as, +#' +#' \deqn{ +#' \frac{RMSE}{\gamma} +#' } +#' +#' Where \eqn{\gamma} is the normalization factor. +#' +#' @family Regression +#' @family Supervised Learning +#' @export +rrmse <- function(...) { + UseMethod( + generic = "rrmse", + object = ..1 + ) +} + +#' @rdname rrmse +#' @export +weighted.rrmse <- function(...) { + UseMethod( + generic = "weighted.rrmse", + object = ..1 + ) +} + +# script end; diff --git a/R/sysdata.rda b/R/sysdata.rda index b49fe545..15505651 100644 Binary files a/R/sysdata.rda and b/R/sysdata.rda differ diff --git a/README.Rmd b/README.Rmd index 3518084e..e162cb24 100644 --- a/README.Rmd +++ b/README.Rmd @@ -2,7 +2,6 @@ output: github_document always_allow_html: true --- - ```{r, include = FALSE} @@ -51,195 +50,170 @@ lattice::trellis.par.set( [![codecov](https://codecov.io/gh/serkor1/SLmetrics/branch/development/graph/badge.svg?token=X2osJDSRlN)](https://codecov.io/gh/serkor1/SLmetrics) -[{SLmetrics}](https://serkor1.github.io/SLmetrics/) is a collection of *lightning fast* performance evaluation metrics for regression and classification models written in `C++` and [{Rcpp}](/~https://github.com/RcppCore/Rcpp); it's like using a supercharged [{yardstick}](/~https://github.com/tidymodels/yardstick) to measure model performance, without the risk of soft to super-hard deprecations. 
[{SLmetrics}](https://serkor1.github.io/SLmetrics/) provides (almost) the same array of metrics as in [{scikit-learn}](/~https://github.com/scikit-learn/scikit-learn) and [{pytorch}](/~https://github.com/pytorch/pytorch) but without having to [{reticulate}](/~https://github.com/rstudio/reticulate) or go through the whole compile, run and debug cycle in `Python`.
+[{SLmetrics}](https://serkor1.github.io/SLmetrics/) is a lightweight `R` package written in `C++` and [{Rcpp}](/~https://github.com/RcppCore/Rcpp) for *memory-efficient* and *lightning-fast* machine learning performance evaluation; it's like using a supercharged [{yardstick}](/~https://github.com/tidymodels/yardstick) but without the risk of soft to super-hard deprecations. [{SLmetrics}](https://serkor1.github.io/SLmetrics/) covers both regression and classification metrics and provides (almost) the same array of metrics as [{scikit-learn}](/~https://github.com/scikit-learn/scikit-learn) and [{PyTorch}](/~https://github.com/pytorch/pytorch) all without [{reticulate}](/~https://github.com/rstudio/reticulate) and the Python compile-run-(crash)-debug cycle.

 Depending on the mood and alignment of planets [{SLmetrics}](https://serkor1.github.io/SLmetrics/) stands for Supervised Learning metrics, or Statistical Learning metrics. If [{SLmetrics}](https://serkor1.github.io/SLmetrics/) catches on, the latter will be the core philosophy and include unsupervised learning metrics. If not, then it will remain a {pkg} for Supervised Learning metrics, and a sandbox for me to develop my `C++` skills.

-## :information_source: Why?
-
-Machine Learning (ML) in itself can be a complicated task; the steps taken from feature engineering to the deployment of the model requires carefully measured actions, and decisions. One low-hanging of fruit of easing this task is *performance evaluation*. In it's core, performance evaluation is essentially *just* a comparison of two vectors; a programmatically and, at times, mathematically trivial step in the ML pipeline. And therefore a {pkg} that implements performance evaluations of ML models can, and should, be proportional to the triviality of the application itself; ie. be efficient, fast, straightforward and simple. There should be no need to consider *quasiquations*, *dependencies*, *deprecations* or variations of the same functions relative to its arguments; it should be plug-and-play, and "just" work out of the box.
-
-Below is four arguments of why [{SLmetrics}](https://serkor1.github.io/SLmetrics/) should be considered in your ML pipeline,
+## :books: Table of Contents

-Firstly, [{SLmetrics}](https://serkor1.github.io/SLmetrics/) is *fast*. One, obviously, can't build an `R`-package on `C++` and [{Rcpp}](/~https://github.com/RcppCore/Rcpp) without a proper pissing contest at the urinals; a detailed [blog post](https://www.r-bloggers.com/) about the difference in speed has been posted on [R-bloggers](https://www.r-bloggers.com/).
For a quick summary see below,

+* [:rocket: Getting Started](#rocket-getting-started)
+  + [:shield: Installation](#shield-installation)
+  + [:books: Basic Usage](#books-basic-usage)
+* [:information_source: Why?](#information_source-why)
+* [:zap: Performance Comparison](#zap-performance-comparison)
+  + [:fast_forward: Speed comparison](#fast_forward-speed-comparison)
+  + [:floppy_disk: Memory-efficiency](#floppy_disk-memory-efficiency)
+* [:information_source: Basic usage](#information_source-basic-usage)
+  + [:books: Regression](#books-regression)
+  + [:books: Classification](#books-classification)
+* [:information_source: Installation](#information_source-installation)
+  + [:shield: Stable version](#shield-stable-version)
+  + [:hammer_and_wrench: Development version](#hammer_and_wrench-development-version)
+* [:information_source: Code of Conduct](#information_source-code-of-conduct)
- Showcase: speed comparison

-Below is two simple cases that any {pkg} should be able to handle gracefully; computing a confusion matrix and computing the root mean squared error. The source code of the performance test can be found [here](/~https://github.com/serkor1/SLmetrics/blob/main/data-raw/performance.R).

+## :rocket: Getting Started

-## Execution time: Computing a 2 x 2 Confusion Matrix
-
-```{r performance-classification, echo = FALSE}
-lattice::xyplot(
-  mean ~ sample_size,
-  data = DT[[1]],
-  groups = expr,
-  type = 'l',
-  auto.key = list(columns = 2, col = "#848e9c"),
-  scales = list(
-    y = list(log = FALSE) ,
-    x = list(log = FALSE)
-  ),
-  xlab = "Sample Size (N)",
-  ylab = "Mean Execution Time (Microseconds)",
-  panel = function(...) {
-    lattice::panel.grid(...)
-    lattice::panel.xyplot(...)
-  }
-)
-```

+Below you’ll find instructions to install [{SLmetrics}](https://serkor1.github.io/SLmetrics/) and get started with your first metric, the Root Mean Squared Error (RMSE).

-## Execution time: Computing the Root Mean Squared Error (RMSE)

+### :shield: Installation

-```{r performance-regression, echo = FALSE}
-lattice::xyplot(
-  mean ~ sample_size,
-  data = DT[[2]],
-  groups = expr,
-  type = 'l',
-  scales = list(
-    y = list(log = FALSE),
-    x = list(log = FALSE)
-  ),
-  auto.key = list(columns = 2, col = "#848e9c"),
-  xlab = "Sample Size (N)",
-  ylab = "Mean Execution Time (Microseconds)",
-  panel = function(...) {
-    lattice::panel.grid(...)
-    lattice::panel.xyplot(...)
-
-  }
+```{r, eval = FALSE}
+## install stable release
+devtools::install_github(
+  repo = '/~https://github.com/serkor1/SLmetrics@*release',
+  ref = 'main'
)
```

-In both cases the execution time is diverging in favor of [{SLmetrics}](https://serkor1.github.io/SLmetrics/); we promised speed and efficiency - and that is what you get.
-
-> In all fairness, {yardstick} is more defensive in its implementation of some of its
-> functions. However, the difference in the average runtime can't be entirely attributed to this element.
-
-
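After installing, a quick sanity check may be useful here; a minimal sketch, assuming nothing beyond the `Version` bump in `DESCRIPTION` above (`packageVersion()` is base R `utils`):

```r
# verify that the installed build matches the release in DESCRIPTION (0.3-0)
packageVersion("SLmetrics")
```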
- -Secondly, [{SLmetrics}](https://serkor1.github.io/SLmetrics/) is *simple* and *flexible* to use; it is based on `S3` and provides the most essential class-wise and aggregated metrics. +### :books: Basic Usage -
- Showcase: simplicity and flexibility
-
-Consider the classification problem below,
+Below is a minimal example demonstrating how to compute both unweighted and weighted RMSE.

```{r}
-# 1) actual classes
-actual <- factor(
-  x = sample(x = 1:3, size = 100, replace = TRUE,prob = c(0.25,0.5,0.5)),
-  levels = c(1:3),
-  labels = letters[1:3]
-)
+library(SLmetrics)

-# 2) predicted classes
-predicted <- factor(
-  x = sample(x = 1:3, size = 100, replace = TRUE,prob = c(0.5,0.25,0.25)),
-  levels = c(1:3),
-  labels = letters[1:3]
+actual <- c(10.2, 12.5, 14.1)
+predicted <- c(9.8, 11.5, 14.2)
+weights <- c(0.2, 0.5, 0.3)
+
+cat(
+  "Root Mean Squared Error", rmse(
+    actual = actual,
+    predicted = predicted
+  ),
+  "Root Mean Squared Error (weighted)", weighted.rmse(
+    actual = actual,
+    predicted = predicted,
+    w = weights
+  ),
+  sep = "\n"
)
```

-The `recall`, `precision` and `specificity` can be calculated as follows,
-
-```{r}
-# 1) recall
-recall(actual, predicted)
+That’s all! Now you can explore the rest of this README for in-depth usage, performance comparisons, and more details about [{SLmetrics}](https://serkor1.github.io/SLmetrics/).

-# 2) precision
-precision(actual, predicted)
+## :information_source: Why?

-# 3) specificity
-specificity(actual, predicted)
-```
+Machine learning can be a complicated task; the steps from feature engineering to model deployment require carefully measured actions and decisions. One low-hanging fruit to simplify this process is *performance evaluation*.

-Each function returns the class-wise metric; there is no need to specify the "positive" class - it just returns everything as defined by the `factor()`-function. The overall `recall`, for example, can be computed with a single `<[logical]>`-argument,
+At its core, performance evaluation is essentially just comparing two vectors — a programmatically and, at times, mathematically trivial step in the machine learning pipeline, but one that can become complicated due to:

-```{r}
-# 1) micro-averaged
-# recall
-recall(actual, predicted, micro = TRUE)
+1. Dependencies and potential deprecations
+2. Needlessly complex or repetitive arguments
+3. Performance and memory bottlenecks at scale

-# 2) macro-averaged
-# recall
-recall(actual, predicted, micro = FALSE)
-```
+[{SLmetrics}](https://serkor1.github.io/SLmetrics/) solves these issues by being:
+1. **Fast:** Powered by `C++` and [Rcpp](/~https://github.com/RcppCore/Rcpp)
+2. **Memory-efficient:** Everything is structured around pointers and references
+3. **Lightweight:** Only depends on [Rcpp](/~https://github.com/RcppCore/Rcpp), [RcppEigen](/~https://github.com/RcppCore/RcppEigen), and [lattice](/~https://github.com/deepayan/lattice)
+4. **Simple:** S3-based, minimal overhead, and flexible inputs

-However, it is not efficient to loop through the entire range of the `actual`- and `predicted`-vector to calculate three metrics; we could just pass the functions a confusion matrix, and base the calculations off of that as below,
+Performance evaluation should be plug-and-play and “just work” out of the box — there’s no need to worry about *quasiquations*, *dependencies*, *deprecations*, or variations of the same functions relative to their arguments when using [{SLmetrics}](https://serkor1.github.io/SLmetrics/).
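To make the "Simple" bullet concrete, here is a minimal sketch of the S3 dispatch described above: the same generic accepts either raw factor vectors or a precomputed confusion matrix, and both calls are expected to agree. Both methods (`accuracy.factor`, `accuracy.cmatrix`) are exported in the `NAMESPACE` shown earlier in this diff.

```r
library(SLmetrics)

# two factor vectors
actual    <- factor(sample(letters[1:2], size = 50, replace = TRUE))
predicted <- factor(sample(letters[1:2], size = 50, replace = TRUE))

# same generic, two input types
accuracy(actual, predicted)          # factor method
accuracy(cmatrix(actual, predicted)) # cmatrix method
```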
-```{r} -# 0) confusion matrix -confusion_matrix <- cmatrix( - actual, - predicted -) +## :zap: Performance Comparison -# 1) recall -recall(confusion_matrix) +One, obviously, can't build an `R`-package on `C++` and [{Rcpp}](/~https://github.com/RcppCore/Rcpp) without a proper pissing contest at the urinals - below is a comparison in execution time and memory efficiency of two simple cases that any {pkg} should be able to handle gracefully; computing a 2 x 2 confusion matrix and computing the RMSE[^1]. -# 2) precision -precision(confusion_matrix) +### :fast_forward: Speed comparison -# 3) specificity -specificity(confusion_matrix) +```{r performance, echo = FALSE} +lattice::xyplot( + median ~ sample_size | measure, + data = do.call(rbind, DT$speed), + groups = expr, + type = 'l', + auto.key = list(columns = 2, col = "#848e9c"), + scales = list( + y = list(log = FALSE, relation = "free"), + x = list(log = FALSE) + ), + xlab = "Sample Size (N)", + ylab = "Median Execution Time (Microseconds)", + panel = function(...) { + lattice::panel.grid(...) + lattice::panel.xyplot(...) + } +) ``` -It is the same call and metric with slightly different arguments; this is the power and simplicity of `S3`. +As shown in the chart, [{SLmetrics}](https://serkor1.github.io/SLmetrics/) maintains consistently low(er) execution times across different sample sizes. +### :floppy_disk: Memory-efficiency -
+Below are the results for garbage collections and total memory allocations when computing a 2×2 confusion matrix (N = 1e7) and RMSE (N = 1e7). Notice that [{SLmetrics}](https://serkor1.github.io/SLmetrics/) requires no GC calls for these operations. -Thirdly, [{SLmetrics}](https://serkor1.github.io/SLmetrics/) is *low level* and free of any *{pkg}verse*-regimes; this provides the freedom to develop it further as a part of your own {pkg}, or use it in any tidy, or untidy, pipeline you would want to. - -
- Showcase: Low level and (in)dependency - -Currently [{SLmetrics}](https://serkor1.github.io/SLmetrics/) depends on three {pkgs}; [{Rcpp}](/~https://github.com/RcppCore/Rcpp), [{RcppEigen}](/~https://github.com/RcppCore/RcppEigen) and [{lattice}](/~https://github.com/deepayan/lattice). Three incredibly stable, flexible and efficient R packages. There is basically zero risk of downstream breaking changes, {pkg} bloating and/or compatibility issues. +```{r prepare-memory, echo = FALSE} +# 1) prepare data +packages <- c("{SLmetrics}", "{yardstick}", "{MLmetrics}", "{mlr3measures}") +measures <- c("Confusion Matrix", "Root Mean Squared Error") +column_names <- c("Iterations", "Garbage Collections [gc()]", "gc() pr. second", "Memory Allocation (MB)") +``` -The source code of [{SLmetrics}](https://serkor1.github.io/SLmetrics/) are primarily made up of unrolled loops and matrix algebra using [{RcppEigen}](/~https://github.com/RcppCore/RcppEigen). There is, at most, one conversion between `R` and `C++` compatible objects without redundant type-checks, or various mapping functions; this makes [{SLmetrics}](https://serkor1.github.io/SLmetrics/) lightweight and ideal for high-speed computing. +```{r, echo = FALSE} +# 1) extract data; +DT_ <- DT$memory[[1]][,c("n_itr", "n_gc", "gc/sec", "mem_alloc")] +DT_$mem_alloc <- round(DT_$mem_alloc/(1024^2)) +colnames(DT_) <- column_names +rownames(DT_) <- packages -
+knitr::kable(DT_, caption = "2 x 2 Confusion Matrix (N = 1e7)", digits = 2) +``` -Fourthly, [{SLmetrics}](https://serkor1.github.io/SLmetrics/) has a *larger* repertoire of supervised machine learning metrics; all of which has been battle tested with [{scikit-learn}](/~https://github.com/scikit-learn/scikit-learn) and [{pytorch}](/~https://github.com/pytorch/pytorch) against [{yardstick}](/~https://github.com/tidymodels/yardstick), [{mlr3measures}](/~https://github.com/mlr-org/mlr3measures) and [{MLmetrics}](/~https://github.com/yanyachen/MLmetrics). +```{r, echo = FALSE} +# 1) extract data; +DT_ <- DT$memory[[2]][,c("n_itr", "n_gc", "gc/sec", "mem_alloc")] +DT_$mem_alloc <- round(DT_$mem_alloc/(1024^2)) +colnames(DT_) <- column_names +rownames(DT_) <- packages -
- Showcase: repertoire and unit-testing +knitr::kable(DT_, caption = "RMSE (N = 1e7)", digits = 2) +``` -[{SLmetrics}](https://serkor1.github.io/SLmetrics/) is build as the `R`-version of [{scikit-learn}](/~https://github.com/scikit-learn/scikit-learn) but with a larger focus on versatility, speed and the simplicity of `R`. All the functions implemented in [{SLmetrics}](https://serkor1.github.io/SLmetrics/) are tested using [{scikit-learn}](/~https://github.com/scikit-learn/scikit-learn) and [{pytorch}](/~https://github.com/pytorch/pytorch) as reference values. +In both tasks, [{SLmetrics}](https://serkor1.github.io/SLmetrics/) remains extremely memory-efficient, even at large sample sizes. -
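The garbage-collection and allocation figures above are measured with [{bench}](/~https://github.com/r-lib/bench). A reduced sketch of the relevant part of `data-raw/performance.R` (the full version appears later in this diff) looks as follows:

```r
# Sketch of the RMSE memory benchmark (N = 1e7), per data-raw/performance.R
actual    <- rnorm(n = 1e7)
predicted <- actual + rnorm(n = 1e7)

bench::mark(
  `{SLmetrics}`    = SLmetrics::rmse(actual, predicted),
  `{yardstick}`    = yardstick::rmse_vec(actual, predicted),
  `{MLmetrics}`    = MLmetrics::RMSE(predicted, actual),
  `{mlr3measures}` = mlr3measures::rmse(actual, predicted),
  check      = FALSE, # do not compare return values across packages
  iterations = 100
)
```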
+> [!IMPORTANT]
+>
+> From [{bench}](/~https://github.com/r-lib/bench) documentation: *Total amount of memory allocated by R while running the expression. Memory allocated outside the R heap, e.g. by `malloc()` or new directly is not tracked, take care to avoid misinterpreting the results if running code that may do this.*
## :information_source: Basic usage
-In its most basic form the functions can be used as-is without any pipelines, data.frames or recipes. Below are two simple examples.
+In its simplest form, [{SLmetrics}](https://serkor1.github.io/SLmetrics/)-functions work directly with pairs of `<numeric>`-vectors (for regression) or `<factor>`-vectors (for classification). Below we demonstrate this on two well-known datasets, `mtcars` (regression) and `iris` (classification).
### :books: Regression
-Below is an example evaluating the in-sample performance of a linear regression on `mpg` from the `mtcars` data set,
+We first fit a linear model to predict `mpg` in the `mtcars` dataset, then compute the in-sample RMSE:
```{r mtcars regression (example)}
-# 1) run regression
-model <- lm(
- formula = mpg ~ .,
- data = mtcars
-)
-
-# 2) evaluate RMSE
-rmse(
- actual = mtcars$mpg,
- predicted = fitted(model)
-)
+# Evaluate a linear model on mpg (mtcars)
+model <- lm(mpg ~ ., data = mtcars)
+rmse(mtcars$mpg, fitted(model))
```
### :books: Classification
-Below is an example evaluating the in-sample performance of a logistic regression on `Species` from the `iris` data set,
+Now we recode the `iris` dataset into a binary problem ("virginica" vs. "others") and fit a logistic regression. Then we generate predicted classes, compute the confusion matrix and summarize it.
```{r iris classification (example)}
# 1) recode iris
@@ -288,53 +262,6 @@ summary(
)
```
-```{r ROC}
-# 5) generate
-# roc object
-summary(
- roc <- ROC(
- actual = actual,
- response = predict(model, type = "response")
- )
-)
-
-# 6) plot roc
-# object
-plot(roc)
-```
-
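Beyond `summary()`, the individual class-wise metrics can be read off the same object. A short sketch (an editor's addition, using the `actual` and `predicted` factors created above): the confusion matrix is computed once with `cmatrix()`, and the metric functions then accept it through their `cmatrix` S3 methods, so nothing is recomputed from the raw vectors.

```r
# Sketch: build the confusion matrix once, then reuse it across metrics
confusion_matrix <- cmatrix(actual, predicted)

recall(confusion_matrix)      # class-wise recall
precision(confusion_matrix)   # class-wise precision
specificity(confusion_matrix) # class-wise specificity
```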
- Class-wise and aggregated metrics - -__Classwise specificity__ - -```{r sensitivity (class-wise)} -sensitivity( - confusion_matrix, - micro = NULL -) -``` - -__Micro averaged specificity__ - -```{r sensitivity (micro)} -sensitivity( - confusion_matrix, - micro = TRUE -) -``` - -__Macro averaged specificity__ - -```{r sensitivity (macro)} -sensitivity( - confusion_matrix, - micro = FALSE -) -``` - -
- 
## :information_source: Installation
### :shield: Stable version
@@ -358,3 +285,6 @@ devtools::install_github(
## :information_source: Code of Conduct
Please note that the [{SLmetrics}](https://serkor1.github.io/SLmetrics/) project is released with a [Contributor Code of Conduct](https://contributor-covenant.org/version/2/1/CODE_OF_CONDUCT.html). By contributing to this project, you agree to abide by its terms.
+
+
+[^1]: The source code for these benchmarks is available [here](/~https://github.com/serkor1/SLmetrics/blob/main/data-raw/performance.R). \ No newline at end of file
diff --git a/README.md b/README.md
index fa45a412..b4feb2fb 100644
--- a/README.md
+++ b/README.md
@@ -15,18 +15,19 @@ experimental](https://img.shields.io/badge/lifecycle-experimental-orange.svg)](h
[![codecov](https://codecov.io/gh/serkor1/SLmetrics/branch/development/graph/badge.svg?token=X2osJDSRlN)](https://codecov.io/gh/serkor1/SLmetrics)
-[{SLmetrics}](https://serkor1.github.io/SLmetrics/) is a collection of
-*lightning fast* performance evaluation metrics for regression and
-classification models written in `C++` and
-[{Rcpp}](/~https://github.com/RcppCore/Rcpp); it’s like using a
-supercharged [{yardstick}](/~https://github.com/tidymodels/yardstick) to
-measure model performance, without the risk of soft to super-hard
-deprecations. [{SLmetrics}](https://serkor1.github.io/SLmetrics/)
-provides (almost) the same array of metrics as in
+[{SLmetrics}](https://serkor1.github.io/SLmetrics/) is a lightweight `R`
+package written in `C++` and [{Rcpp}](/~https://github.com/RcppCore/Rcpp)
+for *memory-efficient* and *lightning-fast* machine learning performance
+evaluation; it’s like using a supercharged
+[{yardstick}](/~https://github.com/tidymodels/yardstick) but without the
+risk of soft to super-hard deprecations.
+[{SLmetrics}](https://serkor1.github.io/SLmetrics/) covers both
+regression and classification metrics and provides (almost) the same
+array of metrics as
[{scikit-learn}](/~https://github.com/scikit-learn/scikit-learn) and
-[{pytorch}](/~https://github.com/pytorch/pytorch) but without having to
-[{reticulate}](/~https://github.com/rstudio/reticulate) or go through the
-whole compile, run and debug cycle in `Python`.
+[{PyTorch}](/~https://github.com/pytorch/pytorch), all without
+[{reticulate}](/~https://github.com/rstudio/reticulate) and the Python
+compile-run-(crash)-debug cycle.
Depending on the mood and alignment of planets [{SLmetrics}](https://serkor1.github.io/SLmetrics/) stands for
@@ -36,248 +37,189 @@ latter will be the core philosophy and include unsupervised learning metrics. If not, then it will remain a {pkg} for Supervised Learning metrics, and a sandbox for me to develop my `C++` skills.
-## :information_source: Why?
-
-Machine Learning (ML) in itself can be a complicated task; the steps
-taken from feature engineering to the deployment of the model requires
-carefully measured actions, and decisions. One low-hanging of fruit of
-easing this task is *performance evaluation*. In it’s core, performance
-evaluation is essentially *just* a comparison of two vectors; a
-programmatically and, at times, mathematically trivial step in the ML
-pipeline. And therefore a {pkg} that implements performance evaluations
-of ML models can, and should, be proportional to the triviality of the
-application itself; ie. be efficient, fast, straightforward and simple.
-There should be no need to consider *quasiquations*, *dependencies*, -*deprecations* or variations of the same functions relative to its -arguments; it should be plug-and-play, and “just” work out of the box. - -Below is four arguments of why -[{SLmetrics}](https://serkor1.github.io/SLmetrics/) should be considered -in your ML pipeline, - -Firstly, [{SLmetrics}](https://serkor1.github.io/SLmetrics/) is *fast*. -One, obviously, can’t build an `R`-package on `C++` and -[{Rcpp}](/~https://github.com/RcppCore/Rcpp) without a proper pissing -contest at the urinals; a detailed [blog -post](https://www.r-bloggers.com/) about the difference in speed has -been posted on [R-bloggers](https://www.r-bloggers.com/). For a quick -summary see below, - -
- - - -Showcase: speed comparison - - -Below is two simple cases that any {pkg} should be able to handle -gracefully; computing a confusion matrix and computing the root mean -squared error. The source code of the performance test can be found -[here](/~https://github.com/serkor1/SLmetrics/blob/main/data-raw/performance.R). - -## Execution time: Computing a 2 x 2 Confusion Matrix - - - -## Execution time: Computing the Root Mean Squared Error (RMSE) - - - -In both cases the execution time is diverging in favor of -[{SLmetrics}](https://serkor1.github.io/SLmetrics/); we promised speed -and efficiency - and that is what you get. - -> In all fairness, {yardstick} is more defensive in its implementation -> of some of its functions. However, the difference in the average -> runtime can’t be entirely attributed to this element. - -
- -Secondly, [{SLmetrics}](https://serkor1.github.io/SLmetrics/) is -*simple* and *flexible* to use; it is based on `S3` and provides the -most essential class-wise and aggregated metrics. - -
- 
- 
-Showcase: simplicity and flexibility
- 
- 
-Consider the classification problem below,
+## :books: Table of Contents
+
+- [:rocket: Getting Started](#rocket-getting-started)
+  - [:shield: Installation](#shield-installation)
+  - [:books: Basic Usage](#books-basic-usage)
+- [:information_source: Why?](#information_source-why)
+- [:zap: Performance Comparison](#zap-performance-comparison)
+  - [:fast_forward: Speed comparison](#fast_forward-speed-comparison)
+  - [:floppy_disk: Memory-efficiency](#floppy_disk-memory-efficiency)
+- [:information_source: Basic usage](#information_source-basic-usage)
+  - [:books: Regression](#books-regression)
+  - [:books: Classification](#books-classification)
+- [:information_source: Installation](#information_source-installation)
+  - [:shield: Stable version](#shield-stable-version)
+  - [:hammer_and_wrench: Development
+    version](#hammer_and_wrench-development-version)
+- [:information_source: Code of
+  Conduct](#information_source-code-of-conduct)
+
+## :rocket: Getting Started
+
+Below you’ll find instructions to install
+[{SLmetrics}](https://serkor1.github.io/SLmetrics/) and get started with
+your first metric, the Root Mean Squared Error (RMSE).
+
+### :shield: Installation
``` r
-# 1) actual classes
-actual <- factor(
- x = sample(x = 1:3, size = 100, replace = TRUE,prob = c(0.25,0.5,0.5)),
- levels = c(1:3),
- labels = letters[1:3]
-)
-
-# 2) predicted classes
-predicted <- factor(
- x = sample(x = 1:3, size = 100, replace = TRUE,prob = c(0.5,0.25,0.25)),
- levels = c(1:3),
- labels = letters[1:3]
+## install stable release
+devtools::install_github(
+ repo = '/~https://github.com/serkor1/SLmetrics@*release',
+ ref = 'main'
)
```
-The `recall`, `precision` and `specificity` can be calculated as
-follows,
+### :books: Basic Usage
-``` r
-# 1) recall
-recall(actual, predicted)
-#> a b c
-#> 0.4736842 0.2444444 0.2500000
-
-# 2) precision
-precision(actual, predicted)
-#> a b c
-#> 0.1764706 0.4782609 0.3461538
-
-# 3) specificity
-specificity(actual, predicted)
-#> a b c
-#> 0.4814815 0.7818182 0.7343750
-```
-
-Each function returns the class-wise metric; there is no need to specify
-the “positive” class - it just returns everything as defined by the
-`factor()`-function. The overall `recall`, for example, can be computed
-with a single `<[logical]>`-argument,
+Below is a minimal example demonstrating how to compute both unweighted
+and weighted RMSE.
``` r
-# 1) micro-averaged
-# recall
-recall(actual, predicted, micro = TRUE)
-#> [1] 0.29
-
-# 2) macro-averaged
-# recall
-recall(actual, predicted, micro = FALSE)
-#> [1] 0.3227096
-```
+library(SLmetrics)
-However, it is not efficient to loop through the entire range of the
-`actual`- and `predicted`-vector to calculate three metrics; we could
-just pass the functions a confusion matrix, and base the calculations
-off of that as below,
+actual <- c(10.2, 12.5, 14.1)
+predicted <- c(9.8, 11.5, 14.2)
+weights <- c(0.2, 0.5, 0.3)
-``` r
-# 0) confusion matrix
-confusion_matrix <- cmatrix(
- actual,
- predicted
+cat(
+ "Root Mean Squared Error", rmse(
+ actual = actual,
+ predicted = predicted
+ ),
+ "Root Mean Squared Error (weighted)", weighted.rmse(
+ actual = actual,
+ predicted = predicted,
+ w = weights
+ ),
+ sep = "\n"
)
-
-# 1) recall
-recall(confusion_matrix)
-#> a b c
-#> 0.4736842 0.2444444 0.2500000
-
-# 2) precision
-precision(confusion_matrix)
-#> a b c
-#> 0.1764706 0.4782609 0.3461538
-
-# 3) specificity
-specificity(confusion_matrix)
-#> a b c
-#> 0.4814815 0.7818182 0.7343750
+#> Root Mean Squared Error
+#> 0.6244998
+#> Root Mean Squared Error (weighted)
+#> 0.7314369
```
-It is the same call and metric with slightly different arguments; this
-is the power and simplicity of `S3`.
+That’s all! Now you can explore the rest of this README for in-depth
+usage, performance comparisons, and more details about
+[{SLmetrics}](https://serkor1.github.io/SLmetrics/).
-
+## :information_source: Why?
-Thirdly, [{SLmetrics}](https://serkor1.github.io/SLmetrics/) is *low
-level* and free of any *{pkg}verse*-regimes; this provides the freedom
-to develop it further as a part of your own {pkg}, or use it in any
-tidy, or untidy, pipeline you would want to.
+Machine learning can be a complicated task; the steps from feature
+engineering to model deployment require carefully measured actions and
+decisions. One low-hanging fruit to simplify this process is
+*performance evaluation*.
+
+At its core, performance evaluation is essentially just comparing two
+vectors — a programmatically and, at times, mathematically trivial step
+in the machine learning pipeline, but one that can become complicated
+due to:
+
+1. Dependencies and potential deprecations
+2. Needlessly complex or repetitive arguments
+3. Performance and memory bottlenecks at scale
+
+[{SLmetrics}](https://serkor1.github.io/SLmetrics/) solves these issues
+by being:
+
+1. **Fast:** Powered by `C++` and
+   [Rcpp](/~https://github.com/RcppCore/Rcpp)
+2. **Memory-efficient:** Everything is structured around pointers and
+   references
+3. **Lightweight:** Only depends on
+   [Rcpp](/~https://github.com/RcppCore/Rcpp),
+   [RcppEigen](/~https://github.com/RcppCore/RcppEigen), and
+   [lattice](/~https://github.com/deepayan/lattice)
+4. **Simple:** S3-based, minimal overhead, and flexible inputs
+
+Performance evaluation should be plug-and-play and “just work” out of
+the box — there’s no need to worry about *quasiquotations*,
+*dependencies*, *deprecations*, or variations of the same functions
+relative to their arguments when using
+[{SLmetrics}](https://serkor1.github.io/SLmetrics/).
+
+## :zap: Performance Comparison
-
+One, obviously, can’t build an `R`-package on `C++` and +[{Rcpp}](/~https://github.com/RcppCore/Rcpp) without a proper pissing +contest at the urinals - below is a comparison in execution time and +memory efficiency of two simple cases that any {pkg} should be able to +handle gracefully; computing a 2 x 2 confusion matrix and computing the +RMSE[^1]. - +### :fast_forward: Speed comparison -Showcase: Low level and (in)dependency - + -Currently [{SLmetrics}](https://serkor1.github.io/SLmetrics/) depends on -three {pkgs}; [{Rcpp}](/~https://github.com/RcppCore/Rcpp), -[{RcppEigen}](/~https://github.com/RcppCore/RcppEigen) and -[{lattice}](/~https://github.com/deepayan/lattice). Three incredibly -stable, flexible and efficient R packages. There is basically zero risk -of downstream breaking changes, {pkg} bloating and/or compatibility -issues. +As shown in the chart, +[{SLmetrics}](https://serkor1.github.io/SLmetrics/) maintains +consistently low(er) execution times across different sample sizes. -The source code of [{SLmetrics}](https://serkor1.github.io/SLmetrics/) -are primarily made up of unrolled loops and matrix algebra using -[{RcppEigen}](/~https://github.com/RcppCore/RcppEigen). There is, at most, -one conversion between `R` and `C++` compatible objects without -redundant type-checks, or various mapping functions; this makes -[{SLmetrics}](https://serkor1.github.io/SLmetrics/) lightweight and -ideal for high-speed computing. +### :floppy_disk: Memory-efficiency -
+Below are the results for garbage collections and total memory +allocations when computing a 2×2 confusion matrix (N = 1e7) and RMSE (N += 1e7). Notice that [{SLmetrics}](https://serkor1.github.io/SLmetrics/) +requires no GC calls for these operations. -Fourthly, [{SLmetrics}](https://serkor1.github.io/SLmetrics/) has a -*larger* repertoire of supervised machine learning metrics; all of which -has been battle tested with -[{scikit-learn}](/~https://github.com/scikit-learn/scikit-learn) and -[{pytorch}](/~https://github.com/pytorch/pytorch) against -[{yardstick}](/~https://github.com/tidymodels/yardstick), -[{mlr3measures}](/~https://github.com/mlr-org/mlr3measures) and -[{MLmetrics}](/~https://github.com/yanyachen/MLmetrics). +| | Iterations | Garbage Collections \[gc()\] | gc() pr. second | Memory Allocation (MB) | +|:---|---:|---:|---:|---:| +| {SLmetrics} | 100 | 0 | 0.00 | 0 | +| {yardstick} | 100 | 186 | 4.53 | 381 | +| {MLmetrics} | 100 | 186 | 4.47 | 381 | +| {mlr3measures} | 100 | 386 | 3.57 | 916 | -
+2 x 2 Confusion Matrix (N = 1e7) - +| | Iterations | Garbage Collections \[gc()\] | gc() pr. second | Memory Allocation (MB) | +|:---|---:|---:|---:|---:| +| {SLmetrics} | 100 | 0 | 0.00 | 0 | +| {yardstick} | 100 | 157 | 4.47 | 420 | +| {MLmetrics} | 100 | 19 | 2.39 | 76 | +| {mlr3measures} | 100 | 12 | 1.27 | 76 | -Showcase: repertoire and unit-testing - +RMSE (N = 1e7) -[{SLmetrics}](https://serkor1.github.io/SLmetrics/) is build as the -`R`-version of -[{scikit-learn}](/~https://github.com/scikit-learn/scikit-learn) but with -a larger focus on versatility, speed and the simplicity of `R`. All the -functions implemented in -[{SLmetrics}](https://serkor1.github.io/SLmetrics/) are tested using -[{scikit-learn}](/~https://github.com/scikit-learn/scikit-learn) and -[{pytorch}](/~https://github.com/pytorch/pytorch) as reference values. +In both tasks, [{SLmetrics}](https://serkor1.github.io/SLmetrics/) +remains extremely memory-efficient, even at large sample sizes. -
+> \[!IMPORTANT\]
+>
+> From [{bench}](/~https://github.com/r-lib/bench) documentation: *Total
+> amount of memory allocated by R while running the expression. Memory
+> allocated outside the R heap, e.g. by `malloc()` or new directly is
+> not tracked, take care to avoid misinterpreting the results if running
+> code that may do this.*
## :information_source: Basic usage
-In its most basic form the functions can be used as-is without any
-pipelines, data.frames or recipes. Below are two simple examples.
+In its simplest form,
+[{SLmetrics}](https://serkor1.github.io/SLmetrics/)-functions work
+directly with pairs of `<numeric>`-vectors (for regression) or
+`<factor>`-vectors (for classification). Below we demonstrate this on
+two well-known datasets, `mtcars` (regression) and `iris`
+(classification).
### :books: Regression
-Below is an example evaluating the in-sample performance of a linear
-regression on `mpg` from the `mtcars` data set,
+We first fit a linear model to predict `mpg` in the `mtcars` dataset,
+then compute the in-sample RMSE:
``` r
-# 1) run regression
-model <- lm(
- formula = mpg ~ .,
- data = mtcars
-)
-
-# 2) evaluate RMSE
-rmse(
- actual = mtcars$mpg,
- predicted = fitted(model)
-)
+# Evaluate a linear model on mpg (mtcars)
+model <- lm(mpg ~ ., data = mtcars)
+rmse(mtcars$mpg, fitted(model))
#> [1] 2.146905
```
### :books: Classification
-Below is an example evaluating the in-sample performance of a logistic
-regression on `Species` from the `iris` data set,
+Now we recode the `iris` dataset into a binary problem (“virginica”
+vs. “others”) and fit a logistic regression. Then we generate predicted
+classes, compute the confusion matrix and summarize it.
``` r
# 1) recode iris
@@ -338,68 +280,6 @@ summary(
#> - Precision: 0.81
-``` r
-# 5) generate
-# roc object
-summary(
- roc <- ROC(
- actual = actual,
- response = predict(model, type = "response")
- )
-)
-#> Reciever Operator Characteristics
-#> ================================================================================
-#> AUC
-#> - Others: 0.116
-#> - Virginica: 0.887
-
-# 6) plot roc
-# object
-plot(roc)
-```
-
-
- - - -Class-wise and aggregated metrics - - -**Classwise specificity** - -``` r -sensitivity( - confusion_matrix, - micro = NULL -) -#> Virginica Others -#> 0.70 0.86 -``` - -**Micro averaged specificity** - -``` r -sensitivity( - confusion_matrix, - micro = TRUE -) -#> [1] 0.8066667 -``` - -**Macro averaged specificity** - -``` r -sensitivity( - confusion_matrix, - micro = FALSE -) -#> [1] 0.78 -``` - -
- ## :information_source: Installation ### :shield: Stable version @@ -428,3 +308,6 @@ Please note that the [{SLmetrics}](https://serkor1.github.io/SLmetrics/) project is released with a [Contributor Code of Conduct](https://contributor-covenant.org/version/2/1/CODE_OF_CONDUCT.html). By contributing to this project, you agree to abide by its terms. + +[^1]: The source code for these benchmarks is available + [here](/~https://github.com/serkor1/SLmetrics/blob/main/data-raw/performance.R). diff --git a/data-raw/performance.R b/data-raw/performance.R index 36f1a471..ce7a7d37 100644 --- a/data-raw/performance.R +++ b/data-raw/performance.R @@ -44,10 +44,10 @@ for (n in N) { , .( sample_size = n, - classes = k, - mean = mean( + median = median( time - ) + ), + measure = "Confusion Matrix" ) , by = .( @@ -89,9 +89,10 @@ for (n in N) { , .( sample_size = n, - mean = mean( + median = median( time - ) + ), + measure = "Root Mean Squared Error" ) , by = .( @@ -108,11 +109,49 @@ for (n in N) { # 3) collect results # in data.table DT <- list( - confusion_matrix = data.table::rbindlist(confusion_matrix_performance), - rmse = data.table::rbindlist(rmse_performance) + speed = list( + confusion_matrix = data.table::rbindlist(confusion_matrix_performance), + rmse = data.table::rbindlist(rmse_performance) + ) ) -# 4) write data for +# 4) test memory usage +memory <- list() + +# 4.1) Confusion Matrix +actual <- create_factor(k = 2, n = 1e7) +predicted <- create_factor(k = 2, n = 1e7) + +test_results <- bench::mark( + `{SLmetrics}` = SLmetrics::cmatrix(actual, predicted), + `{yardstick}` = yardstick::conf_mat(table(actual, predicted)), + `{MLmetrics}` = MLmetrics::ConfusionMatrix(predicted, actual), + `{mlr3measures}` = mlr3measures::confusion_matrix(predicted, actual, positive = "a"), + check = FALSE, + iterations = 100 +) + +memory$confusion_matrix <- test_results + +# 4.2) RMSE +actual <- rnorm(n = 1e7) +predicted <- actual + rnorm(n = 1e7) + +test_results <- bench::mark( + `{SLmetrics}` = SLmetrics::rmse(actual, predicted), + `{yardstick}` = yardstick::rmse_vec(actual, predicted), + `{MLmetrics}` = MLmetrics::RMSE(predicted, actual), + `{mlr3measures}` = mlr3measures::rmse(actual, predicted), + check = FALSE, + iterations = 100 +) + +memory$rmse <- test_results + +# 4.3) append to list +DT$memory <- memory + +# 5) write data for # internal usage usethis::use_data( DT, diff --git a/man/ROC.Rd b/man/ROC.Rd index c8c991c3..3c8e7349 100644 --- a/man/ROC.Rd +++ b/man/ROC.Rd @@ -3,34 +3,34 @@ % R/S3_RecieverOperatorCharacteristics.R \name{ROC.factor} \alias{ROC.factor} +\alias{weighted.ROC.factor} \alias{ROC} +\alias{weighted.ROC} \title{Compute the \eqn{\text{reciever}} \eqn{\text{operator}} \eqn{\text{characteristics}}} \usage{ -\method{ROC}{factor}(actual, response, micro = NULL, thresholds = NULL, na.rm = TRUE, ...) +\method{ROC}{factor}(actual, response, thresholds = NULL, ...) + +\method{weighted.ROC}{factor}(actual, response, w, thresholds = NULL, ...) ROC(...) + +weighted.ROC(...) } \arguments{ \item{actual}{A vector of <\link{factor}>- of \link{length} \eqn{n}, and \eqn{k} levels.} \item{response}{A <\link{numeric}>-vector of \link{length} \eqn{n}. The estimated response probabilities.} -\item{micro}{A <\link{logical}>-value of \link{length} \eqn{1} (default: \link{NULL}). 
If \link{TRUE} it returns the -micro average across all \eqn{k} classes, if \link{FALSE} it returns the macro average.} - \item{thresholds}{An optional <\link{numeric}>-vector of non-zero \link{length} (default: \link{NULL}).} -\item{na.rm}{A <\link{logical}> value of \link{length} \eqn{1} (default: \link{TRUE}). If \link{TRUE}, \link{NA} values are removed from the computation. -This argument is only relevant when \code{micro != NULL}. -When \code{na.rm = TRUE}, the computation corresponds to \code{sum(c(1, 2, NA), na.rm = TRUE) / length(na.omit(c(1, 2, NA)))}. -When \code{na.rm = FALSE}, the computation corresponds to \code{sum(c(1, 2, NA), na.rm = TRUE) / length(c(1, 2, NA))}.} - \item{...}{Arguments passed into other methods.} + +\item{w}{A <\link{numeric}>-vector of \link{length} \eqn{n}. \link{NULL} by default.} } \value{ A \link{data.frame} on the following form, -\item{thresholds}{<\link{numeric}> Thresholds used to determine \code{\link[=tpr]{tpr()}} and \code{\link[=fpr]{fpr()}}} +\item{threshold}{<\link{numeric}> Thresholds used to determine \code{\link[=tpr]{tpr()}} and \code{\link[=fpr]{fpr()}}} \item{level}{<\link{character}> The level of the actual <\link{factor}>} \item{label}{<\link{character}> The levels of the actual <\link{factor}>} \item{fpr}{<\link{numeric}> The false positive rate} @@ -150,6 +150,7 @@ Other Classification: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -173,6 +174,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -194,6 +196,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/accuracy.Rd b/man/accuracy.Rd index c520a36b..a541a99a 100644 --- a/man/accuracy.Rd +++ b/man/accuracy.Rd @@ -144,6 +144,7 @@ Other Classification: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -167,6 +168,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -188,6 +190,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/baccuracy.Rd b/man/baccuracy.Rd index 3d80b11a..3973af1b 100644 --- a/man/baccuracy.Rd +++ b/man/baccuracy.Rd @@ -147,6 +147,7 @@ Other Classification: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -170,6 +171,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, 
\code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -191,6 +193,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/ccc.Rd b/man/ccc.Rd index 687f93ef..c384afb6 100644 --- a/man/ccc.Rd +++ b/man/ccc.Rd @@ -95,6 +95,7 @@ Other Regression: \code{\link{rae.numeric}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()} @@ -106,6 +107,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -127,6 +129,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/ckappa.Rd b/man/ckappa.Rd index e8e721b5..0e5ac2e4 100644 --- a/man/ckappa.Rd +++ b/man/ckappa.Rd @@ -152,6 +152,7 @@ Other Classification: \code{\link{baccuracy.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -175,6 +176,7 @@ Other Supervised Learning: \code{\link{ccc.numeric}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -196,6 +198,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/cmatrix.Rd b/man/cmatrix.Rd index aecfd2f1..2638e3d9 100644 --- a/man/cmatrix.Rd +++ b/man/cmatrix.Rd @@ -2,21 +2,27 @@ % Please edit documentation in R/RcppExports.R, R/S3_ConfusionMatrix.R \name{cmatrix.factor} \alias{cmatrix.factor} +\alias{weighted.cmatrix.factor} \alias{cmatrix} +\alias{weighted.cmatrix} \title{Confusion Matrix} \usage{ -\method{cmatrix}{factor}(actual, predicted, w = NULL, ...) +\method{cmatrix}{factor}(actual, predicted, ...) + +\method{weighted.cmatrix}{factor}(actual, predicted, w, ...) cmatrix(...) + +weighted.cmatrix(...) 
} \arguments{ \item{actual}{A <\link{factor}>-vector of \link{length} \eqn{n}, and \eqn{k} levels.} \item{predicted}{A <\link{factor}>-vector of \link{length} \eqn{n}, and \eqn{k} levels.} -\item{w}{A <\link{numeric}>-vector of \link{length} \eqn{n} (default: \link{NULL}) If passed it will return a weighted confusion matrix.} - \item{...}{Arguments passed into other methods.} + +\item{w}{A <\link{numeric}>-vector of \link{length} \eqn{n} (default: \link{NULL}) If passed it will return a weighted confusion matrix.} } \value{ A named \eqn{k} x \eqn{k} <\link{matrix}> of \link{class} \if{html}{\out{}} @@ -96,7 +102,7 @@ plot( ) # 4.2) weighted matrix -confusion_matrix <- cmatrix( +confusion_matrix <- weighted.cmatrix( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) @@ -120,6 +126,7 @@ Other Classification: \code{\link{baccuracy.factor}()}, \code{\link{ckappa.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -143,6 +150,7 @@ Other Supervised Learning: \code{\link{ccc.numeric}()}, \code{\link{ckappa.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -164,6 +172,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/dor.Rd b/man/dor.Rd index 1640919c..f795e16a 100644 --- a/man/dor.Rd +++ b/man/dor.Rd @@ -8,11 +8,11 @@ \alias{weighted.dor} \title{Compute the \eqn{\text{diagnostic}} \eqn{\text{odds}} \eqn{\text{ratio}}} \usage{ -\method{dor}{factor}(actual, predicted, micro = NULL, ...) +\method{dor}{factor}(actual, predicted, ...) -\method{weighted.dor}{factor}(actual, predicted, w, micro = NULL, ...) +\method{weighted.dor}{factor}(actual, predicted, w, ...) -\method{dor}{cmatrix}(x, micro = NULL, ...) +\method{dor}{cmatrix}(x, ...) dor(...) @@ -23,9 +23,6 @@ weighted.dor(...) \item{predicted}{A vector of <\link{factor}>-vector of \link{length} \eqn{n}, and \eqn{k} levels.} -\item{micro}{A <\link{logical}>-value of \link{length} \eqn{1} (default: \link{NULL}). If \link{TRUE} it returns the -micro average across all \eqn{k} classes, if \link{FALSE} it returns the macro average.} - \item{...}{Arguments passed into other methods} \item{w}{A <\link{numeric}>-vector of \link{length} \eqn{n}. 
\link{NULL} by default.} @@ -158,6 +155,7 @@ Other Classification: \code{\link{baccuracy.factor}()}, \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -181,6 +179,7 @@ Other Supervised Learning: \code{\link{ccc.numeric}()}, \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -202,6 +201,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/entropy.Rd b/man/entropy.Rd new file mode 100644 index 00000000..3bb5fa6e --- /dev/null +++ b/man/entropy.Rd @@ -0,0 +1,215 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/RcppExports.R, R/S3-CrossEntropyLoss.R +\name{entropy.factor} +\alias{entropy.factor} +\alias{logloss.factor} +\alias{entropy} +\alias{logloss} +\alias{weighted.entropy} +\alias{weighted.logloss} +\title{Compute the \eqn{\text{Cross}} \eqn{\text{Entropy}} \eqn{\text{Loss}}} +\usage{ +\method{entropy}{factor}(actual, response, normalize = TRUE, ...) + +\method{logloss}{factor}(actual, response, normalize = TRUE, ...) + +entropy(...) + +weighted.entropy(...) + +logloss(...) + +weighted.logloss(...) +} +\arguments{ +\item{actual}{A vector of <\link{factor}>- of \link{length} \eqn{n}, and \eqn{k} levels} + +\item{response}{A \eqn{N \times k} <\link{numeric}>-matrix of predicted probabilities. +The \eqn{i}-th row should sum to 1 (i.e., a valid probability distribution +over the \eqn{k} classes). The first column corresponds to the first factor +level in \code{actual}, the second column to the second factor level, and so on.} + +\item{normalize}{A <\link{logical}>-value (default: \link{TRUE}). If \link{TRUE}, +the mean cross-entropy across all observations is returned; otherwise, the +sum of cross-entropies is returned.} + +\item{...}{Arguments passed into other methods} +} +\value{ +A <\link{numeric}>-vector of \link{length} 1 +} +\description{ +The \code{\link[=entropy]{entropy()}} function computes the \strong{Cross-Entropy Loss} — often called \strong{Log Loss} — between observed classes (as a <\link{factor}>) and their predicted probability distributions (a <\link{numeric}> matrix). +The \code{\link[=weighted.entropy]{weighted.entropy()}} function is the weighted version, applying observation-specific weights. +} +\section{Calculation}{ + + +Let \eqn{y_{i,k}} be the one-hot encoding of the actual class label for the \eqn{i}-th observation (that is, \eqn{y_{i,k} = 1} if observation \eqn{i} belongs to class \eqn{k}, and 0 otherwise), and let \eqn{\hat{p}_{i,k}} be the predicted probability of class \eqn{k} for observation \eqn{i}. +The cross-entropy loss \eqn{L} is: + +\deqn{ + L = -\sum_{i=1}^N \sum_{k=1}^K y_{i,k}\,\log(\hat{p}_{i,k}). +} + +If \code{normalize = TRUE}, this sum is divided by \eqn{N} (the number of observations). When weights \eqn{w_i} are supplied, each term is multiplied by \eqn{w_i}, and if \code{normalize = TRUE}, the final sum is divided by \eqn{\sum_i w_i}. +} + +\section{Creating <\link{factor}>}{ + + +Consider a classification problem with three classes: \code{A}, \code{B}, and \code{C}. 
The actual vector of \code{\link[=factor]{factor()}} values is defined as follows:
+
+\if{html}{\out{<div class="sourceCode r">}}\preformatted{## set seed
+set.seed(1903)
+
+## actual
+factor(
+  x = sample(x = 1:3, size = 10, replace = TRUE),
+  levels = c(1, 2, 3),
+  labels = c("A", "B", "C")
+)
+#> [1] B A B B A C B C C A
+#> Levels: A B C
+}\if{html}{\out{</div>}}
+
+Here, the values 1, 2, and 3 are mapped to \code{A}, \code{B}, and \code{C}, respectively. Now, suppose your model does not predict any \code{B}'s. The predicted vector of \code{\link[=factor]{factor()}} values would be defined as follows:
+
+\if{html}{\out{<div class="sourceCode r">}}\preformatted{## set seed
+set.seed(1903)
+
+## predicted
+factor(
+  x = sample(x = c(1, 3), size = 10, replace = TRUE),
+  levels = c(1, 2, 3),
+  labels = c("A", "B", "C")
+)
+#> [1] C A C C C C C C A C
+#> Levels: A B C
+}\if{html}{\out{</div>
}} + +In both cases, \eqn{k = 3}, determined indirectly by the \code{levels} argument. +} + +\examples{ +# 1) Recode the iris data set to a binary classification problem +# Here, the positive class ("Virginica") is coded as 1, +# and the rest ("Others") is coded as 0. +iris$species_num <- as.numeric(iris$Species == "virginica") + +# 2) Fit a logistic regression model predicting species_num from Sepal.Length & Sepal.Width +model <- glm( + formula = species_num ~ Sepal.Length + Sepal.Width, + data = iris, + family = binomial(link = "logit") +) + +# 3) Generate predicted classes: "Virginica" vs. "Others" +predicted <- factor( + as.numeric(predict(model, type = "response") > 0.5), + levels = c(1, 0), + labels = c("Virginica", "Others") +) + +# 3.1) Generate actual classes +actual <- factor( + x = iris$species_num, + levels = c(1, 0), + labels = c("Virginica", "Others") +) + +# For cross-entropy, we need predicted probabilities for each class. +# Since it's a binary model, we create a 2-column matrix: +# 1st column = P("Virginica") +# 2nd column = P("Others") = 1 - P("Virginica") +predicted_probs <- predict(model, type = "response") +response_matrix <- cbind(predicted_probs, 1 - predicted_probs) + +# 4) Evaluate unweighted cross-entropy +# 'entropy' takes (actual, response_matrix, normalize=TRUE/FALSE). +# The factor 'actual' must have the positive class (Virginica) as its first level. +unweighted_CrossEntropy <- entropy( + actual = actual, # factor + response = response_matrix, # numeric matrix of probabilities + normalize = TRUE # normalize = TRUE +) + +# 5) Evaluate weighted cross-entropy +# We introduce a weight vector, for example: +weights <- iris$Petal.Length / mean(iris$Petal.Length) +weighted_CrossEntropy <- weighted.entropy( + actual = actual, + response = response_matrix, + w = weights, + normalize = TRUE +) + +# 6) Print Results +cat( + "Unweighted Cross-Entropy:", unweighted_CrossEntropy, + "Weighted Cross-Entropy:", weighted_CrossEntropy, + sep = "\n" +) +} +\seealso{ +Other Classification: +\code{\link{ROC.factor}()}, +\code{\link{accuracy.factor}()}, +\code{\link{baccuracy.factor}()}, +\code{\link{ckappa.factor}()}, +\code{\link{cmatrix.factor}()}, +\code{\link{dor.factor}()}, +\code{\link{fbeta.factor}()}, +\code{\link{fdr.factor}()}, +\code{\link{fer.factor}()}, +\code{\link{fmi.factor}()}, +\code{\link{fpr.factor}()}, +\code{\link{jaccard.factor}()}, +\code{\link{mcc.factor}()}, +\code{\link{nlr.factor}()}, +\code{\link{npv.factor}()}, +\code{\link{plr.factor}()}, +\code{\link{prROC.factor}()}, +\code{\link{precision.factor}()}, +\code{\link{recall.factor}()}, +\code{\link{specificity.factor}()}, +\code{\link{zerooneloss.factor}()} + +Other Supervised Learning: +\code{\link{ROC.factor}()}, +\code{\link{accuracy.factor}()}, +\code{\link{baccuracy.factor}()}, +\code{\link{ccc.numeric}()}, +\code{\link{ckappa.factor}()}, +\code{\link{cmatrix.factor}()}, +\code{\link{dor.factor}()}, +\code{\link{fbeta.factor}()}, +\code{\link{fdr.factor}()}, +\code{\link{fer.factor}()}, +\code{\link{fpr.factor}()}, +\code{\link{huberloss.numeric}()}, +\code{\link{jaccard.factor}()}, +\code{\link{mae.numeric}()}, +\code{\link{mape.numeric}()}, +\code{\link{mcc.factor}()}, +\code{\link{mpe.numeric}()}, +\code{\link{mse.numeric}()}, +\code{\link{nlr.factor}()}, +\code{\link{npv.factor}()}, +\code{\link{pinball.numeric}()}, +\code{\link{plr.factor}()}, +\code{\link{prROC.factor}()}, +\code{\link{precision.factor}()}, +\code{\link{rae.numeric}()}, +\code{\link{recall.factor}()}, 
+\code{\link{rmse.numeric}()}, +\code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, +\code{\link{rrse.numeric}()}, +\code{\link{rsq.numeric}()}, +\code{\link{smape.numeric}()}, +\code{\link{specificity.factor}()}, +\code{\link{zerooneloss.factor}()} +} +\concept{Classification} +\concept{Supervised Learning} diff --git a/man/examples/scr_ConfusionMatrix.R b/man/examples/scr_ConfusionMatrix.R index aeea8125..104686ba 100644 --- a/man/examples/scr_ConfusionMatrix.R +++ b/man/examples/scr_ConfusionMatrix.R @@ -54,7 +54,7 @@ plot( ) # 4.2) weighted matrix -confusion_matrix <- cmatrix( +confusion_matrix <- weighted.cmatrix( actual = actual, predicted = predicted, w = iris$Petal.Length/mean(iris$Petal.Length) diff --git a/man/examples/scr_CrossEntropyLoss.R b/man/examples/scr_CrossEntropyLoss.R new file mode 100644 index 00000000..f5c38b0e --- /dev/null +++ b/man/examples/scr_CrossEntropyLoss.R @@ -0,0 +1,58 @@ +# 1) Recode the iris data set to a binary classification problem +# Here, the positive class ("Virginica") is coded as 1, +# and the rest ("Others") is coded as 0. +iris$species_num <- as.numeric(iris$Species == "virginica") + +# 2) Fit a logistic regression model predicting species_num from Sepal.Length & Sepal.Width +model <- glm( + formula = species_num ~ Sepal.Length + Sepal.Width, + data = iris, + family = binomial(link = "logit") +) + +# 3) Generate predicted classes: "Virginica" vs. "Others" +predicted <- factor( + as.numeric(predict(model, type = "response") > 0.5), + levels = c(1, 0), + labels = c("Virginica", "Others") +) + +# 3.1) Generate actual classes +actual <- factor( + x = iris$species_num, + levels = c(1, 0), + labels = c("Virginica", "Others") +) + +# For cross-entropy, we need predicted probabilities for each class. +# Since it's a binary model, we create a 2-column matrix: +# 1st column = P("Virginica") +# 2nd column = P("Others") = 1 - P("Virginica") +predicted_probs <- predict(model, type = "response") +response_matrix <- cbind(predicted_probs, 1 - predicted_probs) + +# 4) Evaluate unweighted cross-entropy +# 'entropy' takes (actual, response_matrix, normalize=TRUE/FALSE). +# The factor 'actual' must have the positive class (Virginica) as its first level. 
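+# (Editor's sketch, not part of the package example: the same normalized
+# loss can be cross-checked in base R using the formula from entropy.Rd.
+# Matrix indexing picks each observation's true-class probability, since
+# as.integer(actual) maps "Virginica" to column 1 and "Others" to column 2.)
+manual_entropy <- -mean(
+  log(response_matrix[cbind(seq_along(actual), as.integer(actual))])
+)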
+unweighted_CrossEntropy <- entropy( + actual = actual, # factor + response = response_matrix, # numeric matrix of probabilities + normalize = TRUE # normalize = TRUE +) + +# 5) Evaluate weighted cross-entropy +# We introduce a weight vector, for example: +weights <- iris$Petal.Length / mean(iris$Petal.Length) +weighted_CrossEntropy <- weighted.entropy( + actual = actual, + response = response_matrix, + w = weights, + normalize = TRUE +) + +# 6) Print Results +cat( + "Unweighted Cross-Entropy:", unweighted_CrossEntropy, + "Weighted Cross-Entropy:", weighted_CrossEntropy, + sep = "\n" +) diff --git a/man/examples/scr_RelativeRootMeanSquaredError.R b/man/examples/scr_RelativeRootMeanSquaredError.R new file mode 100644 index 00000000..83526072 --- /dev/null +++ b/man/examples/scr_RelativeRootMeanSquaredError.R @@ -0,0 +1,30 @@ +# 1) fit a linear +# regression +model <- lm( + mpg ~ ., + data = mtcars +) + +# 1.1) define actual +# and predicted values +# to measure performance +actual <- mtcars$mpg +predicted <- fitted(model) + +# 2) evaluate in-sample model +# performance using Relative Root Mean Squared Error (RRMSE) +cat( + "IQR Relative Root Mean Squared Error", rrmse( + actual = actual, + predicted = predicted, + normalization = 2 + ), + "IQR Relative Root Mean Squared Error (weighted)", weighted.rrmse( + actual = actual, + predicted = predicted, + w = mtcars$mpg/mean(mtcars$mpg), + normalization = 2 + ), + sep = "\n" +) + diff --git a/man/examples/scr_RelativeRootSquaredError.R b/man/examples/scr_RootRelativeSquaredError.R similarity index 100% rename from man/examples/scr_RelativeRootSquaredError.R rename to man/examples/scr_RootRelativeSquaredError.R diff --git a/man/fbeta.Rd b/man/fbeta.Rd index fcf7a38a..6741ab5a 100644 --- a/man/fbeta.Rd +++ b/man/fbeta.Rd @@ -177,6 +177,7 @@ Other Classification: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, \code{\link{fmi.factor}()}, @@ -200,6 +201,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, \code{\link{fpr.factor}()}, @@ -220,6 +222,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/fdr.Rd b/man/fdr.Rd index 8f9ee12a..9029da38 100644 --- a/man/fdr.Rd +++ b/man/fdr.Rd @@ -172,6 +172,7 @@ Other Classification: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fer.factor}()}, \code{\link{fmi.factor}()}, @@ -195,6 +196,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fer.factor}()}, \code{\link{fpr.factor}()}, @@ -215,6 +217,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/fer.Rd b/man/fer.Rd index 0b37c7a9..4ce1f0dd 100644 --- a/man/fer.Rd +++ b/man/fer.Rd @@ -171,6 +171,7 @@ 
Other Classification: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fmi.factor}()}, @@ -194,6 +195,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fpr.factor}()}, @@ -214,6 +216,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/figures/README-ROC-1.png b/man/figures/README-ROC-1.png index 11a3ae08..41564a89 100644 Binary files a/man/figures/README-ROC-1.png and b/man/figures/README-ROC-1.png differ diff --git a/man/figures/README-performance-1.png b/man/figures/README-performance-1.png new file mode 100644 index 00000000..8729a92a Binary files /dev/null and b/man/figures/README-performance-1.png differ diff --git a/man/figures/README-performance-2.png b/man/figures/README-performance-2.png new file mode 100644 index 00000000..75f4ede3 Binary files /dev/null and b/man/figures/README-performance-2.png differ diff --git a/man/figures/README-performance-classification-1.png b/man/figures/README-performance-classification-1.png index c837ceea..fa407ffd 100644 Binary files a/man/figures/README-performance-classification-1.png and b/man/figures/README-performance-classification-1.png differ diff --git a/man/figures/README-performance-regression-1.png b/man/figures/README-performance-regression-1.png index dc3449b0..75f4ede3 100644 Binary files a/man/figures/README-performance-regression-1.png and b/man/figures/README-performance-regression-1.png differ diff --git a/man/fmi.Rd b/man/fmi.Rd index f582c138..7b59a67e 100644 --- a/man/fmi.Rd +++ b/man/fmi.Rd @@ -130,6 +130,7 @@ Other Classification: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, diff --git a/man/fpr.Rd b/man/fpr.Rd index f722f310..4633f76d 100644 --- a/man/fpr.Rd +++ b/man/fpr.Rd @@ -186,6 +186,7 @@ Other Classification: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -209,6 +210,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -229,6 +231,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/huberloss.Rd b/man/huberloss.Rd index ae098f15..de6b0920 100644 --- a/man/huberloss.Rd +++ b/man/huberloss.Rd @@ -101,6 +101,7 @@ Other Regression: \code{\link{rae.numeric}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()} @@ -113,6 +114,7 @@ Other Supervised 
Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -133,6 +135,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/jaccard.Rd b/man/jaccard.Rd index 5b94a0dd..0a4d2f02 100644 --- a/man/jaccard.Rd +++ b/man/jaccard.Rd @@ -201,6 +201,7 @@ Other Classification: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -224,6 +225,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -244,6 +246,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/mae.Rd b/man/mae.Rd index b271991f..65479e22 100644 --- a/man/mae.Rd +++ b/man/mae.Rd @@ -81,6 +81,7 @@ Other Regression: \code{\link{rae.numeric}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()} @@ -93,6 +94,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -113,6 +115,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/mape.Rd b/man/mape.Rd index d6a2ebab..b1101999 100644 --- a/man/mape.Rd +++ b/man/mape.Rd @@ -82,6 +82,7 @@ Other Regression: \code{\link{rae.numeric}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()} @@ -94,6 +95,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -114,6 +116,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/mcc.Rd b/man/mcc.Rd index 523b7040..a600c3fb 100644 --- a/man/mcc.Rd +++ b/man/mcc.Rd @@ -158,6 +158,7 @@ Other Classification: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -181,6 +182,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, 
\code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -201,6 +203,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/mpe.Rd b/man/mpe.Rd index 88568edd..a281a3b2 100644 --- a/man/mpe.Rd +++ b/man/mpe.Rd @@ -83,6 +83,7 @@ Other Regression: \code{\link{rae.numeric}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()} @@ -95,6 +96,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -115,6 +117,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/mse.Rd b/man/mse.Rd index 82a9018c..e20a2a03 100644 --- a/man/mse.Rd +++ b/man/mse.Rd @@ -83,6 +83,7 @@ Other Regression: \code{\link{rae.numeric}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()} @@ -95,6 +96,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -115,6 +117,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/nlr.Rd b/man/nlr.Rd index 37a355e5..ed2aa7e2 100644 --- a/man/nlr.Rd +++ b/man/nlr.Rd @@ -8,11 +8,11 @@ \alias{weighted.nlr} \title{Compute the \eqn{\text{negative}} \eqn{\text{likelihood}} \eqn{\text{ratio}}} \usage{ -\method{nlr}{factor}(actual, predicted, micro = NULL, ...) +\method{nlr}{factor}(actual, predicted, ...) -\method{weighted.nlr}{factor}(actual, predicted, w, micro = NULL, ...) +\method{weighted.nlr}{factor}(actual, predicted, w, ...) -\method{nlr}{cmatrix}(x, micro = NULL, ...) +\method{nlr}{cmatrix}(x, ...) nlr(...) @@ -23,9 +23,6 @@ weighted.nlr(...) \item{predicted}{A vector of <\link{factor}>-vector of \link{length} \eqn{n}, and \eqn{k} levels.} -\item{micro}{A <\link{logical}>-value of \link{length} \eqn{1} (default: \link{NULL}). If \link{TRUE} it returns the -micro average across all \eqn{k} classes, if \link{FALSE} it returns the macro average.} - \item{...}{Arguments passed into other methods} \item{w}{A <\link{numeric}>-vector of \link{length} \eqn{n}. 
\link{NULL} by default.} @@ -150,6 +147,7 @@ Other Classification: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -173,6 +171,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -193,6 +192,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/npv.Rd b/man/npv.Rd index e3fa5acf..b0cc48be 100644 --- a/man/npv.Rd +++ b/man/npv.Rd @@ -171,6 +171,7 @@ Other Classification: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -194,6 +195,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -214,6 +216,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/pinball.Rd b/man/pinball.Rd index fa841843..3e8091dd 100644 --- a/man/pinball.Rd +++ b/man/pinball.Rd @@ -85,6 +85,7 @@ Other Regression: \code{\link{rae.numeric}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()} @@ -97,6 +98,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -117,6 +119,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/plr.Rd b/man/plr.Rd index 3b97b580..ab2957b0 100644 --- a/man/plr.Rd +++ b/man/plr.Rd @@ -8,11 +8,11 @@ \alias{weighted.plr} \title{Compute the \eqn{\text{positive}} \eqn{\text{likelihood}} \eqn{\text{ratio}}} \usage{ -\method{plr}{factor}(actual, predicted, micro = NULL, ...) +\method{plr}{factor}(actual, predicted, ...) -\method{weighted.plr}{factor}(actual, predicted, w, micro = NULL, ...) +\method{weighted.plr}{factor}(actual, predicted, w, ...) -\method{plr}{cmatrix}(x, micro = NULL, ...) +\method{plr}{cmatrix}(x, ...) plr(...) @@ -23,9 +23,6 @@ weighted.plr(...) \item{predicted}{A vector of <\link{factor}>-vector of \link{length} \eqn{n}, and \eqn{k} levels.} -\item{micro}{A <\link{logical}>-value of \link{length} \eqn{1} (default: \link{NULL}). 
If \link{TRUE} it returns the -micro average across all \eqn{k} classes, if \link{FALSE} it returns the macro average.} - \item{...}{Arguments passed into other methods} \item{w}{A <\link{numeric}>-vector of \link{length} \eqn{n}. \link{NULL} by default.} @@ -156,6 +153,7 @@ Other Classification: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -179,6 +177,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -199,6 +198,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/prROC.Rd b/man/prROC.Rd index fb5be015..4d72a908 100644 --- a/man/prROC.Rd +++ b/man/prROC.Rd @@ -2,34 +2,34 @@ % Please edit documentation in R/RcppExports.R, R/S3_PrecisionRecallCurve.R \name{prROC.factor} \alias{prROC.factor} +\alias{weighted.prROC.factor} \alias{prROC} +\alias{weighted.prROC} \title{Compute the \eqn{\text{reciever}} \eqn{\text{operator}} \eqn{\text{characteristics}}} \usage{ -\method{prROC}{factor}(actual, response, micro = NULL, thresholds = NULL, na.rm = TRUE, ...) +\method{prROC}{factor}(actual, response, thresholds = NULL, ...) + +\method{weighted.prROC}{factor}(actual, response, w, thresholds = NULL, ...) prROC(...) + +weighted.prROC(...) } \arguments{ \item{actual}{A vector of <\link{factor}>- of \link{length} \eqn{n}, and \eqn{k} levels.} \item{response}{A <\link{numeric}>-vector of \link{length} \eqn{n}. The estimated response probabilities.} -\item{micro}{A <\link{logical}>-value of \link{length} \eqn{1} (default: \link{NULL}). If \link{TRUE} it returns the -micro average across all \eqn{k} classes, if \link{FALSE} it returns the macro average.} - \item{thresholds}{An optional <\link{numeric}>-vector of non-zero \link{length} (default: \link{NULL}).} -\item{na.rm}{A <\link{logical}> value of \link{length} \eqn{1} (default: \link{TRUE}). If \link{TRUE}, \link{NA} values are removed from the computation. -This argument is only relevant when \code{micro != NULL}. -When \code{na.rm = TRUE}, the computation corresponds to \code{sum(c(1, 2, NA), na.rm = TRUE) / length(na.omit(c(1, 2, NA)))}. -When \code{na.rm = FALSE}, the computation corresponds to \code{sum(c(1, 2, NA), na.rm = TRUE) / length(c(1, 2, NA))}.} - \item{...}{Arguments passed into other methods.} + +\item{w}{A <\link{numeric}>-vector of \link{length} \eqn{n}. 
\link{NULL} by default.} } \value{ A \link{data.frame} on the following form, -\item{thresholds}{<\link{numeric}> Thresholds used to determine \code{\link[=recall]{recall()}} and \code{\link[=precision]{precision()}}} +\item{threshold}{<\link{numeric}> Thresholds used to determine \code{\link[=recall]{recall()}} and \code{\link[=precision]{precision()}}} \item{level}{<\link{character}> The level of the actual <\link{factor}>} \item{label}{<\link{character}> The levels of the actual <\link{factor}>} \item{recall}{<\link{numeric}> The recall} @@ -153,6 +153,7 @@ Other Classification: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -176,6 +177,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -196,6 +198,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/precision.Rd b/man/precision.Rd index e777ac75..9317bef3 100644 --- a/man/precision.Rd +++ b/man/precision.Rd @@ -186,6 +186,7 @@ Other Classification: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -209,6 +210,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -229,6 +231,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/rae.Rd b/man/rae.Rd index bd491090..9b3486c6 100644 --- a/man/rae.Rd +++ b/man/rae.Rd @@ -84,6 +84,7 @@ Other Regression: \code{\link{pinball.numeric}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()} @@ -96,6 +97,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -116,6 +118,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/recall.Rd b/man/recall.Rd index 8bfb7a20..a40f4dc1 100644 --- a/man/recall.Rd +++ b/man/recall.Rd @@ -201,6 +201,7 @@ Other Classification: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -224,6 +225,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, 
\code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -244,6 +246,7 @@ Other Supervised Learning: \code{\link{rae.numeric}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/rmse.Rd b/man/rmse.Rd index 29ac5d74..879c8fa3 100644 --- a/man/rmse.Rd +++ b/man/rmse.Rd @@ -84,6 +84,7 @@ Other Regression: \code{\link{pinball.numeric}()}, \code{\link{rae.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()} @@ -96,6 +97,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -116,6 +118,7 @@ Other Supervised Learning: \code{\link{rae.numeric}()}, \code{\link{recall.factor}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/rmsle.Rd b/man/rmsle.Rd index d829463d..2a549b2b 100644 --- a/man/rmsle.Rd +++ b/man/rmsle.Rd @@ -84,6 +84,7 @@ Other Regression: \code{\link{pinball.numeric}()}, \code{\link{rae.numeric}()}, \code{\link{rmse.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()} @@ -96,6 +97,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -116,6 +118,7 @@ Other Supervised Learning: \code{\link{rae.numeric}()}, \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/rrmse.Rd b/man/rrmse.Rd new file mode 100644 index 00000000..ca6fe3de --- /dev/null +++ b/man/rrmse.Rd @@ -0,0 +1,134 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/RcppExports.R, +% R/S3_RelativeRootMeanSquaredError.R +\name{rrmse.numeric} +\alias{rrmse.numeric} +\alias{weighted.rrmse.numeric} +\alias{rrmse} +\alias{weighted.rrmse} +\title{Compute the \eqn{\text{relative}} \eqn{\text{root}} \eqn{\text{mean}} \eqn{\text{squared}} \eqn{\text{error}}} +\usage{ +\method{rrmse}{numeric}(actual, predicted, normalization = 1L, ...) + +\method{weighted.rrmse}{numeric}(actual, predicted, w, normalization = 1L, ...) + +rrmse(...) + +weighted.rrmse(...) +} +\arguments{ +\item{actual}{A <\link{numeric}>-vector of \link{length} \eqn{n}. The observed (continuous) response variable.} + +\item{predicted}{A <\link{numeric}>-vector of \link{length} \eqn{n}. The estimated (continuous) response variable.} + +\item{normalization}{A <\link{numeric}>-value of \link{length} \eqn{1} (default: \eqn{1}). \eqn{0}: \link{mean}-normalization, \eqn{1}: \link{range}-normalization, \eqn{2}: \link{IQR}-normalization.} + +\item{...}{Arguments passed into other methods.} + +\item{w}{A <\link{numeric}>-vector of \link{length} \eqn{n}. 
The weight assigned to each observation in the data.} +} +\value{ +A <\link{numeric}> vector of \link{length} 1. +} +\description{ +The \code{\link[=rrmse]{rrmse()}}-function computes the \href{https://en.wikipedia.org/wiki/Root-mean-square_deviation}{Relative Root Mean Squared Error} between +the observed and predicted <\link{numeric}> vectors. The \code{\link[=weighted.rrmse]{weighted.rrmse()}} function computes the weighted Relative Root Mean Squared Error. +} +\section{Calculation}{ + + +The metric is calculated as, + +\deqn{ + \frac{RMSE}{\gamma} +} + +where \eqn{\gamma} is the normalization factor: the \link{mean}, the \link{range} or the \link{IQR} of the observed values, corresponding to \code{normalization} values \eqn{0}, \eqn{1} and \eqn{2}, respectively. +} + +\examples{ +# 1) fit a linear +# regression +model <- lm( + mpg ~ ., + data = mtcars +) + +# 1.1) define actual +# and predicted values +# to measure performance +actual <- mtcars$mpg +predicted <- fitted(model) + +# 2) evaluate in-sample model +# performance using Relative Root Mean Squared Error (RRMSE) +cat( + "IQR Relative Root Mean Squared Error", rrmse( + actual = actual, + predicted = predicted, + normalization = 2 + ), + "IQR Relative Root Mean Squared Error (weighted)", weighted.rrmse( + actual = actual, + predicted = predicted, + w = mtcars$mpg/mean(mtcars$mpg), + normalization = 2 + ), + sep = "\n" +) + +} +\seealso{ +Other Regression: +\code{\link{ccc.numeric}()}, +\code{\link{huberloss.numeric}()}, +\code{\link{mae.numeric}()}, +\code{\link{mape.numeric}()}, +\code{\link{mpe.numeric}()}, +\code{\link{mse.numeric}()}, +\code{\link{pinball.numeric}()}, +\code{\link{rae.numeric}()}, +\code{\link{rmse.numeric}()}, +\code{\link{rmsle.numeric}()}, +\code{\link{rrse.numeric}()}, +\code{\link{rsq.numeric}()}, +\code{\link{smape.numeric}()} + +Other Supervised Learning: +\code{\link{ROC.factor}()}, +\code{\link{accuracy.factor}()}, +\code{\link{baccuracy.factor}()}, +\code{\link{ccc.numeric}()}, +\code{\link{ckappa.factor}()}, +\code{\link{cmatrix.factor}()}, +\code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, +\code{\link{fbeta.factor}()}, +\code{\link{fdr.factor}()}, +\code{\link{fer.factor}()}, +\code{\link{fpr.factor}()}, +\code{\link{huberloss.numeric}()}, +\code{\link{jaccard.factor}()}, +\code{\link{mae.numeric}()}, +\code{\link{mape.numeric}()}, +\code{\link{mcc.factor}()}, +\code{\link{mpe.numeric}()}, +\code{\link{mse.numeric}()}, +\code{\link{nlr.factor}()}, +\code{\link{npv.factor}()}, +\code{\link{pinball.numeric}()}, +\code{\link{plr.factor}()}, +\code{\link{prROC.factor}()}, +\code{\link{precision.factor}()}, +\code{\link{rae.numeric}()}, +\code{\link{recall.factor}()}, +\code{\link{rmse.numeric}()}, +\code{\link{rmsle.numeric}()}, +\code{\link{rrse.numeric}()}, +\code{\link{rsq.numeric}()}, +\code{\link{smape.numeric}()}, +\code{\link{specificity.factor}()}, +\code{\link{zerooneloss.factor}()} +} +\concept{Regression} +\concept{Supervised Learning} diff --git a/man/rrse.Rd b/man/rrse.Rd index 632e302a..b6b5f400 100644 --- a/man/rrse.Rd +++ b/man/rrse.Rd @@ -58,28 +58,19 @@ model <- lm( actual <- mtcars$mpg predicted <- fitted(model) - -# 2) calculate the metric -# with delta 0.5 -huberloss( - actual = actual, - predicted = predicted, - delta = 0.5 -) - -# 3) caclulate weighted -# metric using arbitrary weights -w <- rbeta( - n = 1e3, - shape1 = 10, - shape2 = 2 -) - -huberloss( - actual = actual, - predicted = predicted, - delta = 0.5, - w = w +# 2) evaluate in-sample model +# performance using Relative Root Squared Error (RRSE) +cat( + "Relative Root Squared Error", rrse( + actual = actual, + predicted = predicted + ), + "Relative Root Squared Error
(weighted)", weighted.rrse( + actual = actual, + predicted = predicted, + w = mtcars$mpg/mean(mtcars$mpg) + ), + sep = "\n" ) } \seealso{ @@ -94,6 +85,7 @@ Other Regression: \code{\link{rae.numeric}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()} @@ -105,6 +97,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -126,6 +119,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, \code{\link{specificity.factor}()}, diff --git a/man/rsq.Rd b/man/rsq.Rd index d9b81f3c..2ed536a1 100644 --- a/man/rsq.Rd +++ b/man/rsq.Rd @@ -89,6 +89,7 @@ Other Regression: \code{\link{rae.numeric}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{smape.numeric}()} @@ -100,6 +101,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -121,6 +123,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{smape.numeric}()}, \code{\link{specificity.factor}()}, diff --git a/man/smape.Rd b/man/smape.Rd index 357c359f..d4da7795 100644 --- a/man/smape.Rd +++ b/man/smape.Rd @@ -85,6 +85,7 @@ Other Regression: \code{\link{rae.numeric}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()} @@ -96,6 +97,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -117,6 +119,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{specificity.factor}()}, diff --git a/man/specificity.Rd b/man/specificity.Rd index a5158d8b..024d9967 100644 --- a/man/specificity.Rd +++ b/man/specificity.Rd @@ -202,6 +202,7 @@ Other Classification: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -225,6 +226,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -246,6 +248,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/man/zerooneloss.Rd b/man/zerooneloss.Rd index 312cdc80..31d13ac3 
100644 --- a/man/zerooneloss.Rd +++ b/man/zerooneloss.Rd @@ -143,6 +143,7 @@ Other Classification: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -166,6 +167,7 @@ Other Supervised Learning: \code{\link{ckappa.factor}()}, \code{\link{cmatrix.factor}()}, \code{\link{dor.factor}()}, +\code{\link{entropy.factor}()}, \code{\link{fbeta.factor}()}, \code{\link{fdr.factor}()}, \code{\link{fer.factor}()}, @@ -187,6 +189,7 @@ Other Supervised Learning: \code{\link{recall.factor}()}, \code{\link{rmse.numeric}()}, \code{\link{rmsle.numeric}()}, +\code{\link{rrmse.numeric}()}, \code{\link{rrse.numeric}()}, \code{\link{rsq.numeric}()}, \code{\link{smape.numeric}()}, diff --git a/src/RcppExports.cpp b/src/RcppExports.cpp index 0baad8d4..7925bbb0 100644 --- a/src/RcppExports.cpp +++ b/src/RcppExports.cpp @@ -11,732 +11,789 @@ Rcpp::Rostream& Rcpp::Rcout = Rcpp::Rcpp_cout_get(); Rcpp::Rostream& Rcpp::Rcerr = Rcpp::Rcpp_cerr_get(); #endif -// accuracy -NumericVector accuracy(const IntegerVector& actual, const IntegerVector& predicted); -RcppExport SEXP _SLmetrics_accuracy(SEXP actualSEXP, SEXP predictedSEXP) { +// Accuracy +Rcpp::NumericVector Accuracy(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted); +RcppExport SEXP _SLmetrics_Accuracy(SEXP actualSEXP, SEXP predictedSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - rcpp_result_gen = Rcpp::wrap(accuracy(actual, predicted)); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + rcpp_result_gen = Rcpp::wrap(Accuracy(actual, predicted)); return rcpp_result_gen; END_RCPP } -// weighted_accuracy -NumericVector weighted_accuracy(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w); -RcppExport SEXP _SLmetrics_weighted_accuracy(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP) { +// weighted_Accuracy +Rcpp::NumericVector weighted_Accuracy(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w); +RcppExport SEXP _SLmetrics_weighted_Accuracy(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< const NumericVector& >::type w(wSEXP); - rcpp_result_gen = Rcpp::wrap(weighted_accuracy(actual, predicted, w)); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type w(wSEXP); + rcpp_result_gen = Rcpp::wrap(weighted_Accuracy(actual, predicted, w)); return rcpp_result_gen; END_RCPP } -// accuracy_cmatrix -NumericVector accuracy_cmatrix(const NumericMatrix& x); -RcppExport SEXP _SLmetrics_accuracy_cmatrix(SEXP xSEXP) { +// cmatrix_Accuracy +Rcpp::NumericVector cmatrix_Accuracy(const Rcpp::NumericMatrix& x); +RcppExport 
SEXP _SLmetrics_cmatrix_Accuracy(SEXP xSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const NumericMatrix& >::type x(xSEXP); - rcpp_result_gen = Rcpp::wrap(accuracy_cmatrix(x)); + Rcpp::traits::input_parameter< const Rcpp::NumericMatrix& >::type x(xSEXP); + rcpp_result_gen = Rcpp::wrap(cmatrix_Accuracy(x)); return rcpp_result_gen; END_RCPP } -// baccuracy -NumericVector baccuracy(const IntegerVector& actual, const IntegerVector& predicted, const bool& adjust, bool na_rm); -RcppExport SEXP _SLmetrics_baccuracy(SEXP actualSEXP, SEXP predictedSEXP, SEXP adjustSEXP, SEXP na_rmSEXP) { +// BalancedAccuracy +Rcpp::NumericVector BalancedAccuracy(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const bool& adjust, bool na_rm); +RcppExport SEXP _SLmetrics_BalancedAccuracy(SEXP actualSEXP, SEXP predictedSEXP, SEXP adjustSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); Rcpp::traits::input_parameter< const bool& >::type adjust(adjustSEXP); Rcpp::traits::input_parameter< bool >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(baccuracy(actual, predicted, adjust, na_rm)); + rcpp_result_gen = Rcpp::wrap(BalancedAccuracy(actual, predicted, adjust, na_rm)); return rcpp_result_gen; END_RCPP } -// weighted_baccuracy -NumericVector weighted_baccuracy(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, const bool& adjust, bool na_rm); -RcppExport SEXP _SLmetrics_weighted_baccuracy(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP adjustSEXP, SEXP na_rmSEXP) { +// weighted_BalancedAccuracy +Rcpp::NumericVector weighted_BalancedAccuracy(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, const bool& adjust, bool na_rm); +RcppExport SEXP _SLmetrics_weighted_BalancedAccuracy(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP adjustSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< const NumericVector& >::type w(wSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type w(wSEXP); Rcpp::traits::input_parameter< const bool& >::type adjust(adjustSEXP); Rcpp::traits::input_parameter< bool >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(weighted_baccuracy(actual, predicted, w, adjust, na_rm)); + rcpp_result_gen = Rcpp::wrap(weighted_BalancedAccuracy(actual, predicted, w, adjust, na_rm)); return rcpp_result_gen; END_RCPP } -// baccuracy_cmatrix -NumericVector baccuracy_cmatrix(const NumericMatrix& x, const bool& adjust, bool na_rm); -RcppExport SEXP _SLmetrics_baccuracy_cmatrix(SEXP xSEXP, SEXP adjustSEXP, SEXP na_rmSEXP) { +// cmatrix_BalancedAccuracy +Rcpp::NumericVector 
cmatrix_BalancedAccuracy(const NumericMatrix& x, const bool& adjust, bool na_rm); +RcppExport SEXP _SLmetrics_cmatrix_BalancedAccuracy(SEXP xSEXP, SEXP adjustSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const NumericMatrix& >::type x(xSEXP); Rcpp::traits::input_parameter< const bool& >::type adjust(adjustSEXP); Rcpp::traits::input_parameter< bool >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(baccuracy_cmatrix(x, adjust, na_rm)); + rcpp_result_gen = Rcpp::wrap(cmatrix_BalancedAccuracy(x, adjust, na_rm)); return rcpp_result_gen; END_RCPP } -// ckappa -NumericVector ckappa(const IntegerVector& actual, const IntegerVector& predicted, const double& beta); -RcppExport SEXP _SLmetrics_ckappa(SEXP actualSEXP, SEXP predictedSEXP, SEXP betaSEXP) { +// CohensKappa +Rcpp::NumericVector CohensKappa(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const double& beta); +RcppExport SEXP _SLmetrics_CohensKappa(SEXP actualSEXP, SEXP predictedSEXP, SEXP betaSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); Rcpp::traits::input_parameter< const double& >::type beta(betaSEXP); - rcpp_result_gen = Rcpp::wrap(ckappa(actual, predicted, beta)); + rcpp_result_gen = Rcpp::wrap(CohensKappa(actual, predicted, beta)); return rcpp_result_gen; END_RCPP } -// weighted_ckappa -NumericVector weighted_ckappa(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, const double& beta); -RcppExport SEXP _SLmetrics_weighted_ckappa(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP betaSEXP) { +// weighted_CohensKappa +Rcpp::NumericVector weighted_CohensKappa(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, const double& beta); +RcppExport SEXP _SLmetrics_weighted_CohensKappa(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP betaSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< const NumericVector& >::type w(wSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type w(wSEXP); Rcpp::traits::input_parameter< const double& >::type beta(betaSEXP); - rcpp_result_gen = Rcpp::wrap(weighted_ckappa(actual, predicted, w, beta)); + rcpp_result_gen = Rcpp::wrap(weighted_CohensKappa(actual, predicted, w, beta)); return rcpp_result_gen; END_RCPP } -// ckappa_cmatrix -NumericVector ckappa_cmatrix(const NumericMatrix& x, const double& beta); -RcppExport SEXP _SLmetrics_ckappa_cmatrix(SEXP xSEXP, SEXP betaSEXP) { +// cmatrix_CohensKappa +Rcpp::NumericVector cmatrix_CohensKappa(const Rcpp::NumericMatrix& x, const double& beta); +RcppExport SEXP _SLmetrics_cmatrix_CohensKappa(SEXP xSEXP, SEXP betaSEXP) { BEGIN_RCPP Rcpp::RObject 
rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const NumericMatrix& >::type x(xSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericMatrix& >::type x(xSEXP); Rcpp::traits::input_parameter< const double& >::type beta(betaSEXP); - rcpp_result_gen = Rcpp::wrap(ckappa_cmatrix(x, beta)); + rcpp_result_gen = Rcpp::wrap(cmatrix_CohensKappa(x, beta)); return rcpp_result_gen; END_RCPP } -// cmatrix -Rcpp::NumericMatrix cmatrix(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::Nullable& w); -RcppExport SEXP _SLmetrics_cmatrix(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP) { +// UnweightedConfusionMatrix +Rcpp::NumericMatrix UnweightedConfusionMatrix(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted); +RcppExport SEXP _SLmetrics_UnweightedConfusionMatrix(SEXP actualSEXP, SEXP predictedSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< const Rcpp::Nullable& >::type w(wSEXP); - rcpp_result_gen = Rcpp::wrap(cmatrix(actual, predicted, w)); + rcpp_result_gen = Rcpp::wrap(UnweightedConfusionMatrix(actual, predicted)); return rcpp_result_gen; END_RCPP } -// dor -NumericVector dor(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro); -RcppExport SEXP _SLmetrics_dor(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP) { +// WeightedConfusionMatrix +Rcpp::NumericMatrix WeightedConfusionMatrix(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w); +RcppExport SEXP _SLmetrics_WeightedConfusionMatrix(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::RNGScope rcpp_rngScope_gen; + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type w(wSEXP); + rcpp_result_gen = Rcpp::wrap(WeightedConfusionMatrix(actual, predicted, w)); + return rcpp_result_gen; +END_RCPP +} +// CrossEntropy +double CrossEntropy(const IntegerVector& actual, const NumericMatrix& response, const bool normalize); +RcppExport SEXP _SLmetrics_CrossEntropy(SEXP actualSEXP, SEXP responseSEXP, SEXP normalizeSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); - rcpp_result_gen = Rcpp::wrap(dor(actual, predicted, micro)); + Rcpp::traits::input_parameter< const NumericMatrix& >::type response(responseSEXP); + Rcpp::traits::input_parameter< const bool >::type normalize(normalizeSEXP); + rcpp_result_gen = Rcpp::wrap(CrossEntropy(actual, response, normalize)); return rcpp_result_gen; END_RCPP } -// weighted_dor -NumericVector weighted_dor(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, Nullable micro); -RcppExport SEXP _SLmetrics_weighted_dor(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP) { +// weighted_CrossEntropy +double weighted_CrossEntropy(const IntegerVector& actual, const 
NumericMatrix& response, const NumericVector& w, const bool normalize); +RcppExport SEXP _SLmetrics_weighted_CrossEntropy(SEXP actualSEXP, SEXP responseSEXP, SEXP wSEXP, SEXP normalizeSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< const NumericMatrix& >::type response(responseSEXP); Rcpp::traits::input_parameter< const NumericVector& >::type w(wSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); - rcpp_result_gen = Rcpp::wrap(weighted_dor(actual, predicted, w, micro)); + Rcpp::traits::input_parameter< const bool >::type normalize(normalizeSEXP); + rcpp_result_gen = Rcpp::wrap(weighted_CrossEntropy(actual, response, w, normalize)); return rcpp_result_gen; END_RCPP } -// dor_cmatrix -NumericVector dor_cmatrix(const NumericMatrix& x, Nullable micro); -RcppExport SEXP _SLmetrics_dor_cmatrix(SEXP xSEXP, SEXP microSEXP) { +// LogLoss +double LogLoss(const IntegerVector& actual, const NumericMatrix& response, const bool normalize); +RcppExport SEXP _SLmetrics_LogLoss(SEXP actualSEXP, SEXP responseSEXP, SEXP normalizeSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const NumericMatrix& >::type x(xSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); - rcpp_result_gen = Rcpp::wrap(dor_cmatrix(x, micro)); + Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const NumericMatrix& >::type response(responseSEXP); + Rcpp::traits::input_parameter< const bool >::type normalize(normalizeSEXP); + rcpp_result_gen = Rcpp::wrap(LogLoss(actual, response, normalize)); return rcpp_result_gen; END_RCPP } -// fbeta -NumericVector fbeta(const IntegerVector& actual, const IntegerVector& predicted, const double& beta, Nullable micro, bool na_rm); -RcppExport SEXP _SLmetrics_fbeta(SEXP actualSEXP, SEXP predictedSEXP, SEXP betaSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// weighted_LogLoss +double weighted_LogLoss(const IntegerVector& actual, const NumericMatrix& response, const NumericVector& w, const bool normalize); +RcppExport SEXP _SLmetrics_weighted_LogLoss(SEXP actualSEXP, SEXP responseSEXP, SEXP wSEXP, SEXP normalizeSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< const NumericMatrix& >::type response(responseSEXP); + Rcpp::traits::input_parameter< const NumericVector& >::type w(wSEXP); + Rcpp::traits::input_parameter< const bool >::type normalize(normalizeSEXP); + rcpp_result_gen = Rcpp::wrap(weighted_LogLoss(actual, response, w, normalize)); + return rcpp_result_gen; +END_RCPP +} +// DiagnosticOddsRatio +Rcpp::NumericVector DiagnosticOddsRatio(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted); +RcppExport SEXP _SLmetrics_DiagnosticOddsRatio(SEXP actualSEXP, SEXP predictedSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::RNGScope rcpp_rngScope_gen; + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + rcpp_result_gen = 
Rcpp::wrap(DiagnosticOddsRatio(actual, predicted)); + return rcpp_result_gen; +END_RCPP +} +// weighted_DiagnosticOddsRatio +Rcpp::NumericVector weighted_DiagnosticOddsRatio(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w); +RcppExport SEXP _SLmetrics_weighted_DiagnosticOddsRatio(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::RNGScope rcpp_rngScope_gen; + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type w(wSEXP); + rcpp_result_gen = Rcpp::wrap(weighted_DiagnosticOddsRatio(actual, predicted, w)); + return rcpp_result_gen; +END_RCPP +} +// cmatrix_DiagnosticOddsRatio +Rcpp::NumericVector cmatrix_DiagnosticOddsRatio(const Rcpp::NumericMatrix& x); +RcppExport SEXP _SLmetrics_cmatrix_DiagnosticOddsRatio(SEXP xSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::RNGScope rcpp_rngScope_gen; + Rcpp::traits::input_parameter< const Rcpp::NumericMatrix& >::type x(xSEXP); + rcpp_result_gen = Rcpp::wrap(cmatrix_DiagnosticOddsRatio(x)); + return rcpp_result_gen; +END_RCPP +} +// FBetaScore +Rcpp::NumericVector FBetaScore(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const double& beta, Rcpp::Nullable micro, bool na_rm); +RcppExport SEXP _SLmetrics_FBetaScore(SEXP actualSEXP, SEXP predictedSEXP, SEXP betaSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::RNGScope rcpp_rngScope_gen; + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); Rcpp::traits::input_parameter< const double& >::type beta(betaSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< bool >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(fbeta(actual, predicted, beta, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(FBetaScore(actual, predicted, beta, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// weighted_fbeta -NumericVector weighted_fbeta(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, const double& beta, Nullable micro, bool na_rm); -RcppExport SEXP _SLmetrics_weighted_fbeta(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP betaSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// weighted_FBetaScore +Rcpp::NumericVector weighted_FBetaScore(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, const double& beta, Rcpp::Nullable micro, bool na_rm); +RcppExport SEXP _SLmetrics_weighted_FBetaScore(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP betaSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< const NumericVector& >::type w(wSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< 
const Rcpp::NumericVector& >::type w(wSEXP); Rcpp::traits::input_parameter< const double& >::type beta(betaSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< bool >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(weighted_fbeta(actual, predicted, w, beta, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(weighted_FBetaScore(actual, predicted, w, beta, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// fbeta_cmatrix -NumericVector fbeta_cmatrix(const NumericMatrix& x, const double& beta, Nullable micro, bool na_rm); -RcppExport SEXP _SLmetrics_fbeta_cmatrix(SEXP xSEXP, SEXP betaSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// cmatrix_FBetaScore +Rcpp::NumericVector cmatrix_FBetaScore(const Rcpp::NumericMatrix& x, const double& beta, Rcpp::Nullable micro, bool na_rm); +RcppExport SEXP _SLmetrics_cmatrix_FBetaScore(SEXP xSEXP, SEXP betaSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const NumericMatrix& >::type x(xSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericMatrix& >::type x(xSEXP); Rcpp::traits::input_parameter< const double& >::type beta(betaSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< bool >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(fbeta_cmatrix(x, beta, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(cmatrix_FBetaScore(x, beta, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// fdr -NumericVector fdr(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_fdr(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// FalseDiscoveryRate +Rcpp::NumericVector FalseDiscoveryRate(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_FalseDiscoveryRate(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(fdr(actual, predicted, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(FalseDiscoveryRate(actual, predicted, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// weighted_fdr -NumericVector weighted_fdr(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_weighted_fdr(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// weighted_FalseDiscoveryRate +Rcpp::NumericVector weighted_FalseDiscoveryRate(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro, const 
bool& na_rm); +RcppExport SEXP _SLmetrics_weighted_FalseDiscoveryRate(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< const NumericVector& >::type w(wSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type w(wSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(weighted_fdr(actual, predicted, w, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(weighted_FalseDiscoveryRate(actual, predicted, w, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// fdr_cmatrix -NumericVector fdr_cmatrix(const NumericMatrix& x, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_fdr_cmatrix(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// cmatrix_FalseDiscoveryRate +Rcpp::NumericVector cmatrix_FalseDiscoveryRate(const Rcpp::NumericMatrix& x, Rcpp::Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_cmatrix_FalseDiscoveryRate(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const NumericMatrix& >::type x(xSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericMatrix& >::type x(xSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(fdr_cmatrix(x, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(cmatrix_FalseDiscoveryRate(x, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// fer -NumericVector fer(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_fer(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// FalseOmissionRate +Rcpp::NumericVector FalseOmissionRate(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_FalseOmissionRate(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(fer(actual, predicted, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(FalseOmissionRate(actual, predicted, micro, na_rm)); return rcpp_result_gen; END_RCPP 
} -// weighted_fer -NumericVector weighted_fer(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_weighted_fer(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// weighted_FalseOmissionRate +Rcpp::NumericVector weighted_FalseOmissionRate(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_weighted_FalseOmissionRate(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< const NumericVector& >::type w(wSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type w(wSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(weighted_fer(actual, predicted, w, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(weighted_FalseOmissionRate(actual, predicted, w, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// fer_cmatrix -NumericVector fer_cmatrix(const NumericMatrix& x, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_fer_cmatrix(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// cmatrix_FalseOmissionRate +Rcpp::NumericVector cmatrix_FalseOmissionRate(const Rcpp::NumericMatrix& x, Rcpp::Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_cmatrix_FalseOmissionRate(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const NumericMatrix& >::type x(xSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericMatrix& >::type x(xSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(fer_cmatrix(x, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(cmatrix_FalseOmissionRate(x, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// fpr -NumericVector fpr(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_fpr(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// FalsePositiveRate +Rcpp::NumericVector FalsePositiveRate(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_FalsePositiveRate(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + 
Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(fpr(actual, predicted, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(FalsePositiveRate(actual, predicted, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// weighted_fpr -NumericVector weighted_fpr(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_weighted_fpr(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// weighted_FalsePositiveRate +Rcpp::NumericVector weighted_FalsePositiveRate(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_weighted_FalsePositiveRate(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< const NumericVector& >::type w(wSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type w(wSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(weighted_fpr(actual, predicted, w, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(weighted_FalsePositiveRate(actual, predicted, w, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// fpr_cmatrix -NumericVector fpr_cmatrix(const NumericMatrix& x, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_fpr_cmatrix(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// cmatrix_FalsePositiveRate +Rcpp::NumericVector cmatrix_FalsePositiveRate(const Rcpp::NumericMatrix& x, Rcpp::Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_cmatrix_FalsePositiveRate(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const NumericMatrix& >::type x(xSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericMatrix& >::type x(xSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(fpr_cmatrix(x, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(cmatrix_FalsePositiveRate(x, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// fallout -NumericVector fallout(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_fallout(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// Fallout +Rcpp::NumericVector Fallout(const Rcpp::IntegerVector& actual, const 
Rcpp::IntegerVector& predicted, Rcpp::Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_Fallout(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(fallout(actual, predicted, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(Fallout(actual, predicted, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// weighted_fallout -NumericVector weighted_fallout(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_weighted_fallout(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// weighted_Fallout +Rcpp::NumericVector weighted_Fallout(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_weighted_Fallout(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< const NumericVector& >::type w(wSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type w(wSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(weighted_fallout(actual, predicted, w, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(weighted_Fallout(actual, predicted, w, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// fallout_cmatrix -NumericVector fallout_cmatrix(const NumericMatrix& x, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_fallout_cmatrix(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// cmatrix_Fallout +Rcpp::NumericVector cmatrix_Fallout(const Rcpp::NumericMatrix& x, Rcpp::Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_cmatrix_Fallout(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const NumericMatrix& >::type x(xSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericMatrix& >::type x(xSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(fallout_cmatrix(x, micro, 
na_rm)); + rcpp_result_gen = Rcpp::wrap(cmatrix_Fallout(x, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// fmi -Rcpp::NumericVector fmi(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted); -RcppExport SEXP _SLmetrics_fmi(SEXP actualSEXP, SEXP predictedSEXP) { +// FowlkesMallowsIndex +Rcpp::NumericVector FowlkesMallowsIndex(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted); +RcppExport SEXP _SLmetrics_FowlkesMallowsIndex(SEXP actualSEXP, SEXP predictedSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); - rcpp_result_gen = Rcpp::wrap(fmi(actual, predicted)); + rcpp_result_gen = Rcpp::wrap(FowlkesMallowsIndex(actual, predicted)); return rcpp_result_gen; END_RCPP } -// fmi_cmatrix -Rcpp::NumericVector fmi_cmatrix(const NumericMatrix& x); -RcppExport SEXP _SLmetrics_fmi_cmatrix(SEXP xSEXP) { +// cmatrix_FowlkesMallowsIndexClass +Rcpp::NumericVector cmatrix_FowlkesMallowsIndexClass(const Rcpp::NumericMatrix& x); +RcppExport SEXP _SLmetrics_cmatrix_FowlkesMallowsIndexClass(SEXP xSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const NumericMatrix& >::type x(xSEXP); - rcpp_result_gen = Rcpp::wrap(fmi_cmatrix(x)); + Rcpp::traits::input_parameter< const Rcpp::NumericMatrix& >::type x(xSEXP); + rcpp_result_gen = Rcpp::wrap(cmatrix_FowlkesMallowsIndexClass(x)); return rcpp_result_gen; END_RCPP } -// jaccard -NumericVector jaccard(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_jaccard(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// JaccardIndex +Rcpp::NumericVector JaccardIndex(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_JaccardIndex(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(jaccard(actual, predicted, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(JaccardIndex(actual, predicted, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// weighted_jaccard -NumericVector weighted_jaccard(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_weighted_jaccard(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// weighted_JaccardIndex +Rcpp::NumericVector weighted_JaccardIndex(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_weighted_JaccardIndex(SEXP 
actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< const NumericVector& >::type w(wSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type w(wSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(weighted_jaccard(actual, predicted, w, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(weighted_JaccardIndex(actual, predicted, w, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// jaccard_cmatrix -NumericVector jaccard_cmatrix(const NumericMatrix& x, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_jaccard_cmatrix(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// cmatrix_JaccardIndex +Rcpp::NumericVector cmatrix_JaccardIndex(const Rcpp::NumericMatrix& x, Rcpp::Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_cmatrix_JaccardIndex(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const NumericMatrix& >::type x(xSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericMatrix& >::type x(xSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(jaccard_cmatrix(x, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(cmatrix_JaccardIndex(x, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// csi -NumericVector csi(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_csi(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// CriticalSuccessIndex +Rcpp::NumericVector CriticalSuccessIndex(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_CriticalSuccessIndex(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(csi(actual, predicted, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(CriticalSuccessIndex(actual, predicted, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// weighted_csi -NumericVector weighted_csi(const IntegerVector& 
actual, const IntegerVector& predicted, const NumericVector& w, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_weighted_csi(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// weighted_CriticalSuccessIndex +Rcpp::NumericVector weighted_CriticalSuccessIndex(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_weighted_CriticalSuccessIndex(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< const NumericVector& >::type w(wSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type w(wSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(weighted_csi(actual, predicted, w, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(weighted_CriticalSuccessIndex(actual, predicted, w, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// csi_cmatrix -NumericVector csi_cmatrix(const NumericMatrix& x, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_csi_cmatrix(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// cmatrix_CriticalSuccessIndex +Rcpp::NumericVector cmatrix_CriticalSuccessIndex(const Rcpp::NumericMatrix& x, Rcpp::Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_cmatrix_CriticalSuccessIndex(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const NumericMatrix& >::type x(xSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericMatrix& >::type x(xSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(csi_cmatrix(x, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(cmatrix_CriticalSuccessIndex(x, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// tscore -NumericVector tscore(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_tscore(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// ThreatScore +Rcpp::NumericVector ThreatScore(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_ThreatScore(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type 
actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(tscore(actual, predicted, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(ThreatScore(actual, predicted, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// weighted_tscore -NumericVector weighted_tscore(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_weighted_tscore(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// weighted_ThreatScore +Rcpp::NumericVector weighted_ThreatScore(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_weighted_ThreatScore(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< const NumericVector& >::type w(wSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type w(wSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(weighted_tscore(actual, predicted, w, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(weighted_ThreatScore(actual, predicted, w, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// tscore_cmatrix -NumericVector tscore_cmatrix(const NumericMatrix& x, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_tscore_cmatrix(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// cmatrix_ThreatScore +Rcpp::NumericVector cmatrix_ThreatScore(const Rcpp::NumericMatrix& x, Rcpp::Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_cmatrix_ThreatScore(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const NumericMatrix& >::type x(xSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericMatrix& >::type x(xSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(tscore_cmatrix(x, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(cmatrix_ThreatScore(x, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// mcc -Rcpp::NumericVector mcc(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted); -RcppExport SEXP _SLmetrics_mcc(SEXP actualSEXP, SEXP predictedSEXP) { +// MatthewsCorrelationCoefficient +Rcpp::NumericVector MatthewsCorrelationCoefficient(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted); +RcppExport SEXP _SLmetrics_MatthewsCorrelationCoefficient(SEXP actualSEXP, SEXP 
predictedSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); - rcpp_result_gen = Rcpp::wrap(mcc(actual, predicted)); + rcpp_result_gen = Rcpp::wrap(MatthewsCorrelationCoefficient(actual, predicted)); return rcpp_result_gen; END_RCPP } -// weigthed_mcc -Rcpp::NumericVector weigthed_mcc(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector w); -RcppExport SEXP _SLmetrics_weigthed_mcc(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP) { +// weigthed_MatthewsCorrelationCoefficient +Rcpp::NumericVector weigthed_MatthewsCorrelationCoefficient(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector w); +RcppExport SEXP _SLmetrics_weigthed_MatthewsCorrelationCoefficient(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); Rcpp::traits::input_parameter< const Rcpp::NumericVector >::type w(wSEXP); - rcpp_result_gen = Rcpp::wrap(weigthed_mcc(actual, predicted, w)); + rcpp_result_gen = Rcpp::wrap(weigthed_MatthewsCorrelationCoefficient(actual, predicted, w)); return rcpp_result_gen; END_RCPP } -// mcc_cmatrix -Rcpp::NumericVector mcc_cmatrix(const Rcpp::NumericMatrix& x); -RcppExport SEXP _SLmetrics_mcc_cmatrix(SEXP xSEXP) { +// cmatrix_MatthewsCorrelationCoefficient +Rcpp::NumericVector cmatrix_MatthewsCorrelationCoefficient(const Rcpp::NumericMatrix& x); +RcppExport SEXP _SLmetrics_cmatrix_MatthewsCorrelationCoefficient(SEXP xSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const Rcpp::NumericMatrix& >::type x(xSEXP); - rcpp_result_gen = Rcpp::wrap(mcc_cmatrix(x)); + rcpp_result_gen = Rcpp::wrap(cmatrix_MatthewsCorrelationCoefficient(x)); return rcpp_result_gen; END_RCPP } -// phi -Rcpp::NumericVector phi(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted); -RcppExport SEXP _SLmetrics_phi(SEXP actualSEXP, SEXP predictedSEXP) { +// PhiCoefficient +Rcpp::NumericVector PhiCoefficient(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted); +RcppExport SEXP _SLmetrics_PhiCoefficient(SEXP actualSEXP, SEXP predictedSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); - rcpp_result_gen = Rcpp::wrap(phi(actual, predicted)); + rcpp_result_gen = Rcpp::wrap(PhiCoefficient(actual, predicted)); return rcpp_result_gen; END_RCPP } -// weighted_phi -Rcpp::NumericVector weighted_phi(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector w); -RcppExport SEXP _SLmetrics_weighted_phi(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP) { +// weighted_PhiCoefficient +Rcpp::NumericVector weighted_PhiCoefficient(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector w); +RcppExport SEXP _SLmetrics_weighted_PhiCoefficient(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP) { BEGIN_RCPP Rcpp::RObject 
rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); Rcpp::traits::input_parameter< const Rcpp::NumericVector >::type w(wSEXP); - rcpp_result_gen = Rcpp::wrap(weighted_phi(actual, predicted, w)); + rcpp_result_gen = Rcpp::wrap(weighted_PhiCoefficient(actual, predicted, w)); return rcpp_result_gen; END_RCPP } -// phi_cmatrix -Rcpp::NumericVector phi_cmatrix(const Rcpp::NumericMatrix& x); -RcppExport SEXP _SLmetrics_phi_cmatrix(SEXP xSEXP) { +// cmatrix_PhiCoefficient +Rcpp::NumericVector cmatrix_PhiCoefficient(const Rcpp::NumericMatrix& x); +RcppExport SEXP _SLmetrics_cmatrix_PhiCoefficient(SEXP xSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const Rcpp::NumericMatrix& >::type x(xSEXP); - rcpp_result_gen = Rcpp::wrap(phi_cmatrix(x)); + rcpp_result_gen = Rcpp::wrap(cmatrix_PhiCoefficient(x)); return rcpp_result_gen; END_RCPP } -// nlr -NumericVector nlr(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro); -RcppExport SEXP _SLmetrics_nlr(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP) { +// NegativeLikelihoodRatio +Rcpp::NumericVector NegativeLikelihoodRatio(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted); +RcppExport SEXP _SLmetrics_NegativeLikelihoodRatio(SEXP actualSEXP, SEXP predictedSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); - rcpp_result_gen = Rcpp::wrap(nlr(actual, predicted, micro)); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + rcpp_result_gen = Rcpp::wrap(NegativeLikelihoodRatio(actual, predicted)); return rcpp_result_gen; END_RCPP } -// weighted_nlr -NumericVector weighted_nlr(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, Nullable micro); -RcppExport SEXP _SLmetrics_weighted_nlr(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP) { +// weighted_NegativeLikelihoodRatio +Rcpp::NumericVector weighted_NegativeLikelihoodRatio(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w); +RcppExport SEXP _SLmetrics_weighted_NegativeLikelihoodRatio(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< const NumericVector& >::type w(wSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); - rcpp_result_gen = Rcpp::wrap(weighted_nlr(actual, predicted, w, micro)); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type w(wSEXP); + rcpp_result_gen = Rcpp::wrap(weighted_NegativeLikelihoodRatio(actual, predicted, w)); 
return rcpp_result_gen; END_RCPP } -// nlr_cmatrix -NumericVector nlr_cmatrix(const NumericMatrix& x, Nullable micro); -RcppExport SEXP _SLmetrics_nlr_cmatrix(SEXP xSEXP, SEXP microSEXP) { +// cmatrix_NegativeLikelihoodRatio +Rcpp::NumericVector cmatrix_NegativeLikelihoodRatio(const Rcpp::NumericMatrix& x); +RcppExport SEXP _SLmetrics_cmatrix_NegativeLikelihoodRatio(SEXP xSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const NumericMatrix& >::type x(xSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); - rcpp_result_gen = Rcpp::wrap(nlr_cmatrix(x, micro)); + Rcpp::traits::input_parameter< const Rcpp::NumericMatrix& >::type x(xSEXP); + rcpp_result_gen = Rcpp::wrap(cmatrix_NegativeLikelihoodRatio(x)); return rcpp_result_gen; END_RCPP } -// npv -NumericVector npv(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_npv(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// NegativePredictitveValue +Rcpp::NumericVector NegativePredictitveValue(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_NegativePredictitveValue(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(npv(actual, predicted, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(NegativePredictitveValue(actual, predicted, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// weighted_npv -NumericVector weighted_npv(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_weighted_npv(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// weighted_NegativePredictitveValue +Rcpp::NumericVector weighted_NegativePredictitveValue(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_weighted_NegativePredictitveValue(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< const NumericVector& >::type w(wSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type w(wSEXP); + 
Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(weighted_npv(actual, predicted, w, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(weighted_NegativePredictitveValue(actual, predicted, w, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// npv_cmatrix -NumericVector npv_cmatrix(const NumericMatrix& x, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_npv_cmatrix(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// cmatrix_NegativePredictitveValue +Rcpp::NumericVector cmatrix_NegativePredictitveValue(const Rcpp::NumericMatrix& x, Rcpp::Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_cmatrix_NegativePredictitveValue(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const NumericMatrix& >::type x(xSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericMatrix& >::type x(xSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(npv_cmatrix(x, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(cmatrix_NegativePredictitveValue(x, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// plr -NumericVector plr(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro); -RcppExport SEXP _SLmetrics_plr(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP) { +// PositiveLikelihoodRatio +Rcpp::NumericVector PositiveLikelihoodRatio(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted); +RcppExport SEXP _SLmetrics_PositiveLikelihoodRatio(SEXP actualSEXP, SEXP predictedSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); - rcpp_result_gen = Rcpp::wrap(plr(actual, predicted, micro)); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + rcpp_result_gen = Rcpp::wrap(PositiveLikelihoodRatio(actual, predicted)); return rcpp_result_gen; END_RCPP } -// weighted_plr -NumericVector weighted_plr(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, Nullable micro); -RcppExport SEXP _SLmetrics_weighted_plr(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP) { +// weighted_PositiveLikelihoodRatio +Rcpp::NumericVector weighted_PositiveLikelihoodRatio(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w); +RcppExport SEXP _SLmetrics_weighted_PositiveLikelihoodRatio(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< const NumericVector& >::type w(wSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); - rcpp_result_gen = Rcpp::wrap(weighted_plr(actual, predicted, w, 
micro)); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type w(wSEXP); + rcpp_result_gen = Rcpp::wrap(weighted_PositiveLikelihoodRatio(actual, predicted, w)); return rcpp_result_gen; END_RCPP } -// plr_cmatrix -NumericVector plr_cmatrix(const NumericMatrix& x, Nullable micro); -RcppExport SEXP _SLmetrics_plr_cmatrix(SEXP xSEXP, SEXP microSEXP) { +// cmatrix_PositiveLikelihoodRatio +Rcpp::NumericVector cmatrix_PositiveLikelihoodRatio(const Rcpp::NumericMatrix& x); +RcppExport SEXP _SLmetrics_cmatrix_PositiveLikelihoodRatio(SEXP xSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const NumericMatrix& >::type x(xSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); - rcpp_result_gen = Rcpp::wrap(plr_cmatrix(x, micro)); + Rcpp::traits::input_parameter< const Rcpp::NumericMatrix& >::type x(xSEXP); + rcpp_result_gen = Rcpp::wrap(cmatrix_PositiveLikelihoodRatio(x)); return rcpp_result_gen; END_RCPP } -// precision -Rcpp::NumericVector precision(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro, bool na_rm); -RcppExport SEXP _SLmetrics_precision(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// Precision +Rcpp::NumericVector Precision(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro, bool na_rm); +RcppExport SEXP _SLmetrics_Precision(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; @@ -744,13 +801,13 @@ BEGIN_RCPP Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< bool >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(precision(actual, predicted, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(Precision(actual, predicted, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// weighted_precision -Rcpp::NumericVector weighted_precision(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro, bool na_rm); -RcppExport SEXP _SLmetrics_weighted_precision(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// weighted_Precision +Rcpp::NumericVector weighted_Precision(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro, bool na_rm); +RcppExport SEXP _SLmetrics_weighted_Precision(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; @@ -759,26 +816,26 @@ BEGIN_RCPP Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type w(wSEXP); Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< bool >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(weighted_precision(actual, predicted, w, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(weighted_Precision(actual, predicted, w, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// precision_cmatrix -Rcpp::NumericVector precision_cmatrix(const NumericMatrix& x, Rcpp::Nullable micro, const bool& 
na_rm); -RcppExport SEXP _SLmetrics_precision_cmatrix(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// cmatrix_Precision +Rcpp::NumericVector cmatrix_Precision(const NumericMatrix& x, Rcpp::Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_cmatrix_Precision(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const NumericMatrix& >::type x(xSEXP); Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(precision_cmatrix(x, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(cmatrix_Precision(x, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// ppv -Rcpp::NumericVector ppv(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro, bool na_rm); -RcppExport SEXP _SLmetrics_ppv(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// PositivePredictiveValue +Rcpp::NumericVector PositivePredictiveValue(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro, bool na_rm); +RcppExport SEXP _SLmetrics_PositivePredictiveValue(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; @@ -786,13 +843,13 @@ BEGIN_RCPP Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< bool >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(ppv(actual, predicted, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(PositivePredictiveValue(actual, predicted, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// weighted_ppv -Rcpp::NumericVector weighted_ppv(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro, bool na_rm); -RcppExport SEXP _SLmetrics_weighted_ppv(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// weighted_PositivePredictiveValue +Rcpp::NumericVector weighted_PositivePredictiveValue(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro, bool na_rm); +RcppExport SEXP _SLmetrics_weighted_PositivePredictiveValue(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; @@ -801,41 +858,53 @@ BEGIN_RCPP Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type w(wSEXP); Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< bool >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(weighted_ppv(actual, predicted, w, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(weighted_PositivePredictiveValue(actual, predicted, w, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// ppv_cmatrix -Rcpp::NumericVector ppv_cmatrix(const NumericMatrix& x, Rcpp::Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_ppv_cmatrix(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// cmatrix_PositivePredictiveValue +Rcpp::NumericVector cmatrix_PositivePredictiveValue(const Rcpp::NumericMatrix& x, Rcpp::Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_cmatrix_PositivePredictiveValue(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { 
BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const NumericMatrix& >::type x(xSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericMatrix& >::type x(xSEXP); Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(ppv_cmatrix(x, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(cmatrix_PositivePredictiveValue(x, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// prROC -Rcpp::DataFrame prROC(const Rcpp::IntegerVector& actual, const Rcpp::NumericVector& response, Nullable micro, Rcpp::Nullable thresholds, const bool& na_rm); -RcppExport SEXP _SLmetrics_prROC(SEXP actualSEXP, SEXP responseSEXP, SEXP microSEXP, SEXP thresholdsSEXP, SEXP na_rmSEXP) { +// PrecisionRecallCurve +Rcpp::DataFrame PrecisionRecallCurve(const Rcpp::IntegerVector& actual, const Rcpp::NumericVector& response, Rcpp::Nullable thresholds); +RcppExport SEXP _SLmetrics_PrecisionRecallCurve(SEXP actualSEXP, SEXP responseSEXP, SEXP thresholdsSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type response(responseSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< Rcpp::Nullable >::type thresholds(thresholdsSEXP); - Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(prROC(actual, response, micro, thresholds, na_rm)); + rcpp_result_gen = Rcpp::wrap(PrecisionRecallCurve(actual, response, thresholds)); + return rcpp_result_gen; +END_RCPP +} +// weighted_PrecisionRecallCurve +Rcpp::DataFrame weighted_PrecisionRecallCurve(const Rcpp::IntegerVector& actual, const Rcpp::NumericVector& response, const Rcpp::NumericVector& w, Rcpp::Nullable thresholds); +RcppExport SEXP _SLmetrics_weighted_PrecisionRecallCurve(SEXP actualSEXP, SEXP responseSEXP, SEXP wSEXP, SEXP thresholdsSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::RNGScope rcpp_rngScope_gen; + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type response(responseSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type w(wSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable >::type thresholds(thresholdsSEXP); + rcpp_result_gen = Rcpp::wrap(weighted_PrecisionRecallCurve(actual, response, w, thresholds)); return rcpp_result_gen; END_RCPP } -// recall -Rcpp::NumericVector recall(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro, bool na_rm); -RcppExport SEXP _SLmetrics_recall(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// Recall +Rcpp::NumericVector Recall(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro, bool na_rm); +RcppExport SEXP _SLmetrics_Recall(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; @@ -843,13 +912,13 @@ BEGIN_RCPP Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< bool >::type na_rm(na_rmSEXP); - rcpp_result_gen = 
Rcpp::wrap(recall(actual, predicted, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(Recall(actual, predicted, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// weighted_recall -Rcpp::NumericVector weighted_recall(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro, bool na_rm); -RcppExport SEXP _SLmetrics_weighted_recall(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// weighted_Recall +Rcpp::NumericVector weighted_Recall(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro, bool na_rm); +RcppExport SEXP _SLmetrics_weighted_Recall(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; @@ -858,26 +927,26 @@ BEGIN_RCPP Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type w(wSEXP); Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< bool >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(weighted_recall(actual, predicted, w, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(weighted_Recall(actual, predicted, w, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// recall_cmatrix -Rcpp::NumericVector recall_cmatrix(const NumericMatrix& x, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_recall_cmatrix(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// cmatrix_Recall +Rcpp::NumericVector cmatrix_Recall(const NumericMatrix& x, Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_cmatrix_Recall(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const NumericMatrix& >::type x(xSEXP); Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(recall_cmatrix(x, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(cmatrix_Recall(x, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// sensitivity -Rcpp::NumericVector sensitivity(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_sensitivity(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// Sensitivity +Rcpp::NumericVector Sensitivity(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_Sensitivity(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; @@ -885,13 +954,13 @@ BEGIN_RCPP Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(sensitivity(actual, predicted, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(Sensitivity(actual, predicted, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// weighted_sensitivity -Rcpp::NumericVector weighted_sensitivity(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro, bool na_rm); -RcppExport SEXP _SLmetrics_weighted_sensitivity(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { 
+// weighted_Sensitivity +Rcpp::NumericVector weighted_Sensitivity(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro, bool na_rm); +RcppExport SEXP _SLmetrics_weighted_Sensitivity(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; @@ -900,26 +969,26 @@ BEGIN_RCPP Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type w(wSEXP); Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< bool >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(weighted_sensitivity(actual, predicted, w, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(weighted_Sensitivity(actual, predicted, w, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// sensitivity_cmatrix -Rcpp::NumericVector sensitivity_cmatrix(const NumericMatrix& x, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_sensitivity_cmatrix(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// cmatrix_Sensitivity +Rcpp::NumericVector cmatrix_Sensitivity(const NumericMatrix& x, Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_cmatrix_Sensitivity(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const NumericMatrix& >::type x(xSEXP); Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(sensitivity_cmatrix(x, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(cmatrix_Sensitivity(x, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// tpr -Rcpp::NumericVector tpr(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_tpr(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// TruePositiveRate +Rcpp::NumericVector TruePositiveRate(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_TruePositiveRate(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; @@ -927,13 +996,13 @@ BEGIN_RCPP Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(tpr(actual, predicted, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(TruePositiveRate(actual, predicted, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// weighted_tpr -Rcpp::NumericVector weighted_tpr(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro, bool na_rm); -RcppExport SEXP _SLmetrics_weighted_tpr(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// weighted_TruePositiveRate +Rcpp::NumericVector weighted_TruePositiveRate(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro, bool na_rm); +RcppExport SEXP _SLmetrics_weighted_TruePositiveRate(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; @@ -942,20 +1011,20 @@ 
BEGIN_RCPP Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type w(wSEXP); Rcpp::traits::input_parameter< Rcpp::Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< bool >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(weighted_tpr(actual, predicted, w, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(weighted_TruePositiveRate(actual, predicted, w, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// tpr_cmatrix -Rcpp::NumericVector tpr_cmatrix(const NumericMatrix& x, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_tpr_cmatrix(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// cmatrix_TruePositiveRate +Rcpp::NumericVector cmatrix_TruePositiveRate(const NumericMatrix& x, Nullable micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_cmatrix_TruePositiveRate(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const NumericMatrix& >::type x(xSEXP); Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(tpr_cmatrix(x, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(cmatrix_TruePositiveRate(x, micro, na_rm)); return rcpp_result_gen; END_RCPP } @@ -972,180 +1041,192 @@ BEGIN_RCPP return rcpp_result_gen; END_RCPP } -// ROC -Rcpp::DataFrame ROC(const Rcpp::IntegerVector& actual, const Rcpp::NumericVector& response, Nullable micro, Rcpp::Nullable thresholds, const bool& na_rm); -RcppExport SEXP _SLmetrics_ROC(SEXP actualSEXP, SEXP responseSEXP, SEXP microSEXP, SEXP thresholdsSEXP, SEXP na_rmSEXP) { +// RecieverOperatorCharacteristics +Rcpp::DataFrame RecieverOperatorCharacteristics(const Rcpp::IntegerVector& actual, const Rcpp::NumericVector& response, Rcpp::Nullable thresholds); +RcppExport SEXP _SLmetrics_RecieverOperatorCharacteristics(SEXP actualSEXP, SEXP responseSEXP, SEXP thresholdsSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type response(responseSEXP); - Rcpp::traits::input_parameter< Nullable >::type micro(microSEXP); Rcpp::traits::input_parameter< Rcpp::Nullable >::type thresholds(thresholdsSEXP); - Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(ROC(actual, response, micro, thresholds, na_rm)); + rcpp_result_gen = Rcpp::wrap(RecieverOperatorCharacteristics(actual, response, thresholds)); return rcpp_result_gen; END_RCPP } -// specificity -NumericVector specificity(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_specificity(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// weighted_RecieverOperatorCharacteristics +Rcpp::DataFrame weighted_RecieverOperatorCharacteristics(const Rcpp::IntegerVector& actual, const Rcpp::NumericVector& response, const Rcpp::NumericVector& w, Rcpp::Nullable thresholds); +RcppExport SEXP _SLmetrics_weighted_RecieverOperatorCharacteristics(SEXP actualSEXP, SEXP responseSEXP, SEXP wSEXP, SEXP thresholdsSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - 
Rcpp::traits::input_parameter< Nullable<bool> >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type response(responseSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type w(wSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable<Rcpp::NumericVector> >::type thresholds(thresholdsSEXP); + rcpp_result_gen = Rcpp::wrap(weighted_RecieverOperatorCharacteristics(actual, response, w, thresholds)); + return rcpp_result_gen; +END_RCPP +} +// Specificity +Rcpp::NumericVector Specificity(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable<bool> micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_Specificity(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::RNGScope rcpp_rngScope_gen; + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable<bool> >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(specificity(actual, predicted, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(Specificity(actual, predicted, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// weighted_specificity -NumericVector weighted_specificity(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, Nullable<bool> micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_weighted_specificity(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// weighted_Specificity +Rcpp::NumericVector weighted_Specificity(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable<bool> micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_weighted_Specificity(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< const NumericVector& >::type w(wSEXP); - Rcpp::traits::input_parameter< Nullable<bool> >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type w(wSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable<bool> >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(weighted_specificity(actual, predicted, w, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(weighted_Specificity(actual, predicted, w, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// specificity_cmatrix -NumericVector specificity_cmatrix(const NumericMatrix& x, Nullable<bool> micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_specificity_cmatrix(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// cmatrix_Specificity +Rcpp::NumericVector cmatrix_Specificity(const Rcpp::NumericMatrix& x, Rcpp::Nullable<bool> micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_cmatrix_Specificity(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const NumericMatrix& >::type x(xSEXP); - Rcpp::traits::input_parameter< Nullable<bool> >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericMatrix& >::type x(xSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable<bool> >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(specificity_cmatrix(x, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(cmatrix_Specificity(x, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// tnr -NumericVector tnr(const IntegerVector& actual, const IntegerVector& predicted, Nullable<bool> micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_tnr(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// TrueNegativeRate +Rcpp::NumericVector TrueNegativeRate(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable<bool> micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_TrueNegativeRate(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< Nullable<bool> >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable<bool> >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(tnr(actual, predicted, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(TrueNegativeRate(actual, predicted, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// weighted_tnr -NumericVector weighted_tnr(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, Nullable<bool> micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_weighted_tnr(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// weighted_TrueNegativeRate +Rcpp::NumericVector weighted_TrueNegativeRate(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable<bool> micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_weighted_TrueNegativeRate(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< const NumericVector& >::type w(wSEXP); - Rcpp::traits::input_parameter< Nullable<bool> >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type w(wSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable<bool> >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(weighted_tnr(actual, predicted, w, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(weighted_TrueNegativeRate(actual, predicted, w, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// tnr_cmatrix -NumericVector tnr_cmatrix(const NumericMatrix& x, Nullable<bool> micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_tnr_cmatrix(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// cmatrix_TrueNegativeRate +Rcpp::NumericVector cmatrix_TrueNegativeRate(const Rcpp::NumericMatrix& x, Rcpp::Nullable<bool> micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_cmatrix_TrueNegativeRate(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const NumericMatrix& >::type x(xSEXP); - Rcpp::traits::input_parameter< Nullable<bool> >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericMatrix& >::type x(xSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable<bool> >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(tnr_cmatrix(x, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(cmatrix_TrueNegativeRate(x, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// selectivity -NumericVector selectivity(const IntegerVector& actual, const IntegerVector& predicted, Nullable<bool> micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_selectivity(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// Selectivity +Rcpp::NumericVector Selectivity(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable<bool> micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_Selectivity(SEXP actualSEXP, SEXP predictedSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< Nullable<bool> >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable<bool> >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(selectivity(actual, predicted, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(Selectivity(actual, predicted, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// weighted_selectivity -NumericVector weighted_selectivity(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, Nullable<bool> micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_weighted_selectivity(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// weighted_Selectivity +Rcpp::NumericVector weighted_Selectivity(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable<bool> micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_weighted_Selectivity(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const IntegerVector& >::type actual(actualSEXP); - Rcpp::traits::input_parameter< const IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< const NumericVector& >::type w(wSEXP); - Rcpp::traits::input_parameter< Nullable<bool> >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type w(wSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable<bool> >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(weighted_selectivity(actual, predicted, w, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(weighted_Selectivity(actual, predicted, w, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// selectivity_cmatrix -NumericVector selectivity_cmatrix(const NumericMatrix& x, Nullable<bool> micro, const bool& na_rm); -RcppExport SEXP _SLmetrics_selectivity_cmatrix(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { +// cmatrix_Selectivity +Rcpp::NumericVector cmatrix_Selectivity(const Rcpp::NumericMatrix& x, Rcpp::Nullable<bool> micro, const bool& na_rm); +RcppExport SEXP _SLmetrics_cmatrix_Selectivity(SEXP xSEXP, SEXP microSEXP, SEXP na_rmSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; - Rcpp::traits::input_parameter< const NumericMatrix& >::type x(xSEXP); - Rcpp::traits::input_parameter< Nullable<bool> >::type micro(microSEXP); + Rcpp::traits::input_parameter< const Rcpp::NumericMatrix& >::type x(xSEXP); + Rcpp::traits::input_parameter< Rcpp::Nullable<bool> >::type micro(microSEXP); Rcpp::traits::input_parameter< const bool& >::type na_rm(na_rmSEXP); - rcpp_result_gen = Rcpp::wrap(selectivity_cmatrix(x, micro, na_rm)); + rcpp_result_gen = Rcpp::wrap(cmatrix_Selectivity(x, micro, na_rm)); return rcpp_result_gen; END_RCPP } -// zerooneloss -Rcpp::NumericVector zerooneloss(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted); -RcppExport SEXP _SLmetrics_zerooneloss(SEXP actualSEXP, SEXP predictedSEXP) { +// ZeroOneLoss +Rcpp::NumericVector ZeroOneLoss(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted); +RcppExport SEXP _SLmetrics_ZeroOneLoss(SEXP actualSEXP, SEXP predictedSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); - rcpp_result_gen = Rcpp::wrap(zerooneloss(actual, predicted)); + rcpp_result_gen = Rcpp::wrap(ZeroOneLoss(actual, predicted)); return rcpp_result_gen; END_RCPP } -// weighted_zerooneloss -Rcpp::NumericVector weighted_zerooneloss(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const NumericVector& w); -RcppExport SEXP _SLmetrics_weighted_zerooneloss(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP) { +// weighted_ZeroOneLoss +Rcpp::NumericVector weighted_ZeroOneLoss(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w); +RcppExport SEXP _SLmetrics_weighted_ZeroOneLoss(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP); Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP); - Rcpp::traits::input_parameter< const NumericVector& >::type w(wSEXP); - rcpp_result_gen = Rcpp::wrap(weighted_zerooneloss(actual, predicted, w)); + Rcpp::traits::input_parameter< const Rcpp::NumericVector& >::type w(wSEXP); + rcpp_result_gen = Rcpp::wrap(weighted_ZeroOneLoss(actual, predicted, w)); return rcpp_result_gen; END_RCPP }
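Every block in the regenerated RcppExports.cpp follows the same mechanical shape: a `SEXP`-level wrapper that unmarshals arguments, calls the C++ metric, and wraps the result, plus one entry in the registration table. A minimal sketch of that pattern follows; the `Accuracy()` body here is a toy stand-in for illustration, not the SLmetrics implementation (which returns an `Rcpp::NumericVector` and lives in classification_Accuracy.cpp).

```cpp
// Sketch of the wrapper/registration pattern Rcpp::compileAttributes()
// emits for every exported metric. Toy Accuracy() body for illustration.
#include <Rcpp.h>
#include <R_ext/Rdynload.h>

double Accuracy(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted) {
    double hits = 0.0;
    for (int i = 0; i < actual.size(); ++i) hits += (actual[i] == predicted[i]);
    return hits / actual.size();  // fraction of matching labels
}

// SEXP-level wrapper: unmarshal arguments, call, wrap the result
RcppExport SEXP _SLmetrics_Accuracy(SEXP actualSEXP, SEXP predictedSEXP) {
BEGIN_RCPP
    Rcpp::RObject rcpp_result_gen;
    Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type actual(actualSEXP);
    Rcpp::traits::input_parameter< const Rcpp::IntegerVector& >::type predicted(predictedSEXP);
    rcpp_result_gen = Rcpp::wrap(Accuracy(actual, predicted));
    return rcpp_result_gen;
END_RCPP
}

// registration table: native routine name, function pointer, arity
static const R_CallMethodDef CallEntries[] = {
    {"_SLmetrics_Accuracy", (DL_FUNC) &_SLmetrics_Accuracy, 2},
    {NULL, NULL, 0}
};
```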
-// zerooneloss_cmatrix -Rcpp::NumericVector zerooneloss_cmatrix(const Rcpp::NumericMatrix& x); -RcppExport SEXP _SLmetrics_zerooneloss_cmatrix(SEXP xSEXP) { +// cmatrix_ZeroOneLoss +Rcpp::NumericVector cmatrix_ZeroOneLoss(const Rcpp::NumericMatrix& x); +RcppExport SEXP _SLmetrics_cmatrix_ZeroOneLoss(SEXP xSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< const Rcpp::NumericMatrix& >::type x(xSEXP); - rcpp_result_gen = Rcpp::wrap(zerooneloss_cmatrix(x)); + rcpp_result_gen = Rcpp::wrap(cmatrix_ZeroOneLoss(x)); return rcpp_result_gen; END_RCPP } @@ -1384,6 +1465,33 @@ BEGIN_RCPP return rcpp_result_gen; END_RCPP } +// RelativeRootMeanSquaredError +double RelativeRootMeanSquaredError(const std::vector<double>& actual, const std::vector<double>& predicted, const int& normalization); +RcppExport SEXP _SLmetrics_RelativeRootMeanSquaredError(SEXP actualSEXP, SEXP predictedSEXP, SEXP normalizationSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::RNGScope rcpp_rngScope_gen; + Rcpp::traits::input_parameter< const std::vector<double>& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const std::vector<double>& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< const int& >::type normalization(normalizationSEXP); + rcpp_result_gen = Rcpp::wrap(RelativeRootMeanSquaredError(actual, predicted, normalization)); + return rcpp_result_gen; +END_RCPP +} +// weighted_RelativeRootMeanSquaredError +double weighted_RelativeRootMeanSquaredError(const std::vector<double>& actual, const std::vector<double>& predicted, const std::vector<double> w, const int& normalization); +RcppExport SEXP _SLmetrics_weighted_RelativeRootMeanSquaredError(SEXP actualSEXP, SEXP predictedSEXP, SEXP wSEXP, SEXP normalizationSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::RNGScope rcpp_rngScope_gen; + Rcpp::traits::input_parameter< const std::vector<double>& >::type actual(actualSEXP); + Rcpp::traits::input_parameter< const std::vector<double>& >::type predicted(predictedSEXP); + Rcpp::traits::input_parameter< const std::vector<double> >::type w(wSEXP); + Rcpp::traits::input_parameter< const int& >::type normalization(normalizationSEXP); + rcpp_result_gen = Rcpp::wrap(weighted_RelativeRootMeanSquaredError(actual, predicted, w, normalization)); + return rcpp_result_gen; +END_RCPP +} // rmse double rmse(const std::vector<double>& actual, const std::vector<double>& predicted); RcppExport SEXP _SLmetrics_rmse(SEXP actualSEXP, SEXP predictedSEXP) { @@ -1486,90 +1594,97 @@ END_RCPP } static const R_CallMethodDef CallEntries[] = { - {"_SLmetrics_accuracy", (DL_FUNC) &_SLmetrics_accuracy, 2}, - {"_SLmetrics_weighted_accuracy", (DL_FUNC) &_SLmetrics_weighted_accuracy, 3}, - {"_SLmetrics_accuracy_cmatrix", (DL_FUNC) &_SLmetrics_accuracy_cmatrix, 1}, - {"_SLmetrics_baccuracy", (DL_FUNC) &_SLmetrics_baccuracy, 4}, - {"_SLmetrics_weighted_baccuracy", (DL_FUNC) &_SLmetrics_weighted_baccuracy, 5}, - {"_SLmetrics_baccuracy_cmatrix", (DL_FUNC) &_SLmetrics_baccuracy_cmatrix, 3}, - {"_SLmetrics_ckappa", (DL_FUNC) &_SLmetrics_ckappa, 3}, - {"_SLmetrics_weighted_ckappa", (DL_FUNC) &_SLmetrics_weighted_ckappa, 4}, - {"_SLmetrics_ckappa_cmatrix", (DL_FUNC) &_SLmetrics_ckappa_cmatrix, 2}, - {"_SLmetrics_cmatrix", (DL_FUNC) &_SLmetrics_cmatrix, 3}, - {"_SLmetrics_dor", (DL_FUNC) &_SLmetrics_dor, 3}, - {"_SLmetrics_weighted_dor", (DL_FUNC) &_SLmetrics_weighted_dor, 4}, - {"_SLmetrics_dor_cmatrix", (DL_FUNC) &_SLmetrics_dor_cmatrix, 2}, - {"_SLmetrics_fbeta", (DL_FUNC) &_SLmetrics_fbeta, 5}, - {"_SLmetrics_weighted_fbeta", (DL_FUNC)
&_SLmetrics_weighted_fbeta, 6}, - {"_SLmetrics_fbeta_cmatrix", (DL_FUNC) &_SLmetrics_fbeta_cmatrix, 4}, - {"_SLmetrics_fdr", (DL_FUNC) &_SLmetrics_fdr, 4}, - {"_SLmetrics_weighted_fdr", (DL_FUNC) &_SLmetrics_weighted_fdr, 5}, - {"_SLmetrics_fdr_cmatrix", (DL_FUNC) &_SLmetrics_fdr_cmatrix, 3}, - {"_SLmetrics_fer", (DL_FUNC) &_SLmetrics_fer, 4}, - {"_SLmetrics_weighted_fer", (DL_FUNC) &_SLmetrics_weighted_fer, 5}, - {"_SLmetrics_fer_cmatrix", (DL_FUNC) &_SLmetrics_fer_cmatrix, 3}, - {"_SLmetrics_fpr", (DL_FUNC) &_SLmetrics_fpr, 4}, - {"_SLmetrics_weighted_fpr", (DL_FUNC) &_SLmetrics_weighted_fpr, 5}, - {"_SLmetrics_fpr_cmatrix", (DL_FUNC) &_SLmetrics_fpr_cmatrix, 3}, - {"_SLmetrics_fallout", (DL_FUNC) &_SLmetrics_fallout, 4}, - {"_SLmetrics_weighted_fallout", (DL_FUNC) &_SLmetrics_weighted_fallout, 5}, - {"_SLmetrics_fallout_cmatrix", (DL_FUNC) &_SLmetrics_fallout_cmatrix, 3}, - {"_SLmetrics_fmi", (DL_FUNC) &_SLmetrics_fmi, 2}, - {"_SLmetrics_fmi_cmatrix", (DL_FUNC) &_SLmetrics_fmi_cmatrix, 1}, - {"_SLmetrics_jaccard", (DL_FUNC) &_SLmetrics_jaccard, 4}, - {"_SLmetrics_weighted_jaccard", (DL_FUNC) &_SLmetrics_weighted_jaccard, 5}, - {"_SLmetrics_jaccard_cmatrix", (DL_FUNC) &_SLmetrics_jaccard_cmatrix, 3}, - {"_SLmetrics_csi", (DL_FUNC) &_SLmetrics_csi, 4}, - {"_SLmetrics_weighted_csi", (DL_FUNC) &_SLmetrics_weighted_csi, 5}, - {"_SLmetrics_csi_cmatrix", (DL_FUNC) &_SLmetrics_csi_cmatrix, 3}, - {"_SLmetrics_tscore", (DL_FUNC) &_SLmetrics_tscore, 4}, - {"_SLmetrics_weighted_tscore", (DL_FUNC) &_SLmetrics_weighted_tscore, 5}, - {"_SLmetrics_tscore_cmatrix", (DL_FUNC) &_SLmetrics_tscore_cmatrix, 3}, - {"_SLmetrics_mcc", (DL_FUNC) &_SLmetrics_mcc, 2}, - {"_SLmetrics_weigthed_mcc", (DL_FUNC) &_SLmetrics_weigthed_mcc, 3}, - {"_SLmetrics_mcc_cmatrix", (DL_FUNC) &_SLmetrics_mcc_cmatrix, 1}, - {"_SLmetrics_phi", (DL_FUNC) &_SLmetrics_phi, 2}, - {"_SLmetrics_weighted_phi", (DL_FUNC) &_SLmetrics_weighted_phi, 3}, - {"_SLmetrics_phi_cmatrix", (DL_FUNC) &_SLmetrics_phi_cmatrix, 1}, - {"_SLmetrics_nlr", (DL_FUNC) &_SLmetrics_nlr, 3}, - {"_SLmetrics_weighted_nlr", (DL_FUNC) &_SLmetrics_weighted_nlr, 4}, - {"_SLmetrics_nlr_cmatrix", (DL_FUNC) &_SLmetrics_nlr_cmatrix, 2}, - {"_SLmetrics_npv", (DL_FUNC) &_SLmetrics_npv, 4}, - {"_SLmetrics_weighted_npv", (DL_FUNC) &_SLmetrics_weighted_npv, 5}, - {"_SLmetrics_npv_cmatrix", (DL_FUNC) &_SLmetrics_npv_cmatrix, 3}, - {"_SLmetrics_plr", (DL_FUNC) &_SLmetrics_plr, 3}, - {"_SLmetrics_weighted_plr", (DL_FUNC) &_SLmetrics_weighted_plr, 4}, - {"_SLmetrics_plr_cmatrix", (DL_FUNC) &_SLmetrics_plr_cmatrix, 2}, - {"_SLmetrics_precision", (DL_FUNC) &_SLmetrics_precision, 4}, - {"_SLmetrics_weighted_precision", (DL_FUNC) &_SLmetrics_weighted_precision, 5}, - {"_SLmetrics_precision_cmatrix", (DL_FUNC) &_SLmetrics_precision_cmatrix, 3}, - {"_SLmetrics_ppv", (DL_FUNC) &_SLmetrics_ppv, 4}, - {"_SLmetrics_weighted_ppv", (DL_FUNC) &_SLmetrics_weighted_ppv, 5}, - {"_SLmetrics_ppv_cmatrix", (DL_FUNC) &_SLmetrics_ppv_cmatrix, 3}, - {"_SLmetrics_prROC", (DL_FUNC) &_SLmetrics_prROC, 5}, - {"_SLmetrics_recall", (DL_FUNC) &_SLmetrics_recall, 4}, - {"_SLmetrics_weighted_recall", (DL_FUNC) &_SLmetrics_weighted_recall, 5}, - {"_SLmetrics_recall_cmatrix", (DL_FUNC) &_SLmetrics_recall_cmatrix, 3}, - {"_SLmetrics_sensitivity", (DL_FUNC) &_SLmetrics_sensitivity, 4}, - {"_SLmetrics_weighted_sensitivity", (DL_FUNC) &_SLmetrics_weighted_sensitivity, 5}, - {"_SLmetrics_sensitivity_cmatrix", (DL_FUNC) &_SLmetrics_sensitivity_cmatrix, 3}, - {"_SLmetrics_tpr", (DL_FUNC) &_SLmetrics_tpr, 4}, - 
{"_SLmetrics_weighted_tpr", (DL_FUNC) &_SLmetrics_weighted_tpr, 5}, - {"_SLmetrics_tpr_cmatrix", (DL_FUNC) &_SLmetrics_tpr_cmatrix, 3}, + {"_SLmetrics_Accuracy", (DL_FUNC) &_SLmetrics_Accuracy, 2}, + {"_SLmetrics_weighted_Accuracy", (DL_FUNC) &_SLmetrics_weighted_Accuracy, 3}, + {"_SLmetrics_cmatrix_Accuracy", (DL_FUNC) &_SLmetrics_cmatrix_Accuracy, 1}, + {"_SLmetrics_BalancedAccuracy", (DL_FUNC) &_SLmetrics_BalancedAccuracy, 4}, + {"_SLmetrics_weighted_BalancedAccuracy", (DL_FUNC) &_SLmetrics_weighted_BalancedAccuracy, 5}, + {"_SLmetrics_cmatrix_BalancedAccuracy", (DL_FUNC) &_SLmetrics_cmatrix_BalancedAccuracy, 3}, + {"_SLmetrics_CohensKappa", (DL_FUNC) &_SLmetrics_CohensKappa, 3}, + {"_SLmetrics_weighted_CohensKappa", (DL_FUNC) &_SLmetrics_weighted_CohensKappa, 4}, + {"_SLmetrics_cmatrix_CohensKappa", (DL_FUNC) &_SLmetrics_cmatrix_CohensKappa, 2}, + {"_SLmetrics_UnweightedConfusionMatrix", (DL_FUNC) &_SLmetrics_UnweightedConfusionMatrix, 2}, + {"_SLmetrics_WeightedConfusionMatrix", (DL_FUNC) &_SLmetrics_WeightedConfusionMatrix, 3}, + {"_SLmetrics_CrossEntropy", (DL_FUNC) &_SLmetrics_CrossEntropy, 3}, + {"_SLmetrics_weighted_CrossEntropy", (DL_FUNC) &_SLmetrics_weighted_CrossEntropy, 4}, + {"_SLmetrics_LogLoss", (DL_FUNC) &_SLmetrics_LogLoss, 3}, + {"_SLmetrics_weighted_LogLoss", (DL_FUNC) &_SLmetrics_weighted_LogLoss, 4}, + {"_SLmetrics_DiagnosticOddsRatio", (DL_FUNC) &_SLmetrics_DiagnosticOddsRatio, 2}, + {"_SLmetrics_weighted_DiagnosticOddsRatio", (DL_FUNC) &_SLmetrics_weighted_DiagnosticOddsRatio, 3}, + {"_SLmetrics_cmatrix_DiagnosticOddsRatio", (DL_FUNC) &_SLmetrics_cmatrix_DiagnosticOddsRatio, 1}, + {"_SLmetrics_FBetaScore", (DL_FUNC) &_SLmetrics_FBetaScore, 5}, + {"_SLmetrics_weighted_FBetaScore", (DL_FUNC) &_SLmetrics_weighted_FBetaScore, 6}, + {"_SLmetrics_cmatrix_FBetaScore", (DL_FUNC) &_SLmetrics_cmatrix_FBetaScore, 4}, + {"_SLmetrics_FalseDiscoveryRate", (DL_FUNC) &_SLmetrics_FalseDiscoveryRate, 4}, + {"_SLmetrics_weighted_FalseDiscoveryRate", (DL_FUNC) &_SLmetrics_weighted_FalseDiscoveryRate, 5}, + {"_SLmetrics_cmatrix_FalseDiscoveryRate", (DL_FUNC) &_SLmetrics_cmatrix_FalseDiscoveryRate, 3}, + {"_SLmetrics_FalseOmissionRate", (DL_FUNC) &_SLmetrics_FalseOmissionRate, 4}, + {"_SLmetrics_weighted_FalseOmissionRate", (DL_FUNC) &_SLmetrics_weighted_FalseOmissionRate, 5}, + {"_SLmetrics_cmatrix_FalseOmissionRate", (DL_FUNC) &_SLmetrics_cmatrix_FalseOmissionRate, 3}, + {"_SLmetrics_FalsePositiveRate", (DL_FUNC) &_SLmetrics_FalsePositiveRate, 4}, + {"_SLmetrics_weighted_FalsePositiveRate", (DL_FUNC) &_SLmetrics_weighted_FalsePositiveRate, 5}, + {"_SLmetrics_cmatrix_FalsePositiveRate", (DL_FUNC) &_SLmetrics_cmatrix_FalsePositiveRate, 3}, + {"_SLmetrics_Fallout", (DL_FUNC) &_SLmetrics_Fallout, 4}, + {"_SLmetrics_weighted_Fallout", (DL_FUNC) &_SLmetrics_weighted_Fallout, 5}, + {"_SLmetrics_cmatrix_Fallout", (DL_FUNC) &_SLmetrics_cmatrix_Fallout, 3}, + {"_SLmetrics_FowlkesMallowsIndex", (DL_FUNC) &_SLmetrics_FowlkesMallowsIndex, 2}, + {"_SLmetrics_cmatrix_FowlkesMallowsIndexClass", (DL_FUNC) &_SLmetrics_cmatrix_FowlkesMallowsIndexClass, 1}, + {"_SLmetrics_JaccardIndex", (DL_FUNC) &_SLmetrics_JaccardIndex, 4}, + {"_SLmetrics_weighted_JaccardIndex", (DL_FUNC) &_SLmetrics_weighted_JaccardIndex, 5}, + {"_SLmetrics_cmatrix_JaccardIndex", (DL_FUNC) &_SLmetrics_cmatrix_JaccardIndex, 3}, + {"_SLmetrics_CriticalSuccessIndex", (DL_FUNC) &_SLmetrics_CriticalSuccessIndex, 4}, + {"_SLmetrics_weighted_CriticalSuccessIndex", (DL_FUNC) &_SLmetrics_weighted_CriticalSuccessIndex, 5}, + 
{"_SLmetrics_cmatrix_CriticalSuccessIndex", (DL_FUNC) &_SLmetrics_cmatrix_CriticalSuccessIndex, 3}, + {"_SLmetrics_ThreatScore", (DL_FUNC) &_SLmetrics_ThreatScore, 4}, + {"_SLmetrics_weighted_ThreatScore", (DL_FUNC) &_SLmetrics_weighted_ThreatScore, 5}, + {"_SLmetrics_cmatrix_ThreatScore", (DL_FUNC) &_SLmetrics_cmatrix_ThreatScore, 3}, + {"_SLmetrics_MatthewsCorrelationCoefficient", (DL_FUNC) &_SLmetrics_MatthewsCorrelationCoefficient, 2}, + {"_SLmetrics_weigthed_MatthewsCorrelationCoefficient", (DL_FUNC) &_SLmetrics_weigthed_MatthewsCorrelationCoefficient, 3}, + {"_SLmetrics_cmatrix_MatthewsCorrelationCoefficient", (DL_FUNC) &_SLmetrics_cmatrix_MatthewsCorrelationCoefficient, 1}, + {"_SLmetrics_PhiCoefficient", (DL_FUNC) &_SLmetrics_PhiCoefficient, 2}, + {"_SLmetrics_weighted_PhiCoefficient", (DL_FUNC) &_SLmetrics_weighted_PhiCoefficient, 3}, + {"_SLmetrics_cmatrix_PhiCoefficient", (DL_FUNC) &_SLmetrics_cmatrix_PhiCoefficient, 1}, + {"_SLmetrics_NegativeLikelihoodRatio", (DL_FUNC) &_SLmetrics_NegativeLikelihoodRatio, 2}, + {"_SLmetrics_weighted_NegativeLikelihoodRatio", (DL_FUNC) &_SLmetrics_weighted_NegativeLikelihoodRatio, 3}, + {"_SLmetrics_cmatrix_NegativeLikelihoodRatio", (DL_FUNC) &_SLmetrics_cmatrix_NegativeLikelihoodRatio, 1}, + {"_SLmetrics_NegativePredictitveValue", (DL_FUNC) &_SLmetrics_NegativePredictitveValue, 4}, + {"_SLmetrics_weighted_NegativePredictitveValue", (DL_FUNC) &_SLmetrics_weighted_NegativePredictitveValue, 5}, + {"_SLmetrics_cmatrix_NegativePredictitveValue", (DL_FUNC) &_SLmetrics_cmatrix_NegativePredictitveValue, 3}, + {"_SLmetrics_PositiveLikelihoodRatio", (DL_FUNC) &_SLmetrics_PositiveLikelihoodRatio, 2}, + {"_SLmetrics_weighted_PositiveLikelihoodRatio", (DL_FUNC) &_SLmetrics_weighted_PositiveLikelihoodRatio, 3}, + {"_SLmetrics_cmatrix_PositiveLikelihoodRatio", (DL_FUNC) &_SLmetrics_cmatrix_PositiveLikelihoodRatio, 1}, + {"_SLmetrics_Precision", (DL_FUNC) &_SLmetrics_Precision, 4}, + {"_SLmetrics_weighted_Precision", (DL_FUNC) &_SLmetrics_weighted_Precision, 5}, + {"_SLmetrics_cmatrix_Precision", (DL_FUNC) &_SLmetrics_cmatrix_Precision, 3}, + {"_SLmetrics_PositivePredictiveValue", (DL_FUNC) &_SLmetrics_PositivePredictiveValue, 4}, + {"_SLmetrics_weighted_PositivePredictiveValue", (DL_FUNC) &_SLmetrics_weighted_PositivePredictiveValue, 5}, + {"_SLmetrics_cmatrix_PositivePredictiveValue", (DL_FUNC) &_SLmetrics_cmatrix_PositivePredictiveValue, 3}, + {"_SLmetrics_PrecisionRecallCurve", (DL_FUNC) &_SLmetrics_PrecisionRecallCurve, 3}, + {"_SLmetrics_weighted_PrecisionRecallCurve", (DL_FUNC) &_SLmetrics_weighted_PrecisionRecallCurve, 4}, + {"_SLmetrics_Recall", (DL_FUNC) &_SLmetrics_Recall, 4}, + {"_SLmetrics_weighted_Recall", (DL_FUNC) &_SLmetrics_weighted_Recall, 5}, + {"_SLmetrics_cmatrix_Recall", (DL_FUNC) &_SLmetrics_cmatrix_Recall, 3}, + {"_SLmetrics_Sensitivity", (DL_FUNC) &_SLmetrics_Sensitivity, 4}, + {"_SLmetrics_weighted_Sensitivity", (DL_FUNC) &_SLmetrics_weighted_Sensitivity, 5}, + {"_SLmetrics_cmatrix_Sensitivity", (DL_FUNC) &_SLmetrics_cmatrix_Sensitivity, 3}, + {"_SLmetrics_TruePositiveRate", (DL_FUNC) &_SLmetrics_TruePositiveRate, 4}, + {"_SLmetrics_weighted_TruePositiveRate", (DL_FUNC) &_SLmetrics_weighted_TruePositiveRate, 5}, + {"_SLmetrics_cmatrix_TruePositiveRate", (DL_FUNC) &_SLmetrics_cmatrix_TruePositiveRate, 3}, {"_SLmetrics_auc", (DL_FUNC) &_SLmetrics_auc, 3}, - {"_SLmetrics_ROC", (DL_FUNC) &_SLmetrics_ROC, 5}, - {"_SLmetrics_specificity", (DL_FUNC) &_SLmetrics_specificity, 4}, - {"_SLmetrics_weighted_specificity", (DL_FUNC) 
&_SLmetrics_weighted_specificity, 5}, - {"_SLmetrics_specificity_cmatrix", (DL_FUNC) &_SLmetrics_specificity_cmatrix, 3}, - {"_SLmetrics_tnr", (DL_FUNC) &_SLmetrics_tnr, 4}, - {"_SLmetrics_weighted_tnr", (DL_FUNC) &_SLmetrics_weighted_tnr, 5}, - {"_SLmetrics_tnr_cmatrix", (DL_FUNC) &_SLmetrics_tnr_cmatrix, 3}, - {"_SLmetrics_selectivity", (DL_FUNC) &_SLmetrics_selectivity, 4}, - {"_SLmetrics_weighted_selectivity", (DL_FUNC) &_SLmetrics_weighted_selectivity, 5}, - {"_SLmetrics_selectivity_cmatrix", (DL_FUNC) &_SLmetrics_selectivity_cmatrix, 3}, - {"_SLmetrics_zerooneloss", (DL_FUNC) &_SLmetrics_zerooneloss, 2}, - {"_SLmetrics_weighted_zerooneloss", (DL_FUNC) &_SLmetrics_weighted_zerooneloss, 3}, - {"_SLmetrics_zerooneloss_cmatrix", (DL_FUNC) &_SLmetrics_zerooneloss_cmatrix, 1}, + {"_SLmetrics_RecieverOperatorCharacteristics", (DL_FUNC) &_SLmetrics_RecieverOperatorCharacteristics, 3}, + {"_SLmetrics_weighted_RecieverOperatorCharacteristics", (DL_FUNC) &_SLmetrics_weighted_RecieverOperatorCharacteristics, 4}, + {"_SLmetrics_Specificity", (DL_FUNC) &_SLmetrics_Specificity, 4}, + {"_SLmetrics_weighted_Specificity", (DL_FUNC) &_SLmetrics_weighted_Specificity, 5}, + {"_SLmetrics_cmatrix_Specificity", (DL_FUNC) &_SLmetrics_cmatrix_Specificity, 3}, + {"_SLmetrics_TrueNegativeRate", (DL_FUNC) &_SLmetrics_TrueNegativeRate, 4}, + {"_SLmetrics_weighted_TrueNegativeRate", (DL_FUNC) &_SLmetrics_weighted_TrueNegativeRate, 5}, + {"_SLmetrics_cmatrix_TrueNegativeRate", (DL_FUNC) &_SLmetrics_cmatrix_TrueNegativeRate, 3}, + {"_SLmetrics_Selectivity", (DL_FUNC) &_SLmetrics_Selectivity, 4}, + {"_SLmetrics_weighted_Selectivity", (DL_FUNC) &_SLmetrics_weighted_Selectivity, 5}, + {"_SLmetrics_cmatrix_Selectivity", (DL_FUNC) &_SLmetrics_cmatrix_Selectivity, 3}, + {"_SLmetrics_ZeroOneLoss", (DL_FUNC) &_SLmetrics_ZeroOneLoss, 2}, + {"_SLmetrics_weighted_ZeroOneLoss", (DL_FUNC) &_SLmetrics_weighted_ZeroOneLoss, 3}, + {"_SLmetrics_cmatrix_ZeroOneLoss", (DL_FUNC) &_SLmetrics_cmatrix_ZeroOneLoss, 1}, {"_SLmetrics_rsq", (DL_FUNC) &_SLmetrics_rsq, 3}, {"_SLmetrics_weighted_rsq", (DL_FUNC) &_SLmetrics_weighted_rsq, 4}, {"_SLmetrics_ccc", (DL_FUNC) &_SLmetrics_ccc, 3}, @@ -1588,6 +1703,8 @@ static const R_CallMethodDef CallEntries[] = { {"_SLmetrics_weighted_pinball", (DL_FUNC) &_SLmetrics_weighted_pinball, 5}, {"_SLmetrics_rae", (DL_FUNC) &_SLmetrics_rae, 2}, {"_SLmetrics_weighted_rae", (DL_FUNC) &_SLmetrics_weighted_rae, 3}, + {"_SLmetrics_RelativeRootMeanSquaredError", (DL_FUNC) &_SLmetrics_RelativeRootMeanSquaredError, 3}, + {"_SLmetrics_weighted_RelativeRootMeanSquaredError", (DL_FUNC) &_SLmetrics_weighted_RelativeRootMeanSquaredError, 4}, {"_SLmetrics_rmse", (DL_FUNC) &_SLmetrics_rmse, 2}, {"_SLmetrics_weighted_rmse", (DL_FUNC) &_SLmetrics_weighted_rmse, 3}, {"_SLmetrics_rmsle", (DL_FUNC) &_SLmetrics_rmsle, 2}, diff --git a/src/classification_Accuracy.cpp b/src/classification_Accuracy.cpp index d5c921d5..1b25e697 100644 --- a/src/classification_Accuracy.cpp +++ b/src/classification_Accuracy.cpp @@ -8,25 +8,28 @@ using namespace Rcpp; //' @method accuracy factor //' @export // [[Rcpp::export(accuracy.factor)]] -NumericVector accuracy(const IntegerVector& actual, const IntegerVector& predicted) { - AccuracyMetric foo; // Instantiate AccuracyMetric - return classification_base(actual, predicted, foo); +Rcpp::NumericVector Accuracy(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted) +{ + AccuracyClass cook; + return recipe(cook, actual, predicted); } //' @rdname accuracy //' @method weighted.accuracy 
factor //' @export // [[Rcpp::export(weighted.accuracy.factor)]] -NumericVector weighted_accuracy(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w) { - AccuracyMetric foo; // Instantiate AccuracyMetric - return classification_base(actual, predicted, w, foo); +Rcpp::NumericVector weighted_Accuracy(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w) +{ + AccuracyClass cook; + return recipe(cook, actual, predicted, w); } //' @rdname accuracy //' @method accuracy cmatrix //' @export // [[Rcpp::export(accuracy.cmatrix)]] -NumericVector accuracy_cmatrix(const NumericMatrix& x) { - AccuracyMetric foo; // Instantiate AccuracyMetric - return classification_base(x, foo); +Rcpp::NumericVector cmatrix_Accuracy(const Rcpp::NumericMatrix& x) +{ + AccuracyClass cook; + return recipe(cook, x); } diff --git a/src/classification_Accuracy.h b/src/classification_Accuracy.h index 85299799..e689fec1 100644 --- a/src/classification_Accuracy.h +++ b/src/classification_Accuracy.h @@ -6,13 +6,9 @@ #define EIGEN_USE_MKL_ALL EIGEN_MAKE_ALIGNED_OPERATOR_NEW -/* - Simplified AccuracyMetric class: - Calculates accuracy as (tp + tn) / N. -*/ -class AccuracyMetric : public classification { +class AccuracyClass : public classification { public: - // Compute overall accuracy + Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const override { // 0) set sizes @@ -34,4 +30,4 @@ class AccuracyMetric : public classification { } }; -#endif // CLASSIFICATION_ACCURACY_H +#endif diff --git a/src/classification_BalancedAccuracy.cpp b/src/classification_BalancedAccuracy.cpp index 2736abf8..cb23b672 100644 --- a/src/classification_BalancedAccuracy.cpp +++ b/src/classification_BalancedAccuracy.cpp @@ -8,25 +8,28 @@ using namespace Rcpp; //' @method baccuracy factor //' @export // [[Rcpp::export(baccuracy.factor)]] -NumericVector baccuracy(const IntegerVector& actual, const IntegerVector& predicted, const bool& adjust = false, bool na_rm = true) { - BalancedAccuracyMetric foo; // Instantiate BalancedAccuracyMetric - return classification_base(actual, predicted, foo, adjust, na_rm); +Rcpp::NumericVector BalancedAccuracy(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const bool& adjust = false, bool na_rm = true) +{ + BalancedAccuracyClass cook(adjust, na_rm); + return recipe(cook, actual, predicted); } //' @rdname baccuracy //' @method weighted.baccuracy factor //' @export // [[Rcpp::export(weighted.baccuracy.factor)]] -NumericVector weighted_baccuracy(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, const bool& adjust = false, bool na_rm = true) { - BalancedAccuracyMetric foo; // Instantiate BalancedAccuracyMetric - return classification_base(actual, predicted, w, foo, adjust, na_rm); +Rcpp::NumericVector weighted_BalancedAccuracy(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, const bool& adjust = false, bool na_rm = true) +{ + BalancedAccuracyClass cook(adjust, na_rm); + return recipe(cook, actual, predicted, w); } //' @rdname baccuracy //' @method baccuracy cmatrix //' @export // [[Rcpp::export(baccuracy.cmatrix)]] -NumericVector baccuracy_cmatrix(const NumericMatrix& x, const bool& adjust = false, bool na_rm = true) { - BalancedAccuracyMetric foo; // Instantiate BalancedAccuracyMetric - return classification_base(x, foo, adjust, na_rm); +Rcpp::NumericVector cmatrix_BalancedAccuracy(const NumericMatrix& x, const bool& adjust 
= false, bool na_rm = true) +{ + BalancedAccuracyClass cook(adjust, na_rm); + return recipe(cook, x); } diff --git a/src/classification_BalancedAccuracy.h b/src/classification_BalancedAccuracy.h index 7a0f8ad8..febc3272 100644 --- a/src/classification_BalancedAccuracy.h +++ b/src/classification_BalancedAccuracy.h @@ -7,16 +7,18 @@ #define EIGEN_USE_MKL_ALL EIGEN_MAKE_ALIGNED_OPERATOR_NEW -/* - BalancedAccuracyMetric class: - Calculates the average recall across classes, - with an optional adjustment for chance agreement. -*/ +class BalancedAccuracyClass : public classification { + +private: + bool adjust; + bool na_rm; -class BalancedAccuracyMetric : public classification { public: + // Constructor + BalancedAccuracyClass(bool adjust, bool na_rm) + : adjust(adjust), na_rm(na_rm) {} - Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool adjust, bool na_rm) const override { + Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const override { // 0) define values Eigen::ArrayXd output(1); Eigen::ArrayXd tp(matrix.rows()); @@ -59,4 +61,4 @@ class BalancedAccuracyMetric : public classification { }; -#endif // CLASSIFICATION_BALANCED_ACCURACY_H +#endif diff --git a/src/classification_CohensKappa.cpp b/src/classification_CohensKappa.cpp index ffb44cbd..c815ebd4 100644 --- a/src/classification_CohensKappa.cpp +++ b/src/classification_CohensKappa.cpp @@ -8,25 +8,28 @@ using namespace Rcpp; //' @method ckappa factor //' @export // [[Rcpp::export(ckappa.factor)]] -NumericVector ckappa(const IntegerVector& actual, const IntegerVector& predicted, const double& beta = 0.0) { - CohensKappaMetric foo; // Instantiate CohensKappaMetric - return classification_base(actual, predicted, foo, beta); +Rcpp::NumericVector CohensKappa(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const double& beta = 0.0) +{ + CohensKappaClass cook(beta); + return recipe(cook, actual, predicted); } //' @rdname ckappa //' @method weighted.ckappa factor //' @export // [[Rcpp::export(weighted.ckappa.factor)]] -NumericVector weighted_ckappa(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, const double& beta = 0.0) { - CohensKappaMetric foo; // Instantiate CohensKappaMetric - return classification_base(actual, predicted, w, foo, beta); +Rcpp::NumericVector weighted_CohensKappa(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, const double& beta = 0.0) +{ + CohensKappaClass cook(beta); + return recipe(cook, actual, predicted, w); } //' @rdname ckappa //' @method ckappa cmatrix //' @export // [[Rcpp::export(ckappa.cmatrix)]] -NumericVector ckappa_cmatrix(const NumericMatrix& x, const double& beta = 0.0) { - CohensKappaMetric foo; // Instantiate CohensKappaMetric - return classification_base(x, foo, beta); +Rcpp::NumericVector cmatrix_CohensKappa(const Rcpp::NumericMatrix& x, const double& beta = 0.0) +{ + CohensKappaClass cook(beta); + return recipe(cook, x); } diff --git a/src/classification_CohensKappa.h b/src/classification_CohensKappa.h index f5633cc7..d8cac7cc 100644 --- a/src/classification_CohensKappa.h +++ b/src/classification_CohensKappa.h @@ -6,72 +6,72 @@ #define EIGEN_USE_MKL_ALL EIGEN_MAKE_ALIGNED_OPERATOR_NEW -/* - * Calculation of Cohen's Kappa statistic - * This class provides methods to compute the Cohen's Kappa with penalization. 
- */ -class CohensKappaMetric : public classification { -public: - Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, double beta) const override { - - // 0) initialize containers - Eigen::MatrixXd penalizing_matrix(matrix.cols(), matrix.cols()); - Eigen::VectorXd row_sum(matrix.cols()), col_sum(matrix.cols()); - - // 1) populate the - // matrix - penalizing_matrix = penalizingMatrix(matrix.cols(), beta); - - // 2) - double N = matrix.sum(); - double N_inv = 1.0 / N; - - // 3) - row_sum = matrix.rowwise().sum(); - col_sum = matrix.colwise().sum(); - - // 4) Calculate weighted disagreement (observed agreement with penalizing matrix) - double n_disagree = (matrix.cwiseProduct(penalizing_matrix)).sum(); - - // 5) Calculate expected agreement by chance (weighted) - double n_chance = (row_sum * col_sum.transpose() * N_inv).cwiseProduct(penalizing_matrix).sum(); - - // Step 5: Return penalized kappa statistic - double kappa = 1.0 - (n_disagree / n_chance); - - return Rcpp::wrap(kappa); - - } - -private: - - inline __attribute__((always_inline)) Eigen::MatrixXd penalizingMatrix(const int& n, const double& power) const { - - /* - Returns a diagonal matrix, with diag(0) - */ - - Eigen::MatrixXd matrix(n, n); - - double* mat_data = matrix.data(); - - for (int i = 0; i < n; ++i) { - for (int j = i; j < n; ++j) { - double value = std::pow(std::abs(j - i), power); - - double* upper_elem = mat_data + i * n + j; - double* lower_elem = mat_data + j * n + i; - double* diag_elem = mat_data + i * n + i; - - *upper_elem = value; - *lower_elem = value; - *diag_elem = 0.0; +class CohensKappaClass : public classification { + + private: + + double beta; + + inline __attribute__((always_inline)) Eigen::MatrixXd penalizingMatrix(const int& n, const double& power) const { + + Eigen::MatrixXd matrix(n, n); + + double* mat_data = matrix.data(); + + for (int i = 0; i < n; ++i) { + for (int j = i; j < n; ++j) { + double value = std::pow(std::abs(j - i), power); + + double* upper_elem = mat_data + i * n + j; + double* lower_elem = mat_data + j * n + i; + double* diag_elem = mat_data + i * n + i; + + *upper_elem = value; + *lower_elem = value; + *diag_elem = 0.0; + } } - } - return matrix; + return matrix; - } + } + + + public: + + CohensKappaClass(double beta) + : beta(beta) {} + + Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const override { + + // 0) initialize containers + Eigen::MatrixXd penalizing_matrix(matrix.cols(), matrix.cols()); + Eigen::VectorXd row_sum(matrix.cols()), col_sum(matrix.cols()); + + // 1) populate the + // matrix + penalizing_matrix = penalizingMatrix(matrix.cols(), beta); + + // 2) + double N = matrix.sum(); + double N_inv = 1.0 / N; + + // 3) + row_sum = matrix.rowwise().sum(); + col_sum = matrix.colwise().sum(); + + // 4) Calculate weighted disagreement (observed agreement with penalizing matrix) + double n_disagree = (matrix.cwiseProduct(penalizing_matrix)).sum(); + + // 5) Calculate expected agreement by chance (weighted) + double n_chance = (row_sum * col_sum.transpose() * N_inv).cwiseProduct(penalizing_matrix).sum(); + + // 6) return the penalized kappa statistic + double kappa = 1.0 - (n_disagree / n_chance); + + return Rcpp::wrap(kappa); + + } };
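In symbols, what `CohensKappaClass::compute()` returns: with observed confusion matrix $O$, row and column totals $r$ and $c$, total $N = \sum_{i,j} O_{ij}$, and the penalizing matrix $W_{ij} = |i - j|^{\beta}$ built by `penalizingMatrix()` (diagonal forced to zero),

$$
\kappa_{\beta} = 1 - \frac{\sum_{i,j} W_{ij}\, O_{ij}}{\sum_{i,j} W_{ij}\, \left(r c^{\top}/N\right)_{ij}},
$$

so `beta = 0` weights all disagreements equally (ordinary Cohen's kappa), while `beta = 1` and `beta = 2` reproduce the usual linearly and quadratically weighted variants.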
"classification_ConfusionMatrix.h" #include using namespace Rcpp; @@ -8,39 +8,18 @@ using namespace Rcpp; //' @method cmatrix factor //' @export // [[Rcpp::export(cmatrix.factor)]] -Rcpp::NumericMatrix cmatrix( - const Rcpp::IntegerVector& actual, - const Rcpp::IntegerVector& predicted, - const Rcpp::Nullable& w = R_NilValue) { - - // 1) declare the output matrix - // compiler doesn't recognize it in if-statements - Rcpp::NumericMatrix output; - - // 1.1) extract levels (classes) - // from the actual-vector and the length - Rcpp::CharacterVector levels = actual.attr("levels"); - int k = levels.length() + 1; - - // 2) Determine the weights - // these are what determines - // the content of the confusion matrix - if (w.isNull()) { - - output = Rcpp::wrap(confusionMatrix(actual, predicted, k)); - - } else { - - output = Rcpp::wrap(confusionMatrix(actual, predicted, k, w)); - - } - - // 3) preare the matrix output - // by adding column and rownames - // and classes - Rcpp::rownames(output) = levels; - Rcpp::colnames(output) = levels; - output.attr("class") = "cmatrix"; +Rcpp::NumericMatrix UnweightedConfusionMatrix(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted) +{ + ConfusionMatrixClass args(actual, predicted); + return args.constructMatrix(); +} - return output; +//' @rdname cmatrix +//' @method weighted.cmatrix factor +//' @export +// [[Rcpp::export(weighted.cmatrix.factor)]] +Rcpp::NumericMatrix WeightedConfusionMatrix(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w) +{ + ConfusionMatrixClass args(actual, predicted); + return args.constructMatrix(w); } diff --git a/src/classification_ConfusionMatrix.h b/src/classification_ConfusionMatrix.h new file mode 100644 index 00000000..dcda4f47 --- /dev/null +++ b/src/classification_ConfusionMatrix.h @@ -0,0 +1,111 @@ +#ifndef CLASSIFICATION_CONFUSION_MATRIX_H +#define CLASSIFICATION_CONFUSION_MATRIX_H + +#include +#include +#define EIGEN_USE_MKL_ALL +EIGEN_MAKE_ALIGNED_OPERATOR_NEW + +class ConfusionMatrixClass { + protected: + Rcpp::IntegerVector actual_; + Rcpp::IntegerVector predicted_; + Rcpp::CharacterVector levels_; + int k_; + + void prepareLevels() { + levels_ = actual_.attr("levels"); + k_ = levels_.length() + 1; + } + + Rcpp::NumericMatrix finalizeMatrix(const Eigen::MatrixXd& matrix) const { + Rcpp::NumericMatrix output = Rcpp::wrap(matrix); + Rcpp::rownames(output) = levels_; + Rcpp::colnames(output) = levels_; + output.attr("class") = "cmatrix"; + return output; + } + + + template + MatrixType computeMatrix() const { + MatrixType placeholder = MatrixType::Zero(k_, k_).eval(); + const int n = actual_.size(); + + const int* actual_ptr = actual_.begin(); + const int* predicted_ptr = predicted_.begin(); + auto matrix_ptr = placeholder.data(); + + int i = 0; + for (; i <= n - 6; i += 6) { + ++matrix_ptr[predicted_ptr[i] * k_ + actual_ptr[i]]; + ++matrix_ptr[predicted_ptr[i + 1] * k_ + actual_ptr[i + 1]]; + ++matrix_ptr[predicted_ptr[i + 2] * k_ + actual_ptr[i + 2]]; + ++matrix_ptr[predicted_ptr[i + 3] * k_ + actual_ptr[i + 3]]; + ++matrix_ptr[predicted_ptr[i + 4] * k_ + actual_ptr[i + 4]]; + ++matrix_ptr[predicted_ptr[i + 5] * k_ + actual_ptr[i + 5]]; + } + + for (; i < n; ++i) { + ++matrix_ptr[predicted_ptr[i] * k_ + actual_ptr[i]]; + } + + return placeholder.block(1, 1, k_ - 1, k_ - 1); + } + + template + MatrixType computeMatrix(const Rcpp::NumericVector& weights) const { + MatrixType placeholder = MatrixType::Zero(k_, k_).eval(); + const int n = 
diff --git a/src/classification_CrossEntropyLoss.cpp b/src/classification_CrossEntropyLoss.cpp new file mode 100644 index 00000000..be8e9266 --- /dev/null +++ b/src/classification_CrossEntropyLoss.cpp @@ -0,0 +1,44 @@ +#include <Rcpp.h> +#include "classification_CrossEntropyLoss.h" +using namespace Rcpp; + +//' @rdname entropy +//' @method entropy factor +//' @export +// [[Rcpp::export(entropy.factor)]] +double CrossEntropy(const IntegerVector& actual, const NumericMatrix& response, const bool normalize = true) +{ + CrossEntropyClass CrossEntropyMetric(normalize); + return CrossEntropyMetric.compute(actual, response); +} + +//' @rdname weighted.entropy +//' @method weighted.entropy factor +//' @export +// [[Rcpp::export(weighted.entropy.factor)]] +double weighted_CrossEntropy(const IntegerVector& actual, const NumericMatrix& response, const NumericVector& w, const bool normalize = true) +{ + CrossEntropyClass CrossEntropyMetric(normalize); + return CrossEntropyMetric.compute(actual, response, w); +} + +//' @rdname entropy +//' @method logloss factor +//' @export +// [[Rcpp::export(logloss.factor)]] +double LogLoss(const IntegerVector& actual, const NumericMatrix& response, const bool normalize = true) +{ + CrossEntropyClass CrossEntropyMetric(normalize); + return CrossEntropyMetric.compute(actual, response); +} + +//' @rdname weighted.entropy +//' @method weighted.logloss factor +//' @export +// [[Rcpp::export(weighted.logloss.factor)]] +double weighted_LogLoss(const IntegerVector& actual, const NumericMatrix& response, const NumericVector& w, const bool normalize = true) +{ + CrossEntropyClass CrossEntropyMetric(normalize); + return CrossEntropyMetric.compute(actual, response, w); +} +
diff --git a/src/classification_CrossEntropyLoss.h b/src/classification_CrossEntropyLoss.h new file mode 100644 index 00000000..de936586 --- /dev/null +++ b/src/classification_CrossEntropyLoss.h @@ -0,0 +1,74 @@ +#ifndef CLASSIFICATION_CROSS_ENTROPY_LOSS_H +#define CLASSIFICATION_CROSS_ENTROPY_LOSS_H + +#include <Rcpp.h> +#include <cmath> + +/* + NOTE: The positive class is irrelevant + as long as the probability matrix is correctly specified. +*/ + +class CrossEntropyClass { + private: + bool normalize_; + + public: + CrossEntropyClass(bool normalize) : normalize_(normalize) {} + + + double compute(const Rcpp::IntegerVector &actual, const Rcpp::NumericMatrix &response) const { + const int n = actual.size(); + double loss = 0.0; + + + const int *actual_ptr = &actual[0]; + const double *response_ptr = &response(0, 0); + const int nrows = response.nrow(); + + for (int i = 0; i < n; ++i) { + + const int c = actual_ptr[i] - 1; + const double p = response_ptr[i + c * nrows]; + loss -= std::log(p); + + } + + // If requested, average the loss + if (normalize_) { + loss /= n; + } + + return loss; + } + + // Weighted cross-entropy + double compute(const Rcpp::IntegerVector &actual, const Rcpp::NumericMatrix &response, const Rcpp::NumericVector &w) const { + const int n = actual.size(); + double loss = 0.0; + double wsum = 0.0; + + const int *actual_ptr = &actual[0]; + const double *response_ptr = &response(0, 0); + const double *w_ptr = &w[0]; + + const int nrows = response.nrow(); + + for (int i = 0; i < n; ++i) { + const int c = actual_ptr[i] - 1; + const double p = response_ptr[i + c * nrows]; + const double weight = w_ptr[i]; + + wsum += weight; + loss -= weight * std::log(p); + } + + if (normalize_) { + loss /= wsum; + } + + return loss; + } +}; + +#endif
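The access pattern in `CrossEntropyClass` is worth spelling out: R matrices are column-major, so observation `i`'s probability for its true class `c` sits at `response[i + c * nrows]`, and normalization divides by `n` in the unweighted case and by the weight total in the weighted case. A standalone sketch of the same computation:

```cpp
// Sketch of the unweighted CrossEntropyClass::compute() logic, without Rcpp.
#include <cmath>
#include <cstdio>
#include <vector>

double cross_entropy(const std::vector<int>& actual,       // 1-based class codes
                     const std::vector<double>& response,  // n x k, column-major
                     int nrows, bool normalize = true) {
    double loss = 0.0;
    for (int i = 0; i < nrows; ++i) {
        const int c = actual[i] - 1;                  // 0-based column index
        loss -= std::log(response[i + c * nrows]);    // probability of the true class
    }
    return normalize ? loss / nrows : loss;
}

int main() {
    // 3 observations, 2 classes; column 0 = P(class 1), column 1 = P(class 2)
    std::vector<double> response = {0.8, 0.2, 0.7,    // column 0
                                    0.2, 0.8, 0.3};   // column 1
    std::vector<int> actual = {1, 2, 1};
    std::printf("mean cross-entropy: %f\n", cross_entropy(actual, response, 3));
    return 0;
}
```

Note also that `entropy.factor` and `logloss.factor` above are deliberate aliases: both construct the same `CrossEntropyClass` and call the same `compute()`.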
diff --git a/src/classification_DiagnosticOddsRatio.cpp b/src/classification_DiagnosticOddsRatio.cpp index 86c411f1..2dd007ef 100644 --- a/src/classification_DiagnosticOddsRatio.cpp +++ b/src/classification_DiagnosticOddsRatio.cpp @@ -1,6 +1,6 @@ // [[Rcpp::depends(RcppEigen)]] #include <RcppEigen.h> -#include "classification_DiagnosticOddsRatio.h" // DORMetric definition +#include "classification_DiagnosticOddsRatio.h" // DiagnosticOddsRatioClass definition using namespace Rcpp; @@ -8,25 +8,28 @@ //' @rdname dor //' @method dor factor //' @export // [[Rcpp::export(dor.factor)]] -NumericVector dor(const IntegerVector& actual, const IntegerVector& predicted, Nullable<bool> micro = R_NilValue) { - DORMetric foo; // Instantiate DORMetric - return classification_base(actual, predicted, foo, micro); +Rcpp::NumericVector DiagnosticOddsRatio(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted) +{ + DiagnosticOddsRatioClass cook; + return recipe(cook, actual, predicted); } //' @rdname dor //' @method weighted.dor factor //' @export // [[Rcpp::export(weighted.dor.factor)]] -NumericVector weighted_dor(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, Nullable<bool> micro = R_NilValue) { - DORMetric foo; // Instantiate DORMetric - return classification_base(actual, predicted, w, foo, micro); +Rcpp::NumericVector weighted_DiagnosticOddsRatio(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w) +{ + DiagnosticOddsRatioClass cook; + return recipe(cook, actual, predicted, w); } //' @rdname dor //' @method dor cmatrix //' @export // [[Rcpp::export(dor.cmatrix)]] -NumericVector dor_cmatrix(const NumericMatrix& x, Nullable<bool> micro = R_NilValue) { - DORMetric foo; // Instantiate DORMetric - return classification_base(x, foo, micro); +Rcpp::NumericVector cmatrix_DiagnosticOddsRatio(const Rcpp::NumericMatrix& x) +{ + DiagnosticOddsRatioClass cook; + return recipe(cook, x); }
diff --git a/src/classification_DiagnosticOddsRatio.h b/src/classification_DiagnosticOddsRatio.h index 328d09bb..b124ad61 100644 --- a/src/classification_DiagnosticOddsRatio.h +++ b/src/classification_DiagnosticOddsRatio.h @@ -7,22 +7,22 @@ #define EIGEN_USE_MKL_ALL EIGEN_MAKE_ALIGNED_OPERATOR_NEW -class DORMetric : public classification { -public: +class DiagnosticOddsRatioClass : public classification { - // Compute DOR without micro aggregation - Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const override { - Eigen::ArrayXd output(matrix.rows()); - Eigen::ArrayXd tp(matrix.rows()), fn(matrix.rows()), tn(matrix.rows()), fp(matrix.rows()); + public: - TP(matrix, tp); - FN(matrix, fn); - TN(matrix, tn); - FP(matrix, fp); + Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const override { + Eigen::ArrayXd output(matrix.rows()); + Eigen::ArrayXd tp(matrix.rows()), fn(matrix.rows()), tn(matrix.rows()), fp(matrix.rows()); - output = (tp * tn) / (fp * fn); - return Rcpp::wrap(output); - } + TP(matrix, tp); + FN(matrix, fn); + TN(matrix, tn); + FP(matrix, fp); + + output = (tp * tn) / (fp * fn); + return Rcpp::wrap(output); + } }; #endif // CLASSIFICATION_DOR_H \ No newline at end of file
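`DiagnosticOddsRatioClass` needs no micro/macro machinery because the statistic is a plain ratio of the four cell counts,

$$
\mathrm{DOR} = \frac{TP \cdot TN}{FP \cdot FN},
$$

that is, the odds of a positive prediction among actual positives over the same odds among actual negatives; for example $TP = 90$, $FN = 10$, $FP = 20$, $TN = 80$ gives $\mathrm{DOR} = (90 \cdot 80)/(20 \cdot 10) = 36$.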
diff --git a/src/classification_FBetaScore.cpp b/src/classification_FBetaScore.cpp index b0fcf630..29a0012c 100644 --- a/src/classification_FBetaScore.cpp +++ b/src/classification_FBetaScore.cpp @@ -8,25 +8,28 @@ using namespace Rcpp; //' @method fbeta factor //' @export // [[Rcpp::export(fbeta.factor)]] -NumericVector fbeta(const IntegerVector& actual, const IntegerVector& predicted, const double& beta = 1.0, Nullable<bool> micro = R_NilValue, bool na_rm = true) { - FBetaMetric foo; // Instantiate F-Beta metric with the provided beta value - return classification_base(actual, predicted, foo, micro, na_rm, beta); +Rcpp::NumericVector FBetaScore(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const double& beta = 1.0, Rcpp::Nullable<bool> micro = R_NilValue, bool na_rm = true) +{ + FBetaScoreClass cook(beta, na_rm); // Instantiate F-Beta metric with the provided beta value + return recipe(cook, actual, predicted, std::nullopt, micro); } //' @rdname fbeta //' @method weighted.fbeta factor //' @export // [[Rcpp::export(weighted.fbeta.factor)]] -NumericVector weighted_fbeta(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, const double& beta = 1.0, Nullable<bool> micro = R_NilValue, bool na_rm = true) { - FBetaMetric foo; // Instantiate F-Beta metric with the provided beta value - return classification_base(actual, predicted, w, foo, micro, na_rm, beta); +Rcpp::NumericVector weighted_FBetaScore(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, const double& beta = 1.0, Rcpp::Nullable<bool> micro = R_NilValue, bool na_rm = true) +{ + FBetaScoreClass cook(beta, na_rm); // Instantiate F-Beta metric with the provided beta value + return recipe(cook, actual, predicted, w, micro); } //' @rdname fbeta //' @method fbeta cmatrix //' @export // [[Rcpp::export(fbeta.cmatrix)]] -NumericVector fbeta_cmatrix(const NumericMatrix& x, const double& beta = 1.0, Nullable<bool> micro = R_NilValue, bool na_rm = true) { - FBetaMetric foo; // Instantiate F-Beta metric with the provided beta value - return classification_base(x, foo, micro, na_rm, beta); +Rcpp::NumericVector cmatrix_FBetaScore(const Rcpp::NumericMatrix& x, const double& beta = 1.0, Rcpp::Nullable<bool> micro = R_NilValue, bool na_rm = true) +{ + FBetaScoreClass cook(beta, na_rm); // Instantiate F-Beta metric with the provided beta value + return recipe(cook, x, micro); }
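The `recipe()` overloads themselves are not part of this diff (they live in classification_Helpers.h). Purely for orientation, a plausible shape consistent with the call sites above, where `std::nullopt` marks an absent weight vector and `micro` is forwarded as an optional flag; this is an assumption, not the package's actual helper:

```cpp
// ASSUMED sketch of the recipe()/cook dispatch; the real overloads are in
// classification_Helpers.h, which this diff does not show.
#include <RcppEigen.h>
// [[Rcpp::depends(RcppEigen)]]

class classification {
  public:
    virtual ~classification() = default;
    virtual Rcpp::NumericVector compute(const Eigen::MatrixXd& m) const = 0;
    // micro/macro-aware overload; metrics without averaging never override it
    virtual Rcpp::NumericVector compute(const Eigen::MatrixXd& m, bool micro) const {
        return compute(m);
    }
};

// precomputed confusion matrix, optional micro flag
inline Rcpp::NumericVector recipe(const classification& cook,
                                  const Rcpp::NumericMatrix& x,
                                  Rcpp::Nullable<bool> micro = R_NilValue) {
    const Eigen::MatrixXd m = Rcpp::as<Eigen::MatrixXd>(x);
    return micro.isNull() ? cook.compute(m)
                          : cook.compute(m, Rcpp::as<bool>(micro.get()));
}
```

The factor-input overloads would presumably first tabulate `actual`/`predicted` (optionally with `w`) into the Eigen matrix, e.g. via `ConfusionMatrixClass::InputMatrix()`, and then defer the same way.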
diff --git a/src/classification_FBetaScore.h b/src/classification_FBetaScore.h index da0502dd..695794a3 100644 --- a/src/classification_FBetaScore.h +++ b/src/classification_FBetaScore.h @@ -7,57 +7,65 @@ #define EIGEN_USE_MKL_ALL EIGEN_MAKE_ALIGNED_OPERATOR_NEW -class FBetaMetric : public classification { -public: +class FBetaScoreClass : public classification { - Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool do_micro, bool na_rm, double beta) const override { - - // 0) Declare variables and size - // for efficiency. - // NOTE: Micro and macro already wraps and exports as Rcpp - Rcpp::NumericVector output(1); - Eigen::ArrayXd tp(matrix.rows()), fp(matrix.rows()), fn(matrix.rows()); - double beta_sq = beta * beta; + private: + double beta; + bool na_rm; - TP(matrix, tp); - FP(matrix, fp); - FN(matrix, fn); + public: - // 1) define recall - // and recall - Eigen::ArrayXd precision = do_micro - ? micro(tp, (tp + fp), na_rm) - : macro(tp, (tp + fp), na_rm); + FBetaScoreClass(double beta, bool na_rm) + : beta(beta), na_rm(na_rm) {} - Eigen::ArrayXd recall = do_micro - ? micro(tp, (tp + fn), na_rm) - : macro(tp, (tp + fn), na_rm); + Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool do_micro) const override { + + // 0) Declare variables and size + // for efficiency. + // NOTE: Micro and macro already wraps and exports as Rcpp + Rcpp::NumericVector output(1); + Eigen::ArrayXd tp(matrix.rows()), fp(matrix.rows()), fn(matrix.rows()); + double beta_sq = beta * beta; + TP(matrix, tp); + FP(matrix, fp); + FN(matrix, fn); - // 2) retun with - // ternary expression - return do_micro - ? micro((1+beta_sq) * tp, (1+beta_sq) * tp + beta_sq * fn + fp, na_rm) - : macro((1+beta_sq) * tp, (1+beta_sq) * tp + beta_sq * fn + fp, na_rm); + // 1) define precision + // and recall + Eigen::ArrayXd precision = do_micro + ? micro(tp, (tp + fp), na_rm) + : macro(tp, (tp + fp), na_rm); + Eigen::ArrayXd recall = do_micro + ? micro(tp, (tp + fn), na_rm) + : macro(tp, (tp + fn), na_rm); - } - Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool na_rm, double beta) const override { - Eigen::ArrayXd output(matrix.rows()); - Eigen::ArrayXd tp(matrix.rows()), fp(matrix.rows()), fn(matrix.rows()); - double beta_sq = beta * beta; + // 2) return with + // ternary expression + return do_micro + ? micro((1.0 + beta_sq) * tp, (1.0 + beta_sq) * tp + beta_sq * fn + fp, na_rm) + : macro((1.0 + beta_sq) * tp, (1.0 + beta_sq) * tp + beta_sq * fn + fp, na_rm); - TP(matrix, tp); - FP(matrix, fp); - FN(matrix, fn); - Eigen::ArrayXd precision = tp / (tp + fp); - Eigen::ArrayXd recall = tp / (tp + fn); + } - output = (1 + beta_sq) * (precision * recall) / (beta_sq * precision + recall); - return Rcpp::wrap(output); - } + Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const override { + Eigen::ArrayXd output(matrix.rows()); + Eigen::ArrayXd tp(matrix.rows()), fp(matrix.rows()), fn(matrix.rows()); + double beta_sq = beta * beta; + + TP(matrix, tp); + FP(matrix, fp); + FN(matrix, fn); + + Eigen::ArrayXd precision = tp / (tp + fp); + Eigen::ArrayXd recall = tp / (tp + fn); + + output = (1.0 + beta_sq) * (precision * recall) / (beta_sq * precision + recall); + return Rcpp::wrap(output); + } }; -#endif // CLASSIFICATION_FBETASCORE_H +#endif
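The two `compute()` overloads above agree because of the identity (with $P = TP/(TP+FP)$ and $R = TP/(TP+FN)$)

$$
F_{\beta} = (1+\beta^{2})\,\frac{P\,R}{\beta^{2}P + R} = \frac{(1+\beta^{2})\,TP}{(1+\beta^{2})\,TP + \beta^{2}\,FN + FP},
$$

which is why the micro/macro branch can aggregate the numerator and denominator counts directly instead of averaging per-class scores.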
diff --git a/src/classification_FalseDiscoveryRate.cpp b/src/classification_FalseDiscoveryRate.cpp index 5bca29cd..4cd9ac81 100644 --- a/src/classification_FalseDiscoveryRate.cpp +++ b/src/classification_FalseDiscoveryRate.cpp @@ -8,25 +8,28 @@ using namespace Rcpp; //' @method fdr factor //' @export // [[Rcpp::export(fdr.factor)]] -NumericVector fdr(const IntegerVector& actual, const IntegerVector& predicted, Nullable<bool> micro = R_NilValue, const bool& na_rm = true) { - FalseDiscoveryRateMetric foo; // Instantiate FDR metric - return classification_base(actual, predicted, foo, micro, na_rm); +Rcpp::NumericVector FalseDiscoveryRate(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable<bool> micro = R_NilValue, const bool& na_rm = true) +{ + FalseDiscoveryRateClass cook(na_rm); + return recipe(cook, actual, predicted, std::nullopt, micro); } //' @rdname fdr //' @method weighted.fdr factor //' @export // [[Rcpp::export(weighted.fdr.factor)]] -NumericVector weighted_fdr(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, Nullable<bool> micro = R_NilValue, const bool& na_rm = true) { - FalseDiscoveryRateMetric foo; // Instantiate FDR metric - return classification_base(actual, predicted, w, foo, micro, na_rm); +Rcpp::NumericVector weighted_FalseDiscoveryRate(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable<bool> micro = R_NilValue, const bool& na_rm = true) +{ + FalseDiscoveryRateClass cook(na_rm); + return recipe(cook, actual, predicted, w, micro); } //' @rdname fdr //' @method fdr cmatrix //' @export // [[Rcpp::export(fdr.cmatrix)]] -NumericVector fdr_cmatrix(const NumericMatrix& x, Nullable<bool> micro = R_NilValue, const bool& na_rm = true) { - FalseDiscoveryRateMetric foo; // Instantiate FDR metric - return classification_base(x, foo, micro, na_rm); +Rcpp::NumericVector cmatrix_FalseDiscoveryRate(const Rcpp::NumericMatrix& x, Rcpp::Nullable<bool> micro = R_NilValue, const bool& na_rm = true) +{ + FalseDiscoveryRateClass cook(na_rm); + return recipe(cook, x, micro); }
diff --git a/src/classification_FalseDiscoveryRate.h b/src/classification_FalseDiscoveryRate.h index 540960cc..b095a08a 100644 --- a/src/classification_FalseDiscoveryRate.h +++ b/src/classification_FalseDiscoveryRate.h @@ -7,36 +7,43 @@ #define EIGEN_USE_MKL_ALL EIGEN_MAKE_ALIGNED_OPERATOR_NEW -class FalseDiscoveryRateMetric : public classification { -public: +class FalseDiscoveryRateClass : public classification { - Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool do_micro, bool na_rm) const override { + private: + bool na_rm; - // 0) Declare variables and size - // for efficiency. - // NOTE: Micro and macro already wraps and exports as Rcpp - Rcpp::NumericVector output(1); - Eigen::ArrayXd fp(matrix.rows()), tp(matrix.rows()); + public: - FP(matrix, fp); - TP(matrix, tp); + FalseDiscoveryRateClass(bool na_rm) + : na_rm(na_rm) {} - return do_micro - ? micro(fp, fp + tp, na_rm) - : macro(fp, fp + tp, na_rm); + Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool do_micro) const override { - } + // 0) Declare variables and size + // for efficiency. + // NOTE: Micro and macro already wraps and exports as Rcpp + Rcpp::NumericVector output(1); + Eigen::ArrayXd fp(matrix.rows()), tp(matrix.rows()); - Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool na_rm) const override { - Eigen::ArrayXd output(matrix.rows()); - Eigen::ArrayXd fp(matrix.rows()), tp(matrix.rows()); + FP(matrix, fp); + TP(matrix, tp); - FP(matrix, fp); - TP(matrix, tp); + return do_micro + ? micro(fp, fp + tp, na_rm) + : macro(fp, fp + tp, na_rm); - output = fp / (fp + tp); - return Rcpp::wrap(output); - } + } + + Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const override { + Eigen::ArrayXd output(matrix.rows()); + Eigen::ArrayXd fp(matrix.rows()), tp(matrix.rows()); + + FP(matrix, fp); + TP(matrix, tp); + + output = fp / (fp + tp); + return Rcpp::wrap(output); + } }; #endif // CLASSIFICATION_FDR_H
diff --git a/src/classification_FalseOmissionRate.cpp b/src/classification_FalseOmissionRate.cpp index dea13128..6aa07de0 100644 --- a/src/classification_FalseOmissionRate.cpp +++ b/src/classification_FalseOmissionRate.cpp @@ -8,25 +8,28 @@ using namespace Rcpp; //' @method fer factor //' @export // [[Rcpp::export(fer.factor)]] -NumericVector fer(const IntegerVector& actual, const IntegerVector& predicted, Nullable<bool> micro = R_NilValue, const bool& na_rm = true) { - FalseOmissionRateMetric foo; // Instantiate FOR metric - return classification_base(actual, predicted, foo, micro, na_rm); +Rcpp::NumericVector FalseOmissionRate(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable<bool> micro = R_NilValue, const bool& na_rm = true) +{ + FalseOmissionRateClass cook(na_rm); + return recipe(cook, actual, predicted, std::nullopt, micro); } //' @rdname fer //' @method weighted.fer factor //' @export // [[Rcpp::export(weighted.fer.factor)]] -NumericVector weighted_fer(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, Nullable<bool> micro = R_NilValue, const bool& na_rm = true) { - FalseOmissionRateMetric foo; // Instantiate FOR metric - return classification_base(actual, predicted, w, foo, micro, na_rm); +Rcpp::NumericVector weighted_FalseOmissionRate(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable<bool> micro = R_NilValue, const bool& na_rm = true) +{ + FalseOmissionRateClass cook(na_rm); + return recipe(cook, actual, predicted, w, micro); } //' @rdname fer //' @method fer cmatrix //' @export // [[Rcpp::export(fer.cmatrix)]] -NumericVector fer_cmatrix(const NumericMatrix& x, Nullable<bool> micro = R_NilValue, const bool& na_rm = true) { - FalseOmissionRateMetric foo; // Instantiate FOR metric - return classification_base(x, foo, micro, na_rm); +Rcpp::NumericVector cmatrix_FalseOmissionRate(const Rcpp::NumericMatrix& x, Rcpp::Nullable<bool> micro = R_NilValue, const bool& na_rm = true) +{ + FalseOmissionRateClass cook(na_rm); + return recipe(cook, x, micro);
} diff --git a/src/classification_FalseOmissionRate.h b/src/classification_FalseOmissionRate.h index 20536287..7448ab0f 100644 --- a/src/classification_FalseOmissionRate.h +++ b/src/classification_FalseOmissionRate.h @@ -13,43 +13,48 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW the confusion matrix. So there is no need to add an overloaded function for the weighted metrics. */ -class FalseOmissionRateMetric : public classification { -public: - - // Compute FOR with micro or macro aggregation - Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool do_micro, bool na_rm) const override { - // 0) Declare variables and size - // for efficiency. - // NOTE: Micro and macro already wraps and exports as Rcpp - Rcpp::NumericVector output(1); - Eigen::ArrayXd fn(matrix.rows()), tn(matrix.rows()); - - // Create FN and TN arrays for calculations - FN(matrix, fn); - TN(matrix, tn); - - return do_micro - ? micro(fn, fn + tn, na_rm) - : macro(fn, fn + tn, na_rm); - - } - - // Compute FOR without micro aggregation - Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool na_rm) const override { - // Declare the output value and FN/TN arrays - Eigen::ArrayXd output(matrix.rows()); - Eigen::ArrayXd fn(matrix.rows()), tn(matrix.rows()); - - // Create FN and TN arrays for calculations - FN(matrix, fn); - TN(matrix, tn); - - // Calculate metric - output = fn / (fn + tn); - - // Return with R-compatible class - return Rcpp::wrap(output); - } +class FalseOmissionRateClass : public classification { + + private: + bool na_rm; + + public: + + FalseOmissionRateClass(bool na_rm) + : na_rm(na_rm) {} + + Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool do_micro) const override { + // 0) Declare variables and size + // for efficiency. + // NOTE: Micro and macro already wrap and export as Rcpp + Rcpp::NumericVector output(1); + Eigen::ArrayXd fn(matrix.rows()), tn(matrix.rows()); + + // Create FN and TN arrays for calculations + FN(matrix, fn); + TN(matrix, tn); + + return do_micro + ?
micro(fn, fn + tn, na_rm) + : macro(fn, fn + tn, na_rm); + + } + + Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const override { + // Declare the output value and FN/TN arrays + Eigen::ArrayXd output(matrix.rows()); + Eigen::ArrayXd fn(matrix.rows()), tn(matrix.rows()); + + // Create FN and TN arrays for calculations + FN(matrix, fn); + TN(matrix, tn); + + // Calculate metric + output = fn / (fn + tn); + + // Return with R-compatible class + return Rcpp::wrap(output); + } }; -#endif // CLASSIFICATION_FALSE_OMISSION_RATE_H +#endif diff --git a/src/classification_FalsePositiveRate.cpp b/src/classification_FalsePositiveRate.cpp index b1494ebe..23967921 100644 --- a/src/classification_FalsePositiveRate.cpp +++ b/src/classification_FalsePositiveRate.cpp @@ -8,52 +8,58 @@ using namespace Rcpp; //' @method fpr factor //' @export // [[Rcpp::export(fpr.factor)]] -NumericVector fpr(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro = R_NilValue, const bool& na_rm = true) { - FalsePositiveRateMetric foo; // Instantiate FPR metric - return classification_base(actual, predicted, foo, micro, na_rm); +Rcpp::NumericVector FalsePositiveRate(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + FalsePositiveRateClass cook(na_rm); + return recipe(cook, actual, predicted, std::nullopt, micro); } //' @rdname fpr //' @method weighted.fpr factor //' @export // [[Rcpp::export(weighted.fpr.factor)]] -NumericVector weighted_fpr(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, Nullable micro = R_NilValue, const bool& na_rm = true) { - FalsePositiveRateMetric foo; // Instantiate FPR metric - return classification_base(actual, predicted, w, foo, micro, na_rm); +Rcpp::NumericVector weighted_FalsePositiveRate(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + FalsePositiveRateClass cook(na_rm); + return recipe(cook, actual, predicted, w, micro); } //' @rdname fpr //' @method fpr cmatrix //' @export // [[Rcpp::export(fpr.cmatrix)]] -NumericVector fpr_cmatrix(const NumericMatrix& x, Nullable micro = R_NilValue, const bool& na_rm = true) { - FalsePositiveRateMetric foo; // Instantiate FPR metric - return classification_base(x, foo, micro, na_rm); +Rcpp::NumericVector cmatrix_FalsePositiveRate(const Rcpp::NumericMatrix& x, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + FalsePositiveRateClass cook(na_rm); + return recipe(cook, x, micro); } //' @rdname fpr //' @method fallout factor //' @export // [[Rcpp::export(fallout.factor)]] -NumericVector fallout(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro = R_NilValue, const bool& na_rm = true) { - FalsePositiveRateMetric foo; // Instantiate FPR metric - return classification_base(actual, predicted, foo, micro, na_rm); +Rcpp::NumericVector Fallout(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + FalsePositiveRateClass cook(na_rm); + return recipe(cook, actual, predicted, std::nullopt, micro); } //' @rdname fpr //' @method weighted.fallout factor //' @export // [[Rcpp::export(weighted.fallout.factor)]] -NumericVector weighted_fallout(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, Nullable micro = R_NilValue, const bool& na_rm = true) { - 
FalsePositiveRateMetric foo; // Instantiate FPR metric - return classification_base(actual, predicted, w, foo, micro, na_rm); +Rcpp::NumericVector weighted_Fallout(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + FalsePositiveRateClass cook(na_rm); + return recipe(cook, actual, predicted, w, micro); } //' @rdname fpr //' @method fallout cmatrix //' @export // [[Rcpp::export(fallout.cmatrix)]] -NumericVector fallout_cmatrix(const NumericMatrix& x, Nullable micro = R_NilValue, const bool& na_rm = true) { - FalsePositiveRateMetric foo; // Instantiate FPR metric - return classification_base(x, foo, micro, na_rm); +Rcpp::NumericVector cmatrix_Fallout(const Rcpp::NumericMatrix& x, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + FalsePositiveRateClass cook(na_rm); + return recipe(cook, x, micro); } diff --git a/src/classification_FalsePositiveRate.h b/src/classification_FalsePositiveRate.h index a90c200d..e7470bf0 100644 --- a/src/classification_FalsePositiveRate.h +++ b/src/classification_FalsePositiveRate.h @@ -7,38 +7,42 @@ #define EIGEN_USE_MKL_ALL EIGEN_MAKE_ALIGNED_OPERATOR_NEW -class FalsePositiveRateMetric : public classification { -public: - - // Compute FPR with micro or macro aggregation - Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool do_micro, bool na_rm) const override { - - // 0) Declare variables and size - // for efficiency. - // NOTE: Micro and macro already wraps and exports as Rcpp - Rcpp::NumericVector output(1); - Eigen::ArrayXd fp(matrix.rows()), tn(matrix.rows()), auxillary(matrix.rows()); - - FP(matrix, fp); - TN(matrix, tn); - - return do_micro - ? micro(fp, fp + tn, na_rm) - : macro(fp, fp + tn, na_rm); - - } - - // Compute FPR without micro aggregation - Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool na_rm) const override { - Eigen::ArrayXd output(matrix.rows()); - Eigen::ArrayXd fp(matrix.rows()), tn(matrix.rows()); - - FP(matrix, fp); - TN(matrix, tn); - - output = fp / (fp + tn); - return Rcpp::wrap(output); - } +class FalsePositiveRateClass : public classification { + + private: + bool na_rm; + + public: + FalsePositiveRateClass(bool na_rm) + : na_rm(na_rm) {} + + Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool do_micro) const override { + + // 0) Declare variables and size + // for efficiency. + // NOTE: Micro and macro already wrap and export as Rcpp + Rcpp::NumericVector output(1); + Eigen::ArrayXd fp(matrix.rows()), tn(matrix.rows()); // unused auxiliary array dropped + + FP(matrix, fp); + TN(matrix, tn); + + return do_micro + ?
micro(fp, fp + tn, na_rm) + : macro(fp, fp + tn, na_rm); + + } + + Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const override { + Eigen::ArrayXd output(matrix.rows()); + Eigen::ArrayXd fp(matrix.rows()), tn(matrix.rows()); + + FP(matrix, fp); + TN(matrix, tn); + + output = fp / (fp + tn); + return Rcpp::wrap(output); + } }; -#endif // CLASSIFICATION_FPR_H +#endif diff --git a/src/classification_FowlkesMallowsIndex.cpp b/src/classification_FowlkesMallowsIndex.cpp index 06d80b88..23daf61e 100644 --- a/src/classification_FowlkesMallowsIndex.cpp +++ b/src/classification_FowlkesMallowsIndex.cpp @@ -8,20 +8,20 @@ using namespace Rcpp; //' @method fmi factor //' @export // [[Rcpp::export(fmi.factor)]] -Rcpp::NumericVector fmi(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted) +Rcpp::NumericVector FowlkesMallowsIndex(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted) { - FMIMetric foo; // Instantiate AccuracyMetric - return classification_base(actual, predicted, foo); + FowlkesMallowsIndexClass cook; + return recipe(cook, actual, predicted); } //' @rdname fmi //' @method fmi cmatrix //' @export // [[Rcpp::export(fmi.cmatrix)]] -Rcpp::NumericVector fmi_cmatrix(const NumericMatrix& x) +Rcpp::NumericVector cmatrix_FowlkesMallowsIndex(const Rcpp::NumericMatrix& x) { - FMIMetric foo; // Instantiate AccuracyMetric - return classification_base(x, foo); + FowlkesMallowsIndexClass cook; + return recipe(cook, x); } diff --git a/src/classification_FowlkesMallowsIndex.h b/src/classification_FowlkesMallowsIndex.h index 99e4fed8..ff036950 100644 --- a/src/classification_FowlkesMallowsIndex.h +++ b/src/classification_FowlkesMallowsIndex.h @@ -16,33 +16,33 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW * a different result than scikit-learn. Hence this approach * is preferred */ -class FMIMetric : public classification { -public: +class FowlkesMallowsIndexClass : public classification { - Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const override { + public: - // 0) set sizes - // of arrays - Eigen::ArrayXd output(1), N(1), pk(1), qk(1), tk(1); - Eigen::VectorXd col_sum(matrix.rows()), row_sum(matrix.rows()); + Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const override { - // 1) calculate values - // accordingly - N = matrix.sum(); - row_sum = matrix.rowwise().sum(); - col_sum = matrix.colwise().sum(); - tk = matrix.cwiseProduct(matrix).sum() - N; - pk = col_sum.squaredNorm() - N; - qk = row_sum.squaredNorm() - N; + // 0) set sizes + // of arrays + Eigen::ArrayXd output(1), N(1), pk(1), qk(1), tk(1); + Eigen::VectorXd col_sum(matrix.rows()), row_sum(matrix.rows()); - // 2) calculate output - // value - output = (tk / pk) * (tk / qk); + // 1) calculate values + // accordingly + N = matrix.sum(); + row_sum = matrix.rowwise().sum(); + col_sum = matrix.colwise().sum(); + tk = matrix.cwiseProduct(matrix).sum() - N; + pk = col_sum.squaredNorm() - N; + qk = row_sum.squaredNorm() - N; - return Rcpp::wrap(output.array().sqrt()); + // 2) calculate output + // value + output = (tk / pk) * (tk / qk); - } + return Rcpp::wrap(output.array().sqrt()); + } }; diff --git a/src/classification_Helpers.h b/src/classification_Helpers.h index 471c9672..a9427b55 100644 --- a/src/classification_Helpers.h +++ b/src/classification_Helpers.h @@ -1,16 +1,58 @@ +#ifndef CLASSIFICATION_HELPERS_H +#define CLASSIFICATION_HELPERS_H + #include #include -#include "classification_Utils.h" #include #include +#include #include #include #include +#include
"classification_ConfusionMatrix.h" + #define EIGEN_USE_MKL_ALL EIGEN_MAKE_ALIGNED_OPERATOR_NEW using namespace Rcpp; +class classification { + public: + + /* + Note to future self: + + 1.) These are just signatures. So in essence it doesn't matter + what you call them. The imporant thing is that they are distinguishable + + 2.) All functions have the same signature + 2.1) A Matrix (passed via helpers) + 2.2) Booleans to determine behaviour inside + the respective functions. For example: + + boolean 1: Controls missing values + + boolean 2: Controls wether micro/macro values are + to be rerrturned + + boolean k: Other behaviour that I can't think of as of now. + + It seems somewhat redundant and excessive to do it like this, but until a better + solution is found, this is what we do. + + Warning: ALL signatures has to be used (I think) + */ + + virtual Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const { + return Rcpp::NumericVector(); + }; + + virtual Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool na_rm) const { + return Rcpp::NumericVector(); + }; + + virtual ~classification() = default; +}; + + + /* Micro and Macro averages with missing value handling. If na_rm is TRUE it divides with the available number @@ -86,37 +128,6 @@ inline __attribute__((always_inline)) Rcpp::NumericVector macro -inline __attribute__((always_inline)) EigenType micro( - const EigenType& numerator, - const EigenType& denominator, - bool na_rm) -{ - double result = numerator.sum() / denominator.sum(); - return EigenType::Constant(1, result); // Use EigenType to determine both argument and return type -} - -template -inline __attribute__((always_inline)) EigenType macro( - const EigenType& numerator, - const EigenType& denominator, - bool na_rm) -{ - EigenType z = numerator / denominator; - double result = na_rm - ? z.isNaN().select(0, z).sum() / (z.isNaN() == false).count() - : z.isNaN().select(0, z).sum() / z.size(); - - return EigenType::Constant(1, result); // Use EigenType to determine both argument and return type -} -*/ - - - - - /* Calculating TP, FP, TN and FN from matrices. @@ -152,262 +163,75 @@ inline __attribute__((always_inline)) void FN(const MatrixType& matrix, Eigen::A } -/* -Confusion Matrix: - - ***ARGS*** - `actual`: IntegerVector - `predicted`: IntegerVector - `k`: int (NOTE: has to be passed as k + 1) - `weights`: Nullable NumericVector - - 1. This template returns a Eigen::MatrixX in weighted or unweighted form - depending on the argument `weight` - - The tests shows that for 1e7 observations it is still faster than the original implementation - up to v0.1-1; it runs 5.77 ms on average, while the original are 5.89 ms on average. This might be a chance - finding, but it seems they are equivalent in terms of speed, efficiency and memory handling. - - For lower values this function is not faster. In fact its 8 times slower than the original implementation - this is due to the overhead cost of the if-statements in relation to weighted - and unweighted version. - - It does not handle missing values, and will not handle missing values as this is inefficient. More on this - will come later. 
- -*/ -template -inline __attribute__((always_inline)) MatrixType confusionMatrix( - const Rcpp::IntegerVector& actual, - const Rcpp::IntegerVector& predicted, - const int& k, - const Rcpp::Nullable& weights = R_NilValue) { - - // 1) general setup of the function - // 1.1) initialize a k x k placeholder matrix - MatrixType placeholder = MatrixType::Zero(k, k).eval(); - - // 1.2) determine the size of - // the actual vector - used for the loop - const int n = actual.size(); - - // 1.3) initialize the pointers - // for efficient loops - const int* actual_ptr = actual.begin(); - const int* predicted_ptr = predicted.begin(); - const double* weights_ptr = weights.isNotNull() ? Rcpp::NumericVector(weights).begin() : nullptr; - auto matrix_ptr = placeholder.data(); - - // 2) populate the matrix - // according to location conditional - // on wether weights are passed - int i = 0; - if (weights_ptr) { - - for (; i <= n - 6; i += 6) { - matrix_ptr[predicted_ptr[i] * k + actual_ptr[i]] += weights_ptr[i]; - matrix_ptr[predicted_ptr[i + 1] * k + actual_ptr[i + 1]] += weights_ptr[i + 1]; - matrix_ptr[predicted_ptr[i + 2] * k + actual_ptr[i + 2]] += weights_ptr[i + 2]; - matrix_ptr[predicted_ptr[i + 3] * k + actual_ptr[i + 3]] += weights_ptr[i + 3]; - matrix_ptr[predicted_ptr[i + 4] * k + actual_ptr[i + 4]] += weights_ptr[i + 4]; - matrix_ptr[predicted_ptr[i + 5] * k + actual_ptr[i + 5]] += weights_ptr[i + 5]; - } - - for (; i < n; ++i) { - matrix_ptr[predicted_ptr[i] * k + actual_ptr[i]] += weights_ptr[i]; - } - - } else { - - for (; i <= n - 6; i += 6) { - ++matrix_ptr[predicted_ptr[i] * k + actual_ptr[i]]; - ++matrix_ptr[predicted_ptr[i + 1] * k + actual_ptr[i + 1]]; - ++matrix_ptr[predicted_ptr[i + 2] * k + actual_ptr[i + 2]]; - ++matrix_ptr[predicted_ptr[i + 3] * k + actual_ptr[i + 3]]; - ++matrix_ptr[predicted_ptr[i + 4] * k + actual_ptr[i + 4]]; - ++matrix_ptr[predicted_ptr[i + 5] * k + actual_ptr[i + 5]]; - } - - for (; i < n; ++i) { - ++matrix_ptr[predicted_ptr[i] * k + actual_ptr[i]]; - } - } - - - // 3) return the matrix - // but leave index - // (NOTE: Cpp is 0-indexed, and can't include zero) - return placeholder.block(1, 1, k - 1, k - 1); -} - - -/* - Note to future self: - - This implementation relies on variadic templates (https://www.geeksforgeeks.org/variadic-function-templates-c/) - it basically works like ellipsis (...) in R. - + Why? The main issue is that for a vast majority of the classification metric we would need additional arguments - that extends beyond the micro, na_rm arguments. And a further benefit is that we can add additional aguments - to the functions without having to recode the whole code-base. - - The classification_base functions works as follows - - + (actual, predicted), (actual, predicted, micro), (actual, predicted, w), (actual, predicted, w, micro), (matrix) and (matrix, micro) - - So it's one overloaded function per function specification. +template +Rcpp::NumericVector prepare( + const Function& cook, + const MatrixType& matrix, + const Rcpp::Nullable& micro, + const Rcpp::CharacterVector& names, + Args&&... args) { - NOTE: Working OOP here might be a huge benefit. But this won't be implement before anytime soon. The base package has - to be done first. - -*/ - -// matrix templates // - -// 1) matrix template -// without micro-agument -template -Rcpp::NumericVector classification_base( - const Rcpp::NumericMatrix& matrix, - const classification& foo, - Args&&... 
args) -{ - // 0) Convert matrix to Eigen format - Eigen::MatrixXd eigen_matrix = Rcpp::as(matrix); - - // 1) Forward the additional arguments to foo.compute - return foo.compute(eigen_matrix, std::forward(args)...); -} - -// 2) matrix template -// with micro argument -template -Rcpp::NumericVector classification_base( - const Rcpp::NumericMatrix& matrix, - const classification& foo, - Rcpp::Nullable micro, - Args&&... args) -{ - // 0) Extract dimension names - const Rcpp::List& dimnames = matrix.attr("dimnames"); - const Rcpp::CharacterVector& names = dimnames[1]; - const int k = names.size(); - - // 1) Convert matrix to Eigen format - Eigen::MatrixXd eigen_matrix = Rcpp::as(matrix); - - // 2) Handle micro or macro aggregation if (micro.isNull()) { - Rcpp::NumericVector output(k); - output = foo.compute(eigen_matrix, std::forward(args)...); - output.attr("names") = names; // Assign column names as names + Rcpp::NumericVector output(names.size()); + output = cook.compute(matrix, std::forward(args)...); + output.attr("names") = names; return output; } Rcpp::NumericVector output(1); - output = foo.compute(eigen_matrix, Rcpp::as(micro), std::forward(args)...); + output = cook.compute(matrix, Rcpp::as(micro), std::forward(args)...); return output; + } -// IntegerVectorr templates // -// 1) IntegerVector template without -// micro-argument template -Rcpp::NumericVector classification_base( - const Rcpp::IntegerVector& actual, - const Rcpp::IntegerVector& predicted, - const classification& foo, - Args&&... args) -{ - // 0) Extract the number of classes - Rcpp::CharacterVector levels = actual.attr("levels"); - int k = levels.length(); +Rcpp::NumericVector recipe( + const classification& cook, + const Rcpp::NumericMatrix& matrix, + const std::optional>& micro = std::nullopt, + Args&&... args) { - // 1) Construct the confusion matrix - Eigen::MatrixXd matrix = confusionMatrix(actual, predicted, k + 1); + const Rcpp::List dimnames = matrix.attr("dimnames"); + const Rcpp::CharacterVector names = Rcpp::as(dimnames[1]); + const Eigen::MatrixXd eigen_matrix = Rcpp::as(matrix); - // 2) Forward the additional arguments to foo.compute - return foo.compute(matrix, std::forward(args)...); + return micro.has_value() + ? prepare(cook, eigen_matrix, *micro, names, std::forward(args)...) + : cook.compute(eigen_matrix, std::forward(args)...); + } -// 2) IntegerVector template without -// micro-argument with weights template -Rcpp::NumericVector classification_base( +Rcpp::NumericVector recipe( + const classification& cook, const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, - const Rcpp::NumericVector& w, - const classification& foo, - Args&&... args) -{ - // 0) Extract the number of classes - Rcpp::CharacterVector levels = actual.attr("levels"); - int k = levels.length(); - - // 1) Construct the confusion matrix with weights - Eigen::MatrixXd matrix = confusionMatrix(actual, predicted, k + 1, w); - - // 2) Forward the additional arguments to foo.compute - return foo.compute(matrix, std::forward(args)...); -} + const std::optional& w = std::nullopt, + const std::optional>& micro = std::nullopt, + Args&&... args){ -// 3) IntegerVector template with -// micro-argument -template -Rcpp::NumericVector classification_base( - const Rcpp::IntegerVector& actual, - const Rcpp::IntegerVector& predicted, - const classification& foo, - Rcpp::Nullable micro, - Args&&... 
args) -{ - // 0) Extract the number of classes - Rcpp::CharacterVector levels = actual.attr("levels"); - int k = levels.length(); + + /* + TODO: Check if it's faster to have an if-else statement instead + - The names are only used if micro != NULL, so the calculations + are redundant. + */ - // 1) Construct the confusion matrix - Eigen::MatrixXd matrix = confusionMatrix(actual, predicted, k + 1); + const Rcpp::CharacterVector names = actual.attr("levels"); + const int k = names.size(); + Eigen::MatrixXd matrix(k + 1, k + 1); - // 2) Handle micro or macro aggregation - if (micro.isNull()) { - Rcpp::NumericVector output(k); - output = foo.compute(matrix, std::forward(args)...); - output.attr("names") = levels; // Assign levels as names - return output; - } + ConfusionMatrixClass matrixConstructor(actual, predicted); + matrix = w.has_value() + ? matrixConstructor.InputMatrix(*w) + : matrixConstructor.InputMatrix(); - Rcpp::NumericVector output(1); - output = foo.compute(matrix, Rcpp::as(micro), std::forward(args)...); - return output; + return micro.has_value() + ? prepare(cook, matrix, *micro, names, std::forward(args)...) + : cook.compute(matrix, std::forward(args)...); } -// 4) IntegerVector template with -// micro-argument and w -template -Rcpp::NumericVector classification_base( - const Rcpp::IntegerVector& actual, - const Rcpp::IntegerVector& predicted, - const Rcpp::NumericVector& w, - const classification& foo, - Rcpp::Nullable micro, - Args&&... args) -{ - // 0) Extract the number of classes - Rcpp::CharacterVector levels = actual.attr("levels"); - int k = levels.length(); - - // 1) Construct the confusion matrix - Eigen::MatrixXd matrix = confusionMatrix(actual, predicted, k + 1, w); - - // 2) Handle micro or macro aggregation - if (micro.isNull()) { - Rcpp::NumericVector output(k); - output = foo.compute(matrix, std::forward(args)...); - output.attr("names") = levels; // Assign levels as names - return output; - } - - Rcpp::NumericVector output(1); - output = foo.compute(matrix, Rcpp::as(micro), std::forward(args)...); - return output; -} \ No newline at end of file +#endif \ No newline at end of file
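A minimal sketch of how a metric plugs into the recipe()/classification pattern above (illustrative only; ExamplePrevalenceClass is a hypothetical name and not part of this patch):

    class ExamplePrevalenceClass : public classification {
      public:
        // recipe() falls through to this overload when `micro` is not passed
        Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const override {
            Eigen::ArrayXd tp(matrix.rows()), fn(matrix.rows());
            TP(matrix, tp);
            FN(matrix, fn);
            Eigen::ArrayXd output = (tp + fn) / matrix.sum(); // per-class prevalence
            return Rcpp::wrap(output);
        }
    };
    // usage: ExamplePrevalenceClass cook; return recipe(cook, actual, predicted);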
diff --git a/src/classification_JaccardIndex.cpp b/src/classification_JaccardIndex.cpp index 38cf0f25..4fe4c3c7 100644 --- a/src/classification_JaccardIndex.cpp +++ b/src/classification_JaccardIndex.cpp @@ -8,27 +8,30 @@ using namespace Rcpp; //' @method jaccard factor //' @export // [[Rcpp::export(jaccard.factor)]] -NumericVector jaccard(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro = R_NilValue, const bool& na_rm = true) { - JaccardIndexMetric foo; // Instantiate Jaccard Index metric - return classification_base(actual, predicted, foo, micro, na_rm); +Rcpp::NumericVector JaccardIndex(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + JaccardIndexClass cook(na_rm); + return recipe(cook, actual, predicted, std::nullopt, micro); } //' @rdname jaccard //' @method weighted.jaccard factor //' @export // [[Rcpp::export(weighted.jaccard.factor)]] -NumericVector weighted_jaccard(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, Nullable micro = R_NilValue, const bool& na_rm = true) { - JaccardIndexMetric foo; // Instantiate Jaccard Index metric - return classification_base(actual, predicted, w, foo, micro, na_rm); +Rcpp::NumericVector weighted_JaccardIndex(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + JaccardIndexClass cook(na_rm); + return recipe(cook, actual, predicted, w, micro); } //' @rdname jaccard //' @method jaccard cmatrix //' @export // [[Rcpp::export(jaccard.cmatrix)]] -NumericVector jaccard_cmatrix(const NumericMatrix& x, Nullable micro = R_NilValue, const bool& na_rm = true) { - JaccardIndexMetric foo; // Instantiate Jaccard Index metric - return classification_base(x, foo, micro, na_rm); +Rcpp::NumericVector cmatrix_JaccardIndex(const Rcpp::NumericMatrix& x, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + JaccardIndexClass cook(na_rm); + return recipe(cook, x, micro); } @@ -36,27 +39,30 @@ NumericVector jaccard_cmatrix(const NumericMatrix& x, Nullable micro = R_N //' @method csi factor //' @export // [[Rcpp::export(csi.factor)]] -NumericVector csi(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro = R_NilValue, const bool& na_rm = true) { - JaccardIndexMetric foo; // Instantiate Jaccard Index metric - return classification_base(actual, predicted, foo, micro, na_rm); +Rcpp::NumericVector CriticalSuccessIndex(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + JaccardIndexClass cook(na_rm); + return recipe(cook, actual, predicted, std::nullopt, micro); } //' @rdname jaccard //' @method weighted.csi factor //' @export // [[Rcpp::export(weighted.csi.factor)]] -NumericVector weighted_csi(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, Nullable micro = R_NilValue, const bool& na_rm = true) { - JaccardIndexMetric foo; // Instantiate Jaccard Index metric - return classification_base(actual, predicted, w, foo, micro, na_rm); +Rcpp::NumericVector weighted_CriticalSuccessIndex(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + JaccardIndexClass cook(na_rm); + return recipe(cook, actual, predicted, w, micro); } //' @rdname jaccard //' @method csi cmatrix //' @export // [[Rcpp::export(csi.cmatrix)]] -NumericVector csi_cmatrix(const NumericMatrix& x, Nullable micro = R_NilValue, const bool& na_rm = true) { - JaccardIndexMetric foo; // Instantiate Jaccard Index metric - return classification_base(x, foo, micro, na_rm); +Rcpp::NumericVector cmatrix_CriticalSuccessIndex(const Rcpp::NumericMatrix& x, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + JaccardIndexClass cook(na_rm); + return recipe(cook, x, micro); } @@ -64,25 +70,28 @@ NumericVector csi_cmatrix(const NumericMatrix& x, Nullable micro = R_NilVa //' @method tscore factor //' @export // [[Rcpp::export(tscore.factor)]] -NumericVector tscore(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro = R_NilValue, const bool& na_rm = true) { - JaccardIndexMetric foo; // Instantiate Jaccard Index metric - return classification_base(actual, predicted, foo, micro, na_rm); +Rcpp::NumericVector ThreatScore(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + JaccardIndexClass cook(na_rm); + return recipe(cook, actual, predicted, std::nullopt, micro); } //' @rdname jaccard //' @method weighted.tscore factor //' @export // [[Rcpp::export(weighted.tscore.factor)]] -NumericVector weighted_tscore(const IntegerVector& actual, const IntegerVector& predicted,
const NumericVector& w, Nullable micro = R_NilValue, const bool& na_rm = true) { - JaccardIndexMetric foo; // Instantiate Jaccard Index metric - return classification_base(actual, predicted, w, foo, micro, na_rm); +Rcpp::NumericVector weighted_ThreatScore(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + JaccardIndexClass cook(na_rm); + return recipe(cook, actual, predicted, w, micro); } //' @rdname jaccard //' @method tscore cmatrix //' @export // [[Rcpp::export(tscore.cmatrix)]] -NumericVector tscore_cmatrix(const NumericMatrix& x, Nullable micro = R_NilValue, const bool& na_rm = true) { - JaccardIndexMetric foo; // Instantiate Jaccard Index metric - return classification_base(x, foo, micro, na_rm); +Rcpp::NumericVector cmatrix_ThreatScore(const Rcpp::NumericMatrix& x, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + JaccardIndexClass cook(na_rm); + return recipe(cook, x, micro); } \ No newline at end of file diff --git a/src/classification_JaccardIndex.h b/src/classification_JaccardIndex.h index 77497cb4..37f99d9e 100644 --- a/src/classification_JaccardIndex.h +++ b/src/classification_JaccardIndex.h @@ -7,37 +7,43 @@ #define EIGEN_USE_MKL_ALL EIGEN_MAKE_ALIGNED_OPERATOR_NEW -class JaccardIndexMetric : public classification { -public: +class JaccardIndexClass : public classification { - Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool do_micro, bool na_rm) const override { - Eigen::ArrayXd output(1); - Eigen::ArrayXd tp(matrix.rows()), fp(matrix.rows()), fn(matrix.rows()); + private: + bool na_rm; - TP(matrix, tp); - FP(matrix, fp); - FN(matrix, fn); + public: + JaccardIndexClass(bool na_rm) + : na_rm(na_rm) {} + Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool do_micro) const override { + Eigen::ArrayXd output(1); + Eigen::ArrayXd tp(matrix.rows()), fp(matrix.rows()), fn(matrix.rows()); - // 2) retun with - // ternary expression - return do_micro - ? micro(tp, (tp + fp + fn), na_rm) - : macro(tp, (tp + fp + fn), na_rm); + TP(matrix, tp); + FP(matrix, fp); + FN(matrix, fn); - } - Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool na_rm) const override { - Eigen::ArrayXd output(matrix.rows()); - Eigen::ArrayXd tp(matrix.rows()), fp(matrix.rows()), fn(matrix.rows()); + // 2) return with + // ternary expression + return do_micro + ?
micro(tp, (tp + fp + fn), na_rm) + : macro(tp, (tp + fp + fn), na_rm); - TP(matrix, tp); - FP(matrix, fp); - FN(matrix, fn); + } - output = tp / (tp + fp + fn); - return Rcpp::wrap(output); - } + Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const override { + Eigen::ArrayXd output(matrix.rows()); + Eigen::ArrayXd tp(matrix.rows()), fp(matrix.rows()), fn(matrix.rows()); + + TP(matrix, tp); + FP(matrix, fp); + FN(matrix, fn); + + output = tp / (tp + fp + fn); + return Rcpp::wrap(output); + } }; #endif // CLASSIFICATION_JACCARD_H diff --git a/src/classification_MatthewsCorrelationCoefficient.cpp b/src/classification_MatthewsCorrelationCoefficient.cpp index d3593bad..2cbc1047 100644 --- a/src/classification_MatthewsCorrelationCoefficient.cpp +++ b/src/classification_MatthewsCorrelationCoefficient.cpp @@ -9,30 +9,30 @@ using namespace Rcpp; //' @method mcc factor //' @export // [[Rcpp::export(mcc.factor)]] -Rcpp::NumericVector mcc(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted) +Rcpp::NumericVector MatthewsCorrelationCoefficient(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted) { - MCCMetric foo; // Instantiate MCCMetric - return classification_base(actual, predicted, foo); + MatthewsCorrelationCoefficientClass cook; + return recipe(cook, actual, predicted); } //' @rdname mcc //' @method weighted.mcc factor //' @export // [[Rcpp::export(weighted.mcc.factor)]] -Rcpp::NumericVector weigthed_mcc(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector w) +Rcpp::NumericVector weighted_MatthewsCorrelationCoefficient(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector w) { - MCCMetric foo; // Instantiate MCCMetric - return classification_base(actual, predicted, w, foo); + MatthewsCorrelationCoefficientClass cook; + return recipe(cook, actual, predicted, w); } //' @rdname mcc //' @method mcc cmatrix //' @export // [[Rcpp::export(mcc.cmatrix)]] -Rcpp::NumericVector mcc_cmatrix(const Rcpp::NumericMatrix& x) +Rcpp::NumericVector cmatrix_MatthewsCorrelationCoefficient(const Rcpp::NumericMatrix& x) { - MCCMetric foo; // Instantiate MCCMetric - return classification_base(x, foo); + MatthewsCorrelationCoefficientClass cook; + return recipe(cook, x); } @@ -40,10 +40,10 @@ Rcpp::NumericVector mcc_cmatrix(const Rcpp::NumericMatrix& x) //' @method phi factor //' @export // [[Rcpp::export(phi.factor)]] -Rcpp::NumericVector phi(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted) +Rcpp::NumericVector PhiCoefficient(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted) { - MCCMetric foo; // Instantiate MCCMetric - return classification_base(actual, predicted, foo); + MatthewsCorrelationCoefficientClass cook; + return recipe(cook, actual, predicted); } @@ -51,10 +51,10 @@ Rcpp::NumericVector phi(const Rcpp::IntegerVe //' @method weighted.phi factor //' @export // [[Rcpp::export(weighted.phi.factor)]] -Rcpp::NumericVector weighted_phi(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector w) +Rcpp::NumericVector weighted_PhiCoefficient(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector w) { - MCCMetric foo; // Instantiate MCCMetric - return classification_base(actual, predicted, w, foo); + MatthewsCorrelationCoefficientClass cook; + return recipe(cook, actual, predicted, w); } @@ -63,10 +63,10 @@
Rcpp::NumericVector weighted_phi(const Rcpp:: //' @method phi cmatrix //' @export // [[Rcpp::export(phi.cmatrix)]] -Rcpp::NumericVector phi_cmatrix(const Rcpp::NumericMatrix& x) +Rcpp::NumericVector cmatrix_PhiCoefficient(const Rcpp::NumericMatrix& x) { - MCCMetric foo; // Instantiate MCCMetric - return classification_base(x, foo); + MatthewsCorrelationCoefficientClass cook; + return recipe(cook, x); } diff --git a/src/classification_MatthewsCorrelationCoefficient.h b/src/classification_MatthewsCorrelationCoefficient.h index 0d90e116..9465a829 100644 --- a/src/classification_MatthewsCorrelationCoefficient.h +++ b/src/classification_MatthewsCorrelationCoefficient.h @@ -11,37 +11,37 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW Calculates the Matthews Correlation Coefficient (MCC) using the provided confusion matrix or actual/predicted labels. */ -class MCCMetric : public classification { -public: - // Compute MCC or Phi Coefficient - Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const override { - - // 0) set sizes - // of arrays - Eigen::ArrayXd output(1), N(1), row_sum(matrix.rows()), col_sum(matrix.cols()), tp_sum(1), cov_ytyp(1), cov_ypyp(1), cov_ytyt(1), product(1); - - - // 1) calculate values - // accordingly - tp_sum = matrix.diagonal().sum(); - row_sum = matrix.rowwise().sum(); - col_sum = matrix.colwise().sum(); - N = matrix.sum(); - - // 2) calculate covariances - cov_ytyp = tp_sum * N - row_sum.matrix().dot(col_sum.matrix()); - cov_ypyp = N * N - col_sum.matrix().squaredNorm(); - cov_ytyt = N * N - row_sum.matrix().squaredNorm(); - - // 3) calcualte the product - product = cov_ypyp * cov_ytyt; - - // 4) calculate output - // value - output = cov_ytyp / product.array().sqrt(); - - return Rcpp::wrap(output); - } +class MatthewsCorrelationCoefficientClass : public classification { + + public: + + Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const override { + + Eigen::ArrayXd output(1), N(1), row_sum(matrix.rows()), col_sum(matrix.cols()), tp_sum(1), cov_ytyp(1), cov_ypyp(1), cov_ytyt(1), product(1); + + + // 1) calculate values + // accordingly + tp_sum = matrix.diagonal().sum(); + row_sum = matrix.rowwise().sum(); + col_sum = matrix.colwise().sum(); + N = matrix.sum(); + + // 2) calculate covariances + cov_ytyp = tp_sum * N - row_sum.matrix().dot(col_sum.matrix()); + cov_ypyp = N * N - col_sum.matrix().squaredNorm(); + cov_ytyt = N * N - row_sum.matrix().squaredNorm(); + + // 3) calculate the product + product = cov_ypyp * cov_ytyt; + + // 4) calculate output + // value + output = cov_ytyp / product.array().sqrt(); + + return Rcpp::wrap(output); + } + }; -#endif // CLASSIFICATION_MCC_H +#endif
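For reference, on a 2 x 2 confusion matrix the covariance form above reduces to the familiar binary MCC = (TP * TN - FP * FN) / sqrt((TP + FP)(TP + FN)(TN + FP)(TN + FN)).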
diff --git a/src/classification_NegativeLikelihoodRatio.cpp b/src/classification_NegativeLikelihoodRatio.cpp index f6af6165..c31a4e24 100644 --- a/src/classification_NegativeLikelihoodRatio.cpp +++ b/src/classification_NegativeLikelihoodRatio.cpp @@ -1,6 +1,6 @@ // [[Rcpp::depends(RcppEigen)]] #include -#include "classification_NegativeLikelihoodRatio.h" // NLRMetric definition +#include "classification_NegativeLikelihoodRatio.h" // NegativeLikelihoodRatio definition using namespace Rcpp; @@ -8,25 +8,28 @@ using namespace Rcpp; //' @method nlr factor //' @export // [[Rcpp::export(nlr.factor)]] -NumericVector nlr(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro = R_NilValue) { - NLRMetric foo; // Instantiate NLRMetric - return classification_base(actual, predicted, foo, micro); +Rcpp::NumericVector NegativeLikelihoodRatio(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted) +{ + NegativeLikelihoodRatioClass cook; + return recipe(cook, actual, predicted); } //' @rdname nlr //' @method weighted.nlr factor //' @export // [[Rcpp::export(weighted.nlr.factor)]] -NumericVector weighted_nlr(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, Nullable micro = R_NilValue) { - NLRMetric foo; // Instantiate NLRMetric - return classification_base(actual, predicted, w, foo, micro); +Rcpp::NumericVector weighted_NegativeLikelihoodRatio(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w) +{ + NegativeLikelihoodRatioClass cook; + return recipe(cook, actual, predicted, w); } //' @rdname nlr //' @method nlr cmatrix //' @export // [[Rcpp::export(nlr.cmatrix)]] -NumericVector nlr_cmatrix(const NumericMatrix& x, Nullable micro = R_NilValue) { - NLRMetric foo; // Instantiate NLRMetric - return classification_base(x, foo, micro); +Rcpp::NumericVector cmatrix_NegativeLikelihoodRatio(const Rcpp::NumericMatrix& x) +{ + NegativeLikelihoodRatioClass cook; + return recipe(cook, x); } diff --git a/src/classification_NegativeLikelihoodRatio.h b/src/classification_NegativeLikelihoodRatio.h index 9dfdfc6f..776db839 100644 --- a/src/classification_NegativeLikelihoodRatio.h +++ b/src/classification_NegativeLikelihoodRatio.h @@ -7,27 +7,27 @@ #define EIGEN_USE_MKL_ALL EIGEN_MAKE_ALIGNED_OPERATOR_NEW -class NLRMetric : public classification { -public: +class NegativeLikelihoodRatioClass : public classification { - // Compute NLR without micro aggregation - Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const override { - Eigen::ArrayXd output(matrix.rows()); - Eigen::ArrayXd tp(matrix.rows()), fn(matrix.rows()), tn(matrix.rows()), fp(matrix.rows()); - Eigen::ArrayXd fnr(matrix.rows()), tnr(matrix.rows()); + public: - TP(matrix, tp); - FN(matrix, fn); - TN(matrix, tn); - FP(matrix, fp); + Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const override { + Eigen::ArrayXd output(matrix.rows()); + Eigen::ArrayXd tp(matrix.rows()), fn(matrix.rows()), tn(matrix.rows()), fp(matrix.rows()); + Eigen::ArrayXd fnr(matrix.rows()), tnr(matrix.rows()); - fnr = fn / (tp + fn); - tnr = tn / (fp + tn); + TP(matrix, tp); + FN(matrix, fn); + TN(matrix, tn); + FP(matrix, fp); - output = fnr / tnr; + fnr = fn / (tp + fn); + tnr = tn / (fp + tn); - return Rcpp::wrap(output); - } + output = fnr / tnr; + + return Rcpp::wrap(output); + } }; diff --git a/src/classification_NegativePredictiveValue.cpp b/src/classification_NegativePredictiveValue.cpp index 8477af7d..fa708144 100644 --- a/src/classification_NegativePredictiveValue.cpp +++ b/src/classification_NegativePredictiveValue.cpp @@ -8,25 +8,28 @@ using namespace Rcpp; //' @method npv factor //' @export // [[Rcpp::export(npv.factor)]] -NumericVector npv(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro = R_NilValue, const bool& na_rm = true) { - NegativePredictiveValueMetric foo; // Instantiate NPV metric - return classification_base(actual, predicted, foo, micro, na_rm); +Rcpp::NumericVector NegativePredictiveValue(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + NegativePredictiveValueClass cook(na_rm); + return recipe(cook, actual, predicted, std::nullopt, micro); } //' @rdname npv //' @method weighted.npv factor //' @export //
[[Rcpp::export(weighted.npv.factor)]] -NumericVector weighted_npv(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, Nullable micro = R_NilValue, const bool& na_rm = true) { - NegativePredictiveValueMetric foo; // Instantiate NPV metric - return classification_base(actual, predicted, w, foo, micro, na_rm); +Rcpp::NumericVector weighted_NegativePredictiveValue(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + NegativePredictiveValueClass cook(na_rm); + return recipe(cook, actual, predicted, w, micro); } //' @rdname npv //' @method npv cmatrix //' @export // [[Rcpp::export(npv.cmatrix)]] -NumericVector npv_cmatrix(const NumericMatrix& x, Nullable micro = R_NilValue, const bool& na_rm = true) { - NegativePredictiveValueMetric foo; // Instantiate NPV metric - return classification_base(x, foo, micro, na_rm); +Rcpp::NumericVector cmatrix_NegativePredictiveValue(const Rcpp::NumericMatrix& x, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + NegativePredictiveValueClass cook(na_rm); + return recipe(cook, x, micro); } diff --git a/src/classification_NegativePredictiveValue.h b/src/classification_NegativePredictiveValue.h index 5de75b90..320c5e62 100644 --- a/src/classification_NegativePredictiveValue.h +++ b/src/classification_NegativePredictiveValue.h @@ -13,44 +13,51 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW the confusion matrix. So there is no need to add an overloaded function for the weighted metrics. */ -class NegativePredictiveValueMetric : public classification { -public: +class NegativePredictiveValueClass : public classification { - // Compute NPV with micro or macro aggregation - Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool do_micro, bool na_rm) const override { - // Declare the output value and TN/FN arrays - Eigen::ArrayXd output(1); - Eigen::ArrayXd tn(matrix.rows()), fn(matrix.rows()); + private: + bool na_rm; - // Populate TN and FN arrays for calculations - TN(matrix, tn); - FN(matrix, fn); + public: + + NegativePredictiveValueClass(bool na_rm) + : na_rm(na_rm) {} + // Compute NPV with micro or macro aggregation + Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool do_micro) const override { + // Declare the output value and TN/FN arrays + Eigen::ArrayXd output(1); + Eigen::ArrayXd tn(matrix.rows()), fn(matrix.rows()); - // 2) retun with - // ternary expression - return do_micro - ? micro(tn, (tn + fn), na_rm) - : macro(tn, (tn + fn), na_rm); + // Populate TN and FN arrays for calculations + TN(matrix, tn); + FN(matrix, fn); - } - // Compute NPV without micro aggregation - Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool na_rm) const override { - // Declare the output value and TN/FN arrays - Eigen::ArrayXd output(matrix.rows()); - Eigen::ArrayXd tn(matrix.rows()), fn(matrix.rows()); + // 2) return with + // ternary expression + return do_micro + ?
micro(tn, (tn + fn), na_rm) + : macro(tn, (tn + fn), na_rm); - // Populate TN and FN arrays for calculations - TN(matrix, tn); - FN(matrix, fn); + } - // Calculate metric - output = tn / (tn + fn); + // Compute NPV without micro aggregation + Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const override { + // Declare the output value and TN/FN arrays + Eigen::ArrayXd output(matrix.rows()); + Eigen::ArrayXd tn(matrix.rows()), fn(matrix.rows()); - // Return with R-compatible class - return Rcpp::wrap(output); - } + // Populate TN and FN arrays for calculations + TN(matrix, tn); + FN(matrix, fn); + + // Calculate metric + output = tn / (tn + fn); + + // Return with R-compatible class + return Rcpp::wrap(output); + } }; #endif // CLASSIFICATION_NEGATIVE_PREDICTIVE_VALUE_H diff --git a/src/classification_PositiveLikelihoodRatio.cpp b/src/classification_PositiveLikelihoodRatio.cpp index b50e15f0..9e0497cc 100644 --- a/src/classification_PositiveLikelihoodRatio.cpp +++ b/src/classification_PositiveLikelihoodRatio.cpp @@ -8,25 +8,28 @@ using namespace Rcpp; //' @method plr factor //' @export // [[Rcpp::export(plr.factor)]] -NumericVector plr(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro = R_NilValue) { - PLRMetric foo; // Instantiate PLRMetric - return classification_base(actual, predicted, foo, micro); +Rcpp::NumericVector PositiveLikelihoodRatio(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted) +{ + PositiveLikelihoodRatioClass cook; + return recipe(cook, actual, predicted); } //' @rdname plr //' @method weighted.plr factor //' @export // [[Rcpp::export(weighted.plr.factor)]] -NumericVector weighted_plr(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, Nullable micro = R_NilValue) { - PLRMetric foo; // Instantiate PLRMetric - return classification_base(actual, predicted, w, foo, micro); +Rcpp::NumericVector weighted_PositiveLikelihoodRatio(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w) +{ + PositiveLikelihoodRatioClass cook; + return recipe(cook, actual, predicted, w); } //' @rdname plr //' @method plr cmatrix //' @export // [[Rcpp::export(plr.cmatrix)]] -NumericVector plr_cmatrix(const NumericMatrix& x, Nullable micro = R_NilValue) { - PLRMetric foo; // Instantiate PLRMetric - return classification_base(x, foo, micro); +Rcpp::NumericVector cmatrix_PositiveLikelihoodRatio(const Rcpp::NumericMatrix& x) +{ + PositiveLikelihoodRatioClass cook; + return recipe(cook, x); } diff --git a/src/classification_PositiveLikelihoodRatio.h b/src/classification_PositiveLikelihoodRatio.h index 8e578d53..1ee7264c 100644 --- a/src/classification_PositiveLikelihoodRatio.h +++ b/src/classification_PositiveLikelihoodRatio.h @@ -7,28 +7,27 @@ #define EIGEN_USE_MKL_ALL EIGEN_MAKE_ALIGNED_OPERATOR_NEW -class PLRMetric : public classification { -public: +class PositiveLikelihoodRatioClass : public classification { - // Compute PLR without micro aggregation - Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const override { - Eigen::ArrayXd output(matrix.rows()); - Eigen::ArrayXd tp(matrix.rows()), fn(matrix.rows()), tn(matrix.rows()), fp(matrix.rows()); - Eigen::ArrayXd tpr(matrix.rows()), fpr(matrix.rows()); + public: + Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const override { + Eigen::ArrayXd output(matrix.rows()); + Eigen::ArrayXd tp(matrix.rows()), fn(matrix.rows()), tn(matrix.rows()), fp(matrix.rows()); + Eigen::ArrayXd 
tpr(matrix.rows()), fpr(matrix.rows()); - TP(matrix, tp); - FN(matrix, fn); - TN(matrix, tn); - FP(matrix, fp); + TP(matrix, tp); + FN(matrix, fn); + TN(matrix, tn); + FP(matrix, fp); - tpr = tp / (tp + fn); - fpr = fp / (fp + tn); + tpr = tp / (tp + fn); + fpr = fp / (fp + tn); - output = tpr / fpr; + output = tpr / fpr; - return Rcpp::wrap(output); - } + return Rcpp::wrap(output); + } }; diff --git a/src/classification_Precision.cpp b/src/classification_Precision.cpp index 6822a071..e1f9e863 100644 --- a/src/classification_Precision.cpp +++ b/src/classification_Precision.cpp @@ -9,52 +9,58 @@ using namespace Rcpp; //' @method precision factor //' @export // [[Rcpp::export(precision.factor)]] -Rcpp::NumericVector precision(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro = R_NilValue, bool na_rm = true) { - PrecisionMetric foo; // Instantiate PrecisionMetric - return classification_base(actual, predicted, foo, micro, na_rm); +Rcpp::NumericVector Precision(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro = R_NilValue, bool na_rm = true) +{ + PrecisionClass cook(na_rm); + return recipe(cook, actual, predicted, std::nullopt, micro); } //' @rdname precision //' @method weighted.precision factor //' @export // [[Rcpp::export(weighted.precision.factor)]] -Rcpp::NumericVector weighted_precision(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro = R_NilValue, bool na_rm = true) { - PrecisionMetric foo; // Instantiate PrecisionMetric - return classification_base(actual, predicted, w, foo, micro, na_rm); +Rcpp::NumericVector weighted_Precision(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro = R_NilValue, bool na_rm = true) +{ + PrecisionClass cook(na_rm); + return recipe(cook, actual, predicted, w, micro); } //' @rdname precision //' @method precision cmatrix //' @export // [[Rcpp::export(precision.cmatrix)]] -Rcpp::NumericVector precision_cmatrix(const NumericMatrix& x, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) { - PrecisionMetric foo; // Instantiate PrecisionMetric - return classification_base(x, foo, micro, na_rm); +Rcpp::NumericVector cmatrix_Precision(const NumericMatrix& x, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + PrecisionClass cook(na_rm); + return recipe(cook, x, micro); } //' @rdname precision //' @method ppv factor //' @export // [[Rcpp::export(ppv.factor)]] -Rcpp::NumericVector ppv(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro = R_NilValue, bool na_rm = true) { - PrecisionMetric foo; // Instantiate PrecisionMetric - return classification_base(actual, predicted, foo, micro, na_rm); +Rcpp::NumericVector PositivePredictiveValue(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro = R_NilValue, bool na_rm = true) +{ + PrecisionClass cook(na_rm); + return recipe(cook, actual, predicted, std::nullopt, micro); } //' @rdname precision //' @method weighted.ppv factor //' @export // [[Rcpp::export(weighted.ppv.factor)]] -Rcpp::NumericVector weighted_ppv(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro = R_NilValue, bool na_rm = true) { - PrecisionMetric foo; // Instantiate PrecisionMetric - return classification_base(actual, predicted, w, foo, micro, na_rm); 
+Rcpp::NumericVector weighted_PositivePredictiveValue(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro = R_NilValue, bool na_rm = true) +{ + PrecisionClass cook(na_rm); + return recipe(cook, actual, predicted, w, micro); } //' @rdname precision //' @method ppv cmatrix //' @export // [[Rcpp::export(ppv.cmatrix)]] -Rcpp::NumericVector ppv_cmatrix(const NumericMatrix& x, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) { - PrecisionMetric foo; // Instantiate PrecisionMetric - return classification_base(x, foo, micro, na_rm); +Rcpp::NumericVector cmatrix_PositivePredictiveValue(const Rcpp::NumericMatrix& x, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + PrecisionClass cook(na_rm); + return recipe(cook, x, micro); } diff --git a/src/classification_Precision.h b/src/classification_Precision.h index fef3e15d..0007aaf2 100644 --- a/src/classification_Precision.h +++ b/src/classification_Precision.h @@ -13,50 +13,54 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW the confusion matrix. So there is no need to add an overloaded function for the weighted metrics. */ -class PrecisionMetric : public classification { -public: +class PrecisionClass : public classification { - // Compute precision with micro or macro aggregation - Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool do_micro, bool na_rm) const override { + private: + bool na_rm; - // 0) Declare variables and size - // for efficiency. - // NOTE: Micro and macro already wraps and exports as Rcpp - Rcpp::NumericVector output(1); - Eigen::ArrayXd tp(matrix.rows()), fp(matrix.rows()); + public: + PrecisionClass(bool na_rm) + : na_rm(na_rm) {} - // 1) create TP and FP arrays - // for calculations - TP(matrix, tp); - FP(matrix, fp); + Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool do_micro) const override { + + // 0) Declare variables and size + // for efficiency. + // NOTE: Micro and macro already wrap and export as Rcpp + Rcpp::NumericVector output(1); + Eigen::ArrayXd tp(matrix.rows()), fp(matrix.rows()); - return do_micro - ? micro(tp, tp + fp, na_rm) - : macro(tp, tp + fp, na_rm); + // 1) create TP and FP arrays + // for calculations + TP(matrix, tp); + FP(matrix, fp); - } + return do_micro + ?
micro(tp, tp + fp, na_rm) + : macro(tp, tp + fp, na_rm); - // Compute precision without micro aggregation - Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool na_rm) const override { - - // 0) declare the - // output value and - // TP/FP - Eigen::ArrayXd output(matrix.rows()); - Eigen::ArrayXd tp(matrix.rows()), fp(matrix.rows()); - - // 1) create TP and FP arrays - // for calculations - TP(matrix, tp); - FP(matrix, fp); - - // 2) calculate metric - output = tp / (tp + fp); - - // 3) return with - // wrap (R compatible classes) - return Rcpp::wrap(output); - } + } + + Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const override { + + // 0) declare the + // output value and + // TP/FP + Eigen::ArrayXd output(matrix.rows()); + Eigen::ArrayXd tp(matrix.rows()), fp(matrix.rows()); + + // 1) create TP and FP arrays + // for calculations + TP(matrix, tp); + FP(matrix, fp); + + // 2) calculate metric + output = tp / (tp + fp); + + // 3) return with + // wrap (R compatible classes) + return Rcpp::wrap(output); + } }; #endif // CLASSIFICATION_PRECISION_H diff --git a/src/classification_PrecisionRecallCurve.cpp b/src/classification_PrecisionRecallCurve.cpp index e3ca93f4..b53a5ea8 100644 --- a/src/classification_PrecisionRecallCurve.cpp +++ b/src/classification_PrecisionRecallCurve.cpp @@ -7,37 +7,16 @@ using namespace Rcpp; //' @method prROC factor //' @export // [[Rcpp::export(prROC.factor)]] -Rcpp::DataFrame prROC(const Rcpp::IntegerVector &actual, - const Rcpp::NumericVector &response, - Nullable micro = R_NilValue, - Rcpp::Nullable thresholds = R_NilValue, - const bool& na_rm = true) { - - /* - * Calculate ROC based - * on micro values and - * thresholds if passed - */ - - // 1) default return - // value - if (micro.isNull()) { - - return _metric_(actual, response, thresholds); - - } - - std::vector empty_numeric; - std::vector empty_integer; - CharacterVector empty_character; - - Rcpp::DataFrame empty_df = Rcpp::DataFrame::create( - Named("threshold") = empty_numeric, - Named("level") = empty_integer, - Named("label") = empty_character, - Named("precision") = empty_numeric, - Named("recall") = empty_numeric - ); - - return empty_df; +Rcpp::DataFrame PrecisionRecallCurve(const Rcpp::IntegerVector& actual, const Rcpp::NumericVector& response, Rcpp::Nullable thresholds = R_NilValue) { + PRCalculator pr_calculator(actual, response, thresholds); + return pr_calculator.calculate(); } + +//' @rdname prROC +//' @method weighted.prROC factor +//' @export +// [[Rcpp::export(weighted.prROC.factor)]] +Rcpp::DataFrame weighted_PrecisionRecallCurve(const Rcpp::IntegerVector& actual, const Rcpp::NumericVector& response, const Rcpp::NumericVector& w, Rcpp::Nullable thresholds = R_NilValue) { + PRCalculator pr_calculator(actual, response, w, thresholds); + return pr_calculator.calculate(); +} \ No newline at end of file diff --git a/src/classification_PrecisionRecallCurve.h b/src/classification_PrecisionRecallCurve.h index 1352e403..2fb20ef9 100644 --- a/src/classification_PrecisionRecallCurve.h +++ b/src/classification_PrecisionRecallCurve.h @@ -4,145 +4,65 @@ #include #include - -inline __attribute__((always_inline)) Rcpp::DataFrame _metric_(const Rcpp::IntegerVector &actual, const Rcpp::NumericVector &response, Rcpp::Nullable thresholds = R_NilValue) -{ - // 1) Get the nested list structure from the _temporary_ function - Rcpp::List nested_data = _temporary_(actual, response, thresholds); - - // 2) Determine the number of thresholds based on input or response length - R_xlen_t 
\ No newline at end of file
diff --git a/src/classification_PrecisionRecallCurve.h b/src/classification_PrecisionRecallCurve.h
index 1352e403..2fb20ef9 100644
--- a/src/classification_PrecisionRecallCurve.h
+++ b/src/classification_PrecisionRecallCurve.h
@@ -4,145 +4,65 @@
#include
#include
-
-inline __attribute__((always_inline)) Rcpp::DataFrame _metric_(const Rcpp::IntegerVector &actual, const Rcpp::NumericVector &response, Rcpp::Nullable<Rcpp::NumericVector> thresholds = R_NilValue)
-{
-    // 1) Get the nested list structure from the _temporary_ function
-    Rcpp::List nested_data = _temporary_(actual, response, thresholds);
-
-    // 2) Determine the number of thresholds based on input or response length
-    R_xlen_t num_combinations = nested_data.size();
-    R_xlen_t num_thresholds = thresholds.isNotNull()
-        ? Rcpp::as<Rcpp::NumericVector>(thresholds).size()
-        : response.size();
-
-    // 3) Pre-allocate memory based on the expected size
-    R_xlen_t total_size = num_combinations * num_thresholds;
-    std::vector<double> final_thresholds(total_size);
-    std::vector<int> final_levels(total_size);
-    Rcpp::CharacterVector final_labels(total_size);
-    std::vector<double> final_precision(total_size), final_recall(total_size);
-
-    R_xlen_t insert_index = 0;
-
-    // 4) Loop through nested data elements in chunks of 4 for optimized processing
-    R_xlen_t combo_index = 0;
-    for (; combo_index + 3 < num_combinations; combo_index += 4) {
-        // 5) Process four groups at a time for better memory access and loop unrolling
-        for (int group_offset = 0; group_offset < 4; ++group_offset) {
-            const Rcpp::List &current_group = nested_data[combo_index + group_offset];
-
-            // 6) Extract relevant vectors from the current group
-            const Rcpp::NumericVector &thresholds = current_group["threshold"];
-            const Rcpp::NumericVector &tp = current_group["tp"];
-            const Rcpp::NumericVector &fp = current_group["fp"];
-            int level = Rcpp::as<int>(current_group["level"]);
-            std::string label = Rcpp::as<std::string>(current_group["label"]);
-
-            // 7) Get pointers to tp, fp, and threshold vectors for efficient access
-            const double *tp_ptr = tp.begin();
-            const double *fp_ptr = fp.begin();
-            const double *threshold_ptr = thresholds.begin();
-
-            // 8) Determine group size and normalization factors
-            R_xlen_t group_size = thresholds.size();
-            double max_tp = tp_ptr[group_size - 1]; // Maximum true positives for recall calculation
-
-            // 9) Process group elements in blocks of 4 for efficiency
-            R_xlen_t i = 0;
-            for (; i + 3 < group_size; i += 4) {
-                for (int j = 0; j < 4; ++j) {
-                    double tp_val = tp_ptr[i + j];
-                    double fp_val = fp_ptr[i + j];
-
-                    // Compute precision and recall with safeguards against division by zero
-                    double precision = (tp_val + fp_val == 0) ? 0.0 : tp_val / (tp_val + fp_val);
-                    double recall = (max_tp == 0) ? 0.0 : tp_val / max_tp;
-
-                    // Directly assign to pre-allocated vectors
-                    final_thresholds[insert_index] = threshold_ptr[i + j];
-                    final_levels[insert_index] = level;
-                    final_labels[insert_index] = label;
-                    final_precision[insert_index] = precision;
-                    final_recall[insert_index] = recall;
-                    insert_index++;
+class PRCalculator : public MetricsCalculator {
+public:
+    using MetricsCalculator::MetricsCalculator;
+
+    Rcpp::DataFrame calculate() override {
+        int total_points = (n_ + 1) * num_classes_;
+        Rcpp::NumericVector precision_all(total_points);
+        Rcpp::NumericVector recall_all(total_points);
+        Rcpp::NumericVector thresholds_all(total_points);
+        Rcpp::CharacterVector labels_all(total_points);
+        Rcpp::IntegerVector levels_all(total_points);
+
+        int idx = 0;
+
+        for (int c = 0; c < num_classes_; ++c) {
+            const int class_label = c + 1;
+            double positives = 0.0;
+
+            for (int i = 0; i < n_; ++i) {
+                if (actual_[indices_[i]] == class_label) {
+                    positives += weights_[indices_[i]];
+                }
+            }
+
+            double tp = 0.0, fp = 0.0;
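+            // Single pass over the response-sorted observations (indices_ is
+            // sorted once in MetricsCalculator::initialize()); tp/fp accumulate
+            // observation weights, so the unweighted case is simply w = 1.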
+            for (int i = 0; i <= n_; ++i) {
+
+                thresholds_all[idx] = (i == 0) ? R_PosInf : response_[indices_[i - 1]];
+
+                if (i > 0) {
+                    if (actual_[indices_[i - 1]] == class_label) {
+                        tp += weights_[indices_[i - 1]];
+                    } else {
+                        fp += weights_[indices_[i - 1]];
+                    }
+                }
+
+                precision_all[idx] = (tp + fp > 0) ? (tp / (tp + fp)) : 0.0;
+                recall_all[idx] = (positives > 0) ? (tp / positives) : 0.0;
+
+                labels_all[idx] = levels_[c];
+                levels_all[idx] = class_label;
+                ++idx;
+            }
+        }
-                }
-            }
-
-            // 10) Process remaining elements if not divisible by 4
-            for (; i < group_size; ++i) {
-                double tp_val = tp_ptr[i];
-                double fp_val = fp_ptr[i];
+        Rcpp::DataFrame output = Rcpp::DataFrame::create(
+            Rcpp::Named("threshold") = thresholds_all,
+            Rcpp::Named("level") = levels_all,
+            Rcpp::Named("label") = labels_all,
+            Rcpp::Named("precision") = precision_all,
+            Rcpp::Named("recall") = recall_all
+        );
-                double precision = (tp_val + fp_val == 0) ? 0.0 : tp_val / (tp_val + fp_val);
-                double recall = (max_tp == 0) ? 0.0 : tp_val / max_tp;
-
-                final_thresholds[insert_index] = threshold_ptr[i];
-                final_levels[insert_index] = level;
-                final_labels[insert_index] = label;
-                final_precision[insert_index] = precision;
-                final_recall[insert_index] = recall;
-                insert_index++;
-            }
-        }
-    }
-
-    // 11) Process any remaining groups not divisible by 4
-    for (R_xlen_t remaining_index = combo_index; remaining_index < num_combinations; ++remaining_index) {
-        const Rcpp::List &current_group = nested_data[remaining_index];
-
-        // 12) Extract relevant vectors from the current group
-        const Rcpp::NumericVector &thresholds = current_group["threshold"];
-        const Rcpp::NumericVector &tp = current_group["tp"];
-        const Rcpp::NumericVector &fp = current_group["fp"];
-        int level = Rcpp::as<int>(current_group["level"]);
-        std::string label = Rcpp::as<std::string>(current_group["label"]);
-
-        // 13) Get pointers to tp, fp, and threshold vectors for efficient access
-        const double *tp_ptr = tp.begin();
-        const double *fp_ptr = fp.begin();
-        const double *threshold_ptr = thresholds.begin();
-
-        // 14) Determine group size and normalization factors
-        R_xlen_t group_size = thresholds.size();
-        double max_tp = tp_ptr[group_size - 1]; // Maximum true positives for recall calculation
-
-        // 15) Process all elements in this group
-        for (R_xlen_t i = 0; i < group_size; ++i) {
-            double tp_val = tp_ptr[i];
-            double fp_val = fp_ptr[i];
-
-            double precision = (tp_val + fp_val == 0) ? 0.0 : tp_val / (tp_val + fp_val);
-            double recall = (max_tp == 0) ? 0.0 : tp_val / max_tp;
-
-            final_thresholds[insert_index] = threshold_ptr[i];
-            final_levels[insert_index] = level;
-            final_labels[insert_index] = label;
-            final_precision[insert_index] = precision;
-            final_recall[insert_index] = recall;
-            insert_index++;
-        }
-    }
-
-    // 16) Adjust the size of final vectors based on actual data inserted
-    final_thresholds.resize(insert_index);
-    final_levels.resize(insert_index);
-    final_labels = Rcpp::CharacterVector(final_labels.begin(), final_labels.begin() + insert_index);
-    final_precision.resize(insert_index);
-    final_recall.resize(insert_index);
-
-    // Create and return the final result DataFrame
-    Rcpp::DataFrame final_result = Rcpp::DataFrame::create(
-        Rcpp::Named("threshold") = final_thresholds,
-        Rcpp::Named("level") = final_levels,
-        Rcpp::Named("label") = final_labels,
-        Rcpp::Named("precision") = final_precision,
-        Rcpp::Named("recall") = final_recall
-    );
-
-    // Add "prROC" class to the DataFrame
-    final_result.attr("class") = Rcpp::CharacterVector::create("prROC", "data.frame");
-    return final_result;
-
-}
+        // Add "prROC" class to the DataFrame
+        output.attr("class") = Rcpp::CharacterVector::create("prROC", "data.frame");
+
+        return output;
+    }
+};
\ No newline at end of file
diff --git a/src/classification_ROCHelpers.h b/src/classification_ROCHelpers.h
index a39bf65c..da62ea01 100644
--- a/src/classification_ROCHelpers.h
+++ b/src/classification_ROCHelpers.h
@@ -13,16 +13,69 @@
 * R Studio crashes if the dimensions doesn't align with matrix-algebra! :-(
 */
-#include
+#include
#include
#include
#include
#include
-#define EIGEN_USE_MKL_ALL
-EIGEN_MAKE_ALIGNED_OPERATOR_NEW
-
using namespace Rcpp;

+class MetricsCalculator {
+    protected:
+        const Rcpp::IntegerVector& actual_;
+        const Rcpp::NumericVector& response_;
+        const Rcpp::NumericVector weights_;  // Weights for instances
+        const Rcpp::CharacterVector levels_;
+        const int num_classes_;
+        const int n_;
+        std::vector<int> indices_;
+        Rcpp::NumericVector thresholds_;
+
+    public:
+        MetricsCalculator(const Rcpp::IntegerVector& actual, const Rcpp::NumericVector& response, Rcpp::Nullable<Rcpp::NumericVector> thresholds = R_NilValue)
+            : actual_(actual),
+              response_(response),
+              weights_(Rcpp::NumericVector(actual.size(), 1.0)),  // Default weights as 1.0
+              levels_(actual.attr("levels")),
+              num_classes_(levels_.size()),
+              n_(actual.size()) {
+            initialize(thresholds);
+        }
+
+        MetricsCalculator(const Rcpp::IntegerVector& actual, const Rcpp::NumericVector& response, const Rcpp::NumericVector& weights, Rcpp::Nullable<Rcpp::NumericVector> thresholds = R_NilValue)
+            : actual_(actual),
+              response_(response),
+              weights_(weights),
+              levels_(actual.attr("levels")),
+              num_classes_(levels_.size()),
+              n_(actual.size()) {
+            initialize(thresholds);
+        }
+
+        virtual Rcpp::DataFrame calculate() = 0;
+
+    protected:
+        void initialize(Rcpp::Nullable<Rcpp::NumericVector> thresholds) {
+            indices_.resize(n_);
+            std::iota(indices_.begin(), indices_.end(), 0);
+            std::sort(indices_.begin(), indices_.end(), [&](const int& i, const int& j) {
+                return response_[i] > response_[j];
+            });
+
+            if (thresholds.isNotNull()) {
+                thresholds_ = Rcpp::NumericVector(thresholds);
+                thresholds_.push_front(R_PosInf);
+            } else {
+                thresholds_ = Rcpp::NumericVector(n_ + 1);
+                thresholds_[0] = R_PosInf;
+                for (int i = 0; i < n_; ++i) {
+                    thresholds_[i + 1] = response_[indices_[i]];
+                }
+            }
+        }
+};
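+
+// NOTE (assumed contract for the calculators deriving from this class):
+// indices_ holds the observations sorted by decreasing response, and
+// thresholds_ is prefixed with +Inf so the first point of every curve is
+// the "nothing predicted positive" corner.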
+
inline __attribute__((always_inline)) double calcArea(const Rcpp::NumericVector& y, const Rcpp::NumericVector& x, const int& method = 1) {

    double output = 0.0;
@@ -94,122 +147,4 @@ inline __attribute__((always_inline)) double calcArea(const Rcpp::NumericVector&

    return output;
}

-//' Placeholder
-//'
-//'
-//' @description
-//'
-//'
-//' @returns
-//' A [list] with stuff.
-inline __attribute__((always_inline)) Rcpp::List _temporary_(const Rcpp::IntegerVector &actual, const Rcpp::NumericVector &response, Rcpp::Nullable<Rcpp::NumericVector> thresholds = R_NilValue)
-{
-    /*
-     * This function calculates true positives and false positives
-     * across multiple thresholds for receiver operating characteristic
-     * curve calculations.
-     */
-
-    size_t n = actual.size();
-
-    // Extract the levels and their labels from the actual factor vector
-    Rcpp::CharacterVector level_labels = actual.attr("levels");
-    Rcpp::IntegerVector levels = Rcpp::seq(1, level_labels.size());
-    size_t num_levels = levels.size();
-
-    // Prepare to sort by response in descending order and keep the indices
-    std::vector<size_t> order(n);
-    std::iota(order.begin(), order.end(), 0);
-    std::sort(order.begin(), order.end(), [&](size_t i, size_t j) { return response[i] > response[j]; });
-
-    // Reorder actual and response based on the sorted indices
-    Rcpp::IntegerVector sorted_actual(n);
-    Rcpp::NumericVector sorted_response(n);
-
-    int* sorted_actual_ptr = sorted_actual.begin();
-    double* sorted_response_ptr = sorted_response.begin();
-
-    for (size_t i = 0; i < n; ++i) {
-        size_t idx = order[i];
-        sorted_actual_ptr[i] = actual[idx];
-        sorted_response_ptr[i] = response[idx];
-    }
-
-    // Handle the provided thresholds or find unique thresholds from the sorted responses
-    Rcpp::NumericVector custom_thresholds;
-
-    if (thresholds.isNotNull()) {
-        // Use provided thresholds and sort them in descending order
-        custom_thresholds = Rcpp::NumericVector(thresholds);
-        std::sort(custom_thresholds.begin(), custom_thresholds.end(), std::greater<double>());
-    } else {
-        // Identify unique thresholds from the sorted responses
-        std::vector<double> unique_thresholds;
-        unique_thresholds.push_back(sorted_response_ptr[0]);
-        for (size_t i = 1; i < n; ++i) {
-            if (sorted_response_ptr[i] != sorted_response_ptr[i - 1]) {
-                unique_thresholds.push_back(sorted_response_ptr[i]);
-            }
-        }
-        custom_thresholds = Rcpp::wrap(unique_thresholds);
-    }
-
-    size_t num_thresholds = custom_thresholds.size();
-
-    // Create a list to store results for each unique level-label combination
-    Rcpp::List result(num_levels);
-
-    // Loop through each level and pre-allocate vectors based on the number of thresholds
-    for (size_t lvl = 0; lvl < num_levels; ++lvl) {
-        int current_level = levels[lvl];
-        double cumulative_tp = 0.0;
-        double cumulative_fp = 0.0;
-
-        // Pre-allocate vectors for tp, fp, and combined_threshold based on num_thresholds
-        Rcpp::NumericVector tp(num_thresholds);
-        Rcpp::NumericVector fp(num_thresholds);
-        Rcpp::NumericVector combined_threshold(num_thresholds);
-        Rcpp::CharacterVector combined_label(num_thresholds, level_labels[lvl]);
-
-        double* tp_ptr = tp.begin();
-        double* fp_ptr = fp.begin();
-        double* combined_threshold_ptr = combined_threshold.begin();
-        const double* custom_thresholds_ptr = custom_thresholds.begin();
-
-        size_t i = 0;
-        size_t j = 0;
-
-        // Loop through sorted responses to calculate TP and FP for each threshold
-        while (j < num_thresholds) {
-            while (i < n && sorted_response_ptr[i] >= custom_thresholds_ptr[j]) {
-                if (sorted_actual_ptr[i] == current_level) {
-                    cumulative_tp += 1.0;
-                } else {
-                    cumulative_fp += 1.0;
-                }
-                ++i;
-            }
-
-            // Store cumulative TP and FP at the current threshold
-            tp_ptr[j] = cumulative_tp;
-            fp_ptr[j] = cumulative_fp;
-            combined_threshold_ptr[j] = custom_thresholds_ptr[j];
-            ++j;
-        }
-
-        // Create a nested list for this level-label pair
-        Rcpp::List level_list = Rcpp::List::create(
-
Rcpp::Named("threshold") = combined_threshold, - Rcpp::Named("tp") = tp, - Rcpp::Named("fp") = fp, - Rcpp::Named("level") = current_level, - Rcpp::Named("label") = Rcpp::as(level_labels[lvl]) - ); - - result[lvl] = level_list; - } - - return result; -} - #endif \ No newline at end of file diff --git a/src/classification_Recall.cpp b/src/classification_Recall.cpp index 8413252a..98e907ff 100644 --- a/src/classification_Recall.cpp +++ b/src/classification_Recall.cpp @@ -1,6 +1,6 @@ // [[Rcpp::depends(RcppEigen)]] #include -#include "classification_Recall.h" // RecallMetric definition +#include "classification_Recall.h" // RecallClass definition // Namespace for cleaner usage using namespace Rcpp; @@ -9,18 +9,20 @@ using namespace Rcpp; //' @method recall factor //' @export // [[Rcpp::export(recall.factor)]] -Rcpp::NumericVector recall(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro = R_NilValue, bool na_rm = true) { - RecallMetric foo; // Instantiate RecallMetric - return classification_base(actual, predicted, foo, micro, na_rm); +Rcpp::NumericVector Recall(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro = R_NilValue, bool na_rm = true) +{ + RecallClass cook(na_rm); + return recipe(cook, actual, predicted, std::nullopt, micro); } //' @rdname recall //' @method weighted.recall factor //' @export // [[Rcpp::export(weighted.recall.factor)]] -Rcpp::NumericVector weighted_recall(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro = R_NilValue, bool na_rm = true) { - RecallMetric foo; // Instantiate RecallMetric - return classification_base(actual, predicted, w, foo, micro, na_rm); +Rcpp::NumericVector weighted_Recall(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro = R_NilValue, bool na_rm = true) +{ + RecallClass cook(na_rm); + return recipe(cook, actual, predicted, w, micro); } //' @rdname recall @@ -28,11 +30,11 @@ Rcpp::NumericVector weighted_recall(const Rcpp::IntegerVector& actual, const Rcp //' @method recall cmatrix //' @export // [[Rcpp::export(recall.cmatrix)]] -Rcpp::NumericVector recall_cmatrix(const NumericMatrix& x, Nullable micro = R_NilValue, const bool& na_rm = true) +Rcpp::NumericVector cmatrix_Recall(const NumericMatrix& x, Nullable micro = R_NilValue, const bool& na_rm = true) { - RecallMetric foo; // Instantiate RecallMetric - return classification_base(x, foo, micro, na_rm); + RecallClass cook(na_rm); + return recipe(cook, x, micro); } @@ -41,10 +43,10 @@ Rcpp::NumericVector recall_cmatrix(const NumericMatrix& x, Nullable micro //' //' @export // [[Rcpp::export(sensitivity.factor)]] -Rcpp::NumericVector sensitivity(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro = R_NilValue, const bool& na_rm = true) { - - RecallMetric foo; // Instantiate RecallMetric - return classification_base(actual, predicted, foo, micro, na_rm); +Rcpp::NumericVector Sensitivity(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro = R_NilValue, const bool& na_rm = true) +{ + RecallClass cook(na_rm); + return recipe(cook, actual, predicted, std::nullopt, micro); } @@ -52,9 +54,10 @@ Rcpp::NumericVector sensitivity(const IntegerVector& actual, const IntegerVector //' @method weighted.sensitivity factor //' @export // [[Rcpp::export(weighted.sensitivity.factor)]] -Rcpp::NumericVector weighted_sensitivity(const 
Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro = R_NilValue, bool na_rm = true) { - RecallMetric foo; // Instantiate RecallMetric - return classification_base(actual, predicted, w, foo, micro, na_rm); +Rcpp::NumericVector weighted_Sensitivity(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro = R_NilValue, bool na_rm = true) +{ + RecallClass cook(na_rm); + return recipe(cook, actual, predicted, w, micro); } //' @rdname recall @@ -62,10 +65,10 @@ Rcpp::NumericVector weighted_sensitivity(const Rcpp::IntegerVector& actual, cons //' @method sensitivity cmatrix //' @export // [[Rcpp::export(sensitivity.cmatrix)]] -Rcpp::NumericVector sensitivity_cmatrix(const NumericMatrix& x, Nullable micro = R_NilValue, const bool& na_rm = true) +Rcpp::NumericVector cmatrix_Sensitivity(const NumericMatrix& x, Nullable micro = R_NilValue, const bool& na_rm = true) { - RecallMetric foo; // Instantiate RecallMetric - return classification_base(x, foo, micro, na_rm); + RecallClass cook(na_rm); + return recipe(cook, x, micro); } @@ -74,19 +77,20 @@ Rcpp::NumericVector sensitivity_cmatrix(const NumericMatrix& x, Nullable //' @method tpr factor //' @export // [[Rcpp::export(tpr.factor)]] -Rcpp::NumericVector tpr(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro = R_NilValue, const bool& na_rm = true) +Rcpp::NumericVector TruePositiveRate(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro = R_NilValue, const bool& na_rm = true) { - RecallMetric foo; // Instantiate RecallMetric - return classification_base(actual, predicted, foo, micro, na_rm); + RecallClass cook(na_rm); + return recipe(cook, actual, predicted, std::nullopt, micro); } //' @rdname recall //' @method weighted.tpr factor //' @export // [[Rcpp::export(weighted.tpr.factor)]] -Rcpp::NumericVector weighted_tpr(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro = R_NilValue, bool na_rm = true) { - RecallMetric foo; // Instantiate RecallMetric - return classification_base(actual, predicted, w, foo, micro, na_rm); +Rcpp::NumericVector weighted_TruePositiveRate(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro = R_NilValue, bool na_rm = true) +{ + RecallClass cook(na_rm); + return recipe(cook, actual, predicted, w, micro); } //' @rdname recall @@ -94,8 +98,8 @@ Rcpp::NumericVector weighted_tpr(const Rcpp::IntegerVector& actual, const Rcpp:: //' @method tpr cmatrix //' @export // [[Rcpp::export(tpr.cmatrix)]] -Rcpp::NumericVector tpr_cmatrix(const NumericMatrix& x, Nullable micro = R_NilValue, const bool& na_rm = true) +Rcpp::NumericVector cmatrix_TruePositiveRate(const NumericMatrix& x, Nullable micro = R_NilValue, const bool& na_rm = true) { - RecallMetric foo; // Instantiate RecallMetric - return classification_base(x, foo, micro, na_rm); + RecallClass cook(na_rm); + return recipe(cook, x, micro); } \ No newline at end of file diff --git a/src/classification_Recall.h b/src/classification_Recall.h index 5d0c531f..7bfa6f48 100644 --- a/src/classification_Recall.h +++ b/src/classification_Recall.h @@ -1,7 +1,6 @@ #ifndef CLASSIFICATION_RECALL_H #define CLASSIFICATION_RECALL_H -#include "classification_Utils.h" #include "classification_Helpers.h" #include #include @@ -14,50 +13,57 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW the confusion 
matrix. So there is no need to add an overloaded function
   for the weighted metrics.
*/
-class RecallMetric : public classification {
-public:
-
-    // Compute recall with micro or macro aggregation
-    Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool do_micro, bool na_rm) const override {
-
-        // 0) Declare variables and size
-        // for efficiency.
-        // NOTE: Micro and macro already wraps and exports as Rcpp
-        Rcpp::NumericVector output(1);
-        Eigen::ArrayXd tp(matrix.rows()), fn(matrix.rows());
-
-        // 1) create TP and FP arrays
-        // for calculations
-        TP(matrix, tp);
-        FN(matrix, fn);
-
-        return do_micro
-            ? micro(tp, tp + fn, na_rm)
-            : macro(tp, tp + fn, na_rm);
-
-    }
-
-    // Compute recall without micro aggregation
-    Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool na_rm) const override {
-
-        // 0) declare the
-        // output value and
-        // TP/FP
-        Eigen::ArrayXd output(matrix.rows());
-        Eigen::ArrayXd tp(matrix.rows()), fn(matrix.rows());
-
-        // 1) create TP and FP arrays
-        // for calculations
-        TP(matrix, tp);
-        FN(matrix, fn);
-
-        // 2) calculate metric
-        output = tp / (tp + fn);
-
-        // 3) rerturn with
-        // wrap (R compatible classes)
-        return Rcpp::wrap(output);
-    }
+class RecallClass : public classification {
+
+    private:
+        bool na_rm;
+
+    public:
+
+        RecallClass(bool na_rm)
+            : na_rm(na_rm) {}
+
+        // Compute recall with micro or macro aggregation
+        Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool do_micro) const override {
+
+            // 0) Declare variables and size
+            // for efficiency.
+            // NOTE: Micro and macro already wraps and exports as Rcpp
+            Rcpp::NumericVector output(1);
+            Eigen::ArrayXd tp(matrix.rows()), fn(matrix.rows());
+
+            // 1) create TP and FP arrays
+            // for calculations
+            TP(matrix, tp);
+            FN(matrix, fn);
+
+            return do_micro
+                ? micro(tp, tp + fn, na_rm)
+                : macro(tp, tp + fn, na_rm);
+
+        }
+
+        // Compute recall without micro aggregation
+        Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const override {
+
+            // 0) declare the
+            // output value and
+            // TP/FP
+            Eigen::ArrayXd output(matrix.rows());
+            Eigen::ArrayXd tp(matrix.rows()), fn(matrix.rows());
+
+            // 1) create TP and FP arrays
+            // for calculations
+            TP(matrix, tp);
+            FN(matrix, fn);
+
+            // 2) calculate metric
+            output = tp / (tp + fn);
+
+            // 3) return with
+            // wrap (R compatible classes)
+            return Rcpp::wrap(output);
+        }
};

#endif // CLASSIFICATION_RECALL_H
diff --git a/src/classification_RecieverOperatorCurve.cpp b/src/classification_RecieverOperatorCurve.cpp
index 9c46bd4a..61c4dd81 100644
--- a/src/classification_RecieverOperatorCurve.cpp
+++ b/src/classification_RecieverOperatorCurve.cpp
@@ -14,41 +14,16 @@ double auc(const Rcpp::NumericVector y, const Rcpp::NumericVector x, const int&
//' @method ROC factor
//' @export
// [[Rcpp::export(ROC.factor)]]
-Rcpp::DataFrame ROC(const Rcpp::IntegerVector &actual,
-                    const Rcpp::NumericVector &response,
-                    Nullable<bool> micro = R_NilValue,
-                    Rcpp::Nullable<Rcpp::NumericVector> thresholds = R_NilValue,
-                    const bool& na_rm = true) {
-
-    /*
-     * Calculate ROC based
-     * on micro values and
-     * thresholds if passed
-     */
-
-    // 1) default return
-    // value
-    if (micro.isNull()) {
-
-        return _metric_(actual, response, thresholds);
-
-    }
-
-    std::vector<double> empty_numeric;
-    std::vector<int> empty_integer;
-    CharacterVector empty_character;
-
-    Rcpp::DataFrame empty_df = Rcpp::DataFrame::create(
-        Named("threshold") = empty_numeric,
-        Named("level") = empty_integer,
-        Named("label") = empty_character,
-        Named("fpr") = empty_numeric,
-        Named("tpr") = empty_numeric,
-        Named("class") = CharacterVector::create("ROC", "data.frame")
-    );
-
-    return empty_df;
-
+Rcpp::DataFrame RecieverOperatorCharacteristics(const Rcpp::IntegerVector& actual, const Rcpp::NumericVector& response, Rcpp::Nullable<Rcpp::NumericVector> thresholds = R_NilValue) {
+    ROCCalculator roc_calculator(actual, response, thresholds);
+    return roc_calculator.calculate();
}
-
+
+//' @rdname ROC
+//' @method weighted.ROC factor
+//' @export
+// [[Rcpp::export(weighted.ROC.factor)]]
+Rcpp::DataFrame weighted_RecieverOperatorCharacteristics(const Rcpp::IntegerVector& actual, const Rcpp::NumericVector& response, const Rcpp::NumericVector& w, Rcpp::Nullable<Rcpp::NumericVector> thresholds = R_NilValue) {
+    ROCCalculator roc_calculator(actual, response, w, thresholds);
+    return roc_calculator.calculate();
+}
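+
+// Hedged usage sketch (from R); `w` is a numeric vector of observation
+// weights, everything else mirrors the unweighted call:
+//
+//   actual   <- factor(c("a", "b", "a"))
+//   response <- c(0.9, 0.2, 0.6)
+//   ROC(actual, response)
+//   weighted.ROC(actual, response, w = c(1, 2, 1))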
\ No newline at end of file
diff --git a/src/classification_RecieverOperatorCurve.h b/src/classification_RecieverOperatorCurve.h
index cb4b152f..254e10ed 100644
--- a/src/classification_RecieverOperatorCurve.h
+++ b/src/classification_RecieverOperatorCurve.h
@@ -1,160 +1,62 @@
#include "classification_ROCHelpers.h"
-#include
-#include
-#include
-#include
-
-inline __attribute__((always_inline)) Rcpp::DataFrame _metric_(const Rcpp::IntegerVector &actual, const Rcpp::NumericVector &response, Rcpp::Nullable<Rcpp::NumericVector> thresholds = R_NilValue)
-{
-
-    // Get the nested list structure from the _temporary_ function
-    Rcpp::List nested_data = _temporary_(actual, response, thresholds);
-
-    // Determine the number of thresholds
-    R_xlen_t num_combinations = nested_data.size();
-    R_xlen_t num_thresholds = 0;
-
-    if (thresholds.isNotNull()) {
-        num_thresholds = Rcpp::as<Rcpp::NumericVector>(thresholds).size();
-    } else {
-        num_thresholds = response.size();
-    }
-
-    // Pre-allocate memory based on known size
-    R_xlen_t total_size = num_combinations * num_thresholds;
-    std::vector<double> final_thresholds(total_size);
-    std::vector<int> final_levels(total_size);
-    Rcpp::CharacterVector final_labels(total_size);
-    std::vector<double> final_fpr(total_size), final_tpr(total_size);
-
-    R_xlen_t list_index = 0, insert_index = 0;
-
-    // Loop through elements of the nested list in chunks of 4
-    for (; list_index + 3 < num_combinations; list_index += 4) {
-        for (int group_index = 0; group_index < 4; ++group_index) {
-            const Rcpp::List &current_group = nested_data[list_index + group_index];
-
-            // Extract relevant vectors from the current group
-            const Rcpp::NumericVector &thresholds = current_group["threshold"];
-            const Rcpp::NumericVector &tp = current_group["tp"];
-            const Rcpp::NumericVector &fp = current_group["fp"];
-            int level = Rcpp::as<int>(current_group["level"]);
-            std::string label = Rcpp::as<std::string>(current_group["label"]);
-
-            // Get pointers to tp, fp, and threshold vectors for efficient access
-            const double *tp_ptr = tp.begin();
-            const double *fp_ptr = fp.begin();
-            const double *threshold_ptr = thresholds.begin();
-
-            R_xlen_t group_size = thresholds.size();
-
-            // Compute the maximum TP and FP for normalization
-            double max_tp = *std::max_element(tp.begin(), tp.end());
-            double max_fp = *std::max_element(fp.begin(), fp.end());
-
-            // Process elements in the group
-            R_xlen_t j = 0;
-            for (; j + 3 < group_size; j += 4) {
-                // Unroll the loop to process 4 elements at a time
-                for (int k = 0; k < 4; ++k) {
-                    double tp_val = tp_ptr[j + k];
-                    double fp_val = fp_ptr[j + k];
-
-                    // Compute TPR and FPR (use safeguards to prevent division by zero)
-                    double tpr = (max_tp == 0) ? 0.0 : tp_val / max_tp;
-                    double fpr = (max_fp == 0) ? 0.0 : fp_val / max_fp;
-
-                    // Directly assign to pre-allocated vectors
-                    final_thresholds[insert_index] = threshold_ptr[j + k];
-                    final_levels[insert_index] = level;
-                    final_labels[insert_index] = label;
-                    final_fpr[insert_index] = fpr;
-                    final_tpr[insert_index] = tpr;
-                    insert_index++;
+class ROCCalculator : public MetricsCalculator {
+public:
+    using MetricsCalculator::MetricsCalculator;
+
+    Rcpp::DataFrame calculate() override {
+        int total_points = (n_ + 1) * num_classes_;
+        Rcpp::NumericVector tpr_all(total_points);
+        Rcpp::NumericVector fpr_all(total_points);
+        Rcpp::NumericVector thresholds_all(total_points);
+        Rcpp::CharacterVector labels_all(total_points);
+        Rcpp::IntegerVector levels_all(total_points);
+
+        int idx = 0;
+
+        for (int c = 0; c < num_classes_; ++c) {
+            const int class_label = c + 1;
+            double positives = 0.0, negatives = 0.0;
+
+            for (int i = 0; i < n_; ++i) {
+                if (actual_[indices_[i]] == class_label) {
+                    positives += weights_[indices_[i]];
+                } else {
+                    negatives += weights_[indices_[i]];
+                }
+            }
+
+            double tp = 0.0, fp = 0.0;
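+            // Cumulative weighted TP/FP over the response-sorted observations;
+            // i == 0 is the +Inf threshold where nothing is predicted positive.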
+            for (int i = 0; i <= n_; ++i) {
+                thresholds_all[idx] = (i == 0) ? R_PosInf : response_[indices_[i - 1]];
+                if (i > 0) {
+                    if (actual_[indices_[i - 1]] == class_label) {
+                        tp += weights_[indices_[i - 1]];
+                    } else {
+                        fp += weights_[indices_[i - 1]];
+                    }
+                }
+
+                tpr_all[idx] = (positives > 0) ? (tp / positives) : 0.0;
+                fpr_all[idx] = (negatives > 0) ? (fp / negatives) : 0.0;
+                labels_all[idx] = levels_[c];
+                levels_all[idx] = class_label;
+                ++idx;
+            }
+        }
-                }
-            }
-
-            // Process remaining elements if not divisible by 4
-            for (; j < group_size; ++j) {
-                double tp_val = tp_ptr[j];
-                double fp_val = fp_ptr[j];
+        Rcpp::DataFrame output = Rcpp::DataFrame::create(
+            Rcpp::Named("threshold") = thresholds_all,
+            Rcpp::Named("level") = levels_all,
+            Rcpp::Named("label") = labels_all,
+            Rcpp::Named("fpr") = fpr_all,
+            Rcpp::Named("tpr") = tpr_all
+        );
-                // Compute TPR and FPR (use safeguards to prevent division by zero)
-                double tpr = (max_tp == 0) ? 0.0 : tp_val / max_tp;
-                double fpr = (max_fp == 0) ? 0.0 : fp_val / max_fp;
+        // Add "ROC" class to the DataFrame
+        output.attr("class") = Rcpp::CharacterVector::create("ROC", "data.frame");
-                // Directly assign to pre-allocated vectors
-                final_thresholds[insert_index] = threshold_ptr[j];
-                final_levels[insert_index] = level;
-                final_labels[insert_index] = label;
-                final_fpr[insert_index] = fpr;
-                final_tpr[insert_index] = tpr;
-                insert_index++;
-            }
+        return output;
    }
-    }
-
-    // Process any remaining elements in nested_data not divisible by 4
-    for (; list_index < num_combinations; ++list_index) {
-        const Rcpp::List &current_group = nested_data[list_index];
-
-        // Extract relevant vectors from the current group
-        const Rcpp::NumericVector &thresholds = current_group["threshold"];
-        const Rcpp::NumericVector &tp = current_group["tp"];
-        const Rcpp::NumericVector &fp = current_group["fp"];
-        int level = Rcpp::as<int>(current_group["level"]);
-        std::string label = Rcpp::as<std::string>(current_group["label"]);
-
-        // Get pointers to tp, fp, and threshold vectors for efficient access
-        const double *tp_ptr = tp.begin();
-        const double *fp_ptr = fp.begin();
-        const double *threshold_ptr = thresholds.begin();
-
-        R_xlen_t group_size = thresholds.size();
-
-        // Compute the maximum TP and FP for normalization
-        double max_tp = *std::max_element(tp.begin(), tp.end());
-        double max_fp = *std::max_element(fp.begin(), fp.end());
-
-        // Process elements in the group
-        for (R_xlen_t j = 0; j < group_size; ++j) {
-            double tp_val = tp_ptr[j];
-            double fp_val = fp_ptr[j];
-
-            // Compute TPR and FPR (use safeguards to prevent division by zero)
-            double tpr = (max_tp == 0) ? 0.0 : tp_val / max_tp;
-            double fpr = (max_fp == 0) ?
0.0 : fp_val / max_fp; - - // Directly assign to pre-allocated vectors - final_thresholds[insert_index] = threshold_ptr[j]; - final_levels[insert_index] = level; - final_labels[insert_index] = label; - final_fpr[insert_index] = fpr; - final_tpr[insert_index] = tpr; - insert_index++; - } - } - - // Adjust the final vector size if fewer entries were inserted than initially allocated - final_thresholds.resize(insert_index); - final_levels.resize(insert_index); - final_labels = Rcpp::CharacterVector(final_labels.begin(), final_labels.begin() + insert_index); - final_fpr.resize(insert_index); - final_tpr.resize(insert_index); - - // Create and return the final result DataFrame - Rcpp::DataFrame final_result = Rcpp::DataFrame::create( - Rcpp::Named("threshold") = final_thresholds, - Rcpp::Named("level") = final_levels, - Rcpp::Named("label") = final_labels, - Rcpp::Named("fpr") = final_fpr, - Rcpp::Named("tpr") = final_tpr - ); - - // Add "ROC" class to the DataFrame - final_result.attr("class") = Rcpp::CharacterVector::create("ROC", "data.frame"); - - return final_result; - -} +}; \ No newline at end of file diff --git a/src/classification_Specificity.cpp b/src/classification_Specificity.cpp index 20e4e9a0..a601fc32 100644 --- a/src/classification_Specificity.cpp +++ b/src/classification_Specificity.cpp @@ -1,6 +1,6 @@ // [[Rcpp::depends(RcppEigen)]] #include -#include "classification_Specificity.h" // SpecificityMetric definition +#include "classification_Specificity.h" // SpecificityClass definition using namespace Rcpp; @@ -8,80 +8,88 @@ using namespace Rcpp; //' @method specificity factor //' @export // [[Rcpp::export(specificity.factor)]] -NumericVector specificity(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro = R_NilValue, const bool& na_rm = true) { - - SpecificityMetric foo; // Instantiate SpecificityMetric - return classification_base(actual, predicted, foo, micro, na_rm); +Rcpp::NumericVector Specificity(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + SpecificityClass cook(na_rm); + return recipe(cook, actual, predicted, std::nullopt, micro); } //' @rdname specificity //' @method weighted.specificity factor //' @export // [[Rcpp::export(weighted.specificity.factor)]] -NumericVector weighted_specificity(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, Nullable micro = R_NilValue, const bool& na_rm = true) { - SpecificityMetric foo; // Instantiate SpecificityMetric - return classification_base(actual, predicted, w, foo, micro, na_rm); +Rcpp::NumericVector weighted_Specificity(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + SpecificityClass cook(na_rm); + return recipe(cook, actual, predicted, w, micro); } //' @rdname specificity //' @method specificity cmatrix //' @export // [[Rcpp::export(specificity.cmatrix)]] -NumericVector specificity_cmatrix(const NumericMatrix& x, Nullable micro = R_NilValue, const bool& na_rm = true) { - SpecificityMetric foo; // Instantiate SpecificityMetric - return classification_base(x, foo, micro, na_rm); +Rcpp::NumericVector cmatrix_Specificity(const Rcpp::NumericMatrix& x, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + SpecificityClass cook(na_rm); + return recipe(cook, x, micro); } //' @rdname specificity //' @method tnr factor //' @export // 
[[Rcpp::export(tnr.factor)]] -NumericVector tnr(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro = R_NilValue, const bool& na_rm = true) { - SpecificityMetric foo; // Instantiate SpecificityMetric - return classification_base(actual, predicted, foo, micro, na_rm); +Rcpp::NumericVector TrueNegativeRate(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + SpecificityClass cook(na_rm); + return recipe(cook, actual, predicted, std::nullopt, micro); } //' @rdname specificity //' @method weighted.tnr factor //' @export // [[Rcpp::export(weighted.tnr.factor)]] -NumericVector weighted_tnr(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, Nullable micro = R_NilValue, const bool& na_rm = true) { - SpecificityMetric foo; // Instantiate SpecificityMetric - return classification_base(actual, predicted, w, foo, micro, na_rm); +Rcpp::NumericVector weighted_TrueNegativeRate(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + SpecificityClass cook(na_rm); + return recipe(cook, actual, predicted, w, micro); } //' @rdname specificity //' @method tnr cmatrix //' @export // [[Rcpp::export(tnr.cmatrix)]] -NumericVector tnr_cmatrix(const NumericMatrix& x, Nullable micro = R_NilValue, const bool& na_rm = true) { - SpecificityMetric foo; // Instantiate SpecificityMetric - return classification_base(x, foo, micro, na_rm); +Rcpp::NumericVector cmatrix_TrueNegativeRate(const Rcpp::NumericMatrix& x, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + SpecificityClass cook(na_rm); + return recipe(cook, x, micro); } //' @rdname specificity //' @method selectivity factor //' @export // [[Rcpp::export(selectivity.factor)]] -NumericVector selectivity(const IntegerVector& actual, const IntegerVector& predicted, Nullable micro = R_NilValue, const bool& na_rm = true) { - SpecificityMetric foo; // Instantiate SpecificityMetric - return classification_base(actual, predicted, foo, micro, na_rm); +Rcpp::NumericVector Selectivity(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + SpecificityClass cook(na_rm); + return recipe(cook, actual, predicted, std::nullopt, micro); } //' @rdname specificity //' @method weighted.selectivity factor //' @export // [[Rcpp::export(weighted.selectivity.factor)]] -NumericVector weighted_selectivity(const IntegerVector& actual, const IntegerVector& predicted, const NumericVector& w, Nullable micro = R_NilValue, const bool& na_rm = true) { - SpecificityMetric foo; // Instantiate SpecificityMetric - return classification_base(actual, predicted, w, foo, micro, na_rm); +Rcpp::NumericVector weighted_Selectivity(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + SpecificityClass cook(na_rm); + return recipe(cook, actual, predicted, w, micro); } //' @rdname specificity //' @method selectivity cmatrix //' @export // [[Rcpp::export(selectivity.cmatrix)]] -NumericVector selectivity_cmatrix(const NumericMatrix& x, Nullable micro = R_NilValue, const bool& na_rm = true) { - SpecificityMetric foo; // Instantiate SpecificityMetric - return classification_base(x, foo, micro, na_rm); +Rcpp::NumericVector cmatrix_Selectivity(const 
Rcpp::NumericMatrix& x, Rcpp::Nullable micro = R_NilValue, const bool& na_rm = true) +{ + SpecificityClass cook(na_rm); + return recipe(cook, x, micro); } diff --git a/src/classification_Specificity.h b/src/classification_Specificity.h index 3adce23c..2f3d3bc3 100644 --- a/src/classification_Specificity.h +++ b/src/classification_Specificity.h @@ -13,44 +13,51 @@ EIGEN_MAKE_ALIGNED_OPERATOR_NEW the confusion matrix. So there is no need to add an overloaded function for the weighted metrics. */ -class SpecificityMetric : public classification { -public: +class SpecificityClass : public classification { - // Compute specificity with micro or macro aggregation - Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool do_micro, bool na_rm) const override { + private: + bool na_rm; - // 0) Declare variables and size - // for efficiency. - // NOTE: Micro and macro already wraps and exports as Rcpp - Rcpp::NumericVector output(1); - Eigen::ArrayXd tn(matrix.rows()), fp(matrix.rows()); + public: - TN(matrix, tn); - FP(matrix, fp); + SpecificityClass(bool na_rm) + : na_rm(na_rm) {} - return do_micro - ? micro(tn, tn + fp, na_rm) - : macro(tn, tn + fp, na_rm); + // Compute specificity with micro or macro aggregation + Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool do_micro) const override { - } + // 0) Declare variables and size + // for efficiency. + // NOTE: Micro and macro already wraps and exports as Rcpp + Rcpp::NumericVector output(1); + Eigen::ArrayXd tn(matrix.rows()), fp(matrix.rows()); - // Compute specificity without micro aggregation - Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool na_rm) const override { + TN(matrix, tn); + FP(matrix, fp); - // 0) Declare output value and TN/FP arrays - Eigen::ArrayXd output(matrix.rows()); - Eigen::ArrayXd tn(matrix.rows()), fp(matrix.rows()); + return do_micro + ? micro(tn, tn + fp, na_rm) + : macro(tn, tn + fp, na_rm); - // 1) Create TN and FP arrays - TN(matrix, tn); - FP(matrix, fp); + } - // 2) Calculate metric - output = tn / (tn + fp); + // Compute specificity without micro aggregation + Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const override { - // 3) Return wrapped (R-compatible classes) - return Rcpp::wrap(output); - } + // 0) Declare output value and TN/FP arrays + Eigen::ArrayXd output(matrix.rows()); + Eigen::ArrayXd tn(matrix.rows()), fp(matrix.rows()); + + // 1) Create TN and FP arrays + TN(matrix, tn); + FP(matrix, fp); + + // 2) Calculate metric + output = tn / (tn + fp); + + // 3) Return wrapped (R-compatible classes) + return Rcpp::wrap(output); + } }; -#endif // CLASSIFICATION_SPECIFICITY_H +#endif diff --git a/src/classification_Utils.h b/src/classification_Utils.h deleted file mode 100644 index 94113918..00000000 --- a/src/classification_Utils.h +++ /dev/null @@ -1,66 +0,0 @@ -#ifndef CLASSIFICATION_HELPERS_H -#define CLASSIFICATION_HELPERS_H - -#include -#include - -class classification { -public: - - /* - Note to future self: - - 1.) These are just signatures. So in essence it doesn't matter - what you call them. The imporant thing is that they are distinguishable - - 2.) All functions have the same signature - 2.1) A Matrix (passed via helpers) - 2.2) Booleans to determine behaviour inside - the respective functions. For example: - + boolean 1: Controls missing values - + boolean 2: Controls wether micro/macro values are - to be rerrturned - + boolean k: Other behaviour that I can't think of as of now. 
- - It seems somewhat redundant and excessive to do it like this, but until a better - solution is found, this is what we do. - - Warning: ALL signatures has to be used (I think) - */ - - virtual Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const { - return Rcpp::NumericVector(); - }; - - virtual Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool na_rm) const { - return Rcpp::NumericVector(); - }; - - virtual Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool do_micro, bool na_rm) const { - return Rcpp::NumericVector(); - }; - - virtual Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool do_micro, bool na_rm, double beta) const { - return Rcpp::NumericVector(); - }; - - virtual Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, bool na_rm, double beta) const { - return Rcpp::NumericVector(); - }; - - virtual Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, double beta) const { - return Rcpp::NumericVector(); - }; - - virtual Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, double beta, bool na_rm) const { - return Rcpp::NumericVector(); - }; - - virtual Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix, double beta, bool do_micro, bool na_rm) const { - return Rcpp::NumericVector(); - }; - - virtual ~classification() = default; -}; - -#endif // CLASSIFICATION_HELPERS_H diff --git a/src/classification_ZeroOneLoss.cpp b/src/classification_ZeroOneLoss.cpp index f3fb49f9..c89f9b55 100644 --- a/src/classification_ZeroOneLoss.cpp +++ b/src/classification_ZeroOneLoss.cpp @@ -7,25 +7,28 @@ using namespace Rcpp; //' @method zerooneloss factor //' @export // [[Rcpp::export(zerooneloss.factor)]] -Rcpp::NumericVector zerooneloss(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted) { - ZeroOneLossMetric foo; // Instantiate ZeroOneLossMetric - return classification_base(actual, predicted, foo); +Rcpp::NumericVector ZeroOneLoss(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted) +{ + ZeroOneLossClass cook; + return recipe(cook, actual, predicted); } //' @rdname zerooneloss //' @method weighted.zerooneloss factor //' @export // [[Rcpp::export(weighted.zerooneloss.factor)]] -Rcpp::NumericVector weighted_zerooneloss(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const NumericVector& w) { - ZeroOneLossMetric foo; // Instantiate ZeroOneLossMetric - return classification_base(actual, predicted, w, foo); +Rcpp::NumericVector weighted_ZeroOneLoss(const Rcpp::IntegerVector& actual, const Rcpp::IntegerVector& predicted, const Rcpp::NumericVector& w) +{ + ZeroOneLossClass cook; + return recipe(cook, actual, predicted, w); } //' @rdname zerooneloss //' @method zerooneloss cmatrix //' @export // [[Rcpp::export(zerooneloss.cmatrix)]] -Rcpp::NumericVector zerooneloss_cmatrix(const Rcpp::NumericMatrix& x) { - ZeroOneLossMetric foo; // Instantiate ZeroOneLossMetric - return classification_base(x, foo); +Rcpp::NumericVector cmatrix_ZeroOneLoss(const Rcpp::NumericMatrix& x) +{ + ZeroOneLossClass cook; + return recipe(cook, x); } diff --git a/src/classification_ZeroOneLoss.h b/src/classification_ZeroOneLoss.h index 51a757dd..55b94fe9 100644 --- a/src/classification_ZeroOneLoss.h +++ b/src/classification_ZeroOneLoss.h @@ -6,32 +6,27 @@ #define EIGEN_USE_MKL_ALL EIGEN_MAKE_ALIGNED_OPERATOR_NEW -/* - Simplified ZeroOneLoss class: - Calculates ZeroOneLoss as (tp + tn) / N. 
-*/
-class ZeroOneLossMetric : public classification {
-public:
-    // Compute overall accuracy
-    Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const override {
-
-        // 0) set sizes
-        // of arrays
-        Eigen::ArrayXd output(1);
-        Eigen::ArrayXd tp(matrix.rows());
-
-        // 1) extract values
-        TP(matrix, tp);
-        double N = matrix.sum();
-
-        // 1) calculate
-        // Calculate total instances (N), TP, and TN
-        // double total = matrix.sum(); // Total entries in the confusion matrix
-        // double tp = matrix.diagonal().sum(); // Sum of the diagonal (True Positives)
-        output = Eigen::ArrayXd::Constant(1, (N == 0) ? R_NaReal : (N - tp.sum()) / N);
-
-        return Rcpp::wrap(output); // Wrap into NumericVector
-    }
+class ZeroOneLossClass : public classification {
+
+    public:
+
+        Rcpp::NumericVector compute(const Eigen::MatrixXd& matrix) const override {
+
+            // 0) set sizes
+            // of arrays
+            Eigen::ArrayXd output(1);
+            Eigen::ArrayXd tp(matrix.rows());
+
+            // 1) extract values
+            TP(matrix, tp);
+            double N = matrix.sum();
+
+            // 2) calculate
+            // output
+            output = Eigen::ArrayXd::Constant(1, (N == 0) ? R_NaReal : (N - tp.sum()) / N);
+
+            return Rcpp::wrap(output); // Wrap into NumericVector
+        }
};
diff --git a/src/regression_RelativeRootMeanSquaredError.cpp b/src/regression_RelativeRootMeanSquaredError.cpp
new file mode 100644
index 00000000..72880c55
--- /dev/null
+++ b/src/regression_RelativeRootMeanSquaredError.cpp
@@ -0,0 +1,26 @@
+#include
+#include "regression_RelativeRootMeanSquaredError.h"
+using namespace Rcpp;
+
+//' @rdname rrmse
+//' @method rrmse numeric
+//' @export
+// [[Rcpp::export(rrmse.numeric)]]
+double RelativeRootMeanSquaredError(const std::vector<double>& actual, const std::vector<double>& predicted, const int& normalization = 1)
+{
+    RelativeRootMeanSquaredErrorClass rrmse;
+    return rrmse.compute(actual, predicted, normalization);
+}
+
+//' @rdname rrmse
+//' @method weighted.rrmse numeric
+//' @export
+// [[Rcpp::export(weighted.rrmse.numeric)]]
+double weighted_RelativeRootMeanSquaredError(const std::vector<double>& actual, const std::vector<double>& predicted, const std::vector<double> w, const int& normalization = 1) {
+
+    RelativeRootMeanSquaredErrorClass rrmse;
+    return rrmse.compute(actual, predicted, w, normalization);
+
+}
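+
+// Hedged usage sketch (from R); normalization: 0 = mean, 1 = range (the
+// default), 2 = IQR, anything else leaves the RMSE unnormalized:
+//
+//   actual    <- rnorm(100)
+//   predicted <- actual + rnorm(100)
+//   rrmse(actual, predicted, normalization = 2)
+//   weighted.rrmse(actual, predicted, w = runif(100), normalization = 2)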
+ */ +class RelativeRootMeanSquaredErrorClass : public RegressionBase { +public: + // Unweighted RRMSE + double compute(const std::vector& actual, const std::vector& predicted, const int& normalization) const { + auto errorFunc = [](const double& a, const double& p) { + double diff = a - p; + return diff * diff; + }; + + double rmse = std::sqrt(calculate(actual, predicted, errorFunc)); + double normalization_factor = getNormalizationFactor(&actual, normalization); + + return rmse / normalization_factor; + } + + // Weighted RRMSE + double compute(const std::vector& actual, const std::vector& predicted, const std::vector& weights, const int& normalization) const { + auto errorFunc = [](const double& a, const double& p) { + double diff = a - p; + return diff * diff; + }; + + double rmse = std::sqrt(calculate(actual, predicted, weights, errorFunc)); + double normalization_factor = getNormalizationFactor(&actual, &weights, normalization); + + return rmse / normalization_factor; + } + +private: + // Unweighted normalization factor + double getNormalizationFactor(const std::vector* values, const int& normalization) const { + switch (normalization) { + case 0: // Mean normalization + return mean(values); + case 1: // Range normalization + return range(values); + case 2: // IQR normalization + return IQR(values); + default: + return 1.0; // Default to no normalization + } + } + + // Weighted normalization factor + double getNormalizationFactor(const std::vector* values, const std::vector* weights, const int& normalization) const { + switch (normalization) { + case 0: // Weighted mean normalization + return mean(values, weights); + case 1: // Range normalization (unweighted by default) + return range(values); + case 2: // Weighted IQR normalization + return IQR(values, weights); + default: + return 1.0; // Default to no normalization + } + } + + // Unweighted mean + double mean(const std::vector* values) const { + double sum = std::accumulate(values->begin(), values->end(), 0.0); + return sum / values->size(); + } + + // Weighted mean + double mean(const std::vector* values, const std::vector* weights) const { + double weighted_sum = 0.0; + double weight_sum = 0.0; + + auto value_it = values->begin(); + auto weight_it = weights->begin(); + + for (; value_it != values->end(); ++value_it, ++weight_it) { + weighted_sum += (*value_it) * (*weight_it); + weight_sum += *weight_it; + } + + return weighted_sum / weight_sum; + } + + // Range + double range(const std::vector* values) const { + auto [min_it, max_it] = std::minmax_element(values->begin(), values->end()); + return *max_it - *min_it; + } + + // Unweighted IQR + double IQR(const std::vector* values) const { + return quantile(values, 0.75) - quantile(values, 0.25); + } + + // Weighted IQR + double IQR(const std::vector* values, const std::vector* weights) const { + return quantile(values, weights, 0.75) - quantile(values, weights, 0.25); + } + + + double quantile(const std::vector* values, double alpha) const { + std::vector temp(values->begin(), values->end()); + std::sort(temp.begin(), temp.end()); + + size_t n = temp.size(); + double pos = alpha * (n - 1); + size_t lower_idx = static_cast(pos); + size_t upper_idx = std::min(lower_idx + 1, n - 1); + double frac = pos - lower_idx; + + return temp[lower_idx] + frac * (temp[upper_idx] - temp[lower_idx]); + } + + double quantile(const std::vector* values, const std::vector* weights, double alpha) const { + // Pair values and weights + std::vector> weighted_values; + for (size_t i = 0; i < 
+    double quantile(const std::vector<double>* values, const std::vector<double>* weights, double alpha) const {
+        // Pair values and weights
+        std::vector<std::pair<double, double>> weighted_values;
+        for (size_t i = 0; i < values->size(); ++i) {
+            weighted_values.emplace_back((*values)[i], (*weights)[i]);
+        }
+
+        // Sort by value
+        std::sort(weighted_values.begin(), weighted_values.end(), [](const auto& a, const auto& b) {
+            return a.first < b.first;
+        });
+
+        double total_weight = std::accumulate(weights->begin(), weights->end(), 0.0);
+        double cumulative_weight = 0.0;
+        double target_weight = alpha * total_weight;
+
+        double lower = 0.0, upper = 0.0;
+        bool lower_set = false;
+
+        for (const auto& [value, weight] : weighted_values) {
+            cumulative_weight += weight;
+
+            if (!lower_set && cumulative_weight >= target_weight) {
+                lower = value;
+                lower_set = true;
+            }
+
+            if (cumulative_weight >= target_weight) {
+                upper = value;
+                break;
+            }
+        }
+
+        // Interpolation
+        return lower + (upper - lower) * ((target_weight - (cumulative_weight - weighted_values.back().second)) / total_weight);
+    }
+};
+
+#endif
diff --git a/tests/testthat/ref-classification-utils.R b/tests/testthat/ref-classification-utils.R
new file mode 100644
index 00000000..8111a357
--- /dev/null
+++ b/tests/testthat/ref-classification-utils.R
@@ -0,0 +1,68 @@
+# script: Classification utilities
+# for streamlining the unit-tests
+# script start;
+
+# Confusion Matrix
+confusion_matrix <- function(actual, predicted, w = NULL) {
+  if (is.null(w)) {
+    SLmetrics::cmatrix(
+      actual,
+      predicted
+    )
+  } else {
+    SLmetrics::weighted.cmatrix(
+      actual,
+      predicted,
+      w
+    )
+  }
+}
+
+# Classification function:
+generalized_metric <- function(
+    actual,
+    predicted,
+    w = NULL,
+    micro = NULL,
+    metric_expr,
+    na.rm = TRUE) {
+
+  # 1) Construct confusion matrix
+  conf_mat <- confusion_matrix(actual, predicted, w = w)
+
+  # 2) Calculate confusion matrix elements
+  TP <- diag(conf_mat)
+  FP <- colSums(conf_mat) - TP
+  TN <- sum(conf_mat) - rowSums(conf_mat) - colSums(conf_mat) + TP
+  FN <- rowSums(conf_mat) - TP
+
+  # 3) Evaluate the metric expression
+  output <- eval(substitute(metric_expr))
+
+  if (is.null(micro)) {
+    return(output)
+  }
+
+  # 4) Aggregate if micro is specified
+  if (micro) {
+    # Aggregate all values for micro average
+    total_TP <- sum(TP, na.rm = TRUE)
+    total_FP <- sum(FP, na.rm = TRUE)
+    total_TN <- sum(TN, na.rm = TRUE)
+    total_FN <- sum(FN, na.rm = TRUE)
+
+    output <- eval(substitute(metric_expr),
+                   list(TP = total_TP, FP = total_FP, TN = total_TN, FN = total_FN))
+  } else {
+    # Handle NA values if na.rm is FALSE
+    if (!na.rm) {
+      output[!is.finite(output)] <- 0
+    }
+    # Mean across classes
+    output <- mean(output, na.rm = na.rm)
+  }
+
+  return(output)
+}
+
+# script end;
\ No newline at end of file
diff --git a/tests/testthat/ref-classification.R b/tests/testthat/ref-classification.R
new file mode 100644
index 00000000..bb2a57ee
--- /dev/null
+++ b/tests/testthat/ref-classification.R
@@ -0,0 +1,291 @@
+
+# Reference Recall
+ref_recall <- function(
+    actual,
+    predicted,
+    w = NULL,
+    micro = NULL,
+    na.rm = TRUE) {
+
+  generalized_metric(
+    actual = actual,
+    predicted = predicted,
+    w = w,
+    micro = micro,
+    na.rm = na.rm,
+    metric_expr = TP / (TP + FN)
+  )
+}
+
+# Reference Specificity
+ref_specificity <- function(
+    actual,
+    predicted,
+    w = NULL,
+    micro = NULL,
+    na.rm = TRUE) {
+
+  generalized_metric(
+    actual = actual,
+    predicted = predicted,
+    w = w,
+    micro = micro,
+    na.rm = na.rm,
+    metric_expr = TN / (TN + FP)
+  )
+}
+
+# Reference False Discovery Rate
+ref_fdr <- function(
+    actual,
+    predicted,
+    w = NULL,
+    micro = NULL,
+    na.rm = TRUE) {
+
+  generalized_metric(
+    actual = actual,
+    predicted = predicted,
+    w = w,
+ micro = micro, + na.rm = na.rm, + metric_expr = FP / (FP + TP) + ) +} + +# Reference False Positive Rate +ref_fpr <- function( + actual, + predicted, + w = NULL, + micro = NULL, + na.rm = TRUE) { + + generalized_metric( + actual = actual, + predicted = predicted, + w = w, + micro = micro, + na.rm = na.rm, + metric_expr = FP / (FP + TN) + ) +} + +# Reference Negative Predictive Value +ref_npv <- function( + actual, + predicted, + w = NULL, + micro = NULL, + na.rm = TRUE) { + + generalized_metric( + actual = actual, + predicted = predicted, + w = w, + micro = micro, + na.rm = na.rm, + metric_expr = TN / (TN + FN) + ) +} + +# Reference False Omission Rate +ref_fer <- function( + actual, + predicted, + w = NULL, + micro = NULL, + na.rm = TRUE) { + + generalized_metric( + actual = actual, + predicted = predicted, + w = w, + micro = micro, + na.rm = na.rm, + metric_expr = FN / (TN + FN) + ) +} + +# Reference Positive Likelihood Ratio +ref_plr <- function( + actual, + predicted, + w = NULL, + micro = NULL, + na.rm = TRUE) { + + generalized_metric( + actual = actual, + predicted = predicted, + w = w, + micro = micro, + na.rm = na.rm, + metric_expr = (TP / (TP + FN)) / (FP / (FP + TN)) + ) +} + +# Reference Negative Likelihood Ratio +ref_nlr <- function( + actual, + predicted, + w = NULL, + micro = NULL, + na.rm = TRUE) { + + generalized_metric( + actual = actual, + predicted = predicted, + w = w, + micro = micro, + na.rm = na.rm, + metric_expr = (1 - (TP / (TP + FN))) / (TN / (TN + FP)) + ) +} + +# Reference Diagnostic Odds Ratio +ref_dor <- function( + actual, + predicted, + w = NULL, + micro = NULL, + na.rm = TRUE) { + + generalized_metric( + actual = actual, + predicted = predicted, + w = w, + micro = micro, + na.rm = na.rm, + metric_expr = (TP * TN) / (FP * FN) + ) +} + +ref_ROC <-function(actual, response, thresholds = NULL, w = NULL, micro = NULL) { + + n_levels <- length(levels(actual)) + + grid <- expand.grid( + threshold = if (is.null(thresholds)) response else thresholds, + level = 1:n_levels + ) + + fpr_list <- numeric(nrow(grid)) + tpr_list <- numeric(nrow(grid)) + + for (i in seq_len(nrow(grid))) { + threshold <- grid$threshold[i] + level <- grid$level[i] + + predicted <- factor( + x = ifelse( + response >= threshold, + yes = level, + no = (n_levels + 1) - level + ), + labels = letters[1:n_levels], + levels = 1:n_levels + ) + + fpr_list[i] <- generalized_metric( + actual = actual, + predicted = predicted, + metric_expr = FP / (FP + TN), + micro = micro, + w = w + )[level] + + tpr_list[i] <- generalized_metric( + actual = actual, + predicted = predicted, + metric_expr = TP / (TP + FN), + micro = micro, + w = w + )[level] + } + + # Create the output data frame + output <- data.frame( + threshold = grid$threshold, + level = grid$level, + label = letters[grid$level], + fpr = fpr_list, + tpr = tpr_list, + stringsAsFactors = FALSE + ) + + # Sort the output + output <- output[order(output$level, -output$threshold), ] + rownames(output) <- NULL + + output +} + + +ref_prROC <- function( + actual, + response, + thresholds = NULL) { + + n_levels <- length(levels(actual)) + + # Generate all combinations of thresholds and levels + grid <- expand.grid( + threshold = if (is.null(thresholds)) response else thresholds, + level = 1:n_levels + ) + + # Compute predictions, precision, and recall for each combination + precision_list <- numeric(nrow(grid)) + recall_list <- numeric(nrow(grid)) + + for (i in seq_len(nrow(grid))) { + threshold <- grid$threshold[i] + level <- grid$level[i] + + predicted <- 
factor( + x = ifelse( + response >= threshold, + yes = level, + no = (n_levels + 1) - level + ), + labels = letters[1:n_levels], + levels = 1:n_levels + ) + + precision_list[i] <- generalized_metric( + actual = actual, + predicted = predicted, + metric_expr = TP / (TP + FP) + )[level] + + recall_list[i] <- generalized_metric( + actual = actual, + predicted = predicted, + metric_expr = TP / (TP + FN) + )[level] + } + + # Create the output data frame + output <- data.frame( + threshold = grid$threshold, + level = grid$level, + label = letters[grid$level], + precision = precision_list, + recall = recall_list, + stringsAsFactors = FALSE + ) + + # Sort the output + output <- output[order(output$level, -output$threshold), ] + rownames(output) <- NULL + + # Replace NaN with 0 in numeric columns + numeric_cols <- sapply(output, is.numeric) + output[numeric_cols] <- lapply(output[numeric_cols], function(col) { + col[is.nan(col)] <- 0 + col + }) + + output +} \ No newline at end of file diff --git a/tests/testthat/ref-manual.R b/tests/testthat/ref-manual.R deleted file mode 100644 index 67fd0d8d..00000000 --- a/tests/testthat/ref-manual.R +++ /dev/null @@ -1,549 +0,0 @@ -# script: Manual Calculations of -# measures not found in Python -# date: 2024-10-07 -# author: Serkan Korkmaz, serkor1@duck.com -# objective: These function manually -# calculates the metrics and serves as a reference -# for future changes in the package. -# -# They are named py_foo because it -# was convienient. -# script start; - -# Concordance Correlation Coefficient -# The values have been verified with yardstick and -# epiR -py_ccc <- function(actual, predicted, w = NULL, correction = FALSE) { - - actual <- as.numeric(actual) - predicted <- as.numeric(predicted) - - if (is.null(w)) { - w <- rep(1, length(actual)) - } else { - w <- as.numeric(w) - } - - data <- cbind(actual = actual, predicted = predicted) - cov_matrix <- stats::cov.wt( - x = data, - wt = w, - cor = FALSE, - center = TRUE, - method = "unbiased" - ) - - actual_mean <- weighted.mean(actual, w = w) - predicted_mean <- weighted.mean(predicted, w = w) - actual_variance <- cov_matrix$cov[1, 1] - predicted_variance <- cov_matrix$cov[2, 2] - covariance <- cov_matrix$cov[1, 2] - - if (correction) { - n <- sum(w) - actual_variance <- actual_variance * (n - 1) / n - predicted_variance <- predicted_variance * (n - 1) / n - covariance <- covariance * (n - 1) / n - } - - numerator <- 2 * covariance - denominator <- actual_variance + predicted_variance + (actual_mean - predicted_mean)^2 - ccc_value <- numerator / denominator - - return(ccc_value) -} - - -py_recall <- function( - actual, - predicted, - average = NULL, - w = NULL, - na.rm = TRUE -) { - - # 1) Construct matrix - conf_mat <- SLmetrics::cmatrix( - actual = actual, - predicted = predicted, - w = w - ) - - TP <- diag(conf_mat) # True Positives - FN <- rowSums(conf_mat) - diag(conf_mat) # False Negatives - - # Recall calculation - output <- TP / (TP + FN) - - # 2) Handle averaging - if (!is.null(average)) { - - average <- as.logical(average == "micro") - - if (average) { - - # Micro-averaged recall - output <- sum(TP, na.rm = TRUE) / (sum(TP, na.rm = TRUE) + sum(FN, na.rm = TRUE)) - - } else { - - # Macro-averaged recall - if (!na.rm) { - output[!is.finite(output)] <- 0 - } - - output <- mean( - output, - na.rm = na.rm - ) - - } - - } - - return(output) -} - - -py_specificity <- function( - actual, - predicted, - average = NULL, - w = NULL, - na.rm = TRUE -) { - - # 1) Construct matrix - conf_mat <- 
SLmetrics::cmatrix( - actual = actual, - predicted = predicted, - w = w - ) - - TN <- sum(conf_mat) - rowSums(conf_mat) - colSums(conf_mat) + diag(conf_mat) - FP <- colSums(conf_mat) - diag(conf_mat) - - - output <- TN/(TN+FP) - - # 2) calculate values - if (!is.null(average)) { - - average <- as.logical(average == "micro") - - if (average) { - - output <- sum(TN, na.rm = TRUE) / (sum(TN, na.rm = TRUE) + sum(FP, na.rm = TRUE)) - - } else { - - if (!na.rm) { - - output[!is.finite(output)] <- 0 - - } - - output <- mean( - output, - na.rm = na.rm - ) - - } - - } - - return( - output - ) - -} - -# False Discovery Rate -py_fdr <- function( - actual, - predicted, - average = NULL, - w = NULL, - na.rm = TRUE) { - - # 1) Construct matrix - conf_mat <- SLmetrics::cmatrix( - actual = actual, - predicted = predicted, - w = w - ) - - # 2) Construct elements - # of the calculation - FP <- colSums(conf_mat) - diag(conf_mat) - PP <- colSums(conf_mat) - - output <- FP / PP - - # 2) calculate values - if (!is.null(average)) { - - average <- as.logical(average == "micro") - - if (average) { - - output <- sum(FP, na.rm = TRUE) / sum(PP, na.rm = TRUE) - - } else { - - if (!na.rm) { - - output[!is.finite(output)] <- 0 - - } - - output <- mean( - output, - na.rm = na.rm - ) - - } - - } - - return( - output - ) - - -} - -py_fpr <- function(actual, predicted, average = NULL, na.rm = TRUE, w = NULL) { - # 1) Construct matrix - conf_mat <- SLmetrics::cmatrix( - actual = actual, - predicted = predicted, - w = w - ) - # Calculate False Positives and True Negatives per class - FP <- colSums(conf_mat) - diag(conf_mat) - TN <- sum(conf_mat) - rowSums(conf_mat) - colSums(conf_mat) + diag(conf_mat) - - # Calculate False Positive Rate per class - fpr_class <- FP / (FP + TN) - - # If averaging is requested - if (!is.null(average)) { - if (average == "micro") { - return(sum(FP, na.rm = TRUE) / (sum(FP, na.rm = TRUE) + sum(TN, na.rm = TRUE))) - } else { - if (!na.rm) { - fpr_class[!is.finite(fpr_class)] <- 0 - } - return(mean(fpr_class, na.rm = na.rm)) - } - } - - return(fpr_class) -} - -py_npv <- function(actual, predicted, average = NULL, na.rm = TRUE, w = NULL) { - # 1) Construct matrix - conf_mat <- SLmetrics::cmatrix( - actual = actual, - predicted = predicted, - w = w - ) - - # Calculate True Negatives and False Negatives per class - TN <- sum(conf_mat) - rowSums(conf_mat) - colSums(conf_mat) + diag(conf_mat) - FN <- rowSums(conf_mat) - diag(conf_mat) - - # Calculate Negative Predictive Value per class - npv_class <- TN / (TN + FN) - - # If averaging is requested - if (!is.null(average)) { - if (average == "micro") { - return(sum(TN, na.rm = TRUE) / (sum(TN, na.rm = TRUE) + sum(FN, na.rm = TRUE))) - } else { - if (!na.rm) { - npv_class[!is.finite(npv_class)] <- 0 - } - return(mean(npv_class, na.rm = na.rm)) - } - } - - return(npv_class) -} - -py_fer <- function(actual, predicted, average = NULL, na.rm = TRUE, w = NULL) { - # 1) Construct matrix - conf_mat <- SLmetrics::cmatrix( - actual = actual, - predicted = predicted, - w = w - ) - - # Calculate False Negatives and True Negatives per class - FN <- rowSums(conf_mat) - diag(conf_mat) - TN <- sum(conf_mat) - rowSums(conf_mat) - colSums(conf_mat) + diag(conf_mat) - - # Calculate False Omission Rate per class - for_class <- FN / (TN + FN) - - # If averaging is requested - if (!is.null(average)) { - if (average == "micro") { - return(sum(FN, na.rm = TRUE) / (sum(TN, na.rm = TRUE) + sum(FN, na.rm = TRUE))) - } else { - if (!na.rm) { - for_class[!is.finite(for_class)] <- 0 - 
} - return(mean(for_class, na.rm = na.rm)) - } - } - - return(for_class) -} - -py_plr <- function(actual, predicted, average = NULL, na.rm = TRUE, w = NULL) { - # 1) Construct matrix - conf_mat <- SLmetrics::cmatrix( - actual = actual, - predicted = predicted, - w = w -) - - # Calculate True Positives, False Positives, False Negatives, and True Negatives - TP <- diag(conf_mat) - FP <- colSums(conf_mat) - TP - FN <- rowSums(conf_mat) - TP - TN <- sum(conf_mat) - rowSums(conf_mat) - colSums(conf_mat) + TP - - # Calculate TPR and FPR per class - TPR <- TP / (TP + FN) - FPR <- FP / (FP + TN) - - # Calculate Positive Likelihood Ratio per class - plr_class <- TPR / FPR - - return(plr_class) -} - -py_nlr <- function(actual, predicted, average = NULL, na.rm = TRUE, w = NULL) { - # 1) Construct matrix - conf_mat <- SLmetrics::cmatrix( - actual = actual, - predicted = predicted, - w = w - ) - # Calculate True Positives, False Positives, False Negatives, and True Negatives - TP <- diag(conf_mat) - FP <- colSums(conf_mat) - TP - FN <- rowSums(conf_mat) - TP - TN <- sum(conf_mat) - rowSums(conf_mat) - colSums(conf_mat) + TP - - # Calculate TPR and TNR per class - TPR <- TP / (TP + FN) - TNR <- TN / (TN + FP) - - # Calculate Negative Likelihood Ratio per class - nlr_class <- (1 - TPR) / TNR - - # If averaging is requested - if (!is.null(average)) { - if (average == "micro") { - # Micro-average - overall_tpr <- sum(TP, na.rm = TRUE) / (sum(TP, na.rm = TRUE) + sum(FN, na.rm = TRUE)) - overall_tnr <- sum(TN, na.rm = TRUE) / (sum(TN, na.rm = TRUE) + sum(FP, na.rm = TRUE)) - return((1 - overall_tpr) / overall_tnr) - } else { - # Handle non-finite values in macro-average case - if (!na.rm) { - nlr_class[!is.finite(nlr_class)] <- 0 - } - return(mean(nlr_class, na.rm = na.rm)) - } - } - - return(nlr_class) -} - -py_dor <- function(actual, predicted, average = NULL, na.rm = TRUE, w = NULL) { - # Construct confusion matrix - # 1) Construct matrix - conf_mat <- SLmetrics::cmatrix( - actual = actual, - predicted = predicted, - w = w - ) - - # Calculate True Positives, False Positives, False Negatives, and True Negatives - TP <- diag(conf_mat) - FP <- colSums(conf_mat) - TP - FN <- rowSums(conf_mat) - TP - TN <- sum(conf_mat) - rowSums(conf_mat) - colSums(conf_mat) + TP - - # Calculate Diagnostic Odds Ratio per class - dor_class <- (TP * TN) / (FP * FN) - - # If averaging is requested - if (!is.null(average)) { - if (average == "micro") { - # Micro-average - overall_tp <- sum(TP, na.rm = TRUE) - overall_tn <- sum(TN, na.rm = TRUE) - overall_fp <- sum(FP, na.rm = TRUE) - overall_fn <- sum(FN, na.rm = TRUE) - return((overall_tp * overall_tn) / (overall_fp * overall_fn)) - } else { - # Handle non-finite values in macro-average case - if (!na.rm) { - dor_class[!is.finite(dor_class)] <- 0 - } - return(mean(dor_class, na.rm = na.rm)) - } - } - - return(dor_class) -} - - -ref_ROC <- function(actual, response, thresholds) { - - n_thresholds <- length(thresholds) - k <- n_levels <- length(levels(actual)) - total_rows <- n_thresholds * n_levels - - output <- data.frame( - threshold = rep(thresholds, each = n_levels), - level = rep(1:n_levels, times = n_thresholds), - label = rep(letters[1:n_levels], times = n_thresholds), - fpr = numeric(total_rows), - tpr = numeric(total_rows), - stringsAsFactors = FALSE - ) - - for (i in 1:n_levels) { - - for (j in 1:n_thresholds) { - threshold_val <- thresholds[j] - - predicted <- factor( - x = ifelse( - response >= threshold_val, - yes = i, # was i - no = (k+1) - i # was (k+1) - i - ), - 
labels = letters[1:k], - levels = 1:k - ) - - - row_index <- (j - 1) * n_levels + i - output$fpr[row_index] <- SLmetrics::fpr(actual = actual, predicted = predicted)[i] # was i - output$tpr[row_index] <- SLmetrics::tpr(actual = actual, predicted = predicted)[i] # was i - } - } - - output <- output[order(output$level, -output$threshold),] - rownames(output) <- NULL - - output -} - -ref_prROC <- function(actual, response, thresholds) { - - n_thresholds <- length(thresholds) - k <- n_levels <- length(levels(actual)) - total_rows <- n_thresholds * n_levels - - output <- data.frame( - threshold = rep(thresholds, each = n_levels), - level = rep(1:n_levels, times = n_thresholds), - label = rep(letters[1:n_levels], times = n_thresholds), - precision = numeric(total_rows), - recall = numeric(total_rows), - stringsAsFactors = FALSE - ) - - for (i in 1:n_levels) { - - for (j in 1:n_thresholds) { - threshold_val <- thresholds[j] - - predicted <- factor( - x = ifelse( - response >= threshold_val, - yes = i, # was i - no = (k+1) - i # was (k+1) - i - ), - labels = letters[1:k], - levels = 1:k - ) - - - row_index <- (j - 1) * n_levels + i - output$precision[row_index] <- SLmetrics::precision(actual = actual, predicted = predicted)[i] # was i - output$recall[row_index] <- SLmetrics::recall(actual = actual, predicted = predicted)[i] # was i - } - } - - output <- output[order(output$level, -output$threshold),] - rownames(output) <- NULL - - numeric_cols <- sapply(output, is.numeric) - output[numeric_cols] <- lapply(output[numeric_cols], function(col) { - col[is.nan(col)] <- 0 - col - }) - - output - -} - -# Regression Functions -py_rrse <- function( - actual, - predicted, - w = NULL -) { - - if (is.null(w)) { - w <- rep(1, length(actual)) - } - - sqrt( - sum(w * (actual - predicted)^2) / - sum(w*( actual - weighted.mean(actual, w = w))^2)) - -} - - -py_rae <- function( - actual, - predicted, - w = NULL) { - - if (is.null(w)) { - w <- rep(1, length(actual)) - } - - sum(w * abs(actual - predicted)) / sum( w * abs(actual - weighted.mean(actual, w = w))) -} - - -py_mpe <- function( - predicted, - actual, - w = NULL) { - - if (is.null(w)) { - w <- rep(1, length(actual)) - } - - error <- (actual - predicted) / actual - weighted_mpe <- sum(w * error) / sum(w) - - weighted_mpe -} - - - -# script end; diff --git a/tests/testthat/ref-regression.R b/tests/testthat/ref-regression.R new file mode 100644 index 00000000..f3df3602 --- /dev/null +++ b/tests/testthat/ref-regression.R @@ -0,0 +1,183 @@ +# script: Manual Calculations of +# measures not found in Python +# date: 2024-10-07 +# author: Serkan Korkmaz, serkor1@duck.com +# objective: These functions manually +calculate the metrics and serve as a reference +# for future changes in the package. +# +# They are named py_foo because it +# was convenient.
+# script start; + +# Reference Concordance Correlation Coefficient +py_ccc <- function(actual, predicted, w = NULL, correction = FALSE) { + + actual <- as.numeric(actual) + predicted <- as.numeric(predicted) + + if (is.null(w)) { + w <- rep(1, length(actual)) + } else { + w <- as.numeric(w) + } + + data <- cbind(actual = actual, predicted = predicted) + cov_matrix <- stats::cov.wt( + x = data, + wt = w, + cor = FALSE, + center = TRUE, + method = "unbiased" + ) + + actual_mean <- weighted.mean(actual, w = w) + predicted_mean <- weighted.mean(predicted, w = w) + actual_variance <- cov_matrix$cov[1, 1] + predicted_variance <- cov_matrix$cov[2, 2] + covariance <- cov_matrix$cov[1, 2] + + if (correction) { + n <- sum(w) + actual_variance <- actual_variance * (n - 1) / n + predicted_variance <- predicted_variance * (n - 1) / n + covariance <- covariance * (n - 1) / n + } + + numerator <- 2 * covariance + denominator <- actual_variance + predicted_variance + (actual_mean - predicted_mean)^2 + ccc_value <- numerator / denominator + + return(ccc_value) +} + + +# Reference Root Relative Squared Error +py_rrse <- function( + actual, + predicted, + w = NULL +) { + + if (is.null(w)) { + w <- rep(1, length(actual)) + } + + sqrt( + sum(w * (actual - predicted)^2) / + sum(w*( actual - weighted.mean(actual, w = w))^2)) + +} + +# Reference Relative Absolute Error +py_rae <- function( + actual, + predicted, + w = NULL) { + + if (is.null(w)) { + w <- rep(1, length(actual)) + } + + sum(w * abs(actual - predicted)) / sum( w * abs(actual - weighted.mean(actual, w = w))) +} + +# Reference Mean Percentage Error +py_mpe <- function( + predicted, + actual, + w = NULL) { + + if (is.null(w)) { + w <- rep(1, length(actual)) + } + + error <- (actual - predicted) / actual + weighted_mpe <- sum(w * error) / sum(w) + + weighted_mpe +} + +# Reference Relative Root Mean Squared Error +ref_rrmse <- function(actual, predicted, w = NULL, normalization = 0) { + + weighted_quantile <- function(values, weights, alpha) { + # Pair values with weights + data <- data.frame(values = values, weights = weights) + + # Sort by values + data <- data[order(data$values), ] + + # Compute total weight + total_weight <- sum(data$weights) + + # Compute target cumulative weight + target_weight <- alpha * total_weight + + # Initialize cumulative weight + cumulative_weight <- 0.0 + + # Variables to store the lower and upper bounds + lower <- 0.0 + upper <- 0.0 + lower_set <- FALSE + + # Iterate through the sorted data + for (i in seq_len(nrow(data))) { + cumulative_weight <- cumulative_weight + data$weights[i] + + if (!lower_set && cumulative_weight >= target_weight) { + lower <- data$values[i] + lower_set <- TRUE + } + + if (cumulative_weight >= target_weight) { + upper <- data$values[i] + break + } + } + + # Interpolation + return(lower + (upper - lower) * ((target_weight - (cumulative_weight - data$weights[i])) / total_weight)) + } + + + + # Calculate RMSE + RMSE <- sqrt(weighted.mean( + (actual - predicted)^2, + w = if (is.null(w)) rep(1, length(actual)) else w + )) + + if (normalization == 0) { + denominator <- weighted.mean( + actual, + w = if (is.null(w)) rep(1, length(actual)) else w + ) + } + + if (normalization == 1) { + denominator <- diff(range(actual)) + } + + + + if (normalization == 2) { + if (is.null(w)) { + denominator <- IQR( + actual + ) + } else { + denominator <- weighted_quantile(actual, weights = w, alpha = 0.75) - weighted_quantile(actual, weights = w, alpha = 0.25) + } + + } + + + RMSE / denominator + + + +} + +# script 
end; diff --git a/tests/testthat/scikit-learn.py b/tests/testthat/scikit-learn.py index ed8c7409..afe3aa46 100644 --- a/tests/testthat/scikit-learn.py +++ b/tests/testthat/scikit-learn.py @@ -2,6 +2,7 @@ # sklearn and define functions # that corresponds to SLmetrics from sklearn import metrics +import string from imblearn.metrics import sensitivity_score from imblearn.metrics import specificity_score from sklearn.metrics import confusion_matrix @@ -95,12 +96,13 @@ def py_cmatrix(actual, predicted, w = None): sample_weight = w ) -def py_entropy(actual, response, normalize = True, w = None): +def py_entropy(actual, response, normalize = True, w = None, labels = None): return metrics.log_loss( y_true = actual, y_pred = response, normalize = normalize, - sample_weight = w + sample_weight = w, + labels = labels ) def py_roc(actual, response, pos_label = 1, w = None): @@ -184,3 +186,77 @@ def py_d2pinball(actual, predicted, w = None, alpha = 0.5): alpha = alpha, multioutput = "raw_values" ) + +def py_ROC(actual, response, w=None): + + actual = np.asarray(actual) + response = np.asarray(response) + + unique_labels = np.unique(actual) + results = {} + + for i, label in enumerate(unique_labels): + + letter = string.ascii_lowercase[i] + + fpr, tpr, thresholds = metrics.roc_curve( + actual, + response, + pos_label=label, + sample_weight=w, + drop_intermediate=False + ) + + results[letter] = { + 'threshold': thresholds.tolist(), + 'level': i + 1, + 'label': letter, + 'fpr': fpr.tolist(), + 'tpr': tpr.tolist(), + + } + + return results + +def py_prROC(actual, response, w=None): + + actual = np.asarray(actual) + response = np.asarray(response) + + unique_labels = np.unique(actual) + results = {} + + for i, label in enumerate(unique_labels): + + letter = string.ascii_lowercase[i] + + precision, recall, thresholds = metrics.precision_recall_curve( + actual, + response, + pos_label = label, + sample_weight = w, + drop_intermediate = False + ) + + # Drop the last element + # scikit-learn adds a (1, 0) for recall and precision + precision = precision[:-1] + recall = recall[:-1] + + # Values are returned in ascending + # order. 
It is needed in descending order + # as in ROC + sorted_indices = np.argsort(-thresholds) + thresholds = thresholds[sorted_indices] + precision = precision[sorted_indices] + recall = recall[sorted_indices] + + results[letter] = { + 'threshold': thresholds.tolist(), + 'level': i + 1, + 'label': letter, + 'precision': precision.tolist(), + 'recall': recall.tolist(), + } + + return results diff --git a/tests/testthat/setup.R b/tests/testthat/setup.R index 2dff0b87..63147494 100644 --- a/tests/testthat/setup.R +++ b/tests/testthat/setup.R @@ -129,13 +129,37 @@ set_equal <- function( # 6) load scripts # globally -reticulate::source_python( - "scikit-learn.py" -) -reticulate::source_python( - "pytorch.py" -) -source("ref-manual.R") +if (interactive()) { + + reticulate::source_python( + "tests/testthat/scikit-learn.py" + ) + + reticulate::source_python( + "tests/testthat/pytorch.py" + ) + + source("tests/testthat/ref-classification-utils.R") + source("tests/testthat/ref-classification.R") + source("tests/testthat/ref-regression.R") + +} else { + + reticulate::source_python( + "scikit-learn.py" + ) + + reticulate::source_python( + "pytorch.py" + ) + + source("ref-classification-utils.R") + source("ref-classification.R") + source("ref-regression.R") + + +} + # 7) define all classification # functions in {SLmetrics} diff --git a/tests/testthat/test-ConfusionMatrix.R b/tests/testthat/test-ConfusionMatrix.R index f9ff2148..e04d1439 100644 --- a/tests/testthat/test-ConfusionMatrix.R +++ b/tests/testthat/test-ConfusionMatrix.R @@ -26,11 +26,23 @@ testthat::test_that( # 2.3) generate confusion # matrix - confusion_matrix <- cmatrix( - actual = actual, - predicted = predicted, - w = if (weighted) w else NULL - ) + if (weighted) { + + confusion_matrix <- weighted.cmatrix( + actual = actual, + predicted = predicted, + w = w + ) + + } else { + + confusion_matrix <- cmatrix( + actual = actual, + predicted = predicted + ) + + } + # 2.3) test that the values # are sensible @@ -54,6 +66,24 @@ testthat::test_that( info = info ) + # 2.6) test that + # methods works + + # 2.6.1) print method + testthat::expect_no_condition( + object = invisible(SLmetrics:::print.cmatrix(confusion_matrix)) + ) + + # 2.6.2) summary method + testthat::expect_no_condition( + object = invisible(SLmetrics:::summary.cmatrix(confusion_matrix)) + ) + + # 2.6.3) plot method + testthat::expect_no_condition( + object = invisible(SLmetrics:::plot.cmatrix(confusion_matrix)) + ) + } diff --git a/tests/testthat/test-CrossEntropy.R b/tests/testthat/test-CrossEntropy.R new file mode 100644 index 00000000..ee0c305f --- /dev/null +++ b/tests/testthat/test-CrossEntropy.R @@ -0,0 +1,81 @@ +# objective: Test that the metric +# implemented in {SLmetrics} is aligned with +# target functions. 
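+#
+# For orientation, a minimal sketch of the quantity under test
+# (hypothetical values; not executed by the test): the normalized cross
+# entropy is the mean negative log-probability of the true class,
+#
+#   p_true <- response[cbind(seq_along(actual), as.integer(actual))]
+#   -mean(log(p_true))
+#
+# and the weighted variant replaces the mean with
+# weighted.mean(-log(p_true), w). Both are compared against
+# sklearn's log_loss() through py_entropy() below.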
+ +testthat::test_that(desc = "Test `entropy()`-function", code = { + + wrapped_entropy <- function( + actual, + response, + w = NULL, + normalize = TRUE) { + + if (is.null(w)) { + entropy( + actual = actual, + response = response, + normalize = normalize + ) + } else { + weighted.entropy( + actual = actual, + response = response, + w = w, + normalize = normalize + ) + } + + } + + for (k in 2:5) { + + actual <- create_factor(k = k) + n <- length(actual) + + raw_probs <- matrix(rexp(n * k, rate = 1), nrow = n, ncol = k) + row_sums <- rowSums(raw_probs) + response <- raw_probs / row_sums + + w <- runif(n) + + for (weighted in c(FALSE, TRUE)) { + for (normalize in c(FALSE, TRUE)) { + + + score <- wrapped_entropy( + actual = actual, + response = response, + w = if (weighted) w else NULL, + normalize = normalize + ) + + actual_int <- as.integer(actual) + label_seq <- seq_len(k) + + py_score <- py_entropy( + actual = actual_int, + response = response, + normalize = normalize, + w = if (weighted) w else NULL, + labels = label_seq + ) + + info <- paste( + "k =", k, + "weighted =", weighted, + "normalize =", normalize + ) + + testthat::expect_true( + object = set_equal( + current = as.numeric(score), + target = as.numeric(py_score) + ), + info = info + ) + + } + } + + } +}) diff --git a/tests/testthat/test-FalseDiscoveryRate.R b/tests/testthat/test-FalseDiscoveryRate.R index 6af3846b..0fe82804 100644 --- a/tests/testthat/test-FalseDiscoveryRate.R +++ b/tests/testthat/test-FalseDiscoveryRate.R @@ -69,11 +69,11 @@ testthat::test_that( # 2.4) test that the values # are equal to target value - # 2.4.1) calculate py_score - py_score <- py_fdr( + # 2.4.1) calculate ref_score + ref_score <- ref_fdr( actual = actual, predicted = predicted, - average = if (is.na(micro)) { NULL } else ifelse(micro, "micro", "macro"), + micro = if (is.na(micro)) { NULL } else micro, w = if (weighted) w else NULL ) @@ -81,7 +81,7 @@ testthat::test_that( testthat::expect_true( object = set_equal( current = as.numeric(score), - target = as.numeric(py_score) + target = as.numeric(ref_score) ), info = info ) diff --git a/tests/testthat/test-FalseOmissionRate.R b/tests/testthat/test-FalseOmissionRate.R index 691feaf3..5e664e8c 100644 --- a/tests/testthat/test-FalseOmissionRate.R +++ b/tests/testthat/test-FalseOmissionRate.R @@ -69,11 +69,11 @@ testthat::test_that( # 2.4) test that the values # are equal to target value - # 2.4.1) calculate py_score - py_score <- py_fer( + # 2.4.1) calculate ref_score + ref_score <- ref_fer( actual = actual, predicted = predicted, - average = if (is.na(micro)) { NULL } else ifelse(micro, "micro", "macro"), + micro = if (is.na(micro)) { NULL } else micro, w = if (weighted) w else NULL ) @@ -81,7 +81,7 @@ testthat::test_that( testthat::expect_true( object = set_equal( current = as.numeric(score), - target = as.numeric(py_score) + target = as.numeric(ref_score) ), info = info ) diff --git a/tests/testthat/test-FalsePositiveRate.R b/tests/testthat/test-FalsePositiveRate.R index 06edb086..e6802e3b 100644 --- a/tests/testthat/test-FalsePositiveRate.R +++ b/tests/testthat/test-FalsePositiveRate.R @@ -69,11 +69,11 @@ testthat::test_that( # 2.4) test that the values # are equal to target value - # 2.4.1) calculate py_score - py_score <- py_fpr( + # 2.4.1) calculate ref_score + ref_score <- ref_fpr( actual = actual, predicted = predicted, - average = if (is.na(micro)) { NULL } else ifelse(micro, "micro", "macro"), + micro = if (is.na(micro)) { NULL } else micro, w = if (weighted) w else NULL ) @@ -81,7 +81,7 
@@ testthat::test_that( testthat::expect_true( object = set_equal( current = as.numeric(score), - target = as.numeric(py_score) + target = as.numeric(ref_score) ), info = info ) diff --git a/tests/testthat/test-NegativePredictiveValue.R b/tests/testthat/test-NegativePredictiveValue.R index b7550b82..bb9e704f 100644 --- a/tests/testthat/test-NegativePredictiveValue.R +++ b/tests/testthat/test-NegativePredictiveValue.R @@ -69,11 +69,11 @@ testthat::test_that( # 2.4) test that the values # are equal to target value - # 2.4.1) calculate py_score - py_score <- py_npv( + # 2.4.1) calculate ref_score + ref_score <- ref_npv( actual = actual, predicted = predicted, - average = if (is.na(micro)) { NULL } else ifelse(micro, "micro", "macro"), + micro = if (is.na(micro)) { NULL } else micro, w = if (weighted) w else NULL ) @@ -81,7 +81,7 @@ testthat::test_that( testthat::expect_true( object = set_equal( current = as.numeric(score), - target = as.numeric(py_score) + target = as.numeric(ref_score) ), info = info ) diff --git a/tests/testthat/test-ROC.R b/tests/testthat/test-ROC.R index 5be4f925..8fea1b59 100644 --- a/tests/testthat/test-ROC.R +++ b/tests/testthat/test-ROC.R @@ -9,306 +9,104 @@ testthat::test_that( desc = "Test that `ROC()`-function works as expected", code = { - n <- 1e3 - k <- 4 - # 1) generate - # factors - set.seed(1903) - actual <- create_factor( - k = k, - n = n - ) - - # 2) generate - # response variable - response <- rbeta( - n = n, - shape1 = 20, - shape2 = 2 - ) - - # 3) generate datasets - # with ROC - current <- SLmetrics::ROC( - actual = actual, - response = response - ) - - target <- ROC( - actual = actual, - response = response, - threshold = response - ) - - # 3.2) check if summaries - # and print methods responds - # correctly - - # 3.2.1) print method - testthat::expect_no_error( - print.ROC( - target - ) - ) - - - # 3.2.2) summary - # method - testthat::expect_no_error( - summary.ROC( - target - ) - ) - - - # 3.2.3) plot method - testthat::expect_no_error( - plot.ROC( - target - ) - ) - - # 4) test if its - # equal - testthat::expect_true( - set_equal( - current = current, - target = target - ) - ) - - # 5) calculate - # values using - # scikit-learn - py_value <- list() - for (i in 1:k) { - - py_metric <- py_roc( - actual = as.numeric( - as.numeric(actual) == i - ), - response = response, - pos_label = 1 - ) - - names(py_metric) <- c("fpr", "tpr", "threshold") - - py_metric <- as.data.frame(py_metric)[-1,] - - - py_metric$level <- i - py_metric$label <- letters[i] - - py_metric[[i]] <- py_metric - + # 0) construct ROC + # wrapper + wrapped_ROC <- function( + actual, + response, + thresholds = NULL, + w = NULL, + micro = TRUE) { + + if (is.null(w)) { + + ROC( + actual, + response, + thresholds = if (is.null(thresholds)) {NULL} else thresholds + ) + + } else { + + weighted.ROC( + actual, + response, + thresholds = if (is.null(thresholds)) {NULL} else thresholds, + w = w + ) + + } + } + # 1) generate class + # values + actual <- create_factor(n = 100, k = 5) + response <- runif(n = length(actual)) + w <- runif(n = length(actual)) + thresholds <- seq(0.1, 0.9, by = 0.1) - py_value <- do.call( - rbind, - py_value - ) - - py_value <- py_value[ , names(current)] - - # 6) test if its - # equal - testthat::expect_true( - set_equal( - current = current, - target = target - ) - ) - - - # 7) Test that custom - # thresholds works - # as expected - thresholds <- seq( - 0, - 1, - length.out = 10 - ) + for (weighted in c(TRUE, FALSE)) { + + # 2) test that they are + # equal to target values + for (micro in c(NA, TRUE, FALSE)) { + + # 2.1) generate sensible + # label information + info <- paste( + "Weighted = ", weighted, + "Micro = ", micro + ) - testthat::expect_true( - set_equal( - current = ROC( - actual = actual, - response = response, - thresholds = thresholds - ), - target = ref_ROC( + # 2.2) generate score + # from {SLmetrics} + score <- wrapped_ROC( actual = actual, response = response, - thresholds = thresholds + w = if (weighted) w else NULL, + micro = if (is.na(micro)) { NULL } else micro ) - ) - ) - - - } -) - -testthat::test_that( - desc = "Test that `prROC()`-function works as expected", - code = { - - n <- 1e3 - k <- 4 - # 1) generate - # factors - set.seed(1903) - actual <- create_factor( - k = k, - n = n - ) - - # 2) generate - # response variable - response <- rbeta( - n = n, - shape1 = 20, - shape2 = 2 - ) - - # 3) generate datasets - # with ROC - current <- prROC( - actual = actual, - response = response - ) - target <- prROC( - actual = actual, - response = response, - threshold = response - ) - - # 3.2) check if summaries - # and print methods responds - # correctly - - # 3.2.1) print method - testthat::expect_no_error( - print.prROC( - target - ) - ) - - - # 3.2.2) summary - # method - testthat::expect_no_error( - summary.prROC( - target - ) - ) - - - # 3.2.3) plot method - testthat::expect_no_error( - plot.prROC( - target - ) - ) - - - # 4) test if its - # equal - testthat::expect_true( - set_equal( - current = current, - target = target - ) - ) - - # 5) calculate - # values using - # scikit-learn - py_value <- list() - for (i in 1:k) { - - py_metric <- py_prROC( - actual = as.numeric( - as.numeric(actual) == i - ), - response = response, - pos_label = 1 - ) - - - py_metric <- lapply( - py_metric, - function(x) { - x[1:n] - } - ) - - names(py_metric) <- c("precision", "recall", "threshold") - - - - py_metric$level <- i - py_metric$label <- letters[i] + # 2.3) Test that methods + # work as expected + testthat::expect_no_condition( + object = invisible(capture.output(SLmetrics:::print.ROC(score))), + message = info + ) + testthat::expect_no_condition( + object = SLmetrics:::plot.ROC(score), + message = info + ) + + + # 2.4) test that the values + # are equal to target value + + # 2.4.1) calculate py_score + py_score <- do.call( + rbind, + lapply(py_ROC( + actual = actual, + response = response, + w = if (weighted) w else NULL), + FUN = as.data.frame)) + + # 2.4.2) test for equality + testthat::expect_true( + object = set_equal( + current = score[is.finite(score$thresholds),], + target = py_score[is.finite(py_score$thresholds),] + ), + info = info + ) - py_metric <- as.data.frame(py_metric) - py_metric <- py_metric[order(-py_metric$threshold, py_metric$level, decreasing = FALSE),] - py_value[[i]] <- py_metric + } } - py_value <- do.call( - rbind, - py_value - ) - - py_value <- py_value[ , names(current)] - - # 6) test if its - # equal - testthat::expect_true( - set_equal( - current = prROC( - actual, - response - ), - target = py_value - ) - ) - - - # 7) Test that custom - # thresholds works - # as expected - thresholds <- seq( - 0, - 1, - length.out = 10 - ) - - testthat::expect_true( - set_equal( - current = prROC( - actual = actual, - response = response, - thresholds = thresholds - ), - target = ref_prROC( - actual = actual, - response = response, - thresholds = thresholds - ) - ) - ) - - } ) - -# script end; - - - diff --git a/tests/testthat/test-Recall.R b/tests/testthat/test-Recall.R index 28b59025..54e3efa6 100644 ---
a/tests/testthat/test-Recall.R +++ b/tests/testthat/test-Recall.R @@ -69,11 +69,11 @@ testthat::test_that( # 2.4) test that the values # are equal to target value - # 2.4.1) calculate py_score - py_score <- py_recall( + # 2.4.1) calculate ref_score + ref_score <- ref_recall( actual = actual, predicted = predicted, - average = if (is.na(micro)) { NULL } else ifelse(micro, "micro", "macro"), + micro = if (is.na(micro)) { NULL } else micro, w = if (weighted) w else NULL ) @@ -81,7 +81,7 @@ testthat::test_that( testthat::expect_true( object = set_equal( current = as.numeric(score), - target = as.numeric(py_score) + target = as.numeric(ref_score) ), info = info ) diff --git a/tests/testthat/test-RelativeRootMeanSquaredError.R b/tests/testthat/test-RelativeRootMeanSquaredError.R new file mode 100644 index 00000000..15790e7b --- /dev/null +++ b/tests/testthat/test-RelativeRootMeanSquaredError.R @@ -0,0 +1,86 @@ +# objective: Test that the metric +# implemented in {SLmetrics} is aligned with +# target functions. + +testthat::test_that( + desc = "Test `rrmse()`-function", code = { + + # 0) construct rrmse-wrapper + wrapped_rrmse <- function( + actual, + predicted, + w = NULL, + normalization = 0) { + if (is.null(w)) { + rrmse( + actual = actual, + predicted = predicted, + normalization = normalization + ) + } else { + weighted.rrmse( + actual = actual, + predicted = predicted, + w = w, + normalization = normalization + ) + } + } + + for (weighted in c(FALSE, TRUE)) { + + for (normalization in c(0,1,2)) { + + # 0) create regression + # for the test + values <- create_regression() + actual <- values$actual + predicted <- values$predicted + w <- if (weighted) values$weight else NULL + + # 1) generate sensible + # label information + info <- paste( + "Weighted = ", weighted, + "Normalization = ", normalization + ) + + # 2) generate score + # from {SLmetrics} + score <- wrapped_rrmse( + actual = actual, + predicted = predicted, + w = w, + normalization = normalization + ) + + # 2.1) test that the values + # are sensible + testthat::expect_true(is.numeric(score), info = info) + testthat::expect_true(!is.na(score), info = info) + testthat::expect_true(length(score) == 1, info = info) + + # 2.2) calculate reference value + ref_score <- ref_rrmse( + actual = actual, + predicted = predicted, + w = w, + normalization = normalization + ) + + # 2.3) test for equality + testthat::expect_true( + object = set_equal( + current = as.numeric(score), + target = as.numeric(ref_score) + ), + info = info + ) + + } + + + + } + } +) \ No newline at end of file diff --git a/tests/testthat/test-S3-classification.R b/tests/testthat/test-S3-classification.R index 0d194f01..9cc3882c 100644 --- a/tests/testthat/test-S3-classification.R +++ b/tests/testthat/test-S3-classification.R @@ -18,7 +18,7 @@ testthat::test_that( predicted = predicted ) - sl_wmatrix <- cmatrix( + sl_wmatrix <- weighted.cmatrix( actual = actual, predicted = predicted, w = w @@ -129,7 +129,7 @@ testthat::test_that( predicted = predicted ) - sl_wmatrix <- cmatrix( + sl_wmatrix <- weighted.cmatrix( actual = actual, predicted = predicted, w = w diff --git a/tests/testthat/test-Specificity.R b/tests/testthat/test-Specificity.R index 7cea445b..5c62cc0d 100644 --- a/tests/testthat/test-Specificity.R +++ b/tests/testthat/test-Specificity.R @@ -69,11 +69,11 @@ testthat::test_that( # 2.4) test that the values # are equal to target value - # 2.4.1) calculate py_score - py_score <- py_specificity( + # 2.4.1) calculate ref_score + ref_score <- ref_specificity( actual = actual, predicted = predicted, - average = if (is.na(micro)) { NULL } else ifelse(micro, "micro", "macro"), + micro = if (is.na(micro)) { NULL } else micro, w = if (weighted) w else NULL ) @@ -81,7 +81,7 @@ testthat::test_that( testthat::expect_true( object = set_equal( current = as.numeric(score), - target = as.numeric(py_score) + target = as.numeric(ref_score) ), info = info ) diff --git a/tests/testthat/test-prROC.R b/tests/testthat/test-prROC.R new file mode 100644 index 00000000..568c4aba --- /dev/null +++ b/tests/testthat/test-prROC.R @@ -0,0 +1,114 @@ +# script: Precision Recall Curve +# date: 2024-12-28 +# author: Serkan Korkmaz, serkor1@duck.com +# objective: Test that `prROC()` returns +# the expected values - and correctly. +# script start; + +testthat::test_that( + desc = "Test that `prROC()`-function works as expected", + code = { + + # 0) construct ROC + # wrapper + wrapped_prROC <- function( + actual, + response, + thresholds = NULL, + w = NULL, + micro = TRUE) { + + if (is.null(w)) { + + prROC( + actual, + response, + thresholds = if (is.null(thresholds)) {NULL} else thresholds + ) + + } else { + + weighted.prROC( + actual, + response, + thresholds = if (is.null(thresholds)) {NULL} else thresholds, + w = w + ) + + } + + + } + + # 1) generate class + # values + actual <- create_factor(n = 100, k = 5) + response <- runif(n = length(actual)) + w <- runif(n = length(actual)) + thresholds <- seq(0.1, 0.9, by = 0.1) + + for (weighted in c(TRUE, FALSE)) { + + # 2) test that they are + # equal to target values + for (micro in c(NA, TRUE, FALSE)) { + + # 2.1) generate sensible + # label information + info <- paste( + "Weighted = ", weighted, + "Micro = ", micro + ) + + # 2.2) generate score + # from {SLmetrics} + score <- wrapped_prROC( + actual = actual, + response = response, + w = if (weighted) w else NULL, + micro = if (is.na(micro)) { NULL } else micro + ) + + # 2.3) Test that methods + # work as expected + testthat::expect_no_condition( + object = invisible(capture.output(SLmetrics:::print.prROC(score))), + message = info + ) + + testthat::expect_no_condition( + object = SLmetrics:::plot.prROC(score), + message = info + ) + + + # 2.4) test that the values + # are equal to target value + + # 2.4.1) calculate py_score + py_score <- do.call( + rbind, + lapply(py_prROC( + actual = actual, + response = response, + w = if (weighted) w else NULL), + FUN = as.data.frame) + ) + + # 2.4.2) test for equality + testthat::expect_true( + object = set_equal( + current = score[is.finite(score$thresholds),], + target = py_score[is.finite(py_score$thresholds),] + ), + info = info + ) + + + } + + } + + + } +) diff --git a/tools/modifyRcppExports.R b/tools/modifyRcppExports.R index 7e3bf605..ab4d87e4 100644 --- a/tools/modifyRcppExports.R +++ b/tools/modifyRcppExports.R @@ -23,37 +23,21 @@ updated_content <- gsub(", na_rm = (TRUE|FALSE)\\) \\{", ", na.rm = \\1) {", con # to pass the argument updated_content <- gsub("na_rm)", "na_rm = na.rm)", updated_content) -# 4) update eveything function wise -foo_update <- c( - "accuracy", - "baccuracy", - "mcc", - "phi", - "fmi", - "ckappa", - "zerooneloss", - "rmse", - "mse", - "huberloss", - "rmsle", - "mpe", - "mape", - "smape", - "rsq", - "mae", - "ccc", - "rae", - "rrse", - "pinball" - # , - # "ROC", - # "prROC" +# 4) Modify all functions +# so they end with ", ...)" +foo_update <- as.vector( + outer( + "[a-z]*", + c("cmatrix", "factor", "numeric", "default"), + paste, + sep = "." 
+ ) ) -foo_update <- as.vector(outer("[a-z]*", c("cmatrix", "factor", "numeric", "default"), paste, sep = ".")) - -# 5) Modify the function signatures in RcppExports to append ', ...' to the argument list +# 4.1) iterate through all +# functions for (fname in foo_update) { + # Match the exact function definition, capture arguments, and append ', ...' pattern <- paste0("\\b(", fname, " <- function\\(.*)\\)") replacement <- "\\1, ...)" @@ -62,8 +46,6 @@ for (fname in foo_update) { updated_content <- gsub(pattern, replacement, updated_content, perl = TRUE) } - - # Write the updated content back to the file writeLines(updated_content, file_path) diff --git a/vignettes/classification_problems.Rmd b/vignettes/classification_problems.Rmd index 9707aa6a..89dfc46d 100644 --- a/vignettes/classification_problems.Rmd +++ b/vignettes/classification_problems.Rmd @@ -238,8 +238,8 @@ The `ROC()`-function accepts a custom `threshold`-argument, which can be passed # 1) create custom # thresholds thresholds <- seq( - from = min(roc$threshold), - to = max(roc$threshold), + from = 0.9, + to = 0.1, length.out = 10 )
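As a closing reference, the `foo_update` rewrite in `tools/modifyRcppExports.R` above replaces the hand-maintained function list with generated regex stems. Below is a standalone sketch of the expansion and of the signature rewrite it drives (plain R; the `recall.factor` input line is a hypothetical example, not taken from RcppExports.R):

```r
# Reconstruct the generated pattern list
foo_update <- as.vector(
  outer(
    "[a-z]*",
    c("cmatrix", "factor", "numeric", "default"),
    paste,
    sep = "."
  )
)
foo_update
#> [1] "[a-z]*.cmatrix" "[a-z]*.factor"  "[a-z]*.numeric" "[a-z]*.default"

# Each stem is wrapped into a regex that captures the matched function
# definition and appends ', ...' to its argument list
pattern <- paste0("\\b(", foo_update[2], " <- function\\(.*)\\)")
gsub(pattern, "\\1, ...)", "recall.factor <- function(actual, predicted)", perl = TRUE)
#> [1] "recall.factor <- function(actual, predicted, ...)"
```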