diff --git a/.Rbuildignore b/.Rbuildignore index 23287fa..649f707 100644 --- a/.Rbuildignore +++ b/.Rbuildignore @@ -12,3 +12,6 @@ ^docs$ ^pkgdown$ ^.github$ +^.vscode$ +^air.toml$ +^\.github$ diff --git a/.github/workflows/check-standard.yaml b/.github/workflows/check-standard.yaml deleted file mode 100644 index 7e912ef..0000000 --- a/.github/workflows/check-standard.yaml +++ /dev/null @@ -1,75 +0,0 @@ -# For help debugging build failures open an issue on the RStudio community with the 'github-actions' tag. -# https://community.rstudio.com/new-topic?category=Package%20development&tags=github-actions -on: [push] -name: R-CMD-check - -jobs: - R-CMD-check: - runs-on: ${{ matrix.config.os }} - - name: ${{ matrix.config.os }} (${{ matrix.config.r }}) - - strategy: - fail-fast: false - matrix: - config: - #- {os: windows-latest, r: 'release'} - - {os: macOS-latest, r: 'release'} - - {os: ubuntu-20.04, r: 'release', rspm: "https://packagemanager.rstudio.com/cran/__linux__/focal/latest"} - - {os: ubuntu-20.04, r: 'devel', rspm: "https://packagemanager.rstudio.com/cran/__linux__/focal/latest"} - - env: - R_REMOTES_NO_ERRORS_FROM_WARNINGS: true - RSPM: ${{ matrix.config.rspm }} - GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} - - steps: - - uses: actions/checkout@v2 - - - uses: r-lib/actions/setup-r@v1 - with: - r-version: ${{ matrix.config.r }} - - - uses: r-lib/actions/setup-pandoc@v1 - - - name: Query dependencies - run: | - install.packages('remotes') - saveRDS(remotes::dev_package_deps(dependencies = TRUE), ".github/depends.Rds", version = 2) - writeLines(sprintf("R-%i.%i", getRversion()$major, getRversion()$minor), ".github/R-version") - shell: Rscript {0} - - - name: Cache R packages - if: runner.os != 'Windows' - uses: actions/cache@v2 - with: - path: ${{ env.R_LIBS_USER }} - key: ${{ runner.os }}-${{ hashFiles('.github/R-version') }}-1-${{ hashFiles('.github/depends.Rds') }} - restore-keys: ${{ runner.os }}-${{ hashFiles('.github/R-version') }}-1- - - - name: Install 
system dependencies - if: runner.os == 'Linux' - run: | - while read -r cmd - do - eval sudo $cmd - done < <(Rscript -e 'writeLines(remotes::system_requirements("ubuntu", "20.04"))') - - - name: Install dependencies - run: | - remotes::install_deps(dependencies = TRUE) - remotes::install_cran("rcmdcheck") - shell: Rscript {0} - - - name: Check - env: - _R_CHECK_CRAN_INCOMING_REMOTE_: false - run: rcmdcheck::rcmdcheck(args = c("--no-manual", "--as-cran"), error_on = "warning", check_dir = "check") - shell: Rscript {0} - - - name: Upload check results - if: failure() - uses: actions/upload-artifact@main - with: - name: ${{ runner.os }}-r${{ matrix.config.r }}-results - path: check diff --git a/.gitignore b/.gitignore index 3bd343f..e022892 100644 --- a/.gitignore +++ b/.gitignore @@ -58,3 +58,5 @@ examples1.rds doc Meta docs + +/.quarto/ diff --git a/.lintr b/.lintr index c062868..89a2efc 100644 --- a/.lintr +++ b/.lintr @@ -1,12 +1,6 @@ -linters: with_defaults( - line_length_linter(100), - assignment_linter = NULL, - cyclocomp_linter(complexity_limit = 25), # default value - undesirable_operator_linter = undesirable_operator_linter( - with_defaults( - default_undesirable_operators, - "<-" = "Use =, not <-, for assignment." - ) - ) - ) - +linters: linters_with_defaults( + assignment_linter = NULL, + line_length_linter(100), + cyclocomp_linter(complexity_limit = 15), + undesirable_operator_linter = undesirable_operator_linter( + op = list("<-" = "Please use '=' for assignment"))) diff --git a/DESCRIPTION b/DESCRIPTION index 467bc35..10ccdaf 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,7 +1,7 @@ Type: Package Package: benchmarkme Title: Crowd Sourced System Benchmarks -Version: 1.0.8 +Version: 1.0.9 Authors@R: person("Colin", "Gillespie", , "csgillespie@gmail.com", role = c("aut", "cre"), comment = c(ORCID = "0000-0003-1787-0275")) @@ -10,7 +10,8 @@ Description: Benchmark your CPU and compare against other CPUs. 
Also provides functions for obtaining system specifications, such as RAM, CPU type, and R version. License: GPL-2 | GPL-3 -URL: https://github.com/csgillespie/benchmarkme, https://csgillespie.github.io/benchmarkme/ +URL: https://github.com/csgillespie/benchmarkme, + https://csgillespie.github.io/benchmarkme/ BugReports: https://github.com/csgillespie/benchmarkme/issues Depends: R (>= 3.5.0) @@ -40,4 +41,4 @@ VignetteBuilder: knitr Encoding: UTF-8 LazyData: TRUE -RoxygenNote: 7.1.2 +RoxygenNote: 7.3.2 diff --git a/NEWS.md b/NEWS.md index 14dc966..512ea62 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,7 +1,12 @@ +# benchmarkme Version 1.0.9 _2024-05-06_ + * fix: #49 thanks to @ptompalski + * feat: Add website to description #50. Thanks to @olivroy + * chore: Use `air` for formatting + # benchmarkme Version 1.0.8 _2022-06-02_ * fix: `get_ram()` for windows (thanks to @ArkaB-DS @xiaodaigh #45) * internal: linting & format NEWS.md file - + # benchmarkme Version 1.0.7 _2022-01-17_ * Internal: Suppress warnings on `sysctl` calls * Fix: `get_ram()` for windows (thanks to @ArkaB-DS #41) @@ -28,7 +33,7 @@ # benchmarkme Version 1.0.0 * Update version focused on R 3.5 & above. Start anew. Sorry everyone -# benchmarkme Version 0.6.1 +# benchmarkme Version 0.6.1 * Improved BLAS detection (suggested by @ck37 #15) # benchmarkme Version 0.6.0 @@ -40,10 +45,10 @@ * Can now run `benchmark_std()` if the package is not attached (thanks to @YvesCR) * Nicer version of `print.bytes()` (thanks to @richierocks) * Adding parallel benchmarks (thanks to @jknowles) - + # benchmarkme Version 0.5.0 * Bug fix in get_byte_compiler when `cmpfun` was used. - + # benchmarkme Version 0.4.0 * Update to shinyapps.io example * Moved benchmark description to shinyapps.io @@ -54,13 +59,13 @@ * Used `Sys.getpid()` to try and determine the BLAS/LAPACK library (suggested by Ashley Ford). 
-# benchmarkme Version 0.2.3 +# benchmarkme Version 0.2.3 * Return `NA` for `get_cpu()`/`get_ram()` when it isn't possible to determine CPU/RAM. - + # benchmarkme Version 0.2.2 * First CRAN version - + # benchmarkme Version 0.2.0 * More flexibility in plot and datatable functions - you can now specify the test you want to compare. * The number of cores returned by `get_cpu()`. @@ -84,7 +89,7 @@ * Further RAM and Mac issues. # benchmarkme Version 0.1.4 - * Bug fix: Remove white space from apple RAM output (thanks to @vzemlys). Fixes #2. + * Bug fix: Remove white space from apple RAM output (thanks to @vzemlys). Fixes #2. # benchmarkme Version 0.1.3 * Add a fall-back when getting RAM - grab everything. diff --git a/R/benchmark_io.R b/R/benchmark_io.R index 138ccec..8a75b94 100644 --- a/R/benchmark_io.R +++ b/R/benchmark_io.R @@ -11,11 +11,13 @@ #' @importFrom utils read.csv write.csv #' @rdname benchmark_io #' @export -benchmark_io = function(runs = 3, - size = c(5, 50), - tmpdir = tempdir(), - verbose = TRUE, - cores = 0L) { +benchmark_io = function( + runs = 3, + size = c(5, 50), + tmpdir = tempdir(), + verbose = TRUE, + cores = 0L +) { # Order size largest to smallest for trial run. 
# Trial on largest @@ -24,13 +26,20 @@ benchmark_io = function(runs = 3, } size = sort(size, decreasing = TRUE) if (cores > 0) { - results = benchmark_io_parallel(runs = runs, size = size, - tmpdir = tmpdir, verbose = verbose, - cores = cores) + results = benchmark_io_parallel( + runs = runs, + size = size, + tmpdir = tmpdir, + verbose = verbose, + cores = cores + ) } else { - results = benchmark_io_serial(runs = runs, size = size, - tmpdir = tmpdir, verbose = verbose) - + results = benchmark_io_serial( + runs = runs, + size = size, + tmpdir = tmpdir, + verbose = verbose + ) } class(results) = c("ben_results", class(results)) results @@ -56,18 +65,40 @@ benchmark_io_serial = function(runs, size, tmpdir, verbose) { benchmark_io_parallel = function(runs, size, tmpdir, verbose, cores) { message("Preparing read/write io") - bm_parallel("bm_write", runs = 1, - size = size[1], tmpdir = tmpdir, - verbose = verbose, cores = max(cores)) + bm_parallel( + "bm_write", + runs = 1, + size = size[1], + tmpdir = tmpdir, + verbose = verbose, + cores = max(cores) + ) results = NULL for (s in size) { - if (verbose) message("# IO benchmarks (2 tests) for size ", s, " MB (parallel)") - results = rbind(results, - bm_parallel("bm_write", runs = runs, size = s, tmpdir = tmpdir, - verbose = verbose, cores = cores)) - results = rbind(results, - bm_parallel("bm_read", runs = runs, size = s, tmpdir = tmpdir, - verbose = verbose, cores = cores)) + if (verbose) + message("# IO benchmarks (2 tests) for size ", s, " MB (parallel)") + results = rbind( + results, + bm_parallel( + "bm_write", + runs = runs, + size = s, + tmpdir = tmpdir, + verbose = verbose, + cores = cores + ) + ) + results = rbind( + results, + bm_parallel( + "bm_read", + runs = runs, + size = s, + tmpdir = tmpdir, + verbose = verbose, + cores = cores + ) + ) } results @@ -75,18 +106,26 @@ benchmark_io_parallel = function(runs, size, tmpdir, verbose, cores) { #' @rdname benchmark_io #' @export -bm_read = function(runs = 3, size = 
c(5, 50), - tmpdir = tempdir(), verbose = TRUE) { +bm_read = function( + runs = 3, + size = c(5, 50), + tmpdir = tempdir(), + verbose = TRUE +) { n = 12.5e4 * size set.seed(1) on.exit(set.seed(NULL)) x = Rnorm(n) m = data.frame(matrix(x, ncol = 10)) - test = rep(paste0("read", size), runs) - timings = data.frame(user = numeric(runs), system = 0, - elapsed = 0, test = test, - test_group = test, - stringsAsFactors = FALSE) + test = rep(paste0("read", size), runs) + timings = data.frame( + user = numeric(runs), + system = 0, + elapsed = 0, + test = test, + test_group = test, + stringsAsFactors = FALSE + ) fname = tempfile(fileext = ".csv", tmpdir = tmpdir) write.csv(m, fname, row.names = FALSE) for (i in 1:runs) { @@ -95,8 +134,12 @@ bm_read = function(runs = 3, size = c(5, 50), read.csv(fname, colClasses = rep("numeric", 10)) })[1:3] if (verbose) { - message(c("\t Reading a csv with ", n, " values", - timings_mean(timings[timings$test_group == paste0("read", size), ]))) + message(c( + "\t Reading a csv with ", + n, + " values", + timings_mean(timings[timings$test_group == paste0("read", size), ]) + )) } } unlink(fname) @@ -106,18 +149,26 @@ bm_read = function(runs = 3, size = c(5, 50), #' @rdname benchmark_io #' @export -bm_write = function(runs = 3, size = c(5, 50), - tmpdir = tempdir(), verbose = TRUE) { +bm_write = function( + runs = 3, + size = c(5, 50), + tmpdir = tempdir(), + verbose = TRUE +) { n = 12.5e4 * size set.seed(1) on.exit(set.seed(NULL)) x = Rnorm(n) m = data.frame(matrix(x, ncol = 10)) - test = rep(paste0("write", size), runs) - timings = data.frame(user = numeric(runs), system = 0, - elapsed = 0, test = test, - test_group = test, - stringsAsFactors = FALSE) + test = rep(paste0("write", size), runs) + timings = data.frame( + user = numeric(runs), + system = 0, + elapsed = 0, + test = test, + test_group = test, + stringsAsFactors = FALSE + ) for (i in 1:runs) { fname = tempfile(fileext = ".csv", tmpdir = tmpdir) invisible(gc()) @@ -127,8 +178,12 @@ 
bm_write = function(runs = 3, size = c(5, 50), unlink(fname) invisible(gc()) if (verbose) { - message(c("\t Writing a csv with ", n, " values", - timings_mean(timings[timings$test_group == paste0("write", size), ]))) + message(c( + "\t Writing a csv with ", + n, + " values", + timings_mean(timings[timings$test_group == paste0("write", size), ]) + )) } } timings diff --git a/R/benchmark_matrix_calculations.R b/R/benchmark_matrix_calculations.R index 190dbe4..299f708 100644 --- a/R/benchmark_matrix_calculations.R +++ b/R/benchmark_matrix_calculations.R @@ -21,9 +21,14 @@ globalVariables(c("a", "b", "ans")) bm_matrix_cal_manip = function(runs = 3, verbose = TRUE) { a = 0 b = 0 - timings = data.frame(user = numeric(runs), system = 0, elapsed = 0, - test = "manip", test_group = "matrix_cal", - stringsAsFactors = FALSE) + timings = data.frame( + user = numeric(runs), + system = 0, + elapsed = 0, + test = "manip", + test_group = "matrix_cal", + stringsAsFactors = FALSE + ) for (i in 1:runs) { invisible(gc()) timing = system.time({ @@ -35,22 +40,36 @@ bm_matrix_cal_manip = function(runs = 3, verbose = TRUE) { timings[i, 1:3] = timing[1:3] } if (verbose) - message(c("\tCreation, transp., deformation of a 5,000 x 5,000 matrix", timings_mean(timings))) + message(c( + "\tCreation, transp., deformation of a 5,000 x 5,000 matrix", + timings_mean(timings) + )) timings } #' @rdname bm_matrix_cal_manip #' @export bm_matrix_cal_power = function(runs = 3, verbose = TRUE) { - timings = data.frame(user = numeric(runs), system = 0, elapsed = 0, - test = "power", test_group = "matrix_cal", stringsAsFactors = FALSE) + timings = data.frame( + user = numeric(runs), + system = 0, + elapsed = 0, + test = "power", + test_group = "matrix_cal", + stringsAsFactors = FALSE + ) for (i in 1:runs) { a = abs(matrix(Rnorm(2500 * 2500) / 2, ncol = 2500, nrow = 2500)) invisible(gc()) - timings[i, 1:3] = system.time({b <- a^1000})[1:3] #nolint + timings[i, 1:3] = system.time({ + b <- a^1000 + })[1:3] 
#nolint } if (verbose) - message(c("\t2,500 x 2,500 normal distributed random matrix^1,000", timings_mean(timings))) + message(c( + "\t2,500 x 2,500 normal distributed random matrix^1,000", + timings_mean(timings) + )) timings } @@ -58,12 +77,20 @@ bm_matrix_cal_power = function(runs = 3, verbose = TRUE) { #' @export bm_matrix_cal_sort = function(runs = 3, verbose = TRUE) { b = 0 - timings = data.frame(user = numeric(runs), system = 0, elapsed = 0, - test = "sort", test_group = "matrix_cal", stringsAsFactors = FALSE) + timings = data.frame( + user = numeric(runs), + system = 0, + elapsed = 0, + test = "sort", + test_group = "matrix_cal", + stringsAsFactors = FALSE + ) for (i in 1:runs) { a = Rnorm(7000000) invisible(gc()) - timings[i, 1:3] = system.time({b <- sort(a, method = "quick")})[1:3] #nolint + timings[i, 1:3] = system.time({ + b <- sort(a, method = "quick") + })[1:3] #nolint } if (verbose) message(c("\tSorting of 7,000,000 random values", timings_mean(timings))) @@ -74,17 +101,27 @@ bm_matrix_cal_sort = function(runs = 3, verbose = TRUE) { #' @export bm_matrix_cal_cross_product = function(runs = 3, verbose = TRUE) { b = 0 - timings = data.frame(user = numeric(runs), system = 0, elapsed = 0, - test = "cross_product", test_group = "matrix_cal", - stringsAsFactors = FALSE) + timings = data.frame( + user = numeric(runs), + system = 0, + elapsed = 0, + test = "cross_product", + test_group = "matrix_cal", + stringsAsFactors = FALSE + ) for (i in 1:runs) { a = Rnorm(2500 * 2500) dim(a) = c(2500, 2500) invisible(gc()) - timings[i, 1:3] = system.time({b <- crossprod(a)})[1:3] #nolint + timings[i, 1:3] = system.time({ + b <- crossprod(a) + })[1:3] #nolint } if (verbose) - message(c("\t2,500 x 2,500 cross-product matrix (b = a' * a)", timings_mean(timings))) + message(c( + "\t2,500 x 2,500 cross-product matrix (b = a' * a)", + timings_mean(timings) + )) timings } @@ -93,16 +130,25 @@ bm_matrix_cal_cross_product = function(runs = 3, verbose = TRUE) { bm_matrix_cal_lm = 
function(runs = 3, verbose = TRUE) { ans = 0 b = as.double(1:5000) - timings = data.frame(user = numeric(runs), system = 0, elapsed = 0, - test = "lm", test_group = "matrix_cal", - stringsAsFactors = FALSE) + timings = data.frame( + user = numeric(runs), + system = 0, + elapsed = 0, + test = "lm", + test_group = "matrix_cal", + stringsAsFactors = FALSE + ) for (i in 1:runs) { - a = new("dgeMatrix", x = Rnorm(5000 * 500), - Dim = as.integer(c(5000, 500))) + a = new("dgeMatrix", x = Rnorm(5000 * 500), Dim = as.integer(c(5000, 500))) invisible(gc()) - timings[i, 1:3] = system.time({ans = solve(crossprod(a), crossprod(a, b))})[1:3] #nolint + timings[i, 1:3] = system.time({ + ans = solve(crossprod(a), crossprod(a, b)) + })[1:3] #nolint } if (verbose) - message(c("\tLinear regr. over a 5,000 x 500 matrix (c = a \\ b')", timings_mean(timings))) + message(c( + "\tLinear regr. over a 5,000 x 500 matrix (c = a \\ b')", + timings_mean(timings) + )) timings } diff --git a/R/benchmark_matrix_functions.R b/R/benchmark_matrix_functions.R index 40941ee..cc0d363 100644 --- a/R/benchmark_matrix_functions.R +++ b/R/benchmark_matrix_functions.R @@ -18,13 +18,20 @@ #' @export bm_matrix_fun_fft = function(runs = 3, verbose = TRUE) { b = 0 - timings = data.frame(user = numeric(runs), system = 0, elapsed = 0, - test = "fft", test_group = "matrix_fun", - stringsAsFactors = FALSE) + timings = data.frame( + user = numeric(runs), + system = 0, + elapsed = 0, + test = "fft", + test_group = "matrix_fun", + stringsAsFactors = FALSE + ) for (i in 1:runs) { a = Rnorm(2500000) invisible(gc()) - timings[i, 1:3] = system.time({b <- fft(a)})[1:3] #nolint + timings[i, 1:3] = system.time({ + b <- fft(a) + })[1:3] #nolint } if (verbose) message(c("\tFFT over 2,500,000 random values", timings_mean(timings))) @@ -36,17 +43,26 @@ bm_matrix_fun_fft = function(runs = 3, verbose = TRUE) { #' @export bm_matrix_fun_eigen = function(runs = 3, verbose = TRUE) { b = 0 - timings = data.frame(user = numeric(runs), 
system = 0, elapsed = 0, - test = "eigen", test_group = "matrix_fun", - stringsAsFactors = FALSE) + timings = data.frame( + user = numeric(runs), + system = 0, + elapsed = 0, + test = "eigen", + test_group = "matrix_fun", + stringsAsFactors = FALSE + ) for (i in 1:runs) { a = array(Rnorm(600 * 600), dim = c(600, 600)) invisible(gc()) timings[i, 1:3] = system.time({ - b <- eigen(a, symmetric = FALSE, only.values = TRUE)$Value})[1:3] #nolint + b <- eigen(a, symmetric = FALSE, only.values = TRUE)$Value + })[1:3] #nolint } if (verbose) - message(c("\tEigenvalues of a 640 x 640 random matrix", timings_mean(timings))) + message(c( + "\tEigenvalues of a 640 x 640 random matrix", + timings_mean(timings) + )) timings } @@ -54,17 +70,27 @@ bm_matrix_fun_eigen = function(runs = 3, verbose = TRUE) { #' @export bm_matrix_fun_determinant = function(runs = 3, verbose = TRUE) { b = 0 - timings = data.frame(user = numeric(runs), system = 0, elapsed = 0, - test = "determinant", test_group = "matrix_fun", - stringsAsFactors = FALSE) + timings = data.frame( + user = numeric(runs), + system = 0, + elapsed = 0, + test = "determinant", + test_group = "matrix_fun", + stringsAsFactors = FALSE + ) for (i in 1:runs) { a = Rnorm(2500 * 2500) dim(a) = c(2500, 2500) invisible(gc()) - timings[i, 1:3] = system.time({b <- det(a)})[1:3] #nolint + timings[i, 1:3] = system.time({ + b <- det(a) + })[1:3] #nolint } if (verbose) - message(c("\tDeterminant of a 2,500 x 2,500 random matrix", timings_mean(timings))) + message(c( + "\tDeterminant of a 2,500 x 2,500 random matrix", + timings_mean(timings) + )) timings } @@ -73,17 +99,30 @@ bm_matrix_fun_determinant = function(runs = 3, verbose = TRUE) { #' @import Matrix #' @export bm_matrix_fun_cholesky = function(runs = 3, verbose = TRUE) { - timings = data.frame(user = numeric(runs), system = 0, elapsed = 0, - test = "cholesky", test_group = "matrix_fun", - stringsAsFactors = FALSE) + timings = data.frame( + user = numeric(runs), + system = 0, + elapsed = 
0, + test = "cholesky", + test_group = "matrix_fun", + stringsAsFactors = FALSE + ) for (i in 1:runs) { - a = crossprod(new("dgeMatrix", x = Rnorm(3000 * 3000), - Dim = as.integer(c(3000, 3000)))) + a = crossprod(new( + "dgeMatrix", + x = Rnorm(3000 * 3000), + Dim = as.integer(c(3000, 3000)) + )) invisible(gc()) - timings[i, 1:3] = system.time({b <- chol(a)})[1:3] #nolint + timings[i, 1:3] = system.time({ + b <- chol(a) + })[1:3] #nolint } if (verbose) - message(c("\tCholesky decomposition of a 3,000 x 3,000 matrix", timings_mean(timings))) + message(c( + "\tCholesky decomposition of a 3,000 x 3,000 matrix", + timings_mean(timings) + )) timings } @@ -91,15 +130,29 @@ bm_matrix_fun_cholesky = function(runs = 3, verbose = TRUE) { #' @export bm_matrix_fun_inverse = function(runs = 3, verbose = TRUE) { b = 0 - timings = data.frame(user = numeric(runs), system = 0, elapsed = 0, - test = "inverse", test_group = "matrix_fun", - stringsAsFactors = FALSE) + timings = data.frame( + user = numeric(runs), + system = 0, + elapsed = 0, + test = "inverse", + test_group = "matrix_fun", + stringsAsFactors = FALSE + ) for (i in 1:runs) { - a = new("dgeMatrix", x = Rnorm(1600 * 1600), Dim = as.integer(c(1600, 1600))) + a = new( + "dgeMatrix", + x = Rnorm(1600 * 1600), + Dim = as.integer(c(1600, 1600)) + ) invisible(gc()) - timings[i, 1:3] = system.time({b <- solve(a)})[1:3] #nolint + timings[i, 1:3] = system.time({ + b <- solve(a) + })[1:3] #nolint } if (verbose) - message(c("\tInverse of a 1,600 x 1,600 random matrix", timings_mean(timings))) + message(c( + "\tInverse of a 1,600 x 1,600 random matrix", + timings_mean(timings) + )) timings } diff --git a/R/benchmark_parallel.R b/R/benchmark_parallel.R index e7023e9..61da517 100644 --- a/R/benchmark_parallel.R +++ b/R/benchmark_parallel.R @@ -1,9 +1,11 @@ check_export = function(export, cl) { if (class(export) %in% "try-error") { parallel::stopCluster(cl) - stop("You need to call library(benchmarkme) before running parallel tests.\\ + 
stop( + "You need to call library(benchmarkme) before running parallel tests.\\ If you think you can avoid this, see github.com/csgillespie/benchmarkme/issues/33", - call. = FALSE) + call. = FALSE + ) } return(invisible(NULL)) } @@ -35,8 +37,14 @@ bm_parallel = function(bm, runs, verbose, cores, ...) { args[["runs"]] = 1 #TODO consider dropping first results from parallel results due to overhead - results = data.frame(user = NA, system = NA, elapsed = NA, test = NA, - test_group = NA, cores = NA) + results = data.frame( + user = NA, + system = NA, + elapsed = NA, + test = NA, + test_group = NA, + cores = NA + ) for (core in cores) { cl = parallel::makeCluster(core, outfile = "") @@ -45,8 +53,15 @@ bm_parallel = function(bm, runs, verbose, cores, ...) { parallel::clusterEvalQ(cl, "library('benchmarkme')") doParallel::registerDoParallel(cl) - tmp = data.frame(user = numeric(length(runs)), system = 0, elapsed = 0, - test = NA, test_group = NA, cores = NA, stringsAsFactors = FALSE) + tmp = data.frame( + user = numeric(length(runs)), + system = 0, + elapsed = 0, + test = NA, + test_group = NA, + cores = NA, + stringsAsFactors = FALSE + ) args$runs = 1 for (j in 1:runs) { @@ -59,7 +74,7 @@ bm_parallel = function(bm, runs, verbose, cores, ...) 
{ tmp$test = as.character(out[[1]]$test)[1] tmp$test_group = as.character(out[[1]]$test_group)[1] results = rbind(results, tmp) - parallel::stopCluster(cl)# Would be nice to have on.exit here, but we run out of memory + parallel::stopCluster(cl) # Would be nice to have on.exit here, but we run out of memory } return(stats::na.omit(results)) diff --git a/R/benchmark_programming.R b/R/benchmark_programming.R index 27bf7ec..3f0dd5a 100644 --- a/R/benchmark_programming.R +++ b/R/benchmark_programming.R @@ -14,38 +14,60 @@ #' @importFrom stats runif #' @export bm_prog_fib = function(runs = 3, verbose = TRUE) { - a = 0; b = 0; phi = 1.6180339887498949 #nolint - timings = data.frame(user = numeric(runs), system = 0, elapsed = 0, - test = "fib", test_group = "prog", stringsAsFactors = FALSE) + a = 0 + b = 0 + phi = 1.6180339887498949 #nolint + timings = data.frame( + user = numeric(runs), + system = 0, + elapsed = 0, + test = "fib", + test_group = "prog", + stringsAsFactors = FALSE + ) for (i in 1:runs) { a = floor(runif(3500000) * 1000) invisible(gc()) start = proc.time() - b = (phi^a - (-phi) ^ (-a)) / sqrt(5) + b = (phi^a - (-phi)^(-a)) / sqrt(5) stop = proc.time() timings[i, 1:3] = (stop - start)[1:3] } if (verbose) - message(c("\t3,500,000 Fibonacci numbers calculation (vector calc)", timings_mean(timings))) + message(c( + "\t3,500,000 Fibonacci numbers calculation (vector calc)", + timings_mean(timings) + )) timings } #' @rdname bm_prog_fib #' @export bm_prog_hilbert = function(runs = 3, verbose = TRUE) { - a = 3500; b = 0 #nolint - timings = data.frame(user = numeric(runs), system = 0, elapsed = 0, - test = "hilbert", test_group = "prog", stringsAsFactors = FALSE) + a = 3500 + b = 0 #nolint + timings = data.frame( + user = numeric(runs), + system = 0, + elapsed = 0, + test = "hilbert", + test_group = "prog", + stringsAsFactors = FALSE + ) for (i in 1:runs) { invisible(gc()) start = proc.time() - b = rep(1:a, a); dim(b) = c(a, a) #nolint + b = rep(1:a, a) + dim(b) = 
c(a, a) #nolint b = 1 / (t(b) + 0:(a - 1)) stop = proc.time() timings[i, 1:3] = (stop - start)[1:3] } if (verbose) - message(c("\tCreation of a 3,500 x 3,500 Hilbert matrix (matrix calc)", timings_mean(timings))) + message(c( + "\tCreation of a 3,500 x 3,500 Hilbert matrix (matrix calc)", + timings_mean(timings) + )) timings } @@ -53,8 +75,14 @@ bm_prog_hilbert = function(runs = 3, verbose = TRUE) { #' @export bm_prog_gcd = function(runs = 3, verbose = TRUE) { ans = 0 - timings = data.frame(user = numeric(runs), system = 0, elapsed = 0, - test = "gcd", test_group = "prog", stringsAsFactors = FALSE) + timings = data.frame( + user = numeric(runs), + system = 0, + elapsed = 0, + test = "gcd", + test_group = "prog", + stringsAsFactors = FALSE + ) gcd2 = function(x, y) { if (sum(y > 1.0E-4) == 0) { x @@ -68,21 +96,29 @@ bm_prog_gcd = function(runs = 3, verbose = TRUE) { b = ceiling(runif(1000000) * 1000) invisible(gc()) start = proc.time() - ans = gcd2(a, b)# gcd2 is a recursive function + ans = gcd2(a, b) # gcd2 is a recursive function stop = proc.time() timings[i, 1:3] = (stop - start)[1:3] } if (verbose) - message(c("\tGrand common divisors of 1,000,000 pairs (recursion)", timings_mean(timings))) + message(c( + "\tGrand common divisors of 1,000,000 pairs (recursion)", + timings_mean(timings) + )) timings } #' @rdname bm_prog_fib #' @export bm_prog_toeplitz = function(runs = 3, verbose = TRUE) { - timings = data.frame(user = numeric(runs), system = 0, elapsed = 0, - test = "toeplitz", test_group = "prog", - stringsAsFactors = FALSE) + timings = data.frame( + user = numeric(runs), + system = 0, + elapsed = 0, + test = "toeplitz", + test_group = "prog", + stringsAsFactors = FALSE + ) N = 3000 #nolint ans = rep(0, N * N) dim(ans) = c(N, N) @@ -98,7 +134,10 @@ bm_prog_toeplitz = function(runs = 3, verbose = TRUE) { timings[i, 1:3] = (stop - start)[1:3] } if (verbose) - message(c("\tCreation of a 3,000 x 3,000 Toeplitz matrix (loops)", timings_mean(timings))) + message(c( 
+ "\tCreation of a 3,000 x 3,000 Toeplitz matrix (loops)", + timings_mean(timings) + )) timings } @@ -107,11 +146,29 @@ bm_prog_toeplitz = function(runs = 3, verbose = TRUE) { #' @rdname bm_prog_fib #' @export bm_prog_escoufier = function(runs = 3, verbose = TRUE) { - timings = data.frame(user = numeric(runs), system = 0, elapsed = 0, - test = "escoufier", test_group = "prog", - stringsAsFactors = FALSE) - p = 0; vt = 0; vr = 0; vrt = 0; rvt = 0; RV = 0; j = 0; k = 0; #nolint - x2 = 0; R = 0; r_xx = 0; r_yy = 0; r_xy = 0; r_yx = 0; r_vmax = 0 #nolint + timings = data.frame( + user = numeric(runs), + system = 0, + elapsed = 0, + test = "escoufier", + test_group = "prog", + stringsAsFactors = FALSE + ) + p = 0 + vt = 0 + vr = 0 + vrt = 0 + rvt = 0 + RV = 0 + j = 0 + k = 0 #nolint + x2 = 0 + R = 0 + r_xx = 0 + r_yy = 0 + r_xy = 0 + r_yx = 0 + r_vmax = 0 #nolint # Calculate the trace of a matrix (sum of its diagonal elements) tr = function(y) { sum(c(y)[1 + 0:(min(dim(y)) - 1) * (dim(y)[1] + 1)], na.rm = FALSE) @@ -123,9 +180,9 @@ bm_prog_escoufier = function(runs = 3, verbose = TRUE) { start = proc.time() # Calculation of Escoufier's equivalent vectors p = ncol(x) - vt = 1:p # Variables to test - vr = NULL # Result: ordered variables - rv_cor = 1:p # Result: correlations #nolint + vt = 1:p # Variables to test + vr = NULL # Result: ordered variables + rv_cor = 1:p # Result: correlations #nolint vrt = NULL # loop on the variable number for (j in 1:p) { @@ -133,26 +190,29 @@ bm_prog_escoufier = function(runs = 3, verbose = TRUE) { # loop on the variables for (k in 1:(p - j + 1)) { x2 = cbind(x, x[, vr], x[, vt[k]]) - R = cor(x2) # Correlations table #nolint + R = cor(x2) # Correlations table #nolint r_yy = R[1:p, 1:p] r_xx = R[(p + 1):(p + j), (p + 1):(p + j)] r_xy = R[(p + 1):(p + j), 1:p] r_yx = t(r_xy) rvt = tr(r_yx %*% r_xy) / sqrt(tr(r_yy %*% r_yy) * tr(r_xx %*% r_xx)) # rv_cor calculation if (rvt > r_vmax) { - r_vmax = rvt # test of rv_cor - vrt = vt[k] # temporary 
held variable + r_vmax = rvt # test of rv_cor + vrt = vt[k] # temporary held variable } } - vr[j] = vrt # Result: variable - rv_cor[j] = r_vmax # Result: correlation - vt = vt[vt != vr[j]] # reidentify variables to test + vr[j] = vrt # Result: variable + rv_cor[j] = r_vmax # Result: correlation + vt = vt[vt != vr[j]] # reidentify variables to test } stop = proc.time() timings[i, 1:3] = (stop - start)[1:3] } if (verbose) - message(c("\tEscoufier's method on a 60 x 60 matrix (mixed)", timings_mean(timings))) + message(c( + "\tEscoufier's method on a 60 x 60 matrix (mixed)", + timings_mean(timings) + )) timings } diff --git a/R/benchmark_std.R b/R/benchmark_std.R index f67ad39..f3d676d 100644 --- a/R/benchmark_std.R +++ b/R/benchmark_std.R @@ -23,7 +23,9 @@ #' plot(res) #' } benchmark_std = function(runs = 3, verbose = TRUE, cores = 0L) { - rbind(benchmark_prog(runs, verbose, cores), - benchmark_matrix_cal(runs, verbose, cores), - benchmark_matrix_fun(runs, verbose, cores)) + rbind( + benchmark_prog(runs, verbose, cores), + benchmark_matrix_cal(runs, verbose, cores), + benchmark_matrix_fun(runs, verbose, cores) + ) } diff --git a/R/benchmarkme-package.R b/R/benchmarkme-package.R index a6c2ba2..256da54 100644 --- a/R/benchmarkme-package.R +++ b/R/benchmarkme-package.R @@ -5,7 +5,6 @@ #' RAM, CPU type, and R version. 
#' @name benchmarkme-package #' @aliases benchmarkme -#' @docType package #' @author \email{csgillespie@gmail.com} #' @keywords package #' @seealso \url{https://github.com/csgillespie/benchmarkme} diff --git a/R/benchmarks.R b/R/benchmarks.R index a7097e9..f42007b 100644 --- a/R/benchmarks.R +++ b/R/benchmarks.R @@ -1,10 +1,13 @@ run_benchmarks = function(bm, runs, verbose, cores) { if (cores > 0) { - results = lapply(bm, bm_parallel, - runs = runs, verbose = verbose, cores = cores) + results = lapply(bm, bm_parallel, runs = runs, verbose = verbose, cores = cores) } else { - results = lapply(bm, do.call, list(runs = runs, verbose = verbose), - envir = environment(run_benchmarks)) + results = lapply( + bm, + do.call, + list(runs = runs, verbose = verbose), + envir = environment(run_benchmarks) + ) } results = Reduce("rbind", results) results$cores = cores @@ -19,18 +22,21 @@ run_benchmarks = function(bm, runs, verbose, cores) { #' @examples #' get_available_benchmarks() get_available_benchmarks = function() { - c("benchmark_std", "benchmark_prog", "benchmark_matrix_cal", - "benchmark_matrix_fun", "benchmark_io") + c( + "benchmark_std", + "benchmark_prog", + "benchmark_matrix_cal", + "benchmark_matrix_fun", + "benchmark_io" + ) } #' @inheritParams benchmark_std #' @rdname bm_prog_fib #' @export benchmark_prog = function(runs = 3, verbose = TRUE, cores = 0L) { - bm = c("bm_prog_fib", "bm_prog_gcd", "bm_prog_hilbert", - "bm_prog_toeplitz", "bm_prog_escoufier") - if (verbose) - message("# Programming benchmarks (5 tests):") + bm = c("bm_prog_fib", "bm_prog_gcd", "bm_prog_hilbert", "bm_prog_toeplitz", "bm_prog_escoufier") + if (verbose) message("# Programming benchmarks (5 tests):") run_benchmarks(bm, runs, verbose, cores) } @@ -39,10 +45,14 @@ benchmark_prog = function(runs = 3, verbose = TRUE, cores = 0L) { #' @rdname bm_matrix_cal_manip #' @export benchmark_matrix_cal = function(runs = 3, verbose = TRUE, cores = 0L) { - bm = c("bm_matrix_cal_manip", 
"bm_matrix_cal_power", "bm_matrix_cal_sort", - "bm_matrix_cal_cross_product", "bm_matrix_cal_lm") - if (verbose) - message("# Matrix calculation benchmarks (5 tests):") + bm = c( + "bm_matrix_cal_manip", + "bm_matrix_cal_power", + "bm_matrix_cal_sort", + "bm_matrix_cal_cross_product", + "bm_matrix_cal_lm" + ) + if (verbose) message("# Matrix calculation benchmarks (5 tests):") run_benchmarks(bm, runs, verbose, cores) } @@ -51,10 +61,13 @@ benchmark_matrix_cal = function(runs = 3, verbose = TRUE, cores = 0L) { #' @rdname bm_matrix_fun_fft #' @export benchmark_matrix_fun = function(runs = 3, verbose = TRUE, cores = 0L) { - bm = c("bm_matrix_fun_cholesky", "bm_matrix_fun_determinant", - "bm_matrix_fun_eigen", "bm_matrix_fun_fft", - "bm_matrix_fun_inverse") - if (verbose) - message("# Matrix function benchmarks (5 tests):") + bm = c( + "bm_matrix_fun_cholesky", + "bm_matrix_fun_determinant", + "bm_matrix_fun_eigen", + "bm_matrix_fun_fft", + "bm_matrix_fun_inverse" + ) + if (verbose) message("# Matrix function benchmarks (5 tests):") run_benchmarks(bm, runs, verbose, cores) } diff --git a/R/clean_ram_output.R b/R/clean_ram_output.R index 575fe50..8e7879a 100644 --- a/R/clean_ram_output.R +++ b/R/clean_ram_output.R @@ -2,10 +2,10 @@ to_bytes = function(value) { num = as.numeric(value[1]) units = value[2] power = match(units, c("kB", "MB", "GB", "TB")) - if (!is.na(power)) return(num * 1000 ^ power) + if (!is.na(power)) return(num * 1000^power) power = match(units, c("Kilobytes", "Megabytes", "Gigabytes", "Terabytes")) - if (!is.na(power)) return(num * 1000 ^ power) + if (!is.na(power)) return(num * 1000^power) num } @@ -18,9 +18,12 @@ clean_ram = function(ram, os) { clean_ram = clean_win_ram(ram) # nocov return(unname(clean_ram)) } - if (length(ram) > 1 || + if ( + length(ram) > 1 || is.na(ram) || - length(grep("^solaris", os))) { # Don't care about solaris + length(grep("^solaris", os)) + ) { + # Don't care about solaris return(NA) } diff --git a/R/get_byte_compiler.R 
b/R/get_byte_compiler.R index 6bc187d..80730d7 100644 --- a/R/get_byte_compiler.R +++ b/R/get_byte_compiler.R @@ -16,8 +16,7 @@ #' get_byte_compiler() get_byte_compiler = function() { comp = Sys.getenv("R_COMPILE_PKGS") - if (nchar(comp) > 0L) comp = as.numeric(comp) - else comp = 0L + if (nchar(comp) > 0L) comp = as.numeric(comp) else comp = 0L ## Try to detect compilePKGS - long shot ## Return to same state as we found it diff --git a/R/get_cpu.R b/R/get_cpu.R index c2512be..1a4df59 100644 --- a/R/get_cpu.R +++ b/R/get_cpu.R @@ -17,8 +17,10 @@ get_cpu = function() { cpu = try(get_cpu_internal(), silent = TRUE) if (inherits(cpu, "try-error")) { - message("\t Unable to detect your CPU. - Please raise an issue at https://github.com/csgillespie/benchmarkme") # nocov + message( + "\t Unable to detect your CPU. + Please raise an issue at https://github.com/csgillespie/benchmarkme" + ) # nocov cpu = list(vendor_id = NA_character_, model_name = NA_character_) # nocov } cpu$no_of_cores = parallel::detectCores() @@ -28,21 +30,28 @@ get_cpu = function() { get_cpu_internal = function() { os = R.version$os if (length(grep("^linux", os))) { - cmd = "awk '/vendor_id/' /proc/cpuinfo" + cmd = "awk '/vendor_id/' /proc/cpuinfo" vendor_id = gsub("vendor_id\t: ", "", unique(system(cmd, intern = TRUE))) - cmd = "awk '/model name/' /proc/cpuinfo" + cmd = "awk '/model name/' /proc/cpuinfo" model_name = gsub("model name\t: ", "", unique(system(cmd, intern = TRUE))) } else if (length(grep("^darwin", os))) { sysctl = get_sysctl() if (is.na(sysctl)) { vendor_id = model_name = NA } else { - vendor_id = suppressWarnings(system2(sysctl, "-n machdep.cpu.vendor", - stdout = TRUE, stderr = NULL)) # nocov - - model_name = suppressWarnings(system2(sysctl, "-n machdep.cpu.brand_string", - stdout = TRUE, stderr = NULL)) # nocov + vendor_id = suppressWarnings(system2( + sysctl, + "-n machdep.cpu.vendor", + stdout = TRUE, + stderr = NULL + )) # nocov + model_name = suppressWarnings(system2( + sysctl, + 
"-n machdep.cpu.brand_string", + stdout = TRUE, + stderr = NULL + )) # nocov } } else if (length(grep("^solaris", os))) { vendor_id = NA # nocov @@ -52,7 +61,9 @@ get_cpu_internal = function() { model_name = system("wmic cpu get name", intern = TRUE)[2] # nocov vendor_id = system("wmic cpu get manufacturer", intern = TRUE)[2] # nocov } - list(vendor_id = stringr::str_squish(vendor_id), - model_name = stringr::str_squish(model_name), - no_of_cores = parallel::detectCores()) + list( + vendor_id = stringr::str_squish(vendor_id), + model_name = stringr::str_squish(model_name), + no_of_cores = parallel::detectCores() + ) } diff --git a/R/get_linear_algebra.R b/R/get_linear_algebra.R index 0691c3b..fc517a9 100644 --- a/R/get_linear_algebra.R +++ b/R/get_linear_algebra.R @@ -4,8 +4,8 @@ #' @importFrom utils sessionInfo #' @export get_linear_algebra = function() { - s = sessionInfo() - blas = s$BLAS - lapack = s$LAPACK - return(list(blas = blas, lapack = lapack)) + s = sessionInfo() + blas = s$BLAS + lapack = s$LAPACK + return(list(blas = blas, lapack = lapack)) } diff --git a/R/get_ram.R b/R/get_ram.R index 3d96602..a06926c 100644 --- a/R/get_ram.R +++ b/R/get_ram.R @@ -2,10 +2,7 @@ get_windows_ram = function() { ram = try(system("grep MemTotal /proc/meminfo", intern = TRUE), silent = TRUE) if (!inherits(ram, "try-error") && length(ram) != 0) { ram = strsplit(ram, " ")[[1]] - mult = switch(ram[length(ram)], - "B" = 1L, - "kB" = 1024L, - "MB" = 1048576L) + mult = switch(ram[length(ram)], "B" = 1L, "kB" = 1024L, "MB" = 1048576L) ram = as.numeric(ram[length(ram) - 1]) ram_size = ram * mult } else { @@ -59,14 +56,18 @@ get_ram = function() { os = R.version$os ram = suppressWarnings(try(system_ram(os), silent = TRUE)) if (inherits(ram, "try-error") || length(ram) == 0L || any(is.na(ram))) { - message("\t Unable to detect your RAM. # nocov - Please raise an issue at https://github.com/csgillespie/benchmarkme") # nocov + message( + "\t Unable to detect your RAM. 
# nocov + Please raise an issue at https://github.com/csgillespie/benchmarkme" + ) # nocov ram = structure(NA, class = "ram") # nocov } else { cleaned_ram = suppressWarnings(try(clean_ram(ram, os), silent = TRUE)) if (inherits(cleaned_ram, "try-error") || length(ram) == 0L) { - message("\t Unable to detect your RAM. # nocov - Please raise an issue at https://github.com/csgillespie/benchmarkme") # nocov + message( + "\t Unable to detect your RAM. # nocov + Please raise an issue at https://github.com/csgillespie/benchmarkme" + ) # nocov ram = structure(NA, class = "ram") #nocov } else { ram = structure(cleaned_ram, class = "ram") @@ -92,8 +93,7 @@ print.ram = function(x, digits = 3, unit_system = c("metric", "iec"), ...) { x = x / (base^power) } - formatted = format(signif(x, digits = digits), big.mark = ",", - scientific = FALSE, ...) + formatted = format(signif(x, digits = digits), big.mark = ",", scientific = FALSE, ...) cat(unclass(formatted), " ", unit, "\n", sep = "") invisible(paste(unclass(formatted), unit)) } diff --git a/R/get_sys_details.R b/R/get_sys_details.R index c685d5d..41a0f46 100644 --- a/R/get_sys_details.R +++ b/R/get_sys_details.R @@ -35,42 +35,38 @@ #' @examples #' ## Returns all details about your machine #' get_sys_details(cpu = FALSE, installed_packages = FALSE, ram = FALSE) -get_sys_details = function(sys_info = TRUE, platform_info = TRUE, - r_version = TRUE, ram = TRUE, - cpu = TRUE, byte_compiler = TRUE, - linear_algebra = TRUE, - locale = TRUE, installed_packages = TRUE, - machine = TRUE) { +get_sys_details = function( + sys_info = TRUE, + platform_info = TRUE, + r_version = TRUE, + ram = TRUE, + cpu = TRUE, + byte_compiler = TRUE, + linear_algebra = TRUE, + locale = TRUE, + installed_packages = TRUE, + machine = TRUE +) { l = list() - if (sys_info) l$sys_info = as.list(Sys.info()) - else l$sys_info = NA + if (sys_info) l$sys_info = as.list(Sys.info()) else l$sys_info = NA - if (platform_info) l$platform_info = get_platform_info() - else 
l$platform_info = NA + if (platform_info) l$platform_info = get_platform_info() else l$platform_info = NA - if (r_version) l$r_version = get_r_version() - else l$r_version = NA + if (r_version) l$r_version = get_r_version() else l$r_version = NA - if (ram) l$ram = get_ram() - else l$ram = NA + if (ram) l$ram = get_ram() else l$ram = NA - if (cpu) l$cpu = get_cpu() - else l$cpu = NA + if (cpu) l$cpu = get_cpu() else l$cpu = NA - if (byte_compiler) l$byte_compiler = get_byte_compiler() - else l$byte_compiler = NA + if (byte_compiler) l$byte_compiler = get_byte_compiler() else l$byte_compiler = NA - if (linear_algebra) l$linear_algebra = get_linear_algebra() - else l$linear_algebra = NA + if (linear_algebra) l$linear_algebra = get_linear_algebra() else l$linear_algebra = NA - if (locale) l$locale = Sys.getlocale() - else l$locale = NA + if (locale) l$locale = Sys.getlocale() else l$locale = NA - if (installed_packages) l$installed_packages = installed.packages() - else l$installed_packages = NA + if (installed_packages) l$installed_packages = installed.packages() else l$installed_packages = NA - if (machine) l$machine = .Machine - else l$machine = NA + if (machine) l$machine = .Machine else l$machine = NA l$package_version = packageDescription("benchmarkme")$Version l$id = paste0(Sys.Date(), "-", sample(1e8, 1)) diff --git a/R/global_variables.R b/R/global_variables.R index 91fe747..d890202 100644 --- a/R/global_variables.R +++ b/R/global_variables.R @@ -1,2 +1 @@ -globalVariables(c("test_group", "cores", "test", - "elapsed", "is_past", "time")) +globalVariables(c("test_group", "cores", "test", "elapsed", "is_past", "time")) diff --git a/R/plot_results.R b/R/plot_results.R index 5a645b5..93a0ed8 100644 --- a/R/plot_results.R +++ b/R/plot_results.R @@ -1,11 +1,13 @@ nice_palette = function() { alpha = 150 - palette(c(rgb(85, 130, 169, alpha = alpha, maxColorValue = 255), - rgb(200, 79, 178, alpha = alpha, maxColorValue = 255), - rgb(105, 147, 45, alpha = alpha, 
maxColorValue = 255), - rgb(204, 74, 83, alpha = alpha, maxColorValue = 255), - rgb(183, 110, 39, alpha = alpha, maxColorValue = 255), - rgb(131, 108, 192, alpha = alpha, maxColorValue = 255))) + palette(c( + rgb(85, 130, 169, alpha = alpha, maxColorValue = 255), + rgb(200, 79, 178, alpha = alpha, maxColorValue = 255), + rgb(105, 147, 45, alpha = alpha, maxColorValue = 255), + rgb(204, 74, 83, alpha = alpha, maxColorValue = 255), + rgb(183, 110, 39, alpha = alpha, maxColorValue = 255), + rgb(131, 108, 192, alpha = alpha, maxColorValue = 255) + )) } #' Compare results to past tests @@ -28,42 +30,49 @@ nice_palette = function() { #' @examples #' data(sample_results) #' plot(sample_results, blas_optimize = NULL) -plot.ben_results = function(x, - test_group = unique(x$test_group), - blas_optimize = is_blas_optimize(x), - log = "y", ...) { - +plot.ben_results = function( + x, + test_group = unique(x$test_group), + blas_optimize = is_blas_optimize(x), + log = "y", + ... +) { for (i in seq_along(test_group)) { group = x[x$test_group == test_group[i], ] for (core in unique(group$cores)) { - make_plot(x = group[group$cores == core, ], - blas_optimize = blas_optimize, - log = log, ...) + make_plot(x = group[group$cores == core, ], blas_optimize = blas_optimize, log = log, ...) } - if (length(test_group) != i) - readline("Press return to get next plot ") + if (length(test_group) != i) readline("Press return to get next plot ") } } #' @import dplyr make_plot = function(x, blas_optimize, log, ...) 
{ - test_group = unique(x$test_group) - results = benchmarkmeData::select_results(test_group = test_group, - blas_optimize = blas_optimize, - cores = unique(x$cores)) + results = benchmarkmeData::select_results( + test_group = test_group, + blas_optimize = blas_optimize, + cores = unique(x$cores) + ) - ben_rank = rank_results(x, - blas_optimize = blas_optimize, - verbose = TRUE) + ben_rank = rank_results(x, blas_optimize = blas_optimize, verbose = TRUE) no_of_reps = length(x$test) / length(unique(x$test)) ben_sum = sum(x[, 3]) / no_of_reps ## Arrange plot colours and layout - op = par(mar = c(3, 3, 2, 1), mgp = c(2, 0.4, 0), tck = -.01, - cex.axis = 0.8, las = 1, mfrow = c(1, 2)) + op = par( + mar = c(3, 3, 2, 1), + mgp = c(2, 0.4, 0), + tck = -.01, + cex.axis = 0.8, + las = 1, + mfrow = c(1, 2) + ) old_pal = palette() - on.exit({palette(old_pal); par(op)}) #nolint + on.exit({ + palette(old_pal) + par(op) + }) #nolint nice_palette() ## Calculate adjustment for sensible "You" placement @@ -74,25 +83,37 @@ make_plot = function(x, blas_optimize, log, ...) { ymax = max(results$time, ben_sum) ## Standard timings - plot(results$time, xlab = "Rank", ylab = "Total timing (secs)", - ylim = c(ymin, ymax), xlim = c(0.5, nrow(results) + 1), - panel.first = grid(), cex = 0.7, log = log, ...) + plot( + results$time, + xlab = "Rank", + ylab = "Total timing (secs)", + ylim = c(ymin, ymax), + xlim = c(0.5, nrow(results) + 1), + panel.first = grid(), + cex = 0.7, + log = log, + ... 
+ ) points(ben_rank - 1 / 2, ben_sum, bg = 4, pch = 21) abline(v = ben_rank - 1 / 2, col = 4, lty = 3) text(ben_rank - 1 / 2, ymin, "You", col = 4, adj = adj) - if (unique(x$cores) == 0) - title(paste0("Benchmark: ", test_group), cex = 0.9) - else - title(paste0("Benchmark: ", test_group, - "(", unique(x$cores), " cores)"), cex = 0.9) + if (unique(x$cores) == 0) title(paste0("Benchmark: ", test_group), cex = 0.9) else + title(paste0("Benchmark: ", test_group, "(", unique(x$cores), " cores)"), cex = 0.9) ## Relative timings fastest = min(ben_sum, results$time) ymax = ymax / fastest - plot(results$time / fastest, - xlab = "Rank", ylab = "Relative timing", - ylim = c(1, ymax), xlim = c(0.5, nrow(results) + 1), - panel.first = grid(), cex = 0.7, log = log, ...) + plot( + results$time / fastest, + xlab = "Rank", + ylab = "Relative timing", + ylim = c(1, ymax), + xlim = c(0.5, nrow(results) + 1), + panel.first = grid(), + cex = 0.7, + log = log, + ... + ) abline(h = 1, lty = 3) abline(v = ben_rank - 1 / 2, col = 4, lty = 3) points(ben_rank - 1 / 2, ben_sum / fastest, bg = 4, pch = 21) diff --git a/R/rank_results.R b/R/rank_results.R index c87d992..6c08e42 100644 --- a/R/rank_results.R +++ b/R/rank_results.R @@ -11,18 +11,12 @@ benchmarkmeData::is_blas_optimize #' @importFrom tibble tibble #' @import dplyr #' @export -rank_results = function(results, - blas_optimize = is_blas_optimize(results), - verbose = TRUE) { - - - no_of_test_groups = length(unique(results$test_group)) - if (no_of_test_groups != 1) - stop("Can only rank a single group at a time", call. = FALSE) +rank_results = function(results, blas_optimize = is_blas_optimize(results), verbose = TRUE) { + no_of_test_groups = length(unique(results$test_group)) + if (no_of_test_groups != 1) stop("Can only rank a single group at a time", call. 
= FALSE) no_of_reps = length(results$test) / length(unique(results$test)) - results_tib = tibble(time = sum(results$elapsed) / no_of_reps, - is_past = FALSE) + results_tib = tibble(time = sum(results$elapsed) / no_of_reps, is_past = FALSE) if (is.null(blas_optimize)) blas_optimize = c(FALSE, TRUE) tmp_env = new.env() @@ -42,7 +36,6 @@ rank_results = function(results, ben_rank = which(!rankings$is_past) - if (verbose) - message("You are ranked ", ben_rank, " out of ", nrow(rankings), " machines.") + if (verbose) message("You are ranked ", ben_rank, " out of ", nrow(rankings), " machines.") ben_rank } diff --git a/R/rnorm.R b/R/rnorm.R index 774ce7e..a6de037 100644 --- a/R/rnorm.R +++ b/R/rnorm.R @@ -1,7 +1,5 @@ #' @importFrom stats rnorm -Rnorm = function(n) { #nolint - if (requireNamespace("RcppZiggurat", quietly = TRUE)) - RcppZiggurat::zrnorm(n) - else - rnorm(n) +Rnorm = function(n) { + #nolint + if (requireNamespace("RcppZiggurat", quietly = TRUE)) RcppZiggurat::zrnorm(n) else rnorm(n) } diff --git a/R/upload_results.R b/R/upload_results.R index 93c1768..b9d9b2e 100644 --- a/R/upload_results.R +++ b/R/upload_results.R @@ -35,10 +35,12 @@ create_bundle = function(results, filename = NULL, args = NULL, id_prefix = "") #' res = benchmark_std() #' upload_results(res) #' } -upload_results = function(results, - url = "http://www.mas.ncl.ac.uk/~ncsg3/form.php", - args = NULL, - id_prefix = "") { +upload_results = function( + results, + url = "http://www.mas.ncl.ac.uk/~ncsg3/form.php", + args = NULL, + id_prefix = "" +) { message("Creating temporary file") fname = tempfile(fileext = ".rds") on.exit(unlink(fname)) @@ -46,9 +48,7 @@ upload_results = function(results, type = create_bundle(results, fname, id_prefix = id_prefix) message("Uploading results") - httr::POST(url, - body = list(userFile = httr::upload_file(fname)), - encode = "multipart") + httr::POST(url, body = list(userFile = httr::upload_file(fname)), encode = "multipart") message("Upload complete") 
message("Tracking id: ", type$id) diff --git a/R/zzz.R b/R/zzz.R index 098e89b..76c7048 100644 --- a/R/zzz.R +++ b/R/zzz.R @@ -1,3 +1,3 @@ -.onAttach = function(...) { #nolint - +.onAttach = function(...) { + #nolint } diff --git a/README.Rmd b/README.Rmd index c91bbb3..f491c90 100644 --- a/README.Rmd +++ b/README.Rmd @@ -1,6 +1,6 @@ --- output: github_document -editor_options: +editor_options: chunk_output_type: console --- @@ -14,20 +14,24 @@ knitr::opts_chunk$set( ) ``` -# System benchmarking +# System benchmarking -[![R-CMD-check](https://github.com/csgillespie/benchmarkme/workflows/R-CMD-check/badge.svg)](https://github.com/csgillespie/benchmarkme/actions) + +[![R-CMD-check](https://github.com/csgillespie/benchmarkme/actions/workflows/R-CMD-check.yaml/badge.svghttps://github.com/csgillespie/benchmarkme/actions/workflows/R-CMD-check.yaml/badge.svghttps://github.com/csgillespie/benchmarkme/actions/workflows/R-CMD-check.yaml/badge.svg)](https://github.com/csgillespie/benchmarkme/actions/workflows/R-CMD-check.yaml) [![codecov.io](https://codecov.io/github/csgillespie/benchmarkme/coverage.svg?branch=master)](https://codecov.io/github/csgillespie/benchmarkme?branch=master) -[![Downloads](http://cranlogs.r-pkg.org/badges/benchmarkme?color=brightgreen)](https://cran.r-project.org/package=benchmarkme) -[![CRAN_Status_Badge](http://www.r-pkg.org/badges/version/benchmarkme)](https://cran.r-project.org/package=benchmarkme) +[![Downloads](https://cranlogs.r-pkg.org/badges/benchmarkme?color=brightgreen)](https://cran.r-project.org/package=benchmarkme) +[![CRAN_Status_Badge](https://www.r-pkg.org/badges/version/benchmarkme)](https://cran.r-project.org/package=benchmarkme) -R benchmarking made easy. The package contains a number of benchmarks, heavily based on the benchmarks at https://mac.R-project.org/benchmarks/R-benchmark-25.R, for assessing -the speed of your system. + + + +R benchmarking made easy. 
The package contains a number of benchmarks, heavily based on the benchmarks at https://mac.R-project.org/benchmarks/R-benchmark-25.R, for assessing +the speed of your system. The package is for R 3.5 and above. In previous versions R, detecting the effect of the byte compiler was tricky and produced unrealistic comparisons. -## Overview +## Overview A straightforward way of speeding up your analysis is to buy a better computer. Modern desktops are relatively cheap, especially compared to user time. However, it isn't @@ -47,13 +51,13 @@ install.packages("benchmarkme") There are two groups of benchmarks: - * `benchmark_std()`: this benchmarks numerical operations such as loops and matrix operations. The benchmark comprises + * `benchmark_std()`: this benchmarks numerical operations such as loops and matrix operations. The benchmark comprises of three separate benchmarks: `prog`, `matrix_fun`, and `matrix_cal`. * `benchmark_io()`: this benchmarks reading and writing a 5 / 50, MB csv file. ### The benchmark_std() function -This benchmarks numerical operations such as loops and matrix operations. +This benchmarks numerical operations such as loops and matrix operations. This benchmark comprises of three separate benchmarks: `prog`, `matrix_fun`, and `matrix_cal`. If you have less than 3GB of RAM (run `get_ram()` to find out how much is available on your system), then you should kill any memory hungry applications, e.g. @@ -99,7 +103,7 @@ By default the files are written to a temporary directory generated ```{r eval=FALSE} tempdir() ``` -which depends on the value of +which depends on the value of ```{r eval=FALSE} Sys.getenv("TMPDIR") ``` @@ -121,7 +125,7 @@ plot(res_io) ## Previous versions of the package This package was started around 2015. However, multiple changes in the byte compiler -over the last few years, has made it very difficult to use previous results. So we have to +over the last few years, has made it very difficult to use previous results. 
So we have to start from scratch. The previous data can be obtained via @@ -140,7 +144,7 @@ The package has a few useful functions for extracting system specs: * Is byte compiling enabled: `get_byte_compiler()` * General platform info: `get_platform_info()` * R version: `get_r_version()` - + The above functions have been tested on a number of systems. If they don't work on your system, please raise [GitHub](https://github.com/csgillespie/benchmarkme/issues) issue. diff --git a/man/benchmarkme-package.Rd b/man/benchmarkme-package.Rd index 2565bb3..9bb131f 100644 --- a/man/benchmarkme-package.Rd +++ b/man/benchmarkme-package.Rd @@ -1,6 +1,5 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/benchmarkme-package.R -\docType{package} \name{benchmarkme-package} \alias{benchmarkme-package} \alias{benchmarkme} diff --git a/tests/testthat/test-byte_compiler.R b/tests/testthat/test-byte_compiler.R index 43c405d..0c99c28 100644 --- a/tests/testthat/test-byte_compiler.R +++ b/tests/testthat/test-byte_compiler.R @@ -7,5 +7,4 @@ test_that("Test Byte Compiler", { benchmark_std = compiler::cmpfun(benchmarkme::benchmark_std) assign("benchmark_std", benchmark_std, envir = globalenv()) expect_gt(get_byte_compiler(), 0L) -} -) +}) diff --git a/tests/testthat/test-cpu.R b/tests/testthat/test-cpu.R index ee4445b..60e466a 100644 --- a/tests/testthat/test-cpu.R +++ b/tests/testthat/test-cpu.R @@ -3,5 +3,4 @@ test_that("Test CPU", { cpu = get_cpu() expect_equal(length(cpu), 3) expect_equal(anyNA(cpu), FALSE) -} -) +}) diff --git a/tests/testthat/test-platform_info.R b/tests/testthat/test-platform_info.R index ec6ec5d..4417cac 100644 --- a/tests/testthat/test-platform_info.R +++ b/tests/testthat/test-platform_info.R @@ -1,5 +1,4 @@ test_that("Test Platform Info", { skip_on_cran() expect_equal(get_platform_info(), .Platform) - } -) +}) diff --git a/tests/testthat/test-plot_results.R b/tests/testthat/test-plot_results.R index 4b594e4..847559f 100644 --- 
a/tests/testthat/test-plot_results.R +++ b/tests/testthat/test-plot_results.R @@ -4,5 +4,4 @@ test_that("Test plot_past", { data(sample_results, envir = tmp_env, package = "benchmarkme") res = tmp_env$sample_results expect_null(plot(res)) -} -) +}) diff --git a/tests/testthat/test-ram.R b/tests/testthat/test-ram.R index 989f8f4..6ac62ba 100644 --- a/tests/testthat/test-ram.R +++ b/tests/testthat/test-ram.R @@ -4,6 +4,5 @@ test_that("Test RAM", { expect_true(get_ram() > 0) expect_output(benchmarkme:::print.ram(1.63e+10), regexp = "GB") expect_output(benchmarkme:::print.ram(10), regexp = "B") - expect_equal(benchmarkme:::to_bytes(c(16.4, "GB")), 1.64e+10) -} -) + expect_equal(benchmarkme:::to_bytes(c(16.4, "GB")), 1.64e+10) +}) diff --git a/tests/testthat/test-ranking.R b/tests/testthat/test-ranking.R index 2c6138a..9bfd67d 100644 --- a/tests/testthat/test-ranking.R +++ b/tests/testthat/test-ranking.R @@ -5,5 +5,4 @@ test_that("Test ranking", { res = tmp_env$sample_results res = res[res$test_group == "prog", ] expect_gt(rank_results(res), 0) -} -) +}) diff --git a/tests/testthat/test-rnorm.R b/tests/testthat/test-rnorm.R index 84dc618..08bf9bd 100644 --- a/tests/testthat/test-rnorm.R +++ b/tests/testthat/test-rnorm.R @@ -1,5 +1,4 @@ test_that("Test Rnorm", { skip_on_cran() expect_true(is.numeric(benchmarkme:::Rnorm(1))) -} -) +}) diff --git a/tests/testthat/test-sys_details.R b/tests/testthat/test-sys_details.R index 9009c7f..6aee967 100644 --- a/tests/testthat/test-sys_details.R +++ b/tests/testthat/test-sys_details.R @@ -4,5 +4,4 @@ test_that("Test Sys Details", { expect_equal(length(sys), 13) expect_equal(is.na(sys$sys_info), TRUE) expect_equal(is.na(sys$installed_packages), TRUE) - } -) +}) diff --git a/tests/testthat/test-timings.R b/tests/testthat/test-timings.R index e02d4f7..151dbde 100644 --- a/tests/testthat/test-timings.R +++ b/tests/testthat/test-timings.R @@ -2,5 +2,4 @@ test_that("Test Timing mean", { skip_on_cran() data("sample_results", package = 
"benchmarkme") expect_true(is.character(benchmarkme:::timings_mean(sample_results))) -} -) +}) diff --git a/tests/testthat/test-upload_results.R b/tests/testthat/test-upload_results.R index 1074f7e..4257b7e 100644 --- a/tests/testthat/test-upload_results.R +++ b/tests/testthat/test-upload_results.R @@ -12,6 +12,4 @@ test_that("Test upload_results", { res = create_bundle(NULL, fname, args = list(sys_info = FALSE)) expect_true(is.na(res$sys_info)) unlink(fname) - - } -) +})