Commit
#164 Merge branch 'main' into 164-logging
jmbarbone committed Mar 12, 2023
2 parents 2dc08f0 + ecf1048 commit dc89ef9
Showing 98 changed files with 989 additions and 726 deletions.
13 changes: 8 additions & 5 deletions .github/workflows/lint.yaml
@@ -1,4 +1,4 @@
# Workflow derived from https://github.com/r-lib/actions/tree/master/examples
# Workflow derived from https://github.com/r-lib/actions/tree/v2/examples
# Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help
on:
push:
@@ -14,16 +14,19 @@ jobs:
env:
GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3

- uses: r-lib/actions/setup-r@v1
- uses: r-lib/actions/setup-r@v2
with:
use-public-rspm: true

- uses: r-lib/actions/setup-r-dependencies@v1
- uses: r-lib/actions/setup-r-dependencies@v2
with:
extra-packages: lintr
extra-packages: any::lintr, local::.
needs: lint

- name: Lint
run: lintr::lint_package()
shell: Rscript {0}
env:
LINTR_ERROR_ON_LINT: true
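
Note: the updated lint job still runs `lintr::lint_package()` through Rscript, but now fails the build when lints are found. A rough local equivalent of that step (a sketch only; it assumes lintr is installed, the working directory is the package root, and that lintr honours the LINTR_ERROR_ON_LINT variable the same way the action does):

Sys.setenv(LINTR_ERROR_ON_LINT = "true")  # make any reported lint raise an error, as in CI
lintr::lint_package()                     # lint the package in the current directory
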
3 changes: 2 additions & 1 deletion NEWS.md
@@ -2,7 +2,8 @@

## Fixes

* `date_from_partial()` works again [#155](https://github.com/jmbarbone/mark/issues/155)] after fixing an issue with an internal utility `is_valid_date_string()` that wasn't recognizing `%Y-%m-%d` (and potentially others)
* `date_from_partial()` works again [#155](https://github.com/jmbarbone/mark/issues/155) after fixing an issue with an internal utility `is_valid_date_string()` that wasn't recognizing `%Y-%m-%d` (and potentially others)
* `lintr` GitHub action updated [#173](https://github.com/jmbarbone/mark/issues/173); this includes plenty of internal improvements and code cleanup

# mark 0.5.3

23 changes: 16 additions & 7 deletions R/base-conversion.R
@@ -33,9 +33,12 @@ base_alpha_single <- function(x, base) {
a <- match(a, letters[1:base], nomatch = NA_integer_)

if (anyNA(a)) {
stop(sprintf('Cannot calculate alpha base "%s" for "%s" which has letters beyond "%s"',
base, x, x[base]),
call. = FALSE)
msg <-
sprintf(
'Cannot calculate alpha base "%s" for "%s" which has letters beyond "%s"', # nolint: line_length_linter.
base, x, x[base]
)
stop(msg, call. = FALSE)
}

n <- length(a)
@@ -69,7 +72,10 @@ base_n <- function(x, from = 10, to = 10) {
}

if (to != 10) {
stop("base_n() is currently only valid for conversions to base 10", call. = FALSE)
stop(
"base_n() is currently only valid for conversions to base 10",
call. = FALSE
)
}

check_base(from)
@@ -80,8 +86,11 @@ base_n_single <- function(x, base) {
ints <- as.integer(chr_split(x))

if (any(ints >= base)) {
stop('Cannot caluclate base "', base, '" for "', x, '" which has ',
"numbers greater than or equal to the base value", call. = FALSE )
stop(
'Cannot caluclate base "', base, '" for "', x, '" which has ',
"numbers greater than or equal to the base value",
call. = FALSE
)
}

seqs <- (length(ints) - 1L):0L
@@ -107,7 +116,7 @@ check_base <- function(b, high = 9) {
stop("base must be an integer", call. = FALSE)
}

if (b > high | b <= 1) {
if (b > high || b <= 1) {
stop("base must be between 1 and ", high, call. = FALSE)
}
}
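
Note on the `check_base()` hunk above: replacing `|` with `||` matches base R style for `if()` conditions, because `||` is the scalar, short-circuiting operator while `|` is element-wise over vectors. A quick illustration in plain R:

b <- 12
b > 9 || b <= 1                 # TRUE; short-circuits after b > 9, never evaluates b <= 1
c(2, 12) > 9 | c(2, 12) <= 1    # element-wise: FALSE TRUE
# In recent R (>= 4.3.0), giving `||` an operand of length 2 is an error,
# so `||` also guards against accidentally vectorised conditions in if().
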
6 changes: 3 additions & 3 deletions R/between-more.R
@@ -4,10 +4,10 @@
#'
#' @param x A numeric vector of values
#' @param left,right Boundary values
#' @param type Abbreviation for the evaluation of `left` on `right` (see details)
#' @param type Abbreviation for the evaluation of `left` on `right` (see
#' details)
#'
#' @details
#' Type can be one of the below:
#' @details Type can be one of the below:
#'
#' \describe{
#' \item{g}{is greater than (>)}
24 changes: 13 additions & 11 deletions R/bib.R
@@ -1,3 +1,5 @@
# nolint start: line_length_linter.

#' Read Bib file
#'
#' Read a bib file into a data.frame
@@ -49,6 +51,8 @@
#' }
#' }

# nolint end: line_length_linter.

read_bib <- function(file, skip = 0L, max_lines = NULL, encoding = "UTF-8") {
# Account for nul values found in encoding?
# skipNul = TRUE could do this but an error can still be caused later
@@ -82,8 +86,6 @@ read_bib <- function(file, skip = 0L, max_lines = NULL, encoding = "UTF-8") {
fields <- tolower(fields)

# TODO Implement checks for duplicate categories?
# categories <- lapply(item_list, get_bib_categories)
# values <- lapply(item_list, get_bib_values)
out <- lapply(item_list, parse_bib)
categories <- lapply(out, "[[", "cat")
values <- lapply(out, "[[", "val")
@@ -121,7 +123,6 @@ parse_bib_val <- function(x) {
x[!lengths(x)] <- NA_character_
# There may be something better than this
# Would like to maintain the { and }
# x <- gsub("\\{|\\}|,?$", "", x)
x <- trimws(x)
x <- gsub("^(\\{|\")|(\"|\\})[,]?$", "", x)
x <- gsub(",$", "", x)
@@ -142,7 +143,10 @@ parse_bib_val <- function(x) {
process_bib_dataframe <- function(categories, values, fields, keys) {
# Determine all categories for missing values inside Map
ucats <- unique(remove_na(unlist(categories)))
ucats_df <- quick_dfl(category = ucats, value = rep(NA_character_, length(ucats)))
ucats_df <- quick_dfl(
category = ucats,
value = rep(NA_character_, length(ucats))
)

x <- mapply(
function(cats, vals, field, key) {
@@ -156,7 +160,11 @@ process_bib_dataframe <- function(categories, values, fields, keys) {
bad <- lens > 1L

if (any(bad)) {
msg <- sprintf("The key `%s` has duplicate categories of `%s`", key, names(lens)[bad])
msg <- sprintf(
"The key `%s` has duplicate categories of `%s`",
key,
names(lens)[bad]
)
stop(simpleError(msg))
}

@@ -192,11 +200,6 @@ process_bib_list <- function(keys, fields, categories, values) {

x <- mapply(
function(key, field, cats, vals) {
# # vals[is.na(vals)] <- ""
# new <- c(key, field, vals)
# names(new) <- c("key", "field", cats)
# class(new) <- c("character", "mark_bib_entry", .Names = key)
# new
struct(
c(key, field, vals),
class = c("character", "mark_bib_entry"),
@@ -297,7 +300,6 @@ print.mark_bib_df <- function(x, list = FALSE, ...) {
y <- x
attr(y, "bib_list") <- NULL
NextMethod()
# print(y, ...)
}

invisible(x)
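
For context on the `read_bib()` signature shown above, a hypothetical round trip follows; the sample .bib contents and the exact shape of the returned data frame are illustrative assumptions (the mark package is assumed to be attached), not documented output:

bib_file <- tempfile(fileext = ".bib")
writeLines(c(
  "@article{smith2020,",
  "  title = {An example title},",
  "  author = {Smith, Jane},",
  "  year = {2020}",
  "}"
), bib_file)
res <- read_bib(bib_file)
# Expected: one row per entry, carrying the citation key, the entry field
# ("article"), and one column per category (title, author, year), with the
# per-entry list stored in the "bib_list" attribute that print.mark_bib_df()
# strips before printing.
res
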
2 changes: 1 addition & 1 deletion R/char2fact.R
@@ -4,7 +4,7 @@
#'
#' @param x A vector of characters
#' @param n The limit to the number of unique values for the factor
#' @seealso #' @seealso [fact2char()]
#' @seealso [fact2char()]
#' @family factors
#' @export
char2fact <- function(x, n = 5) {
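
The `@seealso` fix above is documentation-only; for reference, `char2fact()` is documented as converting a character vector to a factor when it has no more than `n` unique values. A sketch of the assumed behaviour (the over-limit case is inferred from the `@param n` description, not shown in this diff; the mark package is assumed to be attached):

char2fact(c("a", "b", "a", "c"), n = 5)  # 3 unique values <= n: converted to a factor
char2fact(letters, n = 5)                # 26 unique values > n: presumably returned unchanged
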
40 changes: 24 additions & 16 deletions R/clipboard.R
@@ -57,7 +57,13 @@ write_clipboard.default <- function(x, ...) {

#' @export
write_clipboard.data.frame <- function(x, sep = "\t", ...) {
utils::write.table(x, file = "clipboard-128", sep = sep, row.names = FALSE, ...)
utils::write.table(
x,
file = "clipboard-128",
sep = sep,
row.names = FALSE,
...
)
}

#' @export
@@ -66,7 +72,7 @@ write_clipboard.matrix <- function(x, sep = "\t", ...) {
}

#' @export
write_clipboard.list <- function(x, sep = "\t", show_NA = FALSE, ...) {
write_clipboard.list <- function(x, sep = "\t", show_NA = FALSE, ...) { # nolint: object_name_linter, line_length_linter..
ls <- list2df(x, show_NA = show_NA)
write_clipboard(ls, sep = "\t", ...)
}
@@ -112,21 +118,23 @@ read_clipboard <- function(method = c("default", "data.frame", "tibble"), ...) {
#'
#' @inheritParams utils::read.table
#' @noRd
# nolint start: object_name_linter.
do_read_table_clipboard <- function(
header = TRUE,
# Copying form Excel produces tab separations
sep = "\t",
row.names = NULL,
# Excel formula for NA produces #N/A -- sometimes people use N/A...
na.strings = c("", "NA", "N/A", "#N/A"),
check.names = FALSE,
stringsAsFactors = FALSE,
encoding = "UTF-8",
# occasionally "#' is used as a column name -- may cause issues
comment.char = "",
blank.lines.skip = FALSE,
fill = TRUE,
...
header = TRUE,
# Copying form Excel produces tab separations
sep = "\t",
row.names = NULL,
# Excel formula for NA produces #N/A -- sometimes people use N/A...
na.strings = c("", "NA", "N/A", "#N/A"),
check.names = FALSE,
stringsAsFactors = FALSE,
encoding = "UTF-8",
# occasionally "#' is used as a column name -- may cause issues
comment.char = "",
blank.lines.skip = FALSE,
fill = TRUE,
...
# nolint end: objecT_name_linter.
) {
utils::read.table(
file = "clipboard-128",
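
The clipboard helpers above read and write tab-separated text through the "clipboard-128" connection, which is Windows-specific. A hypothetical round trip under that assumption (not a documented example; the mark package is assumed to be attached):

df <- data.frame(x = 1:3, y = c("a", "b", "c"))
write_clipboard(df)                     # dispatches to write_clipboard.data.frame()
read_clipboard(method = "data.frame")   # re-parses the tab-separated clipboard text
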
14 changes: 7 additions & 7 deletions R/counts.R
@@ -61,8 +61,6 @@ counts.default <- function(x, sort = FALSE, ...) {
out
}

# x <- structure(c(2L, NA, 3L, NA, NA, 3L, NA, NA, 1L, 1L), .Label = c("2", "4", "1", "0"), class = "factor")

#' @export
counts.factor <- function(x, ...) {
x <- fact(x)
@@ -109,7 +107,7 @@ props <- function(x, ...) {
#' @rdname counts
#' @export
#' @param na.rm If `TRUE` will remove NA values from proportions
props.default <- function(x, sort = FALSE, na.rm = FALSE, ...) {
props.default <- function(x, sort = FALSE, na.rm = FALSE, ...) { # nolint: object_name_linter, line_length_linter.
res <- counts(x, sort = sort)

n <- length(res)
@@ -128,7 +126,7 @@ props.data.frame <- function(
x,
cols,
sort = FALSE,
na.rm = FALSE,
na.rm = FALSE, # nolint: object_name_linter.
...,
.name = "prop"
) {
@@ -142,7 +140,9 @@
na_ind <- if (na.rm) which(!stats::complete.cases(values))
props_n(values, sort = sort, name = .name, na_ind = na_ind)
} else {
vector2df(props(x[[cols]], sort = sort, na.rm = na.rm), cols, .name %||% "prop")
vector2df(
props(x[[cols]], sort = sort, na.rm = na.rm), cols, .name %||% "prop"
)
}

remake_df(out, x[, cols, drop = FALSE])
@@ -164,7 +164,7 @@ counts_n <- function(x, name = "freq", sort = FALSE) {
len <- length(res)
non_dupe <- !duplicated(ints, nmax = len)
out <- x[non_dupe, ]
attr(out, "row.names") <- 1:len
attr(out, "row.names") <- seq_len(len) # nolint: object_name_linter.

cn <- colnames(x)
colnames(out) <- cn
@@ -178,7 +178,7 @@
props_n <- function(
x,
sort = FALSE,
na.rm = FALSE,
na.rm = FALSE, # nolint: object_name_linter.
name = "props",
na_ind = NULL
) {
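
As a reading aid for the `counts()`/`props()` hunks above: `counts()` tallies the unique values of a vector and `props()` rescales those tallies to proportions, with `na.rm` controlling whether missing values are dropped first. An illustrative sketch; the exact names and ordering of the output are assumptions (the mark package is assumed to be attached):

x <- c("a", "a", "b", NA)
counts(x)                 # e.g. a = 2, b = 1, plus the NA tally
props(x)                  # the proportions corresponding to those counts
props(x, na.rm = TRUE)    # presumably drops NA before computing proportions
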
