CRAN Package Check Results for Package easyPubMed

Last updated on 2019-01-24 06:47:06 CET.

Flavor                              Version  Tinstall  Tcheck  Ttotal  Status  Flags
r-devel-linux-x86_64-debian-clang   2.11         1.29   46.26   47.55  WARN    --no-examples
r-devel-linux-x86_64-debian-gcc     2.11         1.17   41.16   42.33  WARN    --no-examples
r-devel-linux-x86_64-fedora-clang   2.11                        88.72  OK
r-devel-linux-x86_64-fedora-gcc     2.11                        45.89  OK
r-devel-windows-ix86+x86_64         2.11         5.00  150.00  155.00  OK
r-patched-linux-x86_64              2.5          1.71   25.91   27.62  WARN    --no-examples
r-patched-solaris-x86               2.11                       109.50  OK      --no-examples
r-release-linux-x86_64              2.5          1.59   25.78   27.37  WARN    --no-examples
r-release-windows-ix86+x86_64       2.5          4.00  100.00  104.00  ERROR
r-release-osx-x86_64                2.5                                OK
r-oldrel-windows-ix86+x86_64        2.5          5.00   74.00   79.00  WARN
r-oldrel-osx-x86_64                 2.5                                OK

Check Details

Version: 2.11
Flags: --no-examples
Check: re-building of vignette outputs
Result: WARN
    Error(s) in re-building vignettes:
     ...
    --- re-building ‘easyPubMed_01_getting_started.Rmd’ using rmarkdown
    Processing PubMed data ...................................... ----------- FAILURE REPORT --------------
     --- failure: the condition has length > 1 ---
     --- srcref ---
    :
     --- package (from environment) ---
    easyPubMed
     --- call from context ---
    custom_grep(xml_data = tmp.authors, tag = "Author", format = "char")
     --- call from argument ---
    while (nchar(x) > 0 & regexpr(tag.op, x) > 0 & regexpr(tag.cl,
     x) > 0) {
     tag.op.pos <- regexpr(tag.op, x)
     nu.x <- substr(x, (tag.op.pos - 1), nchar(x))
     inner.trim <- regexpr(">", nu.x, fixed = TRUE)
     nu.x <- substr(nu.x, (inner.trim + 1), nchar(nu.x))
     tag.cl.pos <- regexpr(tag.cl, nu.x)
     tag.cl.full <- tag.cl.pos + attributes(tag.cl.pos)$match.length +
     1
     x <- substr(nu.x, tag.cl.full, nchar(x))
     nu.x <- substr(nu.x, 1, (tag.cl.pos - 1))
     out.result[[i]] <- nu.x
     i <- i + 1
    }
     --- R stacktrace ---
    where 1: custom_grep(xml_data = tmp.authors, tag = "Author", format = "char")
    where 2: doTryCatch(return(expr), name, parentenv, handler)
    where 3: tryCatchOne(expr, names, parentenv, handlers[[1L]])
    where 4: tryCatchList(expr, classes, parentenv, handlers)
    where 5: tryCatch({
     tmp.article <- custom_grep(xml_data = pubmedArticle, tag = "PubmedArticle",
     format = "char")
     if (is.null(tmp.article)) {
     message("An error occurred")
     return(NULL)
     }
     tmp.title <- custom_grep(xml_data = tmp.article, tag = "ArticleTitle",
     format = "char")
     if (length(tmp.title) > 1) {
     tmp.title <- paste(tmp.title, collapse = " ", sep = " ")
     }
     else if (length(tmp.title) < 1) {
     tmp.title <- NA
     }
     tmp.abstract <- custom_grep(xml_data = tmp.article, tag = "AbstractText",
     format = "char")
     if (length(tmp.abstract) > 1) {
     tmp.abstract <- paste(tmp.abstract, collapse = " ", sep = " ")
     if (max_chars >= 0) {
     tmp.abstract <- gsub("</{0,1}i>", "", tmp.abstract,
     ignore.case = T)
     tmp.abstract <- gsub("</{0,1}b>", "", tmp.abstract,
     ignore.case = T)
     tmp.abstract <- gsub("</{0,1}sub>", "", tmp.abstract,
     ignore.case = T)
     tmp.abstract <- gsub("</{0,1}exp>", "", tmp.abstract,
     ignore.case = T)
     tmp.abstract <- substr(tmp.abstract, 0, max_chars)
     }
     }
     else if (length(tmp.abstract) < 1) {
     tmp.abstract <- NA
     }
     else {
     if (max_chars >= 0) {
     tmp.abstract <- substr(tmp.abstract, 0, max_chars)
     tmp.abstract <- gsub("</{0,1}i>", "", tmp.abstract,
     ignore.case = T)
     tmp.abstract <- gsub("</{0,1}b>", "", tmp.abstract,
     ignore.case = T)
     tmp.abstract <- gsub("</{0,1}sub>", "", tmp.abstract,
     ignore.case = T)
     tmp.abstract <- gsub("</{0,1}exp>", "", tmp.abstract,
     ignore.case = T)
     }
     }
     my.dateType <- c("DateCompleted", "DateCreated", "DateRevised",
     "PubDate")
     sel.dateType <- which(sapply(my.dateType, (function(xi) {
     regexpr(xi, tmp.article) > 0
     })))
     if (length(sel.dateType) < 1) {
     tmp.date <- c(Year = NA, Month = NA, Day = NA)
     }
     else {
     sel.dateType <- sel.dateType[1]
     tmp.date <- custom_grep(xml_data = tmp.article, tag = my.dateType[sel.dateType],
     format = "char")
     tmp.date <- sapply(c("Year", "Month", "Day"), (function(tt) {
     tdat.el <- custom_grep(xml_data = tmp.date, tag = tt,
     format = "char")
     ifelse(is.null(tdat.el), NA, tdat.el[1])
     }))
     }
     tmp.paperID <- custom_grep(xml_data = tmp.article, tag = "ArticleIdList",
     format = "char")
     if (is.null(tmp.paperID)) {
     message("An error occurred")
     return(NULL)
     }
     else {
     tmp.paperID <- gsub("[[:space:]]", "", tmp.paperID[1])
     }
     tmp.PMID <- gsub("^(.*ArticleIdIdType=\\\"pubmed\\\")([[:space:]]|[[:alnum:]]){0,20}>",
     "", tmp.paperID)
     tmp.PMID <- gsub("<.*$", "", tmp.PMID)
     tmp.DOI <- gsub("^(.*ArticleIdIdType=\\\"doi\\\")([[:space:]]|[[:alnum:]]){0,20}>",
     "", tmp.paperID)
     tmp.DOI <- gsub("<.*$", "", tmp.DOI)
     tmp.jabbrv <- custom_grep(xml_data = tmp.article, tag = "ISOAbbreviation",
     format = "char")
     tmp.jabbrv <- ifelse(is.null(tmp.jabbrv), NA, tmp.jabbrv)
     tmp.journal <- custom_grep(xml_data = tmp.article, tag = "Title",
     format = "char")
     tmp.journal <- ifelse(is.null(tmp.journal), NA, tmp.journal)
     tmp.keys <- tryCatch({
     if (getKeywords) {
     tmp.keys <- custom_grep(xml_data = tmp.article, tag = "Keyword",
     format = "char")
     tmp.mesh <- custom_grep(xml_data = tmp.article, tag = "MeshHeading",
     format = "char")
     if (length(tmp.mesh) > 0) {
     tmp.mesh <- sapply(tmp.mesh, function(xxm) {
     custom_grep(xml_data = xxm, tag = "DescriptorName",
     format = "char")
     })
     }
     tmp.keys <- c(tmp.keys, tmp.mesh)
     if (length(tmp.keys) > 1) {
     tmp.keys <- paste(tmp.keys, collapse = "; ")
     }
     else if (length(tmp.keys) < 1) {
     tmp.keys <- NA
     }
     }
     else {
     NA
     }
     }, error = function(e) {
     NA
     })
     tmp.resout <- c(pmid = tmp.PMID, doi = tmp.DOI, title = tmp.title,
     abstract = tmp.abstract, year = as.vector(tmp.date[1]),
     month = as.vector(tmp.date[2]), day = as.vector(tmp.date[3]),
     jabbrv = tmp.jabbrv, journal = tmp.journal, keywords = tmp.keys)
     tmp.authors <- custom_grep(xml_data = tmp.article, tag = "AuthorList",
     format = "char")
     if (length(tmp.authors) < 1 | !getAuthors) {
     final.mat <- data.frame(rbind(c(tmp.resout, lastname = NA,
     firstname = NA, address = NA, email = NA)), stringsAsFactors = FALSE)
     }
     else {
     author.list <- custom_grep(xml_data = tmp.authors, tag = "Author",
     format = "char")
     final.mat <- do.call(rbind, lapply(author.list, (function(al) {
     tmp.lastnm <- custom_grep(xml_data = al, tag = "LastName",
     format = "char")
     tmp.firstnm <- custom_grep(xml_data = al, tag = "ForeName",
     format = "char")
     tmp.email <- regexpr("([[:alnum:]]|\\.|\\-\\_){3,200}@([[:alnum:]]|\\.|\\-\\_){3,200}(\\.)([[:alnum:]]){2,6}",
     al)
     if (tmp.email > 0) {
     tmp.email <- substr(al, tmp.email, tmp.email +
     attributes(tmp.email)$match.length - 1)
     }
     else {
     tmp.email <- NA
     }
     if (regexpr("Affiliation", al) > 0) {
     tmp.add <- custom_grep(al, "Affiliation", format = "char")[1]
     tmp.add <- trim_address(tmp.add)
     }
     else {
     tmp.add <- NA
     }
     c(tmp.resout, lastname = tmp.lastnm, firstname = tmp.firstnm,
     address = tmp.add, email = tmp.email)
     })))
     rownames(final.mat) <- NULL
     final.mat <- data.frame(final.mat, stringsAsFactors = FALSE)
     DESELECT <- is.na(final.mat$lastname) | is.na(final.mat$firstname)
     if (length(DESELECT) > 0 & sum(DESELECT) > 0)
     final.mat <- final.mat[!DESELECT, ]
     if (autofill) {
     tmp.address <- final.mat[, "address"]
     na.pos <- is.na(tmp.address)
     if (sum(na.pos) != length(tmp.address)) {
     tmp.list <- lapply(tmp.address, function(x) {
     x
     })
     cur.add <- tmp.list[[(which(!na.pos)[1])]]
     for (i in 1:length(na.pos)) {
     if (na.pos[i]) {
     tmp.list[[i]] <- cur.add
     }
     else {
     cur.add <- tmp.list[[i]]
     }
     }
     final.mat[, "address"] <- do.call(c, tmp.list)
     }
     }
     }
     if (ncol(final.mat) != 14) {
     final.mat <- NULL
     }
    }, error = function(e) {
     NULL
    }, finally = {
     options(warn = 0)
     return(final.mat)
    })
    where 6: article_to_df(pubmedArticle = art, autofill = autofill, max_chars = max_chars,
     getKeywords = getKeywords, getAuthors = TRUE)
    where 7: doTryCatch(return(expr), name, parentenv, handler)
    where 8: tryCatchOne(expr, names, parentenv, handlers[[1L]])
    where 9: tryCatchList(expr, classes, parentenv, handlers)
    where 10: tryCatch({
     article_to_df(pubmedArticle = art, autofill = autofill, max_chars = max_chars,
     getKeywords = getKeywords, getAuthors = TRUE)
    }, error = function(e) {
     NULL
    })
    where 11: FUN(X[[i]], ...)
    where 12: lapply(1:length(paper.data), (function(i) {
     if (length(paper.data) > 50) {
     rep.dot <- as.integer(seq(1, length(paper.data), length.out = 50))
     if (i %in% rep.dot)
     message(".", appendLF = FALSE)
     }
     else {
     message(".", appendLF = FALSE)
     }
     art <- paper.data[[i]]
     out <- tryCatch({
     article_to_df(pubmedArticle = art, autofill = autofill,
     max_chars = max_chars, getKeywords = getKeywords,
     getAuthors = TRUE)
     }, error = function(e) {
     NULL
     })
     if (is.null(out)) {
     out <- data.frame(pmid = NA, doi = NA, title = NA, abstract = NA,
     year = NA, month = NA, day = NA, jabbrv = NA, journal = NA,
     keywords = NA, lastname = NA, firstname = NA, address = NA,
     email = NA)
     }
     if (included_authors == "first") {
     out <- out[1, ]
     }
     else if (included_authors == "last") {
     out <- out[nrow(out), ]
     }
     out2 <- data.frame(rebuild = (1:nrow(out)))
     for (jj in 1:length(expFields)) {
     if (expFields[jj] %in% names(out)) {
     out2[, expFields[jj]] <- out[, expFields[jj]]
     }
     else {
     out2[, expFields[jj]] <- NA
     }
     }
     out2[, -1]
    }))
    where 13: table_articles_byAuth(pubmed_data = new_PM_file, included_authors = "first",
     max_chars = 0, encoding = "ASCII")
    where 14: eval(expr, envir, enclos)
    where 15: eval(expr, envir, enclos)
    where 16: withVisible(eval(expr, envir, enclos))
    where 17: withCallingHandlers(withVisible(eval(expr, envir, enclos)), warning = wHandler,
     error = eHandler, message = mHandler)
    where 18: handle(ev <- withCallingHandlers(withVisible(eval(expr, envir,
     enclos)), warning = wHandler, error = eHandler, message = mHandler))
    where 19: timing_fn(handle(ev <- withCallingHandlers(withVisible(eval(expr,
     envir, enclos)), warning = wHandler, error = eHandler, message = mHandler)))
    where 20: evaluate_call(expr, parsed$src[[i]], envir = envir, enclos = enclos,
     debug = debug, last = i == length(out), use_try = stop_on_error !=
     2L, keep_warning = keep_warning, keep_message = keep_message,
     output_handler = output_handler, include_timing = include_timing)
    where 21: evaluate::evaluate(...)
    where 22: evaluate(code, envir = env, new_device = FALSE, keep_warning = !isFALSE(options$warning),
     keep_message = !isFALSE(options$message), stop_on_error = if (options$error &&
     options$include) 0L else 2L, output_handler = knit_handlers(options$render,
     options))
    where 23: in_dir(input_dir(), evaluate(code, envir = env, new_device = FALSE,
     keep_warning = !isFALSE(options$warning), keep_message = !isFALSE(options$message),
     stop_on_error = if (options$error && options$include) 0L else 2L,
     output_handler = knit_handlers(options$render, options)))
    where 24: block_exec(params)
    where 25: call_block(x)
    where 26: process_group.block(group)
    where 27: process_group(group)
    where 28: withCallingHandlers(if (tangle) process_tangle(group) else process_group(group),
     error = function(e) {
     setwd(wd)
     cat(res, sep = "\n", file = output %n% "")
     message("Quitting from lines ", paste(current_lines(i),
     collapse = "-"), " (", knit_concord$get("infile"),
     ") ")
     })
    where 29: process_file(text, output)
    where 30: knitr::knit(knit_input, knit_output, envir = envir, quiet = quiet,
     encoding = encoding)
    where 31: rmarkdown::render(file, encoding = encoding, quiet = quiet, envir = globalenv(),
     ...)
    where 32: vweave_rmarkdown(...)
    where 33: engine$weave(file, quiet = quiet, encoding = enc)
    where 34: doTryCatch(return(expr), name, parentenv, handler)
    where 35: tryCatchOne(expr, names, parentenv, handlers[[1L]])
    where 36: tryCatchList(expr, classes, parentenv, handlers)
    where 37: tryCatch({
     engine$weave(file, quiet = quiet, encoding = enc)
     setwd(startdir)
     output <- find_vignette_product(name, by = "weave", engine = engine)
     if (!have.makefile && vignette_is_tex(output)) {
     texi2pdf(file = output, clean = FALSE, quiet = quiet)
     output <- find_vignette_product(name, by = "texi2pdf",
     engine = engine)
     }
     outputs <- c(outputs, output)
    }, error = function(e) {
     thisOK <<- FALSE
     fails <<- c(fails, file)
     message(gettextf("Error: processing vignette '%s' failed with diagnostics:\n%s",
     file, conditionMessage(e)))
    })
    where 38: tools:::buildVignettes(dir = "/home/hornik/tmp/R.check/r-devel-clang/Work/PKGS/easyPubMed.Rcheck/vign_test/easyPubMed",
     ser_elibs = "/tmp/RtmputxGNu/file7835f9cd80e.rds")
    
     --- value of length: 2 type: logical ---
    [1] TRUE TRUE
     --- function from context ---
    function (xml_data, tag, format = "list")
    {
     x <- xml_data
     tag.op <- paste("\\<", tag, "((\\>)|([[:space:]]([^[<]]*)\\>))",
     sep = "")
     tag.cl <- paste("(<\\/)", tag, "(\\>)", sep = "")
     out.result <- list()
     i = 1
     while (nchar(x) > 0 & regexpr(tag.op, x) > 0 & regexpr(tag.cl,
     x) > 0) {
     tag.op.pos <- regexpr(tag.op, x)
     nu.x <- substr(x, (tag.op.pos - 1), nchar(x))
     inner.trim <- regexpr(">", nu.x, fixed = TRUE)
     nu.x <- substr(nu.x, (inner.trim + 1), nchar(nu.x))
     tag.cl.pos <- regexpr(tag.cl, nu.x)
     tag.cl.full <- tag.cl.pos + attributes(tag.cl.pos)$match.length +
     1
     x <- substr(nu.x, tag.cl.full, nchar(x))
     nu.x <- substr(nu.x, 1, (tag.cl.pos - 1))
     out.result[[i]] <- nu.x
     i <- i + 1
     }
     if (format != "list") {
     out.result <- do.call(c, out.result)
     }
     return(out.result)
    }
    <bytecode: 0x19aa428>
    <environment: namespace:easyPubMed>
     --- function search by body ---
    Function custom_grep in namespace easyPubMed has this body.
     ----------- END OF FAILURE REPORT --------------
    Fatal error: the condition has length > 1
Flavor: r-devel-linux-x86_64-debian-clang
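
The failure reported above originates in the while() condition of custom_grep(): when xml_data is a character vector of length 2, vectorized calls such as regexpr(tag.op, x) > 0 return a length-2 logical (the "value of length: 2 type: logical" shown in the report), which r-devel with the length-1 condition check enabled treats as a fatal error instead of silently using the first element. A minimal sketch of the failure mode and one possible guard, with hypothetical input and a simplified literal tag rather than the package's actual regular expressions or fix:

    # Reproduce the failure mode: a length-2 character vector makes the condition length 2.
    x <- c("<Author>A</Author>", "<Author>B</Author>")
    tag.op <- "<Author>"
    nchar(x) > 0 & regexpr(tag.op, x, fixed = TRUE) > 0   # TRUE TRUE -> invalid while() condition

    # Possible guard: collapse the input to a single string (or reduce the test with all()),
    # so the condition always evaluates to a single logical value.
    x <- paste(x, collapse = "")
    while (nchar(x) > 0 && regexpr(tag.op, x, fixed = TRUE) > 0) {
      # ... extract one <Author> element and shorten x, as custom_grep() does ...
      break   # placeholder so this sketch terminates
    }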

Version: 2.11
Flags: --no-examples
Check: re-building of vignette outputs
Result: WARN
    Error(s) in re-building vignettes:
     ...
    --- re-building ‘easyPubMed_01_getting_started.Rmd’ using rmarkdown
    Processing PubMed data ........................................... done!
    Processing PubMed data .......... ----------- FAILURE REPORT --------------
     --- failure: the condition has length > 1 ---
     --- srcref ---
    :
     --- package (from environment) ---
    easyPubMed
     --- call from context ---
    custom_grep(xml_data = tmp.authors, tag = "Author", format = "char")
     --- call from argument ---
    while (nchar(x) > 0 & regexpr(tag.op, x) > 0 & regexpr(tag.cl,
     x) > 0) {
     tag.op.pos <- regexpr(tag.op, x)
     nu.x <- substr(x, (tag.op.pos - 1), nchar(x))
     inner.trim <- regexpr(">", nu.x, fixed = TRUE)
     nu.x <- substr(nu.x, (inner.trim + 1), nchar(nu.x))
     tag.cl.pos <- regexpr(tag.cl, nu.x)
     tag.cl.full <- tag.cl.pos + attributes(tag.cl.pos)$match.length +
     1
     x <- substr(nu.x, tag.cl.full, nchar(x))
     nu.x <- substr(nu.x, 1, (tag.cl.pos - 1))
     out.result[[i]] <- nu.x
     i <- i + 1
    }
     --- R stacktrace ---
    where 1: custom_grep(xml_data = tmp.authors, tag = "Author", format = "char")
    where 2: doTryCatch(return(expr), name, parentenv, handler)
    where 3: tryCatchOne(expr, names, parentenv, handlers[[1L]])
    where 4: tryCatchList(expr, classes, parentenv, handlers)
    where 5: tryCatch({
     tmp.article <- custom_grep(xml_data = pubmedArticle, tag = "PubmedArticle",
     format = "char")
     if (is.null(tmp.article)) {
     message("An error occurred")
     return(NULL)
     }
     tmp.title <- custom_grep(xml_data = tmp.article, tag = "ArticleTitle",
     format = "char")
     if (length(tmp.title) > 1) {
     tmp.title <- paste(tmp.title, collapse = " ", sep = " ")
     }
     else if (length(tmp.title) < 1) {
     tmp.title <- NA
     }
     tmp.abstract <- custom_grep(xml_data = tmp.article, tag = "AbstractText",
     format = "char")
     if (length(tmp.abstract) > 1) {
     tmp.abstract <- paste(tmp.abstract, collapse = " ", sep = " ")
     if (max_chars >= 0) {
     tmp.abstract <- gsub("</{0,1}i>", "", tmp.abstract,
     ignore.case = T)
     tmp.abstract <- gsub("</{0,1}b>", "", tmp.abstract,
     ignore.case = T)
     tmp.abstract <- gsub("</{0,1}sub>", "", tmp.abstract,
     ignore.case = T)
     tmp.abstract <- gsub("</{0,1}exp>", "", tmp.abstract,
     ignore.case = T)
     tmp.abstract <- substr(tmp.abstract, 0, max_chars)
     }
     }
     else if (length(tmp.abstract) < 1) {
     tmp.abstract <- NA
     }
     else {
     if (max_chars >= 0) {
     tmp.abstract <- substr(tmp.abstract, 0, max_chars)
     tmp.abstract <- gsub("</{0,1}i>", "", tmp.abstract,
     ignore.case = T)
     tmp.abstract <- gsub("</{0,1}b>", "", tmp.abstract,
     ignore.case = T)
     tmp.abstract <- gsub("</{0,1}sub>", "", tmp.abstract,
     ignore.case = T)
     tmp.abstract <- gsub("</{0,1}exp>", "", tmp.abstract,
     ignore.case = T)
     }
     }
     my.dateType <- c("DateCompleted", "DateCreated", "DateRevised",
     "PubDate")
     sel.dateType <- which(sapply(my.dateType, (function(xi) {
     regexpr(xi, tmp.article) > 0
     })))
     if (length(sel.dateType) < 1) {
     tmp.date <- c(Year = NA, Month = NA, Day = NA)
     }
     else {
     sel.dateType <- sel.dateType[1]
     tmp.date <- custom_grep(xml_data = tmp.article, tag = my.dateType[sel.dateType],
     format = "char")
     tmp.date <- sapply(c("Year", "Month", "Day"), (function(tt) {
     tdat.el <- custom_grep(xml_data = tmp.date, tag = tt,
     format = "char")
     ifelse(is.null(tdat.el), NA, tdat.el[1])
     }))
     }
     tmp.paperID <- custom_grep(xml_data = tmp.article, tag = "ArticleIdList",
     format = "char")
     if (is.null(tmp.paperID)) {
     message("An error occurred")
     return(NULL)
     }
     else {
     tmp.paperID <- gsub("[[:space:]]", "", tmp.paperID[1])
     }
     tmp.PMID <- gsub("^(.*ArticleIdIdType=\\\"pubmed\\\")([[:space:]]|[[:alnum:]]){0,20}>",
     "", tmp.paperID)
     tmp.PMID <- gsub("<.*$", "", tmp.PMID)
     tmp.DOI <- gsub("^(.*ArticleIdIdType=\\\"doi\\\")([[:space:]]|[[:alnum:]]){0,20}>",
     "", tmp.paperID)
     tmp.DOI <- gsub("<.*$", "", tmp.DOI)
     tmp.jabbrv <- custom_grep(xml_data = tmp.article, tag = "ISOAbbreviation",
     format = "char")
     tmp.jabbrv <- ifelse(is.null(tmp.jabbrv), NA, tmp.jabbrv)
     tmp.journal <- custom_grep(xml_data = tmp.article, tag = "Title",
     format = "char")
     tmp.journal <- ifelse(is.null(tmp.journal), NA, tmp.journal)
     tmp.keys <- tryCatch({
     if (getKeywords) {
     tmp.keys <- custom_grep(xml_data = tmp.article, tag = "Keyword",
     format = "char")
     tmp.mesh <- custom_grep(xml_data = tmp.article, tag = "MeshHeading",
     format = "char")
     if (length(tmp.mesh) > 0) {
     tmp.mesh <- sapply(tmp.mesh, function(xxm) {
     custom_grep(xml_data = xxm, tag = "DescriptorName",
     format = "char")
     })
     }
     tmp.keys <- c(tmp.keys, tmp.mesh)
     if (length(tmp.keys) > 1) {
     tmp.keys <- paste(tmp.keys, collapse = "; ")
     }
     else if (length(tmp.keys) < 1) {
     tmp.keys <- NA
     }
     }
     else {
     NA
     }
     }, error = function(e) {
     NA
     })
     tmp.resout <- c(pmid = tmp.PMID, doi = tmp.DOI, title = tmp.title,
     abstract = tmp.abstract, year = as.vector(tmp.date[1]),
     month = as.vector(tmp.date[2]), day = as.vector(tmp.date[3]),
     jabbrv = tmp.jabbrv, journal = tmp.journal, keywords = tmp.keys)
     tmp.authors <- custom_grep(xml_data = tmp.article, tag = "AuthorList",
     format = "char")
     if (length(tmp.authors) < 1 | !getAuthors) {
     final.mat <- data.frame(rbind(c(tmp.resout, lastname = NA,
     firstname = NA, address = NA, email = NA)), stringsAsFactors = FALSE)
     }
     else {
     author.list <- custom_grep(xml_data = tmp.authors, tag = "Author",
     format = "char")
     final.mat <- do.call(rbind, lapply(author.list, (function(al) {
     tmp.lastnm <- custom_grep(xml_data = al, tag = "LastName",
     format = "char")
     tmp.firstnm <- custom_grep(xml_data = al, tag = "ForeName",
     format = "char")
     tmp.email <- regexpr("([[:alnum:]]|\\.|\\-\\_){3,200}@([[:alnum:]]|\\.|\\-\\_){3,200}(\\.)([[:alnum:]]){2,6}",
     al)
     if (tmp.email > 0) {
     tmp.email <- substr(al, tmp.email, tmp.email +
     attributes(tmp.email)$match.length - 1)
     }
     else {
     tmp.email <- NA
     }
     if (regexpr("Affiliation", al) > 0) {
     tmp.add <- custom_grep(al, "Affiliation", format = "char")[1]
     tmp.add <- trim_address(tmp.add)
     }
     else {
     tmp.add <- NA
     }
     c(tmp.resout, lastname = tmp.lastnm, firstname = tmp.firstnm,
     address = tmp.add, email = tmp.email)
     })))
     rownames(final.mat) <- NULL
     final.mat <- data.frame(final.mat, stringsAsFactors = FALSE)
     DESELECT <- is.na(final.mat$lastname) | is.na(final.mat$firstname)
     if (length(DESELECT) > 0 & sum(DESELECT) > 0)
     final.mat <- final.mat[!DESELECT, ]
     if (autofill) {
     tmp.address <- final.mat[, "address"]
     na.pos <- is.na(tmp.address)
     if (sum(na.pos) != length(tmp.address)) {
     tmp.list <- lapply(tmp.address, function(x) {
     x
     })
     cur.add <- tmp.list[[(which(!na.pos)[1])]]
     for (i in 1:length(na.pos)) {
     if (na.pos[i]) {
     tmp.list[[i]] <- cur.add
     }
     else {
     cur.add <- tmp.list[[i]]
     }
     }
     final.mat[, "address"] <- do.call(c, tmp.list)
     }
     }
     }
     if (ncol(final.mat) != 14) {
     final.mat <- NULL
     }
    }, error = function(e) {
     NULL
    }, finally = {
     options(warn = 0)
     return(final.mat)
    })
    where 6: article_to_df(pubmedArticle = art, autofill = autofill, max_chars = max_chars,
     getKeywords = getKeywords, getAuthors = TRUE)
    where 7: doTryCatch(return(expr), name, parentenv, handler)
    where 8: tryCatchOne(expr, names, parentenv, handlers[[1L]])
    where 9: tryCatchList(expr, classes, parentenv, handlers)
    where 10: tryCatch({
     article_to_df(pubmedArticle = art, autofill = autofill, max_chars = max_chars,
     getKeywords = getKeywords, getAuthors = TRUE)
    }, error = function(e) {
     NULL
    })
    where 11: FUN(X[[i]], ...)
    where 12: lapply(1:length(paper.data), (function(i) {
     if (length(paper.data) > 50) {
     rep.dot <- as.integer(seq(1, length(paper.data), length.out = 50))
     if (i %in% rep.dot)
     message(".", appendLF = FALSE)
     }
     else {
     message(".", appendLF = FALSE)
     }
     art <- paper.data[[i]]
     out <- tryCatch({
     article_to_df(pubmedArticle = art, autofill = autofill,
     max_chars = max_chars, getKeywords = getKeywords,
     getAuthors = TRUE)
     }, error = function(e) {
     NULL
     })
     if (is.null(out)) {
     out <- data.frame(pmid = NA, doi = NA, title = NA, abstract = NA,
     year = NA, month = NA, day = NA, jabbrv = NA, journal = NA,
     keywords = NA, lastname = NA, firstname = NA, address = NA,
     email = NA)
     }
     if (included_authors == "first") {
     out <- out[1, ]
     }
     else if (included_authors == "last") {
     out <- out[nrow(out), ]
     }
     out2 <- data.frame(rebuild = (1:nrow(out)))
     for (jj in 1:length(expFields)) {
     if (expFields[jj] %in% names(out)) {
     out2[, expFields[jj]] <- out[, expFields[jj]]
     }
     else {
     out2[, expFields[jj]] <- NA
     }
     }
     out2[, -1]
    }))
    where 13: table_articles_byAuth(pubmed_data = fetched_data, included_authors = "first",
     max_chars = 0, encoding = "ASCII")
    where 14: eval(expr, envir, enclos)
    where 15: eval(expr, envir, enclos)
    where 16: withVisible(eval(expr, envir, enclos))
    where 17: withCallingHandlers(withVisible(eval(expr, envir, enclos)), warning = wHandler,
     error = eHandler, message = mHandler)
    where 18: handle(ev <- withCallingHandlers(withVisible(eval(expr, envir,
     enclos)), warning = wHandler, error = eHandler, message = mHandler))
    where 19: timing_fn(handle(ev <- withCallingHandlers(withVisible(eval(expr,
     envir, enclos)), warning = wHandler, error = eHandler, message = mHandler)))
    where 20: evaluate_call(expr, parsed$src[[i]], envir = envir, enclos = enclos,
     debug = debug, last = i == length(out), use_try = stop_on_error !=
     2L, keep_warning = keep_warning, keep_message = keep_message,
     output_handler = output_handler, include_timing = include_timing)
    where 21: evaluate::evaluate(...)
    where 22: evaluate(code, envir = env, new_device = FALSE, keep_warning = !isFALSE(options$warning),
     keep_message = !isFALSE(options$message), stop_on_error = if (options$error &&
     options$include) 0L else 2L, output_handler = knit_handlers(options$render,
     options))
    where 23: in_dir(input_dir(), evaluate(code, envir = env, new_device = FALSE,
     keep_warning = !isFALSE(options$warning), keep_message = !isFALSE(options$message),
     stop_on_error = if (options$error && options$include) 0L else 2L,
     output_handler = knit_handlers(options$render, options)))
    where 24: block_exec(params)
    where 25: call_block(x)
    where 26: process_group.block(group)
    where 27: process_group(group)
    where 28: withCallingHandlers(if (tangle) process_tangle(group) else process_group(group),
     error = function(e) {
     setwd(wd)
     cat(res, sep = "\n", file = output %n% "")
     message("Quitting from lines ", paste(current_lines(i),
     collapse = "-"), " (", knit_concord$get("infile"),
     ") ")
     })
    where 29: process_file(text, output)
    where 30: knitr::knit(knit_input, knit_output, envir = envir, quiet = quiet,
     encoding = encoding)
    where 31: rmarkdown::render(file, encoding = encoding, quiet = quiet, envir = globalenv(),
     ...)
    where 32: vweave_rmarkdown(...)
    where 33: engine$weave(file, quiet = quiet, encoding = enc)
    where 34: doTryCatch(return(expr), name, parentenv, handler)
    where 35: tryCatchOne(expr, names, parentenv, handlers[[1L]])
    where 36: tryCatchList(expr, classes, parentenv, handlers)
    where 37: tryCatch({
     engine$weave(file, quiet = quiet, encoding = enc)
     setwd(startdir)
     output <- find_vignette_product(name, by = "weave", engine = engine)
     if (!have.makefile && vignette_is_tex(output)) {
     texi2pdf(file = output, clean = FALSE, quiet = quiet)
     output <- find_vignette_product(name, by = "texi2pdf",
     engine = engine)
     }
     outputs <- c(outputs, output)
    }, error = function(e) {
     thisOK <<- FALSE
     fails <<- c(fails, file)
     message(gettextf("Error: processing vignette '%s' failed with diagnostics:\n%s",
     file, conditionMessage(e)))
    })
    where 38: tools:::buildVignettes(dir = "/home/hornik/tmp/R.check/r-devel-gcc/Work/PKGS/easyPubMed.Rcheck/vign_test/easyPubMed",
     ser_elibs = "/home/hornik/tmp/scratch/Rtmp5jRl2M/file623055cd4118.rds")
    
     --- value of length: 2 type: logical ---
    [1] TRUE TRUE
     --- function from context ---
    function (xml_data, tag, format = "list")
    {
     x <- xml_data
     tag.op <- paste("\\<", tag, "((\\>)|([[:space:]]([^[<]]*)\\>))",
     sep = "")
     tag.cl <- paste("(<\\/)", tag, "(\\>)", sep = "")
     out.result <- list()
     i = 1
     while (nchar(x) > 0 & regexpr(tag.op, x) > 0 & regexpr(tag.cl,
     x) > 0) {
     tag.op.pos <- regexpr(tag.op, x)
     nu.x <- substr(x, (tag.op.pos - 1), nchar(x))
     inner.trim <- regexpr(">", nu.x, fixed = TRUE)
     nu.x <- substr(nu.x, (inner.trim + 1), nchar(nu.x))
     tag.cl.pos <- regexpr(tag.cl, nu.x)
     tag.cl.full <- tag.cl.pos + attributes(tag.cl.pos)$match.length +
     1
     x <- substr(nu.x, tag.cl.full, nchar(x))
     nu.x <- substr(nu.x, 1, (tag.cl.pos - 1))
     out.result[[i]] <- nu.x
     i <- i + 1
     }
     if (format != "list") {
     out.result <- do.call(c, out.result)
     }
     return(out.result)
    }
    <bytecode: 0x5609fc8de618>
    <environment: namespace:easyPubMed>
     --- function search by body ---
    Function custom_grep in namespace easyPubMed has this body.
     ----------- END OF FAILURE REPORT --------------
    Fatal error: the condition has length > 1
Flavor: r-devel-linux-x86_64-debian-gcc

Version: 2.5
Flags: --no-examples
Check: re-building of vignette outputs
Result: WARN
    Error in re-building vignettes:
     ...
    Quitting from lines 39-50 (easyPM_vignette_html.Rmd)
    Error: processing vignette ‘easyPM_vignette_html.Rmd’ failed with diagnostics:
    1: Input is not proper UTF-8, indicate encoding !
    Bytes: 0xF0 0xE3 0xF9 0xD9
    2: Opening and ending tag mismatch: Citation line 2046 and Reference
    3: expected '>'
    4: Opening and ending tag mismatch: ReferenceList line 1918 and PubmedData
    5: Opening and ending tag mismatch: PubmedData line 1871 and PubmedArticle
    6: Opening and ending tag mismatch: ArticleIdList line 2748 and Article
    7: Opening and ending tag mismatch: Reference line 2746 and MedlineCitation
    8: Opening and ending tag mismatch: ReferenceList line 2589 and PubmedArticle
    9: Opening and ending tag mismatch: PubmedData line 2542 and PubmedArticleSet
    10: Premature end of data in tag PubmedArticle line 2290
    11: Premature end of data in tag PubmedArticle line 1589
    12: Premature end of data in tag PubmedArticleSet line 3
    Execution halted
Flavor: r-patched-linux-x86_64

Version: 2.5
Flags: --no-examples
Check: re-building of vignette outputs
Result: WARN
    Error in re-building vignettes:
     ...
    Quitting from lines 39-50 (easyPM_vignette_html.Rmd)
    Error: processing vignette ‘easyPM_vignette_html.Rmd’ failed with diagnostics:
    1: Input is not proper UTF-8, indicate encoding !
    Bytes: 0xF0 0x03 0x1C 0x01
    2: PCDATA invalid Char value 3
    3: PCDATA invalid Char value 28
    4: PCDATA invalid Char value 1
    5: Opening and ending tag mismatch: Citation line 1066 and Reference
    6: expected '>'
    7: Opening and ending tag mismatch: ReferenceList line 746 and PubmedData
    8: Opening and ending tag mismatch: PubmedData line 704 and PubmedArticle
    9: PCDATA invalid Char value 4
    10: PCDATA invalid Char value 28
    11: PCDATA invalid Char value 1
    12: Opening and ending tag mismatch: AbstractText line 2302 and Abstract
    13: Opening and ending tag mismatch: Abstract line 2301 and Article
    14: Opening and ending tag mismatch: Article line 2281 and MedlineCitation
    15: Opening and ending tag mismatch: MedlineCitation line 2269 and PubmedArticle
    16: Unescaped '<' not allowed in attributes values
    17: attributes construct error
    18: Couldn't find end of Start Tag Qu
    Execution halted
Flavor: r-release-linux-x86_64
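
Both Linux WARNs above point to the same cause: the XML payload downloaded while re-building the vignette arrived truncated or with invalid bytes, so libxml2 reports encoding and tag-mismatch errors. A minimal, hypothetical sanity check (not part of easyPubMed) that could fail early instead of handing a corrupt payload to the parser:

    # Hypothetical sanity check for a fetched PubMed XML payload.
    check_pubmed_xml <- function(xml_txt) {
      ok_enc   <- all(validUTF8(xml_txt))                                   # bytes are valid UTF-8
      ok_close <- any(grepl("</PubmedArticleSet>", xml_txt, fixed = TRUE))  # document not truncated
      ok_enc && ok_close
    }

    # Usage: retry the download (or abort) when the payload looks corrupt.
    # if (!check_pubmed_xml(raw_xml)) stop("PubMed download looks corrupt; retry the fetch")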

Version: 2.5
Check: examples
Result: ERROR
    Running examples in 'easyPubMed-Ex.R' failed
    The error most likely occurred in:
    
    > ### Name: fetch_pubmed_data
    > ### Title: Retrieve PubMed Data in XML or TXT Format
    > ### Aliases: fetch_pubmed_data
    >
    > ### ** Examples
    >
    > ## Not run:
    > ##D # Example 01: retrieve data in XML format
    > ##D dami_query_string <- "Damiano Fantini[AU]"
    > ##D dami_on_pubmed <- get_pubmed_ids(dami_query_string)
    > ##D dami_papers <- fetch_pubmed_data(dami_on_pubmed)
    > ##D titles <- unlist(xpathApply(dami_papers, "//ArticleTitle", saveXML))
    > ##D title_pos <- regexpr("<ArticleTitle>.*<\\/ArticleTitle>", titles)
    > ##D titles <- substr(titles, title_pos + 14, title_pos + attributes(title_pos)$match.length - 16)
    > ##D print(titles)
    > ##D #
    > ## End(Not run)
    > # Example 02: retrieve data in TXT format
    > dami_query_string <- "Damiano Fantini[AU]"
    > dami_on_pubmed <- get_pubmed_ids(dami_query_string)
    > dami_papers <- fetch_pubmed_data(dami_on_pubmed, format = "abstract")
    Warning in url(efetch_url, open = "rb") :
     cannot open URL 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&WebEnv=NCID_1_156002692_130.14.18.34_9001_1548153742_297454284_0MetA0_S_MegaStore&query_key=1&retstart=0&retmax=500&rettype=abstract&retmode=text': HTTP status was '429 Too Many Requests'
    Error in url(efetch_url, open = "rb") : cannot open the connection
    Calls: fetch_pubmed_data -> url
    Execution halted
Flavor: r-release-windows-ix86+x86_64
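
The ERROR above is a rate-limit rejection rather than a code defect: NCBI's E-utilities throttles clients to roughly three requests per second, and the check machine's efetch call was answered with HTTP 429. A small, hypothetical retry wrapper (not part of the package) that backs off and retries a fetch:

    # Hypothetical helper: retry a fetch a few times, sleeping between attempts.
    fetch_with_retry <- function(fetch_fun, tries = 3, wait = 5) {
      for (k in seq_len(tries)) {
        out <- tryCatch(fetch_fun(), error = function(e) e)
        if (!inherits(out, "error")) return(out)
        Sys.sleep(wait)   # back off before the next attempt
      }
      stop("All retries failed: ", conditionMessage(out))
    }

    # Usage with the example above:
    # dami_papers <- fetch_with_retry(function() {
    #   fetch_pubmed_data(dami_on_pubmed, format = "abstract")
    # })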

Version: 2.5
Check: re-building of vignette outputs
Result: WARN
    Error in re-building vignettes:
     ...
    Quitting from lines 90-100 (easyPM_vignette_html.Rmd)
    Error: processing vignette 'easyPM_vignette_html.Rmd' failed with diagnostics:
    undefined columns selected
    Execution halted
Flavors: r-release-windows-ix86+x86_64, r-oldrel-windows-ix86+x86_64
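
In base R, "undefined columns selected" is the error raised when a data frame is indexed with column names it does not contain; here the vignette's table_articles_byAuth() call apparently produced a result lacking one of the expected fields. A minimal illustration with hypothetical data:

    # Hypothetical data: 'doi' is requested but not present.
    df <- data.frame(pmid = 1, title = "x", stringsAsFactors = FALSE)
    wanted <- c("pmid", "title", "doi")
    # df[, wanted]                       # Error: undefined columns selected
    df[, intersect(wanted, names(df))]   # defensive subset keeps only columns that exist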