diff --git a/.Rbuildignore b/.Rbuildignore index 19624de80..8f4e9970b 100644 --- a/.Rbuildignore +++ b/.Rbuildignore @@ -1,16 +1,25 @@ +^.*-in_tree$ ^.*\.Rproj$ -^\.Rproj\.user$ ^API$ -^README\.Rmd$ +^CONTRIBUTING\.md$ +^LICENSE\.md$ +^Meta$ ^README-.*\.png$ -^.travis.yml -^appveyor\.yml$ -^tic\.R$ -^\.travis\.yml$ -^docs$ -^_pkgdown\.yml$ -CONTRIBUTING.md +^README\.Rmd$ +^\.Rproj\.user$ +^\.github$ ^\.gitsum$ -^gitsum$ - +^\.lintr$ +^\.pre-commit-config\.yaml$ +^_pkgdown\.yaml$ ^cran-comments\.md$ +^doc$ +^docs$ +^gitsum$ +^inst/WORDLIST$ +^inst/hooks/.*$ +^revdep$ +^tests/testmanual$ +^tic\.R$ +^touchstone$ +^vignettes/gsoc_proposal$ diff --git a/.gitattributes b/.gitattributes index f2e9ea02a..0f2dadb50 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1 +1 @@ -tests/testthat/**/*_tree linguist-generated=true \ No newline at end of file +tests/testthat/**/*_tree linguist-generated=true diff --git a/.github/.gitignore b/.github/.gitignore new file mode 100644 index 000000000..2d19fc766 --- /dev/null +++ b/.github/.gitignore @@ -0,0 +1 @@ +*.html diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml new file mode 100644 index 000000000..70b670eda --- /dev/null +++ b/.github/dependabot.yaml @@ -0,0 +1,8 @@ +version: 2 + +updates: + # Keep dependencies for GitHub Actions up-to-date + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" diff --git a/.github/workflows/check-all-examples.yaml b/.github/workflows/check-all-examples.yaml new file mode 100644 index 000000000..7de30a17f --- /dev/null +++ b/.github/workflows/check-all-examples.yaml @@ -0,0 +1,42 @@ +# Make sure all examples run successfully, even the ones that are not supposed +# to be run or tested on CRAN machines by default. +# +# The examples that fail should use +# - `if (FALSE) { ... }` (if example is included only for illustrative purposes) +# - `try({ ... })` (if the intent is to show the error) +# +# This workflow helps find such failing examples that need to be modified. + +on: + push: + branches: [main, master] + pull_request: + branches: [main, master] + +name: check-all-examples + +jobs: + check-all-examples: + runs-on: ubuntu-latest + env: + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + + steps: + - uses: actions/checkout@v3 + + - uses: r-lib/actions/setup-r@v2 + with: + use-public-rspm: true + + - uses: r-lib/actions/setup-r-dependencies@v2 + with: + pak-version: devel + extra-packages: | + any::devtools + local::. + + - name: Run examples + run: | + options(crayon.enabled = TRUE) + devtools::run_examples(fresh = TRUE, run_dontrun = TRUE, run_donttest = TRUE) + shell: Rscript {0} diff --git a/.github/workflows/check-full.yaml b/.github/workflows/check-full.yaml new file mode 100644 index 000000000..8f98653d0 --- /dev/null +++ b/.github/workflows/check-full.yaml @@ -0,0 +1,61 @@ +# Workflow derived from https://github.com/r-lib/actions/tree/v2/examples +# Need help debugging build failures? 
Start at https://github.com/r-lib/actions#where-to-find-help +on: + push: + branches: [main, master] + pull_request: + branches: [main, master] + +name: R-CMD-check + +jobs: + R-CMD-check: + runs-on: ${{ matrix.config.os }} + + name: ${{ matrix.config.os }} (${{ matrix.config.r }}) + + strategy: + fail-fast: false + matrix: + config: + - {os: macos-latest, r: 'release'} + + - {os: windows-latest, r: 'release'} + # Use 3.6 to trigger usage of RTools35 + - {os: windows-latest, r: '3.6'} + # use 4.1 to check with rtools40's older compiler + - {os: windows-latest, r: '4.1'} + + - {os: ubuntu-latest, r: 'devel', http-user-agent: 'release'} + - {os: ubuntu-latest, r: 'release'} + - {os: ubuntu-latest, r: 'oldrel-1'} + - {os: ubuntu-latest, r: 'oldrel-2'} + - {os: ubuntu-latest, r: 'oldrel-3'} + - {os: ubuntu-latest, r: 'oldrel-4'} + + env: + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + R_KEEP_PKG_SOURCE: yes + + steps: + - uses: actions/checkout@v3 + + - uses: r-lib/actions/setup-pandoc@v2 + + - uses: r-lib/actions/setup-r@v2 + with: + r-version: ${{ matrix.config.r }} + http-user-agent: ${{ matrix.config.http-user-agent }} + use-public-rspm: true + + - uses: r-lib/actions/setup-r-dependencies@v2 + with: + extra-packages: any::rcmdcheck + needs: check + + - uses: r-lib/actions/check-r-package@v2 + with: + upload-snapshots: true + error-on: 'ifelse(getRversion() > 3.6, "warning", "note")' + env: + _R_CHECK_FORCE_SUGGESTS_: false diff --git a/.github/workflows/check-link-rot.yaml b/.github/workflows/check-link-rot.yaml new file mode 100644 index 000000000..666d27c86 --- /dev/null +++ b/.github/workflows/check-link-rot.yaml @@ -0,0 +1,43 @@ +on: + pull_request: + branches: [main, master] + schedule: + # * is a special character in YAML so you have to quote this string + # Trigger once a month at 00:00 on the 1st day of the month. + - cron: "0 0 1 * *" + +name: check-link-rot + +jobs: + check-link-rot: + runs-on: ubuntu-latest + env: + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + R_KEEP_PKG_SOURCE: yes + steps: + - uses: actions/checkout@v3 + + - uses: r-lib/actions/setup-pandoc@v2 + + - uses: r-lib/actions/setup-r@v2 + with: + r-version: "devel" + http-user-agent: "release" + use-public-rspm: true + + - uses: r-lib/actions/setup-r-dependencies@v2 + with: + pak-version: devel + extra-packages: | + any::rcmdcheck + any::urlchecker + + - name: Run URL checker + run: | + options(crayon.enabled = TRUE) + rotten_links <- urlchecker::url_check(progress = FALSE) + print(rotten_links) + if (length(rotten_links$URL) > 0L) { + stop("Some URLs are outdated and need to be updated.", call. = FALSE) + } + shell: Rscript {0} diff --git a/.github/workflows/pkgdown.yaml b/.github/workflows/pkgdown.yaml new file mode 100644 index 000000000..d227c59cf --- /dev/null +++ b/.github/workflows/pkgdown.yaml @@ -0,0 +1,46 @@ +# Workflow derived from https://github.com/r-lib/actions/tree/v2/examples +# Need help debugging build failures? 
Start at https://github.com/r-lib/actions#where-to-find-help +on: + push: + branches: [main, master] + pull_request: + branches: [main, master] + release: + types: [published] + workflow_dispatch: + +name: pkgdown + +jobs: + pkgdown: + runs-on: ubuntu-latest + # Only restrict concurrency for non-PR jobs + concurrency: + group: pkgdown-${{ github.event_name != 'pull_request' || github.run_id }} + env: + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + steps: + - uses: actions/checkout@v3 + + - uses: r-lib/actions/setup-pandoc@v2 + + - uses: r-lib/actions/setup-r@v2 + with: + use-public-rspm: true + + - uses: r-lib/actions/setup-r-dependencies@v2 + with: + extra-packages: any::pkgdown, local::. + needs: website + + - name: Build site + run: pkgdown::build_site_github_pages(new_process = FALSE, install = FALSE) + shell: Rscript {0} + + - name: Deploy to GitHub pages 🚀 + if: github.event_name != 'pull_request' + uses: JamesIves/github-pages-deploy-action@v4.4.2 + with: + clean: false + branch: gh-pages + folder: docs diff --git a/.github/workflows/pre-commit.yaml b/.github/workflows/pre-commit.yaml new file mode 100644 index 000000000..0ab0603db --- /dev/null +++ b/.github/workflows/pre-commit.yaml @@ -0,0 +1,58 @@ +name: pre-commit +on: + push: + branches-ignore: + - "master" + - "main" + pull_request: + types: [opened, synchronize, reopened, ready_for_review] + +jobs: + pre-commit: + runs-on: ubuntu-latest + if: >- + !contains(github.event.head_commit.message, 'ci skip') && + ( + startsWith(github.ref, 'refs/heads') || + github.event.pull_request.draft == false + ) + steps: + - name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.11.0 + with: + access_token: ${{ github.token }} + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Install system dependencies + if: runner.os == 'Linux' + run: | + # your system installation code here + # sudo apt-get install -y libcurl4-openssl-dev + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: "3.9" + architecture: "x64" + - name: Run pre-commit + uses: pre-commit/action@v3.0.0 + env: + SKIP: pkgdown + - name: Commit files + if: failure() && startsWith(github.ref, 'refs/heads') + run: | + if [[ `git status --porcelain --untracked-files=no` ]]; then + git config --local user.email "github-actions[bot]@users.noreply.github.com" + git config --local user.name "github-actions[bot]" + git checkout -- .github/workflows + git commit -m "pre-commit" -a + fi + - name: Push changes + if: failure() && startsWith(github.ref, 'refs/heads') + uses: ad-m/github-push-action@master + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + branch: ${{ github.ref }} + env: + RENV_CONFIG_CACHE_ENABLED: FALSE + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/test-coverage.yaml b/.github/workflows/test-coverage.yaml new file mode 100644 index 000000000..fe87549b5 --- /dev/null +++ b/.github/workflows/test-coverage.yaml @@ -0,0 +1,31 @@ +# Workflow derived from https://github.com/r-lib/actions/tree/v2/examples +# Need help debugging build failures? 
Start at https://github.com/r-lib/actions#where-to-find-help +on: + push: + branches: [main, master] + pull_request: + branches: [main, master] + +name: test-coverage + +jobs: + test-coverage: + runs-on: ubuntu-latest + env: + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + + steps: + - uses: actions/checkout@v3 + + - uses: r-lib/actions/setup-r@v2 + with: + use-public-rspm: true + + - uses: r-lib/actions/setup-r-dependencies@v2 + with: + extra-packages: any::covr + needs: coverage + + - name: Test coverage + run: covr::codecov(quiet = FALSE) + shell: Rscript {0} diff --git a/.github/workflows/touchstone-comment.yaml b/.github/workflows/touchstone-comment.yaml new file mode 100644 index 000000000..c3b8eea04 --- /dev/null +++ b/.github/workflows/touchstone-comment.yaml @@ -0,0 +1,22 @@ +name: Continuous Benchmarks (Comment) + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref }} + cancel-in-progress: true + +on: + workflow_run: + workflows: ["Continuous Benchmarks (Receive)"] + types: + - completed + +jobs: + upload: + runs-on: ubuntu-latest + if: > + ${{ github.event.workflow_run.event == 'pull_request' && + github.event.workflow_run.conclusion == 'success' }} + steps: + - uses: lorenzwalthert/touchstone/actions/comment@main + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/touchstone-receive.yaml b/.github/workflows/touchstone-receive.yaml new file mode 100644 index 000000000..b1b462650 --- /dev/null +++ b/.github/workflows/touchstone-receive.yaml @@ -0,0 +1,58 @@ +name: Continuous Benchmarks (Receive) + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref }} + cancel-in-progress: true + +on: + pull_request: + paths: + # Directories with source code and benchmarking code + - "inst/**" + - "R/**" + - "src/**" + - "touchstone/**" + # Benchmarking config file + - ".github/workflows/touchstone-*.yaml" + # Package metadata + - DESCRIPTION + +jobs: + prepare: + runs-on: ubuntu-latest + outputs: + config: ${{ steps.read_touchstone_config.outputs.config }} + steps: + - name: Checkout repo + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - id: read_touchstone_config + run: | + content=`cat ./touchstone/config.json` + # the following lines are only required for multi line json + content="${content//'%'/'%25'}" + content="${content//$'\n'/'%0A'}" + content="${content//$'\r'/'%0D'}" + # end of optional handling for multi line json + echo "::set-output name=config::$content" + build: + needs: prepare + runs-on: ${{ matrix.config.os }} + strategy: + fail-fast: false + matrix: + config: + - ${{ fromJson(needs.prepare.outputs.config) }} + env: + R_REMOTES_NO_ERRORS_FROM_WARNINGS: true + RSPM: ${{ matrix.config.rspm }} + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + steps: + - uses: lorenzwalthert/touchstone/actions/receive@main + with: + cache-version: 1 + benchmarking_repo: ${{ matrix.config.benchmarking_repo }} + benchmarking_ref: ${{ matrix.config.benchmarking_ref }} + benchmarking_path: ${{ matrix.config.benchmarking_path }} diff --git a/.gitignore b/.gitignore index f16fd6970..930a72e86 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,16 @@ -.Rproj.user -.Rhistory .RData -inst/doc -docs +.Rhistory +.Rproj.user .gitsum +Meta +R/scratch +doc +docs gitsum +inst/doc +revdep/ +!revdep/*.md +!revdep/problems.md +touchstone/* +!touchstone/config.json +!touchstone/script.R diff --git a/.lintr b/.lintr new file mode 100644 index 000000000..8244162db --- /dev/null +++ b/.lintr @@ -0,0 +1,27 @@ +linters: linters_with_defaults( + 
commented_code_linter = NULL, + cyclocomp_linter = cyclocomp_linter(40), + fixed_regex_linter = NULL, + function_argument_linter = NULL, + indentation_linter = NULL, + line_length_linter(120), + namespace_linter = NULL, + nested_ifelse_linter = NULL, + object_name_linter = NULL, + object_length_linter(70), + object_usage_linter = NULL, + todo_comment_linter = NULL, + extraction_operator_linter = NULL, + nonportable_path_linter = NULL, + string_boundary_linter = NULL, + undesirable_function_linter = NULL, + undesirable_operator_linter = NULL, + defaults = linters_with_tags(tags = NULL) + ) +exclusions: list( + "inst", + "man", + "tests", + "touchstone", + "vignettes" + ) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 000000000..a45eaf64f --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,134 @@ +# All available hooks: https://pre-commit.com/hooks.html +# R specific hooks: https://github.com/lorenzwalthert/precommit +default_stages: [commit] +default_language_version: + python: python3 + +repos: + - repo: https://github.com/lorenzwalthert/precommit + rev: f3498c421d68a1db26de1a1fe3ecc91dd6f03b5e + hooks: + - id: style-files + args: + ['--ignore-start="^# styler: off$"', '--ignore-stop="^# styler: on$"'] + exclude: > + (?x)^( + tests/testthat/.*/.*\.R(md|nw)?| + vignettes/customizing_styler\.Rmd| + tests/testthat/public-api/xyzfile-rnw/random4\.Rnw| + vignettes/detect-alignment\.Rmd| + tests/testmanual/addins/.*invalid.*| + tests/testmanual/addins/r-valid\.R| + )$ + - id: lintr + additional_dependencies: + - r-lib/lintr + exclude: > + (?x)^( + inst/.*| + man/.*| + tests/.*| + touchstone/.*| + vignettes/.*| + )$ + - id: roxygenize + additional_dependencies: + - r-lib/pkgapi + - dplyr@1.0.9 + - roxygen2@7.2.2 + - id: use-tidy-description + - id: spell-check + exclude: > + (?x)^( + \.github/.*\.yaml| + data/.*| + tests/testthat/.*| + touchstone/config\.json| + (.*/|)\.Rprofile| + (.*/|)\.Renviron| + (.*/|)\.gitignore| + (.*/|)NAMESPACE| + (.*/|)WORDLIST| + (.*/|)\.travis.yml| + (.*/|)appveyor.yml| + (.*/|)\.Rbuildignore| + (.*/|)\.pre-commit-.*| + .*\.[rR]| + .*\.Rproj| + .*\.py| + .*\.feather| + .*\.rds| + .*\.Rds| + .*\.sh| + .*\.RData| + .*-in_tree + )$ + - id: readme-rmd-rendered + - id: parsable-R + exclude: > + (?x)^( + tests/testthat/public-api/xyzaddin/addin_region-.*| + tests/.*invalid.*| + tests/testthat/rmd/no-tidy-out\.Rmd| + tests/testthat/escaping/basic-escape-out\.R| + tests/testthat/indention_operators/.*pipe.*| + tests/testthat/line_breaks_and_other/.*pipe.*| + tests/testthat/exception_handling/parser-error\.R| + tests/testthat/public-api/xyzfile_rmd/random4\.Rmd| + tests/testthat/rmd/no-tidy-(in|out)\.Rmd| + )$ + - id: no-browser-statement + exclude: > + (?x)^( + tests/testthat/public-api/xyzaddin/addin_region-.*| + tests/testmanual/addins/r-invalid\.R| + tests/testthat/escaping/basic-escape-out\.R| + tests/testthat/indention_operators/.*pipe.*| + tests/testthat/line_breaks_and_other/.*pipe.*| + tests/testthat/exception_handling/parser-error.R| + tests/testmanual/| + )$ + - id: deps-in-desc + exclude: > + (?x)^( + touchstone/.*| + tests/testmanual/addins/.*invalid.*| + tests/testthat/escaping/basic-escape-out\.R| + tests/testthat/rnw/011-conditional-eval-out\.Rnw| + tests/testthat/.*\.R(md)? 
+ )$ + - id: pkgdown + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: check-added-large-files + args: ["--maxkb=200"] + - id: check-yaml + - id: mixed-line-ending + - id: file-contents-sorter + files: "\\.Rbuildignore$" + - id: end-of-file-fixer + exclude: > + (?x)^( + \.Rd| + tests/testthat/exception_handling/empty_file\.R| + tests/testthat/parse_comments/eol_eof_spaces-.*| + tests/testthat/reference-objects/.*| + tests/testthat/_snaps/.*| + )$ + - repo: https://github.com/lorenzwalthert/gitignore-tidy + rev: 475bf5d96927a1887ce2863ff3075b1d7240bc51 + hooks: + - id: tidy-gitignore + - repo: local + hooks: + - id: forbid-to-commit + name: Don't commit common R artifacts + entry: Cannot commit .Rhistory, .RData, .Rds or .rds. + language: fail + files: '\.Rhistory|\.RData|\.Rds|\.rds$' + # `exclude: ` to allow committing specific files. + +ci: + skip: [pkgdown] + autoupdate_schedule: monthly diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 68dab75ae..000000000 --- a/.travis.yml +++ /dev/null @@ -1,49 +0,0 @@ -# R for travis: see documentation at https://docs.travis-ci.com/user/languages/r -# Default configuration for use with tic package -# Usually you shouldn't need to change the first part of the file - -# DO NOT CHANGE THE CODE BELOW -before_install: R -q -e 'install.packages(c("remotes", "curl")); remotes::install_github("ropenscilabs/tic"); tic::prepare_all_stages(); tic::before_install()' -install: R -q -e 'tic::install()' -after_install: R -q -e 'tic::after_install()' -before_script: R -q -e 'tic::before_script()' -script: R -q -e 'tic::script()' -after_success: R -q -e 'tic::after_success()' -after_failure: R -q -e 'tic::after_failure()' -before_deploy: R -q -e 'tic::before_deploy()' -deploy: - provider: script - script: R -q -e 'tic::deploy()' - on: - all_branches: true -after_deploy: R -q -e 'tic::after_deploy()' -after_script: R -q -e 'tic::after_script()' -# DO NOT CHANGE THE CODE ABOVE - -# Custom parts: - -# Header -language: r -sudo: false -dist: trusty -cache: packages -latex: false - -matrix: - include: - - r: 3.1 - - r: 3.2 - - r: oldrel - - r: release - env: - - BUILD_PKGDOWN: true - - r: devel - -#env -env: - global: - - _R_CHECK_FORCE_SUGGESTS_=false - - MAKEFLAGS="-j 2" - -#services -services: diff --git a/API b/API index 27492bf95..46b34bbcd 100644 --- a/API +++ b/API @@ -2,14 +2,37 @@ ## Exported functions -create_style_guide(initialize = default_style_guide_attributes, line_break = NULL, space = NULL, token = NULL, indention = NULL, use_raw_indention = FALSE, reindention = tidyverse_reindention()) +cache_activate(cache_name = NULL, verbose = !getOption("styler.quiet", FALSE)) +cache_clear(cache_name = NULL, ask = TRUE) +cache_deactivate(verbose = !getOption("styler.quiet", FALSE)) +cache_info(cache_name = NULL, format = "both") +compute_parse_data_nested(text, transformers = tidyverse_style(), more_specs = NULL) +create_style_guide(initialize = default_style_guide_attributes, line_break = NULL, space = NULL, token = NULL, indention = NULL, use_raw_indention = FALSE, reindention = tidyverse_reindention(), style_guide_name = NULL, style_guide_version = NULL, more_specs_style_guide = NULL, transformers_drop = specify_transformers_drop(), indent_character = " ") default_style_guide_attributes(pd_flat) -specify_math_token_spacing(zero = NULL, one = c("'+'", "'-'", "'*'", "'/'", "'^'")) -specify_reindention(regex_pattern = NULL, indention = 0, comments_only = TRUE) -style_dir(path = ".", ..., style = 
tidyverse_style, transformers = style(...), filetype = "R", recursive = TRUE, exclude_files = NULL) -style_file(path, ..., style = tidyverse_style, transformers = style(...)) -style_pkg(pkg = ".", ..., style = tidyverse_style, transformers = style(...), filetype = "R", exclude_files = "R/RcppExports.R") -style_text(text, ..., style = tidyverse_style, transformers = style(...)) +is_asymmetric_tilde_expr(pd) +is_comment(pd) +is_conditional_expr(pd) +is_curly_expr(pd) +is_for_expr(pd) +is_function_call(pd) +is_function_declaration(pd) +is_symmetric_tilde_expr(pd) +is_tilde_expr(pd, tilde_pos = c(1L, 2L)) +is_while_expr(pd) +next_non_comment(pd, pos) +previous_non_comment(pd, pos) +scope_normalize(scope, name = substitute(scope)) +specify_math_token_spacing(zero = "'^'", one = c("'+'", "'-'", "'*'", "'/'")) +specify_reindention(regex_pattern = NULL, indention = 0L, comments_only = TRUE) +specify_transformers_drop(spaces = NULL, indention = NULL, line_breaks = NULL, tokens = NULL) +style_dir(path = ".", ..., style = tidyverse_style, transformers = style(...), filetype = c("R", "Rprofile", "Rmd", "Rmarkdown", "Rnw", "Qmd"), recursive = TRUE, exclude_files = NULL, exclude_dirs = c("packrat", "renv"), include_roxygen_examples = TRUE, base_indention = 0L, dry = "off") +style_file(path, ..., style = tidyverse_style, transformers = style(...), include_roxygen_examples = TRUE, base_indention = 0L, dry = "off") +style_pkg(pkg = ".", ..., style = tidyverse_style, transformers = style(...), filetype = c("R", "Rprofile", "Rmd", "Rmarkdown", "Rnw", "Qmd"), exclude_files = c("R/RcppExports.R", "R/cpp11.R"), exclude_dirs = c("packrat", "renv"), include_roxygen_examples = TRUE, base_indention = 0L, dry = "off") +style_text(text, ..., style = tidyverse_style, transformers = style(...), include_roxygen_examples = TRUE, base_indention = 0L) tidyverse_math_token_spacing() tidyverse_reindention() -tidyverse_style(scope = "tokens", strict = TRUE, indent_by = 2, start_comments_with_one_space = FALSE, reindention = tidyverse_reindention(), math_token_spacing = tidyverse_math_token_spacing()) +tidyverse_style(scope = "tokens", strict = TRUE, indent_by = 2L, start_comments_with_one_space = FALSE, reindention = tidyverse_reindention(), math_token_spacing = tidyverse_math_token_spacing()) + +## Foreign S3 methods + +print.vertical(x, ..., colored = getOption("styler.colored_print.vertical"), style = prettycode::default_style()) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 32975f76c..f9759dc00 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,4 +1,209 @@ -# Issue Management +# Contributing to {styler} -This project follows the issue management recommendations outlined by [saamwerk](https://github.com/lorenzwalthert/saamwerk/blob/master/issue-management/labelling-strategy.md). -In particular, issues labelled with `Status: Postponed` are closed even if they are not resolved. +## Introduction + +This project follows the contributing recommendations outlined by [saamwerk](https://lorenzwalthert.github.io/saamwerk/). +In particular, issues labelled with `Status: Postponed` are closed even if they +are not resolved. + +## Contributing code + +* Open a PR only when your idea was approved of by a contributor in an issue. +* Follow guidelines in [tidyverse style guide](http://style.tidyverse.org) for + your code contributions. +* Make sure your commit pass the pre-commit hooks in this repo. 
+  See the `{precommit}` [README.md](https://github.com/lorenzwalthert/precommit)
+  for how to install the pre-commit framework and the R package on your system,
+  then run `precommit::use_precommit()` to make sure the hooks are activated
+  in your local styler clone. If you skip a hook, describe why in the PR.
+
+## How to dive in and understand the source code
+
+Read the vignettes. If you are done, come back here.
+
+```r
+devtools::load_all()
+
+debug(style_text)
+
+style_text("call(1, 2 + 1)")
+```
+
+Go broad before you go deep. Before going into the very deep layers of function
+calls of `style_text()`, try to understand that `style_text()` consists of only
+a few function calls.
+Go into each of them and try to understand one layer deep. That is, try to
+understand what `make_transformer()` does by reading the names of the functions
+that get called and the names of the objects that are created by assigning the
+output of these function calls. Before looking into a function's source code,
+look at the documentation for that function. All important internal functions
+are documented, and documentation is also available for unexported objects via
+`?` (if you did `devtools::load_all()`).
+Then, go into `parse_transform_serialize()`, and so on.
+
+To understand the most fundamental operation in styler, the manipulation of the
+columns related to spacing and line break information, pick a rule from
+`R/rules-*.R` (e.g. `R/rules-spacing`), add a break point to the rule, and style
+a string where you think this rule will be active. Then, see what happens and
+how this rule is applied on each level of nesting.
+
+## Static code analysis
+
+There are multiple packages that can be used to analyze a code base:
+
+* [gitsum](https://github.com/lorenzwalthert/gitsum): Parses and summarises git
+  repository history.
+* [parsesum](https://github.com/lorenzwalthert/parsesum): Analyses source code
+  through parsing.
+
+Check out the links above to see how the tools listed could help you
+understand styler.
+
+## Project setup
+
+* The package is developed with the {devtools} suite, which includes {roxygen2}
+  for documentation, {testthat} for unit testing, and {pkgdown} for HTML
+  documentation.
+* Continuous integration uses GitHub Actions.
+* A key development principle of styler is to separate infrastructure from the
+  style guide. Hence, whenever possible, transformer functions should be
+  adapted instead of changing the infrastructure for a specific style guide.
+* {styler} was created in 2017 by Kirill Müller. It was then turned from a
+  proof-of-concept into a ready-for-production tool as part of GSOC 2017, with
+  Kirill Müller and Yihui Xie as mentors and Lorenz Walthert as student.
+
+## File Structure
+
+The source code is organized as follows:
+
+| File | Description |
+| -------------: |:-----------------------------------------------------------|
+| addins.R | UI and helpers for the RStudio Addins of styler. |
+| communicate.R | Functions to communicate with the user via the console. |
+| compat-dplyr.R | Compatibility functions. Since styler does not depend on dplyr, we define the dplyr functions ourselves. |
+| compat-tidyr.R | Compatibility functions. Since styler does not depend on tidyr, we define the tidyr functions ourselves. |
+| expr-is.R | Functions to check whether an expression matches a predicate (e.g. whether it *is* a function call, a curly brace expression etc.). |
+| indent.R | Computation of whether indention is needed (`needs_indention()`), if so which indices are indented, and how indention is triggered. |
+| initialize.R | Initializer called with the visitor at each nest. |
+| nest.R | Converting from a text representation into a flat and then into a nested parse table representation. |
+| nested-to-tree.R | Utilities to create a tree representation from text (after the text was converted into a nested parse table). |
+| parse.R | Parsing text into a parse table, minor token manipulation, verification of parsed objects. |
+| reindent.R | Deals with token-dependent indention and re-indention, as opposed to indent.R where all indention is token-independent (i.e. a brace just adds one level of indention, whereas in function declaration headers (if multi-line), indention depends on the token position of "function"). |
+| relevel.R | Reorganizing the nested parse table, namely relocating expressions on both sides of "%>%" to the same nest. |
+| rules-line-break.R, rules-other.R, rules-replacement.R, rules-spacing.R | Transformer rules. |
+| serialize.R | Converts the flattened parse table into a text representation. Complement operation to the functions in nest.R. |
+| set-assert-args.R | Assertion and setting of arguments. |
+| style-guides.R | How to create style guide objects from transformers. |
+| styler.R | General package information. |
+| testing.R | Functions used for testing. |
+| token-create.R | Utilities for creating tokens, mostly to insert braces around multi-line if statements. |
+| token-define.R | Defines which tokens belong to which group. |
+| transform-code.R, transform-files.R | Transformation of code for APIs that manipulate files (e.g. `style_file()`). |
+| ui.R | User interfaces. Top-level functions for styling. |
+| unindent.R | Certain tokens cause unindention, e.g. closing braces. |
+| utils.R | Low-level general-purpose utilities. |
+| vertical.R | S3 class for pretty printing of styled code. |
+| visit.R | Functions that apply functions to each level of nesting, either inside out or outside in. |
+| zzz.R | Backport imports. |
+
+## Obtaining contextual information
+
+You may have problems understanding some code because documentation is minimal,
+or because some code / functions seem to solve problems you don't understand or
+handle cases that seem unreasonable or otherwise incomprehensible. You can
+resort to the following strategies:
+
+* Use full-text search to see where functions are defined or called and how
+  different parts of {styler} depend on them.
+* Use `$ git blame` to see where changes were introduced. Look at the commit
+  message and check changes that were made to the code in the same commit. If
+  you are using the GitHub GUI, you can easily obtain more contextual
+  information, such as the pull request with which a change was introduced.
+  Often, functionality was introduced together with tests, so you can easily
+  see which new tests relate to the new functionality. You can remove the
+  changes in the source code, re-run the tests and see what fails and why.
+* Search issues and pull requests on GitHub with the full-text search. Make
+  sure you also search for closed issues and PRs.
+
+## High-level conventions
+
+* The project follows a highly functional approach. This means that
+  functionality should be encapsulated in functions, even if they are only
+  called once. This makes abstraction from the code easier, keeps each function
+  declaration to a manageable number of lines, and makes it easier for people
+  not familiar with the code base to dive into it.
+* All internal functions (except if they are 100% self-explanatory) are to be
+  documented.
+* New functionality (e.g. in terms of styling rules) needs to be unit tested.
+  If the new functionality changes how code is styled, the infrastructure with
+  `test_collection()` should be used.
+* Cases that are not yet formatted correctly can be labelled with a `FIXME`.
+* GitHub is the platform where communication about the source code happens. We
+  refrain from adding extensive in-line code comments. One can use `$ git blame`
+  to track when changes were introduced and find the corresponding pull request
+  and associated issues to understand the thought process that led to a change
+  in the source code. This also implies that issues and / or pull requests
+  contain verbose explanations of the problems and solutions provided.
+
+## Low-level conventions
+
+This project follows the [tidyverse style guide](http://style.tidyverse.org).
+If we refer to specific variables / values etc. in the following sections, you
+can use RStudio's full-text search to find where
+`remove_line_break_before_round_closing_after_curly()` is declared or called.
+
+### Files
+
+* File names only contain alphanumeric characters and dashes.
+* Files are named according to topics / contexts, not according to the
+  functions that live in these files.
+
+### Functions
+
+* Function names should be verbs. No abbreviations should be used; we don't
+  care if function names are particularly long. For example, there is a
+  function with the name `remove_line_break_before_round_closing_after_curly()`.
+* Only very low-level functions or functions that don't fit in any other file
+  go to `utils.R`.
+
+### Control Flow
+
+* Conditional statements should always evaluate to `TRUE` or `FALSE`, i.e. we
+  don't encourage `if (length(x))`, but rather `if (length(x) > 0L)`.
+* We avoid loops whenever possible, use functions like `purrr::map()` and
+  friends when possible, and prefer them over base R counterparts like
+  `base::lapply()`.
+
+### Boolean Values
+
+Functions that return Boolean values, or variables that hold Boolean values,
+are prefixed with `is` or `has`. For example, `is_rmd_file(path)` is a function
+that returns `TRUE` if `path` is the path to a `.Rmd` file and `FALSE`
+otherwise.
+
+### Vectors with indices
+
+Vectors that hold indices are often suffixed with `idx`. For example,
+`else_idx` indicates for every row in a parse table whether it contains an
+`else` token.
+
+### Closures
+
+The use of closures is discouraged. We prefer to prefill a template function
+with `purrr::partial()`.
+
+## Testing
+
+We have a testing framework powered by `test_collection()`.
+Essentially, there is a \*-in.R file and a \*-out.R file for each test case. The
+\*-in.R file is the input that is transformed and, if the result matches the
+\*-out.R file, the test passes. You can create a \*-in.R file, run
+`devtools::test(filter = "[your file]")`, and a \*-out.R file is generated. If
+that file matches your expectation, you can commit it. **Note that files are
+overwritten and version control should be used to track failed tests.**
+The files are placed in `tests/testthat` under the category they fit in.
+Please have a look at the documentation for `test_collection()` and see other
+unit tests.
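+
+A minimal sketch of that workflow (all file and directory names below are
+purely illustrative, not actual paths in this repository):
+
+```r
+# 1. Create an input file containing code that exercises your rule, e.g.
+#    tests/testthat/indention_operators/my-new-rule-in.R (hypothetical name).
+# 2. Load the package and run the tests covering that directory; a matching
+#    *-out.R file is generated if it does not yet exist.
+devtools::load_all()
+devtools::test(filter = "indention")
+# 3. Review tests/testthat/indention_operators/my-new-rule-out.R and commit it
+#    if it contains the styling you expect.
+```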
+ +## Feedback + +Please open an issue if something is unclear so that we can improve the +contributing guidelines. diff --git a/DESCRIPTION b/DESCRIPTION index f179bae57..434ff833e 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,67 +1,106 @@ +Type: Package Package: styler Title: Non-Invasive Pretty Printing of R Code -Version: 1.0.0 -Authors@R: c(person("Kirill", "Müller", role = c("aut"), email = "krlmlr+r@mailbox.org"), - person("Lorenz", "Walthert", role = c("cre", "aut"), email = "lorenz.walthert@icloud.com")) -Description: - Pretty-prints R code without changing the user's formatting intent. -Imports: - backports, - cli, - enc, - magrittr, - purrr, - rematch2, - rlang, - rprojroot, - tibble, - withr -Suggests: - data.tree, +Version: 1.10.1 +Authors@R: + c(person(given = "Kirill", + family = "Müller", + role = "aut", + email = "kirill@cynkra.com", + comment = c(ORCID = "0000-0002-1416-3412")), + person(given = "Lorenz", + family = "Walthert", + role = c("cre", "aut"), + email = "lorenz.walthert@icloud.com"), + person(given = "Indrajeet", + family = "Patil", + role = "ctb", + email = "patilindrajeet.science@gmail.com", + comment = c(ORCID = "0000-0003-1995-6531", Twitter = "@patilindrajeets"))) +Description: Pretty-prints R code without changing the user's formatting + intent. +License: MIT + file LICENSE +URL: https://github.com/r-lib/styler, https://styler.r-lib.org +BugReports: https://github.com/r-lib/styler/issues +Depends: + R (>= 3.6.0) +Imports: + cli (>= 3.1.1), + magrittr (>= 2.0.0), + purrr (>= 0.2.3), + R.cache (>= 0.15.0), + rlang (>= 1.0.0), + rprojroot (>= 1.1), + tools, + vctrs (>= 0.4.1), + withr (>= 2.3.0), +Suggests: + data.tree (>= 0.1.6), + digest, dplyr, here, knitr, - mockr, + prettycode, rmarkdown, - rstudioapi, - testthat -License: GPL-3 + roxygen2, + rstudioapi (>= 0.7), + tibble (>= 1.4.2), + testthat (>= 3.0.0) +VignetteBuilder: + knitr Encoding: UTF-8 -LazyData: true -Date: 2017-12-05 -BugReports: https://github.com/r-lib/styler/issues -URL: https://github.com/r-lib/styler, https://r-lib.github.io/styler/ -Roxygen: list(markdown = TRUE, roclets = c("rd", "namespace", "collate", "pkgapi::api_roclet")) -RoxygenNote: 6.0.1 -VignetteBuilder: knitr +Roxygen: list(markdown = TRUE, roclets = c( "rd", "namespace", "collate", + if (rlang::is_installed("pkgapi")) "pkgapi::api_roclet" else { + warning("Please install r-lib/pkgapi to make sure the file API is kept + up to date"); NULL})) +RoxygenNote: 7.2.3 +Config/testthat/edition: 3 +Config/testthat/parallel: true Collate: 'addins.R' 'communicate.R' + 'compat-dplyr.R' 'compat-tidyr.R' - 'dplyr.R' + 'detect-alignment-utils.R' + 'detect-alignment.R' + 'environments.R' 'expr-is.R' 'indent.R' 'initialize.R' + 'io.R' 'nest.R' - 'nested_to_tree.R' + 'nested-to-tree.R' 'parse.R' 'reindent.R' 'token-define.R' 'relevel.R' - 'rules-line_break.R' - 'rules-other.R' - 'rules-replacement.R' - 'rules-spacing.R' + 'roxygen-examples-add-remove.R' + 'roxygen-examples-find.R' + 'roxygen-examples-parse.R' + 'roxygen-examples.R' + 'rules-indention.R' + 'rules-line-breaks.R' + 'rules-spaces.R' + 'rules-tokens.R' 'serialize.R' - 'serialized_tests.R' 'set-assert-args.R' - 'style_guides.R' - 'styler.R' + 'style-guides.R' + 'styler-package.R' + 'stylerignore.R' + 'testing-mocks.R' + 'testing-public-api.R' + 'ui-caching.R' + 'testing.R' 'token-create.R' + 'transform-block.R' 'transform-code.R' 'transform-files.R' - 'ui.R' + 'ui-styling.R' 'unindent.R' + 'utils-cache.R' + 'utils-files.R' + 'utils-navigate-nest.R' + 'utils-strings.R' 'utils.R' 
'vertical.R' 'visit.R' diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..31caf274f --- /dev/null +++ b/LICENSE @@ -0,0 +1,2 @@ +YEAR: 2021 +COPYRIGHT HOLDER: styler authors diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 000000000..7c5d716d3 --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,27 @@ +--- +editor_options: + markdown: + wrap: 79 +--- + +# MIT License + +Copyright (c) 2021 styler authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/NAMESPACE b/NAMESPACE index 41b19166a..1c39bc4ca 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -1,10 +1,29 @@ # Generated by roxygen2: do not edit by hand S3method(print,vertical) +export(cache_activate) +export(cache_clear) +export(cache_deactivate) +export(cache_info) +export(compute_parse_data_nested) export(create_style_guide) export(default_style_guide_attributes) +export(is_asymmetric_tilde_expr) +export(is_comment) +export(is_conditional_expr) +export(is_curly_expr) +export(is_for_expr) +export(is_function_call) +export(is_function_declaration) +export(is_symmetric_tilde_expr) +export(is_tilde_expr) +export(is_while_expr) +export(next_non_comment) +export(previous_non_comment) +export(scope_normalize) export(specify_math_token_spacing) export(specify_reindention) +export(specify_transformers_drop) export(style_dir) export(style_file) export(style_pkg) @@ -12,21 +31,28 @@ export(style_text) export(tidyverse_math_token_spacing) export(tidyverse_reindention) export(tidyverse_style) -import(tibble) importFrom(magrittr,"%>%") +importFrom(purrr,compact) importFrom(purrr,flatten) importFrom(purrr,flatten_chr) importFrom(purrr,flatten_int) importFrom(purrr,map) importFrom(purrr,map2) +importFrom(purrr,map2_chr) +importFrom(purrr,map_at) importFrom(purrr,map_chr) -importFrom(purrr,map_dfr) +importFrom(purrr,map_int) importFrom(purrr,map_lgl) importFrom(purrr,partial) importFrom(purrr,pmap) importFrom(purrr,pwalk) -importFrom(purrr,reduce) -importFrom(purrr,when) +importFrom(rlang,"%||%") +importFrom(rlang,abort) +importFrom(rlang,check_installed) +importFrom(rlang,is_installed) importFrom(rlang,seq2) -importFrom(utils,tail) -importFrom(utils,write.table) +importFrom(rlang,set_names) +importFrom(rlang,warn) +importFrom(vctrs,vec_rbind) +importFrom(vctrs,vec_slice) +importFrom(vctrs,vec_split) diff --git a/NEWS.md b/NEWS.md index 2d151660d..6dc69a941 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,71 +1,1119 @@ -## styler 1.0.0 (2017-12-05) +# styler 1.10.1 -Initial release. 
+This release was requested by CRAN due to accidentally populating a user cache while building vignettes for R >= 4.3.0. -### stylers -These are functions used to style code. They style a directory, a whole package, -a file or a string. -``` -style_dir(path = ".", - ..., style = tidyverse_style, transformers = style(...), - filetype = "R", recursive = TRUE, exclude_files = NULL -) +* Code quality improvements (#1122). +* Bump JamesIves/github-pages-deploy-action from 4.4.1 to 4.4.2 (#1123). + +Thanks to everyone who contributed to this release: [@olivroy](https://github.com/olivroy) and [@krlmlr](https://github.com/krlmlr). + +# styler 1.10.0 + +This release contains speed-ups between 20% and 40% depending on your use case +thanks to replacing {base} functionality with {vctrs} (#1114). With the speed +boost introduced in version 1.8.0 in Oct. 2022, {styler} is now up to 2x as fast +as before release 1.8.0. + +This release was created upon a request by the CRAN team to actively manage not +just cached files but also the potentially empty cache directories they live in +(#1118). Here are the changes in detail: + + +- Require at least R 3.6 (#1101). +- Prefer {vctrs} functions over slower {base} equivalents (#1114). +- Replace deprecated use of `rlang::with_handlers()` (#1103). +- Remove tail recursion in favor of `repeat` (#1113). +- split `test-public_api.R` for better sharding (#1109). +- 0-pad filenames for sharding (#1110) +- add missing {testthat} snapshots (#1115). +- Bump {touchstone} config (#1104, #1107). +- Bump `actions/checkout` to version 3 in GitHub Actions (#1098). + +Thanks for everyone contributing to this release: + +[@IndrajeetPatil](https://github.com/IndrajeetPatil), [@krlmlr](https://github.com/krlmlr), [@kyleam](https://github.com/kyleam), [@MichaelChirico](https://github.com/MichaelChirico), [@mvanaman](https://github.com/mvanaman), [@olivroy](https://github.com/olivroy), and [@vvarik](https://github.com/vvarik). + +# styler 1.9.1 + +**Bug fixes** + +- Fix interaction between cache and stylerignore that could produce invalid code (#1072). +- Don't remove line break around `{{` and comments that can yield invalid code (#1070). +- Styling empty roxygen code examples don't cause errors anymore (#1096). +- Double indent is also kept if there is only one argument (#1094). +- Improved blank lines handling for roxygen examples (#1085). +- style roxygen examples even if nothing comes after (#1089). + +**Other** + +- Document `"qmd"` as a valid `filetype` (#1091). + +Thanks for everyone who contributed to this release: + +[@dpprdan](https://github.com/dpprdan), [@flying-sheep](https://github.com/flying-sheep), [@giocomai](https://github.com/giocomai) and [@MichaelChirico](https://github.com/MichaelChirico). + + +# styler 1.9.0 -style_pkg(pkg = ".", - ..., style = tidyverse_style, transformers = style(...), filetype = "R", - exclude_files = "R/RcppExports.R" -) +**Features** +- The tidyverse recently introduced double-indention for function declarations + that don't fit on one line. It indents two levels, i.e. 4 spaces if you `indent_by` two spaces. -style_file(path, - ..., style = tidyverse_style, transformers = style(...) -) +```r +# old style: remains compliant and won't be re-styled +my_fun <- function(long_argument = 2, + indent_up_to_first = "x") { + # ... +} -style_text(text, ..., style = tidyverse_style, transformers = style(...)) +# new style: now also compliant and won't be re-styled +my_fun <- function( + long_argument = 2, + indent_double = "x") { + # ... 
+} ``` -### style guides +You can also use the R package [{codegrip}](https://github.com/lionel-/codegrip) +to toggle between the two modes (#1083). + +**Bug fixes** + +- Previously styled code that is now stylerignored should always be formatted + correctly. It boils down to the requirement that stylerignore sequences must + always be in the same block (#1082). +- styling around `{{` and comments now yields parsable output (#1088). +- trailing blank lines in roxygen code examples are removed (#1085). +- roxygen code examples that don't have any code following after them are now + also styled (#1067). + +**Other user-facing changes** + +- Less noisy communication if R option `styler.cache_root` is not set (#1063). + +**Infrastructure** + +- use {lintr} config (#1057, #1059) and pre-commit hook (#1064). +- use new {pkgdown} hook, check for parsable yaml and mixed line ending (#1080, + #1081). +- update GitHub Actions workflow versions one time (#1073) and add dependabot + for the future (#1974). +- bdr test for additional examples (#1068). +- check for link rot regularly (#1077, #1086). + +**Internals** + +- replace retired `purrr::when()` with `if` statements (#1066). +- more integer literals (#1054). +- Consistently use `@examplesIf` for conditionally running examples (#1071). +- document imports in a single file (#1060). +- format YAML files (#1061). + +A big shout out to anyone who contributed to this release: + +[@balthasars](https://github.com/balthasars), +[@hadley](https://github.com/hadley), +[@IndrajeetPatil](https://github.com/IndrajeetPatil), +[@juliangrimm225](https://github.com/) and +[@krlmlr](https://github.com/krlmlr). + +# styler 1.8.1 + +**Features** + +- Expose internals used with other style guides (\@Robinlovelace + + collaborators, #1043, #1052). + + +**Other** + +- Bump minimal version requirement on {withr} as `...` in + `withr::local_options()` was introduced only in v.2.3.0 (#1051). +- Rename internal function `set_linebreak_after_ggplot2_plus()` to + `set_line_break_after_ggplot2_plus()` for consistency (\@Polkas, #1049). +- Reformat contributing guidelines (#1047). +- Improve YAML formatting for pkgdown (#1042). +- Simplify caching internal's conditionals with `rlang::%||%` (#1041). +- Only run {pkgapi} if available (#1039). +- Typos (\@MichaelChirico, #1038) + +This release was requested by CRAN to resolve an R CMD Check note (#1044). +A big hand to everyone who made this release possible: + +[@DaveJarvis](https://github.com/DaveJarvis), +[@IndrajeetPatil](https://github.com/IndrajeetPatil), +[@lorenzwalthert](https://github.com/lorenzwalthert), +[@MichaelChirico](https://github.com/MichaelChirico), +[@Polkas](https://github.com/Polkas), and +[@Robinlovelace](https://github.com/Robinlovelace). + + +# styler 1.8.0 + +{styler} 1.8.0 comes with a host of new features, around 40% speed improvement, +bug fixes and the removal of 8 recursive dependencies. We also welcome +\@IndrajeetPatil as a new contributor to {styler}, who has contributed +significantly to this and and previous releases. + +**Features** + +- `style_dir()` and `style_pkg()` now default to styling all supported file + formats (`.R`, `.Rmd`, `.Rmarkdown`, `.Rnw`, and `.qmd`) in the (package) + directory (#965, #931, #1033). +- `style_pkg()` now excludes the auto-generated `R/cpp11.R` file (#977). +- minimum needed R version is now bumped to `3.5` (#986). + +- alignment is now detected for function declaration in a similar way as for + function calls (#968). 
+- new R option `styler.ignore_alignment` controls if alignment should be + detected (and preserved) or not (#932). + + +**Bug Fixes** + +- alignment is detected in `tibble::tribble()` (and similar) calls with more + than 3 columns when left aligned (#945). + +- fix alignment detection for one column, mixed named/unnamed (#1035). + +- if there are only empty lines in a code chunk, they are all removed (#936). + +- apply rules for [ to [[ and its closing counterpair (#1030) + +- there is now at most one line break after `{` and before `#` (#952, #1022). + +- line breaks may be added to function calls to ensure indention symmetry for + round braces (#975). + +- the cache is also invalidated on changing the stylerignore markers (#932). + +- `{` is not put on a new line after `=` and in `function() {` for some edge +cases (#939). + +- `while ({})` statements are now handled the same way as function statements + with regards to breaking lines (#967). + +- parsing of {roxygen2} example comments now also works for edge cases when + there is no literal code immediately following after the end of the example + section (#940). + +- files with no tokens in it are now transformed into zero-byte files (#962). + +**Documentation** + +- old (and outdated) vignettes have been removed (#955). To access them, do + `git checkout v1.0.0`. +- minor improvements to the documentation (#958). +- turned off `styler.colored_print.vertical` in vignettes so ANSI output of + {prettycode} not messing with {pkgdown} (#956, #957). + + +**Performance and code quality improvements** + +- use integer literals and avoid coercions where needed (#994). +- don't preserve names for `unlist()` (#998). +- remove unused variables (#999). +- get rid of lints with performance implications (#1000). +- use more efficient match() alternative (#1001). +- don't use `nrow` arg in `new_tibble()` calls (#1003). +- performance improvements with `if()` + `else()` instead of `ifelse()` (#1006). +- replace tibbles with data frames to improve performance (#1007). +- simplify `styler_df()` signature (#1009). +- minor cleanup (#1016). +- non-exported and unused functions `odd()` and `even()` were removed + (#989). +- all (R)md files in this project's source code are now formatted with default + pandoc markdown formatter. This conversion is required when using the visual + mode in RStudio (#941). +- improved code quality by fixing {lintr} warnings (#960, #1028). + + +**Dependency related changes** + +In total, 8 recursive dependencies are removed: {ellipsis}, {pillar}, +{rematch2}, {tibble}, {utf8}, {fansi}, {lifecycle}, {pkgconfig}. + +- don't import entire tibble package (#1007). +- drop {rematch2} dependency (#1011). + + +**Infrastructure** + +- upgrade testing infra to testthat 3e (#949). +- run tests in parallel (#978). +- run some tests sequentially (#1031) +- better stack tracing for profiling (#979, #980). +- add flags to skip code coverage for zzz.R (#1005). +- error now on R CMD note (#987). +- test on latest Ubuntu instead of Ubuntu 18.04 (#982). +- use latest GitHub Actions for R (#1034). +- update {pkgdown} action to always build, but only deploy on default branch + (#946). +- remove pre-commit push hook for news entry (#1023). 
+ +A big hand to everyone who made this release possible: + +[@behrman](https://github.com/behrman), +[@EngineerDanny](https://github.com/EngineerDanny), [@gavinsimpson](https://github.com/gavinsimpson), [@IndrajeetPatil](https://github.com/IndrajeetPatil), [@jabenninghoff](https://github.com/jabenninghoff), +[@krlmlr](https://github.com/krlmlr), +[@lorenzwalthert](https://github.com/lorenzwalthert), [@MichaelChirico](https://github.com/MichaelChirico), [@moodymudskipper](https://github.com/moodymudskipper), [@RaymondBalise](https://github.com/RaymondBalise), [@Robinlovelace](https://github.com/Robinlovelace), [@sebffischer](https://github.com/sebffischer), +[@sgorm123](https://github.com/sgorm123), [@stefanoborini](https://github.com/stefanoborini), and [@wdkrnls](https://github.com/wdkrnls). + +# styler 1.7.0 + +- if `else` follows directly after `if`, line breaks are removed (#935). + +**API changes** + +- new R option `styler.cache_root` (defaulting to `"styler"`) that determines + the sub-directory under the {R.cache} cache directory that {styler} uses. + Non- default caches won't be cleaned up by {styler}. We suggest + `"styler-perm"` (also used by {precommit}). + +- stylerignore markers are now interpreted as regular expressions instead of + comments that must match exactly. This allows to specify multiple markers + in one regular expression for `styler.ignore_start` and + `styler.ignore_stop`, e.g. to use markers for lintr and styler on the same + line, you can use + `options(styler.ignore_start = "nolint start|styler: off"`: + + ``` r + # nolint start, styler: off + 1 +1 + # nolint end + # styler: on + ``` + + As a consequence of this approach, the defaults for `styler.ignore_start` + and `styler.ignore_stop` omit the `#` (#849). + +**Features** + +- {styler} can be ran via GitHub Actions using + `usethis::use_github_action("style")` (#914). +- added guarantee that styled code is parsable (#892). +- Developers can now create style guides with indention characters other than + spaces (#916). + +**Documentation** + +- Add vignette on distributing style guide (#846, #861). +- Fix argument name `filetype` in Example for `style_dir()` (#855). + +**Bug fixes** + +- Piped function without brackets `substitute(x %>% y)` don't get `()` added + anymore for one level deep (not more yet, see #889), as this can change + outcome of the code (#876). +- `.onLoad()` method no longer broken with {cli} \>= 3.1 (#893). +- Function calls containing `+` should no longer give any error on styling + when there are comments and line breaks under certain circumstances (#905). +- rules that add tokens don't break stylerignore sequences anymore (#891). +- Alignment detection respects stylerignore (#850). +- Unaligned expressions with quoted key (e.g. `c("x" = 2)`) are now correctly + detected (#881). +- `~` causes now indention, like `+`, `-`, `|` etc. (#902). +- `Warning: Unknown or uninitialised column:` was fixed (#885). +- function calls with unequal number of token on different lines are no + longer deemed aligned if there are arbitrary spaces around the tokens on + the lines with most tokens (#902). +- if a line starts with `EQ_SUB` (`=`), the corresponding key is moved to + that line too (#923). +- ensure a trailing blank line also if the input is cached (#867). +- Preserve trailing blank line in roxygen examples to simplify concatenation + of examples (#880). +- `indenty_by` is now also respected when curly braces are added to an if + statement by {styler} (#915). 
+- An error is now thrown on styling if input unicode characters can't be + correctly parsed for Windows and R \< 4.2 (#883). +- styling of text does not error anymore when the R option `OutDec` is set to + a non-default value (#912). + +**Infrastructure** + +- Remove dependency on {xfun} (#866). +- Remove {glue} dependency that was only used by {touchstone} script and is + declared as a dependency already in the respective action (#910). +- Bump minimal R requirement to 3.4 in line with the + [tidyverse](https://www.tidyverse.org/blog/2019/04/r-version-support/), + which allowed to remove the dependency at {backports} and some exception + handling. +- rename default branch to main (#859). +- the built package size has been reduced by \~50% by listing `*-in_tree` + files in `.Rbuildignore` (#879). +- Enable pre-commit.ci (#843). +- use pre-commit via GitHub Actions (#872). +- terminate running jobs on new push to save resources (#888). +- Use the {touchstone} GitHub Action instead of the literal script (#889). +- upgrade R CMD check Github Actions to use `v2`. +- {styler} test are relaxed to not assume a specific error message on the + wrong usage of `_` (#929). + +Thanks to all contributors that made this release possible: + +[\@bersbersbers](https://github.com/bersbersbers), +[\@daniel-wrench](https://github.com/daniel-wrench), +[\@dbykova](https://github.com/dbykova), +[\@EngrStudent](https://github.com/EngrStudent), +[\@hadley](https://github.com/hadley), +[\@IndrajeetPatil](https://github.com/IndrajeetPatil), +[\@jam1015](https://github.com/jam1015), +[\@jooyoungseo](https://github.com/jooyoungseo), +[\@kalaschnik](https://github.com/kalaschnik), +[\@kaytif](https://github.com/kaytif), [\@kpagacz](https://github.com/kpagacz), +[\@krlmlr](https://github.com/krlmlr), [\@lionel-](https://github.com/lionel-), +[\@lorenzwalthert](https://github.com/lorenzwalthert), +[\@maelle](https://github.com/maelle), +[\@MichaelChirico](https://github.com/MichaelChirico), +[\@mine-cetinkaya-rundel](https://github.com/mine-cetinkaya-rundel), +[\@neuwirthe](https://github.com/neuwirthe), +[\@Polkas](https://github.com/Polkas), [\@pwang2](https://github.com/pwang2), +[\@sebffischer](https://github.com/sebffischer), +[\@ShixiangWang](https://github.com/ShixiangWang), +[\@ssh352](https://github.com/ssh352), and +[\@xjtusjtu](https://github.com/xjtusjtu). + +# styler 1.6.2 + +- clean up cache files older than one week (#842). + +# styler 1.6.1 + +- Files with `.Rmarkdown` extension are now recognized as an R markdown files + in `style_file()` and friends (#824). + +- Don't break line before comments in pipes (#822). + +- Ordinary comments (starting with `#`) that break a roxygen code example + block (starting with `#'`) are now recognized and preserved (#830). + +- `@examplesIf` conditions longer than one line after styling throw an error + for compatibility with {roxygen2} (#833). + +- R Markdown chunk headers are no longer required to be parsable R code + (#832). + +- Break the line between `%>%` and `{` inside and outside function calls + (#825). + +- Add language server to third-party integration vignette (#835). + +- improved test setup with fixtures and similar (#798). 
+ +We'd like to thank all people who helped making this release possible: + +[\@bersbersbers](https://github.com/bersbersbers), +[\@eutwt](https://github.com/eutwt), +[\@IndrajeetPatil](https://github.com/IndrajeetPatil), +[\@j-mammen](https://github.com/j-mammen), +[\@jennybc](https://github.com/jennybc), +[\@JohannesNE](https://github.com/JohannesNE), +[\@jonkeane](https://github.com/jonkeane), +[\@lorenzwalthert](https://github.com/lorenzwalthert), and +[\@MichaelChirico](https://github.com/MichaelChirico). + +# styler 1.5.1 + +## Alignment detection + +- Code with left alignment after `=` in function calls is now recognized as + aligned and won't be reformatted (#774, #777). + + # already detected previously + call( + x = 12345, + y2 = 17 + ) + + # newly detected + call( + x = 12345, + y2 = 17 + ) + +- Similarly, left aligned after comma is now detected (#785, #786). + + # previously detected + call( + x = 12345, "It's old", + y2 = 17, "before" + ) + + tribble( + ~x, ~y, + "another", 1:3, + "b", 1211234 + ) + + # newly detected + call( + x = 2, p = "another", + y = "hhjkjkbew", x = 3 + ) + + + tribble( + ~x, ~y, + "another", 1:3, + "b", 1211234 + ) + + Also see `vignette("detect-alignment")`. + +## Other new features + +- The base R pipe as introduced in R 4.1.0 is now styled the same way the + magrittr pipe is (#803). +- code chunks with explicit `tidy = FALSE` in an Rmd or Rnw code header are + not styled anymore. This can be handy when the code can't be parsed, e.g. + within a learnr tutorial (#790). +- `#>` is recognized as an output marker and no space is added after `#` + (#771). + +## Minor changes and fixes + +- No curly braces are added to else statements if they are within a pipe, as + this can change evaluation logic of code involving the magrittr dot in rare + cases (#816). +- Line breaks between `}` and `else` are removed (#793). +- In function calls, code after `= #\n` is indented correctly (#814). +- Multi-expressions containing multiple assignments no longer remove line + breaks if they are not causing blank lines (#809). +- `exclude_dirs` in `style_pkg()` is now properly respected if it is a + sub-directory of a directory that is scheduled for styling (e.g. + `test/testthat/some/dir`) (#811). +- The user is not prompted anymore to confirm the creation of a permanent + cache as R.cache \>= 0.15.0 uses a standard location in line with CRAN + policies (#819). +- R code chunks in nested non-R chunks in R markdown don't yield an error + anymore when document is styled, chunks are still not styled (#788, #794). +- `cache_activate()` and `cache_deactivate()` now respect the R option + `styler.quiet` (#797). +- `multi_line` attribute in parse table is now integer, not boolean (#782). +- The style guide used in Addin is verified when set via R option (#789). +- Improve pkgdown author URLs (#775). +- Upgrade touchstone infra (#799, #805). +- Don't test on R 3.3 anymore as tidyverse [supports only four previous + releases](https://www.tidyverse.org/blog/2019/04/r-version-support/) + (#804). +- Update Github Actions workflow (#810). 
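+
+As an illustration of the base R pipe support listed under "Other new
+features" above, a minimal sketch (requires R \>= 4.1 so that `|>` can be
+parsed; the output is not reproduced here):
+
+``` r
+library(styler)
+# both pipes receive the same spacing treatment
+style_text("mtcars|>head(3)")
+style_text("mtcars %>%head(3)")
+```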
+
+We'd like to thank everyone who has furthered the development of the latest
+release of styler through their contributions in issues and pull requests:
+
+[\@ardydavari](https://github.com/ardydavari),
+[\@gadenbuie](https://github.com/gadenbuie),
+[\@IndrajeetPatil](https://github.com/IndrajeetPatil), \@jasonhan-vassar,
+[\@laresbernardo](https://github.com/laresbernardo),
+[\@lorenzwalthert](https://github.com/lorenzwalthert),
+[\@MichaelChirico](https://github.com/MichaelChirico),
+[\@Moohan](https://github.com/Moohan),
+[\@njtierney](https://github.com/njtierney),
+[\@pat-s](https://github.com/pat-s),
+[\@psychelzh](https://github.com/psychelzh),
+[\@pvalders](https://github.com/pvalders),
+[\@RoyalTS](https://github.com/RoyalTS), and
+[\@russHyde](https://github.com/russHyde).
+
+# styler 1.4.1
+
+- Fix the interaction between the cache and `base_indention`. This also fixes
+  the Addin for styling a selection with base indention repeatedly (#764).
+- Add more examples to the `styler_*` help files (#762).
+- Hexadecimal integers now preserve the trailing `L` when styled (#761).
+- Add a pre-push hook to make sure news bullets are added to each PR (#765).
+
+Thanks to everyone who contributed to this release:
+
+[\@krlmlr](https://github.com/krlmlr),
+[\@lorenzwalthert](https://github.com/lorenzwalthert), and
+[\@renkun-ken](https://github.com/renkun-ken).
+
+# styler 1.4.0
+
+## API Changes
+
+**new**
+
+- `style_file()` and friends gain the argument `dry` to control whether
+  changes should be applied to files or not (#634).
+
+- `style_file()` and friends gain the argument `base_indention` (defaulting
+  to 0) to control by how much the output code is indented (#649, #692). The
+  Addin for styling a selection picks that up, e.g. you can style a function
+  body and indention is preserved (#725).
+
+- Added an option for disabling all communication when using the package
+  (`styler.quiet`) (#640).
+
+- `scope` in `tidyverse_style()` can now be specified with higher granularity
+  through `I()`, e.g. `I(c('spaces', 'tokens'))` allows us to style spaces
+  and tokens without styling line breaks and indention. Previously, only a
+  string was allowed and all less invasive scopes were included, e.g. if you
+  wanted to style tokens, you always had to style spaces, indention and line
+  breaks as well (#705, #707). A short example appears at the end of this
+  release entry.
+
+- Added an option (`styler.test_dir_writeable`) that changes test behavior to
+  not directly modify test files in the current directory (#548).
+
+- New argument `transformers_drop` in `create_style_guide()`, to be populated
+  with the new helper function `specify_transformers_drop()` for specifying
+  conditions under which transformers are not going to be used and can
+  therefore be omitted without affecting the result of styling (#711).
+
+**deprecated**
+
+- The environment variable `save_after_styling` is deprecated in favor of the
+  R option `styler.save_after_styling` to control if a file is saved after
+  styling with the RStudio Addin. Note that in RStudio \>= 1.3.0, you can
+  auto-save edits in general (Code -\> Saving -\> Auto-Save), e.g. on idle
+  editor or focus loss, so this feature becomes less relevant (#631, #726).
+
+## Major changes
+
+- styler is now distributed under the MIT license (#751).
+ +- Documentation overhaul: New README, new "Get started" pkgdown page, new + vignettes on `strict = FALSE`, `Adoption` renamed to + `Third-party integrations` (#741), adding search to pkgdown (#623), group + functions in pkgdown reference page (#625), minor other doc improvements + (#643, #618, #614, #677, #651, #667, #672, #687, #752, #754). + +- `@exampleIsf` roxygen tag for conditional examples is now supported (#743). + +- blank lines in function calls and headers are now removed, for the former + only when there are no comments before or after the blank line (#629, #630, + #635, #723). + +- speed improvements: 15% faster on new code, 70% on repeated styling of + compliant code (The latter is not so relevant because it was almost + instantaneous already). Most relevant contributions were #679, #691, #681, + #711, #739. + +- `#<<` is now recognized as the xaringan marker and no space is added + after`#` (#700). + +## Minor changes and fixes + +- `style_dir()` and `style_pkg()` now apply directory exclusion recursively + with `exclude_dirs` (#676). + +- `switch()` now has line breaks after every argument to match the tidyverse + style guide (#722, #727). + +- unary `+` before a function call does not give an error anymore, as before + version 1.3.0 (#697). + +- certain combinations of `stylerignore` markers and cached expressions now + don't give an error anymore (#738). + +- cache is now correctly invalidated when style guide arguments change + (#647). + +- empty lines are now removed between pipes and assignments (#645, #710). + +- multiple `@examples` roxygen tags in a code block of `#'` are no longer + squashed (#748). + +- roxygen code examples starting on the same line as the `@examples` tag are + no longer moved to the next line (#748). + +- always strip trailing spaces and make cache insensitive to it (#626). + +- `style_text()` can now style all input that `is.character()`, not just if + it inherits from classes `character`, `utf8` or `vertical` (#693). + +- logical operators within square braces are now moved from the start of a + line to the end of the previous line (#709). + +- spaces are now removed before `[` and `[[` (#713). + +- The internal `create_tree()` only used in testing of styler now works when + the cache is activated (#688). + +- simplification of internals (#692). + +## Infrastructure changes + +- switched from travis and AppVeyor to GitHub Actions (#653, #660). + +- Added basic continuous benchmarking with + [lorenzwalthert/touchstone](https://github.com/lorenzwalthert/touchstone) + (#674, #684, #698). + +- include `test-*` files in styling pre-commit hook (#724). 
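+
+A minimal sketch of the finer-grained `scope` specification and the `dry`
+argument described under "API Changes" above (assuming styler \>= 1.4.0; the
+input string is made up for illustration):
+
+``` r
+library(styler)
+# style spaces and tokens only; line breaks and indention are left untouched
+style_text("a<-function( x){x+ 1}", scope = I(c("spaces", "tokens")))
+
+# report which files would change without modifying them
+# (run from the root of a package)
+style_pkg(dry = "on")
+```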
+ +Thanks to all the people who made this release possible: + +[\@assignUser](https://github.com/assignUser), +[\@ColmanHumphrey](https://github.com/ColmanHumphrey), +[\@davidchall](https://github.com/davidchall), +[\@espinielli](https://github.com/espinielli), +[\@giko45](https://github.com/giko45), [\@hadley](https://github.com/hadley), +[\@IndrajeetPatil](https://github.com/IndrajeetPatil), +[\@intiben](https://github.com/intiben), +[\@jamespeapen](https://github.com/jamespeapen), +[\@jthomasmock](https://github.com/jthomasmock), +[\@Kalaschnik](https://github.com/Kalaschnik), +[\@kevinushey](https://github.com/kevinushey), +[\@krlmlr](https://github.com/krlmlr), +[\@lcolladotor](https://github.com/lcolladotor), +[\@MichaelChirico](https://github.com/MichaelChirico), +[\@michaelquinn32](https://github.com/michaelquinn32), +[\@mine-cetinkaya-rundel](https://github.com/mine-cetinkaya-rundel), +[\@pat-s](https://github.com/pat-s), +[\@PMassicotte](https://github.com/PMassicotte), +[\@QuLogic](https://github.com/QuLogic), +[\@renkun-ken](https://github.com/renkun-ken), +[\@RichardJActon](https://github.com/RichardJActon), +[\@seed-of-apricot](https://github.com/seed-of-apricot), +[\@select-id-from-users](https://github.com/select-id-from-users), +[\@SimonDedman](https://github.com/SimonDedman), +[\@stefanoborini](https://github.com/stefanoborini), +[\@swsoyee](https://github.com/swsoyee), and +[\@Winterstorm-j](https://github.com/Winterstorm-j). + +# styler 1.3.2 + +Release upon request by the CRAN team. + +## Minor changes and fixes + +- Add search and reference sections to pkgdown webpage (#623, #625). +- various fixes to handle special cases for caching and stylerignore and + their interaction (#611, #610, #609, #607, #602, #600). +- also test on macOS (#604). +- skip timing tests on CRAN as requested by CRAN team because they did not + pass on all machines (#603). + +# styler 1.3.1 + +Emergency release. In case multiple expressions are on one line and only some +of them are cached, styler can remove code. To reach this state, some of the +expressions must have been styled previously alone and the cache must be +active. Example: + + library(styler) + cache_activate() + #> Using cache 1.3.0 at ~/.Rcache/styler/1.3.0. + style_text("1") + #> 1 + style_text("1 # comment") + #> # comment + +This is obviously detrimental. We have added additional tests and fixed the +problem (#593, #595), but we want repeat the warning from `?style_file` that +all style APIs apart from `style_text()` overwrite code and that styler can +only check the AST remains valid with `scope < "tokens"`. So use this if you +are conservative. Or deactivate the cache with `deactivate_cache()` until it +has fully matured. + +We thank the people who have contributed to this release: + +[\@ellessenne](https://github.com/ellessenne) and +[\@renkun-ken](https://github.com/renkun-ken). + +# styler 1.3.0 + +## Breaking changes + +- `style_pkg()` and `style_dir()` gain a new argument `exclude_dirs` to + exclude directories from styling, by default `renv` and `packrat`. Note + that the defaults won't change the behavior of `style_pkg()` because it + does anyways does not style these directories and they were set for + consistency. + +- `style_file()` and friends now strip `./` in file paths returned invisibly, + i.e. `./script.R` becomes `script.R` (#568). + +## New features + +- ignore certain lines using `# styler: off` and `#styler: on` or custom + markers, see `?stylerignore` (#560). 
+ +- styler caches results of styling, so applying styler to code it has styled + before will be instantaneous. This brings large speed boosts in many + situations, e.g. when `style_pkg()` is run but only a few files have + changed since the last styling or when using the [styler pre-commit + hook](https://github.com/lorenzwalthert/precommit). Because styler caches + by expression, you will also get speed boosts in large files with many + expressions when you only change a few of them. See `?caching` for details + (#538, #578). + +- `create_style_guide()` gains two arguments `style_guide_name` and + `style_guide_version` that are carried as meta data, in particular to + version third-party style guides and ensure the proper functioning of + caching. This change is completely invisible to users who don't create and + distribute their own style guide like `tidyverse_style()` (#572). + +## Minor changes and fixes + +- lines are now broken after `+` in `ggplot2` calls for `strict = TRUE` + (#569). + +- function documentation now contains many more line breaks due to roxygen2 + update to version 7.0.1 (#566). + +- spaces next to the braces in subsetting expressions `[` and `[[` are now + removed (#580). + +- Adapt to changes in the R parser to make styler pass R CMD check again. + (#583). + +Thanks to all contributors involved, in particular +[\@colearendt](https://github.com/colearendt), +[\@davidski](https://github.com/davidski), +[\@IndrajeetPatil](https://github.com/IndrajeetPatil), +[\@pat-s](https://github.com/pat-s), and +[\@programming-wizard](https://github.com). + +# styler 1.2.0 + +## Breaking changes + +- `style_file()` now correctly styles multiple files from different + directories. We no longer display the file name of the styled file, but the + absolute path. This is also reflected in the invisible return value of the + function (#522). + +- `style_file()` and friends do not write content back to a file when styling + does not cause any changes in the file. This means the modification date of + styled files is only changed when the content is changed (#532). + +## New features + +- Aligned function calls are detected and remain unchanged if they match the + styler [definition for aligned function + calls](https://styler.r-lib.org/articles/detect-alignment.html) (#537). + +- curly-curly (`{{`) syntactic sugar introduced with rlang 0.4.0 is now + explicitly handled, where previously it was just treated as two consecutive + curly braces (#528). + +- `style_pkg()`, `style_dir()` and the Addins can now style `.Rprofile`, and + hidden files are now also styled (#530). + +## Minor improvements and fixes + +- Roxygen code examples: leverage `roxygen2` for correct escaping of + expressions that contain `\`, in particular in `dontrun{}` and friends, + allow quoted braces that are not matched (#729). + +- Brace expressions in function calls are formatted in a less compact way to + improve readability. Typical use case: `tryCatch()` (#543). + +- Arguments in function declarations in a context which is indented multiple + times should now be correct. This typically affects `R6::R6Class()` (#546). + +- Escape characters in roxygen code examples are now correctly escaped + (#512). + +- Special characters such as `\n` in strings are now preserved in text and + not turned into literal values like a line break (#554). + +- Style selection Addin now preserves line break when the last line selected + is an entire line (#520). + +- Style file Addin can now properly handle cancelling (#511). 
+ +- The body of a multi-line function declaration is now indented correctly for + `strict = FALSE` and also wrapped in curly braces for `strict = TRUE` + (#536). + +- Advice for contributors in `CONTRIBUTING.md` was updated (#508). + +## Adaption + +- styler is now available through the pre-commit hook `style-files` in + . + +Thanks to all contributors involved, in particular + +[\@Banana1530](https://github.com/Banana1530), +[\@batpigandme](https://github.com/batpigandme), +[\@cpsievert](https://github.com/cpsievert), +[\@ellessenne](https://github.com/ellessenne), +[\@Emiller88](https://github.com/Emiller88), +[\@hadley](https://github.com/hadley), +[\@IndrajeetPatil](https://github.com/IndrajeetPatil), +[\@krlmlr](https://github.com/krlmlr), +[\@lorenzwalthert](https://github.com/lorenzwalthert), +[\@lwjohnst86](https://github.com/lwjohnst86), +[\@michaelquinn32](https://github.com/michaelquinn32), +[\@mine-cetinkaya-rundel](https://github.com/mine-cetinkaya-rundel), +[\@Moohan](https://github.com/Moohan), [\@nxskok](https://github.com/nxskok), +[\@oliverbeagley](https://github.com/oliverbeagley), +[\@pat-s](https://github.com/pat-s), \@reddy-ia, and +[\@russHyde](https://github.com/russHyde) + +# styler 1.1.1 + +This is primarily a maintenance release upon the request of the CRAN team +(#490). + +## Major changes + +- Users can now control style configurations for styler Addins (#463, #500), + using the `Set style` Addin. See `?styler::styler_addins` for details. + +- `return()` is now always put in braces and put on a new line when used in a + conditional statement (#492). + +- `%>%` almost always causes a line break now for `strict = TRUE` (#503). + +## Minor changes + +- `style_pkg()` now also styles the "demo" directory by default (#453). + +- multi-line strings are now styled more consistently (#459). + +- indention in roxygen code example styling (#455) and EOF spacing (#469) was + fixed. + +- indention for for loop edge case (#457) and comments in pipe chain (#482) + were fixed. + +- line-break styling around comma is improved (#479). + +- bug that can cause an error when the variable `text` in any name space + before styler on the search path was defined and did not have length 1 is + fixed (#484). + +- slightly confusing warning about empty strings caused with roxygen code + examples and Rmd was removed. + +- right apostrophe to let package pass R CMD Check in strict Latin-1 locale + was removed (#490, reason for release). + +## Adaption of styler + +Since it's never been mentioned in the release notes, we also mention here +where else you can use styler functionality: + +- `usethis::use_tidy_style()` styles your project according to the tidyverse + style guide. + +- `reprex::reprex(style = TRUE)` to prettify reprex code before printing. To + permanently use `style = TRUE` without specifying it every time, you can + add the following line to your `.Rprofile` (via + `usethis::edit_r_profile()`): `options(reprex.styler = TRUE)`. + +- you can pretty-print your R code in RMarkdown reports without having styler + modifying the source. This feature is implemented as a code chunk option in + knitr. use `tidy = "styler"` in the header of a code chunks (e.g. + ```` ```{r name-of-the-chunk, tidy = "styler"} ````), or + `knitr::opts_chunk$set(tidy = "styler")` at the top of your RMarkdown + script. + +- pretty-printing of [drake](https://github.com/ropensci/drake) workflow data + frames with `drake::drake_plan_source()`. 
+ +- Adding styler as a fixer to the [ale + Plug-in](https://github.com/dense-analysis/ale/pull/2401) for VIM. + +Thanks to all contributors involved, in particular +[\@ArthurPERE](https://github.com/ArthurPERE), +[\@hadley](https://github.com/hadley), [\@igordot](https://github.com/igordot), +[\@IndrajeetPatil](https://github.com/IndrajeetPatil), +[\@jackwasey](https://github.com/jackwasey), +[\@jcrodriguez1989](https://github.com/jcrodriguez1989), +[\@jennybc](https://github.com/jennybc), +[\@jonmcalder](https://github.com/jonmcalder), +[\@katrinleinweber](https://github.com/katrinleinweber), +[\@krlmlr](https://github.com/krlmlr), +[\@lorenzwalthert](https://github.com/lorenzwalthert), +[\@michaelquinn32](https://github.com/michaelquinn32), +[\@msberends](https://github.com/msberends), +[\@raynamharris](https://github.com/raynamharris), +[\@riccardoporreca](https://github.com/riccardoporreca), +[\@rjake](https://github.com/rjake), +[\@Robinlovelace](https://github.com/Robinlovelace), +[\@skirmer](https://github.com/skirmer), +[\@thalesmello](https://github.com/thalesmello), +[\@tobiasgerstenberg](https://github.com/tobiasgerstenberg), +[\@tvatter](https://github.com/tvatter), +[\@wdearden](https://github.com/wdearden), +[\@wmayner](https://github.com/wmayner), and \@yech1990. + +# styler 1.1.0 + +This release introduces new features and is fully backward-compatible. It also +adapts to changes in the R parser committed into R devel (#419). + +## Major Changes + +- styler can now style roxygen code examples in the source code of package + (#332) as well as Rnw files (#431). + +- the print method for the output of `style_text()` (`print.vertical()`) now + returns syntax-highlighted code by default, controllable via the option + `styler.colored_print.vertical` (#417). + +- the README was redesigned (#413). + +- semi-colon expression that contained multiple assignments was fixed (#404). + +## Minor Changes + +- cursor position is remembered for styling via Addin (#416). + +- adapt spacing around tilde for multi-token expressions(#424) and brace edge + case (#425). + +- only add brackets to piped function call if RHS is a symbol (#422). + +- increase coverage again to over 90% (#412). + +- move rule that turns single quotes into double quotes to token modifier in + \`tidyverse_style_guide() (#406). + +- remove line-breaks before commas (#405). + +- removed package dependency enc in favor of xfun (#442). + +Thanks to all contributors for patches, issues and the like: @jonmcalder, +@krlmlr, @IndrajeetPatil, @kalibera, @Hasnep, @kiranmaiganji, @dirkschumacher, +@ClaytonJY, @wlandau, @maurolepore + +# styler 1.0.2 + +This is a maintenance release without any breaking API changes. + +## Major Changes + +- Fixed indention for named multi-line function calls (#372). + +- Non-R code chunks in `.Rmd` files are now respected and won't get styled + (#386). + +## Minor Changes + +- Fixing an edge case in which, if very long strings were present in the + code, tokens could be replaced with wrong text (#384). + +- Spacing around tilde in formulas depends now on whether there is a LHS in + the formula (#379). + +- Spaces are now also added around `EQ_SUB` (`=`) (#380). + +- Added `CONTRIBUTING.md` to outline guidelines for contributing to styler. + +- More informative error messages for parsing problems (#401, #400). + +- Improved documentation (#387). 
+ +Thanks to all contributors for patches, issues and the like: @katrinleinweber, +@krlmlr, @dchiu911, @ramnathv, @aedobbyn, @Bio7, @tonytonov, @samhinshaw, @fny, +@vnijs, @martin-mfg, @NGaffney, @dchiu911. + +# styler 1.0.1 + +This is a maintenance release without any breaking API changes. + +## Major & dependency related changes + +- Removed implicit `dplyr` dependency via `purrr:::map_dfr()` (thanks + @jimhester, #324). + +- Added required minimal version dependency for purr (`>= 0.2.3`) (#338). + +- We rely on the tibble package which was optimized for speed in `v1.4.2` so + styler should run \~2x as fast + [(#348)](https://github.com/tidyverse/tibble/pull/348). For that reason, + styler now depends on `tibble >= 1.4.2`. + +- In the dependency `enc`, a bug was fixed that removed/changed non-ASCII + characters. Hence, styler now depends on `enc >= 0.2` (#348). + +## Minor changes + +- We're now recognizing and respecting more DSLs used in R comments: rplumber + (`#*`, #306), shebang `#/!` (#345), knitr chunk headers for spinning (`#+` + / `#-`, #362). + +- Named arguments can stay on the first line if call is multi-line (#318). + +- No space anymore with `tidyverse_style()` after `!!` since with + `rlang 0.2`, `!!` now binds tighter (#322), spacing around `~` (#316), no + space anymore around `^` (#308). + +- Code chunks in Rmd documents that don't use the R engine are no longer + formatted (#313). + +- Various bug fixes and edge case improvements. + +Thanks to all contributors for patches, issues and the like: @devSJR, @klrmlr, +@yutannihilation, @samhinshaw, @martin-mfg, @jjramsey, @RMHogervorst, @wlandau, +@llrs, @aaronrudkin, @crew102, @jkgrain, @jennybc, @joranE. + +# styler 1.0.0 + +Initial release. + +## stylers + +These are functions used to style code. They style a directory, a whole +package, a file or a string. + + style_dir(path = ".", + ..., style = tidyverse_style, transformers = style(...), + filetype = "R", recursive = TRUE, exclude_files = NULL + ) + + style_pkg(pkg = ".", + ..., style = tidyverse_style, transformers = style(...), filetype = "R", + exclude_files = "R/RcppExports.R" + ) + + + style_file(path, + ..., style = tidyverse_style, transformers = style(...) + ) + + style_text(text, ..., style = tidyverse_style, transformers = style(...)) + +## style guides + These functions are the style guides implemented. -``` -tidyverse_style( - scope = "tokens", - strict = TRUE, - indent_by = 2, - start_comments_with_one_space = FALSE, - reindention = tidyverse_reindention(), - math_token_spacing = tidyverse_math_token_spacing() -) -tidyverse_reindention() -tidyverse_math_token_spacing()) -``` -### style guide creators + tidyverse_style( + scope = "tokens", + strict = TRUE, + indent_by = 2, + start_comments_with_one_space = FALSE, + reindention = tidyverse_reindention(), + math_token_spacing = tidyverse_math_token_spacing() + ) + tidyverse_reindention() + tidyverse_math_token_spacing()) + +## style guide creators + This function is used to create a style guide. 
-``` -create_style_guide( - initialize = default_style_guide_attributes, - line_break = NULL, - space = NULL, - token = NULL, - indention = NULL, - use_raw_indention = FALSE, - reindention = tidyverse_reindention() -) -``` -### Helpers + create_style_guide( + initialize = default_style_guide_attributes, + line_break = NULL, + space = NULL, + token = NULL, + indention = NULL, + use_raw_indention = FALSE, + reindention = tidyverse_reindention() + ) + +## Helpers + These are helper functions used to specify the style guides in use. -``` -specify_math_token_spacing( - zero = NULL, - one = c("'+'", "'-'", "'*'", "'/'", "'^'") -) - -specify_reindention( - regex_pattern = NULL, - indention = 0, - comments_only = TRUE -) -initialize_default_attributes(pd_flat) -``` + specify_math_token_spacing( + zero = NULL, + one = c("'+'", "'-'", "'*'", "'/'", "'^'") + ) + + specify_reindention( + regex_pattern = NULL, + indention = 0, + comments_only = TRUE + ) + initialize_default_attributes(pd_flat) diff --git a/R/addins.R b/R/addins.R index c5d8a9488..61dcb24e3 100644 --- a/R/addins.R +++ b/R/addins.R @@ -1,40 +1,200 @@ #' Stylers for RStudio Addins #' #' Helper functions for styling via RStudio Addins. -#' +#' @section Addins: +#' - Set style: Select the style transformers to use. For flexibility, the user +#' input is passed to the `transformers` argument, not the `style` argument, +#' so entering `styler::tidyverse_style(scope = "spaces")` in the Addin is +#' equivalent to `styler::style_text("1+1", scope = "spaces")` and +#' `styler::style_text("1+1", transformers = styler::tidyverse_style(scope = "spaces"))` +#' if the text to style is `1+1`. The style transformers are memorized +#' within an R session via the R option `styler.addins_style_transformer` so +#' if you want it to persist over sessions, set the option +#' `styler.addins_style_transformer` in your `.Rprofile`. +#' - Style active file: Styles the active file, by default with +#' [tidyverse_style()] or the value of the option +#' `styler.addins_style_transformer` if specified. +#' - Style selection: Same as *Style active file*, but styles the highlighted +#' code instead of the whole file. #' @section Auto-Save Option: #' By default, both of the RStudio Addins will apply styling to the (selected) #' file contents without saving changes. Automatic saving can be enabled by -#' setting the environment variable `save_after_styling` to `TRUE`. -#' +#' setting the R option `styler.save_after_styling` to `TRUE`. #' Consider setting this in your `.Rprofile` file if you want to persist #' this setting across multiple sessions. Untitled files will always need to be #' saved manually after styling. -#' +#' @section Life cycle: +#' The way of specifying the style in the Addin as well as the auto-save option +#' (see below) are experimental. We are currently considering letting the user +#' specify the defaults for other style APIs like [styler::style_text()], +#' either via R options, config files or other ways as well. +#' See [r-lib/styler#319](https://github.com/r-lib/styler/issues/319) for +#' the current status of this. 
#' @name styler_addins #' @family stylers -#' @seealso [Sys.setenv()] +#' @examples +#' \dontrun{ +#' # save after styling when using the Addin +#' options(styler.save_after_styling = TRUE) +#' # only style with scope = "spaces" when using the Addin +#' val <- "styler::tidyverse_style(scope = 'spaces')" +#' options( +#' styler.addins_style_transformer = val +#' ) +#' } NULL -#' @describeIn styler_addins Styles the active file with [tidyverse_style()] and -#' `strict = TRUE`. + + +#' @keywords internal style_active_file <- function() { - transformer <- make_transformer(tidyverse_style()) + communicate_addins_style_transformers() context <- get_rstudio_context() - if (is_rmd_file(context$path)) { - out <- transform_rmd(context$contents, transformer) - } else if (is_plain_r_file(context$path) | is_unsaved_file(context$path)) { + transformer <- make_transformer(get_addins_style_transformer(), + include_roxygen_examples = TRUE, + base_indention = 0L, + warn_empty = is_plain_r_file(context$path) + ) + is_r_file <- any( + is_plain_r_file(context$path), + is_unsaved_file(context$path), + is_rprofile_file(context$path) + ) + + if (is_rmd_file(context$path) || is_qmd_file(context$path)) { + out <- transform_mixed(context$contents, transformer, filetype = "Rmd") + } else if (is_rnw_file(context$path)) { + out <- transform_mixed(context$contents, transformer, filetype = "Rnw") + } else if (is_r_file) { out <- try_transform_as_r_file(context, transformer) } else { - stop("Can only style .R and .Rmd files.", call. = FALSE) + abort("Can only style .R, .Rmd and .Rnw files.") } rstudioapi::modifyRange( - c(1, 1, length(context$contents) + 1, 1), - paste0(out, collapse = "\n"), id = context$id + c(1L, 1L, length(context$contents) + 1L, 1L), + paste0(ensure_last_n_empty(out), collapse = "\n"), + id = context$id ) - if (Sys.getenv("save_after_styling") == TRUE && context$path != "") { + if (save_after_styling_is_active() && context$path != "") { rstudioapi::documentSave(context$id) } + rstudioapi::setCursorPosition(context$selection[[1L]]$range) +} + +#' Wrapper around [style_pkg()] for access via Addin. +#' @keywords internal +style_active_pkg <- function() { + communicate_addins_style_transformers() + style_pkg(transformers = get_addins_style_transformer()) +} + +#' Heuristic to see if a file styled with the addin should be saved or not. +#' +#' Using the R option `"styler.save_after_styling"` and if unset, checks legacy +#' method via environment variable `save_after_styling`. +#' @keywords internal +save_after_styling_is_active <- function() { + op_old <- as.logical(toupper(Sys.getenv("save_after_styling"))) + op_new <- getOption("styler.save_after_styling", default = "") + if (!is.na(op_old)) { + rlang::warn(paste( + "Using the environment variable save_after_styling is depreciated and", + "won't work in a future version of styler. Please use the R option", + "`styler.save_after_styling` to control the behavior. If both are set,", + "the R option is taken." + )) + } + + if (op_new == "") { + if (is.na(op_old)) { + op <- FALSE + } else { + op <- op_old + } + } else { + op <- op_new + } + op +} + +#' Styles the highlighted selection in a `.R` or `.Rmd` file. 
+#' @keywords internal +style_selection <- function() { + communicate_addins_style_transformers() + context <- get_rstudio_context() + text <- context$selection[[1L]]$text + if (all(nchar(text) == 0L)) abort("No code selected") + out <- style_text( + text, + transformers = get_addins_style_transformer(), + base_indention = nchar(gsub("^( *).*", "\\1", text)) + ) + rstudioapi::modifyRange( + context$selection[[1L]]$range, + paste0(c( + out, + if (context$selection[[1L]]$range$end[2L] == 1L) "" + ), collapse = "\n"), + id = context$id + ) + if (save_after_styling_is_active() && context$path != "") { + invisible(rstudioapi::documentSave(context$id)) + } +} + +get_rstudio_context <- function() { + rstudioapi::getActiveDocumentContext() +} + +#' Asks the user to supply a style +#' @keywords internal +set_style_transformers <- function() { + current_style <- get_addins_style_transformer_name() + new_style <- + rstudioapi::showPrompt( + "Select a style", + "Enter the name of a style transformer, e.g. `styler::tidyverse_style()`", + current_style + ) + if (!is.null(new_style)) { + parsed_new_style <- rlang::try_fetch( + { + transformers <- eval(parse(text = new_style)) + style_text( + c("a = 2", "function() {", "NULL", "}"), + transformers = transformers + ) + }, + error = function(e) { + abort(paste0( + "The selected style transformers \"", + new_style, "\" is not valid: ", e$message + )) + } + ) + options(styler.addins_style_transformer = new_style) + } + + invisible(current_style) +} + +#' Return the style function or name +#' @keywords internal +get_addins_style_transformer_name <- function() { + getOption("styler.addins_style_transformer") +} + +#' @rdname get_addins_style_transformer_name +#' @keywords internal +get_addins_style_transformer <- function() { + eval(parse(text = get_addins_style_transformer_name())) +} + +communicate_addins_style_transformers <- function() { + style_name <- get_addins_style_transformer_name() + if (!getOption("styler.quiet", FALSE)) { + cat("Using style transformers `", style_name, "`\n", sep = "") + } } #' Style a file as if it was an .R file @@ -46,35 +206,22 @@ style_active_file <- function() { #' @param context The context from `styler:::get_rstudio_context()`. #' @param transformer A transformer function most conveniently constructed with #' [make_transformer()]. +#' @keywords internal try_transform_as_r_file <- function(context, transformer) { - tryCatch( + rlang::try_fetch( transformer(context$contents), - error = function(e) stop( - paste( - "Styling of unsaved files is only supported for R files with valid code.", - "Please save the file (as .R or .Rmd) and make sure that the R code in it", - "can be parsed. Then, try to style again.", - "The error was \n", e - ), call. = FALSE + error = function(e) { + preamble_for_unsaved <- paste( + "Styling of unsaved files is only supported for R files with valid ", + "code. Please save the file (as .R or .Rmd) and make sure that the R ", + "code in it can be parsed. Then, try to style again." ) - ) -} -#' @describeIn styler_addins Styles the highlighted selection in a `.R` or -#' `.Rmd` file. 
-style_selection <- function() { - context <- get_rstudio_context() - text <- context$selection[[1]]$text - if (all(nchar(text) == 0)) stop("No code selected") - out <- style_text(text) - rstudioapi::modifyRange( - context$selection[[1]]$range, paste0(out, collapse = "\n"), id = context$id + if (context$path == "") { + abort(paste0(preamble_for_unsaved, " The error was \n", e$message)) + } else { + abort(e$message) + } + } ) - if (Sys.getenv("save_after_styling") == TRUE && context$path != "") { - rstudioapi::documentSave(context$id) - } -} - -get_rstudio_context <- function() { - rstudioapi::getActiveDocumentContext() } diff --git a/R/communicate.R b/R/communicate.R index b7f14e520..5b1ca30a0 100644 --- a/R/communicate.R +++ b/R/communicate.R @@ -1,13 +1,17 @@ #' Communicate a warning if necessary #' -#' If roundtrip verification was not possible, issue a warning to review the +#' If round trip verification was not possible, issue a warning to review the #' changes carefully. #' @param changed Boolean with indicating for each file whether or not it has #' been changed. -#' @inheritParams can_verify_roundtrip +#' @inheritParams parse_tree_must_be_identical +#' @keywords internal communicate_warning <- function(changed, transformers) { - if (any(changed, na.rm = TRUE) && !can_verify_roundtrip(transformers)) { - cat("Please review the changes carefully!") + if (any(changed, na.rm = TRUE) && + !parse_tree_must_be_identical(transformers) && + !getOption("styler.quiet", FALSE) + ) { + cat("Please review the changes carefully!", fill = TRUE) } } @@ -16,11 +20,22 @@ communicate_warning <- function(changed, transformers) { #' @param changed Boolean with indicating for each file whether or not it has #' been changed. #' @param ruler_width Integer used to determine the width of the ruler. +#' @keywords internal communicate_summary <- function(changed, ruler_width) { - cli::cat_rule(width = max(40, ruler_width)) - cat("Status\tCount\tLegend \n") - cli::cat_bullet("\t", sum(!changed, na.rm = TRUE), "\tFile unchanged.", bullet = "tick") - cli::cat_bullet("\t", sum(changed, na.rm = TRUE), "\tFile changed.", bullet = "info") - cli::cat_bullet(bullet = "cross", "\t", sum(is.na(changed)), "\tStyling threw an eror.") - cli::cat_rule(width = max(40, ruler_width)) + if (!getOption("styler.quiet", FALSE)) { + cli::cat_rule(width = max(40L, ruler_width)) + cat("Status\tCount\tLegend \n") + cli::cat_bullet( + "\t", sum(!changed, na.rm = TRUE), "\tFile unchanged.", + bullet = "tick" + ) + cli::cat_bullet( + "\t", sum(changed, na.rm = TRUE), "\tFile changed.", + bullet = "info" + ) + cli::cat_bullet( + bullet = "cross", "\t", sum(is.na(changed)), "\tStyling threw an error." + ) + cli::cat_rule(width = max(40L, ruler_width)) + } } diff --git a/R/compat-dplyr.R b/R/compat-dplyr.R new file mode 100644 index 000000000..340810003 --- /dev/null +++ b/R/compat-dplyr.R @@ -0,0 +1,59 @@ +lag <- function(x, n = 1L, default = NA) { + xlen <- length(x) + n <- pmin(n, xlen) + c(rep(default, n), x[seq_len(xlen - n)]) +} + +lead <- function(x, n = 1L, default = NA) { + xlen <- length(x) + n <- pmin(n, xlen) + c(x[-seq_len(n)], rep(default, n)) +} + + +arrange <- function(.data, ...) { + ord <- eval(substitute(order(...)), .data, parent.frame()) + vec_slice(.data, ord) +} + +arrange_pos_id <- function(data) { + pos_id <- data$pos_id + if (is.unsorted(pos_id)) { + data <- vec_slice(data, order(pos_id)) + } + data +} + +filter <- function(.data, ...) { + subset(.data, ...) 
+} + +left_join <- function(x, y, by) { + if (rlang::is_named(by)) { + by_x <- names(by) + by_y <- unname(by) + } else { + by_x <- by_y <- by + } + + res <- merge(x, y, by.x = by_x, by.y = by_y, all.x = TRUE, sort = FALSE) %>% + arrange_pos_id() + res <- new_styler_df(res) + # dplyr::left_join set unknown list columns to NULL, merge sets them + # to NA + if (exists("child", res) && anyNA(res$child)) { + res$child[is.na(res$child)] <- list(NULL) + } + res +} + + +last <- function(x) { + x[[length(x)]] +} + +map_dfr <- function(.x, .f, ...) { + .f <- purrr::as_mapper(.f, ...) + res <- map(.x, .f, ...) + vec_rbind(!!!res) +} diff --git a/R/compat-tidyr.R b/R/compat-tidyr.R index 0574fade9..4606a186b 100644 --- a/R/compat-tidyr.R +++ b/R/compat-tidyr.R @@ -3,8 +3,8 @@ nest_ <- function(data, key_col, nest_cols = character()) { key_data <- data[[key_column]] key_levels <- unique(key_data) key_factor <- factor(key_data, levels = key_levels) - res <- list() - res[[key_column]] <- key_levels - res[[key_col]] <- split(data[, nest_cols], key_factor) - as_tibble(res) + + res <- vec_split(data[, nest_cols], key_factor) + names(res) <- c(key_column, key_col) + res } diff --git a/R/detect-alignment-utils.R b/R/detect-alignment-utils.R new file mode 100644 index 000000000..ead1d041e --- /dev/null +++ b/R/detect-alignment-utils.R @@ -0,0 +1,201 @@ +#' Ensure the closing brace of the call is removed +#' +#' Must be after dropping comments because the closing brace is only guaranteed +#' to be the last token in that case. +#' @inheritParams alignment_drop_comments +#' @keywords internal +alignment_ensure_no_closing_brace <- function(pd_by_line, + last_line_droped_early) { + if (last_line_droped_early) { + return(pd_by_line) + } + last <- last(pd_by_line) + if (nrow(last) == 1L) { + # can drop last line completely + pd_by_line[-length(pd_by_line)] + } else { + # only drop last elment of last line + pd_by_line[[length(pd_by_line)]] <- vec_slice(last, seq2(1L, nrow(last) - 1L)) + pd_by_line + } +} + +#' Remove all comment tokens +#' +#' Must be after split by line because it invalidates (lag)newlines, which are +#' used for splitting by line. +#' @param pd_by_line A list, each element corresponding to a potentially +#' incomplete parse table that represents all token from one line. +#' @keywords internal +alignment_drop_comments <- function(pd_by_line) { + map(pd_by_line, function(x) { + out <- vec_slice(x, x$token != "COMMENT") + if (nrow(out) < 1L) { + return(NULL) + } else { + out + } + }) %>% + compact() +} + + +#' Remove last expression +#' +#' In a *nest*, if the last token is an `expr`, the *nest* represents either +#' an if, while or for statement or a function call. We don't call about that +#' part, in fact it's important to remove it for alignment. See 'Examples'. 
+#' +#' @examplesIf FALSE +#' call( +#' x = 12, +#' y = 3, +#' ) +#' +#' function(a = 33, +#' qq = 4) { +#' # we don't care about this part for alignment detection +#' } +#' @keywords internal +alignment_drop_last_expr <- function(pds_by_line) { + # TODO could be skipped if we know it's not a function dec + pd_last_line <- pds_by_line[[length(pds_by_line)]] + last_two_lines <- pd_last_line$token[c(nrow(pd_last_line) - 1L, nrow(pd_last_line))] + if (identical(last_two_lines, c("')'", "expr"))) { + pd_last_line <- vec_slice(pd_last_line, -nrow(pd_last_line)) + } + pds_by_line[[length(pds_by_line)]] <- pd_last_line + pds_by_line +} + + +#' Ensure last pd has a trailing comma +#' +#' Must be after [alignment_ensure_no_closing_brace()] because if it comes after +#' [alignment_ensure_trailing_comma()], the last expression would not be a +#' brace, which would make removal complicated. +#' @inheritParams alignment_drop_comments +#' @keywords internal +alignment_ensure_trailing_comma <- function(pd_by_line) { + last_pd <- last(pd_by_line) + # needed to make sure comma is added without space + last_pd$spaces[nrow(last_pd)] <- 0L + if (last(last_pd$token) == "','") { + return(pd_by_line) + } else { + tokens <- create_tokens( + tokens = "','", + texts = ",", + lag_newlines = 0L, + spaces = 0L, + pos_ids = NA, + stylerignore = last_pd$stylerignore[1L], + indents = last_pd$indent[1L] + ) + tokens$.lag_spaces <- 0L + + tokens$lag_newlines <- tokens$pos_id <- NULL + pd_by_line[[length(pd_by_line)]] <- rbind(last_pd, tokens) + pd_by_line + } +} + +#' Checks if all arguments of column 1 are named +#' @param relevant_pd_by_line A list with parse tables of a multi-line call, +#' excluding first and last column. +#' @keywords internal +alignment_col1_all_named <- function(relevant_pd_by_line) { + map_lgl(relevant_pd_by_line, function(x) { + if (nrow(x) < 3L) { + return(FALSE) + } + x$token[3L] == "expr" && + any(c("SYMBOL_SUB", "STR_CONST", "SYMBOL_FORMALS") == x$token[1L]) && + any(c("EQ_SUB", "EQ_FORMALS", "SPECIAL-IN", "LT", "GT", "EQ", "NE") == x$token[2L]) + }) %>% + all() +} + +#' Serialize all lines for a given column +#' @param column The index of the column to serialize. +#' @inheritParams alignment_col1_all_named +#' @keywords internal +alignment_serialize_column <- function(relevant_pd_by_line, column) { + map(relevant_pd_by_line, alignment_serialize_line, column = column) +} + +#' Serialize one line for a column +#' +#' @inheritParams alignment_serialize_column +#' @inheritParams alignment_col1_all_named +#' @keywords internal +alignment_serialize_line <- function(relevant_pd_by_line, column) { + # TODO + # better also add lover bound for column. If you already checked up to + # comma 2, you don't need to re-construct text again, just check if text + # between comma 2 and 3 has the same length. + comma_idx <- which(relevant_pd_by_line$token == "','") + n_cols <- length(comma_idx) + if (column > n_cols) { + # line does not have values at that column + return(NULL) + } + between_commas <- seq2(max(1L, comma_idx[column - 1L]), comma_idx[column]) + relevant_pd_by_line <- vec_slice(relevant_pd_by_line, between_commas) + alignment_serialize(relevant_pd_by_line) +} + +#' Serialize text from a parse table +#' +#' Line breaks are ignored as they are expected to be checked in +#' [token_is_on_aligned_line()]. 
+#' @inheritParams alignment_drop_comments +#' @keywords internal +alignment_serialize <- function(pd_sub) { + out <- Map(function(terminal, text, child, spaces, newlines) { + if (terminal) { + return(paste0(text, rep_char(" ", spaces))) + } else { + return(paste0(alignment_serialize(child), rep_char(" ", spaces))) + } + }, pd_sub$terminal, pd_sub$text, pd_sub$child, pd_sub$spaces, pd_sub$newlines) + if (anyNA(out)) { + return(NA) + } else { + paste0(out, collapse = "") + } +} + +#' Check if spacing around comma is correct +#' +#' At least one space after comma, none before, for all but the last comma on +#' the line +#' @param pd_sub The subset of a parse table corresponding to one line. +#' +#' @keywords internal +alignment_has_correct_spacing_around_comma <- function(pd_sub) { + comma_tokens <- which(pd_sub$token == "','") + if (length(comma_tokens) == 0L) { + return(TRUE) + } + relevant_comma_token <- comma_tokens[seq2(1L, length(comma_tokens) - 1L)] + correct_spaces_before <- pd_sub$.lag_spaces[relevant_comma_token] == 0L + correct_spaces_after <- pd_sub$spaces[relevant_comma_token] > 0L + all(correct_spaces_before) && all(correct_spaces_after) +} + +#' Check if spacing around `=` is correct +#' +#' At least one space around `EQ_SUB` +#' @inheritParams alignment_has_correct_spacing_around_comma +#' @keywords internal +alignment_has_correct_spacing_around_eq_sub <- function(pd_sub) { + relevant_eq_sub_token <- which(pd_sub$token == "EQ_SUB") + if (length(relevant_eq_sub_token) == 0L) { + return(TRUE) + } + + correct_spaces_before <- pd_sub$.lag_spaces[relevant_eq_sub_token] >= 1L + correct_spaces_after <- pd_sub$spaces[relevant_eq_sub_token] >= 1L + all(correct_spaces_before) && all(correct_spaces_after) +} diff --git a/R/detect-alignment.R b/R/detect-alignment.R new file mode 100644 index 000000000..970ab6ffd --- /dev/null +++ b/R/detect-alignment.R @@ -0,0 +1,199 @@ +#' Check if tokens are aligned +#' +#' If all tokens are aligned, `TRUE` is returned, otherwise `FALSE`. The +#' function only checks for alignment of function calls. This can be +#' recycled conveniently later if needed as a vector with length > 1. +#' @param pd_flat A flat parse table. +#' @details +#' Multiple lines are called aligned if the following conditions hold for all +#' but the first line of the expression: +#' +#' * lag spaces of column 1 must agree. +#' * spacing around comma (0 before, > 1 after) and spacing around `=` (at least +#' one around). +#' * all positions of commas of col > 2 must agree (needs recursive creation of +#' `text`). +#' +#' Because of the last requirement, this function is very expensive to run. For +#' this reason, the following approach is taken: +#' +#' * Only invoke the function when certain that alignment is possible. +#' * Check the cheap conditions first. +#' * For the recursive creation of text, greedily check column by column to make +#' sure we can stop as soon as we found that columns are not aligned. 
+#' +#' @keywords internal +#' @examples +#' library("magrittr") +#' withr::with_options( +#' list(styler.cache_name = NULL), # temporarily deactivate cache +#' { +#' transformers <- tidyverse_style() +#' pd_nested <- compute_parse_data_nested(c( +#' "call(", +#' " ab = 1L,", +#' " a = 2", +#' ")" +#' )) %>% +#' styler:::post_visit(transformers$initialize) +#' nest <- pd_nested$child[[1L]] +#' styler:::token_is_on_aligned_line(nest) +#' } +#' ) +token_is_on_aligned_line <- function(pd_flat) { + line_idx <- 1L + cumsum(pd_flat$lag_newlines) + # cannot use lag_newlines anymore since we removed tokens + # pos_id too expensive to construct in alignment_ensure_trailing_comma() + pd_flat$lag_newlines <- pd_flat$pos_id <- NULL + pd_flat$.lag_spaces <- lag(pd_flat$spaces) + pd_by_line_split <- vec_split(pd_flat, line_idx) + + # FIXME: Why are we using names here? + pd_by_line <- pd_by_line_split[[2L]] + names(pd_by_line) <- as.character(pd_by_line_split[[1L]]) + + pd_by_line[purrr::map_lgl(pd_by_line, ~ any(.x$stylerignore))] <- NULL + if (length(pd_by_line) < 1L) { + return(TRUE) + } + last_line_is_closing_brace_only <- nrow(last(pd_by_line)) == 1L + last_idx <- if (last_line_is_closing_brace_only) { + length(pd_by_line) - 1L + } else { + length(pd_by_line) + } + relevant_idx <- seq2(2L, last_idx) + pd_by_line <- pd_by_line[relevant_idx] + + relevant_lag_spaces_col_1 <- map_int(pd_by_line, ~ .x$.lag_spaces[1L]) + + col1_is_aligned <- length(unique(relevant_lag_spaces_col_1)) == 1L + if (!col1_is_aligned) { + return(FALSE) + } + has_correct_spacing_around_comma <- map_lgl( + pd_by_line, alignment_has_correct_spacing_around_comma + ) + if (!all(has_correct_spacing_around_comma)) { + return(FALSE) + } + + has_correct_spacing_around_eq_sub <- map_lgl( + pd_by_line, alignment_has_correct_spacing_around_eq_sub + ) + + if (!all(has_correct_spacing_around_eq_sub)) { + return(FALSE) + } + starting_with_comma <- map_lgl(pd_by_line, ~ .x$token[1L] == "','") + if (any(starting_with_comma)) { + return(FALSE) + } + pd_is_multi_line <- map_lgl( + pd_by_line, + ~ any(.x$multi_line > 0L, na.rm = TRUE) + ) + if (any(pd_is_multi_line)) { + return(FALSE) + } + + pd_by_line <- alignment_drop_comments(pd_by_line) + if (length(pd_by_line) < 1L) { + return(TRUE) + } + pd_by_line <- alignment_drop_last_expr(pd_by_line) %>% + alignment_ensure_no_closing_brace(last_line_is_closing_brace_only) + + pd_by_line <- pd_by_line %>% + alignment_ensure_trailing_comma() + # now, pd only contains arguments separated by values, ideal for iterating + # over columns. + n_cols <- map_int(pd_by_line, ~ sum(.x$token == "','")) + previous_line <- current_col <- 0L + # if all col1 are named or there is at max 1 column, + # start at column 1, else start at column 2 + start_eval <- if (max(n_cols) == 1L || alignment_col1_all_named(pd_by_line)) { + 1L + } else { + 2L + } + for (column in seq2(1L, max(n_cols))) { + by_line <- alignment_serialize_column(pd_by_line, column) %>% + compact() %>% + unlist() %>% + trimws(which = "right") + # check 1: match by comma + # might have fewer lines in subsequent columns. 
+ max_previous_col <- max(current_col) + + # first col has no leading , + current_col <- nchar(by_line) - as.integer(column > 1L) + # Problem `by_line` counting from comma before column 3, previous_line + # counting 1 space before ~ + if (column > 1L) { + previous_line <- previous_line[ + intersect(names(previous_line), names(by_line)) + ] + # must add previous columns, as first column might not align + current_col <- current_col + previous_line + } + + is_aligned <- length(unique(current_col)) == 1L + if (!is_aligned || length(current_col) < 2L) { + # check 2: left aligned after , (comma to next token) + current_col <- "^(,[\\s\\t]*)[^ ]*.*$" %>% + gsub("\\1", by_line, perl = TRUE) %>% + nchar() %>% + magrittr::subtract(1L) + + if (column > 1L) { + # must add previous columns, as first column might not align + current_col <- previous_line + current_col + } + if (length(current_col) > 1L) { + is_aligned <- length(unique(current_col)) == 1L + } else { + is_aligned <- current_col - max_previous_col == 1L + current_col <- max_previous_col + current_col + } + + if (is_aligned) { + # if left aligned after , + start_eval <- 2L + previous_line <- nchar(by_line) - 1L + previous_line # comma to comma + } + } else { + previous_line <- current_col + } + if (is_aligned) { + next + } + # check 3: match by = (no extra spaces around it allowed) + # match left aligned after = + start_after_eq <- regexpr("= [^ ]", by_line) + names(start_after_eq) <- names(by_line) + start_after_eq <- start_after_eq[start_after_eq > 0L] + + if (column >= start_eval) { + if (length(start_after_eq) == 0L) { + return(FALSE) + } + # when match via , unsuccessful, matching by = must yield at least one = + if (column == 1L) { + current_col <- start_after_eq + } else { + current_col <- start_after_eq + + previous_line[intersect(names(previous_line), names(start_after_eq))] + } + is_aligned <- all( + length(unique(current_col)) == 1L, + length(start_after_eq) > 1L + ) + if (!is_aligned) { + return(FALSE) + } + } + previous_line <- nchar(by_line) + previous_line + } + TRUE +} diff --git a/R/dplyr.R b/R/dplyr.R deleted file mode 100644 index d55cc1908..000000000 --- a/R/dplyr.R +++ /dev/null @@ -1,112 +0,0 @@ -lag <- function(x, n = 1L, default = NA, ...) { - if (n == 0) { - return(x) - } - xlen <- length(x) - n <- pmin(n, xlen) - out <- c(rep(default, n), x[seq_len(xlen - n)]) - attributes(out) <- attributes(x) - out -} - -lead <- function(x, n = 1L, default = NA, ...) { - if (n == 0) { - return(x) - } - xlen <- length(x) - n <- pmin(n, xlen) - out <- c(x[-seq_len(n)], rep(default, n)) - attributes(out) <- attributes(x) - out -} - -arrange <- function(.data, ...) { - stopifnot(is.data.frame(.data)) - ord <- eval(substitute(order(...)), .data, parent.frame()) - if (length(ord) != nrow(.data)) { - stop( - "Length of ordering vectors don't match data frame size", - call. = FALSE - ) - } - .data[ord, , drop = FALSE] -} - -if_else <- function(condition, true, false, missing = NULL) { - stopifnot(length(condition) == length(true)) - stopifnot(length(condition) == length(false)) - if (!is.null(missing)) stop("missing arg not yet implemented") - ifelse(condition, true, false) -} - -bind_rows <- function(x, y = NULL, ...) 
{ - if (is.null(x) && is.null(y)) { - return(tibble()) - } - if (is.null(x)) { - if (inherits(y, "data.frame")) { - return(y) - } - return(do.call(rbind.data.frame, x)) - } - if (is.null(y)) { - if (inherits(x, "data.frame")) { - return(x) - } - return(do.call(rbind.data.frame, x)) - } - if (NCOL(x) != NCOL(y)) { - for (nme in setdiff(names(x), names(y))) { - y[[nme]] <- NA - } - } - bind_rows(rbind.data.frame(x, y), ...) -} - -filter <- function(.data, ...) { - subset(.data, ...) -} - -left_join <- function(x, y, by, ...) { - if (rlang::is_named(by)) { - by_x <- names(by) - by_y <- unname(by) - } else { - by_x <- by_y <- by - } - res <- as_tibble(merge(x, y, by.x = by_x, by.y = by_y, all.x = TRUE, ...)) - res <- arrange(res, pos_id) - - # dplyr::left_join set unknown list columns to NULL, merge sets them - # to NA - if (exists("child", res) && any(is.na(res$child))) { - res$child[is.na(res$child)] <- list(NULL) - } - res -} - -nth <- function(x, n, order_by = NULL, default = x[NA_real_]) { - stopifnot(length(n) == 1, is.numeric(n)) - n <- trunc(n) - if (n == 0 || n > length(x) || n < -length(x)) { - return(default) - } - if (n < 0) { - n <- length(x) + n + 1 - } - if (is.null(order_by)) { - x[[n]] - } - else { - x[[order(order_by)[[n]]]] - } -} - - -last <- function(x, order_by = NULL, default = x[NA_real_]) { - nth(x, -1L, order_by = order_by, default = default) -} - -slice <- function(.data, ...) { - .data[c(...), , drop = FALSE] -} diff --git a/R/environments.R b/R/environments.R new file mode 100755 index 000000000..83863ce31 --- /dev/null +++ b/R/environments.R @@ -0,0 +1,63 @@ +#' Work with parser versions +#' +#' The structure of the parse data affects many operations in styler. There was +#' unexpected behavior of the parser that styler was initially designed to work +#' around. Examples are [#187](https://github.com/r-lib/styler/issues/187), +#' [#216](https://github.com/r-lib/styler/issues/216), +#' [#100](https://github.com/r-lib/styler/issues/100) and others. With +#' [#419](https://github.com/r-lib/styler/issues/419), the structure of the +#' parse data changes and we need to dispatch for older versions. As it is +#' inconvenient to pass a parser version down in the call stack in various +#' places, the environment `env_current` is used to store the current version +#' *globally* but internally. +#' +#' We version the parser as follows: +#' +#' * version 1: Before fix mentioned in #419. R < 3.6 +#' * version 2: After #419. R >= 3.6 +#' # version 3: After #582. R >= 4.0 +#' +#' The following utilities are available: +#' +#' * `parser_version_set()` sets the parser version in the environment +#' `env_current`. +#' * `parser_version_get()` retrieves the parser version from the +#' environment `env_current`. +#' * `parser_version_find()` determines the version of the parser from parse +#' data. This does not necessarily mean that the version found is the +#' actual version, but it *behaves* like it. For example, code that does not +#' contain `EQ_ASSIGN` is parsed the same way with version 1 and 2. If the +#' behavior is identical, the version is set to 1. +#' @param version The version of the parser to be used. +#' @param pd A parse table such as the output from +#' `utils::getParseData(parse(text = text))`. 
+#' @keywords internal +parser_version_set <- function(version) { + env_current$parser_version <- version +} + +#' @rdname parser_version_set +parser_version_get <- function() { + env_current$parser_version +} + +#' @rdname parser_version_set +parser_version_find <- function(pd) { + if (any(pd$token == "equal_assign")) 2L else 3L +} + + +#' The elements that are added to this environment are: +#' +#' @details +#' * `parser_version`: Needed to dispatch between parser versions, see +#' [parser_version_set()] for details. +#' * `stylerignore`: A data frame with parse data containing tokens that fall within +#' a stylerignore sequence. This is used after serializing the flattened +#' parse table to apply the initial formatting to these tokens. See +#' [stylerignore] for details. +#' * `any_stylerignore`: Whether there is any stylerignore marker. The idea is +#' to check early in the runtime if this is the case and then if so, take +#' as many short-cuts as possible. See [stylerignore] for details. +#' @keywords internal +env_current <- rlang::new_environment(parent = rlang::empty_env()) diff --git a/R/expr-is.R b/R/expr-is.R index 695ee1333..cbbecadbf 100644 --- a/R/expr-is.R +++ b/R/expr-is.R @@ -1,35 +1,185 @@ -#' Check whether a parse table corresponds to a certain expression +#' What is a parse table representing? #' -#' @param pd A parse table. +#' Check whether a parse table corresponds to a certain expression. #' @name pd_is +#' +#' @param pd A parse table. +#' @param tilde_pos Integer vector indicating row-indices that should be +#' checked for tilde. See 'Details'. +#' +#' @family third-party style guide helpers NULL -#' @describeIn pd_is Checks whether `pd` contains an expression wrapped in -#' curly brackets. +#' @describeIn pd_is Checks whether `pd` contains an expression wrapped in curly brackets. +#' @examples +#' code <- "if (TRUE) { 1 }" +#' pd <- compute_parse_data_nested(code) +#' is_curly_expr(pd) +#' child_of_child <- pd$child[[1]]$child[[5]] +#' is_curly_expr(child_of_child) +#' +#' @export is_curly_expr <- function(pd) { - if (is.null(pd)) return(FALSE) - pd$token[1] == "'{'" + if (is.null(pd)) { + return(FALSE) + } + pd$token[1L] == "'{'" } -is_subset_expr <- function(pd) { - if (is.null(pd) || nrow(pd) == 1) return(FALSE) - pd$token[2] == "'['" +#' @describeIn pd_is Checks whether `pd` contains a `for` loop. +#' @examples +#' code <- "for (i in 1:5) print(1:i)" +#' pd <- compute_parse_data_nested(code) +#' is_for_expr(pd) +#' is_for_expr(pd$child[[1]]) +#' +#' @export +is_for_expr <- function(pd) { + pd$token[1L] == "FOR" +} + +#' @describeIn pd_is Checks whether `pd` contains is a conditional expression. +#' @examples +#' code <- "if (TRUE) x <- 1 else x <- 0" +#' pd <- compute_parse_data_nested(code) +#' is_conditional_expr(pd) +#' is_conditional_expr(pd$child[[1]]) +#' +#' @export +is_conditional_expr <- function(pd) { + pd$token[1L] == "IF" } +#' @describeIn pd_is Checks whether `pd` contains a `while` loop. +#' @export +is_while_expr <- function(pd) { + pd$token[1L] == "WHILE" +} + + #' @describeIn pd_is Checks whether `pd` is a function call. 
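Not part of the patch, only a hedged sketch of the intended use as "third-party style guide helpers": the predicates gate transformer functions that each receive one nest at a time. `indent_for_loops()` and its body are hypothetical.

# hypothetical transformer: only touch nests that represent a `for` loop
indent_for_loops <- function(pd) {
  if (is_for_expr(pd)) {
    # ... adjust pd$indent / pd$spaces for the loop body here ...
  }
  pd
}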
+#' @examples +#' code <- "x <- list(1:3)" +#' pd <- compute_parse_data_nested(code) +#' is_function_call(pd) +#' child_of_child <- pd$child[[1]]$child[[3]] +#' is_function_call(child_of_child) +#' +#' @export is_function_call <- function(pd) { - if (is.null(pd)) return(FALSE) - if (is.na(pd$token_before[2])) return(FALSE) - pd$token_before[2] == "SYMBOL_FUNCTION_CALL" + if (is.null(pd)) { + return(FALSE) + } + if (is.na(pd$token_before[2L])) { + return(FALSE) + } + pd$token_before[2L] == "SYMBOL_FUNCTION_CALL" } #' @describeIn pd_is Checks whether `pd` is a function declaration. -is_function_dec <- function(pd) { - if (is.null(pd)) return(FALSE) - pd$token[1] == "FUNCTION" +#' @examples +#' code <- "foo <- function() NULL" +#' pd <- compute_parse_data_nested(code) +#' is_function_declaration(pd) +#' child_of_child <- pd$child[[1]]$child[[3]] +#' is_function_declaration(child_of_child) +#' +#' @export +is_function_declaration <- function(pd) { + if (is.null(pd)) { + return(FALSE) + } + pd$token[1L] == "FUNCTION" +} + +#' @describeIn pd_is Checks for every token whether or not it is a comment. +#' @examples +#' code <- "x <- 1 # TODO: check value" +#' pd <- compute_parse_data_nested(code) +#' is_comment(pd) +#' +#' @export +is_comment <- function(pd) { + if (is.null(pd)) { + return(FALSE) + } + pd$token == "COMMENT" +} + +#' @describeIn pd_is Checks whether `pd` contains a tilde. +#' @details +#' A tilde is on the top row in the parse table if it is an asymmetric tilde +#' expression (like `~column`), in the second row if it is a symmetric tilde +#' expression (like `a~b`). +#' @examples +#' code <- "lm(wt ~ mpg, mtcars)" +#' pd <- compute_parse_data_nested(code) +#' is_tilde_expr(pd$child[[1]]$child[[3]]) +#' is_symmetric_tilde_expr(pd$child[[1]]$child[[3]]) +#' is_asymmetric_tilde_expr(pd$child[[1]]$child[[3]]) +#' +#' @export +is_tilde_expr <- function(pd, tilde_pos = c(1L, 2L)) { + if (is.null(pd) || nrow(pd) == 1L) { + return(FALSE) + } + any(pd$token[tilde_pos] == "'~'") +} + +#' @describeIn pd_is If `pd` contains a tilde, checks whether it is asymmetrical. +#' @export +is_asymmetric_tilde_expr <- function(pd) { + is_tilde_expr(pd, tilde_pos = 1L) +} + +#' @describeIn pd_is If `pd` contains a tilde, checks whether it is symmetrical. +#' @export +is_symmetric_tilde_expr <- function(pd) { + is_tilde_expr(pd, tilde_pos = 2L) +} + +is_subset_expr <- function(pd) { + if (is.null(pd) || nrow(pd) == 1L) { + return(FALSE) + } + pd$token[2L] %in% subset_token_opening } +#' Identify comments that are shebangs +#' +#' Shebangs should be preserved and no space should be inserted between +#' `#` and `!`. A comment is a shebang if it is the first top-level token +#' (identified with `pos_id`) and if it starts with `#!`. +#' @param pd A parse table. +#' @examples +#' style_text("#!/usr/bin/env Rscript") +#' @keywords internal +is_shebang <- function(pd) { + is_first_comment <- pd$pos_id == 1L + is_first_comment[is_first_comment] <- startsWith(pd$text[is_first_comment], "#!") + is_first_comment +} + +#' Identify spinning code chunk header or xaringan +#' +#' Wrongly identifies a comment without a preceding line break as a code chunk +#' header. +#' See https://yihui.name/knitr/demo/stitch/#spin-comment-out-texts for details. +#' @examples +#' style_text(c( +#' "# title", +#' "some_code <- function() {}", +#' "#+ chunk-label, opt1=value1", +#' "call(3, 2, c(3:2))", +#' "#> 99" +#' )) +#' @param pd A parse table. 
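For illustration only (not part of the patch): which comment texts the regular expression in `is_code_chunk_header_or_xaringan_or_code_output()` below would flag.

texts <- c("#+ chunk-label, opt1=value1", "#- setup", "#<< highlight", "#> 99", "# plain comment")
grepl("^#[\\+|\\-|<<|>]", texts, perl = TRUE) # TRUE TRUE TRUE TRUE FALSE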
+#' @keywords internal +is_code_chunk_header_or_xaringan_or_code_output <- function(pd) { + grepl("^#[\\+|\\-|<<|>]", pd$text, perl = TRUE) +} + contains_else_expr <- function(pd) { any(pd$token == "ELSE") } @@ -41,19 +191,15 @@ contains_else_expr <- function(pd) { #' else-if will be visited separately with the visitor. This applies to all #' conditional statements with more than one alternative. #' @param pd A parse table +#' @keywords internal contains_else_expr_that_needs_braces <- function(pd) { else_idx <- which(pd$token == "ELSE") - if (length(else_idx) > 0) { + if (length(else_idx) > 0L) { non_comment_after_else <- next_non_comment(pd, else_idx) sub_expr <- pd$child[[non_comment_after_else]] # needs braces if NOT if_condition, NOT curly expr - !is_cond_expr(sub_expr) && !is_curly_expr(sub_expr) + !is_conditional_expr(sub_expr) && !is_curly_expr(sub_expr) } else { FALSE } } - - -is_cond_expr <- function(pd) { - pd$token[1] == "IF" -} diff --git a/R/indent.R b/R/indent.R index d35ccfd4b..b6e12285b 100644 --- a/R/indent.R +++ b/R/indent.R @@ -5,95 +5,62 @@ #' @param indent_by How many spaces should be added after the token of interest. #' @param token The token the indention should be based on. #' @name update_indention +#' @keywords internal NULL -#' @describeIn update_indention Inserts indention based on round, square and -#' curly brackets. -indent_braces <- function(pd, indent_by) { - indent_indices <- compute_indent_indices( - pd, - token_opening = c("'('", "'['", "'{'"), - token_closing = c("')'", "']'", "'}'") - ) - pd$indent[indent_indices] <- pd$indent[indent_indices] + indent_by - set_unindention_child(pd, token = "')'", unindent_by = indent_by) -} - -#' @describeIn update_indention Indents operators -indent_op <- function(pd, - indent_by, - token = c( - math_token, - logical_token, - special_token, - "LEFT_ASSIGN", - "EQ_ASSIGN", - "'$'" - )) { - indent_indices <- compute_indent_indices(pd, token) - pd$indent[indent_indices] <- pd$indent[indent_indices] + indent_by - pd -} - -#' @describeIn update_indention Updates indention for token EQ_SUB. Only differs -#' from indent_op in the sense that the last token on the table where EQ_SUB -#' occurs is not indented (see[compute_indent_indices()]) -indent_eq_sub <- function(pd, - indent_by, - token = "EQ_SUB") { - eq_sub <- which(pd$token == "EQ_SUB") - if (length(eq_sub) == 0) return(pd) - has_line_break <- which(pd$lag_newlines > 0) - indent_indices <- intersect(eq_sub + 1, has_line_break) - pd$indent[indent_indices] <- pd$indent[indent_indices] + indent_by - pd -} - - -#' @describeIn update_indention Same as indent_op, but only indents one token -#' after `token`, not all remaining. -indent_assign <- function(pd, indent_by, token = NULL) { - indent_indices <- compute_indent_indices(pd, token) - pd$indent[indent_indices] <- pd$indent[indent_indices] + indent_by - pd -} - -#' @describeIn update_indention Is used to indent for / while / if / if-else -#' statements that do not have curly parenthesis. -indent_without_paren <- function(pd, indent_by = 2) { - pd %>% - indent_without_paren_for_while_fun(indent_by) %>% - indent_without_paren_if_else(indent_by) -} - -#' @describeIn update_indention Is used to indent for and statements and function -#' definitions without parenthesis. +#' @describeIn update_indention Is used to indent for and statements and +#' function definitions without parenthesis. 
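A small usage sketch, not part of the patch and hedged on the exact output: with `scope = "indention"` only indention is touched, so a braceless loop body on its own line should get indented by `indent_by`, while a body on the same line is left alone.

styler::style_text("for (i in 1:3)\nprint(i)", scope = "indention")
styler::style_text("for (i in 1:3) print(i)", scope = "indention") # body on the same line: no change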
+#' @keywords internal indent_without_paren_for_while_fun <- function(pd, indent_by) { + tokens <- c("FOR", "WHILE", "FUNCTION") nrow <- nrow(pd) - if (!(pd$token[1] %in% c("FOR", "WHILE", "FUNCTION"))) return(pd) - if (is_curly_expr(pd$child[[nrow]])) return(pd) + if (!(pd$token[1L] %in% tokens)) { + return(pd) + } + if (is_curly_expr(pd$child[[nrow]])) { + return(pd) + } + + if (pd$newlines[length(pd$newlines) - 1L] == 0L) { + return(pd) + } pd$indent[nrow] <- indent_by pd } #' @describeIn update_indention Is used to indent if and if-else statements. -#' @importFrom rlang seq2 +#' +#' @keywords internal indent_without_paren_if_else <- function(pd, indent_by) { - expr_after_if <- next_non_comment(pd, which(pd$token == "')'")[1]) - has_if_without_curly <- - pd$token[1] %in% "IF" && pd$child[[expr_after_if]]$token[1] != "'{'" - if (has_if_without_curly) { + expr_after_if <- next_non_comment(pd, which(pd$token == "')'")[1L]) + is_if <- pd$token[1L] == "IF" + if (!is_if) { + return(pd) + } + needs_indention_now <- pd$lag_newlines[ + next_non_comment(pd, which(pd$token == "')'")) + ] > 0L + + if (needs_indention_now) { pd$indent[expr_after_if] <- indent_by } else_idx <- which(pd$token == "ELSE") + if (length(else_idx) == 0L) { + return(pd) + } expr_after_else_idx <- next_non_comment(pd, else_idx) has_else_without_curly_or_else_chid <- any(pd$token == "ELSE") && - pd$child[[expr_after_else_idx]]$token[1] != "'{'" && - pd$child[[expr_after_else_idx]]$token[1] != "IF" - if (has_else_without_curly_or_else_chid) { - pd$indent[seq(else_idx + 1, nrow(pd))] <- indent_by + pd$child[[expr_after_else_idx]]$token[1L] != "'{'" && + pd$child[[expr_after_else_idx]]$token[1L] != "IF" + + needs_indention_now <- pd$lag_newlines[ + next_non_comment(pd, which(pd$token == "ELSE")) + ] > 0L + + if (has_else_without_curly_or_else_chid && needs_indention_now) { + pd$indent[seq(else_idx + 1L, nrow(pd))] <- indent_by } pd } @@ -120,20 +87,33 @@ indent_without_paren_if_else <- function(pd, indent_by) { #' example in if-else expressions, this is not the case and indenting #' everything between '(' and the penultimate token would result in the wrong #' formatting. -#' @importFrom rlang seq2 +#' @section Handing of `[[`: +#' Since text `[[` has token `"LBB"` and text `]]` is parsed as two independent +#' `]` (see 'Examples'), indention has to stop at the first `]`. +# one token earlier +#' +#' @keywords internal +#' @examples +#' styler:::parse_text("a[1]") +#' styler:::parse_text("a[[1\n]]") compute_indent_indices <- function(pd, token_opening, token_closing = NULL) { npd <- nrow(pd) potential_triggers <- which(pd$token %in% token_opening) - needs_indention <- needs_indention(pd, potential_triggers) - trigger <- potential_triggers[needs_indention][1] - if (is.na(trigger)) return(numeric(0)) - start <- trigger + 1 + needs_indention <- needs_indention(pd, potential_triggers, + other_trigger_tokens = c("EQ_SUB", "EQ_FORMALS") + ) + trigger <- potential_triggers[needs_indention][1L] + if (is.na(trigger)) { + return(numeric(0L)) + } + start <- trigger + 1L if (is.null(token_closing)) { stop <- npd } else { - stop <- last(which(pd$token %in% token_closing)[needs_indention]) - 1 + offset <- if (any(pd$token == "LBB")) 2L else 1L + stop <- last(which(pd$token %in% token_closing)[needs_indention]) - offset } seq2(start, stop) @@ -144,28 +124,79 @@ compute_indent_indices <- function(pd, #' #' Checks for each potential trigger token in `pd` whether it actually should #' cause indention. 
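Illustration (not part of the patch) of the `[[` peculiarity handled in `compute_indent_indices()` above: `[[` is a single `LBB` token, but `]]` comes back as two separate `']'` tokens.

pd <- utils::getParseData(parse(text = "a[[1\n]]", keep.source = TRUE))
pd[pd$terminal, c("token", "text")] # SYMBOL, LBB, NUM_CONST, then two separate "']'" rows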
-#' @param potential_triggers A vector with indices of the potential trigger +#' @param potential_triggers_pos A vector with indices of the potential trigger #' tokens in `pd`. #' @inheritParams needs_indention_one -needs_indention <- function(pd, potential_triggers) { - map_lgl(potential_triggers, needs_indention_one, pd = pd) +#' @keywords internal +needs_indention <- function(pd, + potential_triggers_pos, + other_trigger_tokens = NULL) { + map_lgl(potential_triggers_pos, needs_indention_one, + pd = pd, other_trigger_tokens = other_trigger_tokens + ) } #' Check whether indention is needed #' -#' Indention is needed if and only if there is no multi-line token between the -#' trigger and the first line break. +#' Determine whether the tokens corresponding to `potential_trigger_pos` should +#' cause indention, considering that there might be other potential triggers +#' `other_trigger_tokens` that are going to cause indention. +#' Indention is needed if the two conditions apply: +#' +#' * there is no multi-line token between the trigger and the first line break. +#' * there is no other token between the potential trigger and the first line +#' break that is going to cause indention. Note that such an other trigger +#' only causes indention if there is a line break after that other triggering +#' token, not otherwise. If it causes indention, it is said to be an active +#' trigger, if it does not, it is called an inactive trigger. +#' See 'Details' for an example where there is an other trigger token, but +#' since the next token is on the same line as the other trigger, +#' the trigger is passive. #' @param pd A parse table. -#' @param potential_trigger the index of the token in the parse table +#' @param potential_trigger_pos the index of the token in the parse table #' for which it should be checked whether it should trigger indention. #' @return Returns `TRUE` if indention is needed, `FALSE` otherwise. +#' @param other_trigger_tokens Other tokens that are going to cause indention +#' if on the same line as the token corresponding to `potential_trigger` and +#' directly followed by a line break. #' @return `TRUE` if indention is needed, `FALSE` otherwise. 
-#' @importFrom rlang seq2 -needs_indention_one <- function(pd, potential_trigger) { - before_first_break <- which(pd$lag_newlines > 0)[1] - 1 - if (is.na(before_first_break)) return(FALSE) - !any(pd$multi_line[seq2(potential_trigger, before_first_break)]) +#' +#' @keywords internal +#' @examples +#' style_text(c( +#' "call(named = c,", +#' "named = b)" +#' ), strict = FALSE) +needs_indention_one <- function(pd, + potential_trigger_pos, + other_trigger_tokens) { + before_first_break <- which(pd$lag_newlines > 0L)[1L] - 1L + if (is.na(before_first_break)) { + return(FALSE) + } + row_idx_between_trigger_and_line_break <- seq2( + potential_trigger_pos, before_first_break + ) + multi_line_token <- pd_is_multi_line( + vec_slice(pd, row_idx_between_trigger_and_line_break) + ) + remaining_row_idx_between_trigger_and_line_break <- setdiff( + row_idx_between_trigger_and_line_break, + potential_trigger_pos + ) + + other_trigger_on_same_line <- ( + pd$token[remaining_row_idx_between_trigger_and_line_break] %in% + other_trigger_tokens + ) + line_break_after_other_trigger <- + pd$lag_newlines[remaining_row_idx_between_trigger_and_line_break + 1L] > 0L + + active_trigger_on_same_line <- + other_trigger_on_same_line & line_break_after_other_trigger + + !any(multi_line_token) & !any(active_trigger_on_same_line) } @@ -173,11 +204,11 @@ needs_indention_one <- function(pd, potential_trigger) { #' Set the multi-line column #' #' Sets the column `multi_line` in `pd` by checking row-wise whether any child -#' of a token is a multi-line token. +#' of a token is a multi-line token. #' @param pd A parse table. -#' @importFrom purrr map_lgl +#' @keywords internal set_multi_line <- function(pd) { - pd$multi_line <- map_lgl(pd$child, pd_is_multi_line) + pd$multi_line <- unname(map_int(pd$child, pd_multi_line)) pd } @@ -188,24 +219,31 @@ set_multi_line <- function(pd) { #' * it contains a line break. #' * it has at least one child that is a multi-line expression itself. #' @param pd A parse table. +#' @keywords internal pd_is_multi_line <- function(pd) { - any(pd$multi_line, pd$lag_newlines > 0) + pd_multi_line(pd) > 0L +} + +pd_multi_line <- function(pd) { + sum(pd$multi_line, pd$lag_newlines) } #' Update the newlines attribute #' #' As we work only with the `lag_newlines` attribute for setting the line -#' breaks, (R/rules-line_break.R) but we need `newlines` to determine -#' whether or not to set `spaces` (R/rules-spacing.R), we have to update the +#' breaks (`R/rules-line_breaks.R`), but we need `newlines` to determine +#' whether or not to set `spaces` (`R/rules-spaces.R`), we have to update the #' attribute. We cannot simply use `dplyr::lead(pd$lag_newlines)` since we would #' lose information for the last token. `spaces` is left as is in #' R/rules-spacing.R for tokens at the end of a line since this allows styling #' without touching indention. #' @param pd A parse table. -#' @return A parse table with synchronized `lag_newlines` and `newlines` columns. +#' @return A parse table with synchronized `lag_newlines` and `newlines` +#' columns. 
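Conceptually, and only as a plain-R sketch that is not package code, the synchronization below shifts `lag_newlines` back by one position and keeps the last value untouched:

lag_newlines <- c(0L, 1L, 0L, 2L)
newlines <- c(9L, 9L, 9L, 9L) # stale values; the last one is kept as is
idx <- seq_len(length(newlines) - 1L)
newlines[idx] <- lag_newlines[idx + 1L]
newlines # 1 0 2 9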
#' @seealso choose_indention +#' @keywords internal update_newlines <- function(pd) { - npd <- nrow(pd) - 1 - pd$newlines[seq_len(npd)] <- pd$lag_newlines[seq_len(npd) + 1] + seq_pd <- seq_len(nrow(pd) - 1L) + pd$newlines[seq_pd] <- pd$lag_newlines[seq_pd + 1L] pd } diff --git a/R/initialize.R b/R/initialize.R index a3e8a1134..87b0a1e62 100644 --- a/R/initialize.R +++ b/R/initialize.R @@ -1,35 +1,42 @@ #' Initialize default style guide attributes #' -#' This function initialises and removes various variables from the parse +#' This function initializes and removes various variables from the parse #' table. #' @param pd_flat A parse table. -#' @importFrom utils tail #' @examples -#' string_to_format <- "call( 3)" -#' pd <- styler:::compute_parse_data_nested(string_to_format) -#' styler:::pre_visit(pd, c(default_style_guide_attributes)) +#' withr::with_options( +#' list(styler.cache_name = NULL), # temporarily deactivate cache +#' { +#' string_to_format <- "call( 3)" +#' pd <- compute_parse_data_nested(string_to_format) +#' styler:::pre_visit_one(pd, default_style_guide_attributes) +#' } +#' ) #' @export +#' @keywords internal default_style_guide_attributes <- function(pd_flat) { - init_pd <- - initialize_newlines(pd_flat) %>% + initialize_newlines(pd_flat) %>% initialize_spaces() %>% remove_attributes(c("line1", "line2", "col1", "col2", "parent", "id")) %>% initialize_multi_line() %>% initialize_indention_ref_pos_id() %>% initialize_indent() %>% validate_parse_data() - init_pd } + + #' Initialize attributes #' #' @name initialize_attributes #' @inheritParams default_style_guide_attributes +#' @keywords internal NULL #' @describeIn initialize_attributes Initializes `newlines` and `lag_newlines`. +#' @keywords internal initialize_newlines <- function(pd_flat) { - pd_flat$line3 <- lead(pd_flat$line1, default = tail(pd_flat$line2, 1)) + pd_flat$line3 <- lead(pd_flat$line1, default = utils::tail(pd_flat$line2, 1L)) pd_flat$newlines <- pd_flat$line3 - pd_flat$line2 pd_flat$lag_newlines <- lag(pd_flat$newlines, default = 0L) pd_flat$line3 <- NULL @@ -37,14 +44,14 @@ initialize_newlines <- function(pd_flat) { } #' @describeIn initialize_attributes Initializes `spaces`. +#' @keywords internal initialize_spaces <- function(pd_flat) { - pd_flat$col3 <- lead(pd_flat$col1, default = tail(pd_flat$col2, 1) + 1L) - pd_flat$col2_nl <- if_else(pd_flat$newlines > 0L, + pd_flat$col3 <- lead(pd_flat$col1, default = utils::tail(pd_flat$col2, 1L) + 1L) + pd_flat$col2_nl <- ifelse(pd_flat$newlines > 0L, rep(0L, nrow(pd_flat)), pd_flat$col2 ) pd_flat$spaces <- pd_flat$col3 - pd_flat$col2_nl - 1L - pd_flat$col3 <- NULL - pd_flat$col2_nl <- NULL + pd_flat$col3 <- pd_flat$col2_nl <- NULL pd_flat } @@ -54,33 +61,38 @@ remove_attributes <- function(pd_flat, attributes) { } #' @describeIn initialize_attributes Initializes `multi_line`. +#' @keywords internal initialize_multi_line <- function(pd_flat) { nrow <- nrow(pd_flat) - pd_flat$multi_line <- if_else(pd_flat$terminal, - rep(FALSE, nrow), + pd_flat$multi_line <- ifelse(pd_flat$terminal, + rep(0L, nrow), rep(NA, nrow) ) pd_flat } #' @describeIn initialize_attributes Initializes `indention_ref_ind`. +#' @keywords internal initialize_indention_ref_pos_id <- function(pd_flat) { pd_flat$indention_ref_pos_id <- NA pd_flat } #' @describeIn initialize_attributes Initializes `indent`. 
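For orientation, an illustration that is not part of the patch: `spaces` in `initialize_spaces()` above is derived from column positions in the parse data. In `"call( 3)"` the `'('` ends at column 5 and `3` starts at column 7, giving 7 - 5 - 1 = 1 space.

pd <- utils::getParseData(parse(text = "call( 3)", keep.source = TRUE))
pd[pd$terminal, c("col1", "col2", "text")]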
+#' @keywords internal initialize_indent <- function(pd_flat) { if (!("indent" %in% names(pd_flat))) { - pd_flat$indent <- 0 + pd_flat$indent <- 0L } pd_flat } + #' @describeIn initialize_attributes validates the parse data. +#' @keywords internal validate_parse_data <- function(pd_flat) { if (any(pd_flat$spaces < 0L)) { - stop("Invalid parse data") + abort("Invalid parse data") } pd_flat } diff --git a/R/io.R b/R/io.R new file mode 100644 index 000000000..565afb400 --- /dev/null +++ b/R/io.R @@ -0,0 +1,123 @@ +#' Apply a function to the contents of a file +#' +#' Transforms a file with a function. +#' @inheritParams transform_utf8_one +#' @keywords internal +transform_utf8 <- function(path, fun, dry) { + map_lgl(path, transform_utf8_one, fun = fun, dry = dry) %>% + set_names(path) +} + +#' Potentially transform a file +#' +#' @param path A vector with file paths to transform. +#' @param fun A function that returns a character vector. +#' @param dry To indicate whether styler should run in *dry* mode, i.e. refrain +#' from writing back to files .`"on"` and `"fail"` both don't write back, the +#' latter returns an error if the input code is not identical to the result +#' of styling. "off", the default, writes back if the input and output of +#' styling are not identical. +#' @keywords internal +transform_utf8_one <- function(path, fun, dry) { + rlang::arg_match(dry, c("on", "off", "fail")) + rlang::try_fetch( + { + file_with_info <- read_utf8(path) + # only write back when changed OR when there was a missing newline + new <- unclass(fun(file_with_info$text)) + if (identical(new, "")) { + new <- character(0L) + } + identical_content <- identical(file_with_info$text, new) + identical <- identical_content && !file_with_info$missing_EOF_line_break + if (!identical) { + if (dry == "fail") { + rlang::abort( + paste0( + "File `", path, "` would be modified by styler and argument dry", + " is set to 'fail'." + ), + class = "dryError" + ) + } else if (dry == "on") { + # don't do anything + } else if (dry == "off") { + write_utf8(new, path) + } else { + # not implemented + } + } + !identical + }, + error = function(e) { + if (inherits(e, "dryError")) { + rlang::abort(conditionMessage(e)) + } else { + warn(paste0("When processing ", path, ": ", conditionMessage(e))) + } + NA + } + ) +} + +#' Read UTF-8 +#' +#' Reads an UTF-8 file, returning the content and whether or not the final line +#' was blank. This information is required higher up in the call stack because +#' we should write back if contents changed or if there is no blank line at the +#' EOF. A perfectly styled file with no EOF blank line will gain such a line +#' with this implementation. +#' @param path A path to a file to read. +#' @keywords internal +read_utf8 <- function(path) { + out <- rlang::try_fetch( + read_utf8_bare(path), + warning = function(w) w, + error = function(e) e + ) + if (is.character(out)) { + list( + text = out, + missing_EOF_line_break = FALSE + ) + } else if (inherits(out, "error")) { + rlang::abort(out$message) + } else if (inherits(out, "warning")) { + list( + text = read_utf8_bare(path, warn = FALSE), + missing_EOF_line_break = grepl("incomplete", out$message, fixed = TRUE) + ) + } +} + +#' Drop-in replacement for `xfun::read_utf8()`, with an optional `warn` +#' argument. +#' @keywords internal +read_utf8_bare <- function(con, warn = TRUE) { + x <- readLines(con, encoding = "UTF-8", warn = warn) + i <- invalid_utf8(x) + n <- length(i) + if (n > 0L) { + stop( + c( + "The file ", con, " is not encoded in UTF-8. 
", + "These lines contain invalid UTF-8 characters: " + ), + toString(c(utils::head(i), if (n > 6L) "...")) + ) + } + x +} + +#' Drop-in replacement for `xfun:::invalid_utf8()` +#' @keywords internal +invalid_utf8 <- function(x) { + which(!is.na(x) & is.na(iconv(x, "UTF-8", "UTF-8"))) +} + +#' Drop-in replacement for `xfun::write_utf8()` +#' @keywords internal +write_utf8 <- function(text, con, ...) { + withr::local_options(encoding = "native.enc") + writeLines(enc2utf8(text), con, ..., useBytes = TRUE) +} diff --git a/R/nest.R b/R/nest.R index 76b44a35a..f56cdc6e3 100644 --- a/R/nest.R +++ b/R/nest.R @@ -1,31 +1,213 @@ #' Obtain a nested parse table from a character vector #' #' Parses `text` to a flat parse table and subsequently changes its -#' representation into a nested parse table with -#' [nest_parse_data()]. -#' @param text A character vector to parse. +#' representation into a nested parse table with [nest_parse_data()]. +#' @inheritParams text_to_flat_pd #' @return A nested parse table. See [tokenize()] for details on the columns #' of the parse table. -#' @importFrom purrr when -compute_parse_data_nested <- function(text) { - parse_data <- tokenize(text) %>% - add_terminal_token_before() %>% - add_terminal_token_after() - +#' @examples +#' code <- " +#' ab <- 1L # some comment +#' abcdef <- 2L +#' " +#' writeLines(code) +#' compute_parse_data_nested(code) +#' @export +compute_parse_data_nested <- function(text, + transformers = tidyverse_style(), + more_specs = NULL) { + parse_data <- text_to_flat_pd(text, transformers, more_specs = more_specs) + env_add_stylerignore(parse_data) parse_data$child <- rep(list(NULL), length(parse_data$text)) pd_nested <- parse_data %>% nest_parse_data() %>% flatten_operators() %>% - when(any(parse_data$token == "EQ_ASSIGN") ~ relocate_eq_assign(.), ~.) + add_cache_block() pd_nested } +#' Creates a flat parse table with minimal initialization +#' +#' Creates a flat parse table with minimal initialization and makes the parse +#' table shallow where appropriate. +#' @details +#' This includes: +#' +#' * token before and after. +#' * stylerignore attribute. +#' * caching attributes. +#' @inheritParams tokenize +#' @inheritParams add_attributes_caching +#' @details +#' Note that the parse table might be shallow if caching is enabled and some +#' values are cached. +#' @keywords internal +text_to_flat_pd <- function(text, transformers, more_specs) { + tokenize(text) %>% + add_terminal_token_before() %>% + add_terminal_token_after() %>% + add_stylerignore() %>% + add_attributes_caching(transformers, more_specs = more_specs) %>% + shallowify() +} + +#' Add the block id to a parse table +#' +#' Must be after [nest_parse_data()] because requires a nested parse table as +#' input. +#' @param pd_nested A top-level nest. +#' @keywords internal +add_cache_block <- function(pd_nested) { + if (cache_is_activated()) { + pd_nested$block <- cache_find_block(pd_nested) + } else { + pd_nested$block <- rep(1L, length(pd_nested$block)) + } + pd_nested +} + + +#' Shallowify the parse table +#' +#' Cached expressions don't need to be transformed with `transformers` in +#' [parse_transform_serialize_r_block()], we simply return `text` for the +#' top-level token. +#' @details +#' Expressions that are cached are already styled correctly. We can make the +#' parse table shallow at these locations, fully relying on the `text` column: +#' +#' * remove all children, as they are not needed anymore. +#' * mark the expression as a terminal. 
+#' +#' @section Top-level comments: +#' Note that we do not cache top-level comments. Because package code has a lot +#' of roxygen comments and each of them is a top-level expression, checking is +#' very expensive. More expensive than styling, because comments are always +#' terminals. This will also yield large speed improvements in +#' [compute_parse_data_nested()] because nesting is expensive and will not be +#' done for cached expressions. +#' @section Implementation: +#' Because the structure of the parse table is not always "top-level expression +#' first, then children", this function creates a temporary parse table that has +#' this property and then extract the ids and subset the original parse table so +#' it is shallow in the right places. +#' @keywords internal +shallowify <- function(pd) { + if (cache_is_activated()) { + order <- order(pd$line1, pd$col1, -pd$line2, -pd$col2, as.integer(pd$terminal)) + pd_parent_first <- vec_slice(pd, order) + pd_parent_first_split <- vec_split(pd_parent_first, cumsum(pd_parent_first$parent == 0L)) + pos_ids_to_keep <- pd_parent_first_split[[2L]] %>% + map(find_pos_id_to_keep) %>% + unlist(use.names = FALSE) + shallow <- vec_slice(pd, pd$pos_id %in% pos_ids_to_keep) + shallow$terminal[shallow$is_cached] <- TRUE + # all cached expressions need to be marked as terminals because to + # [apply_stylerignore()], we rely on terminals only. + shallow + } else { + pd + } +} + +#' Find the pos ids to keep +#' +#' To make a parse table shallow, we must know which ids to keep. +#' `split(cumsum(pd_parent_first$parent == 0L))` above puts comments with +#' negative parents in the same block as proceeding expressions (but also with +#' positive). +#' `find_pos_id_to_keep()` must hence always keep negative comments. We did not +#' use `split(cumsum(pd_parent_first$parent < 1L))` because then every top-level +#' comment is an expression on its own and processing takes much longer for +#' typical roxygen annotated code. +#' @param pd A temporary top-level nest where the first expression is always a +#' top-level expression, potentially cached. +#' @details +#' Note that top-level comments **above** code have negative parents +#' (the negative value of the parent of the code expression that follows after, +#' another comment might be in the way though), all comments that are not top +#' level have positive ids. All comments for which no code follows afterwards +#' have parent 0. +#' @examples +#' styler:::get_parse_data(c("#", "1")) +#' styler:::get_parse_data(c("c(#", "1)")) +#' styler:::get_parse_data(c("", "c(#", "1)", "#")) +#' @keywords internal +find_pos_id_to_keep <- function(pd) { + if (pd$is_cached[1L]) { + pd$pos_id[pd$parent <= 0L] + } else { + pd$pos_id + } +} + + +#' Turn off styling for parts of the code +#' +#' Using stylerignore markers, you can temporarily turn off styler. Beware that +#' for `styler > 1.2.0`, some alignment is +#' [detected by styler](https://styler.r-lib.org/articles/detect-alignment.html), +#' making stylerignore redundant. See a few illustrative examples below. +#' @details +#' Styling is on for all lines by default when you run styler. +#' +#' - To mark the start of a sequence where you want to turn styling off, use +#' `# styler: off`. +#' - To mark the end of this sequence, put `# styler: on` in your code. After +#' that line, styler will again format your code. +#' - To ignore an inline statement (i.e. just one line), place `# styler: off` +#' at the end of the line. 
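A small usage sketch (not part of the patch) for the inline case mentioned in the last bullet above:

styler::style_text(c(
  "1+1 # styler: off",
  "1+1"
))
# the first line is left untouched, the second should become `1 + 1`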
+#' To use something else as start and stop markers, set the R options +#' `styler.ignore_start` and +#' `styler.ignore_stop` using [options()]. For styler version > 1.6.2, the +#' option supports character vectors longer than one and the marker are not +#' exactly matched, but using a regular expression, which means you can have +#' multiple marker on one line, e.g. `# nolint start styler: off`. +# nolint end +#' @name stylerignore +#' @examples +#' # as long as the order of the markers is correct, the lines are ignored. +#' style_text( +#' " +#' 1+1 +#' # styler: off +#' 1+1 +#' # styler: on +#' 1+1 +#' " +#' ) +#' +#' # if there is a stop marker before a start marker, styler won't be able +#' # to figure out which lines you want to ignore and won't ignore anything, +#' # issuing a warning. +#' \dontrun{ +#' style_text( +#' " +#' 1+1 +#' # styler: off +#' 1+1 +#' # styler: off +#' 1+1 +#' " +#' ) +#' } +#' # some alignment of code is detected, so you don't need to use stylerignore +#' style_text( +#' "call( +#' xyz = 3, +#' x = 11 +#' )" +#' ) +NULL + + #' Enhance the mapping of text to the token "SPECIAL" #' #' Map text corresponding to the token "SPECIAL" to a (more) unique token -#' description. +#' description. #' @param pd A parse table. +#' @keywords internal enhance_mapping_special <- function(pd) { pipes <- pd$token == "SPECIAL" & pd$text == "%>%" pd$token[pipes] <- special_and("PIPE") @@ -47,37 +229,68 @@ special_and <- function(text) { #' #' @param pd_flat A flat parse table. #' @name add_token_terminal +#' @keywords internal NULL #' @rdname add_token_terminal +#' @keywords internal add_terminal_token_after <- function(pd_flat) { terminals <- pd_flat %>% filter(terminal) %>% - arrange(pos_id) + arrange_pos_id() + + rhs <- new_styler_df( + list( + pos_id = terminals$pos_id, + token_after = lead(terminals$token, default = "") + ) + ) - data_frame(pos_id = terminals$pos_id, token_after = lead(terminals$token, default = "")) %>% - left_join(pd_flat, ., by = "pos_id") + left_join(pd_flat, rhs, by = "pos_id") } #' @rdname add_token_terminal +#' @keywords internal add_terminal_token_before <- function(pd_flat) { terminals <- pd_flat %>% filter(terminal) %>% - arrange(pos_id) + arrange_pos_id() - data_frame( - id = terminals$id, - token_before = lag(terminals$token, default = "") - ) %>% - left_join(pd_flat, ., by = "id") + rhs <- new_styler_df( + list( + id = terminals$id, + token_before = lag(terminals$token, default = "") + ) + ) + + left_join(pd_flat, rhs, by = "id") } -#' @describeIn add_token_terminal Removes column `terimnal_token_before`. Might -#' be used to prevent the use of invalidated information, e.g. if tokens were -#' added to the nested parse table. -remove_terminal_token_before_and_after <- function(pd_flat) { - pd_flat$token_before <- NULL - pd_flat$token_after <- NULL + +#' Initialise variables related to caching +#' +#' Note that this does function must be called in [compute_parse_data_nested()] +#' and we cannot wait to initialize this attribute until [apply_transformers()], +#' where all other attributes are initialized with +#' [default_style_guide_attributes()] (when using [tidyverse_style()]) because +#' for cached code, we don't build up the nested structure and leave it shallow +#' (to speed up things), see also [shallowify()]. +#' @inheritParams is_cached +#' @describeIn add_token_terminal Initializes `newlines` and `lag_newlines`. 
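As a rough illustration of why the caching attributes matter, not part of the patch and with a hypothetical cache name: once the cache is activated, an expression styled a second time is found in the cache, so its nest stays shallow and most of the work is skipped.

styler::cache_activate("styler-sketch") # hypothetical cache name, to keep things isolated
styler::style_text("1 + 1") # first run: styles the expression and caches it
styler::style_text("1 + 1") # second run: cache hit, nesting and transforming are skipped
styler::cache_deactivate()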
+#' @keywords internal +add_attributes_caching <- function(pd_flat, transformers, more_specs) { + pd_flat$block <- rep(NA, nrow(pd_flat)) + pd_flat$is_cached <- rep(FALSE, nrow(pd_flat)) + if (cache_is_activated()) { + is_parent <- pd_flat$parent == 0L + pd_flat$is_cached[is_parent] <- map_lgl( + pd_flat$text[pd_flat$parent == 0L], + is_cached, transformers, + more_specs = more_specs + ) + is_comment <- pd_flat$token == "COMMENT" + pd_flat$is_cached[is_comment] <- rep(FALSE, sum(is_comment)) + } pd_flat } @@ -90,69 +303,74 @@ remove_terminal_token_before_and_after <- function(pd_flat) { #' @return An integer vector of length spaces_after_prefix, which is either #' one (if `force_one = TRUE`) or `space_after_prefix` with all values #' below one set to one. +#' @return +#' Numeric vector indicating the number of spaces. +#' @keywords internal set_spaces <- function(spaces_after_prefix, force_one) { if (force_one) { - n_of_spaces <- rep(1, length(spaces_after_prefix)) + rep(1L, length(spaces_after_prefix)) } else { - n_of_spaces <- pmax(spaces_after_prefix, 1L) + pmax(spaces_after_prefix, 1L) } - n_of_spaces } #' Nest a flat parse table #' #' `nest_parse_data` groups `pd_flat` into a parse table with tokens that are -#' a parent to other tokens (called internal) and such that are not (called -#' child). Then, the token in child are joined to their parents in internal -#' and all token information of the children is nested into a column "child". -#' This is done recursively until we are only left with a nested tibble that -#' contains one row: The nested parse table. +#' a parent to other tokens (called internal) and such that are not (called +#' child). Then, the token in child are joined to their parents in internal +#' and all token information of the children is nested into a column "child". +#' This is done recursively until we are only left with a nested data frame that +#' contains one row: The nested parse table. #' @param pd_flat A flat parse table including both terminals and non-terminals. #' @seealso [compute_parse_data_nested()] #' @return A nested parse table. 
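To see what the nesting produces, an illustration that is not part of the patch (`compute_parse_data_nested()` is exported by this change):

pd <- styler::compute_parse_data_nested("a <- fun(x)")
nrow(pd)              # one row per top-level expression
names(pd$child[[1L]]) # the columns of its children, which again contain a `child` column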
-#' @importFrom purrr map2 +#' @keywords internal nest_parse_data <- function(pd_flat) { - if (all(pd_flat$parent <= 0)) return(pd_flat) - pd_flat$internal <- with(pd_flat, (id %in% parent) | (parent <= 0)) - split_data <- split(pd_flat, pd_flat$internal) - - child <- split_data$`FALSE` - internal <- split_data$`TRUE` - - internal$internal_child <- internal$child - internal$child <- NULL - - child$parent_ <- child$parent - joined <- - child %>% - nest_(., "child", setdiff(names(.), "parent_")) %>% - left_join(internal, ., by = c("id" = "parent_")) - nested <- joined - nested$child <- map2(nested$child, nested$internal_child, combine_children) - nested <- nested[, setdiff(names(nested), "internal_child")] - nest_parse_data(nested) + repeat { + if (all(pd_flat$parent <= 0L)) { + return(pd_flat) + } + pd_flat$internal <- with(pd_flat, (id %in% parent) | (parent <= 0L)) + + child <- vec_slice(pd_flat, !pd_flat$internal) + internal <- vec_slice(pd_flat, pd_flat$internal) + + internal$internal_child <- internal$child + internal$child <- NULL + + child$parent_ <- child$parent + + rhs <- nest_(child, "child", setdiff(names(child), "parent_")) + + nested <- left_join(internal, rhs, by = c("id" = "parent_")) + + children <- nested$child + for (i in seq_along(children)) { + new <- combine_children(children[[i]], nested$internal_child[[i]]) + # Work around is.null(new) + children[i] <- list(new) + } + nested$child <- children + nested$internal_child <- NULL + pd_flat <- nested + } } #' Combine child and internal child #' #' Binds two parse tables together and arranges them so that the tokens are in -#' the correct order. +#' the correct order. #' @param child A parse table or `NULL`. #' @param internal_child A parse table or `NULL`. -#' @details Essentially, this is a wrapper around [dplyr::bind_rows()], but -#' returns `NULL` if the result of [dplyr::bind_rows()] is a data frame with +#' @details Essentially, this is a wrapper around vctrs::vec_rbind()], but +#' returns `NULL` if the result of vctrs::vec_rbind()] is a data frame with #' zero rows. +#' @keywords internal combine_children <- function(child, internal_child) { - bound <- bind_rows(child, internal_child) - if (nrow(bound) == 0) return(NULL) - bound[order(bound$pos_id), ] -} - -#' Get the start right -#' -#' On what line does the first token occur? -#' @param pd_nested A nested parse table. -#' @return The line number on which the first token occurs. -find_start_line <- function(pd_nested) { - pd_nested$line1[1] + bound <- vec_rbind(child, internal_child) + if (nrow(bound) == 0L) { + return(NULL) + } + vec_slice(bound, order(bound$pos_id)) } diff --git a/R/nested_to_tree.R b/R/nested-to-tree.R similarity index 53% rename from R/nested_to_tree.R rename to R/nested-to-tree.R index 56da7fdef..c51445140 100644 --- a/R/nested_to_tree.R +++ b/R/nested-to-tree.R @@ -4,36 +4,56 @@ #' @param text A character vector. #' @inheritParams create_node_from_nested_root #' @return A data frame. 
-#' @importFrom purrr when +#' @keywords internal create_tree <- function(text, structure_only = FALSE) { - compute_parse_data_nested(text) %>% - pre_visit(c(default_style_guide_attributes)) %>% + compute_parse_data_nested(text, transformers = NULL) %>% + pre_visit_one(default_style_guide_attributes) %>% + create_tree_from_pd_with_default_style_attributes(structure_only) +} + +create_tree_from_pd_with_default_style_attributes <- function(pd, + structure_only = FALSE) { + pd %>% create_node_from_nested_root(structure_only) %>% + # don't use `styler_df()` here; `vctrs::data_frame()` only accepts a vector, not a object as.data.frame() } -#' Convert a nested tibble into a node tree + +#' Convert a nested data frame into a node tree #' -#' This function is convenient to display all nesting levels of a nested tibble -#' at once. -#' @param pd_nested A nested tibble. +#' This function is convenient to display all nesting levels of a nested data frame +#' at once. +#' @param pd_nested A nested data frame. #' @param structure_only Whether or not create a tree that represents the #' structure of the expression without any information on the tokens. Useful #' to check whether two structures are identical. #' @return An object of class "Node" and "R6". #' @examples -#' if (getRversion() >= 3.2) { -#' code <- "a <- function(x) { if(x > 1) { 1+1 } else {x} }" -#' nested_pd <- styler:::compute_parse_data_nested(code) -#' initialized <- styler:::pre_visit(nested_pd, c(default_style_guide_attributes)) -#' styler:::create_node_from_nested_root(initialized, structure_only = FALSE) +#' if (rlang::is_installed("data.tree")) { +#' withr::with_options( +#' list(styler.cache_name = NULL), # temporarily deactivate cache +#' { +#' code <- "a <- function(x) { if(x > 1) { 1+1 } else {x} }" +#' nested_pd <- compute_parse_data_nested(code) +#' initialized <- styler:::pre_visit_one( +#' nested_pd, default_style_guide_attributes +#' ) +#' styler:::create_node_from_nested_root(initialized, +#' structure_only = FALSE +#' ) +#' } +#' ) #' } +#' @keywords internal create_node_from_nested_root <- function(pd_nested, structure_only) { - if (getRversion() < 3.2) stop_insufficient_r_version() - n <- data.tree::Node$new(ifelse( - structure_only, "Hierarchical structure", + check_installed("data.tree") + name <- if (structure_only) { + "Hierarchical structure" + } else { "ROOT (token: short_text [lag_newlines/spaces] {pos_id})" - )) + } + n <- data.tree::Node$new(name) create_node_from_nested(pd_nested, n, structure_only) n } @@ -41,7 +61,7 @@ create_node_from_nested_root <- function(pd_nested, structure_only) { #' #' @inheritParams create_node_from_nested_root #' @param parent The parent of the node to be created. -#' @importFrom purrr map2 map +#' @keywords internal create_node_from_nested <- function(pd_nested, parent, structure_only) { if (is.null(pd_nested)) { return() @@ -57,7 +77,9 @@ create_node_from_nested <- function(pd_nested, parent, structure_only) { } create_node_info <- function(pd_nested, structure_only) { - if (structure_only) return(seq2(1L, nrow(pd_nested))) + if (structure_only) { + return(seq2(1L, nrow(pd_nested))) + } paste0( pd_nested$token, ": ", pd_nested$short, " [", diff --git a/R/parse.R b/R/parse.R index 60394e49d..6beadd942 100644 --- a/R/parse.R +++ b/R/parse.R @@ -1,3 +1,58 @@ +#' Save parsing from text +#' +#' Parses text safely, i.e. throws an informative error if EOL style does not +#' match LF or indicates the exact position where the parsing failed. 
Note +#' that we can only detect wrong EOL style if it occurs on the first line +#' already. +#' @param text Text to parse. +#' @param ... Parameters passed to [base::parse()]. +#' @keywords internal +#' @examples +#' try(styler:::parse_safely("a + 3 -4 -> x\r\n glück + 1")) +#' # This cannot be detected as a EOL style problem because the first +#' # line ends as expected with \n +#' try(styler:::parse_safely("a + 3 -4 -> x\nx + 2\r\n glück + 1")) +#' +#' styler:::parse_safely("a + 3 -4 -> \n glück + 1") +parse_safely <- function(text, ...) { + tried_parsing <- rlang::try_fetch( + parse(text = text, ...), + error = function(e) e, + warning = function(w) w + ) + if (inherits(tried_parsing, "error")) { + if (has_crlf_as_first_line_sep(tried_parsing$message, text)) { + abort(paste0( + "The code to style seems to use Windows style line endings (CRLF). ", + "styler currently only supports Unix style line endings (LF). ", + "Please change the EOL character in your editor to Unix style and try ", + "again.\nThe parsing error was:\n", tried_parsing$message + )) + } else { + abort(tried_parsing$message) + } + } else if (inherits(tried_parsing, "warning")) { + warn(tried_parsing$message) + } + tried_parsing +} + +#' Check if a string uses CRLF EOLs +#' +#' @param message A message returned with `tryCatch()`. +#' @param initial_text The initial text to style. +#' @keywords internal +has_crlf_as_first_line_sep <- function(message, initial_text) { + split <- strsplit(message, ":", fixed = TRUE)[[1L]] + if (length(split) > 1L && split[1L] == "") { + start_char <- as.numeric(split[3L]) + offending_line <- initial_text[as.integer(split[2L])] + if (!is.na(offending_line) && substr(offending_line, start_char, start_char + 1L) == "\r\n") { + return(TRUE) + } + } + FALSE +} #' Obtain token table from text #' #' [utils::getParseData()] is used to obtain a flat parse table from `text`. @@ -9,69 +64,142 @@ #' * A column "pos_id" for (positional id) which can be used for sorting #' (because "id" cannot be used in general). Note that the nth value of this #' column corresponds to n as long as no tokens are inserted. -#' * A column "child" that contains the nested subtibbles. +#' * A column "child" that contains *nest*s. #' -#' @param text A character vector. +#' @inheritParams get_parse_data #' @return A flat parse table -#' @importFrom rlang seq2 +#' +#' @keywords internal tokenize <- function(text) { - get_parse_data(text, include_text = NA) %>% - verify_str_txt(text) %>% + get_parse_data(text, include_text = TRUE) %>% + ensure_correct_txt(text) %>% enhance_mapping_special() } #' Obtain robust parse data #' #' Wrapper around `utils::getParseData(parse(text = text))` that returns a flat -#' parse table. +#' parse table. When caching information should be added, make sure that +#' the cache is activated with `cache_activate()` and both `transformers` and +#' `cache_dir` are non-`NULL`. #' @param text The text to parse. #' @param include_text Passed to [utils::getParseData()] as `includeText`. #' @param ... Other arguments passed to [utils::getParseData()]. +#' @keywords internal get_parse_data <- function(text, include_text = TRUE, ...) 
{ # avoid https://bugs.r-project.org/bugzilla3/show_bug.cgi?id=16041 - parse(text = text, keep.source = TRUE) - parsed <- parse(text = text, keep.source = TRUE) - as_tibble(utils::getParseData(parsed, includeText = include_text)) %>% + parse_safely(text, keep.source = TRUE) + parsed <- parse_safely(text, keep.source = TRUE) + pd <- utils::getParseData(parsed, includeText = include_text) %>% + styler_df() + if (getRversion() < "4.2") { + is_unicode_parsing_error <- grepl("^\"\"$", pd$text) + if (any(is_unicode_parsing_error)) { + rlang::abort(paste0( + "Can't parse input due to unicode restriction in base R. Please ", + "upgrade R to >= 4.2 to style this input. ", + "Context: https://github.com/r-lib/styler/issues/847" + )) + } + } + pd <- pd %>% add_id_and_short() + + pd } #' Add column `pos_id` and `short` #' #' Adds column `pos_id` and `short` to a flat parse table. #' @param pd A flat parse table +#' @keywords internal add_id_and_short <- function(pd) { pd$pos_id <- seq2(1L, nrow(pd)) - pd$short <- substr(pd$text, 1, 5) + pd$short <- substr(pd$text, 1L, 5L) pd } -#' Verify the text of strings +#' Ensure a correct `text` of all strings and numeric constants #' -#' Make sure `text` of the tokens `STR_CONST` is correct and adapt if necessary. -#' We first parse `text` again and include also non-terminal text. Then, we -#' replace offending `text` in the terminal expressions with the text of their -#' parents. -#' @param pd_with_terminal_text A parse table. -#' @param text The text from which `pd_with_terminal_text` was created. Needed -#' for potential reparsing. -verify_str_txt <- function(pd_with_terminal_text, text) { - string_ind <- pd_with_terminal_text$token == "STR_CONST" - strings <- pd_with_terminal_text[string_ind, ] - parent_of_strings_ind <- pd_with_terminal_text$id %in% strings$parent - other_ind <- !(string_ind | parent_of_strings_ind) - if (nrow(strings) == 0 || !any(substr(strings$text, 1, 1) == "[")) { - return(pd_with_terminal_text) +#' Make sure `text` of the tokens `STR_CONST` and `NUM_CONST` is correct and +#' adapt if necessary. We replace offending `text` in the terminal expressions +#' with the text of their parents if their line / col position matches and +#' return an error otherwise. +#' @param pd A parse table. 
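Illustration of the problem being handled, not part of the patch: for very long string constants, `utils::getParseData()` abbreviates the `text` of the `STR_CONST` terminal, which is why the text is re-derived from the parent expression here.

long <- paste0('"', strrep("a", 2000L), '"')
pd <- utils::getParseData(parse(text = long, keep.source = TRUE))
pd$text[pd$token == "STR_CONST"] # something like [2002 chars quoted with '"'] instead of the string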
+#' @keywords internal +ensure_correct_txt <- function(pd, text) { + is_problematic_text <- magrittr::or( + is_insufficiently_parsed_string(pd), + is_insufficiently_parsed_number(pd) + ) + if (!any(is_problematic_text)) { + return(pd) } + problematic_text <- vec_slice(pd, is_problematic_text) + is_parent_of_problematic_string <- pd$id %in% problematic_text$parent + + is_unaffected_token <- !magrittr::or( + is_problematic_text, is_parent_of_problematic_string + ) + pd_with_all_text <- get_parse_data(text, include_text = TRUE) - parent_of_strings <- pd_with_all_text[parent_of_strings_ind, c("id", "text", "short")] - strings$text <- NULL - strings$short <- NULL - new_strings <- merge(strings, parent_of_strings, by.x = "parent", by.y = "id") - bind_rows( - new_strings, - pd_with_terminal_text[other_ind, ], - pd_with_terminal_text[parent_of_strings_ind, ] + parent_cols_for_merge <- c("id", "text", "short", line_col_names()) + parent_of_problematic_text <- + pd_with_all_text[is_parent_of_problematic_string, parent_cols_for_merge] + problematic_text$text <- NULL + problematic_text$short <- NULL + new_text <- merge(problematic_text, parent_of_problematic_text, + by.x = "parent", + by.y = "id", + suffixes = c("", "parent") + ) %>% + styler_df() + + if (!lines_and_cols_match(new_text)) { + abort(paste( + "Error in styler:::ensure_correct_txt()." + ), .internal = TRUE) + } + names_to_keep <- setdiff( + names(new_text), + paste0(line_col_names(), "parent") + ) + vec_rbind( + new_text[, names_to_keep], + vec_slice(pd, is_unaffected_token), + vec_slice(pd, is_parent_of_problematic_string) ) %>% - arrange(pos_id) + arrange_pos_id() +} + + +#' Identify strings that were not fully parsed +#' +#' Identifies strings that were not fully parsed due to their vast length. +#' @details +#' The meaning of the variable `is_problematic_string` in the source code +#' changes from "all strings" to "all problematic strings", is partly +#' misleading and this approach was chosen for performance reasons only. +#' @param pd A parse table. +#' @param text The initial code to style. +#' @keywords internal +is_insufficiently_parsed_string <- function(pd) { + grepl("^\\[", pd$text) & pd$token == "STR_CONST" +} + +is_insufficiently_parsed_number <- function(pd) { + grepl("^0x", pd$text) & pd$token == "NUM_CONST" +} + +#' Check whether columns match +#' @keywords internal +#' @noRd +lines_and_cols_match <- function(data) { + left <- paste0(line_col_names(), "") + right <- paste0(line_col_names(), "parent") + identical( + unlist(data[left], use.names = FALSE), + unlist(data[right], use.names = FALSE) + ) } diff --git a/R/reindent.R b/R/reindent.R index db140bd12..fa78aa840 100644 --- a/R/reindent.R +++ b/R/reindent.R @@ -1,70 +1,3 @@ - -#' Update the indention reference -#' -#' @param pd_nested A nested parse table. -#' @name update_indention_ref -NULL - - -#' @describeIn update_indention_ref Updates the reference pos_id for all -#' tokens in `pd_nested` if `pd_nested` contains a function call. Tokens that -#' start on the same line as the opening parenthesis, are not themselves -#' function calls or expressions wrapped in curly brackets are re-indented, -#' that is, they are indented up to the level at which the call ends in -#' terms of col2. We need to take the last from the first child because calls -#' like package::function() can have three elements. 
-#' @examples -#' \dontrun{ -#' # not re-indented -#' call(call( -#' xyz -#' )) -#' # re-indented -#' call(call(1, -#' 2)) -#' } -#' @importFrom purrr map_lgl -#' @importFrom rlang seq2 -update_indention_ref_fun_call <- function(pd_nested) { - current_is_call <- pd_nested$token_before[2] %in% c("SYMBOL_FUNCTION_CALL") - non_comment <- which(pd_nested$token != "COMMENT") - first_non_comment_after_call <- non_comment[non_comment > 2][1] - if ((current_is_call) && - pd_nested$lag_newlines[first_non_comment_after_call] == 0) { - candidates <- seq2(3, nrow(pd_nested) - 1) - - child_is_call <- map_lgl(pd_nested$child, is_function_call) - child_is_curly_expr <- map_lgl(pd_nested$child, is_curly_expr) - child_is_on_same_line <- cumsum(pd_nested$lag_newlines) == 0 - call_on_same_line <- child_is_call & child_is_on_same_line - to_indent <- setdiff(candidates, which(call_on_same_line | child_is_curly_expr)) - - pd_nested$indention_ref_pos_id[to_indent] <- last(pd_nested$child[[1]]$pos_id) - } - pd_nested -} - -#' @describeIn update_indention_ref Updates the reference pos_id for all -#' tokens in `pd_nested` if `pd_nested` contains a function declaration. -#' Tokens inside a function declaration are are re-indented, -#' that is, they are indented up to the level at which the token FUNCTION -#' ends in terms of col2. -#' @examples -#' \dontrun{ -#' a <- function(x, -#' y) { -#' x + y -#' } -#' } -#' @importFrom rlang seq2 -update_indention_ref_fun_dec <- function(pd_nested) { - if (pd_nested$token[1] == "FUNCTION") { - seq <- seq2(3, nrow(pd_nested) - 1) - pd_nested$indention_ref_pos_id[seq] <- pd_nested$pos_id[2] - } - pd_nested -} - #' Apply reference indention to tokens #' #' Applies the reference indention created with functions @@ -72,12 +5,15 @@ update_indention_ref_fun_dec <- function(pd_nested) { #' is applied to all token that inherit from a reference token sequentially, #' i.e. by looping over the target tokens. #' @inheritParams apply_ref_indention_one +#' @keywords internal apply_ref_indention <- function(flattened_pd) { - target_tokens <- which(flattened_pd$pos_id %in% flattened_pd$indention_ref_pos_id) - flattened_pd <- reduce( - target_tokens, + target_tokens <- which( + flattened_pd$pos_id %in% flattened_pd$indention_ref_pos_id + ) + flattened_pd <- Reduce( apply_ref_indention_one, - .init = flattened_pd + target_tokens, + init = flattened_pd ) flattened_pd } @@ -91,16 +27,12 @@ apply_ref_indention <- function(flattened_pd) { #' @param flattened_pd A flattened parse table #' @param target_token The index of the token from which the indention level #' should be applied to other tokens. 
+#' @keywords internal apply_ref_indention_one <- function(flattened_pd, target_token) { - token_points_to_ref <- - flattened_pd$indention_ref_pos_id == flattened_pd$pos_id[target_token] - first_token_on_line <- flattened_pd$lag_newlines > 0L - token_to_update <- which(token_points_to_ref & first_token_on_line) - - # udate spaces + token_to_update <- find_tokens_to_update(flattened_pd, target_token) + # update spaces copied_spaces <- flattened_pd$col2[target_token] - old_spaces <- flattened_pd$lag_spaces[token_to_update[1]] - shift <- copied_spaces - old_spaces + shift <- copied_spaces flattened_pd$lag_spaces[token_to_update] <- flattened_pd$lag_spaces[token_to_update] + shift @@ -111,7 +43,31 @@ apply_ref_indention_one <- function(flattened_pd, target_token) { flattened_pd } - +#' Find the tokens to update when applying a reference indention +#' +#' Given a target token and a flattened parse table, the token for which the +#' spacing information needs to be updated are computed. Since indention is +#' already embedded in the column `lag_spaces`, only tokens at the beginning of +#' a line are of concern. +#' @param flattened_pd A flattened parse table. +#' @inheritParams apply_ref_indention_one +#' @seealso apply_ref_indention_one() +#' @examples +#' style_text("function(a = +#' b, +#' dd +#' ) {}", scope = "indention") +#' style_text("function(a, +#' b, +#' dd +#' ) {}", scope = "indention") +#' @keywords internal +find_tokens_to_update <- function(flattened_pd, target_token) { + token_points_to_ref <- + flattened_pd$indention_ref_pos_id == flattened_pd$pos_id[target_token] + first_token_on_line <- flattened_pd$lag_newlines > 0L + which(token_points_to_ref & first_token_on_line) +} #' Set indention of tokens that match regex @@ -120,26 +76,28 @@ apply_ref_indention_one <- function(flattened_pd, target_token) { #' expression pattern to be a certain amount of spaces. The rule #' is only active for the first tokens on a line. #' @param flattened_pd A flattened parse table. -#' @param pattern A character with regular expressions to match against the token -#' in `flattened_pd`. +#' @param pattern A character with regular expressions to match against the +#' token in `flattened_pd`. #' @param target_indention The desired level of indention of the tokens that #' match `pattern`. #' @param comments_only Boolean indicating whether only comments should be #' checked or all tokens. 
#' @return A flattened parse table with indention set to `target_indention` for #' the tokens that match `regex.` -#' @importFrom purrr map flatten_int +#' @keywords internal set_regex_indention <- function(flattened_pd, pattern, - target_indention = 0, + target_indention = 0L, comments_only = TRUE) { if (comments_only) { cond <- which( - (flattened_pd$token == "COMMENT") & (flattened_pd$lag_newlines > 0) + (flattened_pd$token == "COMMENT") & (flattened_pd$lag_newlines > 0L) ) - if (length(cond) < 1) return(flattened_pd) - to_check <- flattened_pd[cond, ] - not_to_check <- flattened_pd[-cond, ] + if (length(cond) < 1L) { + return(flattened_pd) + } + to_check <- vec_slice(flattened_pd, cond) + not_to_check <- vec_slice(flattened_pd, -cond) } else { to_check <- flattened_pd not_to_check <- NULL @@ -150,6 +108,6 @@ set_regex_indention <- function(flattened_pd, flatten_int() to_check$lag_spaces[indices_to_force] <- target_indention - bind_rows(to_check, not_to_check) %>% - arrange(pos_id) + vec_rbind(to_check, not_to_check) %>% + arrange_pos_id() } diff --git a/R/relevel.R b/R/relevel.R index 8ca6f18ed..c7e5c95cc 100644 --- a/R/relevel.R +++ b/R/relevel.R @@ -4,31 +4,31 @@ #' Flatten some token in the nested parse table based on operators #' #' Certain tokens are not placed optimally in the nested parse data with -#' [compute_parse_data_nested()]. For example, the token of arithmetic -#' operations 1 + 1 + 1 should all be on the same level of nesting since -#' the indention is the same for all but the first two terminals. Setting the -#' indention correctly is easier to achieve if they are put on the same level -#' of nesting. +#' [compute_parse_data_nested()]. For example, the token of arithmetic +#' operations 1 + 1 + 1 should all be on the same level of nesting since the +#' indention is the same for all but the first two terminals. Setting the +#' indention correctly is easier to achieve if they are put on the same level of +#' nesting. #' @param pd_nested A nested parse table to partially flatten. +#' @keywords internal flatten_operators <- function(pd_nested) { - pd_nested %>% - post_visit(c(flatten_operators_one)) + post_visit_one(pd_nested, flatten_operators_one) } #' Flatten one level of nesting with its child #' -#' Flattening is done in two ways. We can flatten a parse table by moving -#' the left hand token of an operator one level up. Or doing that with the -#' right hand token. +#' Flattening is done in two ways. We can flatten a parse table by moving the +#' left hand token of an operator one level up. Or doing that with the right +#' hand token. #' @param pd_nested A nested parse table. #' @include token-define.R +#' @keywords internal flatten_operators_one <- function(pd_nested) { - pd_token_left <- c(special_token, math_token, "'$'") - pd_token_right <- c(special_token, "LEFT_ASSIGN", "'+'", "'-'") - bound <- pd_nested %>% + pd_token_left <- c(special_token, "PIPE", math_token, "'$'") + pd_token_right <- c(special_token, "PIPE", "LEFT_ASSIGN", "EQ_ASSIGN", "'+'", "'-'", "'~'") + pd_nested %>% flatten_pd(pd_token_left, left = TRUE) %>% flatten_pd(pd_token_right, left = FALSE) - bound } @@ -46,12 +46,29 @@ flatten_operators_one <- function(pd_nested) { #' occur in the child in order to flatten the parse table. #' @param left Flag that indicates whether the parse table should be flattened #' from left or from right. 
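# A small sketch of the effect this flattening enables downstream: since
# `1 + 1 + 1` ends up on one level of nesting, continuation lines of an
# operator chain share a single indention level (expected output shown as
# comments, assuming the default tidyverse_style()):
styler::style_text("1 +\n1 +\n1")
#> 1 +
#>   1 +
#>   1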
+#' @keywords internal flatten_pd <- function(pd_nested, token, child_token = token, left = TRUE) { - token_pos <- which(pd_nested$token[-1] %in% token) + 1 - if (length(token_pos) == 0) return(pd_nested) - pos <- token_pos[if_else(left, 1, length(token_pos))] + if_else(left, -1L, 1L) - if (pos < 1) return(pd_nested) - if (!any(pd_nested$child[[pos]]$token[-1] %in% child_token)) return(pd_nested) + token_pos_candidates <- which(pd_nested$token[-1L] %in% token) + 1L + if (length(token_pos_candidates) == 0L) { + return(pd_nested) + } + token_pos_idx <- if (left) { + 1L + } else { + length(token_pos_candidates) + } + token_pos <- token_pos_candidates[token_pos_idx] + if (left) { + pos <- previous_non_comment(pd_nested, token_pos) + } else { + pos <- next_non_comment(pd_nested, token_pos) + } + if (pos < 1L) { + return(pd_nested) + } + if (!any(pd_nested$child[[pos]]$token[-1L] %in% child_token)) { + return(pd_nested) + } bind_with_child(pd_nested, pos) } @@ -61,11 +78,12 @@ flatten_pd <- function(pd_nested, token, child_token = token, left = TRUE) { #' according to the appearance of the tokens. #' @param pd_nested A nested parse table. #' @param pos The position of the child to bind. +#' @keywords internal bind_with_child <- function(pd_nested, pos) { pd_nested %>% - slice(-pos) %>% - bind_rows(pd_nested$child[[pos]]) %>% - arrange(pos_id) + vec_slice(-pos) %>% + vec_rbind(pd_nested$child[[pos]]) %>% + arrange_pos_id() } #' Wrap an expression into an expression @@ -73,138 +91,14 @@ bind_with_child <- function(pd_nested, pos) { #' Takes a parse table and wraps it in a new parse table that contains the #' expression as a child. #' @param pd A parse table. +#' @keywords internal wrap_expr_in_expr <- function(pd) { - expr <- create_tokens( + create_tokens( "expr", "", - pos_ids = create_pos_ids(pd, 1, after = FALSE), + pos_ids = create_pos_ids(pd, 1L, after = FALSE), child = pd, - terminal = FALSE + terminal = FALSE, + stylerignore = pd$stylerignore[1L], + indents = pd$indent[1L] ) } - - -# ____________________________________________________________________________ -# Relocate EQ_ASSIGN #### - -#' Relocate the expressions containing the token `EQ_ASSIGN` within the nested -#' parse table -#' -#' Although syntactically identical, [utils::getParseData()] does not produce -#' the same hierarchy of the parse table (parent and id relationship) for `<-` -#' and `=` (See 'Examples'). -#' This is considered to be a bug and causes problems because the -#' nested parse table constructed with [compute_parse_data_nested()] is not -#' consistent if `EQ_ASSIGN` occurs in the expression to style. In particular, -#' `EQ_ASSIGN` and the tokens to its left and right are located too high up in -#' the hierarchy of the nested parse data. Hence, this function wraps the -#' sub-expression into an expression, similar to [wrap_expr_in_curly()]. -#' Since `wrap_expr_in_curly()` is called from within a visitor -#' (and `relocate_eq_assign()` not), we need to -#' wrap the the implementation [relocate_eq_assign_nest()] that operates on -#' *nests* into a visitor call. -#' @param pd A parse table. 
-#' @examples -#' styler:::get_parse_data("a <- b <- 3") -#' styler:::get_parse_data("a = b = 3") -#' styler:::get_parse_data( -#' "x = 5 -#' if(x >= 5) -#' y = TRUE else -#' y = FALSE", -#' ) -#' styler:::get_parse_data( -#' "x <- 5 -#' if(x >= 5) -#' y <- TRUE else -#' y <- FALSE", -#' ) -relocate_eq_assign <- function(pd) { - pd %>% - post_visit(c(relocate_eq_assign_nest)) -} - - -#' Relocate all assignment expressions that contain `EQ_ASSIGN` within a *nest* -#' -#' Implements the relocation of an `EQ_ASSIGN` and associated tokens -#' within a *nest* (nested parse table at one level of nesting). -#' Note that one assignment expression (such as "a = b = c") can include -#' multiple assignment operators, an assignment involves just one assignment -#' operator. -#' For the relocation of assignment expressions that contain `EQ_ASSIGN` within -#' a *nest*, we need to first find the expressions that contain `=` and then -#' split the *nest* into parse tables each containing one such assignment -#' expression and then relocate each of them separately. -#' We can't do all of them together because: -#' -#' * An assignment can contain more than just three tokens, e.g. (a <- b <- c). -#' * Two assignments can be in the same nest although they don't belong to the -#' same assignment (if-else statement). -#' -#' Please refer to the section 'Examples' in [relocate_eq_assign()] for details. -#' @param pd A parse table. -#' @importFrom rlang seq2 -#' @importFrom purrr map_dfr -relocate_eq_assign_nest <- function(pd) { - idx_eq_assign <- which(pd$token == "EQ_ASSIGN") - if (length(idx_eq_assign) > 0) { - block_id <- find_block_id(pd) - blocks <- split(pd, block_id) - pd <- map_dfr(blocks, relocate_eq_assign_one) - } - pd -} - - -#' Find the block to which a token belongs -#' -#' Two assignment tokens `EQ_ASSIGN` belong to the same block if they are not -#' separated by more than one token. Token between `EQ_ASSIGN` tokens belong -#' to the `EQ_ASSIGN` token occurring before them, except the token right before -#' `EQ_ASSING` already belongs to the `EQ_ASSING` after it. -#' @param pd A parse table. -find_block_id <- function(pd) { - idx_eq_assign <- which(pd$token == "EQ_ASSIGN") - eq_belongs_to_block <- c(0, cumsum(diff(idx_eq_assign)) > 2) - - empty_seq <- rep(0, nrow(pd)) - empty_seq[idx_eq_assign - 1] <- eq_belongs_to_block - block_id <- cumsum(empty_seq) - block_id -} - -#' Relocate an assignment expression -#' -#' Relocates an assignment expression within a parse table containing one -#' assignment expression. Note that one assignment can include multiple -#' assignment operators such as "a = b = c". -#' @param pd A parse table with one assignment expression to relocate. -relocate_eq_assign_one <- function(pd) { - idx_eq_assign <- which(pd$token == "EQ_ASSIGN") - eq_ind <- seq2(idx_eq_assign[1] - 1L, last(idx_eq_assign) + 1L) - eq_expr <- pd[eq_ind, ] %>% - wrap_expr_in_expr() %>% - add_line_col_to_wrapped_expr() %>% - remove_attributes(c( - "multi_line", "indention_ref_pos_id", - "newlines", "indent", "spaces", "lag_newlines" - )) - eq_expr$id <- NA - eq_expr$parent <- NA - non_eq_expr <- pd[-eq_ind, ] - pd <- bind_rows(eq_expr, non_eq_expr) %>% - arrange(pos_id) - pd -} - -#' Adds line and col information to an expression from its child -#' -#' @param pd A parse table. 
-add_line_col_to_wrapped_expr <- function(pd) { - if (nrow(pd) > 1) stop("pd must be a wrapped expression that has one row.") - pd$line1 <- pd$child[[1]]$line1[1] - pd$line2 <- last(pd$child[[1]]$line2) - pd$col1 <- pd$child[[1]]$col1[1] - pd$col2 <- last(pd$child[[1]]$col2) - pd -} diff --git a/R/roxygen-examples-add-remove.R b/R/roxygen-examples-add-remove.R new file mode 100644 index 000000000..3a7ecb40b --- /dev/null +++ b/R/roxygen-examples-add-remove.R @@ -0,0 +1,62 @@ +#' Remove dont* mask +#' +#' @param roxygen Roxygen code examples that contains a dont* segment only. +#' @keywords internal +#' +remove_dont_mask <- function(roxygen) { + mask <- c( + 1L, 2L, if (roxygen[3L] == "\n") 3L, last(which(roxygen == "}")) + ) %>% sort() + list( + code = roxygen[-mask], mask = paste(roxygen[seq2(1L, 2L)], collapse = "") + ) +} + +remove_blank_lines <- function(code) { + code[code != "\n"] +} + +remove_roxygen_mask <- function(text) { + code_with_header <- gsub(pattern = "^#'\\s?", "", text) + remove_roxygen_header(code_with_header) +} + +#' Remove roxygen header +#' +#' Can't simply remove the element with the regex because it may happen that +#' the roxygen tag is on the same line as its contents start. +#' @examples +#' #' @examples c(1, 2) +#' @keywords internal +remove_roxygen_header <- function(text) { + gsub("^[\\s\t]*@examples(If)?(\\s|\t)*", "", text, perl = TRUE) +} + +#' Add the roxygen mask to code +#' +#' This function compares `text` with `initial_text` to make sure a mask is only +#' added to roxygen comments, not ordinary comments +#' @param text Character vector with code. +#' @param initial_text The roxygen code example to style with mask and +#' potentially ordinary comments. +#' @param example_type Either 'examples' or 'examplesIf'. +#' @keywords internal +add_roxygen_mask <- function(text, initial_text, example_type) { + space <- ifelse(text == "", "", " ") + out <- c( + paste0("#' @", example_type, space[1L], text[1L]), + map2_chr(space[-1L], text[-1L], ~ paste0("#'", .x, .y)) + ) + + ordinary_comment <- grep("^#[^']", initial_text, value = TRUE) + if (length(ordinary_comment) == 0L) { + return(out) + } + without_mask <- remove_roxygen_mask(out) + for (idx in seq_along(ordinary_comment)) { + to_replace <- which(ordinary_comment[idx] == without_mask)[1L] + out[to_replace] <- ordinary_comment[idx] + without_mask[to_replace] <- NA + } + out +} diff --git a/R/roxygen-examples-find.R b/R/roxygen-examples-find.R new file mode 100644 index 000000000..bf61983c9 --- /dev/null +++ b/R/roxygen-examples-find.R @@ -0,0 +1,71 @@ +#' Figure out where code examples start and stop +#' +#' Finds the sequence from start to stop of the lines in `text` that are +#' code examples in roxygen comments. +#' @param text A text consisting of code and/or roxygen comments. 
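# A worked sketch of the function documented above and defined just below
# (expected value inferred from the description; the function is internal,
# hence the triple colon):
text <- c("#' Title", "#' @examples", "#' 1 + 1", "NULL")
styler:::identify_start_to_stop_of_roxygen_examples_from_text(text)
# expected: list(2:3), i.e. from the @examples tag line to the last example line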
+#' @keywords internal +identify_start_to_stop_of_roxygen_examples_from_text <- function(text) { + starts <- grep("^#'(\\s|\t)*@examples(If\\s|\\s|\t|$)", text, perl = TRUE) + if (length(starts) < 1L) { + return(integer()) + } + stop_candidates <- which(magrittr::or( + # starts with code or a tag + grepl("(^[^#]|^#'[\\s\t]*@)", text, perl = TRUE), + # starts with a roxygen comment with a blank line after + grepl("^ *\t*$", text) & grepl("^#' *", lead(text)) + )) %>% + c(length(text) + 1L) # if ending with a roxygen example + stops <- map(starts, match_stop_to_start, stop_candidates) %>% + flatten_int() + if (length(stops) < 1L) { + return(integer()) + } + + map2(starts, stops, seq2) +} + +identify_start_to_stop_of_roxygen_examples <- function(path) { + content <- read_utf8_bare(path) + identify_start_to_stop_of_roxygen_examples_from_text(content) +} + +#' Match a stop candidate to a start +#' @param start An integer. +#' @param stop_candidates Potential stop candidates. +#' @examples +#' styler:::match_stop_to_start(1, c(3, 4, 5)) +#' @keywords internal +match_stop_to_start <- function(start, stop_candidates) { + is_stop_candidate <- stop_candidates > start + if (any(is_stop_candidate)) { + min(stop_candidates[is_stop_candidate]) - 1L + } else { + integer() + } +} + +#' Find `dontrun` and friend sequences +#' +#' Returns the indices of the lines that correspond to a `dontrun` or +#' friends sequence. +#' @param bare Bare code. +#' @keywords internal +find_dont_seqs <- function(bare) { + dont_openings <- which(bare %in% dont_keywords()) + dont_closings <- map_int(dont_openings + 1L, find_dont_closings, bare = bare) + map2(dont_openings, dont_closings, seq2) +} + +#' +find_dont_closings <- function(bare, dont_openings) { + opening <- cumsum(bare == "{") + closing <- cumsum(bare == "}") + diff <- opening - closing + level_dont <- diff[dont_openings] + match_closing <- intersect( + seq2(dont_openings + 1L, length(bare)), + which(diff == level_dont - 1L) + )[1L] + match_closing + 1L +} diff --git a/R/roxygen-examples-parse.R b/R/roxygen-examples-parse.R new file mode 100644 index 000000000..6383fa337 --- /dev/null +++ b/R/roxygen-examples-parse.R @@ -0,0 +1,184 @@ +#' Parse roxygen comments into text +#' +#' Used to parse roxygen code examples. Removes line break before +#' `\\dontrun{...}` and friends because it does not occur for segments other +#' than `\\dont{...}` and friends. +#' @param roxygen Roxygen comments. +#' @examples +#' styler:::parse_roxygen(c( +#' "#' @examples", +#' "#' 1+ 1" +#' )) +#' styler:::parse_roxygen(c( +#' "#' @examples 33", +#' "#'1+ 1" +#' )) +#' @keywords internal +parse_roxygen <- function(roxygen) { + emulated <- emulate_rd(roxygen) + connection <- textConnection(emulated$text) + had_warning <- FALSE + parsed <- withCallingHandlers( + { + parsed <- tools::parse_Rd(connection, fragment = TRUE) %>% + as.character(deparse = FALSE) + if (had_warning) { + roxygen_remove_extra_brace(parsed) + } else { + parsed + } + }, + warning = function(w) { + had_warning <<- TRUE + invokeRestart("muffleWarning") + } + ) + close(connection) + list(text = parsed, example_type = emulated$example_type) +} + +#' Fix [tools::parse_Rd()] output +#' +#' Since [tools::parse_Rd()] treats braces in quotes as literal braces when +#' determining brace symmetry, a brace might be added in error to the parsed +#' data (at the end). We'll remove one at the time, check if output is parsable +#' until no braces are left. 
If we end up with no braces left, we signal a +#' parsing error, otherwise, we return the initial (not parsable input due to +#' *dont* sequence) with the trailing braces removed. +#' @examples +#' styler:::parse_roxygen( +#' c( +#' "#' @examples", +#' "#' x <- '{'", +#' "#' \\dontrun{", +#' "#' fu(x = 3)", +#' "#' }" +#' ) +#' ) +#' styler:::parse_roxygen( +#' c( +#' "#' @examples", +#' "#' x <- '{'", +#' "#' \\dontrun{", +#' "#' c('{', \"'{{{\" ,\"[\")", +#' "#' }" +#' ) +#' ) +#' @keywords internal +roxygen_remove_extra_brace <- function(parsed) { + parsed <- rlang::try_fetch( + { + parse(text = paste0(gsub("^\\\\[[:alpha:]]*", "", parsed), collapse = "")) + parsed + }, + error = function(e) { + # might have extra braces that are not needed: try to remove them + + # if fails, you need initial input for best error message + parsed_ <- gsub("^\\\\[[:alpha:]]+", "", parsed) + worth_trying_to_remove_brace <- any(parsed == "}") + if (worth_trying_to_remove_brace) { + # try to remove one and see if you can parse. If not, another one, until + # you don't have any brace left. + + while (worth_trying_to_remove_brace) { + # remove brace + brace <- which(parsed == "}") + if (length(brace) > 0L) { + parsed <- parsed[-last(brace)] + } + linebreak <- which(parsed == "\n") + if (length(linebreak) > 0L) { + parsed <- parsed[-last(linebreak)] + } + # try if can be parsed (need remve dontrun) + worth_trying_to_remove_brace <- rlang::try_fetch( + { + # this will error informatively + parse(text = gsub("^\\\\[[:alpha:]]+", "", parsed)) + # if parsing succeeds, we can stop tryint to remove brace and move + # on with parsed + FALSE + }, + error = function(...) { + # continue if braces are left, otherwise give up + if (any(last(parsed) %in% c("}", "\n"))) { + TRUE + } else { + # this will error informatively. If not, outer loop will fail + # informatively + parse(text = gsub("^\\\\[[:alpha:]]+", "", parsed_)) + FALSE + } + } + ) + } + } else { + # this will error informatively + parse(text = gsub("^\\\\[[:alpha:]]*", "", parsed_)) + } + parsed + } + ) +} + +#' Convert roxygen comments to Rd code +#' +#' We leverage roxygen2 workhorse function [roxygen2::roc_proc_text()] if +#' our input contains character that have to be escaped. Since this is an +#' expensive operation, we opt out of it and perform a simple +#' `remove_roxygen_mask()` when there are no characters to escape. +#' @keywords internal +emulate_rd <- function(roxygen) { + example_type <- gsub( + "^#'(\\s|\t)*@examples(If)?(\\s|\t)*(.*)", "examples\\2", roxygen[1L] + ) + if (needs_rd_emulation(roxygen)) { + roxygen <- c( + "#' Example", + gsub( + "^#'(\\s|\t)*@examples(If)?(\\s|\t)*(.*)", "#' @examples \\4", roxygen + ), + "x <- 1" + ) + roxygen <- gsub("(^#)[^']", "#' #", roxygen) + + processed <- roxygen2::roc_proc_text( + roxygen2::rd_roclet(), + paste(roxygen, collapse = "\n") + ) + + text <- processed[[1L]]$get_section("examples") + text <- as.character(text)[-1L] + text <- c( + if (grepl("^#'(\\s|\t)*@examples(\\s|\t)*$", roxygen[2L])) "", + text + ) + } else { + text <- remove_roxygen_mask(roxygen) + } + list( + text = text, + example_type = example_type + ) +} + +#' Check if rd emulation is required with [roxygen2::roc_proc_text()] +#' @keywords internal +needs_rd_emulation <- function(roxygen) { + # escape characters \ and % count, but not macros like \dontrun + any(grepl("\\\\|%", gsub("^#'\\s*\\\\[[:alpha:]]*", "", roxygen))) +} + +#' Changing the line definition +#' +#' Input: New line denoted with `\\n`. Lines can span across elements. 
+#' Output: Each element in the vector is one line. +#' +#' @param raw Raw code to post-process. +#' @keywords internal +post_parse_roxygen <- function(raw) { + raw %>% + paste0(collapse = "") %>% + convert_newlines_to_linebreaks() +} diff --git a/R/roxygen-examples.R b/R/roxygen-examples.R new file mode 100644 index 000000000..242d422e1 --- /dev/null +++ b/R/roxygen-examples.R @@ -0,0 +1,149 @@ +#' Style a roxygen code example that may contain dontrun and friends +#' +#' Parses roxygen2 comments into code, breaks it into dont* (dontrun, donttest, +#' dontshow) and run sections and processes each segment individually using +#' [style_roxygen_example_snippet()]. +#' @inheritParams parse_transform_serialize_r +#' @param example Roxygen example code. +#' @inheritSection parse_transform_serialize_roxygen Hierarchy +#' @keywords internal +style_roxygen_code_example <- function(example, transformers, base_indention) { + example <- vec_split(example, cumsum(grepl("^#' *@examples", example))) + purrr::map( + example[[2L]], style_roxygen_code_example_one, + transformers = transformers, base_indention = base_indention + ) %>% + flatten_chr() +} + +#' Style a roxygen code example with exactly one `@example` or `@exampleIf` +#' @inheritParams style_roxygen_code_example +#' @param example_one A character vector, one element per line, that contains in +#' total at most one example tag. +#' @keywords internal +style_roxygen_code_example_one <- function(example_one, + transformers, + base_indention) { + # Workaround for imperfect parsing of roxygen2 examples + example_one <- example_one[example_one != ""] + + bare <- parse_roxygen(example_one) + one_dont <- vec_split(bare$text, factor(cumsum(bare$text %in% dont_keywords()))) + unmasked <- map(one_dont[[2L]], style_roxygen_code_example_segment, + transformers = transformers, + base_indention = base_indention + ) %>% + flatten_chr() + if (bare$example_type == "examplesIf") { + rlang::try_fetch( + parse_text(unmasked[1L]), + error = function(e) { + abort(paste0( + "Could not style condition in `@examplesIf` because it would result ", + "in multi-line condition, which is currently not supported in ", + "{roxygen2} (see https://github.com/r-lib/roxygen2/issues/1242)." + )) + } + ) + } + unmasked %>% + add_roxygen_mask(example_one, bare$example_type) +} + +#' Style a roxygen code example segment +#' +#' A roxygen code example segment corresponds to roxygen example code that +#' contains at most one `\\dontrun{...}` or friends. +#' We drop all newline characters first because otherwise the code segment +#' passed to this function was previously parsed with [parse_roxygen()] and +#' line-breaks in and after the `\\dontrun{...}` are expressed with `"\n"`, +#' which contradicts to the definition used elsewhere in this package, where +#' every element in a vector corresponds to a line. These line-breaks don't get +#' eliminated because they move to the front of a `code_segment` and +#' `style_text("\n1")` gives `"\n1"`, i.e. trailing newlines are not +#' eliminated. +#' @param one_dont Bare R code containing at most one `\\dontrun{...}` or +#' friends. 
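# End-to-end sketch of the behaviour these segment helpers implement, via the
# exported interface (expected output as comments; roxygen examples are styled
# by default, include_roxygen_examples = TRUE):
styler::style_text(c(
  "#' @examples",
  "#' 1+ 1",
  "NULL"
))
#> #' @examples
#> #' 1 + 1
#> NULL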
+#' @inheritParams parse_transform_serialize_r +#' @inheritSection parse_transform_serialize_roxygen Hierarchy +#' @keywords internal +style_roxygen_code_example_segment <- function(one_dont, + transformers, + base_indention) { + if (length(one_dont) < 1L) { + return(character()) + } else if (identical(one_dont, "\n")) { + return(character(1L)) + } + dont_seqs <- find_dont_seqs(one_dont) + split_segments <- split_roxygen_segments(one_dont, unlist(dont_seqs)) + is_dont <- seq2(1L, length(split_segments$separated)) %in% split_segments$selectors + + map2(split_segments$separated, is_dont, + style_roxygen_example_snippet, + transformers = transformers, + base_indention = base_indention + ) %>% + flatten_chr() +} + +#' Given a code snippet is dont* or run, style it +#' +#' @param code_snippet A character vector with code to style. +#' @param is_dont Whether the snippet to process is a dontrun, dontshow, +#' donttest segment or not. +#' @inheritParams parse_transform_serialize_r +#' @inheritSection parse_transform_serialize_roxygen Hierarchy +#' @keywords internal +style_roxygen_example_snippet <- function(code_snippet, + transformers, + is_dont, + base_indention) { + if (is_dont) { + decomposed <- remove_dont_mask(code_snippet) + code_snippet <- decomposed$code + mask <- decomposed$mask + } + code_snippet <- post_parse_roxygen(code_snippet) + append_empty <- !is_dont && + length(code_snippet) > 1L && + last(code_snippet) == "" + + cache_is_active <- cache_is_activated() + is_cached <- is_cached( + code_snippet, transformers, + cache_more_specs( + include_roxygen_examples = TRUE, + base_indention = base_indention + ) + ) + if (!is_cached || !cache_is_active) { + code_snippet <- code_snippet %>% + parse_transform_serialize_r( + transformers, + base_indention = base_indention, + warn_empty = FALSE, + is_roxygen_code_example = TRUE + ) + } + + code_snippet <- ensure_last_n_empty(code_snippet, n = as.integer(append_empty)) + + if (!is_cached && cache_is_active) { + cache_write( + code_snippet, transformers, + cache_more_specs( + include_roxygen_examples = TRUE, base_indention = base_indention + ) + ) + } + + if (is_dont) { + code_snippet <- c(mask, code_snippet, "}") + } + code_snippet +} + +dont_keywords <- function() { + c("\\dontrun", "\\dontshow", "\\donttest") +} diff --git a/R/rules-indention.R b/R/rules-indention.R new file mode 100644 index 000000000..7019ef7e6 --- /dev/null +++ b/R/rules-indention.R @@ -0,0 +1,140 @@ +#' @describeIn update_indention Inserts indention based on round, square and +#' curly brackets. +#' @keywords internal +indent_braces <- function(pd, indent_by) { + indent_indices <- compute_indent_indices( + pd, + token_opening = c("'('", "'['", "'{'", "LBB"), + token_closing = c("')'", "']'", "'}'") + ) + pd$indent[indent_indices] <- pd$indent[indent_indices] + indent_by + set_unindention_child(pd, token = "')'", unindent_by = indent_by) +} + +#' Revert the indention of function declaration header +#' +#' Necessary for consistent indention of the function declaration header. +#' @param pd A parse table. 
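# Sketch of the function-declaration indention these rules target (expected
# output as comments, assuming the default indent_by = 2L): a header broken
# after `(` gets the double indention described below.
styler::style_text("g <- function(\na) {\nNULL\n}")
#> g <- function(
#>     a) {
#>   NULL
#> }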
+#' @inheritParams is_double_indent_function_declaration +#' @seealso set_unindention_child update_indention_ref_fun_dec +#' @keywords internal +unindent_fun_dec <- function(pd, indent_by = 2L) { + if (is_function_declaration(pd)) { + idx_closing_brace <- which(pd$token %in% "')'") + fun_dec_head <- seq2(2L, idx_closing_brace) + if (is_double_indent_function_declaration(pd, indent_by = indent_by)) { + pd$indent[fun_dec_head] <- 2L * indent_by + } else { + pd$indent[fun_dec_head] <- 0L + } + } + pd +} + +#' Is the function declaration double indented? +#' +#' Assumes you already checked if it's a function with +#' `is_function_declaration`. It is double indented if the first token +#' after the first line break that is a `"SYMBOL_FORMALS"`. +#' @param pd A parse table. +#' @inheritParams tidyverse_style +#' @keywords internal +is_double_indent_function_declaration <- function(pd, indent_by = 2L) { + head_pd <- vec_slice(pd, -nrow(pd)) + line_break_in_header <- which(head_pd$lag_newlines > 0L & head_pd$token == "SYMBOL_FORMALS") + if (length(line_break_in_header) > 0L) { + # indent results from applying the rules, spaces is the initial spaces + # (which is indention if a newline is ahead) + pd$spaces[line_break_in_header[1L] - 1L] <= 2L * indent_by + } else { + FALSE + } +} + + + +#' @describeIn update_indention Indents *all* tokens after `token` - including +#' the last token. +#' @keywords internal +indent_op <- function(pd, + indent_by, + token = c( + math_token, + logical_token, + special_token, + "PIPE", + "LEFT_ASSIGN", + "EQ_ASSIGN", + "'$'", + "'~'" + )) { + indent_indices <- compute_indent_indices(pd, token) + pd$indent[indent_indices] <- pd$indent[indent_indices] + indent_by + pd +} + +#' @describeIn update_indention Updates indention for token EQ_SUB. Only differs +#' from [indent_op()] in the sense that not all subsequent tokens in the parse +#' table are necessarily indented, as `EQ_SUB` and `EQ_FORMALS` can occur +#' multiple times in a parse table. +#' occurs is not indented (see[compute_indent_indices()]) +#' @keywords internal +indent_eq_sub <- function(pd, + indent_by, + token = c("EQ_SUB", "EQ_FORMALS")) { + eq_sub <- pd$token %in% token + if (!any(eq_sub)) { + return(pd) + } + has_line_break <- pd$lag_newlines > 0L | pd$token == "COMMENT" + indent_indices <- which(lag(eq_sub, default = FALSE) & has_line_break) + if (any(pd$token[indent_indices] == "COMMENT")) { + indent_indices <- purrr::map_int(indent_indices, function(idx) { + if (pd$token[idx] == "COMMENT") { + next_non_comment(pd, idx) + } else { + idx + } + }) + } + pd$indent[indent_indices] <- pd$indent[indent_indices] + indent_by + pd +} + +#' @describeIn update_indention Is used to indent for / while / if / if-else +#' statements that do not have curly parenthesis. +#' @keywords internal +indent_without_paren <- function(pd, indent_by = 2L) { + pd %>% + indent_without_paren_for_while_fun(indent_by) %>% + indent_without_paren_if_else(indent_by) +} + +#' Update the indention reference +#' +#' @param pd_nested A nested parse table. +#' @name update_indention_ref +#' @keywords internal +NULL + +#' @describeIn update_indention_ref Updates the reference pos_id for all +#' tokens in `pd_nested` if `pd_nested` contains a function declaration. +#' Tokens inside a function declaration are are re-indented, +#' that is, they are indented up to the level at which the token FUNCTION +#' ends in terms of col2. 
+#' @examples +#' \dontrun{ +#' a <- function(x, +#' y) { +#' x + y +#' } +#' } +#' +#' @keywords internal +update_indention_ref_fun_dec <- function(pd_nested) { + if (is_function_declaration(pd_nested) && !is_double_indent_function_declaration(pd_nested)) { + seq <- seq2(3L, nrow(pd_nested) - 2L) + pd_nested$indention_ref_pos_id[seq] <- pd_nested$pos_id[2L] + } + pd_nested +} diff --git a/R/rules-line-breaks.R b/R/rules-line-breaks.R new file mode 100644 index 000000000..badff81eb --- /dev/null +++ b/R/rules-line-breaks.R @@ -0,0 +1,420 @@ +#' Set line break before a curly brace +#' +#' Rule: +#' * Principle: Function arguments that consist of a braced expression always +#' need to start on a new line +#' * Exception: [...] unless it's the last argument and all other +#' arguments fit on the line of the function call +#' * Exception: [...] or they are named. +#' * Extension: Also, expressions following on braced expressions also cause a +#' line trigger. +#' @keywords internal +#' @examplesIf FALSE +#' tryCatch( +#' { +#' f(8) +#' }, +#' error = function(e) NULL +#' ) +#' # last-argument case +#' testthat("braces braces are cool", { +#' code(to = execute) +#' }) +#' call2( +#' x = 2, { +#' code(to = execute) +#' }, +#' c = { +#' # this is the named case +#' g(x = 7) +#' } +#' ) +#' tryGugus( +#' { +#' g5(k = na) +#' }, +#' a + b # line break also here because +#' # preceded by brace expression +#' ) +#' +#' # brace expressions go on new line if part of a pipe, in function call... +#' c( +#' data %>% +#' filter(bar) %>% +#' { +#' cor(.$col1, .$col2, use = "complete.obs") +#' } +#' ) +#' # ... or outside +#' data %>% +#' filter(bar) %>% +#' { +#' cor(.$col1, .$col2, use = "complete.obs") +#' } +set_line_break_before_curly_opening <- function(pd) { + line_break_to_set_idx <- which( + (pd$token_after == "'{'") & !(pd$token %in% c("COMMENT", "EQ_FORMALS")) + ) + + line_break_to_set_idx <- setdiff(line_break_to_set_idx, nrow(pd)) + if (length(line_break_to_set_idx) > 0L) { + is_not_curly_curly <- map_chr( + line_break_to_set_idx + 1L, + ~ next_terminal(vec_slice(pd, .x), vars = "token_after")$token_after + ) != "'{'" + last_expr_idx <- max(which(pd$token == "expr")) + is_last_expr <- if (any(c("IF", "WHILE") == pd$token[1L])) { + # rule not applicable for if and while + TRUE + } else { + (line_break_to_set_idx + 1L) == last_expr_idx + } + no_line_break_before_curly_idx <- any(pd$token[line_break_to_set_idx] == "EQ_SUB") + linebreak_before_curly <- ifelse(is_function_call(pd), + # if in function call and has pipe, it is not recognized as function call + # and goes to else case + any(pd$lag_newlines[seq2(1L, line_break_to_set_idx[1L])] > 0L), + # if not a function call, only break line if it is a pipe followed by {} + pd$token[line_break_to_set_idx] %in% c("SPECIAL-PIPE", "PIPE") + ) + # no line break before last brace expression and named brace expression to + should_be_on_same_line <- is_not_curly_curly & + ( + (is_last_expr & !linebreak_before_curly) | + no_line_break_before_curly_idx + ) + is_not_curly_curly_idx <- line_break_to_set_idx[should_be_on_same_line] + pd$lag_newlines[1L + is_not_curly_curly_idx] <- 0L + + + # other cases: line breaks + should_not_be_on_same_line <- is_not_curly_curly & + ( + (!is_last_expr | linebreak_before_curly) & + !no_line_break_before_curly_idx + ) + should_not_be_on_same_line_idx <- line_break_to_set_idx[ + should_not_be_on_same_line + ] + if (is_function_declaration(pd)) { + should_not_be_on_same_line_idx <- setdiff( + 1L + should_not_be_on_same_line_idx, 
nrow(pd) + ) + } else { + should_not_be_on_same_line_idx <- 1L + should_not_be_on_same_line_idx + } + pd$lag_newlines[should_not_be_on_same_line_idx] <- 1L + + # non-curly expressions after curly expressions must have line breaks + if (length(should_not_be_on_same_line_idx) > 0L) { + comma_exprs_idx <- which(pd$token == "','") + comma_exprs_idx <- setdiff(comma_exprs_idx, 1L + is_not_curly_curly_idx) + non_comment_after_comma <- map_int(comma_exprs_idx, + next_non_comment, + pd = pd + ) + pd$lag_newlines[non_comment_after_comma] <- 1L + } + } + pd +} + + +set_line_break_around_comma_and_or <- function(pd, strict) { + ops <- c("','", "AND", "OR", "AND2", "OR2") + comma_with_line_break_that_can_be_removed_before <- + (pd$token %in% ops) & + (pd$lag_newlines > 0L) & + (pd$token_before != "COMMENT") & + !(lag(pd$token) %in% subset_token_opening) + + pd$lag_newlines[comma_with_line_break_that_can_be_removed_before] <- 0L + pd$lag_newlines[lag(comma_with_line_break_that_can_be_removed_before)] <- 1L + + comma_with_line_break_that_can_be_moved_two_tokens_left <- which( + (pd$token == "EQ_SUB") & + (pd$lag_newlines > 0L) & + (pd$token_before != "COMMENT") & + !(lag(pd$token) %in% subset_token_opening) + ) + + pd$lag_newlines[comma_with_line_break_that_can_be_moved_two_tokens_left] <- 0L + token_before <- map_int( + comma_with_line_break_that_can_be_moved_two_tokens_left, + previous_non_comment, + pd = pd + ) + pd$lag_newlines[token_before] <- 1L + pd +} + +style_line_break_around_curly <- function(strict, pd) { + if (is_curly_expr(pd) && nrow(pd) > 2L) { + closing_before <- pd$token == "'}'" + opening_before <- (pd$token == "'{'") + to_break <- lag(opening_before, default = FALSE) | closing_before + len_to_break <- sum(to_break) + pd$lag_newlines[to_break] <- ifelse( + pd$token[to_break] == "COMMENT", + pmin(1L, pd$lag_newlines[to_break]), + if (strict) 1L else pmax(1L, pd$lag_newlines[to_break]) + ) + } else { + is_else <- pd$token == "ELSE" + if (any(pd$token_before[is_else] == "'}'")) { + pd$lag_newlines[is_else] <- 0L + pd$spaces[c(is_else, FALSE)[-1L]] <- 1L + } + is_if_after_else <- pd$token == "ELSE" & pd$token_after == "IF" + pd$lag_newlines[lag(is_if_after_else)] <- 0L + } + pd +} + +#' Styling around `\{\{` +#' +#' With \{rlang\} version 0.4, a new syntactic sugar is introduced, the +#' curly-curly operator. It interprets this code in a special way: +#' `call(\{\{ x \}\})`. See this +#' [blog post](https://www.tidyverse.org/blog/2019/06/rlang-0-4-0/) +#' on the topic. Here, the curly-curly sugar is understood as two opening +#' curly braces, followed by an expression followed by two closing curly braces, +#' e.g. `\{\{1\}\}`. `\{\{1\} + 1\}` does not contain the curly-curly syntactic +#' sugar according to the above definition. On the other hand `\{\{ x + y \}\}` +#' is recognized by styler as containing it (and is parsable code) +#' but will most likely give an error at runtime because the way the syntactic +#' sugar is defined in rlang is to use a single token within curly-curly. In +#' addition, because rlang parses `\{\{` in a special way (just as `!!`), the +#' expression `\{\{ x \}\}` will give a runtime error when used outside of a +#' context that is capable of handling it, e.g. on the top-level (that is, not +#' within function call like `rlang_fun(\{\{ x \}\})`) or within a base R +#' function such as [c()]. However, these differences are assumed to be +#' irrelevant for styling curly-curly, as much as they were for styling `!!`. 
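# Sketch of the resulting style around the curly-curly operator (expected
# output as a comment):
styler::style_text("summarise(df, mean = mean( {{var }} ))")
#> summarise(df, mean = mean({{ var }}))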
+#' curly-curly affects styling of line break and spaces, namely: +#' +#' * No line break after first or second `\{`, before third and fourth `\{`. +#' * No space after first and third `\{`, one space after second and before +#' third `\}`. +#' * No line breaks within curly-curly, e.g. `\{\{ x \}\}` can only contain line +#' breaks after the last brace or before the first brace. But these are not +#' dependent on curly-curly specifically. +#' @param pd A parse table. +#' @keywords internal +#' @seealso style_text_without_curly_curly +set_line_break_around_curly_curly <- function(pd) { + if (is_curly_expr(pd)) { + # none after { + opening_before <- (pd$token == "'{'") & + (pd$token_before == "'{'" | pd$token_after == "'{'") + + # none before } + closing_before <- (pd$token == "'}'") & + (pd$token_after == "'}'" | pd$token_before == "'}'") + if (any(opening_before) && any(closing_before)) { + pos_opening_idx <- lag(opening_before, default = FALSE) & pd$token != "COMMENT" + pd$lag_newlines[pos_opening_idx] <- 0L + if (any(pos_opening_idx)) { + # if line is broken with opening `{`, also break it with closing + pd$lag_newlines[closing_before & pd$token_after != "COMMENT"] <- 0L + } + } + } + pd +} + +# if ) follows on }, don't break line +remove_line_break_before_round_closing_after_curly <- function(pd) { + round_after_curly <- pd$token == "')'" & (pd$token_before == "'}'") + pd$lag_newlines[round_after_curly] <- 0L + pd +} + +remove_line_breaks_in_fun_dec <- function(pd) { + if (is_function_declaration(pd)) { + is_double_indention <- is_double_indent_function_declaration(pd) + round_after <- ( + pd$token == "')'" | pd$token_before == "'('" + ) & + pd$token_before != "COMMENT" + pd$lag_newlines[pd$lag_newlines > 1L] <- 1L + pd$lag_newlines[round_after] <- 0L + if (is_double_indention) { + pd$lag_newlines[lag(pd$token == "'('")] <- 1L + } + } + pd +} + +#' +add_line_break_after_pipe <- function(pd) { + is_pipe <- pd$token %in% c("SPECIAL-PIPE", "PIPE") + pd$lag_newlines[lag(is_pipe) & pd$lag_newlines > 1L] <- 1L + + if (sum(is_pipe & pd$token_after != "COMMENT") > 1L && + !(next_terminal(pd, vars = "token_before")$token_before %in% c("'('", "EQ_SUB", "','"))) { + pd$lag_newlines[lag(is_pipe) & pd$token != "COMMENT"] <- 1L + } + pd +} + +set_line_break_after_assignment <- function(pd) { + is_assignment <- lag(pd$token, default = FALSE) %in% c("LEFT_ASSIGN", "EQ_ASSIGN") + if (any(is_assignment)) { + pd$lag_newlines[is_assignment] <- pmin(1L, pd$lag_newlines[is_assignment]) + } + pd +} + + +#' Set line break for multi-line function calls +#' @param pd A parse table. +#' @param except_token_after A character vector with tokens after "'('" that do +#' not cause a line break after "'('". +#' @param except_text_before A character vector with text before "'('" that do +#' not cause a line break after "'('". +#' @param except_token_before A character vector with text before "')'" that do +#' not cause a line break before "')'". +#' @param force_text_before A character vector with text before "'('" that +#' forces a line break after every argument in the call. +#' @name set_line_break_if_call_is_multi_line +#' +#' @keywords internal +NULL + +#' Sets line break after opening parenthesis +#' +#' @details +#' In general, every call that is multi-line has a line break after the opening +#' parenthesis. Exceptions: +#' +#' * The token right after the parenthesis is a comment, then, the line should +#' be broken after the comment only. Governed by `except_token_after`. 
+#' * The name of the function called is `ifelse()` or similar, where we can +#' allow the condition on the same line as the function name, and we don't +#' impose rules on the line breaks for the subsequent arguments. Governed +#' by `except_text_before`. +#' * Some calls like `switch()` statements are always forced to become multi- +#' line. Governed by `force_text_before`. +#' +#' @keywords internal +set_line_break_after_opening_if_call_is_multi_line <- function(pd, + except_token_after = NULL, + except_text_before = NULL, + force_text_before = NULL) { + if (!is_function_call(pd) && !is_subset_expr(pd)) { + return(pd) + } + has_force_text_before <- last(pd$child[[1L]]$text) %in% force_text_before + if (has_force_text_before) { + break_pos <- c( + which(lag(pd$token %in% c("','", "COMMENT"))), + nrow(pd) # always break before last because this is multi-line + ) + } else { + if (!any(pd$lag_newlines[seq2(3L, nrow(pd))] > 0L)) { + return(pd) + } + break_pos <- find_line_break_position_in_multiline_call(pd) + idx_nested <- next_non_comment(pd, 2L) + if (pd_is_multi_line(pd$child[[idx_nested]]) && sum(pd$lag_newlines) > 0L) { + break_pos <- c(break_pos, idx_nested) + } + } + exception_pos <- c( + which(pd$token %in% except_token_after), + ifelse(last(pd$child[[1L]]$text) %in% except_text_before, break_pos, NA) + ) + pd$lag_newlines[setdiff(break_pos, exception_pos)] <- 1L + if (has_force_text_before) { + first_arg <- which(pd$token == "expr")[2L] + if (lag(pd$token)[first_arg] != "COMMENT") { + pd$lag_newlines[first_arg] <- 0L + } + } + pd +} + + +#' Find index of the token before which the line should be broken +#' +#' Given a multi-line function call parse table, this function finds the +#' position of the first named argument and breaks returns the index of it. +#' If there is no named argument, the line is broken right after the opening +#' parenthesis. +#' @inheritParams set_line_break_if_call_is_multi_line +#' @keywords internal +find_line_break_position_in_multiline_call <- function(pd) { + candidate <- (which(pd$token == "EQ_SUB") - 1L)[1L] + if (is.na(candidate)) { + 3L + } else { + candidate + } +} + + +#' @describeIn set_line_break_if_call_is_multi_line Sets line break before +#' closing parenthesis. +#' @keywords internal +set_line_break_before_closing_call <- function(pd, except_token_before) { + if (!is_function_call(pd) && !is_subset_expr(pd)) { + return(pd) + } + npd <- nrow(pd) + is_multi_line <- any(pd$lag_newlines[seq2(3L, npd - 1L)] > 0L) + if (is_multi_line == 0L) { + exception <- which(pd$token_before %in% except_token_before) + pd$lag_newlines[setdiff(npd, exception)] <- 0L + return(pd) + } + idx_non_comment <- previous_non_comment(pd, npd) + if (pd$token[idx_non_comment] == "']'") { + pd$lag_newlines[idx_non_comment] <- 1L + } else { + pd$lag_newlines[npd] <- 1L + } + pd +} + + +#' @rdname set_line_break_if_call_is_multi_line +#' @keywords internal +remove_line_break_in_fun_call <- function(pd, strict) { + if (is_function_call(pd)) { + # no blank lines within function calls + if (strict) { + pd$lag_newlines[ + lag(pd$token == "','") & pd$lag_newlines > 1L & pd$token != "COMMENT" + ] <- 1L + } + if (nrow(pd) == 3L) { + pd$lag_newlines[3L] <- 0L + } + } + pd +} + + +set_line_break_after_ggplot2_plus <- function(pd) { + # if expression is unary, first token is +. Exclude this case. 
+ is_plus_raw <- c(FALSE, pd$token[-1L] == "'+'") + if (any(is_plus_raw)) { + first_plus <- which(is_plus_raw)[1L] + next_non_comment <- next_non_comment(pd, first_plus) + is_plus_or_comment_after_plus_before_fun_call <- + lag(is_plus_raw, next_non_comment - first_plus - 1L, default = FALSE) & + (pd$token_after == "SYMBOL_FUNCTION_CALL" | pd$token_after == "SYMBOL_PACKAGE") + if (any(is_plus_or_comment_after_plus_before_fun_call, na.rm = TRUE)) { + gg_call <- pd$child[[previous_non_comment(pd, first_plus)]]$child[[1L]] + if (!is.null(gg_call) && isTRUE(gg_call$text[gg_call$token == "SYMBOL_FUNCTION_CALL"] == "ggplot")) { + plus_without_comment_after <- setdiff( + which(is_plus_raw), + which(lead(pd$token == "COMMENT")) + ) + + pd$lag_newlines[plus_without_comment_after + 1L] <- 1L + } + } + } + pd +} diff --git a/R/rules-line_break.R b/R/rules-line_break.R deleted file mode 100644 index f9f54d05b..000000000 --- a/R/rules-line_break.R +++ /dev/null @@ -1,107 +0,0 @@ -# A { should never go on its own line -remove_line_break_before_curly_opening <- function(pd) { - rm_break <- (pd$token_after == "'{'") & (pd$token != "COMMENT") - pd$lag_newlines[lag(rm_break)] <- 0L - pd -} - -style_line_break_around_curly <- function(strict, pd) { - if (is_curly_expr(pd) && nrow(pd) > 2) { - closing_before <- pd$token == "'}'" - opening_before <- (pd$token == "'{'") & (pd$token_after != "COMMENT") - to_break <- lag(opening_before, default =FALSE) | closing_before - len_to_break <- sum(to_break) - pd$lag_newlines[to_break] <- ifelse(rep(strict, len_to_break), - 1L, - pmax(1L, pd$lag_newlines[to_break]) - ) - } - pd -} - -# if ) follows on }, don't break line -remove_line_break_before_round_closing_after_curly <- function(pd) { - round_after_curly <- pd$token == "')'" & (pd$token_before == "'}'") - pd$lag_newlines[round_after_curly] <- 0L - pd -} - -remove_line_break_before_round_closing_fun_dec <- function(pd) { - if (is_function_dec(pd)) { - round_after <- pd$token == "')'" & pd$token_before != "COMMENT" - pd$lag_newlines[round_after] <- 0L - } - pd -} - - -#' @importFrom rlang seq2 -add_line_break_after_pipe <- function(pd) { - is_special <- pd$token == c("SPECIAL-PIPE") & pd$token_after != "COMMENT" - if (any(pd$lag_newlines != 0L)) { - pd$lag_newlines[lag(is_special)] <- 1L - } - pd -} - - -#' Set line break for multi-line function calls -#' @param pd A parse table. -#' @param except_token_after A character vector with tokens after "'('" that do -#' not cause a line break after "'('". -#' @param except_text_before A character vector with text before "'('" that do -#' not cause a line break after "'('". -#' @param except_token_before A character vector with text before "')'" that do -#' not cause a line break before "')'". -#' @name set_line_break_if_call_is_multi_line -#' @importFrom rlang seq2 -NULL - -#' @describeIn set_line_break_if_call_is_multi_line Sets line break after -#' opening parenthesis. 
-set_line_break_after_opening_if_call_is_multi_line <- - function(pd, - except_token_after = NULL, - except_text_before = NULL) { - if (!is_function_call(pd) && !is_subset_expr(pd)) return(pd) - npd <- nrow(pd) - seq_x <- seq2(3L, npd - 1L) - is_multi_line <- any( - (pd$lag_newlines[seq_x] > 0) | - (pd$token[seq_x] == "COMMENT") - ) - if (!is_multi_line) { - return(pd) - } - exception_pos <- c( - which(pd$token %in% except_token_after), - if_else(pd$child[[1]]$text[1] %in% except_text_before, 3L, NA) - ) - pd$lag_newlines[setdiff(3, exception_pos)] <- 1L - pd -} - - -#' @describeIn set_line_break_if_call_is_multi_line Sets line break before -#' closing parenthesis. -set_line_break_before_closing_call <- function(pd, except_token_before) { - if (!is_function_call(pd) && !is_subset_expr(pd)) return(pd) - npd <- nrow(pd) - is_multi_line <- any(pd$lag_newlines[seq2(3L, npd - 1L)] > 0) - if (!is_multi_line) { - exception <- which(pd$token_before %in% except_token_before) - pd$lag_newlines[setdiff(npd, exception)] <- 0L - return(pd) - } - pd$lag_newlines[npd] <- 1L - pd -} - - -#' @rdname set_line_break_if_call_is_multi_line -remove_line_break_in_empty_fun_call <- function(pd) { - if (is_function_call(pd) && nrow(pd) == 3) { - pd$lag_newlines[3] <- 0L - } - pd -} diff --git a/R/rules-other.R b/R/rules-other.R deleted file mode 100644 index 8eeceb05b..000000000 --- a/R/rules-other.R +++ /dev/null @@ -1,107 +0,0 @@ -#' @importFrom purrr reduce -add_brackets_in_pipe <- function(pd) { - is_pipe <- pd$token == "SPECIAL-PIPE" - reduce(which(is_pipe), add_brackets_in_pipe_one, .init = pd) -} - -add_brackets_in_pipe_one <- function(pd, pos) { - next_non_comment <- next_non_comment(pd, pos) - if (nrow(pd$child[[next_non_comment]]) < 2) { - new_pos_ids <- create_pos_ids(pd$child[[next_non_comment]], 1, after = TRUE, n = 2) - new_pd <- create_tokens( - tokens = c("'('", "')'"), texts = c("(", ")"), pos_ids = new_pos_ids, - lag_newlines = rep(0, 2) - ) - pd$child[[next_non_comment]] <- - bind_rows(pd$child[[next_non_comment]], new_pd) %>% - arrange(pos_id) - } - pd -} - -#' Wrap if-else statement in curly braces -#' -#' Wrap an if-else statement in curly braces if it is not already wrapped in -#' a such. -#' @param pd A parse table. -#' @param indent_by The amount of spaces used to indent an expression in curly -#' braces. Used for unindention. 
-wrap_if_else_multi_line_in_curly <- function(pd, indent_by = 2) { - if (is_cond_expr(pd)) { - pd <- pd %>% - wrap_if_multiline_curly(indent_by) %>% - wrap_else_multiline_curly(indent_by) - } - pd -} - - -wrap_if_multiline_curly <- function(pd, indent_by) { - if (if_part_requires_braces(pd)) { - closing_brace_ind <- which(pd$token == "')'")[1] - pd$spaces[closing_brace_ind] <- 1L - - to_be_wrapped_expr_with_child <- next_non_comment( - pd, - which(pd$token == "')'")[1] - ) - - all_to_be_wrapped_ind <- seq2( - closing_brace_ind + 1L, to_be_wrapped_expr_with_child - ) - - pd <- wrap_subexpr_in_curly( - pd, all_to_be_wrapped_ind, indent_by - ) - - if (nrow(pd) > 5) pd$lag_newlines[6] <- 0L - } - pd -} - -wrap_else_multiline_curly <- function(pd, indent_by = 2) { - if (contains_else_expr(pd) && - pd_is_multi_line(pd) && - contains_else_expr_that_needs_braces(pd)) { - else_idx <- which(pd$token == "ELSE") - pd$spaces[else_idx] <- 1L - all_to_be_wrapped_ind <- seq2(else_idx + 1L, nrow(pd)) - - pd <- wrap_subexpr_in_curly( - pd, all_to_be_wrapped_ind, indent_by - ) - } - pd -} - -#' Wrap a sub-expression in curly braces -#' -#' Wraps some rows of a parse table into a sub-expression. -#' @inheritParams wrap_if_else_multi_line_in_curly -#' @param ind_to_be_wrapped The indices of the rows that should be wrapped -#' into a new expression. -wrap_subexpr_in_curly <- function(pd, - ind_to_be_wrapped, - indent_by) { - to_be_wrapped_starts_with_comment <- - pd$token[ind_to_be_wrapped[1]] == "COMMENT" - new_expr <- wrap_expr_in_curly( - pd[ind_to_be_wrapped, ], - stretch_out = c(!to_be_wrapped_starts_with_comment, TRUE) - ) - new_expr$indent <- pd$indent[last(ind_to_be_wrapped)] - indent_by - new_expr_in_expr <- new_expr %>% - wrap_expr_in_expr() %>% - remove_attributes(c("token_before", "token_after")) - - pd %>% - slice(-ind_to_be_wrapped) %>% - bind_rows(new_expr_in_expr) %>% - set_multi_line() %>% - arrange(pos_id) -} - -if_part_requires_braces <- function(pd) { - pd_is_multi_line(pd) && - !is_curly_expr(pd$child[[next_non_comment(pd, which(pd$token == "')'")[1])]]) -} diff --git a/R/rules-replacement.R b/R/rules-replacement.R deleted file mode 100644 index 2066e6fdb..000000000 --- a/R/rules-replacement.R +++ /dev/null @@ -1,15 +0,0 @@ -force_assignment_op <- function(pd) { - to_replace <- pd$token == "EQ_ASSIGN" - pd$token[to_replace] <- "LEFT_ASSIGN" - pd$text[to_replace] <- "<-" - pd -} - - -resolve_semicolon <- function(pd) { - is_semicolon <- pd$token == "';'" - if (!any(is_semicolon)) return(pd) - pd$lag_newlines[lag(is_semicolon)] <- 1L - pd <- pd[!is_semicolon, ] - pd -} diff --git a/R/rules-spaces.R b/R/rules-spaces.R new file mode 100644 index 000000000..f9dbb293f --- /dev/null +++ b/R/rules-spaces.R @@ -0,0 +1,364 @@ +#' Set spaces around operators +#' +#' Alignment is kept, if detected. +#' @include token-define.R +#' @keywords internal +#' @include token-define.R +set_space_around_op <- function(pd_flat, strict) { + # spacing and operator in same function because alternative is + # calling token_is_on_aligned_line() twice because comma and operator spacing + # depends on it. 
+ pd_flat <- add_space_after_comma(pd_flat) + op_after <- pd_flat$token %in% op_token + op_before <- lead(op_after, default = FALSE) + # include comma, but only for after + op_after <- op_after | pd_flat$token == "','" + if (!any(op_after)) { + return(pd_flat) + } + + sum_lag_newlines <- sum(pd_flat$lag_newlines) + if ( + !getOption("styler.ignore_alignment", FALSE) && + ( + (is_function_call(pd_flat) && sum_lag_newlines > 2L) || + (is_function_declaration(pd_flat) && sum_lag_newlines > 1L) + ) && + any(pd_flat$token %in% c("EQ_SUB", "','", "EQ_FORMALS")) + ) { + is_on_aligned_line <- token_is_on_aligned_line(pd_flat) + } else { + is_on_aligned_line <- FALSE + } + # operator + must_have_space_before <- op_before & (pd_flat$newlines == 0L) & !is_on_aligned_line + pd_flat$spaces[must_have_space_before] <- if (strict) { + 1L + } else { + pmax(pd_flat$spaces[must_have_space_before], 1L) + } + must_have_space_after <- op_after & (pd_flat$newlines == 0L) & !is_on_aligned_line + pd_flat$spaces[must_have_space_after] <- if (strict) { + 1L + } else { + pmax(pd_flat$spaces[must_have_space_after], 1L) + } + pd_flat +} + +#' Style spacing around math tokens +#' @inheritParams style_space_around_token +#' @param one Character vector with tokens that should be surrounded by at +#' least one space (depending on `strict = TRUE` in the styling functions +#' [style_text()] and friends). See 'Examples'. +#' @param zero Character vector of tokens that should be surrounded with zero +#' spaces. +#' @keywords internal +style_space_around_math_token <- function(strict, zero, one, pd_flat) { + # We remove spaces for zero (e.g., around ^ in the tidyverse style guide) + # even for strict = FALSE to be consistent with the : operator + if (any(pd_flat$token %in% zero)) { + pd_flat <- pd_flat %>% + style_space_around_token( + strict = TRUE, tokens = zero, level_before = 0L, level_after = 0L + ) + } + if (any(pd_flat$token %in% one)) { + pd_flat <- pd_flat %>% + style_space_around_token( + strict = strict, tokens = one, level_before = 1L, level_after = 1L + ) + } + pd_flat +} + +#' Set spacing of token to a certain level +#' +#' Set the spacing of all `tokens` in `pd_flat` to `level` if `strict = TRUE` or +#' to at least to `level` if `strict = FALSE`. +#' @param pd_flat A nest or a flat parse table. +#' @param strict Whether the rules should be applied strictly or not. +#' @param tokens Character vector with tokens that should be styled. +#' @param level_before,level_after Scalar indicating the amount of spaces that +#' should be inserted around the `tokens` on the left and right position +#' respectively. 
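# Sketch of the spacing levels these helpers implement under the default
# tidyverse_style(): zero spaces around `^`, (at least) one space around the
# other math operators (expected output as a comment):
styler::style_text("1+ 1 ^2")
#> 1 + 1^2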
+#' @keywords internal +style_space_around_token <- function(pd_flat, + strict, + tokens, + level_before, + level_after = level_before) { + op_after <- pd_flat$token %in% tokens + op_before <- lead(op_after, default = FALSE) + idx_before <- op_before & (pd_flat$newlines == 0L) + idx_after <- op_after & (pd_flat$newlines == 0L) + if (strict) { + pd_flat$spaces[idx_before] <- level_before + pd_flat$spaces[idx_after] <- level_after + } else { + pd_flat$spaces[idx_before] <- pmax(pd_flat$spaces[idx_before], level_before) + pd_flat$spaces[idx_after] <- pmax(pd_flat$spaces[idx_after], level_after) + } + pd_flat +} + +style_space_around_tilde <- function(pd_flat, strict) { + if (is_symmetric_tilde_expr(pd_flat)) { + pd_flat <- style_space_around_token(pd_flat, + strict, "'~'", + level_before = 1L, level_after = 1L + ) + } + + if (is_asymmetric_tilde_expr(pd_flat)) { + pd_flat <- style_space_around_token(pd_flat, + strict = TRUE, "'~'", level_before = 1L, + level_after = as.integer(nrow(pd_flat$child[[2L]]) > 1L) + ) + } + + pd_flat +} + +remove_space_after_unary_pm_nested <- function(pd) { + if (any(pd$token[1L] %in% c("'+'", "'-'"))) { + pd$spaces[1L] <- 0L + } + + pd +} + +remove_space_before_opening_paren <- function(pd_flat) { + paren_after <- pd_flat$token %in% c("'('", "'['", "LBB") + if (!any(paren_after)) { + return(pd_flat) + } + paren_before <- lead(paren_after, default = FALSE) + pd_flat$spaces[paren_before & (pd_flat$newlines == 0L)] <- 0L + pd_flat +} + +remove_space_after_opening_paren <- function(pd_flat) { + paren_after <- pd_flat$token %in% c("'('", "'['", "LBB") + if (!any(paren_after)) { + return(pd_flat) + } + pd_flat$spaces[paren_after & (pd_flat$newlines == 0L)] <- 0L + pd_flat +} + +remove_space_before_closing_paren <- function(pd_flat) { + paren_after <- pd_flat$token %in% c("')'", "']'") + if (!any(paren_after)) { + return(pd_flat) + } + paren_before <- lead(paren_after, default = FALSE) + pd_flat$spaces[paren_before & (pd_flat$newlines == 0L)] <- 0L + pd_flat +} + +add_space_after_for_if_while <- function(pd_flat) { + comma_after <- pd_flat$token %in% c("FOR", "IF", "WHILE") + if (!any(comma_after)) { + return(pd_flat) + } + idx <- comma_after & (pd_flat$newlines == 0L) + pd_flat$spaces[idx] <- pmax(pd_flat$spaces[idx], 1L) + pd_flat +} + +#' @rdname set_line_break_around_curly_curly +#' @keywords internal +set_space_in_curly_curly <- function(pd) { + if (is_curly_expr(pd)) { + after_inner_opening <- pd$token == "'{'" & pd$token_before == "'{'" + before_inner_closing <- lead(pd$token == "'}'" & pd$token_after == "'}'") + is_curly_curly_inner <- any(after_inner_opening, na.rm = TRUE) && + any(before_inner_closing, na.rm = TRUE) + if (is_curly_curly_inner) { + pd$spaces[after_inner_opening] <- 1L + pd$spaces[before_inner_closing] <- 1L + } + + after_outer_opening <- pd$token == "'{'" & pd$token_after == "'{'" + before_outer_closing <- lead(pd$token == "'}'" & pd$token_before == "'}'") + is_curly_curly_outer <- any(after_outer_opening, na.rm = TRUE) && + any(before_outer_closing, nna.rm = TRUE) + if (is_curly_curly_outer) { + pd$spaces[after_outer_opening] <- 0L + pd$spaces[before_outer_closing] <- 0L + } + } + pd +} + +add_space_after_comma <- function(pd_flat) { + comma_after <- (pd_flat$token == "','") & (pd_flat$newlines == 0L) + pd_flat$spaces[comma_after] <- pmax(pd_flat$spaces[comma_after], 1L) + pd_flat +} + +set_space_after_comma <- function(pd_flat) { + comma_after <- (pd_flat$token == "','") & (pd_flat$newlines == 0L) + pd_flat$spaces[comma_after] <- 1L + pd_flat 
+}
+
+remove_space_before_comma <- function(pd_flat) {
+  comma_after <- pd_flat$token == "','"
+  if (!any(comma_after)) {
+    return(pd_flat)
+  }
+  comma_before <- lead(comma_after, default = FALSE)
+  idx <- comma_before & (pd_flat$newlines == 0L)
+  pd_flat$spaces[idx] <- 0L
+  pd_flat
+}
+
+
+#' Set space between levels of nesting
+#'
+#' With the nested approach, certain rules do not have an effect anymore because
+#' of the nature of the nested structure. Setting spacing before curly
+#' brackets in for / if / while statements and function declarations will be
+#' such a case since a curly bracket is always at the first position in a parse
+#' table, so spacing cannot be set after the previous token.
+#' @param pd_flat A flat parse table.
+#' @keywords internal
+set_space_between_levels <- function(pd_flat) {
+  if (pd_flat$token[1L] %in% c("FUNCTION", "IF", "WHILE")) {
+    index <- pd_flat$token == "')'" & pd_flat$newlines == 0L
+    pd_flat$spaces[index] <- 1L
+  } else if (pd_flat$token[1L] == "FOR") {
+    index <- pd_flat$token == "forcond" & pd_flat$newlines == 0L
+    pd_flat$spaces[index] <- 1L
+  }
+  pd_flat
+}
+
+#' Start comments with a space
+#'
+#' Forces comments to start with a space, that is, after the regular expression
+#' `#+['\\*]`, at least one space must follow if the comment is *non-empty*, i.e.
+#' there are not just spaces within the comment. Multiple spaces may be legit for
+#' indention in some situations.
+#' @section Exceptions:
+#' Spaces won't be added to comments when they are:
+#'
+#' * shebangs
+#' * code chunk headers
+#' * xaringan markers
+#'
+#' @param pd A parse table.
+#' @param force_one Whether or not to force one space or allow multiple spaces.
+#' @keywords internal
+start_comments_with_space <- function(pd, force_one = FALSE) {
+  is_comment <- is_comment(pd)
+
+  if (any(is_comment)) {
+    is_comment <- is_comment &
+      !is_shebang(pd) &
+      !is_code_chunk_header_or_xaringan_or_code_output(pd)
+    if (!any(is_comment)) {
+      return(pd)
+    }
+  } else {
+    return(pd)
+  }
+
+  comments <- re_match(
+    pd$text[is_comment],
+    "^(?<prefix>#+['\\*]*)(?<space_after_prefix> *)(?<text>.*)$"
+  )
+  comments$space_after_prefix <- nchar(
+    comments$space_after_prefix,
+    type = "width"
+  )
+  comments$space_after_prefix <- set_spaces(
+    spaces_after_prefix = comments$space_after_prefix,
+    force_one
+  )
+
+  pd$text[is_comment] <-
+    paste0(
+      comments$prefix,
+      map_chr(comments$space_after_prefix, rep_char, char = " "),
+      comments$text
+    ) %>%
+    trimws("right")
+  pd$short[is_comment] <- substr(pd$text[is_comment], 1L, 5L)
+  pd
+}
+
+
+set_space_before_comments <- function(pd_flat) {
+  comment_after <- (pd_flat$token == "COMMENT") & (pd_flat$lag_newlines == 0L)
+  if (!any(comment_after)) {
+    return(pd_flat)
+  }
+  comment_before <- lead(comment_after, default = FALSE)
+  pd_flat$spaces[comment_before & (pd_flat$newlines == 0L)] <- 1L
+  pd_flat
+}
+
+add_space_before_comments <- function(pd_flat) {
+  comment_after <- (pd_flat$token == "COMMENT") & (pd_flat$lag_newlines == 0L)
+  if (!any(comment_after)) {
+    return(pd_flat)
+  }
+  comment_before <- lead(comment_after, default = FALSE)
+  pd_flat$spaces[comment_before & (pd_flat$newlines == 0L)] <-
+    pmax(pd_flat$spaces[comment_before], 1L)
+  pd_flat
+}
+
+
+remove_space_after_excl <- function(pd_flat) {
+  excl <- (pd_flat$token == "'!'") &
+    (pd_flat$token_after != "'!'") &
+    (pd_flat$newlines == 0L)
+  pd_flat$spaces[excl] <- 0L
+  pd_flat
+}
+
+set_space_after_bang_bang <- function(pd_flat) {
+  last_bang <- (pd_flat$token == "'!'") &
+    (pd_flat$token_after != "'!'") &
+    (pd_flat$newlines == 0L) &
+    (pd_flat$token_before == "'!'")
+
+  pd_flat$spaces[last_bang] <- 0L
+  pd_flat
+}
+
+remove_space_before_dollar <- function(pd_flat) {
+  dollar_after <- (pd_flat$token == "'$'") & (pd_flat$lag_newlines == 0L)
+  dollar_before <- lead(dollar_after, default = FALSE)
+  pd_flat$spaces[dollar_before] <- 0L
+  pd_flat
+}
+
+remove_space_after_fun_dec <- function(pd_flat) {
+  fun_after <- (pd_flat$token == "FUNCTION") & (pd_flat$lag_newlines == 0L)
+  pd_flat$spaces[fun_after] <- 0L
+  pd_flat
+}
+
+remove_space_around_colons <- function(pd_flat) {
+  one_two_or_three_col_after <- pd_flat$token %in% c("':'", "NS_GET_INT", "NS_GET")
+  one_two_or_three_col_before <- lead(one_two_or_three_col_after, default = FALSE)
+
+  col_around <- one_two_or_three_col_before | one_two_or_three_col_after
+
+  pd_flat$spaces[col_around & (pd_flat$newlines == 0L)] <- 0L
+  pd_flat
+}
+
+#' Set space between `EQ_SUB` and `"','"`
+#' @param pd A parse table.
+#' @keywords internal +set_space_between_eq_sub_and_comma <- function(pd) { + op_before <- which(pd$token == "EQ_SUB" & lead(pd$token == "','")) + pd$spaces[op_before] <- 1L + pd +} diff --git a/R/rules-spacing.R b/R/rules-spacing.R deleted file mode 100644 index 577380653..000000000 --- a/R/rules-spacing.R +++ /dev/null @@ -1,281 +0,0 @@ -#' @include token-define.R -add_space_around_op <- function(pd_flat) { - op_after <- pd_flat$token %in% op_token - op_before <- lead(op_after, default = FALSE) - idx_before <- op_before & (pd_flat$newlines == 0L) - pd_flat$spaces[idx_before] <- pmax(pd_flat$spaces[idx_before], 1L) - idx_after <- op_after & (pd_flat$newlines == 0L) - pd_flat$spaces[idx_after] <- pmax(pd_flat$spaces[idx_after], 1L) - pd_flat -} - -#' @include token-define.R -set_space_around_op <- function(pd_flat) { - op_after <- pd_flat$token %in% op_token - if (!any(op_after)) return(pd_flat) - op_before <- lead(op_after, default = FALSE) - pd_flat$spaces[op_before & (pd_flat$newlines == 0L)] <- 1L - pd_flat$spaces[op_after & (pd_flat$newlines == 0L)] <- 1L - pd_flat -} - -#' Style spacing around math tokens -#' @inheritParams style_space_around_math_token_one -#' @param one Character vector with tokens that should be surrounded by at -#' least one space (depending on `strict = TRUE` in the styling functions -#' [style_text()] and friends). See 'Examples'. -#' @param zero Character vector of tokens that should be surrounded with zero -#' spaces. -style_space_around_math_token <- function(strict, zero, one, pd_flat) { - pd_flat %>% - style_space_around_math_token_one(strict, zero, 0L) %>% - style_space_around_math_token_one(strict, one, 1L) -} - -#' Set spacing of token to a certain level -#' -#' Set the spacing of all `tokens` in `pd_flat` to `level` if `strict = TRUE` or -#' to at least to `level` if `strict = FALSE`. -#' @param pd_flat A nest or a flat parse table. -#' @param strict Whether the rules should be applied strictly or not. -#' @param tokens Character vector with tokens that should be styled. -#' @param level Scalar indicating the amount of spaces that should be inserted -#' around the `tokens`. -style_space_around_math_token_one <- function(pd_flat, strict, tokens, level) { - op_after <- pd_flat$token %in% tokens - op_before <- lead(op_after, default = FALSE) - idx_before <- op_before & (pd_flat$newlines == 0L) - idx_after <- op_after & (pd_flat$newlines == 0L) - if (strict) { - pd_flat$spaces[idx_before | idx_after] <- level - } else { - pd_flat$spaces[idx_before | idx_after] <- - pmax(pd_flat$spaces[idx_before | idx_after], level) - } - pd_flat -} - -# depreciated! 
-#' @include token-define.R -remove_space_after_unary_pm <- function(pd_flat) { - op_pm <- c("'+'", "'-'") - op_pm_unary_after <- c(op_pm, op_token, "'('", "','") - - pm_after <- pd_flat$token %in% op_pm - pd_flat$spaces[pm_after & (pd_flat$newlines == 0L) & - (lag(pd_flat$token) %in% op_pm_unary_after)] <- 0L - pd_flat -} - - -remove_space_after_unary_pm_nested <- function(pd) { - if (any(pd$token[1] %in% c("'+'", "'-'"))) { - pd$spaces[1] <- 0L - } - - pd -} - - -fix_quotes <- function(pd_flat) { - str_const <- pd_flat$token == "STR_CONST" - str_const_change <- grepl("^'([^\"]*)'$", pd_flat$text[str_const]) - pd_flat$text[str_const][str_const_change] <- - vapply( - lapply(pd_flat$text[str_const][str_const_change], parse_text), - deparse, - character(1L) - ) - pd_flat -} - -remove_space_before_opening_paren <- function(pd_flat) { - paren_after <- pd_flat$token == "'('" - if (!any(paren_after)) return(pd_flat) - paren_before <- lead(paren_after, default = FALSE) - pd_flat$spaces[paren_before & (pd_flat$newlines == 0L)] <- 0L - pd_flat -} - -remove_space_after_opening_paren <- function(pd_flat) { - paren_after <- pd_flat$token == "'('" - if (!any(paren_after)) return(pd_flat) - pd_flat$spaces[paren_after & (pd_flat$newlines == 0L)] <- 0L - pd_flat -} - -remove_space_before_closing_paren <- function(pd_flat) { - paren_after <- pd_flat$token == "')'" - if (!any(paren_after)) return(pd_flat) - paren_before <- lead(paren_after, default = FALSE) - pd_flat$spaces[paren_before & (pd_flat$newlines == 0L)] <- 0L - pd_flat -} - -add_space_after_for_if_while <- function(pd_flat) { - comma_after <- pd_flat$token %in% c("FOR", "IF", "WHILE") - if (!any(comma_after)) return(pd_flat) - idx <- comma_after & (pd_flat$newlines == 0L) - pd_flat$spaces[idx] <- pmax(pd_flat$spaces[idx], 1L) - pd_flat -} - -add_space_before_brace <- function(pd_flat) { - op_after <- pd_flat$token %in% "'{'" - if (!any(op_after)) return(pd_flat) - op_before <- lead(op_after, default = FALSE) - idx_before <- op_before & (pd_flat$newlines == 0L) & pd_flat$token != "'('" - pd_flat$spaces[idx_before] <- pmax(pd_flat$spaces[idx_before], 1L) - pd_flat -} - -add_space_after_comma <- function(pd_flat) { - comma_after <- (pd_flat$token == "','") & (pd_flat$newlines == 0L) - pd_flat$spaces[comma_after] <- pmax(pd_flat$spaces[comma_after], 1L) - pd_flat -} - -set_space_after_comma <- function(pd_flat) { - comma_after <- (pd_flat$token == "','") & (pd_flat$newlines == 0L) - pd_flat$spaces[comma_after] <- 1L - pd_flat -} - -remove_space_before_comma <- function(pd_flat) { - comma_after <- pd_flat$token == "','" - if (!any(comma_after)) return(pd_flat) - comma_before <- lead(comma_after, default = FALSE) - idx <- comma_before & (pd_flat$newlines == 0L) - pd_flat$spaces[idx] <- 0L - pd_flat -} - - -#' Set space between levels of nesting -#' -#' With the nested approach, certain rules do not have an effect anymore because -#' of the nature of the nested structure. Setting spacing before curly -#' brackets in for / if / while statements and function declarations will be -#' such a case since a curly bracket is always at the first position in a -#' parse table, so spacing cannot be set after the previous token. -#' @param pd_flat A flat parse table. 
-set_space_between_levels <- function(pd_flat) { - if (pd_flat$token[1] %in% c("FUNCTION", "IF", "WHILE")) { - index <- pd_flat$token == "')'" & pd_flat$newlines == 0L - pd_flat$spaces[index] <- 1L - } else if (pd_flat$token[1] == "FOR") { - index <- 2 - pd_flat$spaces[index] <- 1L - } - pd_flat -} - -#' Start comments with a space -#' -#' Forces comments to start with a space, that is, after the regular expression -#' "^#+'*", at least one space must follow if the comment is *non-empty*, i.e -#' there is not just spaces within the comment. Multiple spaces may be legit -#' for indention in some situations. -#' @param pd A parse table. -#' @param force_one Whether or not to force one space or allow multiple spaces -#' after the regex "^#+'*". -#' @importFrom purrr map_chr -start_comments_with_space <- function(pd, force_one = FALSE) { - comment_pos <- pd$token == "COMMENT" - if (!any(comment_pos)) return(pd) - - comments <- rematch2::re_match( - pd$text[comment_pos], - "^(?#+'*)(? *)(?.*)$" - ) - - comments$space_after_prefix <- nchar( - comments$space_after_prefix, type = "width" - ) - comments$space_after_prefix <- set_spaces( - spaces_after_prefix = comments$space_after_prefix, - force_one - ) - - pd$text[comment_pos] <- - paste0( - comments$prefix, - map_chr(comments$space_after_prefix, rep_char, char = " "), - comments$text - ) %>% - trimws("right") - pd$short[comment_pos] <- substr(pd$text[comment_pos], 1, 5) - pd -} - - -set_space_before_comments <- function(pd_flat) { - comment_after <- (pd_flat$token == "COMMENT") & (pd_flat$lag_newlines == 0L) - if (!any(comment_after)) return(pd_flat) - comment_before <- lead(comment_after, default = FALSE) - pd_flat$spaces[comment_before & (pd_flat$newlines == 0L)] <- 1L - pd_flat -} - -add_space_before_comments <- function(pd_flat) { - comment_after <- (pd_flat$token == "COMMENT") & (pd_flat$lag_newlines == 0L) - if (!any(comment_after)) return(pd_flat) - comment_before <- lead(comment_after, default = FALSE) - pd_flat$spaces[comment_before & (pd_flat$newlines == 0L)] <- - pmax(pd_flat$spaces[comment_before], 1L) - pd_flat -} - - -remove_space_after_excl <- function(pd_flat) { - excl <- (pd_flat$token == "'!'") & - (pd_flat$token_after != "'!'") & - (pd_flat$newlines == 0L) - pd_flat$spaces[excl] <- 0L - pd_flat -} - -set_space_after_bang_bang <- function(pd_flat) { - last_bang <- (pd_flat$token == "'!'") & - (pd_flat$token_after != "'!'") & - (pd_flat$newlines == 0L) & - (pd_flat$token_before == "'!'") - - pd_flat$spaces[last_bang] <- 1L - pd_flat -} - -remove_space_before_dollar <- function(pd_flat) { - dollar_after <- (pd_flat$token == "'$'") & (pd_flat$lag_newlines == 0L) - dollar_before <- lead(dollar_after, default = FALSE) - pd_flat$spaces[dollar_before] <- 0L - pd_flat -} - -remove_space_after_fun_dec <- function(pd_flat) { - fun_after <- (pd_flat$token == "FUNCTION") & (pd_flat$lag_newlines == 0L) - pd_flat$spaces[fun_after] <- 0L - pd_flat -} - -remove_space_around_colons <- function(pd_flat) { - one_two_or_three_col_after <- - pd_flat$token %in% c("':'", "NS_GET_INT", "NS_GET") - - one_two_or_three_col_before <- - lead(one_two_or_three_col_after, default = FALSE) - - col_around <- - one_two_or_three_col_before | one_two_or_three_col_after - - pd_flat$spaces[col_around & (pd_flat$newlines == 0L)] <- 0L - pd_flat -} - -#' Set space between EQ_SUB and "','" -#' @param pd A parse table. 
-set_space_between_eq_sub_and_comma <- function(pd) { - op_before <- which(pd$token == "EQ_SUB" & lead(pd$token == "','")) - pd$spaces[op_before] <- 1L - pd -} diff --git a/R/rules-tokens.R b/R/rules-tokens.R new file mode 100644 index 000000000..f36ba289a --- /dev/null +++ b/R/rules-tokens.R @@ -0,0 +1,221 @@ +force_assignment_op <- function(pd) { + to_replace <- pd$token == "EQ_ASSIGN" + pd$token[to_replace] <- "LEFT_ASSIGN" + pd$text[to_replace] <- "<-" + pd +} + + +resolve_semicolon <- function(pd) { + is_semicolon <- pd$token == "';'" + if (!any(is_semicolon)) { + return(pd) + } + pd$lag_newlines[lag(is_semicolon)] <- 1L + pd <- vec_slice(pd, !is_semicolon) + pd +} + +add_brackets_in_pipe <- function(pd) { + if (!identical(pd$text[next_non_comment(pd, 0L)], "substitute")) { + pd$child <- map(pd$child, add_brackets_in_pipe_child) + } + pd +} + +add_brackets_in_pipe_child <- function(pd) { + is_pipe <- pd$token %in% c("SPECIAL-PIPE", "PIPE") + Reduce(add_brackets_in_pipe_one, which(is_pipe), init = pd) +} + +add_brackets_in_pipe_one <- function(pd, pos) { + next_non_comment <- next_non_comment(pd, pos) + rh_child <- pd$child[[next_non_comment]] + if (nrow(rh_child) < 2L && rh_child$token == "SYMBOL") { + child <- pd$child[[next_non_comment]] + new_pos_ids <- create_pos_ids(child, 1L, after = TRUE, n = 2L) + new_pd <- create_tokens( + texts = c("(", ")"), + lag_newlines = rep(0L, 2L), + spaces = 0L, + pos_ids = new_pos_ids, + token_before = c(child$token[1L], "'('"), + token_after = c("')'", child$token_after[1L]), + indention_ref_pos_ids = NA, + indents = child$indent[1L], + tokens = c("'('", "')'"), + terminal = TRUE, + child = NULL, + stylerignore = child$stylerignore[1L], + # block??? + block = NA, + is_cached = FALSE + ) + pd$child[[next_non_comment]] <- vec_rbind(pd$child[[next_non_comment]], new_pd) %>% + arrange_pos_id() + } + pd +} + +#' Wrap if-else, while and for statements in curly braces +#' +#' Wrap statements in curly braces if it is not already wrapped in a such. +#' @param pd A parse table. +#' @param indent_by The amount of spaces used to indent an expression in curly +#' braces. Used for unindention. +#' @keywords internal +wrap_if_else_while_for_fun_multi_line_in_curly <- function(pd, indent_by = 2L) { + key_token <- NULL + + if (is_for_expr(pd)) { + key_token <- "forcond" + } else if (is_conditional_expr(pd) || is_while_expr(pd) || is_function_declaration(pd)) { + key_token <- "')'" + } + + if (length(key_token) > 0L) { + pd <- pd %>% + wrap_multiline_curly(indent_by, + key_token = key_token, + space_after = as.integer(contains_else_expr(pd)) + ) + } + if (is_conditional_expr(pd)) { + pd <- pd %>% + wrap_else_multiline_curly(indent_by, space_after = 0L) + } + pd +} + +#' Wrap a multi-line statement in curly braces +#' +#' @inheritParams wrap_if_else_while_for_fun_multi_line_in_curly +#' @inheritParams wrap_subexpr_in_curly +#' @param key_token The token that comes right before the token that contains +#' the expression to be wrapped (ignoring comments). For if and while loops, +#' this is the closing "')'", for a for-loop it's "forcond". 
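# A minimal illustrative sketch of the user-visible effect of the brace-wrapping
# rules documented above, assuming only the exported styler API (style_text()
# with the default tidyverse style, which applies these token rules): a
# multi-line if/for/while body without braces gets wrapped in curly braces.
library(styler)
style_text("for (i in 1:3)\n  print(i)")
# expected, roughly:
# for (i in 1:3) {
#   print(i)
# }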
+#' @keywords internal +wrap_multiline_curly <- function(pd, indent_by, key_token, space_after = 1L) { + to_be_wrapped_expr_with_child <- next_non_comment( + pd, which(pd$token == key_token)[1L] + ) + next_terminal <- next_terminal(vec_slice(pd, to_be_wrapped_expr_with_child))$text + requires_braces <- if_for_while_part_requires_braces(pd, key_token) && !any(pd$stylerignore) + if (requires_braces || next_terminal == "return") { + closing_brace_ind <- which(pd$token == key_token)[1L] + pd$spaces[closing_brace_ind] <- 1L + + all_to_be_wrapped_ind <- seq2( + closing_brace_ind + 1L, to_be_wrapped_expr_with_child + ) + + pd <- wrap_subexpr_in_curly( + pd, all_to_be_wrapped_ind, indent_by, space_after + ) + + if (nrow(pd) > 5L) pd$lag_newlines[6L] <- 0L + } + pd +} + +#' Add curly braces to else +#' +#' Wrap the else part of a conditional expression into curly braces if not +#' already wrapped into a such. +#' @inheritParams wrap_multiline_curly +#' @keywords internal +wrap_else_multiline_curly <- function(pd, indent_by = 2L, space_after = 0L) { + if (contains_else_expr(pd) && + pd_is_multi_line(pd) && + contains_else_expr_that_needs_braces(pd) && + !any(pd$stylerignore) && + pd$token_before[1L] != "SPECIAL-PIPE") { + else_idx <- which(pd$token == "ELSE") + pd$spaces[else_idx] <- 1L + all_to_be_wrapped_ind <- seq2(else_idx + 1L, nrow(pd)) + + pd <- wrap_subexpr_in_curly( + pd, all_to_be_wrapped_ind, indent_by, space_after + ) + } + pd +} + +#' Wrap a sub-expression in curly braces +#' +#' Wraps some rows of a parse table into a sub-expression. +#' @inheritParams wrap_multiline_curly +#' @param ind_to_be_wrapped The indices of the rows that should be wrapped +#' into a new expression. +#' @inheritParams wrap_expr_in_curly +#' @keywords internal +wrap_subexpr_in_curly <- function(pd, + ind_to_be_wrapped, + indent_by, + space_after) { + to_be_wrapped_starts_with_comment <- + pd$token[ind_to_be_wrapped[1L]] == "COMMENT" + new_expr <- wrap_expr_in_curly( + vec_slice(pd, ind_to_be_wrapped), + stretch_out = c(!to_be_wrapped_starts_with_comment, TRUE), + space_after = space_after + ) + new_expr$indent <- max(pd$indent[last(ind_to_be_wrapped)] - indent_by, 0L) + new_expr_in_expr <- new_expr %>% + wrap_expr_in_expr() %>% + remove_attributes(c("token_before", "token_after")) + + pd %>% + vec_slice(-ind_to_be_wrapped) %>% + vec_rbind(new_expr_in_expr) %>% + set_multi_line() %>% + arrange_pos_id() +} + +#' Check if if, for or while loop expression require a braces. +#' +#' This is the case if they are multi-line and not yet wrapped into curly +#' braces. +#' @inheritParams wrap_multiline_curly +#' @keywords internal +if_for_while_part_requires_braces <- function(pd, key_token) { + pos_first_key_token <- which(pd$token == key_token)[1L] + child <- pd$child[[next_non_comment(pd, pos_first_key_token)]] + pd_is_multi_line(pd) && !is_curly_expr(child) +} + +#' Replace single quotes with double quotes +#' +#' We do not use `deparse()` as in previous implementations but `paste0()` since +#' the former approach escapes the reverse backslash in the line break character +#' `\\n` whereas the solution with `paste0()` does not. +#' @examples +#' style_text("'here +#' is a string +#' '") +#' @param pd_flat A flat parse table. 
+#' @keywords internal +fix_quotes <- function(pd_flat) { + str_const <- which(pd_flat$token == "STR_CONST") + if (rlang::is_empty(str_const)) { + return(pd_flat) + } + + pd_flat$text[str_const] <- map_chr(pd_flat$text[str_const], fix_quotes_one) + pd_flat +} + +fix_quotes_one <- function(x) { + rx <- "^'([^\"]*)'$" + i <- grep(rx, x) + if (rlang::is_empty(i)) { + return(x) + } + + # replace outer single quotes + xi <- gsub(rx, '"\\1"', x[i]) + + # Replace inner escaped quotes (\') by ' and keep all other instances of \., including \\ + x[i] <- gsub("\\\\(')|(\\\\[^'])", "\\1\\2", xi) + x +} diff --git a/R/serialize.R b/R/serialize.R index 01ba4fe2e..89bd0e730 100644 --- a/R/serialize.R +++ b/R/serialize.R @@ -1,15 +1,24 @@ #' Serialize flattened parse data #' #' Collapses a flattened parse table into character vector representation. -#' @param flattened_pd A flattened parse table. -#' @param start_line The line number on which the code starts. -serialize_parse_data_flattened <- function(flattened_pd, start_line = 1) { - flattened_pd$lag_newlines[1] <- start_line - 1 - res <- with(flattened_pd, +#' @inheritParams apply_stylerignore +#' @param indent_character The character that is used for indention. We strongly +#' advise for using spaces as indention characters. +#' @keywords internal +serialize_parse_data_flattened <- function(flattened_pd, indent_character = "") { + flattened_pd <- apply_stylerignore(flattened_pd) + flattened_pd$lag_newlines[1L] <- 0L # resolve start_line elsewhere + with( + flattened_pd, paste0( collapse = "", - map(lag_newlines, add_newlines), map(lag_spaces, add_spaces), text + map(lag_newlines, add_newlines), + map2( + ifelse(lag_newlines > 0L, indent_character, " "), + lag_spaces, + rep_char + ), + text ) ) - strsplit(res, "\n")[[1L]] } diff --git a/R/set-assert-args.R b/R/set-assert-args.R index 0301d4786..db49c4097 100644 --- a/R/set-assert-args.R +++ b/R/set-assert-args.R @@ -2,18 +2,52 @@ #' #' Sets the argument `write_tree` in [test_collection()] to be `TRUE` for R #' versions higher or equal to 3.2, and `FALSE` otherwise since the second-level -#' dependency `DiagrammeR` from `data.table` is not available for R < 3.2. +#' dependency `DiagrammeR` from `data.tree` is not available for R < 3.2. #' @param write_tree Whether or not to write tree. +#' @keywords internal set_arg_write_tree <- function(write_tree) { - sufficient_version <- getRversion() >= 3.2 if (is.na(write_tree)) { - write_tree <- ifelse(sufficient_version, TRUE, FALSE) - } else if (!sufficient_version && write_tree) { - stop_insufficient_r_version() + write_tree <- is_installed("data.tree") + } else if (write_tree) { + check_installed("data.tree") } write_tree } +#' Assert the transformers +#' +#' Actually only assert name and version of style guide in order to make sure +#' caching works correctly. +#' @inheritParams make_transformer +#' @keywords internal +assert_transformers <- function(transformers) { + version_cutoff <- 2.0 + no_name <- is.null(transformers$style_guide_name) + no_version <- is.null(transformers$style_guide_version) + if (no_name || no_version) { + action <- if (utils::packageVersion("styler") >= version_cutoff) { + "are not supported anymore" + } else { + "deprecated and will be removed in a future version of styler." + } + message <- paste( + "Style guides without a name and a version field are", + action, "\nIf you are a user: Open an issue on", + "https://github.com/r-lib/styler and provide a reproducible example", + "of this error. 
\nIf you are a developer:", + "When you create a style guide with {.fn styler::create_style_guide}, the", + "argument `style_guide_name` and `style_guide_version` should be", + "non-NULL. See {.help styler::create_style_guide} for how to set them." + ) + + if (utils::packageVersion("styler") >= version_cutoff) { + cli::cli_abort(message) + } else { + cli::cli_warn(message) + } + } +} + #' Set the file type argument #' #' Sets and asserts the file type argument to a standard format for further internal @@ -22,9 +56,8 @@ set_arg_write_tree <- function(write_tree) { #' standard format. #' @examples #' styler:::set_and_assert_arg_filetype("rMd") -#' \dontrun{ -#' styler:::set_and_assert_arg_filetype("xyz") -#' } +#' try(styler:::set_and_assert_arg_filetype("xyz")) +#' @keywords internal set_and_assert_arg_filetype <- function(filetype) { without_dot <- gsub("^\\.", "", tolower(filetype)) assert_filetype(without_dot) @@ -34,39 +67,55 @@ set_and_assert_arg_filetype <- function(filetype) { #' Make sure all supplied file types are allowed #' #' @param lowercase_filetype A vector with file types to check, all lower case. + +#' @keywords internal assert_filetype <- function(lowercase_filetype) { - if (!all(lowercase_filetype %in% c("r", "rmd"))) { - stop( - "filetype must not contain other values than 'R'", - "or 'Rmd' (case is ignored).", call. = FALSE - ) + allowed_types <- c("r", "rmd", "rmarkdown", "rnw", "rprofile", "qmd") + if (!all(lowercase_filetype %in% allowed_types)) { + abort(paste( + "filetype must not contain other values than 'R', 'Rprofile',", + "'Rmd', 'Rmarkdown', 'qmd' or 'Rnw' (case is ignored)." + )) } } - #' Assert text to be of positive length and replace it with the empty #' string otherwise. #' @param text The input to style. +#' @keywords internal assert_text <- function(text) { - if (length(text) < 1) { + if (length(text) < 1L) { text <- "" } text } - #' Check token validity #' #' Check whether one or more tokens exist and have a unique token-text mapping #' @param tokens Tokens to check. + +#' @keywords internal assert_tokens <- function(tokens) { invalid_tokens <- tokens[!(tokens %in% lookup_tokens()$token)] - if (length(invalid_tokens) > 0) { - stop( - "Token(s) ", paste0(invalid_tokens, collapse = ", "), " are invalid. ", - "You can lookup all valid tokens and their text ", - "with styler:::lookup_tokens(). Make sure you supply the values of ", + if (length(invalid_tokens) > 0L) { + abort(paste( + "Token(s)", toString(invalid_tokens), "are invalid.", + "You can lookup all valid tokens and their text", + "with styler:::lookup_tokens(). Make sure you supply the values of", "the column 'token', not 'text'." - ) + )) } } + +#' Standardize paths in root +#' +#' Standardization required to use `setdiff()` with paths. +#' @param path A path. +#' @keywords internal +#' @seealso dir_without_. +#' @examples +#' styler:::set_arg_paths(c("./file.R", "file.R", "../another-file.R")) +set_arg_paths <- function(path) { + gsub("^[.]/", "", path) +} diff --git a/R/style-guides.R b/R/style-guides.R new file mode 100644 index 000000000..e6a68ae7d --- /dev/null +++ b/R/style-guides.R @@ -0,0 +1,553 @@ +#' Style guides +#' +#' Format code according to a style guide. Style guides are the input to the +#' argument `style` in [style_file()] and friends. +#' The available style guides are: +#' * the tidyverse style guide (see [tidyverse_style()]). +#' @name style_guides +#' @keywords internal +NULL + + +#' The tidyverse style +#' +#' Style code according to the tidyverse style guide. 
+#' @param scope The extent of manipulation. Can range from "none" (least
+#'   invasive) to "tokens" (most invasive). See 'Details'. This argument is a
+#'   string or a vector of class `AsIs`.
+#' @param indent_by How many spaces of indention should be inserted after
+#'   operators such as '('.
+#' @param strict A logical value indicating whether a set of strict
+#'   or not so strict transformer functions should be returned. Compare the
+#'   functions returned with or without `strict = TRUE`. For example,
+#'   `strict = TRUE` means force *one* space e.g. after "," and *one* line break
+#'   e.g. after a closing curly brace. `strict = FALSE` means to set spaces and
+#'   line breaks to one if there is none and leave the code untouched otherwise.
+#'   See 'Examples'.
+#' @param start_comments_with_one_space Whether or not comments should start
+#'   with only one space (see [start_comments_with_space()]).
+#' @inheritParams create_style_guide
+#' @param math_token_spacing A list of parameters that define spacing around
+#'   math tokens, conveniently constructed using [specify_math_token_spacing()].
+#' @details
+#'
+#' The following levels for `scope` are available:
+#'
+#' * "none": Performs no transformation at all.
+#' * "spaces": Manipulates spacing between tokens on the same line.
+#' * "indention": Manipulates the indention, i.e. the number of spaces at the
+#'   beginning of each line.
+#' * "line_breaks": Manipulates line breaks between tokens.
+#' * "tokens": Manipulates tokens.
+#'
+#' `scope` can be specified in two ways:
+#'
+#' - As a string: In this case, all less invasive scope levels are implied, e.g.
+#'   "line_breaks" includes "indention" and "spaces". This is brief and what most
+#'   users need.
+#' - As a vector of class `AsIs`: Each level has to be listed explicitly by
+#'   wrapping one or more levels of the scope in [I()]. This offers more
+#'   granular control at the expense of more verbosity.
+#'
+#' See 'Examples' for details.
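# A small illustrative sketch of the two ways of specifying `scope` described
# above, assuming only the exported styler API. A plain string implies all less
# invasive levels; an I() vector applies exactly the listed levels.
library(styler)
style_text("a=3; b", scope = "line_breaks") # line breaks plus implied spaces and indention; `=` is kept
style_text("a=3; b", scope = I("tokens")) # token rules only, e.g. `=` -> `<-` and `;` resolved; spacing left alone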
+#' +#' @family obtain transformers +#' @family style_guides +#' @examples +#' style_text("call( 1)", style = tidyverse_style, scope = "spaces") +#' style_text("call( 1)", transformers = tidyverse_style(strict = TRUE)) +#' style_text(c("ab <- 3", "a <-3"), strict = FALSE) # keeps alignment of "<-" +#' style_text(c("ab <- 3", "a <-3"), strict = TRUE) # drops alignment of "<-" +#' +#' # styling line breaks only without spaces +#' style_text(c("ab <- 3", "a =3"), strict = TRUE, scope = I(c("line_breaks", "tokens"))) +#' @export +tidyverse_style <- function(scope = "tokens", + strict = TRUE, + indent_by = 2L, + start_comments_with_one_space = FALSE, + reindention = tidyverse_reindention(), + math_token_spacing = tidyverse_math_token_spacing()) { + args <- as.list(environment()) + scope <- scope_normalize(scope) + indent_character <- " " + + + indention_manipulators <- if ("indention" %in% scope) { + list( + indent_braces = partial(indent_braces, indent_by = indent_by), + unindent_fun_dec = unindent_fun_dec, + indent_op = partial(indent_op, indent_by = indent_by), + indent_eq_sub = partial(indent_eq_sub, indent_by = indent_by), + indent_without_paren = partial(indent_without_paren, + indent_by = indent_by + ), + update_indention_ref_fun_dec = update_indention_ref_fun_dec + ) + } + space_manipulators <- if ("spaces" %in% scope) { + list( + remove_space_before_closing_paren = remove_space_before_closing_paren, + remove_space_before_opening_paren = if (strict) { + remove_space_before_opening_paren + }, + add_space_after_for_if_while = add_space_after_for_if_while, + remove_space_before_comma = remove_space_before_comma, + style_space_around_math_token = partial( + style_space_around_math_token, strict, + math_token_spacing$zero, + math_token_spacing$one + ), + style_space_around_tilde = partial( + style_space_around_tilde, + strict = strict + ), + spacing_around_op = purrr::partial(set_space_around_op, + strict = strict + ), + remove_space_after_opening_paren = remove_space_after_opening_paren, + remove_space_after_excl = remove_space_after_excl, + set_space_after_bang_bang = set_space_after_bang_bang, + remove_space_before_dollar = remove_space_before_dollar, + remove_space_after_fun_dec = remove_space_after_fun_dec, + remove_space_around_colons = remove_space_around_colons, + start_comments_with_space = partial(start_comments_with_space, + force_one = start_comments_with_one_space + ), + remove_space_after_unary_pm_nested = remove_space_after_unary_pm_nested, + spacing_before_comments = if (strict) { + set_space_before_comments + } else { + add_space_before_comments + }, + set_space_between_levels = set_space_between_levels, + set_space_between_eq_sub_and_comma = set_space_between_eq_sub_and_comma, + set_space_in_curly_curly = set_space_in_curly_curly + ) + } + + use_raw_indention <- !("indention" %in% scope) + + line_break_manipulators <- if ("line_breaks" %in% scope) { + list( + set_line_break_around_comma_and_or = set_line_break_around_comma_and_or, + set_line_break_after_assignment = set_line_break_after_assignment, + set_line_break_before_curly_opening = set_line_break_before_curly_opening, + remove_line_break_before_round_closing_after_curly = + if (strict) remove_line_break_before_round_closing_after_curly, + remove_line_breaks_in_fun_dec = + if (strict) remove_line_breaks_in_fun_dec, + style_line_break_around_curly = partial( + style_line_break_around_curly, + strict + ), + # must be after style_line_break_around_curly as it remove line + # breaks again for {{. 
+ set_line_break_around_curly_curly = set_line_break_around_curly_curly, + set_line_break_before_closing_call = if (strict) { + partial( + set_line_break_before_closing_call, + except_token_before = "COMMENT" + ) + }, + set_line_break_after_opening_if_call_is_multi_line = if (strict) { + partial( + set_line_break_after_opening_if_call_is_multi_line, + except_token_after = "COMMENT", + # don't modify line break here + except_text_before = c("ifelse", "if_else"), + force_text_before = "switch" # force line break after first token + ) + }, + remove_line_break_in_fun_call = purrr::partial( + remove_line_break_in_fun_call, + strict = strict + ), + add_line_break_after_pipe = if (strict) add_line_break_after_pipe, + set_line_break_after_ggplot2_plus = if (strict) { + set_line_break_after_ggplot2_plus + } + ) + } + + token_manipulators <- if ("tokens" %in% scope) { + list( + fix_quotes = fix_quotes, + force_assignment_op = force_assignment_op, + resolve_semicolon = resolve_semicolon, + add_brackets_in_pipe = add_brackets_in_pipe, + wrap_if_else_while_for_fun_multi_line_in_curly = + if (strict) { + purrr::partial( + wrap_if_else_while_for_fun_multi_line_in_curly, + indent_by = indent_by + ) + } + ) + } + + transformers_drop <- specify_transformers_drop( + spaces = list( + # remove_space_before_closing_paren = c("')'", "']'"), + # remove_space_before_opening_paren = c("'('", "'['", "LBB"), + add_space_after_for_if_while = c("IF", "WHILE", "FOR"), + # remove_space_before_comma = "','", + set_space_between_eq_sub_and_comma = "EQ_SUB", + style_space_around_math_token = c( + math_token_spacing$zero, + math_token_spacing$one + ), + style_space_around_tilde = "'~'", + # remove_space_after_opening_paren = c("'('", "'['", "LBB"), + remove_space_after_excl = "'!'", + set_space_after_bang_bang = "'!'", + remove_space_before_dollar = "'$'", + remove_space_after_fun_dec = "FUNCTION", + remove_space_around_colons = c("':'", "NS_GET_INT", "NS_GET"), + start_comments_with_space = "COMMENT", + remove_space_after_unary_pm_nested = c("'+'", "'-'"), + spacing_before_comments = "COMMENT", + set_space_in_curly_curly = c("'{'", "'}'") + ), + indention = list( + # indent_braces = c("'('", "'['", "'{'", "')'", "']'", "'}'"), + unindent_fun_dec = "FUNCTION", + indent_eq_sub = c("EQ_SUB", "EQ_FORMALS"), # TODO rename + update_indention_ref_fun_dec = "FUNCTION" + ), + line_breaks = list( + set_line_break_before_curly_opening = "'{'", + remove_line_break_before_round_closing_after_curly = "'}'", + remove_line_breaks_in_fun_dec = "FUNCTION", + set_line_break_around_curly_curly = "'{'", + style_line_break_around_curly = "'{'", + add_line_break_after_pipe = c("SPECIAL-PIPE", "PIPE") + ), + tokens = list( + resolve_semicolon = "';'", + add_brackets_in_pipe = c("SPECIAL-PIPE", "PIPE"), + # before 3.6, these assignments are not wrapped into top-level expression + # and `text` supplied to transformers_drop() is "", so it appears to not + # contain EQ_ASSIGN, and the transformer is falsely removed. 
+ # compute_parse_data_nested / text_to_flat_pd ('a = 4') + force_assignment_op = "EQ_ASSIGN", + wrap_if_else_while_for_fun_multi_line_in_curly = c( + "IF", "WHILE", "FOR", "FUNCTION" + ) + ) + ) + + style_guide_name <- "styler::tidyverse_style@https://github.com/r-lib" + create_style_guide( + # transformer functions + initialize = default_style_guide_attributes, + line_break = line_break_manipulators, + space = space_manipulators, + token = token_manipulators, + indention = indention_manipulators, + # transformer options + use_raw_indention = use_raw_indention, + reindention = reindention, + style_guide_name = style_guide_name, + style_guide_version = styler_version, + more_specs_style_guide = args, + transformers_drop = transformers_drop, + indent_character = indent_character + ) +} + +#' Create a style guide +#' +#' This is a helper function to create a style guide, which is technically +#' speaking a named list of groups of transformer functions where each +#' transformer function corresponds to one styling rule. The output of this +#' function can be used as an argument for `style` in top-level functions +#' like [style_text()] and friends. Note that for caching to work properly, +#' unquote all inputs to the transformer function if possible with rlang's `!!`, +#' otherwise, they will be passed as references (generic variable names) instead +#' of literals and `styler:::is_cached()` won't pick up changes. See how it's +#' done in [tidyverse_style()] with `indent_by` and other arguments. +#' @param initialize The bare name of a function that initializes various +#' variables on each level of nesting. +#' @param line_break A list of transformer functions that manipulate line_break +#' information. +#' @param space A list of transformer functions that manipulate spacing +#' information. +#' @param token A list of transformer functions that manipulate token text. +#' @param indention A list of transformer functions that manipulate indention. +#' @param use_raw_indention Boolean indicating whether or not the raw indention +#' should be used. +#' @param reindention A list of parameters for regex re-indention, most +#' conveniently constructed using [specify_reindention()]. +#' @param style_guide_name The name of the style guide. Used as a meta attribute +#' inside the created style guide, for example for caching. By convention, +#' this is the style guide qualified by the package namespace plus the +#' location of the style guide, separated by `@`. For example, +#' `"styler::tidyverse_style@https://github.com/r-lib"`. +#' @param style_guide_version The version of the style guide. Used as a meta +#' attribute inside the created style guide, for example for caching. This +#' should correspond to the version of the R package that exports the +#' style guide. +#' @param more_specs_style_guide Named vector (coercible to character) +#' with all arguments passed to the style guide and used for cache +#' invalidation. You can easily capture them in your style guide function +#' declaration with `as.list(environment())` (compare source code of +#' `tidyverse_style()`). +#' @param transformers_drop A list specifying under which conditions +#' transformer functions can be dropped since they have no effect on the +#' code to format, most easily constructed with +#' [specify_transformers_drop()]. This is argument experimental and may +#' change in future releases without prior notification. It was mainly +#' introduced to improve speed. 
Listing transformers here that occur almost +#' always in code does not make sense because the process of excluding them +#' also takes some time. +#' @inheritParams serialize_parse_data_flattened +#' @examples +#' set_line_break_before_curly_opening <- function(pd_flat) { +#' op <- pd_flat$token %in% "'{'" +#' pd_flat$lag_newlines[op] <- 1L +#' pd_flat +#' } +#' set_line_break_before_curly_opening_style <- function() { +#' create_style_guide( +#' line_break = list(set_line_break_before_curly_opening), +#' style_guide_name = "some-style-guide", +#' style_guide_version = "some-version" +#' ) +#' } +#' style_text( +#' "a <- function(x) { x }", +#' style = set_line_break_before_curly_opening_style +#' ) +#' @export +create_style_guide <- function(initialize = default_style_guide_attributes, + line_break = NULL, + space = NULL, + token = NULL, + indention = NULL, + use_raw_indention = FALSE, + reindention = tidyverse_reindention(), + style_guide_name = NULL, + style_guide_version = NULL, + more_specs_style_guide = NULL, + transformers_drop = specify_transformers_drop(), + indent_character = " ") { + list( + # transformer functions + initialize = list(initialize = initialize), + line_break = line_break, + space = space, + token = token, + indention = indention, + # transformer options + use_raw_indention = use_raw_indention, + reindention = reindention, + style_guide_name = style_guide_name, + style_guide_version = style_guide_version, + more_specs_style_guide = more_specs_style_guide, + transformers_drop = transformers_drop, + indent_character = indent_character + ) %>% + map(compact) +} + +#' Specify which tokens must be absent for a transformer to be dropped +#' +#' `{styler}` can remove transformer functions safely removed from the list of +#' transformers to be applied on every *nest* with [transformers_drop()] if the +#' tokens that trigger a manipulation of the parse data are absent in the text +#' to style. `specify_transformers_drop()` helps you specify these +#' conditions. +#' +#' Note that the negative formulation (must be absent in order to be dropped) +#' means that when you add a new rule and you forget +#' to add a rule for when to drop it, it will not be dropped. If we required to +#' specify the complement (which tokens must be present for the transformer to +#' be kept), the transformer would be silently removed, which is less save. +#' @param spaces,indention,line_breaks,tokens Each a list (or `NULL`) where +#' the name of each element is the concerning transformer, the value is an +#' unnamed vector with tokens that match the rule. See 'Examples'. +#' +#' @section Warning: +#' It is the responsibility of the developer to ensure expected behavior, in +#' particular that: +#' * the name of the supplied dropping criteria matches the name of the +#' transformer function. +#' * the dropping criteria (name + token) reflects correctly under which +#' circumstances the transformer does not have an impact on styling and can +#' therefore be safely removed without affecting the styling outcome. +#' +#' You can use the unexported function [test_transformers_drop()] for some +#' checks. 
+#' @examples +#' dropping <- specify_transformers_drop( +#' spaces = c(remove_space_after_excl = "'!'") +#' ) +#' style_guide <- create_style_guide( +#' space = list(remove_space_after_excl = styler:::remove_space_after_excl), +#' transformers_drop = dropping +#' ) +#' # transformers_drop() will remove the transformer when the code does not +#' # contain an exclamation mark +#' style_guide_with_some_transformers_dropped <- styler:::transformers_drop( +#' "x <- 3;2", style_guide +#' ) +#' setdiff( +#' names(style_guide$space), +#' names(style_guide_with_some_transformers_dropped) +#' ) +#' # note that dropping all transformers of a scope means that this scope +#' # has an empty named list for this scope +#' style_guide_with_some_transformers_dropped$space +#' # this is not the same as if this scope was never specified. +#' tidyverse_style(scope = "none")$space +#' # Hence, styler should check for length 0 to decide if a scope is present or +#' # not, not via `is.null()` and we can use the `is.null()` check to see if +#' # this scope was initially required by the user. +#' @export +specify_transformers_drop <- function(spaces = NULL, + indention = NULL, + line_breaks = NULL, + tokens = NULL) { + list( + space = spaces, indention = indention, line_break = line_breaks, + token = tokens + ) +} + +#' Specify what is re-indented how +#' +#' This function returns a list that can be used as an input for the argument +#' `reindention` of the function [tidyverse_style()]. It features sensible +#' defaults, so the user can specify deviations from them conveniently without +#' the need of setting all arguments explicitly. +#' @param regex_pattern Character vector with regular expression patterns that +#' are to be re-indented with spaces, `NULL` if no reindention needed. +#' @param indention The indention tokens should have if they match +#' `regex_pattern`. +#' @param comments_only Whether the `regex_reindention_pattern` should only be +#' matched against comments or against all tokens. Mainly added for +#' performance. +#' @name reindention +NULL + +#' @describeIn reindention Allows to specify which tokens are reindented and +#' how. +#' @examples +#' style_text("a <- xyz", reindention = specify_reindention( +#' regex_pattern = "xyz", indention = 4, comments_only = FALSE +#' )) +#' @export +specify_reindention <- function(regex_pattern = NULL, + indention = 0L, + comments_only = TRUE) { + list( + regex_pattern = regex_pattern, + indention = indention, + comments_only = comments_only + ) +} + +#' @describeIn reindention Simple forwarder to +#' `specify_reindention` with reindention according to the tidyverse style +#' guide. +#' @examples +#' style_text("a <- xyz", reindention = tidyverse_reindention()) +#' @export +tidyverse_reindention <- function() { + specify_reindention( + regex_pattern = NULL, indention = 0L, comments_only = TRUE + ) +} + +#' Convert the styling scope to its lower-level representation +#' +#' If `scope` is of class `character` and of length one, the value of the +#' argument and all less-invasive levels are included too (e.g. +#' styling tokens includes styling spaces). If +#' `scope` is of class `AsIs`, every level to be included has to be declared +#' individually. See compare [tidyverse_style()] for the possible levels and +#' their order. +#' @param scope A character vector of length one or a vector of class `AsIs`. +#' @param name The name of the character vector to be displayed if the +#' construction of the factor fails. 
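# A short sketch of the plain-string form of scope_normalize(), complementing
# the AsIs examples that follow; per the behaviour described above, a single
# string implies all less invasive levels.
styler::scope_normalize("line_breaks")
# expected: an ordered factor containing "none", "spaces", "indention" and
# "line_breaks", i.e. everything up to and including the requested level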
+ +#' @examples +#' scope_normalize(I("tokens")) +#' scope_normalize(I(c("indention", "tokens"))) +#' @family third-party style guide helpers +#' @export +scope_normalize <- function(scope, name = substitute(scope)) { + levels <- c("none", "spaces", "indention", "line_breaks", "tokens") + if (!all((scope %in% levels))) { + abort(paste( + "all values in", name, "must be one of the following:", + toString(levels) + )) + } + + if (inherits(scope, "AsIs")) { + factor(as.character(scope), levels = levels, ordered = TRUE) + } else if (length(scope) == 1L) { + scope <- levels[as.logical(rev(cumsum(scope == rev(levels))))] + factor(scope, levels = levels, ordered = TRUE) + } else { + abort( + "argument `scope` has to be either of class `AsIs` or length one." + ) + } +} + +#' Specify spacing around math tokens +#' +#' Helper function to create the input for the argument `math_token_spacing` in +#' [tidyverse_style()]. +#' @inheritParams style_space_around_math_token +#' @examples +#' style_text( +#' "1+1 -3", +#' math_token_spacing = specify_math_token_spacing(zero = "'+'"), +#' strict = FALSE +#' ) +#' style_text( +#' "1+1 -3", +#' math_token_spacing = specify_math_token_spacing(zero = "'+'"), +#' strict = TRUE +#' ) +#' @name math_token_spacing +NULL + +#' @describeIn math_token_spacing Allows to fully specify the math token +#' spacing. +#' @export +specify_math_token_spacing <- + function(zero = "'^'", + one = c("'+'", "'-'", "'*'", "'/'")) { + assert_tokens(c(one, zero)) + list( + one = setdiff(c(math_token, one), zero), + zero = zero + ) + } + +#' @describeIn math_token_spacing Simple forwarder to +#' `specify_math_token_spacing` with spacing around math tokens according to the +#' tidyverse style guide. +#' @examples +#' style_text( +#' "1+1 -3", +#' math_token_spacing = tidyverse_math_token_spacing(), +#' strict = FALSE +#' ) +#' style_text( +#' "1+1 -3", +#' math_token_spacing = tidyverse_math_token_spacing(), +#' strict = TRUE +#' ) +#' @export +tidyverse_math_token_spacing <- function() { + specify_math_token_spacing( + zero = "'^'", + one = c("'+'", "'-'", "'*'", "'/'") + ) +} diff --git a/R/style_guides.R b/R/style_guides.R deleted file mode 100644 index 1d5359d5f..000000000 --- a/R/style_guides.R +++ /dev/null @@ -1,324 +0,0 @@ -#' Style guides -#' -#' Format code according to a style guide. Style guides are the input to the -#' argument `style` in [style_file()] and friends. -#' The available style guides are: -#' * the tidyverse style guide (see [tidyverse_style()]). -#' @name style_guides -NULL - - -#' The tidyverse style -#' -#' Style code according to the tidyverse style guide. -#' @param scope The extent of manipulation. Can range from "none" (least -#' invasive) to "token" (most invasive). See 'Details'. This argument is a -#' vector of length one. -#' @param indent_by How many spaces of indention should be inserted after -#' operators such as '('. -#' @param strict A logical value indicating whether a set of strict -#' or not so strict transformer functions should be returned. Compare the -#' functions returned with or without `strict = TRUE`. For example, -#' `strict = TRUE` means force *one* space e.g. after "," and *one* line break -#' e.g. after a closing curly brace. `strict = FALSE` means to set spaces and -#' line breaks to one if there is none and leave the code untouched otherwise. -#' See 'Examples'. -#' @param start_comments_with_one_space Whether or not comments should start -#' with only one space (see `start_comments_with_space()`). 
-#' @inheritParams create_style_guide -#' @param math_token_spacing A list of parameters that define spacing around -#' math token, conveniently constructed using [specify_math_token_spacing()]. - -#' @details The following options for `scope` are available. -#' -#' * "none": Performs no transformation at all. -#' * "spaces": Manipulates spacing between token on the same line. -#' * "indention": In addition to "spaces", this option also manipulates the -#' indention level. -#' * "line_breaks": In addition to "indention", this option also manipulates -#' line breaks. -#' * "tokens": In addition to "line_breaks", this option also manipulates -#' tokens. -#' -#' As it becomes clear from this description, more invasive operations can only -#' be performed if all less invasive operations are performed too. -#' @family obtain transformers -#' @family style_guides -#' @examples -#' style_text("call( 1)", style = tidyverse_style, scope = "spaces") -#' style_text("call( 1)", transformers = tidyverse_style(strict = TRUE)) -#' style_text(c("ab <- 3", "a <-3"), strict = FALSE) # keeps alignment of "<-" -#' style_text(c("ab <- 3", "a <-3"), strict = TRUE) # drops alignment of "<-" -#' @importFrom purrr partial -#' @export -tidyverse_style <- function(scope = "tokens", - strict = TRUE, - indent_by = 2, - start_comments_with_one_space = FALSE, - reindention = tidyverse_reindention(), - math_token_spacing = tidyverse_math_token_spacing()) { - scope <- character_to_ordered( - scope, - c("none", "spaces", "indention", "line_breaks", "tokens") - ) - - space_manipulators <- if (scope >= "spaces") { - lst( - partial(indent_braces, indent_by = indent_by), - partial(indent_op, indent_by = indent_by), - partial(indent_eq_sub, indent_by = indent_by), - partial(indent_without_paren, indent_by = indent_by), - - fix_quotes, - remove_space_before_closing_paren, - if (strict) remove_space_before_opening_paren else identity, - add_space_after_for_if_while, - add_space_before_brace, - remove_space_before_comma, - partial( - style_space_around_math_token, strict, - math_token_spacing$zero, - math_token_spacing$one - ), - if (strict) set_space_around_op else add_space_around_op, - if (strict) set_space_after_comma else add_space_after_comma, - remove_space_after_opening_paren, - remove_space_after_excl, - set_space_after_bang_bang, - remove_space_before_dollar, - remove_space_after_fun_dec, - remove_space_around_colons, - partial( - start_comments_with_space, - force_one = start_comments_with_one_space - ), - - remove_space_after_unary_pm_nested, - if (strict) set_space_before_comments else add_space_before_comments, - set_space_between_levels, - set_space_between_eq_sub_and_comma - ) - } - - use_raw_indention <- scope < "indention" - - line_break_manipulators <- if (scope >= "line_breaks") { - lst( - remove_line_break_before_curly_opening, - if (strict) remove_line_break_before_round_closing_after_curly else identity, - if (strict) remove_line_break_before_round_closing_fun_dec else identity, - partial(style_line_break_around_curly, strict), - if (strict) { - partial( - set_line_break_after_opening_if_call_is_multi_line, - except_token_after = "COMMENT", - except_text_before = c("switch", "ifelse", "if_else") - ) - } else { - identity - } , - if (strict) { - partial( - set_line_break_before_closing_call, except_token_before = "COMMENT" - ) - } else { - identity - } , - remove_line_break_in_empty_fun_call, - add_line_break_after_pipe - ) - } - - token_manipulators <- if (scope >= "tokens") { - lst( - force_assignment_op, - 
resolve_semicolon, - add_brackets_in_pipe, - remove_terminal_token_before_and_after, - if (strict) wrap_if_else_multi_line_in_curly else identity - ) - } - - - indention_modifier <- - c( - update_indention_ref_fun_dec, - NULL - ) - - create_style_guide( - # transformer functions - initialize = default_style_guide_attributes, - line_break = line_break_manipulators, - space = space_manipulators, - token = token_manipulators, - indention = indention_modifier, - # transformer options - use_raw_indention = use_raw_indention, - reindention = reindention - ) -} - -#' Create a style guide -#' -#' This is a helper function to create a style guide, which is technically -#' speaking a named list of groups of transformer functions where each -#' transformer function corresponds to one styling rule. The output of this -#' function can be used as an argument for \code{style} in top level functions -#' like [style_text()] and friends. -#' @param initialize The bare name of a function that initializes various -#' variables on each level of nesting. -#' @param line_break A list of transformer functions that manipulate line_break -#' information. -#' @param space A list of transformer functions that manipulate spacing -#' information. -#' @param token A list of transformer functions that manipulate token text. -#' @param indention A list of transformer functions that manipulate indention. -#' @param use_raw_indention Boolean indicating whether or not the raw indention -#' should be used. -#' @param reindention A list of parameters for regex re-indention, most -#' conveniently constructed using [specify_reindention()]. -#' @examples -#' set_line_break_before_curly_opening <- function(pd_flat) { -#' op <- pd_flat$token %in% "'{'" -#' pd_flat$lag_newlines[op] <- 1L -#' pd_flat -#' } -#' set_line_break_before_curly_opening_style <- function() { -#' create_style_guide(line_break = set_line_break_before_curly_opening) -#' } -#' style_text("a <- function(x) { x }", style = set_line_break_before_curly_opening_style) -#' @export -create_style_guide <- function(initialize = default_style_guide_attributes, - line_break = NULL, - space = NULL, - token = NULL, - indention = NULL, - use_raw_indention = FALSE, - reindention = tidyverse_reindention()) { - lst( - # transformer functions - initialize, - line_break, - space, - token, - indention, - # transformer options - use_raw_indention, - reindention - ) -} - - -#' Specify what is re-indented how -#' -#' This function returns a list that can be used as an input for the argument -#' `reindention` of the function [tidyverse_style()]. It features sensible -#' defaults, so the user can specify deviations from them conveniently without -#' the need of setting all arguments explicitly. -#' @param regex_pattern Character vector with regular expression patterns that -#' are to be re-indented with spaces, `NULL` if no reindention needed. -#' @param indention The indention tokens should have if they match -#' `regex_pattern`. -#' @param comments_only Whether the `regex_reindention_pattern` should only be -#' matched against comments or against all tokens. Mainly added for -#' performance. -#' @name reindention -NULL - -#' @describeIn reindention Allows to specify which tokens are reindented and -#' how. 
-#' @examples -#' style_text("a <- xyz", reindention = specify_reindention( -#' regex_pattern = "xyz", indention = 4, comments_only = FALSE) -#' ) -#' @export -specify_reindention <- function(regex_pattern = NULL, - indention = 0, - comments_only = TRUE) - lst( - regex_pattern, - indention, - comments_only - ) - -#' @describeIn reindention Simple forwarder to -#' `specify_reindention` with reindention according to the tidyverse style -#' guide. -#' @examples -#' style_text("a <- xyz", reindention = tidyverse_reindention()) -#' @export -tidyverse_reindention <- function() { - specify_reindention( - regex_pattern = NULL, indention = 0, comments_only = TRUE - ) -} - -#' Convert a character vector to an ordered factor -#' -#' Convert a vector to an ordered factor but stop if any of the values in -#' `x` does not match the predefined levels in `levels.` -#' @param x A character vector. -#' @param levels A vector with levels. -#' @param name The name of the character vector to be displayed if the -#' construction of the factor fails. -character_to_ordered <- function(x, levels, name = substitute(x)) { - if (!all((x %in% levels))) { - stop( - "all values in ", name, " must be one of the following: ", - paste(levels, collapse = ", "), call. = FALSE - ) - } - factor(x, levels = levels, ordered = TRUE) -} - -#' Specify spacing around math tokens -#' -#' Helper function to create the input for the argument `math_token_spacing` in -#' [tidyverse_style()]. -#' @inheritParams style_space_around_math_token -#' @examples -#' style_text( -#' "1+1 -3", -#' math_token_spacing = specify_math_token_spacing(zero = "'+'"), -#' strict = FALSE -#' ) -#' style_text( -#' "1+1 -3", -#' math_token_spacing = specify_math_token_spacing(zero = "'+'"), -#' strict = TRUE -#' ) -#' style_text( -#' "1+1 -3", -#' math_token_spacing = tidyverse_math_token_spacing(), -#' strict = TRUE -#' ) -#' @name math_token_spacing -NULL - -#' @describeIn math_token_spacing Allows to fully specify the math token -#' spacing. -#' @export -specify_math_token_spacing <- - function(zero = NULL, - one = c("'+'", "'-'", "'*'", "'/'", "'^'")) { - assert_tokens(c(one, zero)) - lst( - one = setdiff(c(math_token, one), zero), - zero - ) - } - -#' @describeIn math_token_spacing Simple forwarder to -#' `specify_math_token_spacing` with spacing around math tokens according to the -#' tidyverse style guide. -#' @examples -#' style_text( -#' "1+1 -3", -#' math_token_spacing = tidyverse_math_token_spacing(), -#' strict = TRUE -#' ) -#' @export -tidyverse_math_token_spacing <- function() { - specify_math_token_spacing(one = c("'+'", "'-'", "'*'", "'/'", "'^'")) -} diff --git a/R/styler-package.R b/R/styler-package.R new file mode 100644 index 000000000..c1558ef4e --- /dev/null +++ b/R/styler-package.R @@ -0,0 +1,41 @@ +#' Non-invasive pretty printing of R code +#' +#' styler allows you to format `.R`, `.Rmd`, `.Rmarkdown` and/or +#' `.qmd`, `.Rnw` files, R packages, or entire R source trees +#' according to a style guide. +#' The following functions can be used for styling: +#' * [style_text()] to style a character vector. +#' * [style_file()] to style a single file. +#' * [style_dir()] to style all files in a directory. +#' * [style_pkg()] to style the source files of an R package. +#' * [styler_addins] (RStudio Addins) to style either selected code or the +#' active file. 
+#' @examples +#' style_text("call( 1)") +#' style_text("1 + 1", strict = FALSE) +#' style_text("a%>%b", scope = "spaces") +#' style_text("a%>%b; a", scope = "line_breaks") +#' style_text("a%>%b; a", scope = "tokens") +"_PACKAGE" + +## usethis namespace: start +#' +#' @importFrom magrittr "%>%" +#' @importFrom purrr compact partial flatten flatten_int flatten_chr +#' @importFrom purrr map map_lgl map_int map_chr map2 map2_chr map_at pmap pwalk +#' @importFrom rlang abort warn seq2 check_installed is_installed "%||%" set_names +#' @importFrom vctrs vec_rbind vec_slice vec_split +## usethis namespace: end +NULL + + +utils::globalVariables(c( + ".", + "pd", "pd_nested", "pd_flat", "flattened_pd", + "line1", "line2", "col1", "col2", "parent", + "terminal", "text", "short", + "spaces", "lag_spaces", + "newlines", "lag_newlines", + "pos_id", + NULL +)) diff --git a/R/styler.R b/R/styler.R deleted file mode 100644 index 9f76a17b6..000000000 --- a/R/styler.R +++ /dev/null @@ -1,30 +0,0 @@ -#' Non-invasive pretty printing of R code -#' -#' styler allows you to format .R files, packages or entire R source trees -#' according to a style guide. -#' The following functions can be used for styling: -#' * [style_text()] to style a character vector. -#' * [style_file()] to style a single .R file. -#' * [style_dir()] to style all .R files in a directory. -#' * [style_pkg()] to style the source files of an R package. -#' * [styler_addins] (RStudio Addins) to style either selected code or the -#' active file. -#' @examples -#' style_text("call( 1)") -#' style_text("1 + 1", strict = FALSE) -#' style_text("a%>%b", scope = "spaces") -#' style_text("a%>%b; a", scope = "line_breaks") -#' style_text("a%>%b; a", scope = "tokens") -"_PACKAGE" -if (getRversion() >= "2.15.1") { - utils::globalVariables(c( - ".", - "pd", "pd_nested", "pd_flat", "flattened_pd", - "line1", "line2", "col1", "col2", "parent", - "terminal", "text", "short", - "spaces", "lag_spaces", - "newlines", "lag_newlines", - "pos_id", - NULL - )) -} diff --git a/R/stylerignore.R b/R/stylerignore.R new file mode 100644 index 000000000..4cf0b30bd --- /dev/null +++ b/R/stylerignore.R @@ -0,0 +1,168 @@ +#' Add positional information of token to next terminal +#' +#' This is needed because at serialization time, we also have terminals only +#' and positional argument of non-terminals were already propagated to terminals +#' with [context_to_terminals()]. Because tokens can be added or removed during +#' styling, we must not only keep the pos_id, but rather we must remember the +#' pos_id of the first token in the stylerignore sequence (the marker, or the +#' first token on a line if the stylerignore marker is an inline marker), for +#' which we know it will still be there, and join these markers later with all +#' tokens in the stylerignore sequence (this is a one to many join, i.e. one +#' start marker can have many tokens). +#' @inheritParams add_stylerignore +#' @keywords internal +env_add_stylerignore <- function(pd_flat) { + if (!env_current$any_stylerignore) { + env_current$stylerignore <- vec_slice(pd_flat, 0L) + return() + } + # the whole stylerignore sequence must be contained in one block. + # this means the block can contain cached and uncached expressions. 
+ pd_flat_temp <- vec_slice(pd_flat, pd_flat$terminal) %>% + default_style_guide_attributes() + is_stylerignore_switchpoint <- pd_flat_temp$stylerignore != lag( + pd_flat_temp$stylerignore, + default = pd_flat_temp$stylerignore[1L] + ) + + pos_id_split <- vec_split( + pd_flat_temp$pos_id, cumsum(is_stylerignore_switchpoint) + ) + + pd_flat_temp$first_pos_id_in_segment <- pos_id_split[[2L]] %>% + map(~ rep(.x[1L], length(.x))) %>% + unlist(use.names = FALSE) + pd_flat_temp$lag_newlines <- pd_flat_temp$lag_newlines + pd_flat_temp$lag_spaces <- lag(pd_flat_temp$spaces, default = 0L) + is_terminal_to_ignore <- pd_flat_temp$terminal & pd_flat_temp$stylerignore + env_current$stylerignore <- vec_slice(pd_flat_temp, is_terminal_to_ignore) +} + +#' Adds the stylerignore column +#' +#' If a token should be ignored, the column is set to `TRUE`, +#' otherwise to `FALSE`. +#' @details +#' A token is ignored iff one of the two conditions hold: +#' +#' - it falls between a start and a stop marker whereas the markers are on +#' their own line. Which tokens are recognized as markers is controlled with +#' the R options `styler.ignore_start` and `styler.ignore_stop`. +#' - it is not a comment, but the last token on the line is a marker. +#' +#' See examples in [stylerignore]. Note that you should reuse the stylerignore +#' column to compute switch points or similar and not a plain +#' `pd$text %in% option_read("styler.ignore_start")` because that will fail to +#' give correct switch points in the case stylerignore sequences are invalid. +#' @param pd_flat A parse table. +#' @keywords internal +add_stylerignore <- function(pd_flat) { + parse_text <- trimws(pd_flat$text) + start_candidate <- grepl( + option_read("styler.ignore_start"), parse_text + ) & pd_flat$token == "COMMENT" + pd_flat$stylerignore <- rep(FALSE, length(start_candidate)) + env_current$any_stylerignore <- any(start_candidate) + if (!env_current$any_stylerignore) { + return(pd_flat) + } + pd_flat_lat_line1 <- lag(pd_flat$line2, default = 0L) + on_same_line <- pd_flat$line1 == pd_flat_lat_line1 + cumsum_start <- cumsum(start_candidate & !on_same_line) + cumsum_stop <- cumsum( + grepl(option_read("styler.ignore_stop"), parse_text) & + pd_flat$token == "COMMENT" + ) + pd_flat$indicator_off <- cumsum_start + cumsum_stop + is_invalid <- cumsum_start - cumsum_stop < 0L | cumsum_start - cumsum_stop > 1L + if (any(is_invalid)) { + cli::cli_warn(paste0( + "Invalid stylerignore sequences found, potentially ignoring some of the ", + "markers set.\nSee {.help styler::stylerignore}." + )) + } + + to_ignore <- as.logical(pd_flat$indicator_off %% 2L) + to_ignore[is_invalid] <- FALSE + single_lines_to_ignore <- pd_flat$line1[start_candidate & on_same_line] + to_ignore[pd_flat$line1 %in% single_lines_to_ignore] <- TRUE + pd_flat$indicator_off <- NULL + pd_flat[to_ignore, "stylerignore"] <- TRUE + pd_flat +} + +#' Ensure correct positional information for stylerignore expressions +#' +#' @param flattened_pd A flattened parse table. +#' @details +#' * Get the positional information for tokens with a stylerignore tag from +#' `env_current`, which recorded that information from the input text. +#' * Replace the computed lag_newlines and lag_spaces information in the parse +#' table with this information. +#' * Because we may remove or add tokens when applying the transformers, it is +#' not save to merge via the pos_id of each token in a stylerignore sequence. 
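For illustration (not part of the diff): the stylerignore behaviour documented above is easiest to see from the user side. A minimal sketch, assuming the default markers are the comments `# styler: off` / `# styler: on` (configurable via the options `styler.ignore_start` / `styler.ignore_stop` mentioned above):

    library(styler)
    style_text(c(
      "# styler: off",
      "c( 1,   0,",
      "   0,   1 )   # manual alignment left untouched",
      "# styler: on",
      "c( 1,   0,   0,   1 )   # this line is re-styled"
    ))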
+#' We assume that the start and stop markers are the same after styling, so we +#' join all tokens that were initially in a stylerignore sequence via the +#' first pos_id in that stylerignore sequence. +#' @keywords internal +apply_stylerignore <- function(flattened_pd) { + if (!env_current$any_stylerignore) { + return(flattened_pd) + } + env_current$stylerignore$pos_id_ <- env_current$stylerignore$pos_id + colnames_required_apply_stylerignore <- c( + "pos_id_", "lag_newlines", "lag_spaces", "text", "first_pos_id_in_segment" + ) + # cannot rely on flattened_pd$text == option_read("styler.ignore_start") + # because if the marker logic is not correct (twice off in a row), we'll + # get it wrong. + to_ignore <- flattened_pd$stylerignore + not_first <- flattened_pd$stylerignore == lag( + flattened_pd$stylerignore, + default = FALSE + ) + + flattened_pd <- merge( + vec_slice(flattened_pd, !(to_ignore & not_first)), + env_current$stylerignore[, colnames_required_apply_stylerignore], + by.x = "pos_id", by.y = "first_pos_id_in_segment", all.x = TRUE, + sort = FALSE + ) + + flattened_pd %>% + stylerignore_consolidate_col("lag_newlines") %>% + stylerignore_consolidate_col("lag_spaces") %>% + stylerignore_consolidate_col("text") %>% + stylerignore_consolidate_col("pos_id", "pos_id", "pos_id_") %>% + arrange_pos_id() +} + +#' Consolidate columns after a merge +#' +#' After [base::merge()], all non-id columns that were present in `x` and `y` +#' do get a suffix `.x` and `.y`. If the `y` value is missing, use the `x` +#' value (because the information for this token was not stylerignored), +#' otherwise the `y` value (i.e. the styled value). +#' @param col A string indicating the name of the column that should be +#' consolidated. +#' @param col_x,col_y The name of the column from the left (right) parent to +#' consolidate. +#' @inheritParams apply_stylerignore +#' @keywords internal +stylerignore_consolidate_col <- function(flattened_pd, + col, + col_x = paste0(col, ".x"), + col_y = paste0(col, ".y")) { + flattened_pd[[col]] <- ifelse(is.na(flattened_pd[[col_y]]), + flattened_pd[[col_x]], + flattened_pd[[col_y]] + ) + if (col != col_x) { + flattened_pd[[col_x]] <- NULL + } + if (col != col_y) { + flattened_pd[[col_y]] <- NULL + } + + flattened_pd +} diff --git a/R/testing-mocks.R b/R/testing-mocks.R new file mode 100644 index 000000000..cc52625fb --- /dev/null +++ b/R/testing-mocks.R @@ -0,0 +1,35 @@ +#' `style_text()` without rules for `\{\{` +#' +#' This function mocks [style_text()], but without taking into consideration the +#' rules for the curly-curly syntactic sugar (introduced in rlang 0.4). +#' This function (`style_text_without_curly_curly()`) is needed for testing +#' only, namely to test indention +#' with multiple curly braces in a sequence. It is important to maintain testing +#' for indention rules even as the curly-curly expression is always kept on the +#' same line in the tidyverse style guide because we should +#' ensure the underlying mechanics for indention work correctly. When +#' indention mechanisms are changed later, e.g. by simplifying +#' [compute_indent_indices()], we must have +#' a way of testing this without the interaction of `\{\{`. 
+#' @examples +#' styler:::style_text_without_curly_curly("rlang::list2({{ x }} := 2L)") +#' styler:::style_text("rlang::list2({{ x }} := 3)") +#' @keywords internal +#' @seealso set_line_break_around_curly_curly +style_text_without_curly_curly <- function(text, + ..., + style = tidyverse_style, + transformers = style(...), + include_roxygen_examples = TRUE) { + dots <- list(...) + if ("strict" %in% names(dots)) { + strict <- dots$strict + } else { + strict <- TRUE + } + transformers$line_break$set_line_break_around_curly_curly <- NULL + style_text(text, ..., + style = NULL, transformers = transformers, + include_roxygen_examples = include_roxygen_examples + ) +} diff --git a/R/testing-public-api.R b/R/testing-public-api.R new file mode 100644 index 000000000..0cc325414 --- /dev/null +++ b/R/testing-public-api.R @@ -0,0 +1,54 @@ +#' Capture and post-process the output of `style_file` without causing side +#' effects +#' +#' @param file_in A vector with paths relative to `tests/testthat` the path +#' to the reference file. +#' @return +#' A list. Each element is a character vector with the captured output of +#' [style_file()] called on +#' `file_in` ran in a temp dir to avoid side effects on the input file (because +#' the next time the test would ran, the file would not need styling). The +#' styling is carried out with a temporary working directory change to keep +#' filenames relative and avoid portability issues in the exact output +#' comparison which is needed when the system that runs the unit testing (CI) +#' is a different system than the one that created the reference value. +#' This also implies that the ruler width, which depend on the path +#' length, will again have the same width on all systems and is independent of +#' how many characters the path of the temporary directory has. +#' @keywords internal +catch_style_file_output <- function(file_in) { + file_in <- testthat_file(file_in) + temp_path <- copy_to_tempdir(file_in) + raw_output <- withr::with_dir( + dirname(temp_path), + utils::capture.output( + style_file(basename(temp_path), scope = "tokens") + ) + ) + unlink(dirname(temp_path)) + raw_output +} + +ls_testable_encodings <- function() { + c("non-utf8", if (cli::is_utf8_output()) "utf8") +} + +#' Test the dry argument +#' @param path A path to pass to the `styler`. +#' @param styler A function that takes `path`, typically a user exposed styler +#' function that has side effects, like [style_file()]. +#' @keywords internal +test_dry <- function(path, styler, styled = FALSE) { + before <- readLines(path) + summary <- styler(path, dry = "on") + checker <- ifelse(styled, testthat::expect_false, testthat::expect_true) + checker(summary$changed) + testthat::expect_identical(before, readLines(path)) + + if (styled) { + testthat::expect_error(styler(path, dry = "fail"), NA) + } else { + testthat::expect_error(styler(path, dry = "fail"), "would be modified") + } + testthat::expect_error(styler(path, dry = "other option"), "one of") +} diff --git a/R/serialized_tests.R b/R/testing.R similarity index 50% rename from R/serialized_tests.R rename to R/testing.R index 691fb0cbc..5d48d6852 100644 --- a/R/serialized_tests.R +++ b/R/testing.R @@ -8,21 +8,17 @@ #' @param sub_test A regex pattern to further reduce the amount of test files #' to be tested in the test. `sub_test` must match the beginning of file #' names in tests/testthat. `NULL` matches all files. -#' @details Each file name that matches `test` and `sub_test` and ends with -#' "-in.R" is considered as an input to test. 
Its counterpart, -#' the reference to compare it against is the *-out.R file. It is constructed -#' by taking the substring of the *-in.R file before the -#' first dash and adding -out.R. This allows for multiple in.R files to -#' share one out.R file. You could have one_line-out.R as the reference to -#' compare one_line-random-something-stuff-in.R and -#' one_line-random-but-not-so-much-in.R. -#' -#' This also implies that -out.R files cannot have more than one dash in -#' their name, i.e. just the one before out.R. +#' @details +#' Each file name that matches `test` and `sub_test` and ends with +#' "-in.R" is considered as an input to test. Its counterpart, +#' the reference to compare it against is the *-out.R file. It is constructed +#' by taking the substring of the *-in.R file before the +#' last dash and adding -out.R. In contrast to older versions of this +#' function, every *-out.R file has just one in file. #' @inheritParams transform_and_check -#' @importFrom purrr flatten_chr pwalk map +#' @keywords internal test_collection <- function(test, sub_test = NULL, - write_back = TRUE, + dry = "off", write_tree = NA, transformer, ...) { @@ -30,7 +26,7 @@ test_collection <- function(test, sub_test = NULL, pattern <- paste0( if (!is.null(sub_test)) paste0("^", sub_test, ".*"), - "in\\.R(?:|md)$" + "in\\.R(?:|md|nw)$" ) in_names <- list.files( @@ -39,19 +35,26 @@ test_collection <- function(test, sub_test = NULL, full.names = FALSE ) - if (length(in_names) < 1) stop("no items to check") + if (length(in_names) < 1L) abort("no items to check") out_names <- construct_out(in_names) - out_items <- file.path(path, out_names) - in_items <- file.path(path, in_names) - - out_trees <- construct_tree(in_items) + if (getOption("styler.test_dir_writable", TRUE)) { + out_items <- file.path(path, out_names) + in_items <- file.path(path, in_names) + out_trees <- construct_tree(in_items) + } else { + in_items <- file.path(path, in_names) + out_items <- file.path(tempdir(), out_names) + ref_items <- file.path(path, out_names) + file.copy(ref_items, out_items, overwrite = TRUE, copy.mode = FALSE) + out_trees <- file.path(tempdir(), construct_tree(in_names)) + } pwalk(list(in_items, out_items, in_names, out_names, out_trees), transform_and_check, transformer = transformer, - write_back = write_back, + dry = dry, write_tree = write_tree, ... ) @@ -63,16 +66,20 @@ test_collection <- function(test, sub_test = NULL, #' *-out.R file, everything after the first dash is replaced by *-out.R. #' @param in_paths A character vector that denotes paths to *-in.R files. #' @examples -#' styler:::construct_out(c("path/to/file/first-in.R", -#' "path/to/file/first-extended-in.R")) +#' styler:::construct_out(c( +#' "path/to/file/first-in.R", +#' "path/to/file/first-extended-in.R" +#' )) +#' @keywords internal construct_out <- function(in_paths) { - gsub("\\-.*([.]R(?:|md))$", "\\-out\\1", in_paths) + gsub("\\-in([.]R(?:|md|nw))$", "\\-out\\1", in_paths) } #' Construct paths of a tree object given the paths of *-in.R files #' #' @param in_paths Character vector of *-in.R files. #' @param suffix Suffix for the tree object. +#' @keywords internal construct_tree <- function(in_paths, suffix = "_tree") { gsub("\\.R$", suffix, in_paths) } @@ -85,46 +92,47 @@ construct_tree <- function(in_paths, suffix = "_tree") { #' @param in_name The label of the in_item, defaults to `in_item`. #' @param out_name The label of the out_item, defaults to `out_item`. #' @param transformer A function to apply to the content of `in_item`. 
-#' @param write_back Whether the results of the transformation should be written -#' to the output file. #' @param write_tree Whether or not the tree structure of the test should be #' computed and written to a file. Note that this needs R >= 3.2 #' (see [set_arg_write_tree()]). If the argument is set to `NA`, the function #' determines whether R >= 3.2 is in use and if so, trees will be written. #' @param ... Parameters passed to transformer function. #' @param out_tree Name of tree file if written out. -#' @importFrom utils write.table +#' @inheritParams transform_utf8 +#' @keywords internal transform_and_check <- function(in_item, out_item, in_name = in_item, out_name = out_item, - transformer, write_back, + transformer, dry, write_tree = NA, out_tree = "_tree", ...) { write_tree <- set_arg_write_tree(write_tree) - read_in <- enc::read_lines_enc(in_item) + read_in <- read_utf8_bare(in_item) if (write_tree) { create_tree(read_in) %>% - write.table(out_tree, col.names = FALSE, row.names = FALSE, quote = FALSE) + utils::write.table(out_tree, + col.names = FALSE, row.names = FALSE, quote = FALSE, + fileEncoding = "UTF-8" + ) } transformed_text <- read_in %>% transformer(...) %>% unclass() - transformed <- enc::transform_lines_enc( + if (!file.exists(out_item)) { + warn(paste( + "File", out_item, "does not exist. Creating it from transormation." + )) + file.create(out_item) + } + transformed <- transform_utf8( out_item, function(x) transformed_text, - write_back = write_back, - verbose = FALSE + dry = dry ) if (transformed) { - warning( - in_name, " was different from ", out_name, - immediate. = TRUE, call. = FALSE - ) + warn(paste(in_name, "was different from", out_name)) } else { - message( - in_name, " was identical to ", out_name, - immediate. = TRUE, call. = FALSE - ) + message(in_name, " was identical to ", out_name) } } @@ -142,42 +150,53 @@ transform_and_check <- function(in_item, out_item, #' As inputs for [test_collection()], we can also use top-level functions such #' as [style_text()]. #' @rdname test_transformer +#' @keywords internal NULL #' @describeIn test_transformer Nest and unnest `text` without applying any #' transformations but remove EOL spaces and indention due to the way the #' serialization is set up. 
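For orientation (not part of the diff): the serialization effects mentioned here can be observed by calling the test transformer defined just below; a minimal sketch using the internal helper:

    styler:::style_empty("a  <-  c( 1,1 )   ")
    # inner spacing is untouched (no space rules are set), but the trailing
    # end-of-line spaces are gone after re-serialization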
-style_empty <- function(text) { +#' @keywords internal +style_empty <- function(text, base_indention = 0L) { transformers <- list( # transformer functions initialize = default_style_guide_attributes, - line_break = NULL, - space = NULL, - token = NULL, + line_break = NULL, + space = NULL, + token = NULL, # transformer options use_raw_indention = FALSE, - reindention = specify_reindention(), + reindention = specify_reindention(), + indent_character = " ", NULL ) - transformed_text <- parse_transform_serialize(text, transformers) + transformed_text <- parse_transform_serialize_r(text, + transformers = transformers, + base_indention = base_indention + ) transformed_text } #' @describeIn test_transformer Transformations for indention based on operators -style_op <- function(text) { +#' @keywords internal +style_op <- function(text, base_indention = 0L) { transformers <- list( # transformer functions initialize = default_style_guide_attributes, line_break = NULL, - space = partial(indent_op, indent_by = 2), + space = partial(indent_op, indent_by = 2L), token = NULL, # transformer options use_raw_indention = FALSE, reindention = specify_reindention(), + indent_character = " ", NULL ) - transformed_text <- parse_transform_serialize(text, transformers) + transformed_text <- parse_transform_serialize_r(text, + transformers = transformers, + base_indention = base_indention + ) transformed_text } @@ -185,16 +204,17 @@ style_op <- function(text) { #' Create the path to a test that file #' @param ... Arguments passed to [file.path()] to construct the path after #' ".../tests/testthat/" +#' @keywords internal testthat_file <- function(...) { file.path(rprojroot::find_testthat_root_file(), ...) } - #' Copy a file to a temporary directory #' #' Takes the path to a file as input and returns the path where the temporary #' file is stored. Don't forget to unlink once you are done. #' @param path_perm The path of the file to copy. +#' @keywords internal copy_to_tempdir <- function(path_perm = testthat_file()) { dir <- tempfile("styler") dir.create(dir) @@ -203,14 +223,53 @@ copy_to_tempdir <- function(path_perm = testthat_file()) { file.path(dir, base) } +#' Times two function calls with temporarily enabled cache +#' +#' This can be helpful for benchmarking. +#' @param ... Arguments passed to `fun`. +#' @param fun The function that should be timed. +#' @param n The number of times the experiment should be repeated. +#' @return +#' A scalar indicating the relative difference of the second compared to the +#' first run. +#' @keywords internal +n_times_faster_with_cache <- function(x1, x2 = x1, ..., + fun = styler::style_text, + n = 3L, + clear = "always") { + rlang::arg_match(clear, c("always", "final", "never", "all but last")) + + out <- purrr::map(1L:n, n_times_faster_bench, + x1 = x1, x2 = x2, fun = fun, + ..., n = n, clear = clear + ) + out <- out %>% + purrr::map_dbl( + ~ unname(.x$first["elapsed"] / .x$second["elapsed"]) + ) %>% + mean() + + out +} + + +n_times_faster_bench <- function(i, x1, x2, fun, ..., n, clear) { + local_test_setup(cache = TRUE) + first <- system.time(fun(x1, ...)) -stop_insufficient_r_version <- function() { - stop(paste0( - "Can't write tree with R version ", getRversion(), - "since data.tree not available. Needs at least R version 3.2." - ), call. 
= FALSE) + if (is.null(x2)) { + second <- c(elapsed = 1L) + } else { + second <- system.time(fun(x2, ...)) + } + list( + first = first, + second = second, + cache = cache_info(format = "tabular") + ) } + #' Generate a comprehensive collection test cases for comment / insertion #' interaction #' Test consist of if / if-else / if-else-if-else cases, paired with various @@ -220,14 +279,15 @@ stop_insufficient_r_version <- function() { #' test cases to *-in.R files that can be tested with [test_collection()]. Note #' that a few of the test cases are invalid and need to be removed / commented #' out manually. +#' @keywords internal generate_test_samples <- function() { gen <- function(x) { - if (length(x) == 0) { + if (length(x) == 0L) { "" } else { c( - paste0(x[1], gen(x[-1])), - paste0(x[1], " # comment\n", paste(x[-1], collapse = "")) + paste0(x[1L], gen(x[-1L])), + paste0(x[1L], " # comment\n", paste(x[-1L], collapse = "")) ) } } @@ -250,3 +310,74 @@ generate_test_samples <- function() { file = "tests/testthat/insertion_comment_interaction/if_else_if_else-in.R" ) } + +#' @include ui-caching.R +clear_testthat_cache <- purrr::partial(cache_clear, "testthat", ask = FALSE) +activate_testthat_cache <- purrr::partial(cache_activate, "testthat") + +#' Establish testing setup for current environment +#' +#' @param cache Whether or not to create and activate a cache in a temporary +#' directory. +#' @param .local_envir The environment to use for scoping. +#' @details +#' * make styler quiet. +#' @keywords internal +local_test_setup <- function(cache = FALSE, + .local_envir = parent.frame()) { + current_cache <- cache_info(format = "tabular") + withr::local_options( + list("styler.quiet" = TRUE, "R.cache.rootPath" = tempfile()), + .local_envir = .local_envir + ) + if (cache) { + withr::defer( + { + clear_testthat_cache() + cache_activate(basename(current_cache$location)) + if (!current_cache$activated) { + cache_deactivate() + } + }, + envir = .local_envir + ) + activate_testthat_cache() + } +} + +cache_more_specs_default <- function() { + cache_more_specs(include_roxygen_examples = TRUE, base_indention = 0L) +} + +#' Test `transformers_drop` for consistency +#' +#' Check if the argument `transformers_drop` in [create_style_guide()] is +#' consistent with the transformers specified in that function. +#' @param transformers The output of [create_style_guide()] we want to test. +#' @keywords internal +test_transformers_drop <- function(transformers) { + scopes <- intersect( + names(transformers$transformers_drop), + names(transformers) + ) + + purrr::walk2(transformers$transformers_drop, transformers[scopes], function(x, y) { + # all x must be in y. select the x that are not in y + diff <- setdiff(names(x), names(y)) + if (length(diff) > 0L) { + rlang::abort(paste( + "transformers_drop specifies exclusion rules for transformers that ", + "are not in the style guilde. Please add the rule to the style guide ", + "or remove the dropping rules:", toString(diff) + )) + } + }) +} + + +skip_during_parallel <- function() { + Sys.getenv("STYLER_TEST_IS_TRULY_PARALLEL", TRUE) %>% + toupper() %>% + as.logical() %>% + testthat::skip_if("Running in parallel") +} diff --git a/R/token-create.R b/R/token-create.R index fa617bd05..d793b89f8 100644 --- a/R/token-create.R +++ b/R/token-create.R @@ -18,35 +18,48 @@ #' @param terminal Boolean vector indicating whether a token is a terminal or #' not. #' @param child The children of the tokens. 
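The benchmarking helpers above (`n_times_faster_with_cache()` and friends) can be approximated interactively with the exported caching functions; a sketch, where the cache name "benchmark" and the input `deparse(lm)` are arbitrary choices:

    library(styler)
    code <- deparse(lm)                        # any non-trivial chunk of R code
    cache_activate("benchmark")
    cold <- system.time(style_text(code))["elapsed"]
    warm <- system.time(style_text(code))["elapsed"]
    unname(cold / warm)                        # > 1 once expressions come from the cache
    cache_deactivate()
    cache_clear("benchmark", ask = FALSE)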
+#' @param stylerignore Boolean to indicate if the line should be ignored by +#' styler. Must take value from token before, can't have a default. +#' @param block The block (of caching) to which the token belongs. An integer. +#' @param is_cached Whether the token is cached already. #' @family token creators +#' @keywords internal create_tokens <- function(tokens, texts, - lag_newlines = 0, - spaces = 0, + lag_newlines = 0L, + spaces = 0L, pos_ids, token_before = NA, token_after = NA, indention_ref_pos_ids = NA, - indents = 0, + indents, terminal = TRUE, - child = NULL) { - len_text <- length(text) - data_frame( - token = tokens, - text = texts, - short = substr(texts, 1, 5), - lag_newlines = lag_newlines, - newlines = lead(lag_newlines), - pos_id = pos_ids, - token_before = token_before, - token_after = token_after, - terminal = rep(terminal, len_text), - internal = rep(FALSE, len_text), - spaces = spaces, - multi_line = rep(FALSE, len_text), - indention_ref_pos_id = indention_ref_pos_ids, - indent = indents, - child = rep(list(child), len_text) + child = NULL, + stylerignore, + block = NA, + is_cached = FALSE) { + len_text <- length(texts) + new_styler_df( + list( + token = tokens, + text = texts, + short = substr(texts, 1L, 5L), + lag_newlines = lag_newlines, + newlines = lead(lag_newlines), + pos_id = pos_ids, + token_before = token_before, + token_after = token_after, + terminal = rep(terminal, len_text), + internal = rep(FALSE, len_text), + spaces = spaces, + multi_line = rep(0L, len_text), + indention_ref_pos_id = indention_ref_pos_ids, + indent = indents, + child = rep(list(child), len_text), + stylerignore = stylerignore, + block = block, + is_cached = is_cached + ) ) } @@ -63,10 +76,17 @@ create_tokens <- function(tokens, #' Returns a valid sequences of pos_ids or an error if it was not possible to #' create one. The validation is done with [validate_new_pos_ids()] #' @family token creators -create_pos_ids <- function(pd, pos, by = 0.1, after = FALSE, n = 1) { - direction <- ifelse(after, 1L, -1L) +#' @keywords internal +create_pos_ids <- function(pd, pos, by = 0.1, after = FALSE, n = 1L) { + direction <- if (after) { + 1L + } else { + -1L + } first <- find_start_pos_id(pd, pos, by, direction, after) - new_ids <- seq(first, to = first + direction * (n - 1) * by, by = by * direction) + new_ids <- seq(first, + to = first + direction * (n - 1L) * by, by = by * direction + ) validate_new_pos_ids(new_ids, after) new_ids } @@ -80,16 +100,37 @@ create_pos_ids <- function(pd, pos, by = 0.1, after = FALSE, n = 1) { #' @param candidates The `pos_ids` of the candidates that origin from other #' nests. 
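For orientation, the fractional pos_id scheme used by `create_pos_ids()` above boils down to the following base-R arithmetic (values chosen for illustration only):

    # inserting n = 3 tokens *after* an existing token whose pos_id is 7
    by <- 0.1; direction <- 1L; n <- 3L
    first <- 7 + by * direction
    new_ids <- seq(first, to = first + direction * (n - 1L) * by, by = by * direction)
    new_ids                                    # 7.1 7.2 7.3
    all(abs(new_ids - floor(new_ids)) <= 0.5)  # the bound validate_new_pos_ids() enforces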
#' @inheritParams create_pos_ids -find_start_pos_id <- function(pd, pos, by, direction, after, candidates = NULL) { +#' @keywords internal +find_start_pos_id <- function(pd, + pos, + by, + direction, + after, + candidates = NULL) { candidates <- append(candidates, pd$pos_id[pos]) if (is.null(pd$child[[pos]])) { - ifelse(after, max(candidates), min(candidates)) + by * direction + start_pos_idx <- if (after) { + max(candidates) + } else { + min(candidates) + } + start_pos_idx <- start_pos_idx + (by * direction) } else { - find_start_pos_id( - pd$child[[pos]], if_else(after, nrow(pd$child[[pos]]), 1L), - by, direction, after, candidates + start_pos_idx <- find_start_pos_id( + pd$child[[pos]], + if (after) { + nrow(pd$child[[pos]]) + } else { + 1L + }, + by, + direction, + after, + candidates ) } + + start_pos_idx } @@ -103,10 +144,17 @@ find_start_pos_id <- function(pd, pos, by, direction, after, candidates = NULL) #' @param new_ids A vector with new ids #' @param after Whether the ids are created with `after = TRUE` (and hence #' should be in the range x.0-x.45) or not. + #' @family token creators +#' @keywords internal validate_new_pos_ids <- function(new_ids, after) { - ref <- ifelse(after, floor(new_ids), ceiling(new_ids)) - if (any(abs(new_ids - ref) > 0.5)) stop("too many ids assigned") + ref <- if (after) { + floor(new_ids) + } else { + ceiling(new_ids) + } + + if (any(abs(new_ids - ref) > 0.5)) abort("too many ids assigned.") } #' Wrap an expression in curly braces @@ -116,23 +164,33 @@ validate_new_pos_ids <- function(new_ids, after) { #' @param pd A parse table. #' @param stretch_out Whether or not to create a line break after the opening #' curly brace and before the closing curly brace. -wrap_expr_in_curly <- function(pd, stretch_out = c(FALSE, FALSE)) { - if (is_curly_expr(pd)) return(pd) - if (stretch_out[1]) { - pd$lag_newlines[1] <- 1L +#' @param space_after How many spaces should be inserted after the closing brace. 
+#' @keywords internal +wrap_expr_in_curly <- function(pd, + stretch_out = c(FALSE, FALSE), + space_after = 1L) { + if (is_curly_expr(pd)) { + return(pd) + } + if (stretch_out[1L]) { + pd$lag_newlines[1L] <- 1L } - opening <- create_tokens( - "'{'", "{", - pos_ids = create_pos_ids(pd, 1, after = FALSE), - spaces = 1 - as.integer(stretch_out[1]) + opening <- create_tokens("'{'", "{", + pos_ids = create_pos_ids(pd, 1L, after = FALSE), + spaces = 1L - as.integer(stretch_out[1L]), + stylerignore = pd$stylerignore[1L], + indents = pd$indent[1L] ) closing <- create_tokens( - "'}'", "}", spaces = 1, lag_newlines = as.integer(stretch_out[2]), - pos_ids = create_pos_ids(pd, nrow(pd), after = TRUE) + "'}'", "}", + spaces = space_after, lag_newlines = as.integer(stretch_out[2L]), + pos_ids = create_pos_ids(pd, nrow(pd), after = TRUE), + stylerignore = pd$stylerignore[1L], + indents = pd$indent[1L] ) - bind_rows(opening, pd, closing) %>% + vec_rbind(opening, pd, closing) %>% set_multi_line() } diff --git a/R/token-define.R b/R/token-define.R index fc203bf7f..0954bcb80 100644 --- a/R/token-define.R +++ b/R/token-define.R @@ -1,52 +1,56 @@ -token <- tribble( - ~text , ~class , ~token , - "&" , "logical" , "AND" , - "&&" , "logical" , "AND2" , - "|" , "logical" , "OR" , - "||" , "logical" , "OR2" , - ">" , "logical" , "GT" , - "<" , "logical" , "LT" , - "<=" , "logical" , "LE" , - ">=" , "logical" , "GE" , - "!=" , "logical" , "NE" , - "==" , "logical" , "EQ" , - "=" , "assign_left" , "EQ_SUB" , - "=" , "assign_left" , "EQ_ASSIGN" , - "<-" , "assign_left" , "LEFT_ASSIGN" , - "->" , "assign_right", "RIGHT_ASSIGN", - "+" , "math" , "'+'" , - "-" , "math" , "'-'" , - "*" , "math" , "'*'" , - "/" , "math" , "'/'" , - "^" , "math" , "'^'" , - "if" , "cond" , "IF" , - "else" , "cond" , "ELSE" , - "in" , "loop_cond" , "IN" , - "while", "loop_cond" , "WHILE" +# styler: off +token <- rbind.data.frame( + c("&", "logical", "AND"), + c("&&", "logical", "AND2"), + c("|", "logical", "OR"), + c("||", "logical", "OR2"), + c(">", "logical", "GT"), + c("<", "logical", "LT"), + c("<=", "logical", "LE"), + c(">=", "logical", "GE"), + c("!=", "logical", "NE"), + c("==", "logical", "EQ"), + c("=", "assign_left", "EQ_SUB"), + c("=", "assign_left", "EQ_ASSIGN"), + c("<-", "assign_left", "LEFT_ASSIGN"), + c("->", "assign_right", "RIGHT_ASSIGN"), + c("+", "math", "'+'"), + c("-", "math", "'-'"), + c("*", "math", "'*'"), + c("/", "math", "'/'"), + c("^", "math", "'^'"), + c("~", "formula", "'~'"), + c("if", "cond", "IF"), + c("else", "cond", "ELSE"), + c("in", "loop_cond", "IN"), + c("while", "loop_cond", "WHILE"), + stringsAsFactors = FALSE ) +# styler: on +colnames(token) <- c("text", "class", "token") math_token <- token$token[token$class == "math"] logical_token <- token$token[token$class == "logical"] left_assignment_token <- token$token[token$class == "assign_left"] right_assignment_token <- token$token[token$class == "assign_right"] #' Lookup all tokens that have a unique token-text mapping -#' +#' @keywords internal lookup_tokens <- function() { token } -#' lookup which new tokens were created from "SPECIAL" +#' Lookup which new tokens were created from "SPECIAL" #' #' @param regex A regular expression pattern to search for. 
-#' @importFrom purrr map_chr +#' @keywords internal lookup_new_special <- function(regex = NA) { new_special <- c("PIPE", "IN", "OTHER") potential_regex <- grep(regex, new_special, value = TRUE, ignore.case = TRUE) if (is.na(regex)) { mapping <- new_special - } else if (length(potential_regex) > 0) { + } else if (length(potential_regex) > 0L) { mapping <- potential_regex } else { return(NA) @@ -58,11 +62,12 @@ special_token <- lookup_new_special() op_token <- c( special_token, + "PIPE", logical_token, left_assignment_token, right_assignment_token, - "EQ_SUB", "ELSE", "IN" + "EQ_SUB", "ELSE", "IN", + "EQ_FORMALS" ) - - +subset_token_opening <- c("'['", "LBB") diff --git a/R/transform-block.R b/R/transform-block.R new file mode 100644 index 000000000..ba6ccd5f5 --- /dev/null +++ b/R/transform-block.R @@ -0,0 +1,142 @@ +#' Parse, transform and serialize a nested parse table +#' +#' We process blocks of nested parse tables for speed. See [cache_find_block()] +#' for details on how a top-level nest is split into blocks. +#' @param pd_nested A block of the nested parse table. +#' @param start_line The line number on which the code starts. +#' @param base_indention Integer scalar indicating by how many spaces the whole +#' output text should be indented. Note that this is not the same as splitting +#' by line and add a `base_indention` spaces before the code in the case +#' multi-line strings are present. See 'Examples'. +#' @inheritParams apply_transformers +#' @examples +#' text_in <- 'x<- function() +#' "here +#' is" +#' NULL +#' 1+ 1 +#' ' +#' style_text(text_in, base_indention = 3) +#' # not equal to the naive approach +#' styler:::construct_vertical( +#' paste0(styler:::add_spaces(3), style_text(text_in), sep = "") +#' ) +#' @keywords internal +parse_transform_serialize_r_block <- function(pd_nested, + start_line, + transformers, + base_indention) { + if (!all(pd_nested$is_cached, na.rm = TRUE) || !cache_is_activated()) { + transformed_pd <- apply_transformers(pd_nested, transformers) + flattened_pd <- + # Special transformer: returns a list of pd + vec_rbind(!!!post_visit_one(transformed_pd, extract_terminals)) %>% + enrich_terminals(transformers$use_raw_indention) %>% + apply_ref_indention() %>% + set_regex_indention( + pattern = transformers$reindention$regex_pattern, + target_indention = transformers$reindention$indention, + comments_only = transformers$reindention$comments_only + ) + is_on_newline <- flattened_pd$lag_newlines > 0L + is_on_newline[1L] <- TRUE + flattened_pd$lag_spaces[is_on_newline] <- flattened_pd$lag_spaces[is_on_newline] + base_indention + serialized_transformed_text <- serialize_parse_data_flattened( + flattened_pd, + indent_character = transformers$indent_character + ) + } else { + serialized_transformed_text <- map2( + c(0L, find_blank_lines_to_next_expr(pd_nested)[-1L] - 1L), + paste0(rep_char(" ", base_indention), pd_nested$text), + ~ c(rep("", .x), .y) + ) %>% + unlist(use.names = FALSE) + } + c( + rep("", start_line - as.integer(start_line > 0L)), + serialized_transformed_text + ) +} + +#' Find the groups of expressions that should be processed together +#' +#' @param pd A top-level parse table. +#' +#' @details +#' +#' We want blocks to be formed according to these rules: +#' +#' - Blocks should contain either cached or uncached expressions only. If a +#' block contains cached expressions only, it does not have to be processed +#' and can be returned immediately. 
If a block contains uncached expressions, +#' it makes sense to put as many uncached expression in it, since processing +#' one bigger block has less overhead than processing many smaller blocks. +#' +#' - Multiple expressions can sit on one row, e.g. in-line comment and commands +#' separated with ";". This creates a problem when processing each expression +#' separately because when putting them together, we need complicated handling +#' of line breaks between them, as it is not *a priori* clear that there is a +#' line break separating them. To avoid this, we put top-level expressions +#' that sit on the same line into one block, so the assumption that there is a +#' line break between each block of expressions holds. +#' +#' - All expressions in a stylerignore sequence must be in the same block. If +#' that's not the case, the first expression in a block might not be a +#' top-level terminal, but another top-level expression. +#' [apply_stylerignore()] joins `env_current$stylerignore`, which contains +#' only terminals, with the first expression in a stylerignore sequence, based +#' on the first `pos_id` in that stylerignore sequence +#' (`first_pos_id_in_segment`). +#' +#' @param pd A top-level nest. +#' @keywords internal +cache_find_block <- function(pd) { + first_after_cache_state_switch <- pd$is_cached != lag(pd$is_cached, default = !pd$is_cached[1L]) + + not_first_on_line <- find_blank_lines_to_next_expr(pd) == 0L + invalid_turning_point_idx <- which( + not_first_on_line & first_after_cache_state_switch + ) + + first_on_line_idx <- which(!not_first_on_line) + valid_replacements <- map_int(invalid_turning_point_idx, function(x) { + first_on_line_idx[last(which(x > first_on_line_idx))] + }) + + turning_points <- setdiff( + which(first_after_cache_state_switch), + c(which(pd$stylerignore), invalid_turning_point_idx) + ) %>% + c(1L, valid_replacements) %>% + unique() %>% + sort() + + turning_points %>% + unwhich(nrow(pd)) %>% + cumsum() +} + + +#' Find blank lines +#' +#' What number of line breaks lay between the expressions? +#' @param pd_nested A nested parse table. +#' @return The line number on which the first token occurs. +#' @keywords internal +find_blank_lines_to_next_expr <- function(pd_nested) { + pd_nested$line1 - lag(pd_nested$line2, default = 0L) +} + +#' Number of lines between cache blocks +#' +#' This is relevant when putting expressions together into a block and preserve +#' blank lines between them. Note that because code does not need to start on +#' line 1, the first element of the output is the number of lines until the +#' first block. +#' @param pd A top-level nest. +#' @keywords internal +find_blank_lines_to_next_block <- function(pd) { + block_boundary <- pd$block != lag(pd$block, default = 0L) + find_blank_lines_to_next_expr(pd)[block_boundary] +} diff --git a/R/transform-code.R b/R/transform-code.R index 1eaf51b63..4f2b0337d 100644 --- a/R/transform-code.R +++ b/R/transform-code.R @@ -1,76 +1,175 @@ -#' Transform code from R or Rmd files +#' Transform code from R, Rmd or Rnw files #' -#' A wrapper for [enc::transform_lines_enc()] which initiates the styling of -#' either R or Rmd files by passing the relevant transformer function for each -#' case. +#' A wrapper which initiates the styling of +#' either R, Rmd or Rnw files by passing the relevant transformer function for +#' each case. #' -#' @inheritParams enc::transform_lines_enc -#' @param ... Further arguments passed to `enc::transform_lines_enc()`. 
-transform_code <- function(path, fun, verbose = FALSE, ...) { - if (is_plain_r_file(path)) { - enc::transform_lines_enc(path, fun = fun, ..., verbose = verbose) - } else if (is_rmd_file(path)) { - enc::transform_lines_enc( - path, fun = partial(transform_rmd, transformer_fun = fun), ..., - verbose = verbose +#' @inheritParams transform_utf8 +#' @param ... Further arguments passed to [transform_utf8()]. + +#' @keywords internal +transform_code <- function(path, fun, ..., dry) { + if (is_plain_r_file(path) || is_rprofile_file(path)) { + transform_utf8(path, fun = fun, ..., dry = dry) + } else if (is_rmd_file(path) || is_qmd_file(path)) { + transform_utf8(path, + fun = partial(transform_mixed, transformer_fun = fun, filetype = "Rmd"), + ..., dry = dry + ) + } else if (is_rnw_file(path)) { + transform_utf8(path, + fun = partial(transform_mixed, transformer_fun = fun, filetype = "Rnw"), + ..., dry = dry ) } else { - stop(path, " is not an R or Rmd file") + abort(paste(path, "is not an R, Rmd, qmd, or Rnw file")) } } -#' Transform Rmd contents +#' Transform mixed contents #' #' Applies the supplied transformer function to code chunks identified within -#' an Rmd file and recombines the resulting (styled) code chunks with the text -#' chunks. +#' an Rmd or Rnw file and recombines the resulting (styled) code chunks with the +#' text chunks. #' -#' @param lines A character vector of lines from an Rmd file -#' @param transformer_fun A styler transformer function -#' @importFrom purrr flatten_chr -transform_rmd <- function(lines, transformer_fun) { - chunks <- identify_chunks(lines) - chunks$r_chunks <- map(chunks$r_chunks, transformer_fun) - - map2(chunks$text_chunks, c(chunks$r_chunks, list(character(0))), c) %>% +#' @param transformer_fun A styler transformer function. +#' @inheritParams separate_chunks +#' @keywords internal +transform_mixed <- function(lines, transformer_fun, filetype) { + chunks <- separate_chunks(lines, filetype) + chunks$r_chunks <- map(chunks$r_chunks, transform_mixed_non_empty, + transformer_fun = transformer_fun + ) + map2(chunks$text_chunks, c(chunks$r_chunks, list(character(0L))), c) %>% flatten_chr() } +#' Ensure for `.Rmd` and friends that a code chunk without code is formatted as +#' a code chunk without any lines. +#' @keywords internal +transform_mixed_non_empty <- function(r_chunk, transformer_fun) { + trimmed <- trimws(r_chunk) + if (all(trimmed == "") || identical(trimmed, character(0L))) { + character(0L) + } else { + transformer_fun(r_chunk) + } +} -#' Identify chunks within Rmd contents +#' Separate chunks within Rmd and Rnw contents #' -#' Identifies the code and text chunks within an Rmd file, and returns these -#' as a nested list. -#' -#' @param lines a character vector of lines from an Rmd file +#' Identifies and separates the code and text chunks (the latter includes non-R +#' code) within an Rmd or Rnw file, and returns these separately. +#' @param lines A character vector of lines from an Rmd or Rnw file. +#' @param filetype A string indicating the filetype - either 'Rmd' or 'Rnw'. 
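The dispatch in `transform_code()` above means `.Rmd` files can be styled end to end with the exported `style_file()`; a minimal sketch (the temporary file and its content are made up for illustration):

    library(styler)
    path <- tempfile(fileext = ".Rmd")
    writeLines(c(
      "---",
      "title: demo",
      "---",
      "",
      "```{r}",
      "1+ 1",
      "```"
    ), path)
    style_file(path)
    readLines(path)   # the chunk body now reads "1 + 1"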
+#' @keywords internal +separate_chunks <- function(lines, filetype) { + r_raw_chunks <- identify_raw_chunks(lines, filetype = filetype) + + r_chunks <- map2( + r_raw_chunks$starts, r_raw_chunks$ends, ~ lines[seq2(.x + 1L, .y - 1L)] + ) + + text_chunks <- map2( + c(1L, r_raw_chunks$ends), c(r_raw_chunks$starts, length(lines)), + ~ lines[seq2(.x, .y)] + ) + list(r_chunks = r_chunks, text_chunks = text_chunks) +} + +#' Identifies raw Rmd or Rnw code chunks #' -#' @importFrom purrr map2 -#' @importFrom rlang seq2 -identify_chunks <- function(lines) { - pattern <- get_knitr_pattern(lines) +#' Raw in the sense that these chunks don't contain pure R code, but they +#' contain a header and footer of markdown. Only code chunks that have an engine +#' whose name matches `engine-pattern` are considered as R code. +#' For every opening, we match the next closing. If there are not the same +#' amount of closing and openings after this matching, we throw an error. +#' Similarly, if there are two openings before a closing, the closing gets +#' matched twice, on which we throw an error. +#' @inheritParams separate_chunks +#' @param engine_pattern A regular expression that must match the engine name. + +#' @keywords internal +identify_raw_chunks <- function(lines, + filetype, + engine_pattern = get_engine_pattern()) { + pattern <- get_knitr_pattern(filetype) if (is.null(pattern$chunk.begin) || is.null(pattern$chunk.end)) { - stop("Unrecognized chunk pattern!", call. = FALSE) + abort("Unrecognized chunk pattern!") } - starts <- grep(pattern$chunk.begin, lines, perl = TRUE) - ends <- grep(pattern$chunk.end, lines, perl = TRUE) - - if (length(starts) != length(ends)) { - stop("Malformed file!", call. = FALSE) + if (filetype == "Rmd") { + starts <- grep( + "^[\t >]*```+\\s*\\{([Rr]( *[ ,].*)?)\\}\\s*$", lines, + perl = TRUE + ) + ends <- grep("^[\t >]*```+\\s*$", lines, perl = TRUE) + ends <- purrr::imap_int(starts, ~ ends[which(ends > .x)[1L]]) %>% + stats::na.omit() + if (length(starts) != length(ends) || anyDuplicated(ends) != 0L) { + abort("Malformed file!") + } + } else if (filetype == "Rnw") { + starts <- grep(pattern$chunk.begin, lines, perl = TRUE) + ends <- grep(pattern$chunk.end, lines, perl = TRUE) + if (length(starts) != length(ends)) { + abort("Malformed file!") + } } - r_chunks <- map2(starts, ends, ~lines[seq2(.x + 1, .y - 1)]) + purrr::map2(starts, ends, finalize_raw_chunks, + filetype = filetype, lines = lines + ) %>% + purrr::compact() %>% + purrr::transpose() +} - text_chunks <- map2(c(1, ends), c(starts, length(lines)), ~lines[seq2(.x, .y)]) +#' Drop start / stop, when formatting is turned off +#' +#' If `tidy = FALSE` (the knitr code chunk default), code is not styled upon +#' knitting. If it is explicitly added to a code chunk, the code chunk is in +#' addition not styled with styler when formatting the document. +#' @keywords internal +finalize_raw_chunks <- function(start, end, filetype, lines) { + header <- gsub(get_knitr_pattern(filetype)$chunk.begin, "\\2", lines[start]) + # matches last , tidy = TRUE, ignoring quotes! + extracted_false <- gsub( + ".*,\\s*\\t*tidy\\s*\\t*=\\s*\\t*(F|FALSE)(\\s+.*|\\t+.*|,+.*|)$", + "\\1", + header + ) + if (extracted_false %in% c("F", "FALSE")) { + NULL + } else { + list(starts = start, ends = end) + } +} - lst(r_chunks, text_chunks) +#' What's the engine pattern for rmd code chunks? +#' +#' The function returns the regular expression pattern that identifies +#' all r engines in Rmd chunks. Defaults to `[Rr]`. 
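The `tidy = FALSE` detection in `finalize_raw_chunks()` above can be checked against a sample chunk header (regex copied verbatim from the function; the header string is made up):

    header <- "r my-chunk, echo = TRUE, tidy = FALSE"
    gsub(
      ".*,\\s*\\t*tidy\\s*\\t*=\\s*\\t*(F|FALSE)(\\s+.*|\\t+.*|,+.*|)$",
      "\\1", header
    )
    # returns "FALSE", so the chunk is excluded from styling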
You probably only want to +#' change this if you create a knitr engine that processes R code but is not +#' the default engine `r`. +#' The pattern must be followed by a space (in the case the chunk is given +#' a name), a comma (if no name is given but further options are passed to the +#' engine) or a closing curly brace (in case no option and no name is given to +#' the chunk). +#' @keywords internal +get_engine_pattern <- function() { + "[rR]" } #' Get chunk pattern #' #' Determine a regex pattern for identifying R code chunks. #' -#' @inheritParams identify_chunks -get_knitr_pattern <- function(lines) { - knitr::all_patterns[["md"]] +#' @inheritParams separate_chunks +#' @keywords internal +get_knitr_pattern <- function(filetype) { + if (filetype == "Rnw") { + knitr::all_patterns[["rnw"]] + } else if (filetype == "Rmd") { + knitr::all_patterns[["md"]] + } } diff --git a/R/transform-files.R b/R/transform-files.R index 37daadb77..3fb769fbc 100644 --- a/R/transform-files.R +++ b/R/transform-files.R @@ -1,120 +1,335 @@ #' Transform files with transformer functions #' #' `transform_files` applies transformations to file contents and writes back -#' the result. +#' the result. #' @param files A character vector with paths to the file that should be #' transformed. #' @inheritParams make_transformer +#' @inheritParams transform_file #' @section Value: #' Invisibly returns a data frame that indicates for each file considered for -#' styling whether or not it was actually changed. -transform_files <- function(files, transformers) { - transformer <- make_transformer(transformers) - max_char <- min(max(nchar(files), 0), 80) - if (length(files) > 0L) { - cat("Styling ", length(files), " files:\n") +#' styling whether or not it was actually changed (or would be changed when +#' `dry` is not "off"). +#' @keywords internal +transform_files <- function(files, + transformers, + include_roxygen_examples, + base_indention, + dry) { + transformer <- make_transformer( + transformers, include_roxygen_examples, base_indention + ) + max_char <- min(max(nchar(files), 0L), getOption("width")) + len_files <- length(files) + if (len_files > 0L && !getOption("styler.quiet", FALSE)) { + cat("Styling ", len_files, " files:\n") } - changed <- map_lgl( - files, transform_file, fun = transformer, max_char_path = max_char + changed <- map_lgl(files, transform_file, + fun = transformer, max_char_path = max_char, dry = dry ) communicate_summary(changed, max_char) communicate_warning(changed, transformers) - data_frame(file = files, changed = changed) + new_styler_df(list(file = files, changed = changed)) } #' Transform a file and output a customized message #' -#' Wraps `enc::transform_lines_enc()` and outputs customized messages. +#' Transforms file contents and outputs customized messages. #' @param max_char_path The number of characters of the longest path. Determines #' the indention level of `message_after`. #' @param message_before The message to print before the path. #' @param message_after The message to print after the path. #' @param message_after_if_changed The message to print after `message_after` if #' any file was transformed. -#' @inheritParams enc::transform_lines_enc -#' @param ... Further arguments passed to `enc::transform_lines_enc()`. +#' @inheritParams transform_code +#' @param ... Further arguments passed to [transform_utf8()]. 
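The `dry` argument threaded through `transform_files()` / `transform_file()` here is user-facing via functions such as `style_file()`; a sketch of the behaviour (temporary file made up for illustration):

    library(styler)
    path <- tempfile(fileext = ".R")
    writeLines("1+ 1", path)
    style_file(path, dry = "on")$changed    # TRUE: styling *would* change the file
    identical(readLines(path), "1+ 1")      # TRUE: nothing was written back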
+#' @keywords internal transform_file <- function(path, fun, - verbose = FALSE, max_char_path, message_before = "", message_after = " [DONE]", message_after_if_changed = " *", - ...) { - char_after_path <- nchar(message_before) + nchar(path) + 1 - max_char_after_message_path <- nchar(message_before) + max_char_path + 1 + ..., + dry) { + char_after_path <- nchar(message_before) + nchar(path) + 1L + max_char_after_message_path <- nchar(message_before) + max_char_path + 1L n_spaces_before_message_after <- max_char_after_message_path - char_after_path - cat( - message_before, - path, - rep_char(" ", max(0, n_spaces_before_message_after)), - append = FALSE - ) - changed <- transform_code(path, fun = fun, verbose = verbose, ...) + if (!getOption("styler.quiet", FALSE)) { + cat( + message_before, path, + rep_char(" ", max(0L, n_spaces_before_message_after)), + append = FALSE + ) + } + changed <- transform_code(path, fun = fun, ..., dry = dry) - bullet <- ifelse(is.na(changed), - "warning", - ifelse(changed, - "info", + bullet <- if (is.na(changed)) { + "warning" + } else { + if (changed) { + "info" + } else { "tick" - ) - ) + } + } - cli::cat_bullet( - bullet = bullet - ) + if (!getOption("styler.quiet", FALSE)) { + cli::cat_bullet(bullet = bullet) + } invisible(changed) } #' Closure to return a transformer function #' #' This function takes a list of transformer functions as input and -#' returns a function that can be applied to character strings -#' that should be transformed. +#' returns a function that can be applied to character strings +#' that should be transformed. #' @param transformers A list of transformer functions that operate on flat #' parse tables. -make_transformer <- function(transformers) { +#' @param include_roxygen_examples Whether or not to style code in roxygen +#' examples. +#' @inheritParams parse_transform_serialize_r +#' @keywords internal +make_transformer <- function(transformers, + include_roxygen_examples, + base_indention, + warn_empty = TRUE) { force(transformers) + assert_transformers(transformers) + function(text) { - transformed_text <- parse_transform_serialize(text, transformers) - transformed_text + text <- ensure_last_n_empty(trimws(text, which = "right"), n = 0L) + should_use_cache <- cache_is_activated() + + if (should_use_cache) { + use_cache <- is_cached( + text, transformers, + cache_more_specs( + include_roxygen_examples = include_roxygen_examples, + base_indention = base_indention + ) + ) + } else { + use_cache <- FALSE + } + + if (!use_cache) { + transformed_code <- text %>% + parse_transform_serialize_r(transformers, + base_indention = base_indention, + warn_empty = warn_empty + ) + + if (include_roxygen_examples) { + transformed_code <- parse_transform_serialize_roxygen( + transformed_code, + transformers = transformers, + base_indention = base_indention + ) + } + + if (should_use_cache) { + cache_write( + transformed_code, transformers, + cache_more_specs(include_roxygen_examples, base_indention) + ) + } + + transformed_code + } else { + text + } + } +} + +#' Parse, transform and serialize roxygen comments +#' +#' Splits `text` into roxygen code examples and non-roxygen code examples and +#' then maps over these examples by applying +#' [style_roxygen_code_example()]. +#' @section Hierarchy: +#' Styling involves splitting roxygen example code into segments, and segments +#' into snippets. This describes the process for input of +#' [parse_transform_serialize_roxygen()]: +#' +#' - Splitting code into roxygen example code and other code. 
Downstream, +#' we are only concerned about roxygen code. See +#' [parse_transform_serialize_roxygen()]. +#' - Every roxygen example code can have zero or more +#' dontrun / dontshow / donttest sequences. We next create segments of roxygen +#' code examples that contain at most one of these. See +#' [style_roxygen_code_example()]. +#' - We further split the segment that contains at most one dont* sequence into +#' snippets that are either don* or not. See +#' [style_roxygen_code_example_segment()]. +#' +#' Finally, that we have roxygen code snippets that are either dont* or not, +#' we style them in [style_roxygen_example_snippet()] using +#' [parse_transform_serialize_r()]. +#' @keywords internal +parse_transform_serialize_roxygen <- function(text, + transformers, + base_indention) { + roxygen_seqs <- identify_start_to_stop_of_roxygen_examples_from_text(text) + if (length(roxygen_seqs) < 1L) { + return(text) + } + if (!rlang::is_installed("roxygen2")) { + rlang::abort(paste0( + "To style roxygen code examples, you need to have the package ", + "`{roxygen2}` installed. To exclude them from styling, set ", + "`include_roxygen_examples = FALSE`." + )) + } + split_segments <- split_roxygen_segments(text, unlist(roxygen_seqs)) + map_at(split_segments$separated, split_segments$selectors, + style_roxygen_code_example, + transformers = transformers, + base_indention = base_indention + ) %>% + flatten_chr() +} + + +#' Split text into roxygen and non-roxygen example segments +#' +#' @param text Roxygen comments +#' @param roxygen_examples Integer sequence that indicates which lines in `text` +#' are roxygen examples. Most conveniently obtained with +#' [identify_start_to_stop_of_roxygen_examples_from_text]. +#' @return +#' A list with two elements: +#' +#' * A list that contains elements grouped into roxygen and non-roxygen +#' sections. This list is named `separated`. +#' * An integer vector with the indices that correspond to roxygen code +#' examples in `separated`. +#' +#' @keywords internal +split_roxygen_segments <- function(text, roxygen_examples) { + if (is.null(roxygen_examples)) { + return(list(separated = list(text), selectors = NULL)) } + all_lines <- seq2(1L, length(text)) + active_segment <- as.integer(all_lines %in% roxygen_examples) + segment_id <- cumsum(abs(c(0L, diff(active_segment)))) + 1L + separated <- vec_split(text, factor(segment_id))[[2L]] + restyle_selector <- if (roxygen_examples[1L] == 1L) { + odd_index + } else { + even_index + } + + list(separated = separated, selectors = restyle_selector(separated)) } #' Parse, transform and serialize text #' #' Wrapper function for the common three operations. +#' @param warn_empty Whether or not a warning should be displayed when `text` +#' does not contain any tokens. +#' @param is_roxygen_code_example Is code a roxygen examples block? 
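What `parse_transform_serialize_roxygen()` adds on top of plain code styling is easiest to see via `style_text()`; a sketch assuming `{roxygen2}` is installed (as required above) and `include_roxygen_examples = TRUE`, the default:

    library(styler)
    style_text(c(
      "#' Add one to x",
      "#'",
      "#' @examples",
      "#' add_one( 1)",
      "add_one <- function(x) x+1"
    ))
    # both the roxygen example line and the function definition are re-styled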
#' @inheritParams compute_parse_data_nested -#' @inheritParams apply_transformers -parse_transform_serialize <- function(text, transformers) { +#' @inheritParams parse_transform_serialize_r_block +#' @seealso [parse_transform_serialize_roxygen()] + +#' @keywords internal +parse_transform_serialize_r <- function(text, + transformers, + base_indention, + warn_empty = TRUE, + is_roxygen_code_example = FALSE) { + more_specs <- cache_more_specs( + include_roxygen_examples = TRUE, base_indention = base_indention + ) + text <- assert_text(text) - pd_nested <- compute_parse_data_nested(text) - start_line <- find_start_line(pd_nested) - if (nrow(pd_nested) == 0) { - warning( - "Text to style did not contain any tokens. Returning empty string.", - call. = FALSE - ) + if (identical(unique(text), "")) { + if (warn_empty) { + warn("Text to style did not contain any tokens. Returning empty string.") + } return("") } - transformed_pd <- apply_transformers(pd_nested, transformers) - flattened_pd <- post_visit(transformed_pd, list(extract_terminals)) %>% - enrich_terminals(transformers$use_raw_indention) %>% - apply_ref_indention() %>% - set_regex_indention( - pattern = transformers$reindention$regex_pattern, - target_indention = transformers$reindention$indention, - comments_only = transformers$reindention$comments_only + pd_nested <- compute_parse_data_nested(text, transformers, more_specs) + transformers <- transformers_drop( + pd_nested$text[!pd_nested$is_cached], + transformers + ) + + strict <- transformers$more_specs_style_guide$strict %||% TRUE + pd_split <- vec_split(pd_nested, pd_nested$block)[[2L]] + pd_blank <- find_blank_lines_to_next_block(pd_nested) + + text_out <- vector("list", length(pd_split)) + for (i in seq_along(pd_split)) { + # if the first block: only preserve for roxygen or not strict + # if a later block: always preserve line breaks + start_line <- if (i == 1L) { + if (is_roxygen_code_example || !strict) pd_blank[[i]] else 1L + } else { + pd_blank[[i]] + } + + + text_out[[i]] <- parse_transform_serialize_r_block( + pd_split[[i]], + start_line = start_line, + transformers = transformers, + base_indention = base_indention ) - serialized_transformed_text <- - serialize_parse_data_flattened(flattened_pd, start_line = start_line) + } + + text_out <- unlist(text_out, use.names = FALSE) + + verify_roundtrip( + text, text_out, + parsable_only = !parse_tree_must_be_identical(transformers) + ) - if (can_verify_roundtrip(transformers)) { - verify_roundtrip(text, serialized_transformed_text) + text_out <- convert_newlines_to_linebreaks(text_out) + if (cache_is_activated()) { + cache_by_expression(text_out, transformers, more_specs = more_specs) } - serialized_transformed_text + text_out +} + + +#' Remove transformers that are not needed +#' +#' The goal is to speed up styling by removing all rules that are only +#' applicable in contexts that don't occur often, e.g. for most code, we don't +#' expect ";" to be in it, so we don't need to apply `resolve_semicolon()` on +#' every *nest*. +#' @param text Text to parse. Can also be the column `text` of the output of +#' [compute_parse_data_nested()], where each element is a token (instead of a +#' line). +#' @param transformers the transformers. 
+#' @keywords internal +#' @seealso specify_transformers_drop +transformers_drop <- function(text, transformers) { + if (length(text) > 0L) { + is_colon <- text == ";" + if (any(is_colon)) { + # ; can only be parsed when on the same line as other token, not the case + # here since text is output of compute_parse_data_nested. + text <- c(text[!is_colon], "1;") + } + token <- unique(tokenize(text)$token) + } else { + token <- character() + } + for (scope in c("line_break", "space", "token", "indention")) { + rules <- transformers$transformers_drop[[scope]] + for (rule in names(rules)) { + if (!any(rules[[rule]] %in% token)) { + transformers[[scope]][rule] <- NULL + } + } + } + transformers } #' Apply transformers to a parse table @@ -131,22 +346,21 @@ parse_transform_serialize <- function(text, transformers) { #' hence line breaks must be modified first). #' * spacing rules (must be after line-breaks and updating newlines and #' multi-line). +#' * indention. #' * token manipulation / replacement (is last since adding and removing tokens #' will invalidate columns token_after and token_before). #' * Update indention reference (must be after line breaks). #' #' @param pd_nested A nested parse table. #' @param transformers A list of *named* transformer functions -#' @importFrom purrr flatten +#' @keywords internal apply_transformers <- function(pd_nested, transformers) { - transformed_line_breaks <- pre_visit( - pd_nested, - c(transformers$initialize, transformers$line_break) - ) - transformed_updated_multi_line <- post_visit( - transformed_line_breaks, - c(set_multi_line, update_newlines) + pd_nested, + c( + transformers$initialize, transformers$line_break, set_multi_line, + if (length(transformers$line_break) != 0L) update_newlines + ) ) transformed_all <- pre_visit( @@ -156,9 +370,9 @@ apply_transformers <- function(pd_nested, transformers) { transformed_absolute_indent <- context_to_terminals( transformed_all, - outer_lag_newlines = 0, - outer_indent = 0, - outer_spaces = 0, + outer_lag_newlines = 0L, + outer_indent = 0L, + outer_spaces = 0L, outer_indention_refs = NA ) transformed_absolute_indent @@ -166,39 +380,54 @@ apply_transformers <- function(pd_nested, transformers) { -#' Check whether a roundtrip verification can be carried out +#' Check whether a round trip verification can be carried out #' #' If scope was set to "line_breaks" or lower (compare [tidyverse_style()]), #' we can compare the expression before and after styling and return an error if #' it is not the same. #' @param transformers The list of transformer functions used for styling. #' Needed for reverse engineering the scope. -can_verify_roundtrip <- function(transformers) { - is.null(transformers$token) +#' @keywords internal +parse_tree_must_be_identical <- function(transformers) { + length(transformers$token) == 0L } #' Verify the styling #' #' If scope was set to "line_breaks" or lower (compare [tidyverse_style()]), #' we can compare the expression before and after styling and return an error if -#' it is not the same. Note that this method ignores comments and no -#' verification can be conducted if scope > "line_breaks". +#' it is not the same. +#' If that's not possible, a weaker guarantee that we want to give is that the +#' resulting code is parsable. +#' @param parsable_only If we should only check for the code to be parsable. 
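As a rough sketch of the pruning described above (which rules actually get removed depends on the style guide's `transformers_drop` specification, so the counts are only indicative):

sg <- styler::tidyverse_style()
# the code below contains no ";", "{", "if", etc., so rules keyed to such
# tokens can be dropped before visiting the nest
slim <- styler:::transformers_drop(c("x <- 1", "f(x)"), sg)
c(full = length(sg$line_break), pruned = length(slim$line_break))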
#' @inheritParams expressions_are_identical +#' @section Limitation: +#' Note that this method ignores roxygen code examples and +#' comments and no verification can be conducted if tokens are in the styling +#' scope. + #' @examples #' styler:::verify_roundtrip("a+1", "a + 1") #' styler:::verify_roundtrip("a+1", "a + 1 # comments are dropped") -#' \dontrun{ -#' styler:::verify_roundtrip("a+1", "b - 3") -#' } -verify_roundtrip <- function(old_text, new_text) { - if (!expressions_are_identical(old_text, new_text)) { +#' try(styler:::verify_roundtrip("a+1", "b - 3")) +#' @keywords internal +verify_roundtrip <- function(old_text, new_text, parsable_only = FALSE) { + if (parsable_only) { + rlang::try_fetch( + parse_safely(new_text), + error = function(e) { + rlang::abort(paste0( + "Styling resulted in code that isn't parsable. This should not ", + "happen." + ), .internal = TRUE) + } + ) + } else if (!expressions_are_identical(old_text, new_text)) { msg <- paste( "The expression evaluated before the styling is not the same as the", - "expression after styling. This should not happen. Please file a", - "bug report on GitHub (https://github.com/r-lib/styler/issues)", - "using a reprex." + "expression after styling. This should not happen." ) - stop(msg, call. = FALSE) + abort(msg, .internal = TRUE) } } @@ -206,9 +435,10 @@ verify_roundtrip <- function(old_text, new_text) { #' #' @param old_text The initial expression in its character representation. #' @param new_text The styled expression in its character representation. +#' @keywords internal expressions_are_identical <- function(old_text, new_text) { identical( - parse(text = old_text, keep.source = FALSE), - parse(text = new_text, keep.source = FALSE) + parse_safely(old_text, keep.source = FALSE), + parse_safely(new_text, keep.source = FALSE) ) } diff --git a/R/ui-caching.R b/R/ui-caching.R new file mode 100644 index 000000000..9aace1eb1 --- /dev/null +++ b/R/ui-caching.R @@ -0,0 +1,163 @@ +#' Clear the cache +#' +#' Clears the cache that stores which files are already styled. You won't be +#' able to undo this. Note that the file corresponding to the cache (a folder +#' on your file system) won't be deleted, but it will be empty after calling +#' `cache_clear`. +#' @param cache_name The name of the styler cache to use. If +#' `NULL`, the option "styler.cache_name" is considered which defaults to +#' the version of styler used. +#' @details +#' Each version of styler has its own cache by default, because styling is +#' potentially different with different versions of styler. +#' @param ask Whether or not to interactively ask the user again. +#' @family cache managers +#' @export +cache_clear <- function(cache_name = NULL, ask = TRUE) { + path_cache <- cache_find_path(cache_name) + R.cache::clearCache(path_cache, prompt = ask) + cache_deactivate(verbose = FALSE) +} + + +#' Remember the past to be quicker in the future +#' +#' Caching makes styler faster on repeated styling and is shared across all APIs +#' (e.g. `style_text()` and Addin). That means if you style code that already +#' complies to a style guide and you have previously styled that code, it will +#' be quicker. +#' +#' @section Configuring the cache: +#' +#' To comply with the CRAN policy, \{styler\} will by default clean up cache files +#' that are older than 6 days. This implies that you loose the benefit of the cache +#' for the files not styled in the last 6 days. 
+#' +#' If you want to avoid this, i.e., if you want the cache to last longer, you can use the +#' R option `styler.cache_root` to opt for an indefinitely long-lived cache by setting it to +#' `options(styler.cache_root = "styler-perm")`. +#' +#' If you are happy with the cache being cleared after 6 days, you can confirm the default and +#' silence this message by setting it instead to `options(styler.cache_root = "styler")`. +#' +#' You can make this change in your `.Rprofile` using `usethis::edit_r_profile()`. +#' +#' @section Manage the cache: +#' See [cache_info()],[cache_activate()] or [cache_clear()] for utilities to +#' manage the cache. You can deactivate it altogether with [cache_deactivate()]. +#' Since we leverage `{R.cache}` to manage the cache, you can also use any +#' `{R.cache}` functionality to manipulate it. +#' +#' In some cases, you want to use a non-standard cache location. In +#' that situation, you can set the path to the cache with the R option +#' `R.cache.rootPath` or the environment variable `R_CACHE_ROOTPATH` to an +#' existent path before you call the styler API. +#' +#' @section Invalidation: +#' The cache is specific to a version of styler by default, because different +#' versions potentially format code differently. This means after upgrading +#' styler or a style guide you use, the cache will be re-built. +#' +#' @section Mechanism and size: +#' The cache works by storing hashed output code as a whole and by expression, +#' which is why it takes zero space on disk (the cache is a directory with +#' empty files which have the hash of output code as name). +#' +#' The cache literally takes zero space on your disk, only the inode, and you +#' can always manually clean up with [cache_clear()] or just go to the +#' directory where the cache lives (find it with [cache_info()]) and manually +#' delete files. +#' +#' @section Using a cache for styler in CI/CD: +#' If you want to set up caching in a CI/CD pipeline, we suggest to set the +#' `{R.cache}` root path to a directory for which you have the cache enabled. +#' This can often be set in config files of CI/CD tools, e.g. see the +#' [Travis documentation on caching](https://docs.travis-ci.com/user/caching). +#' +#' @name caching +#' @family cache managers +NULL + +#' Show information about the styler cache +#' +#' Gives information about the cache. Note that the size consumed by the cache +#' will always be displayed as zero because all the cache does is creating an +#' empty file of size 0 bytes for every cached expression. The inode is +#' excluded from this displayed size but negligible. +#' @param cache_name The name of the cache for which to show details. If +#' `NULL`, the active cache is used. If none is active the cache corresponding +#' to the installed styler version is used. +#' @param format Either "lucid" for a summary emitted with [base::cat()], +#' "tabular" for a tabular summary from [base::file.info()] or "both" for +#' both. 
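Put concretely, the two settings discussed above go into your `.Rprofile`, and the cache managers listed there can be called interactively:

# keep the cache indefinitely instead of the 6-day default:
options(styler.cache_root = "styler-perm")
# or confirm the default and silence the start-up message:
options(styler.cache_root = "styler")

# inspect, switch, or turn off the cache:
styler::cache_info(format = "lucid")
styler::cache_activate()
styler::cache_deactivate()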
+#' @family cache managers +#' @export +cache_info <- function(cache_name = NULL, format = "both") { + rlang::arg_match(format, c("tabular", "lucid", "both")) + path_cache <- cache_find_path(cache_name) + files <- list.files(path_cache, full.names = TRUE) + file_info <- file.info(files) + + tbl <- styler_df( + n = nrow(file_info), + size = sum(file_info$size), + last_modified = suppressWarnings(max(file_info$mtime)), + created = file.info(path_cache)$ctime, + location = path_cache, + activated = cache_is_activated(cache_name), + stringsAsFactors = FALSE + ) + + if (any(c("lucid", "both") == format)) { + cat( + "Size:\t\t", tbl$size, " bytes (", tbl$n, " cached expressions)", + "\nLast modified:\t", as.character(tbl$last_modified), + "\nCreated:\t", as.character(tbl$created), + "\nLocation:\t", path_cache, + "\nActivated:\t", tbl$activated, + "\n", + sep = "" + ) + } + if (format == "tabular") { + tbl + } else if (format == "both") { + invisible(tbl) + } +} + +#' Activate or deactivate the styler cache +#' +#' Helper functions to control the behavior of caching. Simple wrappers around +#' [base::options()]. +#' @inheritParams cache_clear +#' @param verbose Whether or not to print an informative message about what the +#' function is doing. +#' +#' @family cache managers +#' @export +cache_activate <- function(cache_name = NULL, + verbose = !getOption("styler.quiet", FALSE)) { + options("styler.cache_name" = cache_name %||% styler_version) + path <- cache_find_path(cache_name) + + if (verbose) { + cat( + "Using cache ", cache_get_name(), " at ", + path, ".\n", + sep = "" + ) + } + + invisible(path) +} + +#' @rdname cache_activate +#' @export +cache_deactivate <- function(verbose = !getOption("styler.quiet", FALSE)) { + options("styler.cache_name" = NULL) + + if (verbose) { + cat("Deactivated cache.\n") + } +} diff --git a/R/ui-styling.R b/R/ui-styling.R new file mode 100644 index 000000000..bb4cc136c --- /dev/null +++ b/R/ui-styling.R @@ -0,0 +1,359 @@ +#' Prettify R source code +#' +#' Performs various substitutions in all `.R` files in a package +#' (code and tests), `.Rmd`, `.Rmarkdown` and/or +#' `.qmd`, `.Rnw` files (vignettes and readme). +#' Carefully examine the results after running this function! +#' +#' @param pkg Path to a (subdirectory of an) R package. +#' @param ... Arguments passed on to the `style` function, +#' see [tidyverse_style()] for the default argument. +#' @param style A function that creates a style guide to use, by default +#' [`tidyverse_style`]. Not used +#' further except to construct the argument `transformers`. See +#' [style_guides()] for details. +#' @param transformers A set of transformer functions. This argument is most +#' conveniently constructed via the `style` argument and `...`. See +#' 'Examples'. +#' @inheritParams prettify_pkg +#' @section Warning: +#' This function overwrites files (if styling results in a change of the +#' code to be formatted and `dry = "off"`). It is strongly suggested to only +#' style files that are under version control or to create a backup copy. +#' +#' We suggest to first style with `scope < "tokens"` and inspect and commit +#' changes, because these changes are guaranteed to leave the abstract syntax +#' tree (AST) unchanged. See section 'Round trip validation' for details. +#' +#' Then, we suggest to style with `scope = "tokens"` (if desired) and carefully +#' inspect the changes to make sure the AST is not changed in an unexpected way +#' that invalidates code. 
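The workflow suggested in the 'Warning' section, spelled out (scopes are illustrative):

# first pass: tokens are not in scope, so styler verifies the AST is unchanged
style_pkg(scope = "line_breaks")
# inspect and commit the changes, then (optionally) also style tokens
style_pkg(scope = "tokens")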
+#' @section Round trip validation: +#' The following section describes when and how styling is guaranteed to +#' yield correct code. +#' +#' If tokens are not in the styling scope (as specified with the `scope` +#' argument), no tokens are changed and the abstract syntax tree (AST) should +#' not change. +#' Hence, it is possible to validate the styling by comparing whether the parsed +#' expression before and after styling have the same AST. +#' This comparison omits roxygen code examples and comments. styler throws an +#' error if the AST has changed through styling. +#' +#' Note that if tokens are to be styled, such a comparison is not conducted because +#' the AST might well change and such a change is intended. There is no way +#' styler can validate styling, that is why we inform the user to carefully +#' inspect the changes. +#' +#' See section 'Warning' for a good strategy to apply styling safely. +#' @inheritSection transform_files Value +#' @family stylers +#' @examplesIf FALSE +#' # the following is identical (because of ... and defaults) +#' # but the first is most convenient: +#' style_pkg(strict = TRUE) +#' style_pkg(style = tidyverse_style, strict = TRUE) +#' style_pkg(transformers = tidyverse_style(strict = TRUE)) +#' +#' # more options from `tidyverse_style()` +#' style_pkg( +#' scope = "line_breaks", +#' math_token_spacing = specify_math_token_spacing(zero = "'+'") +#' ) +#' +#' # don't write back and fail if input is not already styled +#' style_pkg("/path/to/pkg/", dry = "fail") +#' @export +style_pkg <- function(pkg = ".", + ..., + style = tidyverse_style, + transformers = style(...), + filetype = c("R", "Rprofile", "Rmd", "Rmarkdown", "Rnw", "Qmd"), + exclude_files = c("R/RcppExports.R", "R/cpp11.R"), + exclude_dirs = c("packrat", "renv"), + include_roxygen_examples = TRUE, + base_indention = 0L, + dry = "off") { + pkg_root <- rprojroot::find_package_root_file(path = pkg) + changed <- withr::with_dir(pkg_root, prettify_pkg( + transformers, + filetype, exclude_files, exclude_dirs, include_roxygen_examples, + base_indention, + dry + )) + invisible(changed) +} + +#' Prettify a package +#' +#' @param filetype Vector of file extensions indicating which file types should +#' be styled. Case is ignored, and the `.` is optional, e.g. `c(".R", +#' ".Rmd")`, or `c("r", "rmd")`. Supported values (after standardization) are: +#' "r", "rprofile", "rmd", "rmarkdown", "rnw", "qmd". Rmarkdown is treated as +#' Rmd. +#' @param exclude_files Character vector with paths to files that should be +#' excluded from styling. +#' @param exclude_dirs Character vector with directories to exclude +#' (recursively). Note that the default values were set for consistency with +#' [style_dir()] and as these directories are anyways not styled. 
+#' @inheritParams transform_files +#' @keywords internal +prettify_pkg <- function(transformers, + filetype, + exclude_files, + exclude_dirs, + include_roxygen_examples, + base_indention, + dry) { + filetype_ <- set_and_assert_arg_filetype(filetype) + r_files <- rprofile_files <- vignette_files <- readme <- NULL + exclude_files <- c( + set_arg_paths(exclude_files), + dir_without_.(exclude_dirs, pattern = map_filetype_to_pattern(filetype)) + ) + if ("\\.r" %in% filetype_) { + r_files <- dir_without_.( + path = c("R", "tests", "data-raw", "demo"), + pattern = "\\.r$" + ) + } + + if ("\\.rprofile" %in% filetype_) { + rprofile_files <- dir_without_.( + path = ".", pattern = "^\\.rprofile$" + ) + } + if ("\\.rmd" %in% filetype_) { + vignette_files <- dir_without_.( + path = "vignettes", pattern = "\\.rmd$" + ) + readme <- dir_without_.( + path = ".", + pattern = "^readme\\.rmd$" + ) + } + + if ("\\.rmarkdown" %in% filetype_) { + vignette_files <- append( + vignette_files, + dir_without_.( + path = "vignettes", pattern = "\\.rmarkdown$" + ) + ) + readme <- append( + readme, + dir_without_.( + path = ".", pattern = "^readme\\.rmarkdown$" + ) + ) + } + + if ("\\.rnw" %in% filetype_) { + vignette_files <- append( + vignette_files, + dir_without_.( + path = "vignettes", pattern = "\\.rnw$" + ) + ) + } + + if ("\\.qmd" %in% filetype_) { + vignette_files <- append( + vignette_files, + dir_without_.( + path = ".", + pattern = "\\.qmd$" + ) + ) + } + + files <- setdiff( + c(r_files, rprofile_files, vignette_files, readme), + exclude_files + ) + transform_files(files, + transformers = transformers, + include_roxygen_examples = include_roxygen_examples, + base_indention = base_indention, + dry = dry + ) +} + +#' Style a string +#' +#' Styles a character vector. Each element of the character vector corresponds +#' to one line of code. +#' @param text A character vector with text to style. +#' @inheritParams style_pkg +#' @family stylers +#' @examples +#' style_text("call( 1)") +#' style_text("1 + 1", strict = FALSE) +#' +#' # the following is identical (because of ... and defaults) +#' # but the first is most convenient: +#' style_text("a<-3++1", strict = TRUE) +#' style_text("a<-3++1", style = tidyverse_style, strict = TRUE) +#' style_text("a<-3++1", transformers = tidyverse_style(strict = TRUE)) +#' +#' # more invasive scopes include less invasive scopes by default +#' style_text("a%>%b", scope = "spaces") +#' style_text("a%>%b; a", scope = "line_breaks") +#' style_text("a%>%b; a", scope = "tokens") +#' +#' # opt out with I() to only style specific levels +#' style_text("a%>%b; a", scope = I("tokens")) +#' @export +style_text <- function(text, + ..., + style = tidyverse_style, + transformers = style(...), + include_roxygen_examples = TRUE, + base_indention = 0L) { + transformer <- make_transformer(transformers, + include_roxygen_examples = include_roxygen_examples, + base_indention = base_indention + ) + styled_text <- transformer(text) + construct_vertical(styled_text) +} + +#' Prettify arbitrary R code +#' +#' Performs various substitutions in all `.R`, `.Rmd`, `.Rmarkdown`, `qmd` +#' and/or `.Rnw` files in a directory (by default only `.R` files are styled - +#' see `filetype` argument). +#' Carefully examine the results after running this function! +#' @param path Path to a directory with files to transform. +#' @param recursive A logical value indicating whether or not files in +#' sub directories of `path` should be styled as well. 
+#' @param exclude_dirs Character vector with directories to exclude +#' (recursively). +##' @inheritParams style_pkg +#' @inheritSection transform_files Value +#' @inheritSection style_pkg Warning +#' @inheritSection style_pkg Round trip validation +#' @family stylers +#' @examplesIf FALSE +#' style_dir("path/to/dir", filetype = c("rmd", ".R")) +#' +#' # the following is identical (because of ... and defaults) +#' # but the first is most convenient: +#' style_dir(strict = TRUE) +#' style_dir(style = tidyverse_style, strict = TRUE) +#' style_dir(transformers = tidyverse_style(strict = TRUE)) +#' @export +style_dir <- function(path = ".", + ..., + style = tidyverse_style, + transformers = style(...), + filetype = c("R", "Rprofile", "Rmd", "Rmarkdown", "Rnw", "Qmd"), + recursive = TRUE, + exclude_files = NULL, + exclude_dirs = c("packrat", "renv"), + include_roxygen_examples = TRUE, + base_indention = 0L, + dry = "off") { + changed <- withr::with_dir( + path, prettify_any( + transformers, + filetype, recursive, exclude_files, exclude_dirs, + include_roxygen_examples, base_indention, dry + ) + ) + invisible(changed) +} + +# nolint: start +#' Prettify R code in current working directory +#' +#' This is a helper function for style_dir. +#' @inheritParams style_pkg +#' @param recursive A logical value indicating whether or not files in +#' subdirectories should be styled as well. +#' @keywords internal +prettify_any <- function(transformers, + filetype, + recursive, + exclude_files, + exclude_dirs, + include_roxygen_examples, + base_indention = 0L, + dry) { + exclude_files <- set_arg_paths(exclude_files) + exclude_dirs <- exclude_dirs %>% + list.dirs(recursive = TRUE, full.names = TRUE) %>% + set_arg_paths() + files_root <- dir( + path = ".", pattern = map_filetype_to_pattern(filetype), + ignore.case = TRUE, recursive = FALSE, all.files = TRUE + ) + if (recursive) { + files_other <- list.dirs(full.names = FALSE, recursive = TRUE) %>% + setdiff(c("", exclude_dirs)) %>% + dir_without_.( + pattern = map_filetype_to_pattern(filetype), + recursive = FALSE + ) + } else { + files_other <- NULL + } + + transform_files( + setdiff(c(files_root, files_other), exclude_files), + transformers, + include_roxygen_examples, + base_indention, + dry + ) +} +# nolint: end + +#' Style files with R source code +#' +#' Performs various substitutions in the files specified. +#' Carefully examine the results after running this function! +#' @section Encoding: +#' UTF-8 encoding is assumed. Please convert your code to UTF-8 if necessary +#' before applying styler. +#' @param path A character vector with paths to files to style. Supported +#' extensions: `.R`, `.Rmd`, `.Rmarkdown`, `.qmd` and `.Rnw`. +#' @inheritParams style_pkg +#' @inheritSection transform_files Value +#' @inheritSection style_pkg Warning +#' @inheritSection style_pkg Round trip validation +#' @examples +#' file <- tempfile("styler", fileext = ".R") +#' writeLines("1++1", file) +#' +#' # the following is identical (because of ... and defaults), +#' # but the first is most convenient: +#' style_file(file, strict = TRUE) +#' style_file(file, style = tidyverse_style, strict = TRUE) +#' style_file(file, transformers = tidyverse_style(strict = TRUE)) +#' +#' # only style indention and less invasive levels (i.e. 
spaces) +#' style_file(file, scope = "indention", strict = TRUE) +#' # name levels explicitly to not style less invasive levels +#' style_file(file, scope = I(c("tokens", "spaces")), strict = TRUE) +#' +#' readLines(file) +#' unlink(file) +#' @family stylers +#' @export +style_file <- function(path, + ..., + style = tidyverse_style, + transformers = style(...), + include_roxygen_examples = TRUE, + base_indention = 0L, + dry = "off") { + path <- set_arg_paths(path) + changed <- transform_files(path, + transformers = transformers, + include_roxygen_examples = include_roxygen_examples, + base_indention = base_indention, + dry = dry + ) + invisible(changed) +} diff --git a/R/unindent.R b/R/unindent.R index 53be19c03..24cf1f2d5 100644 --- a/R/unindent.R +++ b/R/unindent.R @@ -1,32 +1,39 @@ #' Unindent a child if necessary #' -#' check whether any of the children of `pd` has `token` on the same line as -#' the closing `token` of pd. If so, unindent that token. +#' check whether any of the children of `pd` has `token` on the same line as the +#' closing `token` of pd. If so, unindent that token. #' @inheritParams unindent_child -#' @importFrom purrr map -#' @importFrom rlang seq2 +#' @keywords internal set_unindention_child <- function(pd, token = "')'", unindent_by) { - if (all(pd$indent == 0) || all(pd$terminal)) return(pd) + if (all(pd$indent == 0L) || all(pd$terminal)) { + return(pd) + } closing <- which(pd$token %in% token) - if (length(closing) == 0 || pd$lag_newlines[closing] > 0) return(pd) + if (length(closing) == 0L || pd$lag_newlines[closing] > 0L) { + return(pd) + } - first_on_last_line <- last(c(1, which(pd$lag_newlines > 0 | pd$multi_line))) - on_same_line <- seq2(first_on_last_line, closing - 1) + first_on_last_line <- last( + c(1L, which(pd$lag_newlines > 0L | pd$multi_line > 0L)) + ) + on_same_line <- seq2(first_on_last_line, closing - 1L) cand_ind <- setdiff(on_same_line, which(pd$terminal)) - if (length(cand_ind) < 1) return(pd) + if (length(cand_ind) < 1L) { + return(pd) + } - candidates <- pd[cand_ind, ] + candidates <- vec_slice(pd, cand_ind) - non_candidates <- pd[-cand_ind, ] + non_candidates <- vec_slice(pd, -cand_ind) candidates$child <- map(candidates$child, unindent_child, - unindent_by = abs(pd$indent[closing] - pd$indent[closing - 1]) + unindent_by = abs(pd$indent[closing] - pd$indent[closing - 1L]) ) - bind_rows(candidates, non_candidates) %>% - arrange(pos_id) + vec_rbind(candidates, non_candidates) %>% + arrange_pos_id() } #' Unindent a child @@ -34,12 +41,13 @@ set_unindention_child <- function(pd, token = "')'", unindent_by) { #' @param pd A parse table. #' @param token The token the unindention should be based on. #' @param unindent_by By how many spaces one level of indention is reversed. -unindent_child <- function(pd, token = c("')'", "'}'"), unindent_by = 2) { +#' @keywords internal +unindent_child <- function(pd, token = c("')'", "'}'"), unindent_by = 2L) { closing <- which(pd$token %in% token) if (!("indent" %in% names(pd))) { - pd$indent <- 0 + pd$indent <- 0L } - if ((length(closing) > 0) && (closing == nrow(pd))) { + if ((length(closing) > 0L) && (closing == nrow(pd))) { pd$indent[closing] <- pd$indent[closing] - unindent_by } pd diff --git a/R/utils-cache.R b/R/utils-cache.R new file mode 100644 index 000000000..2465a51a2 --- /dev/null +++ b/R/utils-cache.R @@ -0,0 +1,220 @@ +#' Standardize text for hashing +#' +#' Make sure text after styling results in the same hash as text before styling +#' if it is indeed identical. 
This function expects trailing blank lines in +#' `text` were removed prior to passing it to this function. +#' @param text A character vector. +#' @keywords internal +hash_standardize <- function(text) { + text %>% + convert_newlines_to_linebreaks() %>% + enc2utf8() %>% + paste0(collapse = "\n") %>% + list() +} + +#' Check if text is cached +#' +#' This boils down to check if the hash exists at the caching dir as a file. +#' @param text Passed to [cache_make_key()] to generate a key. +#' @param transformers Passed to [cache_make_key()] to generate a key. +#' @param more_specs Passed to [cache_make_key()] to generate a key. +#' @param cache_dir The caching directory relative to the `.Rcache` root to +#' look for a cached value. +#' @keywords internal +is_cached <- function(text, + transformers, + more_specs, + cache_dir = get_cache_dir()) { + R.cache::generateCache( + key = cache_make_key(text, transformers, more_specs), + dirs = cache_dir + ) %>% + file.exists() +} + + +#' Make a key for `R.cache` +#' +#' This is used to determine if caching already corresponds to a style guide. +#' @param text Code to create a cache for. This should be styled text, as the +#' approach used by styler does not cache input, but styled code. +#' @param transformers A list of transformer functions, because we can only +#' know if text is already correct if we know which transformer function it +#' should be styled with. +#' @param more_specs A named vector coercible to character that determines the +#' styling but are style guide independent, such as `include_roxygen_examples` +#' or `base_indention`. +#' +#' @details +#' We need to compare: +#' +#' * text to style. Will be passed to hash function as is. +#' * styler version. Not an issue because for every version of styler, we build +#' a new cache. +#' * transformers. Cannot easily hash them because two environments won't be +#' identical even if they contain the same objects (see 'Experiments'). Simple +#' `as.character(transformers)` will not consider infinitively recursive +#' code dependencies. +#' To fix this, transformers must have names and version number as described +#' in [create_style_guide()]. Now, the only way to fool the cache invalidation +#' is to replace a transformer with the same function body (but changing +#' the function definition of the functions called in that body) interactively +#' without changing version number of name at the same time. +#' Remaining problem: `purrr::partial()` calls will render generic code, e.g. +#' see `as.character(list(purrr::partial(sum, x = 4)))`. For that reason, +#' all arguments passed to a `purrr::partial()` call must be put in the +#' style guide under `more_specs_style_guide`. +#' +#' @section Experiments: +#' +#' There is unexplainable behavior in conjunction with hashing and +#' environments: +#' * Functions created with `purrr::partial()` are not identical when compared +#' with `identical()` +#' ([StackOverflow](https://stackoverflow.com/questions/58656033/when-are-purrrpartial-ized-functions-identical)) +#' * except when they have the exact same parent environment, which must be an +#' object created and then passed to `purrr::partial(.env = ...)`, not +#' created in-place. +#' * `purrr::partial()` seems to ignore `.env` after version 0.2.5, so until +#' this is fixed, we'd have to work with version 0.2.5. +#' * Our caching backend package, `R.cache`, uses +#' `R.cache:::getChecksum.default` (which uses `digest::digest()`) to hash the +#' input. 
The latter does not seem to care if the environments are exactly +#' equal (see 'Examples'). +#' * However, under some circumstances, it does: Commit 9c94c022 (if not +#' overwritten / rebased by now) contains a reprex. Otherwise, search for +#' 43219ixmypi in commit messages and restore this commit to reproduce the +#' behavior. +#' +#' @examples +#' add <- function(x, y) { +#' x + y +#' } +#' add1 <- purrr::partial(add, x = 1) +#' add2 <- purrr::partial(add, x = 1) +#' identical(add1, add2) +#' identical(digest::digest(add1), digest::digest(add2)) +#' identical(digest::digest(styler::tidyverse_style()), digest::digest(styler::tidyverse_style())) +#' @keywords internal +cache_make_key <- function(text, transformers, more_specs) { + list( + text = hash_standardize(text), + style_guide_name = transformers$style_guide_name, + style_guide_version = transformers$style_guide_version, + more_specs_style_guide = as.character(transformers$more_specs_style_guide) %>% + set_names(names(transformers$more_specs_style_guide)), + more_specs = more_specs + ) +} + +#' Where is the cache? +#' +#' Finds the path to the cache and creates it if it does not exist. +#' @inheritParams cache_clear +#' @keywords internal +cache_find_path <- function(cache_name = NULL) { + cache_name <- cache_get_or_derive_name(cache_name) + R.cache::getCachePath(get_cache_dir(cache_name)) +} + +#' Check if a cache is activated +#' +#' @param cache_name The name of the cache to check. If `NULL`, we check if +#' any cache is activated. If not `NULL`, we check if a specific cache is +#' activated. +#' @keywords internal +cache_is_activated <- function(cache_name = NULL) { + current_cache <- cache_get_name() + + if (is.null(cache_name)) { + return(!is.null(current_cache)) + } + + if (!is.null(current_cache)) { + return(cache_name == current_cache) + } + + return(FALSE) +} + +#' Cache text +#' +#' Splits `text` into expressions and adds these to the cache. Note that +#' top-level comments are **not** cached because caching and in particular +#' checking if they are cached is too expensive. Comments may be cached as part +#' of the whole text (as opposed to on an expression by expression basis) using +#' `cache_write()` directly. Also, we must not cache stylerignore sequence, +#' because we might see the same expression that does not comply with the style +#' guide outside a stylerignore sequence and wrongly think we should leave it as +#' is. +#' @param text A character vector with one or more expressions. +#' @inheritParams cache_write +#' @keywords internal +cache_by_expression <- function(text, + transformers, + more_specs) { + expressions <- parse(text = text, keep.source = TRUE) %>% + utils::getParseData(includeText = TRUE) + if (env_current$any_stylerignore) { + expressions <- expressions %>% + add_stylerignore() + } else { + expressions$stylerignore <- rep(FALSE, length(expressions$text)) + } + # TODO base_indention should be set to 0 on write and on read for expressions + # (only) to make it possible to use the cache for expressions with different + # indention. when not the whole input text is cached, we go trough all + # expressions and check if they are cached, if yes, we take the input (from + # which the indention + # was removed via parse, same as it is in cache_by_expression) and add the + # base indention. 
+ expressions[expressions$parent == 0L & expressions$token != "COMMENT" & !expressions$stylerignore, "text"] %>% + map(cache_write, transformers = transformers, more_specs) +} + + +#' Write to the cache +#' +#' @inheritParams cache_make_key +#' @keywords internal +cache_write <- function(text, transformers, more_specs) { + R.cache::generateCache( + key = cache_make_key(text, transformers, more_specs), + dirs = get_cache_dir() + ) %>% + file.create() +} + +styler_version <- unlist(unname(read.dcf("DESCRIPTION")[, "Version"]), use.names = FALSE) + +cache_get_name <- function() { + getOption("styler.cache_name") +} + +cache_get_or_derive_name <- function(cache_name = NULL) { + cache_name <- cache_name %||% cache_get_name() + cache_name <- cache_name %||% styler_version + cache_name +} + +get_cache_dir <- function(cache_name = cache_get_name()) { + c(getOption("styler.cache_root", "styler"), cache_name) +} + + +#' Create more specs +#' +#' Syntactic sugar for creating more specs. This is useful when we want to add +#' more arguments (because we can search for this function in the source code). +#' @keywords internal +cache_more_specs <- function(include_roxygen_examples, + base_indention) { + list( + include_roxygen_examples = include_roxygen_examples, + base_indention = base_indention, + ignore_alignment = getOption("styler.ignore_alignment", FALSE), + ignore_start = getOption("styler.ignore_start", .default_ignore_start), + ignore_stop = getOption("styler.ignore_start", .default_ignore_stop) + ) +} diff --git a/R/utils-files.R b/R/utils-files.R new file mode 100644 index 000000000..974485549 --- /dev/null +++ b/R/utils-files.R @@ -0,0 +1,75 @@ +is_plain_r_file <- function(path) { + grepl("\\.R$", path, ignore.case = TRUE) +} + +is_rprofile_file <- function(path) { + grepl(".rprofile", path, ignore.case = TRUE) +} +is_rmd_file <- function(path) { + grepl("\\.(Rmd|Rmarkdown)$", path, ignore.case = TRUE) +} + +is_rnw_file <- function(path) { + grepl("\\.Rnw$", path, ignore.case = TRUE) +} + +is_qmd_file <- function(path) { + grepl("\\.qmd$", path, ignore.case = TRUE) +} + + +is_unsaved_file <- function(path) { + path == "" +} + +#' Map the file type to a corresponding regular expression +#' +#' @param filetype The file type to map to a regex. +#' @examples +#' styler:::map_filetype_to_pattern(c(".rMd", "R")) +#' @keywords internal +map_filetype_to_pattern <- function(filetype) { + paste0( + "(", + paste(set_and_assert_arg_filetype(filetype), collapse = "|"), + ")$" + ) +} + +#' `dir()`, but without dot-prefix and different defaults +#' +#' When using `dir()`, you can set `full.names = FALSE`, but then you can only +#' pass a character vector of length one as `path` to not loose the information +#' about where the files are. This function solves that case. It's needed when +#' one wants to standardize paths to use set operations on them, i.e. when the +#' user supplied input does not have a dot prefix. See 'Examples'. +#' +#' For different defaults, see `dir_without_._one`. +#' @param path A path. +#' @param ... Passed to [base::dir()]. +#' @seealso set_and_assert_arg_paths +#' @keywords internal +#' @examples +#' setdiff("./file.R", "file.R") # you want to standardize first. +dir_without_. <- function(path, recursive = TRUE, ...) { + purrr::map(path, dir_without_._one, recursive = recursive, ...) %>% + unlist(use.names = FALSE) +} + +#' `dir()`, but with full names, ignored case, and included hidden files and +#' recursive. 
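A sketch of the marker-file mechanism these helpers implement (internal API; this writes an empty, hash-named file into the currently active cache):

sg <- styler::tidyverse_style()
specs <- styler:::cache_more_specs(
  include_roxygen_examples = TRUE, base_indention = 0L
)
styler:::is_cached("x <- 1", sg, specs)   # FALSE, unless cached by an earlier run
styler:::cache_write("x <- 1", sg, specs) # creates the empty marker file
styler:::is_cached("x <- 1", sg, specs)   # TRUE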
+#' @keywords internal +dir_without_._one <- function(path, recursive, ...) { + relative <- dir( + path = path, + full.names = FALSE, + ignore.case = TRUE, + recursive = recursive, + all.files = TRUE, + ... + ) + if (path == ".") { + return(relative) + } + file.path(path, relative) +} diff --git a/R/utils-navigate-nest.R b/R/utils-navigate-nest.R new file mode 100644 index 000000000..0ef6cf5da --- /dev/null +++ b/R/utils-navigate-nest.R @@ -0,0 +1,102 @@ +#' Find the index of the next or previous non-comment in a parse table. +#' @param pd A parse table. +#' @param pos The position of the token to start the search from. +#' +#' @examples +#' code <- "a <- # hi \n x %>% b()" +#' writeLines(code) +#' pd <- compute_parse_data_nested(code) +#' child <- pd$child[[1]] +#' previous_non_comment(child, 4L) +#' next_non_comment(child, 2L) +#' @family third-party style guide helpers +#' @export +next_non_comment <- function(pd, pos) { + if (length(pos) < 1L || is.na(pos) || pos >= nrow(pd)) { + return(integer(0L)) + } + candidates <- seq2(pos + 1L, nrow(pd)) + if (all(candidates %in% which(pd$token == "COMMENT"))) { + return(integer(0L)) + } + setdiff(candidates, which(pd$token == "COMMENT"))[1L] +} + +#' @export +#' @rdname next_non_comment +previous_non_comment <- function(pd, pos) { + if (length(pos) < 1L || is.na(pos) || pos > nrow(pd)) { + return(integer(0L)) + } + candidates <- seq2(1L, pos - 1L) + if (all(candidates %in% which(pd$token == "COMMENT"))) { + return(integer(0L)) + } + last(setdiff(candidates, which(pd$token == "COMMENT"))) +} + +#' Tell me what the next terminal is +#' +#' If the first is a terminal, return it. If not, go inside it and search the +#' next terminal +#' @param pd A nest. +#' @param stack Whether or not to also return information on the tokens that +#' are between `pd` and the first terminal, so the returned data frame can be +#' understood as a transition path from `pd` to the next terminal, instead of +#' the information at the terminal only. The order is inside-out, +#' i.e. the first non-terminal on top, the terminal last. +#' @param vars The variables to return. +#' @param tokens_exclude A vector with tokens to exclude. This can be helpful if +#' one wants to find the next token that is not a comment for example. +#' @return +#' Returns a data frame (which is **not** a valid parse table for +#' `stack = TRUE`), with `vars` and another variable `position` that denotes +#' the index each element in the transition. This can be helpful in conjunction +#' with [purrr::pluck()] or [purrr::modify_in()] to reach the terminal in the +#' nested structure. +#' @keywords internal +#' @examples +#' withr::with_options( +#' list(styler.cache_name = NULL), # temporarily deactivate cache +#' { +#' pd <- compute_parse_data_nested("if (TRUE) f()") +#' styler:::next_terminal(pd) +#' } +#' ) +next_terminal <- function(pd, + stack = FALSE, + vars = c("pos_id", "token", "text"), + tokens_exclude = NULL) { + pd$position <- seq2(1L, nrow(pd)) + pd <- vec_slice(pd, !(pd$token %in% tokens_exclude)) + if (pd$terminal[1L]) { + pd[1L, c("position", vars)] + } else { + current <- next_terminal( + pd$child[[1L]], + stack = stack, vars = vars, tokens_exclude = tokens_exclude + ) + if (stack) { + vec_rbind(pd[1L, c("position", vars)], current) + } else { + current + } + } +} + + +#' Find the index of the last comment in the sequence of comments-only tokens +#' after the token that has position `pos` in `pd`. +#' @param pd A parse table. 
+#' @param pos The position of the token to start the search from. +#' @keywords internal +extend_if_comment <- function(pd, pos) { + if (pos == nrow(pd)) { + return(pos) + } + if (pd$token[pos + 1L] == "COMMENT") { + extend_if_comment(pd, pos + 1L) + } else { + pos + } +} diff --git a/R/utils-strings.R b/R/utils-strings.R new file mode 100644 index 000000000..46644b65a --- /dev/null +++ b/R/utils-strings.R @@ -0,0 +1,29 @@ +#' Repeat elements of a character vector `times` times and collapse it +#' +#' @param char A character vector. +#' @param times an integer giving the number of repetitions. +#' @return A character vector. +#' @keywords internal +rep_char <- function(char, times) { + paste(rep.int(char, times), collapse = "") +} + +#' Concentrate newlines or spaces in a string +#' +#' @param n Scalar indicating how many characters should be concentrated +#' @return A string. +#' @name add_spaces_or_newlines +#' @keywords internal +NULL + +#' @rdname add_spaces_or_newlines +#' @keywords internal +add_newlines <- function(n) { + rep_char("\n", n) +} + +#' @rdname add_spaces_or_newlines +#' @keywords internal +add_spaces <- function(n) { + rep_char(" ", n) +} diff --git a/R/utils.R b/R/utils.R index 5f7dee157..e1a235469 100644 --- a/R/utils.R +++ b/R/utils.R @@ -1,29 +1,96 @@ -parse_text <- function(x) parse(text = x)[[1L]] +parse_text <- function(x) parse_safely(x)[[1L]] -#' Repeat elements of a character vector `times` times and collapse it -#' -#' @param char A character vector. -#' @param times an integer giving the number of repetitions. -#' @return A character vector. -rep_char <- function(char, times) { - paste(rep.int(char, times), collapse = "") +line_col_names <- function() { + c("line1", "line2", "col1", "col2") +} + +#' Wrapper functions to encapsulate data frame creation +#' @keywords internal +#' @noRd +styler_df <- function(...) { + vctrs::data_frame(..., .name_repair = "minimal") +} + +#' @keywords internal +#' @noRd +new_styler_df <- function(x) { + vctrs::data_frame(!!!x) } -#' Concentrate newlines or spaces in a string +#' Ensure there is one (and only one) blank line at the end of a vector +#' @examples +#' styler:::ensure_last_n_empty("") +#' styler:::ensure_last_n_empty(letters) +#' styler:::ensure_last_n_empty(c(letters, "", "", "")) +#' @keywords internal +ensure_last_n_empty <- function(x, n = 1L) { + if (all(x == "")) { + return("") + } + x <- c(x, "", "") + x <- x[seq(1L, length(x) - which(rev(x) != "")[1L] + 1L)] + c(x, rep("", n)) +} + +#' @note Slightly simplified version of `rematch2::re_match()` (License: MIT). 
+#' @keywords internal +#' @noRd +re_match <- function(text, pattern) { + stopifnot(is.character(pattern), length(pattern) == 1L, !is.na(pattern)) + text <- as.character(text) + match <- regexpr(pattern, text, perl = TRUE) + start <- as.vector(match) + length <- attr(match, "match.length") + end <- start + length - 1L + matchstr <- substring(text, start, end) + matchstr[start == -1L] <- NA_character_ + res <- data.frame(stringsAsFactors = FALSE, .text = text, .match = matchstr) + + gstart <- attr(match, "capture.start") + glength <- attr(match, "capture.length") + gend <- gstart + glength - 1L + groupstr <- substring(text, gstart, gend) + groupstr[gstart == -1L] <- NA_character_ + dim(groupstr) <- dim(gstart) + res <- cbind(groupstr, res, stringsAsFactors = FALSE) + + names(res) <- c(attr(match, "capture.names"), ".text", ".match") + res +} + +#' Replace the newline character with a line break #' -#' @param n Scalar indicating how many characters should be concentrated -#' @return A string. -#' @name add_spaces_or_newlines -NULL +#' @param text A character vector +#' @examples +#' styler:::convert_newlines_to_linebreaks("x\n2") +#' # a simple strsplit approach does not cover both cases +#' unlist(strsplit("x\n\n2", "\n", fixed = TRUE)) +#' unlist(strsplit(c("x", "", "2"), "\n", fixed = TRUE)) +#' styler:::convert_newlines_to_linebreaks(c("x", "2")) +#' @keywords internal +convert_newlines_to_linebreaks <- function(text) { + split <- strsplit(text, "\n", fixed = TRUE) + map(split, ~ if (identical(.x, character(0L))) { + "" + } else { + .x + }) %>% + unlist(use.names = FALSE) +} + +odd_index <- function(x) { + if (length(x) < 1L) { + return(NULL) + } + seq(1L, length(x), by = 2L) +} -#' @rdname add_spaces_or_newlines -add_newlines <- function(n) { - rep_char("\n", n) +even_index <- function(x) { + seq(2L, length(x), by = 2L) } -#' @rdname add_spaces_or_newlines -add_spaces <- function(n) { - rep_char(" ", n) +is_windows <- function() { + identical(.Platform$OS.type, "windows") } #' Invoke a system command @@ -32,55 +99,34 @@ add_spaces <- function(n) { #' operating system. #' @param sys_call The call to be executed. #' @param ... Arguments passed to [shell()] or [system()]. +#' @keywords internal calls_sys <- function(sys_call, ...) { - if (Sys.info()[1] == "Windows") { + if (is_windows()) { error <- shell(sys_call, ...) } else { error <- system(sys_call, ...) } + error } -is_plain_r_file <- function(path) { - grepl("\\.R$", path, ignore.case = TRUE) -} - -is_rmd_file <- function(path) { - grepl("\\.Rmd$", path, ignore.case = TRUE) -} - -is_unsaved_file <- function(path) { - path == "" -} - -#' Find the index of the next non-comment in a parse table -#' @param pd A parse table. -#' @param pos The position of the token to start the search from. -#' @importFrom rlang seq2 -next_non_comment <- function(pd, pos) { - if (length(pos) < 1 || is.na(pos) || pos >= nrow(pd)) return(integer(0)) - candidates <- seq2(pos + 1L, nrow(pd)) - if (all(candidates %in% which(pd$token == "COMMENT"))) return(integer(0)) - setdiff(candidates, which(pd$token == "COMMENT"))[1] -} - -#' Find the index of the last comment in the sequence of comments-only tokens -#' after the token that has position `pos` in `pd`. -#' @param pd A parse table. -#' @param pos The position of the token to start the search from. 
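For illustration, the simplified `re_match()` above returns a base data frame with one row per input and one column per named capture group, plus `.text` and `.match`:

m <- styler:::re_match(c("setup.Rmd", "analysis.R"), "^(?<base>.+)\\.(?<ext>\\w+)$")
m$base # "setup"  "analysis"
m$ext  # "Rmd"    "R"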
-extend_if_comment <- function(pd, pos) { - if (pos == nrow(pd)) return(pos) - if (pd$token[pos + 1] == "COMMENT") { - extend_if_comment(pd, pos + 1L) +#' Get the value of an option +#' +#' Basically a `getOptions()` that fails fast by default. +#' @inheritParams base::getOption +#' @param error_if_not_found Whether or not an error should be returned if the +#' option was not set. +#' @keywords internal +option_read <- function(x, default = NULL, error_if_not_found = TRUE) { + if (x %in% names(options()) || !error_if_not_found) { + getOption(x, default) } else { - pos + rlang::abort(paste("R option", x, "must be set.")) } } -#' Map the file type to a corresponding regular expression -#' -#' @param filetype The file type to map to a regex. -#' @examples -#' styler:::map_filetype_to_pattern(c(".rMd", "R")) -map_filetype_to_pattern <- function(filetype) { - paste0("(", paste(set_and_assert_arg_filetype(filetype), collapse = "|"), ")$") +#' @keywords internal +unwhich <- function(x, length) { + x_ <- rep(FALSE, length) + x_[x] <- TRUE + x_ } diff --git a/R/vertical.R b/R/vertical.R index b79cc5b55..8062185a1 100644 --- a/R/vertical.R +++ b/R/vertical.R @@ -3,12 +3,33 @@ #' Sole purpose of the class vertical is to have a print method that #' aligns the output vertically. #' @param x A character vector or an object of class "vertical". +#' @keywords internal construct_vertical <- function(x) { - stopifnot(inherits(x, what = c("utf8", "character", "vertical"))) + stopifnot(is.character(x)) structure(x, class = "vertical") } +#' Print styled code +#' +#' @param x A character vector, one element corresponds to one line of code. +#' @param ... Not currently used. +#' @param colored Whether or not the output should be colored with +#' `prettycode::highlight()`. +#' @param style Passed to `prettycode::highlight()`. #' @export -print.vertical <- function(x, ...) { +print.vertical <- function(x, ..., + colored = getOption("styler.colored_print.vertical"), + style = prettycode::default_style()) { + if (colored) { + if (is_installed("prettycode")) { + x <- prettycode::highlight(x, style = style) + } else { + cli::cli_warn(paste( + "Could not use `colored = TRUE`, as the package prettycode is not", + "installed. Please install it if you want to see colored output", + "or see {.help styler::print.vertical} for more information." + )) + } + } cat(x, sep = "\n") } diff --git a/R/visit.R b/R/visit.R index 67f031688..1d048bbfa 100644 --- a/R/visit.R +++ b/R/visit.R @@ -1,70 +1,138 @@ #' Visit'em all #' #' Apply a list of functions to each level in a nested parse table. -#' `pre_visit()` applies `funs` before it proceeds to the children, -#' (that is, starts from the outermost level of nesting progressing -#' to the innermost level), `post_visit()` proceeds to its children -#' before applying the functions (meaning it first applies the functions -#' to the innermost level of nesting first and then going outwards). +#' `pre_visit()` applies `funs` before it proceeds to the children, +#' (that is, starts from the outermost level of nesting progressing +#' to the innermost level), `post_visit()` proceeds to its children +#' before applying the functions (meaning it first applies the functions +#' to the innermost level of nesting first and then going outwards). #' @param pd_nested A nested parse table. 
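The new arguments of the print method can be exercised as follows (coloring needs the optional prettycode package; `colored = FALSE` works without it):

out <- styler::style_text("1+1")
print(out)                  # colored if getOption("styler.colored_print.vertical") is TRUE
print(out, colored = FALSE) # plain text output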
#' @inheritParams visit_one #' @family visitors -#' @importFrom purrr map #' @name visit +#' @keywords internal NULL #' @rdname visit +#' @keywords internal pre_visit <- function(pd_nested, funs) { - if (is.null(pd_nested)) return() - pd_transformed <- visit_one(pd_nested, funs) + if (is.null(pd_nested)) { + return() + } + if (length(funs) == 0L) { + return(pd_nested) + } + pd_nested <- visit_one(pd_nested, funs) - pd_transformed$child <- map(pd_transformed$child, pre_visit, funs = funs) - pd_transformed + children <- pd_nested$child + for (i in seq_along(children)) { + child <- children[[i]] + if (!is.null(child)) { + children[[i]] <- pre_visit(child, funs) + } + } + pd_nested$child <- children + pd_nested +} + +#' @rdname visit +#' @keywords internal +pre_visit_one <- function(pd_nested, fun) { + if (is.null(pd_nested)) { + return() + } + pd_nested <- fun(pd_nested) + + children <- pd_nested$child + for (i in seq_along(children)) { + child <- children[[i]] + if (!is.null(child)) { + children[[i]] <- pre_visit_one(child, fun) + } + } + pd_nested$child <- children + pd_nested } #' @rdname visit +#' @keywords internal post_visit <- function(pd_nested, funs) { - if (is.null(pd_nested)) return() - pd_transformed <- pd_nested + if (is.null(pd_nested)) { + return() + } + if (length(funs) == 0L) { + return(pd_nested) + } - pd_transformed$child <- map(pd_transformed$child, post_visit, funs = funs) - visit_one(pd_transformed, funs) + children <- pd_nested$child + for (i in seq_along(children)) { + child <- children[[i]] + if (!is.null(child)) { + children[[i]] <- post_visit(child, funs) + } + } + pd_nested$child <- children + + visit_one(pd_nested, funs) +} + +#' @rdname visit +#' @keywords internal +post_visit_one <- function(pd_nested, fun) { + if (is.null(pd_nested)) { + return() + } + force(fun) + + children <- pd_nested$child + for (i in seq_along(children)) { + child <- children[[i]] + if (!is.null(child)) { + children[[i]] <- post_visit_one(child, fun) + } + } + pd_nested$child <- children + + fun(pd_nested) } #' Transform a flat parse table with a list of transformers #' -#' Uses [purrr::reduce()] to apply each function of `funs` sequentially to +#' Uses [Reduce()] to apply each function of `funs` sequentially to #' `pd_flat`. #' @param pd_flat A flat parse table. #' @param funs A list of transformer functions. #' @family visitors -#' @importFrom purrr reduce +#' @keywords internal visit_one <- function(pd_flat, funs) { - reduce( - funs, function(x, fun) fun(x), - .init = pd_flat - ) + for (f in funs) { + pd_flat <- f(pd_flat) + } + pd_flat } - #' Propagate context to terminals #' #' Implements a very specific pre-visiting scheme, namely to propagate -#' indention, spaces and lag_newlines to inner token to terminals. This means -#' that information regarding indention, line breaks and spaces (which is -#' relative in `pd_nested`) will be converted into absolute. +#' indention, spaces and lag_newlines to inner token to terminals. This means +#' that information regarding indention, line breaks and spaces (which is +#' relative in `pd_nested`) will be converted into absolute. 
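A minimal illustration of the visiting scheme (internal API; the collector records terminal tokens and must hand the parse table back unchanged):

pd <- styler:::compute_parse_data_nested("if (TRUE) f(x)")
tokens <- character()
collect <- function(p) {
  tokens <<- c(tokens, p$text[p$terminal])
  p # every visitor returns the (possibly modified) parse table
}
invisible(styler:::post_visit_one(pd, collect))
tokens # all terminal tokens; inner nests are visited before their parents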
#' @inherit context_towards_terminals #' @seealso context_towards_terminals visitors -#' @importFrom purrr pmap +#' @keywords internal context_to_terminals <- function(pd_nested, outer_lag_newlines, outer_indent, outer_spaces, outer_indention_refs) { - if (is.null(pd_nested)) return() + if (is.null(pd_nested)) { + return() + } pd_transformed <- context_towards_terminals( - pd_nested, outer_lag_newlines, outer_indent, outer_spaces, outer_indention_refs + pd_nested, + outer_lag_newlines, outer_indent, + outer_spaces, outer_indention_refs ) pd_transformed$child <- pmap( @@ -80,14 +148,12 @@ context_to_terminals <- function(pd_nested, pd_transformed } - #' Update the a parse table given outer context #' #' `outer_lag_newlines` are added to the first token in `pd`, -#' `outer_indent` is added to all tokens in `pd`, `outer_spaces` is added to -#' the last token in `pd`. [context_to_terminals()] calls this function -#' repeatedly, which means the propagation of the parse information to the -#' terminal tokens. +#' `outer_indent` is added to all tokens in `pd`, `outer_spaces` is added to the +#' last token in `pd`. [context_to_terminals()] calls this function repeatedly, +#' which means the propagation of the parse information to the terminal tokens. #' @param pd_nested A nested parse table. #' @param outer_lag_newlines The lag_newlines to be propagated inwards. #' @param outer_indent The indention depth to be propagated inwards. @@ -96,15 +162,20 @@ context_to_terminals <- function(pd_nested, #' inwards. #' @return An updated parse table. #' @seealso context_to_terminals +#' @keywords internal context_towards_terminals <- function(pd_nested, outer_lag_newlines, outer_indent, outer_spaces, outer_indention_refs) { - pd_nested$indent <- pd_nested$indent + outer_indent + pd_nested$indent <- pd_nested$indent + ifelse( + is.na(pd_nested$indention_ref_pos_id), + outer_indent, + 0L + ) ref_pos_id_is_na <- !is.na(pd_nested$indention_ref_pos_id) pd_nested$indention_ref_pos_id[!ref_pos_id_is_na] <- outer_indention_refs - pd_nested$lag_newlines[1] <- pd_nested$lag_newlines[1] + outer_lag_newlines + pd_nested$lag_newlines[1L] <- pd_nested$lag_newlines[1L] + outer_lag_newlines pd_nested$spaces[nrow(pd_nested)] <- pd_nested$spaces[nrow(pd_nested)] + outer_spaces pd_nested @@ -113,31 +184,43 @@ context_towards_terminals <- function(pd_nested, #' Extract terminal tokens #' #' Turns a nested parse table into a flat parse table and extracts *all* -#' attributes +#' attributes. #' @param pd_nested A nested parse table. +#' @keywords internal extract_terminals <- function(pd_nested) { - if (is.null(pd_nested)) return(pd) - pd_split <- split(pd_nested, seq_len(nrow(pd_nested))) - bind_rows(if_else(pd_nested$terminal, pd_split, pd_nested$child)) -} + terminal <- pd_nested$terminal + is_cached <- pd_nested$is_cached + + child <- pd_nested$child + for (i in seq_len(nrow(pd_nested))) { + if (terminal[[i]] || is_cached[[i]]) { + child[[i]] <- list(vec_slice(pd_nested, i)) + } + } + + # child is a list of data frame lists here + unlist(unname(child), recursive = FALSE) +} #' Enrich flattened parse table #' #' Enriches a flattened parse table with terminals only. In particular, it is -#' possible to compute the exact position a token will have (line and column) -#' when it will be serialized. -#' @details Since we have only terminal tokens now, the line on which a token -#' starts we also be the line on which it ends. We call `line1` the line on -#' which the token starts. 
`line1` has the same meaning as `line1` that can be -#' found in a flat parse table (see [tokenize()]), just that the `line1` -#' created by `enrich_terminals()` is the updated version of the former -#' `line1`. The same applies for `col1` and `col2`. Note that this function -#' does remove the columns `indent` and `spaces.` All information of the former -#' is stored in `lag_spaces` now. The later was removed because it is redundant -#' after adding the column `lag_spaces`, which is more convenient to work with, -#' in particular when serializing the parse table. +#' possible to compute the exact position a token will have (line and column) +#' when it will be serialized. +#' @details +#' Since we have only terminal tokens now, the line on which a token starts we +#' also be the line on which it ends. We call `line1` the line on which the +#' token starts. `line1` has the same meaning as `line1` that can be found in a +#' flat parse table (see [tokenize()]), just that the `line1` created by +#' `enrich_terminals()` is the updated version of the former `line1`. The same +#' applies for `col1` and `col2`. Note that this function does remove the +#' columns `indent` and `spaces.` All information of the former is stored in +#' `lag_spaces` now. The later was removed because it is redundant after adding +#' the column `lag_spaces`, which is more convenient to work with, in particular +#' when serializing the parse table. #' @inheritParams choose_indention +#' @keywords internal enrich_terminals <- function(flattened_pd, use_raw_indention = FALSE) { flattened_pd$lag_spaces <- lag(flattened_pd$spaces, default = 0L) flattened_pd$spaces <- NULL # depreciate spaces @@ -148,13 +231,12 @@ enrich_terminals <- function(flattened_pd, use_raw_indention = FALSE) { flattened_pd$newlines <- lead(flattened_pd$lag_newlines, default = 0L) flattened_pd$nchar <- nchar(flattened_pd$text, type = "width") groups <- flattened_pd$line1 - flattened_pd <- flattened_pd %>% - split(groups) %>% - lapply(function(.x) { + split_pd <- vec_split(flattened_pd, groups)[[2L]] + flattened_pd <- split_pd %>% + map_dfr(function(.x) { .x$col2 <- cumsum(.x$nchar + .x$lag_spaces) .x - }) %>% - bind_rows() + }) flattened_pd$col1 <- flattened_pd$col2 - flattened_pd$nchar flattened_pd } @@ -162,25 +244,25 @@ enrich_terminals <- function(flattened_pd, use_raw_indention = FALSE) { #' Choose the indention method for the tokens #' #' Either use the raw indention, which is just the spaces computed between -#' the first token on a new line and the token before it, or use the indention -#' computed according to the transformer used, which is stored in the column -#' `indention`. -#' -#' All indention information will be combined with the space information for -#' the first token on a new line. -#' If `use_raw_indention` is set, information in the column `indention` will -#' be discarded anyways. If it is not set, the first token on a new line will -#' "inherit" the indention of the whole line. -#' The column `indention` will be removed since all information necessary is -#' contained in the spacing information of the first token on a new line and -#' the position of the tokens will not be changed anymore at this stage. +#' the first token on a new line and the token before it, or use the indention +#' computed according to the transformer used, which is stored in the column +#' `indention`. +#' All indention information will be combined with the space information for +#' the first token on a new line. 
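To make the position bookkeeping in `enrich_terminals()` concrete, here is the same `cumsum()` idea applied by hand to a single line of terminals (toy values; a base-R shift stands in for the `lag()` helper):

```r
# Terminals of the line `a <- f(x)`, already flattened.
flat <- data.frame(
  text   = c("a", "<-", "f", "(", "x", ")"),
  spaces = c(1L, 1L, 0L, 0L, 0L, 0L) # spaces *after* each token
)
flat$lag_spaces <- c(0L, flat$spaces[-nrow(flat)])      # spaces *before* each token
flat$nchar      <- nchar(flat$text, type = "width")
flat$col2       <- cumsum(flat$nchar + flat$lag_spaces) # column where a token ends
flat$col1       <- flat$col2 - flat$nchar               # column just before it starts
flat[, c("text", "col1", "col2")]                       # positions in "a <- f(x)"
```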
+#' If `use_raw_indention` is set, information in the column `indention` will +#' be discarded anyways. If it is not set, the first token on a new line will +#' "inherit" the indention of the whole line. +#' The column `indention` will be removed since all information necessary is +#' contained in the spacing information of the first token on a new line and +#' the position of the tokens will not be changed anymore at this stage. #' @param flattened_pd A nested parse table that was turned into a flat parse #' table using [extract_terminals()]. #' @param use_raw_indention Boolean indicating whether or not the raw indention #' should be used. +#' @keywords internal choose_indention <- function(flattened_pd, use_raw_indention) { if (!use_raw_indention) { - flattened_pd$lag_spaces <- if_else(flattened_pd$lag_newlines > 0, + flattened_pd$lag_spaces <- ifelse(flattened_pd$lag_newlines > 0L, flattened_pd$indent, flattened_pd$lag_spaces ) diff --git a/R/zzz.R b/R/zzz.R index 373b4d4c2..77b90ee17 100644 --- a/R/zzz.R +++ b/R/zzz.R @@ -1,3 +1,101 @@ +# nocov start +.default_ignore_start <- "styler: off" +.default_ignore_stop <- "styler: on" + .onLoad <- function(libname, pkgname) { - backports::import(pkgname, "trimws") + op <- options() + op.styler <- list( + styler.addins_style_transformer = "styler::tidyverse_style()", + styler.cache_root = NULL, + styler.cache_name = styler_version, + styler.colored_print.vertical = TRUE, + styler.ignore_alignment = FALSE, + styler.ignore_start = .default_ignore_start, + styler.ignore_stop = .default_ignore_stop, + styler.quiet = FALSE, + styler.test_dir_writable = TRUE + ) + toset <- !(names(op.styler) %in% names(op)) + if (any(toset)) options(op.styler[toset]) + ask_to_switch_to_non_default_cache_root() + remove_cache_old_versions() + remove_old_cache_files() + invisible() } + +#' Delete a cache or temp directory +#' +#' For safety, `path` is only deleted if it is a sub-directory of a temporary +#' directory or user cache. Since this function relies on `tools::R_user_dir()`, +#' it early returns `FALSE` on `R < 4.0.0`. +#' @param path Absolute path to a directory to delete. +#' @returns `TRUE` if anything was deleted, `FALSE` otherwise. +#' @keywords internal +delete_if_cache_directory <- function(path) { + path <- normalizePath(path) + if (getRversion() < package_version("4.0.0")) { + return(FALSE) + } + designated_cache_path <- normalizePath(tools::R_user_dir("R.cache", which = "cache")) + is_in_tools_cache <- startsWith(path, designated_cache_path) + temp_dir <- normalizePath(dirname(tempdir())) + is_in_generic_cache <- startsWith(path, temp_dir) + if (is_in_tools_cache || is_in_generic_cache) { + all_files <- list.files(path, + full.names = TRUE, + recursive = TRUE, + all.files = FALSE + ) + if (length(all_files) < 1L) { + unlink(path, recursive = TRUE) + return(TRUE) + } + } + FALSE +} + + +ask_to_switch_to_non_default_cache_root <- function(ask = interactive()) { + if (ask && stats::runif(1L) > 0.9 && is.null(getOption("styler.cache_root"))) { + ask_to_switch_to_non_default_cache_root_impl() + options(styler.cache_root = "styler") + } +} + + +ask_to_switch_to_non_default_cache_root_impl <- function() { + cli::cli_inform(paste0( + "{{styler}} cache is cleared after 6 days. ", + "See {.help styler::caching} to configure differently or silence this message." 
+ )) +} + +remove_old_cache_files <- function() { + path_version_specific <- R.cache::getCachePath(c("styler", styler_version)) + all_cached <- list.files( + path_version_specific, + full.names = TRUE, recursive = TRUE + ) + date_boundary <- Sys.time() - as.difftime(6L, units = "days") + file.remove( + all_cached[file.info(all_cached)$mtime < date_boundary] + ) + path_styler_specific <- dirname(path_version_specific) + path_r_cache_specific <- dirname(path_styler_specific) + paths <- normalizePath( + c(path_version_specific, path_styler_specific, path_r_cache_specific) + ) + purrr::walk( + paths, + delete_if_cache_directory + ) +} + + +remove_cache_old_versions <- function() { + dirs <- list.dirs(R.cache::getCachePath("styler"), recursive = FALSE) + old_package_dirs <- dirs[basename(dirs) != as.character(styler_version)] + purrr::walk(old_package_dirs, unlink, recursive = TRUE, force = TRUE) +} + +# nocov end diff --git a/README.Rmd b/README.Rmd index 5b975d7f0..14fa12a86 100644 --- a/README.Rmd +++ b/README.Rmd @@ -1,7 +1,10 @@ --- output: github_document: - html_preview: false + html_preview: true +editor_options: + markdown: + wrap: 79 --- @@ -14,54 +17,56 @@ knitr::opts_chunk$set( ) ``` - # styler -[![Build Status](https://travis-ci.org/r-lib/styler.svg?branch=master)](https://travis-ci.org/r-lib/styler) -[![AppVeyor Build Status](https://ci.appveyor.com/api/projects/status/github/r-lib/styler?branch=master&svg=true)](https://ci.appveyor.com/project/r-lib/styler) -[![Project Status: Active – The project has reached a stable, usable state and is being actively developed.](http://www.repostatus.org/badges/latest/active.svg)](http://www.repostatus.org/#active) -[![codecov](https://codecov.io/gh/r-lib/styler/branch/master/graph/badge.svg)](https://codecov.io/gh/r-lib/styler) -[![cran version](http://www.r-pkg.org/badges/version/styler)](https://cran.r-project.org/package=styler) + +[![R build +status](https://github.com/r-lib/styler/workflows/R-CMD-check/badge.svg)](https://github.com/r-lib/styler/actions) +[![Life cycle: +stable](https://img.shields.io/badge/lifecycle-stable-brightgreen.svg)](https://lifecycle.r-lib.org/articles/stages.html) +[![codecov test +coverage](https://app.codecov.io/gh/r-lib/styler/branch/main/graph/badge.svg)](https://app.codecov.io/gh/r-lib/styler) +[![CRAN +Status](https://www.r-pkg.org/badges/version/styler)](https://cran.r-project.org/package=styler) -The goal of styler is to provide non-invasive pretty-printing of R source code -while adhering to the [tidyverse](https://github.com/tidyverse/style) formatting -rules. Support for custom style guides is planned. + -You can install the package from GitHub: +# Overview -```{r, eval = FALSE} -# install.packages("remotes") -remotes::install_github("krlmlr/styler") -``` +styler formats your code according to the [tidyverse style +guide](https://style.tidyverse.org) (or your custom style guide) so you can +direct your attention to the content of your code. It helps to keep the coding +style consistent across projects and facilitate collaboration. 
You can access +styler through +- the RStudio Addin as demonstrated below +- R functions like `style_pkg()`, `style_file()` or `style_text()` +- various other tools described in `vignette("third-party-integrations")` -You can style a simple character vector of code with `style_text()`: -```{r echo=FALSE, message=FALSE} -pkgload::load_all() -library("magrittr") +```{r, out.width = "650px", echo = FALSE} +knitr::include_graphics("https://raw.githubusercontent.com/lorenzwalthert/some_raw_data/master/styler_0.1.gif") ``` +## Installation -```{r} -ugly_code <- "a<-function( x){1+1} " -style_text(ugly_code) +You can install the package from CRAN. +```{r, eval = FALSE} +install.packages("styler") ``` -There are a few variants of `style_text()`: +Or get the development version from GitHub: -* `style_file()` styles .R and/or .Rmd files. -* `style_dir()` styles all .R files in a directory. -* `style_pkg()` styles the source files of an R package. -* RStudio Addins for styling the active file, styling the current package and - styling the highlighted code region. - -```{r, out.width = "650px", echo = FALSE} -knitr::include_graphics("https://raw.githubusercontent.com/lorenzwalthert/some_raw_data/master/styler_0.1.gif") +```{r, eval = FALSE} +# install.packages("remotes") +remotes::install_github("r-lib/styler") ``` +## Documentation + +The following online docs are available: +- [latest CRAN release](https://styler.r-lib.org). -You can find more information on the wiki of [Google Summer of Code 2017](https://github.com/rstats-gsoc/gsoc2017/wiki/Noninvasive-source-code-formatting) -or check out the [pkgdown](https://r-lib.github.io/styler/) page. +- [GitHub development version](https://styler.r-lib.org/dev/). diff --git a/README.md b/README.md index da0ad192d..08213d935 100644 --- a/README.md +++ b/README.md @@ -1,36 +1,55 @@ -styler -====== -[![Build Status](https://travis-ci.org/r-lib/styler.svg?branch=master)](https://travis-ci.org/r-lib/styler) [![AppVeyor Build Status](https://ci.appveyor.com/api/projects/status/github/r-lib/styler?branch=master&svg=true)](https://ci.appveyor.com/project/r-lib/styler) [![Project Status: Active – The project has reached a stable, usable state and is being actively developed.](http://www.repostatus.org/badges/latest/active.svg)](http://www.repostatus.org/#active) [![codecov](https://codecov.io/gh/r-lib/styler/branch/master/graph/badge.svg)](https://codecov.io/gh/r-lib/styler) [![cran version](http://www.r-pkg.org/badges/version/styler)](https://cran.r-project.org/package=styler) +# styler -The goal of styler is to provide non-invasive pretty-printing of R source code while adhering to the [tidyverse](https://github.com/tidyverse/style) formatting rules. Support for custom style guides is planned. + -You can install the package from GitHub: +[![R build +status](https://github.com/r-lib/styler/workflows/R-CMD-check/badge.svg)](https://github.com/r-lib/styler/actions) +[![Life cycle: +stable](https://img.shields.io/badge/lifecycle-stable-brightgreen.svg)](https://lifecycle.r-lib.org/articles/stages.html) +[![codecov test +coverage](https://app.codecov.io/gh/r-lib/styler/branch/main/graph/badge.svg)](https://app.codecov.io/gh/r-lib/styler) +[![CRAN +Status](https://www.r-pkg.org/badges/version/styler)](https://cran.r-project.org/package=styler) + + + +# Overview + +styler formats your code according to the [tidyverse style +guide](https://style.tidyverse.org) (or your custom style guide) so you +can direct your attention to the content of your code. 
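A one-line taste of what that looks like, using the same snippet the previous README used (the printed result is the output shown there):

```r
ugly_code <- "a<-function( x){1+1} "
styler::style_text(ugly_code)
#> a <- function(x) {
#>   1 + 1
#> }
```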
It helps to keep +the coding style consistent across projects and facilitate +collaboration. You can access styler through + +- the RStudio Addin as demonstrated below +- R functions like `style_pkg()`, `style_file()` or `style_text()` +- various other tools described in + `vignette("third-party-integrations")` + + + +## Installation + +You can install the package from CRAN. ``` r -# install.packages("remotes") -remotes::install_github("krlmlr/styler") +install.packages("styler") ``` -You can style a simple character vector of code with `style_text()`: +Or get the development version from GitHub: ``` r -ugly_code <- "a<-function( x){1+1} " -style_text(ugly_code) -#> a <- function(x) { -#> 1 + 1 -#> } +# install.packages("remotes") +remotes::install_github("r-lib/styler") ``` -There are a few variants of `style_text()`: +## Documentation -- `style_file()` styles .R and/or .Rmd files. -- `style_dir()` styles all .R files in a directory. -- `style_pkg()` styles the source files of an R package. -- RStudio Addins for styling the active file, styling the current package and styling the highlighted code region. +The following online docs are available: - +- [latest CRAN release](https://styler.r-lib.org). -You can find more information on the wiki of [Google Summer of Code 2017](https://github.com/rstats-gsoc/gsoc2017/wiki/Noninvasive-source-code-formatting) or check out the [pkgdown](https://r-lib.github.io/styler/) page. +- [GitHub development version](https://styler.r-lib.org/dev/). diff --git a/_pkgdown.yaml b/_pkgdown.yaml new file mode 100644 index 000000000..ed50bc3f2 --- /dev/null +++ b/_pkgdown.yaml @@ -0,0 +1,87 @@ +home: + strip_header: true + +reference: + - title: "Styling API" + desc: > + Functions for styling code + - contents: + - style_text + - style_file + - style_pkg + - style_dir + - styler_addins + - title: "Fine-tune styling" + desc: "Customize style guides" + - contents: + - tidyverse_style + - tidyverse_reindention + - tidyverse_math_token_spacing + - create_style_guide + - specify_math_token_spacing + - specify_reindention + - specify_transformers_drop + - title: "Non-functional documentation" + desc: "Explaining features" + contents: + - caching + - stylerignore + - styler-package + - title: "Caching" + desc: "Utilities to help manage the styler cache" + - contents: + - starts_with("cache") + - title: "Third-party style guide helpers" + desc: "Utilities for customizing styler for non-tidyverse style guides" + - contents: + - compute_parse_data_nested + - has_concept("third-party style guide helpers") + - title: "Other" + contents: + - print.vertical + +template: + params: + bootswatch: flatly # https://bootswatch.com/flatly/ + docsearch: + api_key: 13580d327d8a7159f83a7cff178d2141 + index_name: r-lib_styler + +authors: + Kirill Müller: + href: https://krlmlr.info + Lorenz Walthert: + href: https://lorenzwalthert.com + +development: + mode: auto + +url: https://styler.r-lib.org + +news: + releases: + - text: "Version 1.0.0" + href: https://www.tidyverse.org/blog/2017/12/styler-1.0.0/ + - text: "Version 1.2.0" + href: https://www.tidyverse.org/blog/2019/11/styler-1-2-0/ + - text: "Version 1.3.0" + href: https://lorenzwalthert.netlify.com/post/styler-1-3-0/ + - text: "Version 1.4.0" + href: https://lorenzwalthert.netlify.app/post/styler-1-4-0/ + +articles: + - title: Get started + navbar: ~ + contents: + - styler + - detect-alignment + - strict + - third-party-integrations + + - title: Developers + navbar: Developers + contents: + - remove_rules + - customizing_styler + - 
distribute_custom_style_guides + - caching diff --git a/_pkgdown.yml b/_pkgdown.yml deleted file mode 100644 index 7c4df1cb0..000000000 --- a/_pkgdown.yml +++ /dev/null @@ -1,3 +0,0 @@ -templates: - params: - bootswatch: flatly # https://bootswatch.com/flatly/ diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index 88eb11afc..000000000 --- a/appveyor.yml +++ /dev/null @@ -1,59 +0,0 @@ -# DO NOT CHANGE the "init" and "install" sections below - -# Download script file from GitHub -init: - ps: | - $ErrorActionPreference = "Stop" - Invoke-WebRequest http://raw.github.com/krlmlr/r-appveyor/master/scripts/appveyor-tool.ps1 -OutFile "..\appveyor-tool.ps1" - Import-Module '..\appveyor-tool.ps1' - -install: - - ps: Bootstrap - - cmd: Rscript -e "writeLines('options(repos = \'https://cloud.r-project.org\')', '~/.Rprofile')" - - cmd: Rscript -e "getOption('repos')" - - cmd: Rscript -e "install.packages('remotes'); remotes::install_github('ropenscilabs/tic'); tic::prepare_all_stages()" - -cache: -- C:\RLibrary - -before_build: Rscript -e "tic::before_install()" -build_script: Rscript -e "tic::install()" -after_build: Rscript -e "tic::after_install()" -before_test: Rscript -e "tic::before_script()" -test_script: Rscript -e "tic::script()" -on_success: Rscript -e "try(tic::after_success(), silent = TRUE)" -on_failure: Rscript -e "tic::after_failure()" -before_deploy: Rscript -e "tic::before_deploy()" -deploy_script: Rscript -e "tic::deploy()" -after_deploy: Rscript -e "tic::after_deploy()" -on_finish: Rscript -e "tic::after_script()" - -# Adapt as necessary starting from here - -#on_failure: -# - 7z a failure.zip *.Rcheck\* -# - appveyor PushArtifact failure.zip - -environment: - USE_RTOOLS: true - GITHUB_PAT: - secure: VXO22OHLkl4YhVIomSMwCZyOTx03Xf2WICaVng9xH7gISlAg8a+qrt1DtFtk8sK5 - -artifacts: - - path: '*.Rcheck\**\*.log' - name: Logs - - - path: '*.Rcheck\**\*.out' - name: Logs - - - path: '*.Rcheck\**\*.fail' - name: Logs - - - path: '*.Rcheck\**\*.Rout' - name: Logs - - - path: '\*_*.tar.gz' - name: Bits - - - path: '\*_*.zip' - name: Bits diff --git a/cran-comments.md b/cran-comments.md index 3d4d67b2f..ed38adf29 100644 --- a/cran-comments.md +++ b/cran-comments.md @@ -1,10 +1,43 @@ +--- +editor_options: + markdown: + wrap: 79 +--- + +This is a release requested by the CRAN team to delete the population of the +user's cache while building vignettes. + + ## Test environments -* local OS X install, R 3.4.2, R 3.4.3 -* ubuntu 12.04 (on travis-ci), R 3.4.2 -* win-builder (devel and release) + +- ubuntu 20.04 (on GitHub Actions): R devel, R 4.3.0, R 4.2.1, 4.1.2, R 4.0.5, + R 3.6 +- Windows Server 10 (on GitHub Actions): R devel, R 4.3.0, R 4.2.1, R 4.1.2, + R 3.6. +- win-builder: R devel ## R CMD check results -0 errors | 0 warnings | 1 note +0 ERRORS \| 0 WARNINGS \| 1 NOTES + +The note was generated on winbuilder when incoming checks were enabled only and +contained many blocks like this: + + Found the following (possibly) invalid URLs: + URL: https://github.com/ropensci/drake + From: inst/doc/third-party-integrations.html + NEWS.md + Status: 429 + Message: Too Many Requests + +It seems my package contains many URLs to GitHub and their rate limit prevents +the checking of all of them. I confirm that all URLs in my package are +compliant with the requirements of CRAN. + +## Downstream Dependencies + +I also ran R CMD check on all 39 downstream dependencies of styler using the +revdepcheck package. -* This is a new release. 
+All of them finished R CMD CHECK with the same number of ERRORS, WARNINGS and +NOTES. diff --git a/inst/WORDLIST b/inst/WORDLIST new file mode 100644 index 000000000..1e14ad162 --- /dev/null +++ b/inst/WORDLIST @@ -0,0 +1,311 @@ +addin +Addin +addins +Addins +api +AppVeyor +apriori +arg +AsIs +AST +aut +autothresholdr +backport +bdr +benchmarking +biocthis +bootswatch +BugReports +cancelling +cff +chnages +ci +cli +CMD +codebase +codecov +codegrip +coercible +coercions +compat +config +CONST +counterpair +coventions +covr +cpp +cran +cre +ctb +cyclocomp +cynkra +datastructures +dec +dependabot +deps +desc +dev +devtools +dir +docsearch +dont +dontrun +dontshow +dontshowdontrun +donttest +dplyr +DSLs +emacs +emph +env +EOF +EOL +EOLs +epigraphdb +eq +EQ +eval +examplesIf +exampletestr +expr +expr EQ +fansi +fileext +filetype +forcond +formatter +funct +gadenbuie +gcc +getChecksum +getOption +getRversion +ggplot +ghclass +github +gitsum +gmail +grkstyle +GSOC +hashFiles +helpfiles +href +http +https +icloud +ifelse +impl +Indrajeet +infinitively +initializer +inode +integrations +interaces +internal's +invasiveness +iNZightTools +io +ixmypi +ized +JamesIves +Jupyterlab +Kirill +kirill +knitr +krlmlr +labelled +languageserver +LazyData +learnr +levelName +LF +LIBS +lifecycle +Ligges +linter +linters +lintr +linux +lorenz +lorenzwalthert +lst +macOS +magrittr +md +MERCHANTABILITY +mlr +Müller +mutli +na +navbar +netlify +netReg +nocomments +NONINFRINGEMENT +nonportable +nph +NUM +oldrel +oneliner +ORCID +os +ourself +packagemanager +packrat +pandoc +params +paren +parsable +parsesum +Patil +patilindrajeet +patilindrajeets +pgkdown +pkgapi +pkgconfig +pkgdown +pkgs +pos +pre +precommit +prefill +prettycode +priori +PRs +purrr +qmd +Qmd +questionr +rcmdcheck +RcppExports +rds +readline +readme +README +rebased +reindent +reindented +reindention +relevel +renv +repo +reprex +revdepcheck +RHUB +rlang +rlang's +rmarkdown +RMarkdown +rmd +Rmd +rnw +Rnw +roadmap +roclet +roclets +rootPath +ROOTPATH +ropensci +roundtrip +roxgen +roxygen +Roxygen +roxygenise +RoxygenNote +rplumber +rprofile +Rprofile +rprojroot +Rscript +rspm +RSPM +rstudio +RStudio +RStudio's +rstudioapi +saamwerk +saveRDS +seealso +semicoloner +sep +sessioninfo +setCacheRootPath +setdiff +setenv +Shallowify +sharding +shinydashboardPlus +shinymeta +shinyMonacoEditor +shinyobjects +ShinyQuickStarter +spaceout +sprintf +stackoverflow +StackOverflow +startsWith +staticimports +STR +styler +stylerignore +stylerignored +stylers +subexpr +sublicense +sudo +summarises +Sys +sysreq +sysreqs +systemPipeShiny +tempfile +testthat +tibble +tibbles +tidyeval +tidypaleo +tidyr +tidyverse +Tidyverse +Tierney +todo +tokenized +travis +tryCatch +tryGugus +ubuntu +ui +uncached +unexplainable +unicode +unindent +unindention +unlink +unlinkunindention +unnest +unparsable +unstyled +upsetjs +usethis +utf +Uwe +vctrs +vec +VignetteBuilder +Visit'em +walthert +Walthert +wch +winbuilder +withr +writeLines +www +xaringan +xenial +xfun +Xie +xyzpackage +YAML +yaml +yihui +zzz diff --git a/inst/hooks/require-news-update.R b/inst/hooks/require-news-update.R new file mode 100755 index 000000000..77757c578 --- /dev/null +++ b/inst/hooks/require-news-update.R @@ -0,0 +1,10 @@ +#! 
/usr/local/bin/Rscript +args <- system2( + "git", + c("diff", "origin/main", "--name-only"), + stdout = TRUE +) + +if (!any(args == "NEWS.md")) { + rlang::abort("Must have a news entry before pushing.") +} diff --git a/inst/rstudio/addins.dcf b/inst/rstudio/addins.dcf index 1735c5b11..fcef8f90d 100644 --- a/inst/rstudio/addins.dcf +++ b/inst/rstudio/addins.dcf @@ -1,6 +1,11 @@ -Name: Style package -Description: Pretty-print package source code -Binding: style_pkg +Name: Set style +Description: Prompt for and set the style transformers used by all styler addins +Binding: set_style_transformers +Interactive: true + +Name: Style selection +Description: Pretty-print selection +Binding: style_selection Interactive: true Name: Style active file @@ -8,7 +13,7 @@ Description: Pretty-print active file Binding: style_active_file Interactive: true -Name: Style selection -Description: Pretty-print selection -Binding: style_selection +Name: Style active package +Description: Pretty-print active package +Binding: style_active_pkg Interactive: true diff --git a/man/add_cache_block.Rd b/man/add_cache_block.Rd new file mode 100644 index 000000000..f9aef3410 --- /dev/null +++ b/man/add_cache_block.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/nest.R +\name{add_cache_block} +\alias{add_cache_block} +\title{Add the block id to a parse table} +\usage{ +add_cache_block(pd_nested) +} +\arguments{ +\item{pd_nested}{A top-level nest.} +} +\description{ +Must be after \code{\link[=nest_parse_data]{nest_parse_data()}} because requires a nested parse table as +input. +} +\keyword{internal} diff --git a/man/add_id_and_short.Rd b/man/add_id_and_short.Rd new file mode 100644 index 000000000..bd368b7a9 --- /dev/null +++ b/man/add_id_and_short.Rd @@ -0,0 +1,15 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/parse.R +\name{add_id_and_short} +\alias{add_id_and_short} +\title{Add column \code{pos_id} and \code{short}} +\usage{ +add_id_and_short(pd) +} +\arguments{ +\item{pd}{A flat parse table} +} +\description{ +Adds column \code{pos_id} and \code{short} to a flat parse table. 
+} +\keyword{internal} diff --git a/man/add_roxygen_mask.Rd b/man/add_roxygen_mask.Rd new file mode 100644 index 000000000..9141a5821 --- /dev/null +++ b/man/add_roxygen_mask.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/roxygen-examples-add-remove.R +\name{add_roxygen_mask} +\alias{add_roxygen_mask} +\title{Add the roxygen mask to code} +\usage{ +add_roxygen_mask(text, initial_text, example_type) +} +\arguments{ +\item{text}{Character vector with code.} + +\item{initial_text}{The roxygen code example to style with mask and +potentially ordinary comments.} + +\item{example_type}{Either 'examples' or 'examplesIf'.} +} +\description{ +This function compares \code{text} with \code{initial_text} to make sure a mask is only +added to roxygen comments, not ordinary comments +} +\keyword{internal} diff --git a/man/add_spaces_or_newlines.Rd b/man/add_spaces_or_newlines.Rd new file mode 100644 index 000000000..9499c0051 --- /dev/null +++ b/man/add_spaces_or_newlines.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils-strings.R +\name{add_spaces_or_newlines} +\alias{add_spaces_or_newlines} +\alias{add_newlines} +\alias{add_spaces} +\title{Concentrate newlines or spaces in a string} +\usage{ +add_newlines(n) + +add_spaces(n) +} +\arguments{ +\item{n}{Scalar indicating how many characters should be concentrated} +} +\value{ +A string. +} +\description{ +Concentrate newlines or spaces in a string +} +\keyword{internal} diff --git a/man/add_stylerignore.Rd b/man/add_stylerignore.Rd new file mode 100644 index 000000000..e9262cc31 --- /dev/null +++ b/man/add_stylerignore.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/stylerignore.R +\name{add_stylerignore} +\alias{add_stylerignore} +\title{Adds the stylerignore column} +\usage{ +add_stylerignore(pd_flat) +} +\arguments{ +\item{pd_flat}{A parse table.} +} +\description{ +If a token should be ignored, the column is set to \code{TRUE}, +otherwise to \code{FALSE}. +} +\details{ +A token is ignored iff one of the two conditions hold: +\itemize{ +\item it falls between a start and a stop marker whereas the markers are on +their own line. Which tokens are recognized as markers is controlled with +the R options \code{styler.ignore_start} and \code{styler.ignore_stop}. +\item it is not a comment, but the last token on the line is a marker. +} + +See examples in \link{stylerignore}. Note that you should reuse the stylerignore +column to compute switch points or similar and not a plain +\code{pd$text \%in\% option_read("styler.ignore_start")} because that will fail to +give correct switch points in the case stylerignore sequences are invalid. 
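For orientation, the marker mechanics described here use the defaults set in `.onLoad()` (`styler: off` / `styler: on`). A minimal, hand-made illustration (not taken from the package docs) of a sequence styler is meant to leave alone:

```r
x<-1                # gets reformatted to `x <- 1`
# styler: off
y  <-   c( 1,   2 ) # everything down to the stop marker is kept verbatim
# styler: on
z<-3                # styled again
```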
+} +\keyword{internal} diff --git a/man/add_token_terminal.Rd b/man/add_token_terminal.Rd new file mode 100644 index 000000000..31216d8a5 --- /dev/null +++ b/man/add_token_terminal.Rd @@ -0,0 +1,36 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/nest.R +\name{add_token_terminal} +\alias{add_token_terminal} +\alias{add_terminal_token_after} +\alias{add_terminal_token_before} +\alias{add_attributes_caching} +\title{Add information about previous / next token to each terminal} +\usage{ +add_terminal_token_after(pd_flat) + +add_terminal_token_before(pd_flat) + +add_attributes_caching(pd_flat, transformers, more_specs) +} +\arguments{ +\item{pd_flat}{A flat parse table.} + +\item{transformers}{Passed to \code{\link[=cache_make_key]{cache_make_key()}} to generate a key.} + +\item{more_specs}{Passed to \code{\link[=cache_make_key]{cache_make_key()}} to generate a key.} +} +\description{ +Note that this does function must be called in \code{\link[=compute_parse_data_nested]{compute_parse_data_nested()}} +and we cannot wait to initialize this attribute until \code{\link[=apply_transformers]{apply_transformers()}}, +where all other attributes are initialized with +\code{\link[=default_style_guide_attributes]{default_style_guide_attributes()}} (when using \code{\link[=tidyverse_style]{tidyverse_style()}}) because +for cached code, we don't build up the nested structure and leave it shallow +(to speed up things), see also \code{\link[=shallowify]{shallowify()}}. +} +\section{Functions}{ +\itemize{ +\item \code{add_attributes_caching()}: Initializes \code{newlines} and \code{lag_newlines}. + +}} +\keyword{internal} diff --git a/man/alignment_col1_all_named.Rd b/man/alignment_col1_all_named.Rd new file mode 100644 index 000000000..ba03141f1 --- /dev/null +++ b/man/alignment_col1_all_named.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/detect-alignment-utils.R +\name{alignment_col1_all_named} +\alias{alignment_col1_all_named} +\title{Checks if all arguments of column 1 are named} +\usage{ +alignment_col1_all_named(relevant_pd_by_line) +} +\arguments{ +\item{relevant_pd_by_line}{A list with parse tables of a multi-line call, +excluding first and last column.} +} +\description{ +Checks if all arguments of column 1 are named +} +\keyword{internal} diff --git a/man/alignment_drop_comments.Rd b/man/alignment_drop_comments.Rd new file mode 100644 index 000000000..9822dd6ca --- /dev/null +++ b/man/alignment_drop_comments.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/detect-alignment-utils.R +\name{alignment_drop_comments} +\alias{alignment_drop_comments} +\title{Remove all comment tokens} +\usage{ +alignment_drop_comments(pd_by_line) +} +\arguments{ +\item{pd_by_line}{A list, each element corresponding to a potentially +incomplete parse table that represents all token from one line.} +} +\description{ +Must be after split by line because it invalidates (lag)newlines, which are +used for splitting by line. 
+} +\keyword{internal} diff --git a/man/alignment_drop_last_expr.Rd b/man/alignment_drop_last_expr.Rd new file mode 100644 index 000000000..f67092d76 --- /dev/null +++ b/man/alignment_drop_last_expr.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/detect-alignment-utils.R +\name{alignment_drop_last_expr} +\alias{alignment_drop_last_expr} +\title{Remove last expression} +\usage{ +alignment_drop_last_expr(pds_by_line) +} +\description{ +In a \emph{nest}, if the last token is an \code{expr}, the \emph{nest} represents either +an if, while or for statement or a function call. We don't call about that +part, in fact it's important to remove it for alignment. See 'Examples'. +} +\examples{ +\dontshow{if (FALSE) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} +call( + x = 12, + y = 3, +) + +function(a = 33, + qq = 4) { + # we don't care about this part for alignment detection +} +\dontshow{\}) # examplesIf} +} +\keyword{internal} diff --git a/man/alignment_ensure_no_closing_brace.Rd b/man/alignment_ensure_no_closing_brace.Rd new file mode 100644 index 000000000..78710ea55 --- /dev/null +++ b/man/alignment_ensure_no_closing_brace.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/detect-alignment-utils.R +\name{alignment_ensure_no_closing_brace} +\alias{alignment_ensure_no_closing_brace} +\title{Ensure the closing brace of the call is removed} +\usage{ +alignment_ensure_no_closing_brace(pd_by_line, last_line_droped_early) +} +\arguments{ +\item{pd_by_line}{A list, each element corresponding to a potentially +incomplete parse table that represents all token from one line.} +} +\description{ +Must be after dropping comments because the closing brace is only guaranteed +to be the last token in that case. +} +\keyword{internal} diff --git a/man/alignment_ensure_trailing_comma.Rd b/man/alignment_ensure_trailing_comma.Rd new file mode 100644 index 000000000..70794c39d --- /dev/null +++ b/man/alignment_ensure_trailing_comma.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/detect-alignment-utils.R +\name{alignment_ensure_trailing_comma} +\alias{alignment_ensure_trailing_comma} +\title{Ensure last pd has a trailing comma} +\usage{ +alignment_ensure_trailing_comma(pd_by_line) +} +\arguments{ +\item{pd_by_line}{A list, each element corresponding to a potentially +incomplete parse table that represents all token from one line.} +} +\description{ +Must be after \code{\link[=alignment_ensure_no_closing_brace]{alignment_ensure_no_closing_brace()}} because if it comes after +\code{\link[=alignment_ensure_trailing_comma]{alignment_ensure_trailing_comma()}}, the last expression would not be a +brace, which would make removal complicated. 
+} +\keyword{internal} diff --git a/man/alignment_has_correct_spacing_around_comma.Rd b/man/alignment_has_correct_spacing_around_comma.Rd new file mode 100644 index 000000000..71fcdc9b8 --- /dev/null +++ b/man/alignment_has_correct_spacing_around_comma.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/detect-alignment-utils.R +\name{alignment_has_correct_spacing_around_comma} +\alias{alignment_has_correct_spacing_around_comma} +\title{Check if spacing around comma is correct} +\usage{ +alignment_has_correct_spacing_around_comma(pd_sub) +} +\arguments{ +\item{pd_sub}{The subset of a parse table corresponding to one line.} +} +\description{ +At least one space after comma, none before, for all but the last comma on +the line +} +\keyword{internal} diff --git a/man/alignment_has_correct_spacing_around_eq_sub.Rd b/man/alignment_has_correct_spacing_around_eq_sub.Rd new file mode 100644 index 000000000..173e5cb87 --- /dev/null +++ b/man/alignment_has_correct_spacing_around_eq_sub.Rd @@ -0,0 +1,15 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/detect-alignment-utils.R +\name{alignment_has_correct_spacing_around_eq_sub} +\alias{alignment_has_correct_spacing_around_eq_sub} +\title{Check if spacing around \code{=} is correct} +\usage{ +alignment_has_correct_spacing_around_eq_sub(pd_sub) +} +\arguments{ +\item{pd_sub}{The subset of a parse table corresponding to one line.} +} +\description{ +At least one space around \code{EQ_SUB} +} +\keyword{internal} diff --git a/man/alignment_serialize.Rd b/man/alignment_serialize.Rd new file mode 100644 index 000000000..ce1c9113a --- /dev/null +++ b/man/alignment_serialize.Rd @@ -0,0 +1,13 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/detect-alignment-utils.R +\name{alignment_serialize} +\alias{alignment_serialize} +\title{Serialize text from a parse table} +\usage{ +alignment_serialize(pd_sub) +} +\description{ +Line breaks are ignored as they are expected to be checked in +\code{\link[=token_is_on_aligned_line]{token_is_on_aligned_line()}}. 
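These helpers feed alignment detection; as a sketch of the kind of input they are meant to recognize (my own example, see the detect-alignment vignette listed in `_pkgdown.yaml` for the authoritative ones), a call where the author padded spaces so the values line up is intended to be detected as aligned and left as is:

```r
list(
  x        = 1,
  long_arg = 2,
  z        = 333
)
```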
+} +\keyword{internal} diff --git a/man/alignment_serialize_column.Rd b/man/alignment_serialize_column.Rd new file mode 100644 index 000000000..1a7ccc2e3 --- /dev/null +++ b/man/alignment_serialize_column.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/detect-alignment-utils.R +\name{alignment_serialize_column} +\alias{alignment_serialize_column} +\title{Serialize all lines for a given column} +\usage{ +alignment_serialize_column(relevant_pd_by_line, column) +} +\arguments{ +\item{relevant_pd_by_line}{A list with parse tables of a multi-line call, +excluding first and last column.} + +\item{column}{The index of the column to serialize.} +} +\description{ +Serialize all lines for a given column +} +\keyword{internal} diff --git a/man/alignment_serialize_line.Rd b/man/alignment_serialize_line.Rd new file mode 100644 index 000000000..aa932befc --- /dev/null +++ b/man/alignment_serialize_line.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/detect-alignment-utils.R +\name{alignment_serialize_line} +\alias{alignment_serialize_line} +\title{Serialize one line for a column} +\usage{ +alignment_serialize_line(relevant_pd_by_line, column) +} +\arguments{ +\item{relevant_pd_by_line}{A list with parse tables of a multi-line call, +excluding first and last column.} + +\item{column}{The index of the column to serialize.} +} +\description{ +Serialize one line for a column +} +\keyword{internal} diff --git a/man/apply_ref_indention.Rd b/man/apply_ref_indention.Rd new file mode 100644 index 000000000..cf4395170 --- /dev/null +++ b/man/apply_ref_indention.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/reindent.R +\name{apply_ref_indention} +\alias{apply_ref_indention} +\title{Apply reference indention to tokens} +\usage{ +apply_ref_indention(flattened_pd) +} +\arguments{ +\item{flattened_pd}{A flattened parse table} +} +\description{ +Applies the reference indention created with functions +\code{\link[=update_indention_ref]{update_indention_ref()}} to the flattened parse table. The indention +is applied to all token that inherit from a reference token sequentially, +i.e. by looping over the target tokens. +} +\keyword{internal} diff --git a/man/apply_ref_indention_one.Rd b/man/apply_ref_indention_one.Rd new file mode 100644 index 000000000..778ad3b37 --- /dev/null +++ b/man/apply_ref_indention_one.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/reindent.R +\name{apply_ref_indention_one} +\alias{apply_ref_indention_one} +\title{Applying reference indention of a target token} +\usage{ +apply_ref_indention_one(flattened_pd, target_token) +} +\arguments{ +\item{flattened_pd}{A flattened parse table} + +\item{target_token}{The index of the token from which the indention level +should be applied to other tokens.} +} +\description{ +Applies the indention level of \code{target_token} to all tokens that have +\code{target_token} as a reference. This includes adding spaces to the first +tokens on a line and updating the column \code{col1} and \code{col2} for all tokens +on that line so they are kept updated. 
+} +\keyword{internal} diff --git a/man/apply_stylerignore.Rd b/man/apply_stylerignore.Rd new file mode 100644 index 000000000..2107100a0 --- /dev/null +++ b/man/apply_stylerignore.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/stylerignore.R +\name{apply_stylerignore} +\alias{apply_stylerignore} +\title{Ensure correct positional information for stylerignore expressions} +\usage{ +apply_stylerignore(flattened_pd) +} +\arguments{ +\item{flattened_pd}{A flattened parse table.} +} +\description{ +Ensure correct positional information for stylerignore expressions +} +\details{ +\itemize{ +\item Get the positional information for tokens with a stylerignore tag from +\code{env_current}, which recorded that information from the input text. +\item Replace the computed lag_newlines and lag_spaces information in the parse +table with this information. +\item Because we may remove or add tokens when applying the transformers, it is +not save to merge via the pos_id of each token in a stylerignore sequence. +We assume that the start and stop markers are the same after styling, so we +join all tokens that were initially in a stylerignore sequence via the +first pos_id in that stylerignore sequence. +} +} +\keyword{internal} diff --git a/man/apply_transformers.Rd b/man/apply_transformers.Rd new file mode 100644 index 000000000..96689bf1d --- /dev/null +++ b/man/apply_transformers.Rd @@ -0,0 +1,34 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/transform-files.R +\name{apply_transformers} +\alias{apply_transformers} +\title{Apply transformers to a parse table} +\usage{ +apply_transformers(pd_nested, transformers) +} +\arguments{ +\item{pd_nested}{A nested parse table.} + +\item{transformers}{A list of \emph{named} transformer functions} +} +\description{ +The column \code{multi_line} is updated (after the line break information is +modified) and the rest of the transformers are applied afterwards, +The former requires two pre visits and one post visit. +} +\details{ +The order of the transformations is: +\itemize{ +\item Initialization (must be first). +\item Line breaks (must be before spacing due to indention). +\item Update of newline and multi-line attributes (must not change afterwards, +hence line breaks must be modified first). +\item spacing rules (must be after line-breaks and updating newlines and +multi-line). +\item indention. +\item token manipulation / replacement (is last since adding and removing tokens +will invalidate columns token_after and token_before). +\item Update indention reference (must be after line breaks). 
+} +} +\keyword{internal} diff --git a/man/assert_filetype.Rd b/man/assert_filetype.Rd new file mode 100644 index 000000000..eebbc7fa4 --- /dev/null +++ b/man/assert_filetype.Rd @@ -0,0 +1,15 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/set-assert-args.R +\name{assert_filetype} +\alias{assert_filetype} +\title{Make sure all supplied file types are allowed} +\usage{ +assert_filetype(lowercase_filetype) +} +\arguments{ +\item{lowercase_filetype}{A vector with file types to check, all lower case.} +} +\description{ +Make sure all supplied file types are allowed +} +\keyword{internal} diff --git a/man/assert_text.Rd b/man/assert_text.Rd new file mode 100644 index 000000000..b0037fbdd --- /dev/null +++ b/man/assert_text.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/set-assert-args.R +\name{assert_text} +\alias{assert_text} +\title{Assert text to be of positive length and replace it with the empty +string otherwise.} +\usage{ +assert_text(text) +} +\arguments{ +\item{text}{The input to style.} +} +\description{ +Assert text to be of positive length and replace it with the empty +string otherwise. +} +\keyword{internal} diff --git a/man/assert_tokens.Rd b/man/assert_tokens.Rd new file mode 100644 index 000000000..7dca8d633 --- /dev/null +++ b/man/assert_tokens.Rd @@ -0,0 +1,15 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/set-assert-args.R +\name{assert_tokens} +\alias{assert_tokens} +\title{Check token validity} +\usage{ +assert_tokens(tokens) +} +\arguments{ +\item{tokens}{Tokens to check.} +} +\description{ +Check whether one or more tokens exist and have a unique token-text mapping +} +\keyword{internal} diff --git a/man/assert_transformers.Rd b/man/assert_transformers.Rd new file mode 100644 index 000000000..c5b9e08b7 --- /dev/null +++ b/man/assert_transformers.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/set-assert-args.R +\name{assert_transformers} +\alias{assert_transformers} +\title{Assert the transformers} +\usage{ +assert_transformers(transformers) +} +\arguments{ +\item{transformers}{A list of transformer functions that operate on flat +parse tables.} +} +\description{ +Actually only assert name and version of style guide in order to make sure +caching works correctly. +} +\keyword{internal} diff --git a/man/bind_with_child.Rd b/man/bind_with_child.Rd new file mode 100644 index 000000000..13166d128 --- /dev/null +++ b/man/bind_with_child.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/relevel.R +\name{bind_with_child} +\alias{bind_with_child} +\title{Bind a parse table with one of its children} +\usage{ +bind_with_child(pd_nested, pos) +} +\arguments{ +\item{pd_nested}{A nested parse table.} + +\item{pos}{The position of the child to bind.} +} +\description{ +Bind a parse table with one of its children and return parse table, ordered +according to the appearance of the tokens. 
+} +\keyword{internal} diff --git a/man/cache_activate.Rd b/man/cache_activate.Rd new file mode 100644 index 000000000..5187937a0 --- /dev/null +++ b/man/cache_activate.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/ui-caching.R +\name{cache_activate} +\alias{cache_activate} +\alias{cache_deactivate} +\title{Activate or deactivate the styler cache} +\usage{ +cache_activate(cache_name = NULL, verbose = !getOption("styler.quiet", FALSE)) + +cache_deactivate(verbose = !getOption("styler.quiet", FALSE)) +} +\arguments{ +\item{cache_name}{The name of the styler cache to use. If +\code{NULL}, the option "styler.cache_name" is considered which defaults to +the version of styler used.} + +\item{verbose}{Whether or not to print an informative message about what the +function is doing.} +} +\description{ +Helper functions to control the behavior of caching. Simple wrappers around +\code{\link[base:options]{base::options()}}. +} +\seealso{ +Other cache managers: +\code{\link{cache_clear}()}, +\code{\link{cache_info}()}, +\code{\link{caching}} +} +\concept{cache managers} diff --git a/man/cache_by_expression.Rd b/man/cache_by_expression.Rd new file mode 100644 index 000000000..3ae78e774 --- /dev/null +++ b/man/cache_by_expression.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils-cache.R +\name{cache_by_expression} +\alias{cache_by_expression} +\title{Cache text} +\usage{ +cache_by_expression(text, transformers, more_specs) +} +\arguments{ +\item{text}{A character vector with one or more expressions.} + +\item{transformers}{A list of transformer functions, because we can only +know if text is already correct if we know which transformer function it +should be styled with.} + +\item{more_specs}{A named vector coercible to character that determines the +styling but are style guide independent, such as \code{include_roxygen_examples} +or \code{base_indention}.} +} +\description{ +Splits \code{text} into expressions and adds these to the cache. Note that +top-level comments are \strong{not} cached because caching and in particular +checking if they are cached is too expensive. Comments may be cached as part +of the whole text (as opposed to on an expression by expression basis) using +\code{cache_write()} directly. Also, we must not cache stylerignore sequence, +because we might see the same expression that does not comply with the style +guide outside a stylerignore sequence and wrongly think we should leave it as +is. +} +\keyword{internal} diff --git a/man/cache_clear.Rd b/man/cache_clear.Rd new file mode 100644 index 000000000..3a97a3bff --- /dev/null +++ b/man/cache_clear.Rd @@ -0,0 +1,32 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/ui-caching.R +\name{cache_clear} +\alias{cache_clear} +\title{Clear the cache} +\usage{ +cache_clear(cache_name = NULL, ask = TRUE) +} +\arguments{ +\item{cache_name}{The name of the styler cache to use. If +\code{NULL}, the option "styler.cache_name" is considered which defaults to +the version of styler used.} + +\item{ask}{Whether or not to interactively ask the user again.} +} +\description{ +Clears the cache that stores which files are already styled. You won't be +able to undo this. Note that the file corresponding to the cache (a folder +on your file system) won't be deleted, but it will be empty after calling +\code{cache_clear}. 
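Putting the cache managers documented in this patch together, a typical interactive session might look like this (functions and arguments as documented above; output omitted):

```r
styler::cache_info(format = "lucid") # where the active cache lives and its contents
styler::cache_activate()             # activate the default, version-named cache
styler::cache_clear(ask = FALSE)     # empty the active cache without a prompt
styler::cache_deactivate()           # stop caching for the rest of the session
```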
+} +\details{ +Each version of styler has its own cache by default, because styling is +potentially different with different versions of styler. +} +\seealso{ +Other cache managers: +\code{\link{cache_activate}()}, +\code{\link{cache_info}()}, +\code{\link{caching}} +} +\concept{cache managers} diff --git a/man/cache_find_block.Rd b/man/cache_find_block.Rd new file mode 100644 index 000000000..9d14e2b36 --- /dev/null +++ b/man/cache_find_block.Rd @@ -0,0 +1,39 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/transform-block.R +\name{cache_find_block} +\alias{cache_find_block} +\title{Find the groups of expressions that should be processed together} +\usage{ +cache_find_block(pd) +} +\arguments{ +\item{pd}{A top-level nest.} +} +\description{ +Find the groups of expressions that should be processed together +} +\details{ +We want blocks to be formed according to these rules: +\itemize{ +\item Blocks should contain either cached or uncached expressions only. If a +block contains cached expressions only, it does not have to be processed +and can be returned immediately. If a block contains uncached expressions, +it makes sense to put as many uncached expression in it, since processing +one bigger block has less overhead than processing many smaller blocks. +\item Multiple expressions can sit on one row, e.g. in-line comment and commands +separated with ";". This creates a problem when processing each expression +separately because when putting them together, we need complicated handling +of line breaks between them, as it is not \emph{a priori} clear that there is a +line break separating them. To avoid this, we put top-level expressions +that sit on the same line into one block, so the assumption that there is a +line break between each block of expressions holds. +\item All expressions in a stylerignore sequence must be in the same block. If +that's not the case, the first expression in a block might not be a +top-level terminal, but another top-level expression. +\code{\link[=apply_stylerignore]{apply_stylerignore()}} joins \code{env_current$stylerignore}, which contains +only terminals, with the first expression in a stylerignore sequence, based +on the first \code{pos_id} in that stylerignore sequence +(\code{first_pos_id_in_segment}). +} +} +\keyword{internal} diff --git a/man/cache_find_path.Rd b/man/cache_find_path.Rd new file mode 100644 index 000000000..3d7d0c673 --- /dev/null +++ b/man/cache_find_path.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils-cache.R +\name{cache_find_path} +\alias{cache_find_path} +\title{Where is the cache?} +\usage{ +cache_find_path(cache_name = NULL) +} +\arguments{ +\item{cache_name}{The name of the styler cache to use. If +\code{NULL}, the option "styler.cache_name" is considered which defaults to +the version of styler used.} +} +\description{ +Finds the path to the cache and creates it if it does not exist. +} +\keyword{internal} diff --git a/man/cache_info.Rd b/man/cache_info.Rd new file mode 100644 index 000000000..363336316 --- /dev/null +++ b/man/cache_info.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/ui-caching.R +\name{cache_info} +\alias{cache_info} +\title{Show information about the styler cache} +\usage{ +cache_info(cache_name = NULL, format = "both") +} +\arguments{ +\item{cache_name}{The name of the cache for which to show details. If +\code{NULL}, the active cache is used. 
If none is active the cache corresponding +to the installed styler version is used.} + +\item{format}{Either "lucid" for a summary emitted with \code{\link[base:cat]{base::cat()}}, +"tabular" for a tabular summary from \code{\link[base:file.info]{base::file.info()}} or "both" for +both.} +} +\description{ +Gives information about the cache. Note that the size consumed by the cache +will always be displayed as zero because all the cache does is creating an +empty file of size 0 bytes for every cached expression. The inode is +excluded from this displayed size but negligible. +} +\seealso{ +Other cache managers: +\code{\link{cache_activate}()}, +\code{\link{cache_clear}()}, +\code{\link{caching}} +} +\concept{cache managers} diff --git a/man/cache_is_activated.Rd b/man/cache_is_activated.Rd new file mode 100644 index 000000000..ca3031b78 --- /dev/null +++ b/man/cache_is_activated.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils-cache.R +\name{cache_is_activated} +\alias{cache_is_activated} +\title{Check if a cache is activated} +\usage{ +cache_is_activated(cache_name = NULL) +} +\arguments{ +\item{cache_name}{The name of the cache to check. If \code{NULL}, we check if +any cache is activated. If not \code{NULL}, we check if a specific cache is +activated.} +} +\description{ +Check if a cache is activated +} +\keyword{internal} diff --git a/man/cache_make_key.Rd b/man/cache_make_key.Rd new file mode 100644 index 000000000..ec1cdb9d8 --- /dev/null +++ b/man/cache_make_key.Rd @@ -0,0 +1,80 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils-cache.R +\name{cache_make_key} +\alias{cache_make_key} +\title{Make a key for \code{R.cache}} +\usage{ +cache_make_key(text, transformers, more_specs) +} +\arguments{ +\item{text}{Code to create a cache for. This should be styled text, as the +approach used by styler does not cache input, but styled code.} + +\item{transformers}{A list of transformer functions, because we can only +know if text is already correct if we know which transformer function it +should be styled with.} + +\item{more_specs}{A named vector coercible to character that determines the +styling but are style guide independent, such as \code{include_roxygen_examples} +or \code{base_indention}.} +} +\description{ +This is used to determine if caching already corresponds to a style guide. +} +\details{ +We need to compare: +\itemize{ +\item text to style. Will be passed to hash function as is. +\item styler version. Not an issue because for every version of styler, we build +a new cache. +\item transformers. Cannot easily hash them because two environments won't be +identical even if they contain the same objects (see 'Experiments'). Simple +\code{as.character(transformers)} will not consider infinitively recursive +code dependencies. +To fix this, transformers must have names and version number as described +in \code{\link[=create_style_guide]{create_style_guide()}}. Now, the only way to fool the cache invalidation +is to replace a transformer with the same function body (but changing +the function definition of the functions called in that body) interactively +without changing version number of name at the same time. +Remaining problem: \code{purrr::partial()} calls will render generic code, e.g. +see \code{as.character(list(purrr::partial(sum, x = 4)))}. For that reason, +all arguments passed to a \code{purrr::partial()} call must be put in the +style guide under \code{more_specs_style_guide}. 
+} +} +\section{Experiments}{ + + +There is unexplainable behavior in conjunction with hashing and +environments: +\itemize{ +\item Functions created with \code{purrr::partial()} are not identical when compared +with \code{identical()} +(\href{https://stackoverflow.com/questions/58656033/when-are-purrrpartial-ized-functions-identical}{StackOverflow}) +\item except when they have the exact same parent environment, which must be an +object created and then passed to \code{purrr::partial(.env = ...)}, not +created in-place. +\item \code{purrr::partial()} seems to ignore \code{.env} after version 0.2.5, so until +this is fixed, we'd have to work with version 0.2.5. +\item Our caching backend package, \code{R.cache}, uses +\code{R.cache:::getChecksum.default} (which uses \code{digest::digest()}) to hash the +input. The latter does not seem to care if the environments are exactly +equal (see 'Examples'). +\item However, under some circumstances, it does: Commit 9c94c022 (if not +overwritten / rebased by now) contains a reprex. Otherwise, search for +43219ixmypi in commit messages and restore this commit to reproduce the +behavior. +} +} + +\examples{ +add <- function(x, y) { + x + y +} +add1 <- purrr::partial(add, x = 1) +add2 <- purrr::partial(add, x = 1) +identical(add1, add2) +identical(digest::digest(add1), digest::digest(add2)) +identical(digest::digest(styler::tidyverse_style()), digest::digest(styler::tidyverse_style())) +} +\keyword{internal} diff --git a/man/cache_more_specs.Rd b/man/cache_more_specs.Rd new file mode 100644 index 000000000..05f78d2de --- /dev/null +++ b/man/cache_more_specs.Rd @@ -0,0 +1,13 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils-cache.R +\name{cache_more_specs} +\alias{cache_more_specs} +\title{Create more specs} +\usage{ +cache_more_specs(include_roxygen_examples, base_indention) +} +\description{ +Syntactic sugar for creating more specs. This is useful when we want to add +more arguments (because we can search for this function in the source code). +} +\keyword{internal} diff --git a/man/cache_write.Rd b/man/cache_write.Rd new file mode 100644 index 000000000..703ba5328 --- /dev/null +++ b/man/cache_write.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils-cache.R +\name{cache_write} +\alias{cache_write} +\title{Write to the cache} +\usage{ +cache_write(text, transformers, more_specs) +} +\arguments{ +\item{text}{Code to create a cache for. This should be styled text, as the +approach used by styler does not cache input, but styled code.} + +\item{transformers}{A list of transformer functions, because we can only +know if text is already correct if we know which transformer function it +should be styled with.} + +\item{more_specs}{A named vector coercible to character that determines the +styling but are style guide independent, such as \code{include_roxygen_examples} +or \code{base_indention}.} +} +\description{ +Write to the cache +} +\keyword{internal} diff --git a/man/caching.Rd b/man/caching.Rd new file mode 100644 index 000000000..59a7db6cb --- /dev/null +++ b/man/caching.Rd @@ -0,0 +1,75 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/ui-caching.R +\name{caching} +\alias{caching} +\title{Remember the past to be quicker in the future} +\description{ +Caching makes styler faster on repeated styling and is shared across all APIs +(e.g. \code{style_text()} and Addin). 
That means if you style code that already +complies to a style guide and you have previously styled that code, it will +be quicker. +} +\section{Configuring the cache}{ + + +To comply with the CRAN policy, \{styler\} will by default clean up cache files +that are older than 6 days. This implies that you loose the benefit of the cache +for the files not styled in the last 6 days. + +If you want to avoid this, i.e., if you want the cache to last longer, you can use the +R option \code{styler.cache_root} to opt for an indefinitely long-lived cache by setting it to +\code{options(styler.cache_root = "styler-perm")}. + +If you are happy with the cache being cleared after 6 days, you can confirm the default and +silence this message by setting it instead to \code{options(styler.cache_root = "styler")}. + +You can make this change in your \code{.Rprofile} using \code{usethis::edit_r_profile()}. +} + +\section{Manage the cache}{ + +See \code{\link[=cache_info]{cache_info()}},\code{\link[=cache_activate]{cache_activate()}} or \code{\link[=cache_clear]{cache_clear()}} for utilities to +manage the cache. You can deactivate it altogether with \code{\link[=cache_deactivate]{cache_deactivate()}}. +Since we leverage \code{{R.cache}} to manage the cache, you can also use any +\code{{R.cache}} functionality to manipulate it. + +In some cases, you want to use a non-standard cache location. In +that situation, you can set the path to the cache with the R option +\code{R.cache.rootPath} or the environment variable \code{R_CACHE_ROOTPATH} to an +existent path before you call the styler API. +} + +\section{Invalidation}{ + +The cache is specific to a version of styler by default, because different +versions potentially format code differently. This means after upgrading +styler or a style guide you use, the cache will be re-built. +} + +\section{Mechanism and size}{ + +The cache works by storing hashed output code as a whole and by expression, +which is why it takes zero space on disk (the cache is a directory with +empty files which have the hash of output code as name). + +The cache literally takes zero space on your disk, only the inode, and you +can always manually clean up with \code{\link[=cache_clear]{cache_clear()}} or just go to the +directory where the cache lives (find it with \code{\link[=cache_info]{cache_info()}}) and manually +delete files. +} + +\section{Using a cache for styler in CI/CD}{ + +If you want to set up caching in a CI/CD pipeline, we suggest to set the +\code{{R.cache}} root path to a directory for which you have the cache enabled. +This can often be set in config files of CI/CD tools, e.g. see the +\href{https://docs.travis-ci.com/user/caching}{Travis documentation on caching}. +} + +\seealso{ +Other cache managers: +\code{\link{cache_activate}()}, +\code{\link{cache_clear}()}, +\code{\link{cache_info}()} +} +\concept{cache managers} diff --git a/man/calls_sys.Rd b/man/calls_sys.Rd new file mode 100644 index 000000000..14899a3dc --- /dev/null +++ b/man/calls_sys.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils.R +\name{calls_sys} +\alias{calls_sys} +\title{Invoke a system command} +\usage{ +calls_sys(sys_call, ...) +} +\arguments{ +\item{sys_call}{The call to be executed.} + +\item{...}{Arguments passed to \code{\link[=shell]{shell()}} or \code{\link[=system]{system()}}.} +} +\description{ +Wraps a system command into \code{\link[=shell]{shell()}} or \code{\link[=system]{system()}}, depending on the +operating system. 
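A rough sketch of that dispatch (illustrative only, not the internal implementation):

run_command_sketch <- function(command, ...) {
  # shell() only exists on Windows; fall back to system() elsewhere
  if (.Platform$OS.type == "windows") {
    shell(command, ...)
  } else {
    system(command, ...)
  }
}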
+} +\keyword{internal} diff --git a/man/catch_style_file_output.Rd b/man/catch_style_file_output.Rd new file mode 100644 index 000000000..2a2d2750f --- /dev/null +++ b/man/catch_style_file_output.Rd @@ -0,0 +1,31 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/testing-public-api.R +\name{catch_style_file_output} +\alias{catch_style_file_output} +\title{Capture and post-process the output of \code{style_file} without causing side +effects} +\usage{ +catch_style_file_output(file_in) +} +\arguments{ +\item{file_in}{A vector with paths, relative to \code{tests/testthat}, that point +to the reference files.} +} +\value{ +A list. Each element is a character vector with the captured output of +\code{\link[=style_file]{style_file()}} called on +\code{file_in}, run in a temp dir to avoid side effects on the input file (because +the next time the test would run, the file would not need styling). The +styling is carried out with a temporary working directory change to keep +filenames relative and avoid portability issues in the exact output +comparison which is needed when the system that runs the unit testing (CI) +is a different system than the one that created the reference value. +This also implies that the ruler width, which depends on the path +length, will again have the same width on all systems and is independent of +how many characters the path of the temporary directory has. +} +\description{ +Capture and post-process the output of \code{style_file} without causing side +effects +} +\keyword{internal} diff --git a/man/choose_indention.Rd b/man/choose_indention.Rd new file mode 100644 index 000000000..4d748c705 --- /dev/null +++ b/man/choose_indention.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/visit.R +\name{choose_indention} +\alias{choose_indention} +\title{Choose the indention method for the tokens} +\usage{ +choose_indention(flattened_pd, use_raw_indention) +} +\arguments{ +\item{flattened_pd}{A nested parse table that was turned into a flat parse +table using \code{\link[=extract_terminals]{extract_terminals()}}.} + +\item{use_raw_indention}{Boolean indicating whether or not the raw indention +should be used.} +} +\description{ +Either use the raw indention, which is just the spaces computed between +the first token on a new line and the token before it, or use the indention +computed according to the transformer used, which is stored in the column +\code{indention}. +All indention information will be combined with the space information for +the first token on a new line. +If \code{use_raw_indention} is set, information in the column \code{indention} will +be discarded anyway. If it is not set, the first token on a new line will +"inherit" the indention of the whole line. +The column \code{indention} will be removed since all information necessary is +contained in the spacing information of the first token on a new line and +the position of the tokens will not be changed anymore at this stage.
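The rule can be sketched on a toy flattened parse table (column names follow the description above; this is not the actual implementation):

flat <- data.frame(
  lag_newlines = c(0L, 1L, 0L),
  lag_spaces   = c(0L, 4L, 1L),
  indention    = c(0L, 2L, 2L)
)
use_raw_indention <- FALSE
if (!use_raw_indention) {
  first_on_line <- flat$lag_newlines > 0L
  # the first token on a new line "inherits" the computed indention
  flat$lag_spaces[first_on_line] <- flat$indention[first_on_line]
}
flat$indention <- NULL # the spacing of the first token on a line now carries all information
flat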
+} +\keyword{internal} diff --git a/man/combine_children.Rd b/man/combine_children.Rd new file mode 100644 index 000000000..8e4f68a1e --- /dev/null +++ b/man/combine_children.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/nest.R +\name{combine_children} +\alias{combine_children} +\title{Combine child and internal child} +\usage{ +combine_children(child, internal_child) +} +\arguments{ +\item{child}{A parse table or \code{NULL}.} + +\item{internal_child}{A parse table or \code{NULL}.} +} +\description{ +Binds two parse tables together and arranges them so that the tokens are in +the correct order. +} +\details{ +Essentially, this is a wrapper around vctrs::vec_rbind()], but +returns \code{NULL} if the result of vctrs::vec_rbind()] is a data frame with +zero rows. +} +\keyword{internal} diff --git a/man/communicate_summary.Rd b/man/communicate_summary.Rd new file mode 100644 index 000000000..b0c49b96f --- /dev/null +++ b/man/communicate_summary.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/communicate.R +\name{communicate_summary} +\alias{communicate_summary} +\title{Communicate the summary of styling} +\usage{ +communicate_summary(changed, ruler_width) +} +\arguments{ +\item{changed}{Boolean with indicating for each file whether or not it has +been changed.} + +\item{ruler_width}{Integer used to determine the width of the ruler.} +} +\description{ +Communicate the summary of styling +} +\keyword{internal} diff --git a/man/communicate_warning.Rd b/man/communicate_warning.Rd new file mode 100644 index 000000000..869901a88 --- /dev/null +++ b/man/communicate_warning.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/communicate.R +\name{communicate_warning} +\alias{communicate_warning} +\title{Communicate a warning if necessary} +\usage{ +communicate_warning(changed, transformers) +} +\arguments{ +\item{changed}{Boolean with indicating for each file whether or not it has +been changed.} + +\item{transformers}{The list of transformer functions used for styling. +Needed for reverse engineering the scope.} +} +\description{ +If round trip verification was not possible, issue a warning to review the +changes carefully. +} +\keyword{internal} diff --git a/man/compute_indent_indices.Rd b/man/compute_indent_indices.Rd new file mode 100644 index 000000000..ff16421e4 --- /dev/null +++ b/man/compute_indent_indices.Rd @@ -0,0 +1,47 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/indent.R +\name{compute_indent_indices} +\alias{compute_indent_indices} +\title{Compute the indices that need indention} +\usage{ +compute_indent_indices(pd, token_opening, token_closing = NULL) +} +\arguments{ +\item{pd}{A parse table.} + +\item{token_opening}{A character vector with tokens that could induce +indention for subsequent tokens.} + +\item{token_closing}{A character vector with tokens that could terminate +indention for previous tokens. If \code{NULL} (the default), indention should +end with the last token in the parse table.} +} +\description{ +Based on \code{token}, find the rows in \code{pd} that need to be indented. +} +\details{ +Two cases are fundamentally different: +\itemize{ +\item Indention based on operators (e.g '+'), where all subsequent tokens should +be indented. +\item Indention based on braces (e.g. '('), where just the tokens between the +opening and the closing brace have to be indented. 
+} + +To cover the second case, we need \code{token_closing} because it cannot be taken +for granted that \code{token_closing} is always the last token in \code{pd}. For +example in if-else expressions, this is not the case and indenting +everything between '(' and the penultimate token would result in the wrong +formatting. +} +\section{Handing of \code{[[}}{ + +Since text \code{[[} has token \code{"LBB"} and text \verb{]]} is parsed as two independent +\verb{]} (see 'Examples'), indention has to stop at the first \verb{]}. +} + +\examples{ +styler:::parse_text("a[1]") +styler:::parse_text("a[[1\n]]") +} +\keyword{internal} diff --git a/man/compute_parse_data_nested.Rd b/man/compute_parse_data_nested.Rd new file mode 100644 index 000000000..0c6f4768d --- /dev/null +++ b/man/compute_parse_data_nested.Rd @@ -0,0 +1,35 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/nest.R +\name{compute_parse_data_nested} +\alias{compute_parse_data_nested} +\title{Obtain a nested parse table from a character vector} +\usage{ +compute_parse_data_nested( + text, + transformers = tidyverse_style(), + more_specs = NULL +) +} +\arguments{ +\item{text}{The text to parse.} + +\item{transformers}{Passed to \code{\link[=cache_make_key]{cache_make_key()}} to generate a key.} + +\item{more_specs}{Passed to \code{\link[=cache_make_key]{cache_make_key()}} to generate a key.} +} +\value{ +A nested parse table. See \code{\link[=tokenize]{tokenize()}} for details on the columns +of the parse table. +} +\description{ +Parses \code{text} to a flat parse table and subsequently changes its +representation into a nested parse table with \code{\link[=nest_parse_data]{nest_parse_data()}}. +} +\examples{ +code <- " +ab <- 1L # some comment +abcdef <- 2L +" +writeLines(code) +compute_parse_data_nested(code) +} diff --git a/man/construct_out.Rd b/man/construct_out.Rd new file mode 100644 index 000000000..7bbc6a919 --- /dev/null +++ b/man/construct_out.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/testing.R +\name{construct_out} +\alias{construct_out} +\title{Construct *-out.R from a *-in.R} +\usage{ +construct_out(in_paths) +} +\arguments{ +\item{in_paths}{A character vector that denotes paths to *-in.R files.} +} +\description{ +Multiple *-in.R files can have the same *-out.R file since to create the +*-out.R file, everything after the first dash is replaced by *-out.R. +} +\examples{ +styler:::construct_out(c( + "path/to/file/first-in.R", + "path/to/file/first-extended-in.R" +)) +} +\keyword{internal} diff --git a/man/construct_tree.Rd b/man/construct_tree.Rd new file mode 100644 index 000000000..969a20ac1 --- /dev/null +++ b/man/construct_tree.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/testing.R +\name{construct_tree} +\alias{construct_tree} +\title{Construct paths of a tree object given the paths of *-in.R files} +\usage{ +construct_tree(in_paths, suffix = "_tree") +} +\arguments{ +\item{in_paths}{Character vector of *-in.R files.} + +\item{suffix}{Suffix for the tree object.} +} +\description{ +Construct paths of a tree object given the paths of *-in.R files +} +\keyword{internal} diff --git a/man/construct_vertical.Rd b/man/construct_vertical.Rd index 85e3d471f..02d5f3d11 100644 --- a/man/construct_vertical.Rd +++ b/man/construct_vertical.Rd @@ -13,3 +13,4 @@ construct_vertical(x) Sole purpose of the class vertical is to have a print method that aligns the output vertically. 
} +\keyword{internal} diff --git a/man/contains_else_expr_that_needs_braces.Rd b/man/contains_else_expr_that_needs_braces.Rd new file mode 100644 index 000000000..f17990bf9 --- /dev/null +++ b/man/contains_else_expr_that_needs_braces.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/expr-is.R +\name{contains_else_expr_that_needs_braces} +\alias{contains_else_expr_that_needs_braces} +\title{Check whether an else expression needs braces} +\usage{ +contains_else_expr_that_needs_braces(pd) +} +\arguments{ +\item{pd}{A parse table} +} +\description{ +Checks whether an else expression in a nest needs braces. Note that for +if-else-if expressions, there is no need to add braces since the if in +else-if will be visited separately with the visitor. This applies to all +conditional statements with more than one alternative. +} +\keyword{internal} diff --git a/man/context_to_terminals.Rd b/man/context_to_terminals.Rd new file mode 100644 index 000000000..d390c6b8b --- /dev/null +++ b/man/context_to_terminals.Rd @@ -0,0 +1,39 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/visit.R +\name{context_to_terminals} +\alias{context_to_terminals} +\title{Propagate context to terminals} +\usage{ +context_to_terminals( + pd_nested, + outer_lag_newlines, + outer_indent, + outer_spaces, + outer_indention_refs +) +} +\arguments{ +\item{pd_nested}{A nested parse table.} + +\item{outer_lag_newlines}{The lag_newlines to be propagated inwards.} + +\item{outer_indent}{The indention depth to be propagated inwards.} + +\item{outer_spaces}{The number of spaces to be propagated inwards.} + +\item{outer_indention_refs}{The reference pos id that should be propagated +inwards.} +} +\value{ +An updated parse table. +} +\description{ +Implements a very specific pre-visiting scheme, namely to propagate +indention, spaces and lag_newlines to inner token to terminals. This means +that information regarding indention, line breaks and spaces (which is +relative in \code{pd_nested}) will be converted into absolute. +} +\seealso{ +context_towards_terminals visitors +} +\keyword{internal} diff --git a/man/context_towards_terminals.Rd b/man/context_towards_terminals.Rd new file mode 100644 index 000000000..07072299d --- /dev/null +++ b/man/context_towards_terminals.Rd @@ -0,0 +1,39 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/visit.R +\name{context_towards_terminals} +\alias{context_towards_terminals} +\title{Update the a parse table given outer context} +\usage{ +context_towards_terminals( + pd_nested, + outer_lag_newlines, + outer_indent, + outer_spaces, + outer_indention_refs +) +} +\arguments{ +\item{pd_nested}{A nested parse table.} + +\item{outer_lag_newlines}{The lag_newlines to be propagated inwards.} + +\item{outer_indent}{The indention depth to be propagated inwards.} + +\item{outer_spaces}{The number of spaces to be propagated inwards.} + +\item{outer_indention_refs}{The reference pos id that should be propagated +inwards.} +} +\value{ +An updated parse table. +} +\description{ +\code{outer_lag_newlines} are added to the first token in \code{pd}, +\code{outer_indent} is added to all tokens in \code{pd}, \code{outer_spaces} is added to the +last token in \code{pd}. \code{\link[=context_to_terminals]{context_to_terminals()}} calls this function repeatedly, +which means the propagation of the parse information to the terminal tokens. 
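The update rule spelled out above can be illustrated on a toy parse table (made-up values, for illustration only):

pd <- data.frame(
  lag_newlines = c(1L, 0L, 0L),
  indent       = c(0L, 2L, 2L),
  spaces       = c(1L, 1L, 0L)
)
outer_lag_newlines <- 1L
outer_indent <- 2L
outer_spaces <- 1L
pd$lag_newlines[1L] <- pd$lag_newlines[1L] + outer_lag_newlines # first token only
pd$indent <- pd$indent + outer_indent                           # every token
pd$spaces[nrow(pd)] <- pd$spaces[nrow(pd)] + outer_spaces       # last token only
pd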
+} +\seealso{ +context_to_terminals +} +\keyword{internal} diff --git a/man/convert_newlines_to_linebreaks.Rd b/man/convert_newlines_to_linebreaks.Rd new file mode 100644 index 000000000..9ac928a95 --- /dev/null +++ b/man/convert_newlines_to_linebreaks.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils.R +\name{convert_newlines_to_linebreaks} +\alias{convert_newlines_to_linebreaks} +\title{Replace the newline character with a line break} +\usage{ +convert_newlines_to_linebreaks(text) +} +\arguments{ +\item{text}{A character vector} +} +\description{ +Replace the newline character with a line break +} +\examples{ +styler:::convert_newlines_to_linebreaks("x\n2") +# a simple strsplit approach does not cover both cases +unlist(strsplit("x\n\n2", "\n", fixed = TRUE)) +unlist(strsplit(c("x", "", "2"), "\n", fixed = TRUE)) +styler:::convert_newlines_to_linebreaks(c("x", "2")) +} +\keyword{internal} diff --git a/man/copy_to_tempdir.Rd b/man/copy_to_tempdir.Rd new file mode 100644 index 000000000..7c09db776 --- /dev/null +++ b/man/copy_to_tempdir.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/testing.R +\name{copy_to_tempdir} +\alias{copy_to_tempdir} +\title{Copy a file to a temporary directory} +\usage{ +copy_to_tempdir(path_perm = testthat_file()) +} +\arguments{ +\item{path_perm}{The path of the file to copy.} +} +\description{ +Takes the path to a file as input and returns the path where the temporary +file is stored. Don't forget to unlink once you are done. +} +\keyword{internal} diff --git a/man/create_node_from_nested.Rd b/man/create_node_from_nested.Rd new file mode 100644 index 000000000..755f8d9fd --- /dev/null +++ b/man/create_node_from_nested.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/nested-to-tree.R +\name{create_node_from_nested} +\alias{create_node_from_nested} +\title{Create node from nested parse data} +\usage{ +create_node_from_nested(pd_nested, parent, structure_only) +} +\arguments{ +\item{pd_nested}{A nested data frame.} + +\item{parent}{The parent of the node to be created.} + +\item{structure_only}{Whether or not create a tree that represents the +structure of the expression without any information on the tokens. Useful +to check whether two structures are identical.} +} +\description{ +Create node from nested parse data +} +\keyword{internal} diff --git a/man/create_node_from_nested_root.Rd b/man/create_node_from_nested_root.Rd new file mode 100644 index 000000000..db4280645 --- /dev/null +++ b/man/create_node_from_nested_root.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/nested-to-tree.R +\name{create_node_from_nested_root} +\alias{create_node_from_nested_root} +\title{Convert a nested data frame into a node tree} +\usage{ +create_node_from_nested_root(pd_nested, structure_only) +} +\arguments{ +\item{pd_nested}{A nested data frame.} + +\item{structure_only}{Whether or not create a tree that represents the +structure of the expression without any information on the tokens. Useful +to check whether two structures are identical.} +} +\value{ +An object of class "Node" and "R6". +} +\description{ +This function is convenient to display all nesting levels of a nested data frame +at once. 
+} +\examples{ +if (rlang::is_installed("data.tree")) { + withr::with_options( + list(styler.cache_name = NULL), # temporarily deactivate cache + { + code <- "a <- function(x) { if(x > 1) { 1+1 } else {x} }" + nested_pd <- compute_parse_data_nested(code) + initialized <- styler:::pre_visit_one( + nested_pd, default_style_guide_attributes + ) + styler:::create_node_from_nested_root(initialized, + structure_only = FALSE + ) + } + ) +} +} +\keyword{internal} diff --git a/man/create_pos_ids.Rd b/man/create_pos_ids.Rd new file mode 100644 index 000000000..4498e19f0 --- /dev/null +++ b/man/create_pos_ids.Rd @@ -0,0 +1,35 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/token-create.R +\name{create_pos_ids} +\alias{create_pos_ids} +\title{Create valid pos_ids if possible} +\usage{ +create_pos_ids(pd, pos, by = 0.1, after = FALSE, n = 1L) +} +\arguments{ +\item{pd}{A parse table.} + +\item{pos}{The position where the new id should be inserted.} + +\item{by}{By how much the reference \code{pos_id} should be increased / decreased +to create a new id.} + +\item{after}{Boolean indicating whether it should be inserted after or before +\code{pos}.} + +\item{n}{Number of ids to generate.} +} +\value{ +Returns a valid sequences of pos_ids or an error if it was not possible to +create one. The validation is done with \code{\link[=validate_new_pos_ids]{validate_new_pos_ids()}} +} +\description{ +Create valid pos_ids if possible +} +\seealso{ +Other token creators: +\code{\link{create_tokens}()}, +\code{\link{validate_new_pos_ids}()} +} +\concept{token creators} +\keyword{internal} diff --git a/man/create_style_guide.Rd b/man/create_style_guide.Rd index f553b5288..7792998cf 100644 --- a/man/create_style_guide.Rd +++ b/man/create_style_guide.Rd @@ -1,12 +1,23 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/style_guides.R +% Please edit documentation in R/style-guides.R \name{create_style_guide} \alias{create_style_guide} \title{Create a style guide} \usage{ -create_style_guide(initialize = default_style_guide_attributes, - line_break = NULL, space = NULL, token = NULL, indention = NULL, - use_raw_indention = FALSE, reindention = tidyverse_reindention()) +create_style_guide( + initialize = default_style_guide_attributes, + line_break = NULL, + space = NULL, + token = NULL, + indention = NULL, + use_raw_indention = FALSE, + reindention = tidyverse_reindention(), + style_guide_name = NULL, + style_guide_version = NULL, + more_specs_style_guide = NULL, + transformers_drop = specify_transformers_drop(), + indent_character = " " +) } \arguments{ \item{initialize}{The bare name of a function that initializes various @@ -27,13 +38,46 @@ should be used.} \item{reindention}{A list of parameters for regex re-indention, most conveniently constructed using \code{\link[=specify_reindention]{specify_reindention()}}.} + +\item{style_guide_name}{The name of the style guide. Used as a meta attribute +inside the created style guide, for example for caching. By convention, +this is the style guide qualified by the package namespace plus the +location of the style guide, separated by \code{@}. For example, +\code{"styler::tidyverse_style@https://github.com/r-lib"}.} + +\item{style_guide_version}{The version of the style guide. Used as a meta +attribute inside the created style guide, for example for caching. 
This +should correspond to the version of the R package that exports the +style guide.} + +\item{more_specs_style_guide}{Named vector (coercible to character) +with all arguments passed to the style guide and used for cache +invalidation. You can easily capture them in your style guide function +declaration with \code{as.list(environment())} (compare source code of +\code{tidyverse_style()}).} + +\item{transformers_drop}{A list specifying under which conditions +transformer functions can be dropped since they have no effect on the +code to format, most easily constructed with +\code{\link[=specify_transformers_drop]{specify_transformers_drop()}}. This argument is experimental and may +change in future releases without prior notification. It was mainly +introduced to improve speed. Listing transformers here that occur almost +always in code does not make sense because the process of excluding them +also takes some time.} + +\item{indent_character}{The character that is used for indention. We strongly +advise using spaces as indention characters.} +} +\description{ +This is a helper function to create a style guide, which is technically +speaking a named list of groups of transformer functions where each +transformer function corresponds to one styling rule. The output of this -function can be used as an argument for \code{style} in top level functions -like \code{\link[=style_text]{style_text()}} and friends. +function can be used as an argument for \code{style} in top-level functions +like \code{\link[=style_text]{style_text()}} and friends. Note that for caching to work properly, +unquote all inputs to the transformer function if possible with rlang's \verb{!!}, +otherwise, they will be passed as references (generic variable names) instead +of literals and \code{styler:::is_cached()} won't pick up changes. See how it's +done in \code{\link[=tidyverse_style]{tidyverse_style()}} with \code{indent_by} and other arguments.
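A hedged sketch of that advice (the rule and style guide below are made up; the point is that !! bakes the literal value of indent_by into the partialised transformer, so changing the argument also changes the cache key):

set_indention_rule <- function(pd_flat, indent_by) {
  pd_flat # a no-op placeholder rule, only here to show the wiring
}
my_style <- function(indent_by = 2L) {
  styler::create_style_guide(
    indention = list(purrr::partial(set_indention_rule, indent_by = !!indent_by)),
    style_guide_name = "example::my_style@https://example.org",
    style_guide_version = "0.0.1",
    more_specs_style_guide = as.list(environment())
  )
}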
} \examples{ set_line_break_before_curly_opening <- function(pd_flat) { @@ -42,7 +86,14 @@ set_line_break_before_curly_opening <- function(pd_flat) { pd_flat } set_line_break_before_curly_opening_style <- function() { - create_style_guide(line_break = set_line_break_before_curly_opening) + create_style_guide( + line_break = list(set_line_break_before_curly_opening), + style_guide_name = "some-style-guide", + style_guide_version = "some-version" + ) } -style_text("a <- function(x) { x }", style = set_line_break_before_curly_opening_style) +style_text( + "a <- function(x) { x }", + style = set_line_break_before_curly_opening_style +) } diff --git a/man/create_tokens.Rd b/man/create_tokens.Rd new file mode 100644 index 000000000..24ef302e3 --- /dev/null +++ b/man/create_tokens.Rd @@ -0,0 +1,69 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/token-create.R +\name{create_tokens} +\alias{create_tokens} +\title{Create a terminal token} +\usage{ +create_tokens( + tokens, + texts, + lag_newlines = 0L, + spaces = 0L, + pos_ids, + token_before = NA, + token_after = NA, + indention_ref_pos_ids = NA, + indents, + terminal = TRUE, + child = NULL, + stylerignore, + block = NA, + is_cached = FALSE +) +} +\arguments{ +\item{tokens}{Character vector with tokens to create.} + +\item{texts}{Character vector with texts of the token to create.} + +\item{lag_newlines}{Character vector with lag_newlines corresponding to the +tokens.} + +\item{spaces}{Character vector with spaces corresponding to the tokens.} + +\item{pos_ids}{Character vector with positional id corresponding to the +tokens.} + +\item{token_before}{Character vector corresponding to the columns +\code{token_before}.} + +\item{token_after}{Character vector corresponding to the columns +\code{token_after}.} + +\item{indention_ref_pos_ids}{Character vector with indention ref ids +corresponding to the tokens.} + +\item{indents}{Vector with indents corresponding to the tokens.} + +\item{terminal}{Boolean vector indicating whether a token is a terminal or +not.} + +\item{child}{The children of the tokens.} + +\item{stylerignore}{Boolean to indicate if the line should be ignored by +styler. Must take value from token before, can't have a default.} + +\item{block}{The block (of caching) to which the token belongs. An integer.} + +\item{is_cached}{Whether the token is cached already.} +} +\description{ +Creates a terminal token represented as (a row of) a parse table. +} +\seealso{ +Other token creators: +\code{\link{create_pos_ids}()}, +\code{\link{validate_new_pos_ids}()} +} +\concept{token creators} +\keyword{internal} diff --git a/man/create_tree.Rd b/man/create_tree.Rd new file mode 100644 index 000000000..5dc008bdd --- /dev/null +++ b/man/create_tree.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/nested-to-tree.R +\name{create_tree} +\alias{create_tree} +\title{Create a tree from text} +\usage{ +create_tree(text, structure_only = FALSE) +} +\arguments{ +\item{text}{A character vector.} + +\item{structure_only}{Whether or not create a tree that represents the +structure of the expression without any information on the tokens. Useful +to check whether two structures are identical.} +} +\value{ +A data frame. +} +\description{ +Create a tree representation from a text. 
+} +\keyword{internal} diff --git a/man/default_style_guide_attributes.Rd b/man/default_style_guide_attributes.Rd index 172cc5276..ec5805cbe 100644 --- a/man/default_style_guide_attributes.Rd +++ b/man/default_style_guide_attributes.Rd @@ -10,11 +10,17 @@ default_style_guide_attributes(pd_flat) \item{pd_flat}{A parse table.} } \description{ -This function initialises and removes various variables from the parse +This function initializes and removes various variables from the parse table. } \examples{ -string_to_format <- "call( 3)" -pd <- styler:::compute_parse_data_nested(string_to_format) -styler:::pre_visit(pd, c(default_style_guide_attributes)) +withr::with_options( + list(styler.cache_name = NULL), # temporarily deactivate cache + { + string_to_format <- "call( 3)" + pd <- compute_parse_data_nested(string_to_format) + styler:::pre_visit_one(pd, default_style_guide_attributes) + } +) } +\keyword{internal} diff --git a/man/delete_if_cache_directory.Rd b/man/delete_if_cache_directory.Rd new file mode 100644 index 000000000..a79745cab --- /dev/null +++ b/man/delete_if_cache_directory.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/zzz.R +\name{delete_if_cache_directory} +\alias{delete_if_cache_directory} +\title{Delete a cache or temp directory} +\usage{ +delete_if_cache_directory(path) +} +\arguments{ +\item{path}{Absolute path to a directory to delete.} +} +\value{ +\code{TRUE} if anything was deleted, \code{FALSE} otherwise. +} +\description{ +For safety, \code{path} is only deleted if it is a sub-directory of a temporary +directory or user cache. Since this function relies on \code{tools::R_user_dir()}, +it early returns \code{FALSE} on \verb{R < 4.0.0}. +} +\keyword{internal} diff --git a/man/dir_without_..Rd b/man/dir_without_..Rd new file mode 100644 index 000000000..61c560c8e --- /dev/null +++ b/man/dir_without_..Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils-files.R +\name{dir_without_.} +\alias{dir_without_.} +\title{\code{dir()}, but without dot-prefix and different defaults} +\usage{ +dir_without_.(path, recursive = TRUE, ...) +} +\arguments{ +\item{path}{A path.} + +\item{...}{Passed to \code{\link[base:list.files]{base::dir()}}.} +} +\description{ +When using \code{dir()}, you can set \code{full.names = FALSE}, but then you can only +pass a character vector of length one as \code{path} to not loose the information +about where the files are. This function solves that case. It's needed when +one wants to standardize paths to use set operations on them, i.e. when the +user supplied input does not have a dot prefix. See 'Examples'. +} +\details{ +For different defaults, see \code{dir_without_._one}. +} +\examples{ +setdiff("./file.R", "file.R") # you want to standardize first. +} +\seealso{ +set_and_assert_arg_paths +} +\keyword{internal} diff --git a/man/dir_without_._one.Rd b/man/dir_without_._one.Rd new file mode 100644 index 000000000..631179f6c --- /dev/null +++ b/man/dir_without_._one.Rd @@ -0,0 +1,14 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils-files.R +\name{dir_without_._one} +\alias{dir_without_._one} +\title{\code{dir()}, but with full names, ignored case, and included hidden files and +recursive.} +\usage{ +dir_without_._one(path, recursive, ...) +} +\description{ +\code{dir()}, but with full names, ignored case, and included hidden files and +recursive. 
+} +\keyword{internal} diff --git a/man/emulate_rd.Rd b/man/emulate_rd.Rd new file mode 100644 index 000000000..40ec4753c --- /dev/null +++ b/man/emulate_rd.Rd @@ -0,0 +1,15 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/roxygen-examples-parse.R +\name{emulate_rd} +\alias{emulate_rd} +\title{Convert roxygen comments to Rd code} +\usage{ +emulate_rd(roxygen) +} +\description{ +We leverage roxygen2 workhorse function \code{\link[roxygen2:roc_proc_text]{roxygen2::roc_proc_text()}} if +our input contains characters that have to be escaped. Since this is an +expensive operation, we opt out of it and perform a simple +\code{remove_roxygen_mask()} when there are no characters to escape. +} +\keyword{internal} diff --git a/man/enhance_mapping_special.Rd b/man/enhance_mapping_special.Rd new file mode 100644 index 000000000..244e16f62 --- /dev/null +++ b/man/enhance_mapping_special.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/nest.R +\name{enhance_mapping_special} +\alias{enhance_mapping_special} +\title{Enhance the mapping of text to the token "SPECIAL"} +\usage{ +enhance_mapping_special(pd) +} +\arguments{ +\item{pd}{A parse table.} +} +\description{ +Map text corresponding to the token "SPECIAL" to a (more) unique token +description. +} +\keyword{internal} diff --git a/man/enrich_terminals.Rd b/man/enrich_terminals.Rd new file mode 100644 index 000000000..50bde213e --- /dev/null +++ b/man/enrich_terminals.Rd @@ -0,0 +1,33 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/visit.R +\name{enrich_terminals} +\alias{enrich_terminals} +\title{Enrich flattened parse table} +\usage{ +enrich_terminals(flattened_pd, use_raw_indention = FALSE) +} +\arguments{ +\item{flattened_pd}{A nested parse table that was turned into a flat parse +table using \code{\link[=extract_terminals]{extract_terminals()}}.} + +\item{use_raw_indention}{Boolean indicating whether or not the raw indention +should be used.} +} +\description{ +Enriches a flattened parse table with terminals only. In particular, it is +possible to compute the exact position a token will have (line and column) +when it will be serialized. +} +\details{ +Since we have only terminal tokens now, the line on which a token starts will +also be the line on which it ends. We call \code{line1} the line on which the +token starts. \code{line1} has the same meaning as \code{line1} that can be found in a +flat parse table (see \code{\link[=tokenize]{tokenize()}}), just that the \code{line1} created by +\code{enrich_terminals()} is the updated version of the former \code{line1}. The same +applies for \code{col1} and \code{col2}. Note that this function does remove the +columns \code{indent} and \code{spaces}. All information of the former is stored in +\code{lag_spaces} now. The latter was removed because it is redundant after adding +the column \code{lag_spaces}, which is more convenient to work with, in particular +when serializing the parse table.
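Roughly, those positions can be accumulated from lag_newlines, lag_spaces and the token widths; a simplified sketch on a toy table (not the internal algorithm):

toks <- data.frame(
  text         = c("a", "<-", "1"),
  lag_newlines = c(0L, 0L, 0L),
  lag_spaces   = c(0L, 1L, 1L)
)
toks$line1 <- 1L + cumsum(toks$lag_newlines)
col_end <- 0L
toks$col1 <- NA_integer_
for (i in seq_len(nrow(toks))) {
  if (toks$lag_newlines[i] > 0L) col_end <- 0L # a line break resets the column
  toks$col1[i] <- col_end + toks$lag_spaces[i] + 1L
  col_end <- toks$col1[i] + nchar(toks$text[i]) - 1L
}
toks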
+} +\keyword{internal} diff --git a/man/ensure_correct_txt.Rd b/man/ensure_correct_txt.Rd new file mode 100644 index 000000000..f02fe7420 --- /dev/null +++ b/man/ensure_correct_txt.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/parse.R +\name{ensure_correct_txt} +\alias{ensure_correct_txt} +\title{Ensure a correct \code{text} of all strings and numeric constants} +\usage{ +ensure_correct_txt(pd, text) +} +\arguments{ +\item{pd}{A parse table.} +} +\description{ +Make sure \code{text} of the tokens \code{STR_CONST} and \code{NUM_CONST} is correct and +adapt if necessary. We replace offending \code{text} in the terminal expressions +with the text of their parents if their line / col position matches and +return an error otherwise. +} +\keyword{internal} diff --git a/man/ensure_last_n_empty.Rd b/man/ensure_last_n_empty.Rd new file mode 100644 index 000000000..37c82f804 --- /dev/null +++ b/man/ensure_last_n_empty.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils.R +\name{ensure_last_n_empty} +\alias{ensure_last_n_empty} +\title{Ensure there is one (and only one) blank line at the end of a vector} +\usage{ +ensure_last_n_empty(x, n = 1L) +} +\description{ +Ensure there is one (and only one) blank line at the end of a vector +} +\examples{ +styler:::ensure_last_n_empty("") +styler:::ensure_last_n_empty(letters) +styler:::ensure_last_n_empty(c(letters, "", "", "")) +} +\keyword{internal} diff --git a/man/env_add_stylerignore.Rd b/man/env_add_stylerignore.Rd new file mode 100644 index 000000000..0abb1a398 --- /dev/null +++ b/man/env_add_stylerignore.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/stylerignore.R +\name{env_add_stylerignore} +\alias{env_add_stylerignore} +\title{Add positional information of token to next terminal} +\usage{ +env_add_stylerignore(pd_flat) +} +\arguments{ +\item{pd_flat}{A parse table.} +} +\description{ +This is needed because at serialization time, we also have terminals only +and positional argument of non-terminals were already propagated to terminals +with \code{\link[=context_to_terminals]{context_to_terminals()}}. Because tokens can be added or removed during +styling, we must not only keep the pos_id, but rather we must remember the +pos_id of the first token in the stylerignore sequence (the marker, or the +first token on a line if the stylerignore marker is an inline marker), for +which we know it will still be there, and join these markers later with all +tokens in the stylerignore sequence (this is a one to many join, i.e. one +start marker can have many tokens). +} +\keyword{internal} diff --git a/man/env_current.Rd b/man/env_current.Rd new file mode 100644 index 000000000..bdd407371 --- /dev/null +++ b/man/env_current.Rd @@ -0,0 +1,29 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/environments.R +\docType{data} +\name{env_current} +\alias{env_current} +\title{The elements that are added to this environment are:} +\format{ +An object of class \code{environment} of length 0. +} +\usage{ +env_current +} +\description{ +The elements that are added to this environment are: +} +\details{ +\itemize{ +\item \code{parser_version}: Needed to dispatch between parser versions, see +\code{\link[=parser_version_set]{parser_version_set()}} for details. +\item \code{stylerignore}: A data frame with parse data containing tokens that fall within +a stylerignore sequence. 
This is used after serializing the flattened +parse table to apply the initial formatting to these tokens. See +\link{stylerignore} for details. +\item \code{any_stylerignore}: Whether there is any stylerignore marker. The idea is +to check early in the runtime if this is the case and then if so, take +as many short-cuts as possible. See \link{stylerignore} for details. +} +} +\keyword{internal} diff --git a/man/expressions_are_identical.Rd b/man/expressions_are_identical.Rd new file mode 100644 index 000000000..89bb4f37f --- /dev/null +++ b/man/expressions_are_identical.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/transform-files.R +\name{expressions_are_identical} +\alias{expressions_are_identical} +\title{Check whether two expressions are identical} +\usage{ +expressions_are_identical(old_text, new_text) +} +\arguments{ +\item{old_text}{The initial expression in its character representation.} + +\item{new_text}{The styled expression in its character representation.} +} +\description{ +Check whether two expressions are identical +} +\keyword{internal} diff --git a/man/extend_if_comment.Rd b/man/extend_if_comment.Rd new file mode 100644 index 000000000..eeaee1f99 --- /dev/null +++ b/man/extend_if_comment.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils-navigate-nest.R +\name{extend_if_comment} +\alias{extend_if_comment} +\title{Find the index of the last comment in the sequence of comments-only tokens +after the token that has position \code{pos} in \code{pd}.} +\usage{ +extend_if_comment(pd, pos) +} +\arguments{ +\item{pd}{A parse table.} + +\item{pos}{The position of the token to start the search from.} +} +\description{ +Find the index of the last comment in the sequence of comments-only tokens +after the token that has position \code{pos} in \code{pd}. +} +\keyword{internal} diff --git a/man/extract_terminals.Rd b/man/extract_terminals.Rd new file mode 100644 index 000000000..25d3578cb --- /dev/null +++ b/man/extract_terminals.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/visit.R +\name{extract_terminals} +\alias{extract_terminals} +\title{Extract terminal tokens} +\usage{ +extract_terminals(pd_nested) +} +\arguments{ +\item{pd_nested}{A nested parse table.} +} +\description{ +Turns a nested parse table into a flat parse table and extracts \emph{all} +attributes. +} +\keyword{internal} diff --git a/man/finalize_raw_chunks.Rd b/man/finalize_raw_chunks.Rd new file mode 100644 index 000000000..7fdfdf186 --- /dev/null +++ b/man/finalize_raw_chunks.Rd @@ -0,0 +1,14 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/transform-code.R +\name{finalize_raw_chunks} +\alias{finalize_raw_chunks} +\title{Drop start / stop, when formatting is turned off} +\usage{ +finalize_raw_chunks(start, end, filetype, lines) +} +\description{ +If \code{tidy = FALSE} (the knitr code chunk default), code is not styled upon +knitting. If it is explicitly added to a code chunk, the code chunk is in +addition not styled with styler when formatting the document. 
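For illustration, a chunk that opts out of styling both at knit time and when styling the whole document carries the option explicitly (the chunk label is made up):

```{r unstyled-chunk, tidy = FALSE}
x<-1 # deliberately left untouched
```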
+} +\keyword{internal} diff --git a/man/find_blank_lines_to_next_block.Rd b/man/find_blank_lines_to_next_block.Rd new file mode 100644 index 000000000..adff7a145 --- /dev/null +++ b/man/find_blank_lines_to_next_block.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/transform-block.R +\name{find_blank_lines_to_next_block} +\alias{find_blank_lines_to_next_block} +\title{Number of lines between cache blocks} +\usage{ +find_blank_lines_to_next_block(pd) +} +\arguments{ +\item{pd}{A top-level nest.} +} +\description{ +This is relevant when putting expressions together into a block and preserving +blank lines between them. Note that because code does not need to start on +line 1, the first element of the output is the number of lines until the +first block. +} +\keyword{internal} diff --git a/man/find_blank_lines_to_next_expr.Rd b/man/find_blank_lines_to_next_expr.Rd new file mode 100644 index 000000000..e03ae2e37 --- /dev/null +++ b/man/find_blank_lines_to_next_expr.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/transform-block.R +\name{find_blank_lines_to_next_expr} +\alias{find_blank_lines_to_next_expr} +\title{Find blank lines} +\usage{ +find_blank_lines_to_next_expr(pd_nested) +} +\arguments{ +\item{pd_nested}{A nested parse table.} +} +\value{ +The line number on which the first token occurs. +} +\description{ +How many line breaks lie between the expressions? +} +\keyword{internal} diff --git a/man/find_dont_seqs.Rd b/man/find_dont_seqs.Rd new file mode 100644 index 000000000..75a90e6c7 --- /dev/null +++ b/man/find_dont_seqs.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/roxygen-examples-find.R +\name{find_dont_seqs} +\alias{find_dont_seqs} +\title{Find \code{dontrun} and friend sequences} +\usage{ +find_dont_seqs(bare) +} +\arguments{ +\item{bare}{Bare code.} +} +\description{ +Returns the indices of the lines that correspond to a \code{dontrun} or +friends sequence. +} +\keyword{internal} diff --git a/man/find_line_break_position_in_multiline_call.Rd b/man/find_line_break_position_in_multiline_call.Rd new file mode 100644 index 000000000..3efe976e4 --- /dev/null +++ b/man/find_line_break_position_in_multiline_call.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/rules-line-breaks.R +\name{find_line_break_position_in_multiline_call} +\alias{find_line_break_position_in_multiline_call} +\title{Find index of the token before which the line should be broken} +\usage{ +find_line_break_position_in_multiline_call(pd) +} +\arguments{ +\item{pd}{A parse table.} +} +\description{ +Given a multi-line function call parse table, this function finds the +position of the first named argument and returns its index. +If there is no named argument, the line is broken right after the opening +parenthesis.
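A hypothetical sketch of that lookup (SYMBOL_SUB is the parser token for the name of a named argument; the helper name is made up):

find_break_position_sketch <- function(pd) {
  named <- which(pd$token == "SYMBOL_SUB")
  if (length(named) > 0L) {
    named[1L] # break before the first named argument
  } else {
    which(pd$token == "'('")[1L] + 1L # otherwise right after the opening parenthesis
  }
}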
+} +\keyword{internal} diff --git a/man/find_pos_id_to_keep.Rd b/man/find_pos_id_to_keep.Rd new file mode 100644 index 000000000..8d4324d14 --- /dev/null +++ b/man/find_pos_id_to_keep.Rd @@ -0,0 +1,35 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/nest.R +\name{find_pos_id_to_keep} +\alias{find_pos_id_to_keep} +\title{Find the pos ids to keep} +\usage{ +find_pos_id_to_keep(pd) +} +\arguments{ +\item{pd}{A temporary top-level nest where the first expression is always a +top-level expression, potentially cached.} +} +\description{ +To make a parse table shallow, we must know which ids to keep. +\code{split(cumsum(pd_parent_first$parent == 0L))} above puts comments with +negative parents in the same block as proceeding expressions (but also with +positive). +\code{find_pos_id_to_keep()} must hence always keep negative comments. We did not +use \code{split(cumsum(pd_parent_first$parent < 1L))} because then every top-level +comment is an expression on its own and processing takes much longer for +typical roxygen annotated code. +} +\details{ +Note that top-level comments \strong{above} code have negative parents +(the negative value of the parent of the code expression that follows after, +another comment might be in the way though), all comments that are not top +level have positive ids. All comments for which no code follows afterwards +have parent 0. +} +\examples{ +styler:::get_parse_data(c("#", "1")) +styler:::get_parse_data(c("c(#", "1)")) +styler:::get_parse_data(c("", "c(#", "1)", "#")) +} +\keyword{internal} diff --git a/man/find_start_pos_id.Rd b/man/find_start_pos_id.Rd new file mode 100644 index 000000000..8e2357314 --- /dev/null +++ b/man/find_start_pos_id.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/token-create.R +\name{find_start_pos_id} +\alias{find_start_pos_id} +\title{Find legit starting value for a new positional id} +\usage{ +find_start_pos_id(pd, pos, by, direction, after, candidates = NULL) +} +\arguments{ +\item{pd}{A parse table.} + +\item{pos}{The position where the new id should be inserted.} + +\item{by}{By how much the reference \code{pos_id} should be increased / decreased +to create a new id.} + +\item{direction}{Derived from \code{after}. \code{1} if \code{after = TRUE}, \code{-1} otherwise.} + +\item{after}{Boolean indicating whether it should be inserted after or before +\code{pos}.} + +\item{candidates}{The \code{pos_ids} of the candidates that origin from other +nests.} +} +\description{ +Looks at the current nest as well as into its children (if necessary) to make +sure the right id is returned. Otherwise, ordering of tokens might not be +preserved. +} +\keyword{internal} diff --git a/man/find_tokens_to_update.Rd b/man/find_tokens_to_update.Rd new file mode 100644 index 000000000..276fcd7d4 --- /dev/null +++ b/man/find_tokens_to_update.Rd @@ -0,0 +1,34 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/reindent.R +\name{find_tokens_to_update} +\alias{find_tokens_to_update} +\title{Find the tokens to update when applying a reference indention} +\usage{ +find_tokens_to_update(flattened_pd, target_token) +} +\arguments{ +\item{flattened_pd}{A flattened parse table.} + +\item{target_token}{The index of the token from which the indention level +should be applied to other tokens.} +} +\description{ +Given a target token and a flattened parse table, the token for which the +spacing information needs to be updated are computed. 
Since indention is +already embedded in the column \code{lag_spaces}, only tokens at the beginning of +a line are of concern. +} +\examples{ +style_text("function(a = +b, +dd +) {}", scope = "indention") +style_text("function(a, +b, +dd +) {}", scope = "indention") +} +\seealso{ +apply_ref_indention_one() +} +\keyword{internal} diff --git a/man/fix_quotes.Rd b/man/fix_quotes.Rd new file mode 100644 index 000000000..ab0586bc2 --- /dev/null +++ b/man/fix_quotes.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/rules-tokens.R +\name{fix_quotes} +\alias{fix_quotes} +\title{Replace single quotes with double quotes} +\usage{ +fix_quotes(pd_flat) +} +\arguments{ +\item{pd_flat}{A flat parse table.} +} +\description{ +We do not use \code{deparse()} as in previous implementations but \code{paste0()} since +the former approach escapes the reverse backslash in the line break character +\verb{\\\\n} whereas the solution with \code{paste0()} does not. +} +\examples{ +style_text("'here +is a string +'") +} +\keyword{internal} diff --git a/man/flatten_operators.Rd b/man/flatten_operators.Rd new file mode 100644 index 000000000..0e463d8ae --- /dev/null +++ b/man/flatten_operators.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/relevel.R +\name{flatten_operators} +\alias{flatten_operators} +\title{Flatten some token in the nested parse table based on operators} +\usage{ +flatten_operators(pd_nested) +} +\arguments{ +\item{pd_nested}{A nested parse table to partially flatten.} +} +\description{ +Certain tokens are not placed optimally in the nested parse data with +\code{\link[=compute_parse_data_nested]{compute_parse_data_nested()}}. For example, the token of arithmetic +operations 1 + 1 + 1 should all be on the same level of nesting since the +indention is the same for all but the first two terminals. Setting the +indention correctly is easier to achieve if they are put on the same level of +nesting. +} +\keyword{internal} diff --git a/man/flatten_operators_one.Rd b/man/flatten_operators_one.Rd new file mode 100644 index 000000000..36ae86b30 --- /dev/null +++ b/man/flatten_operators_one.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/relevel.R +\name{flatten_operators_one} +\alias{flatten_operators_one} +\title{Flatten one level of nesting with its child} +\usage{ +flatten_operators_one(pd_nested) +} +\arguments{ +\item{pd_nested}{A nested parse table.} +} +\description{ +Flattening is done in two ways. We can flatten a parse table by moving the +left hand token of an operator one level up. Or doing that with the right +hand token. 
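One way to inspect how such an operator chain ends up nested is to look at the tree representation directly (needs the suggested data.tree package; the cache is deactivated as in the other examples):

withr::with_options(
  list(styler.cache_name = NULL),
  styler:::create_tree("1 + 1 + 1", structure_only = TRUE)
)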
+} +\keyword{internal} diff --git a/man/flatten_pd.Rd b/man/flatten_pd.Rd new file mode 100644 index 000000000..662813232 --- /dev/null +++ b/man/flatten_pd.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/relevel.R +\name{flatten_pd} +\alias{flatten_pd} +\title{Flatten a parse table} +\usage{ +flatten_pd(pd_nested, token, child_token = token, left = TRUE) +} +\arguments{ +\item{pd_nested}{A nested parse table.} + +\item{token}{A character vector with tokens of which at least one has to +occur in \code{pd_nested} in order to flatten it.} + +\item{child_token}{A character vector of tokens of which at least one has to +occur in the child in order to flatten the parse table.} + +\item{left}{Flag that indicates whether the parse table should be flattened +from left or from right.} +} +\description{ +Flattens a parse table if certain tokens occur in this table or its child, +either flattening from left or from right. If one of \code{token} is present in +\code{pd_nested} and one of \code{child_token} is present in one of the children next +to \code{token} in \code{pd_nested}, the nested parse table is flattened. Otherwise, it +is returned unmodified. +} +\keyword{internal} diff --git a/man/generate_test_samples.Rd b/man/generate_test_samples.Rd new file mode 100644 index 000000000..40177fc80 --- /dev/null +++ b/man/generate_test_samples.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/testing.R +\name{generate_test_samples} +\alias{generate_test_samples} +\title{Generate a comprehensive collection of test cases for comment / insertion +interaction. +Tests consist of if / if-else / if-else-if-else cases, paired with various +line-break and comment configurations. Used for internal testing.} +\usage{ +generate_test_samples() +} +\value{ +The function is called for its side effects, i.e. to write the +test cases to *-in.R files that can be tested with \code{\link[=test_collection]{test_collection()}}. Note +that a few of the test cases are invalid and need to be removed / commented +out manually. +} +\description{ +Generate a comprehensive collection of test cases for comment / insertion +interaction. +Tests consist of if / if-else / if-else-if-else cases, paired with various +line-break and comment configurations. Used for internal testing.
+} +\keyword{internal} diff --git a/man/get_addins_style_transformer_name.Rd b/man/get_addins_style_transformer_name.Rd new file mode 100644 index 000000000..deccf5ff3 --- /dev/null +++ b/man/get_addins_style_transformer_name.Rd @@ -0,0 +1,15 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/addins.R +\name{get_addins_style_transformer_name} +\alias{get_addins_style_transformer_name} +\alias{get_addins_style_transformer} +\title{Return the style function or name} +\usage{ +get_addins_style_transformer_name() + +get_addins_style_transformer() +} +\description{ +Return the style function or name +} +\keyword{internal} diff --git a/man/get_engine_pattern.Rd b/man/get_engine_pattern.Rd new file mode 100644 index 000000000..d0b4d75ff --- /dev/null +++ b/man/get_engine_pattern.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/transform-code.R +\name{get_engine_pattern} +\alias{get_engine_pattern} +\title{What's the engine pattern for rmd code chunks?} +\usage{ +get_engine_pattern() +} +\description{ +The function returns the regular expression pattern that identifies +all r engines in Rmd chunks. Defaults to \verb{[Rr]}. You probably only want to +change this if you create a knitr engine that processes R code but is not +the default engine \code{r}. +The pattern must be followed by a space (in the case the chunk is given +a name), a comma (if no name is given but further options are passed to the +engine) or a closing curly brace (in case no option and no name is given to +the chunk). +} +\keyword{internal} diff --git a/man/get_knitr_pattern.Rd b/man/get_knitr_pattern.Rd new file mode 100644 index 000000000..28433fe28 --- /dev/null +++ b/man/get_knitr_pattern.Rd @@ -0,0 +1,15 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/transform-code.R +\name{get_knitr_pattern} +\alias{get_knitr_pattern} +\title{Get chunk pattern} +\usage{ +get_knitr_pattern(filetype) +} +\arguments{ +\item{filetype}{A string indicating the filetype - either 'Rmd' or 'Rnw'.} +} +\description{ +Determine a regex pattern for identifying R code chunks. +} +\keyword{internal} diff --git a/man/get_parse_data.Rd b/man/get_parse_data.Rd new file mode 100644 index 000000000..7b02964ae --- /dev/null +++ b/man/get_parse_data.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/parse.R +\name{get_parse_data} +\alias{get_parse_data} +\title{Obtain robust parse data} +\usage{ +get_parse_data(text, include_text = TRUE, ...) +} +\arguments{ +\item{text}{The text to parse.} + +\item{include_text}{Passed to \code{\link[utils:getParseData]{utils::getParseData()}} as \code{includeText}.} + +\item{...}{Other arguments passed to \code{\link[utils:getParseData]{utils::getParseData()}}.} +} +\description{ +Wrapper around \code{utils::getParseData(parse(text = text))} that returns a flat +parse table. When caching information should be added, make sure that +the cache is activated with \code{cache_activate()} and both \code{transformers} and +\code{cache_dir} are non-\code{NULL}. 
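For a quick look at what such a flat parse table contains (internal API, shown for illustration only):

styler:::get_parse_data("x <- 1 # a comment")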
+} +\keyword{internal} diff --git a/man/has_crlf_as_first_line_sep.Rd b/man/has_crlf_as_first_line_sep.Rd new file mode 100644 index 000000000..707844800 --- /dev/null +++ b/man/has_crlf_as_first_line_sep.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/parse.R +\name{has_crlf_as_first_line_sep} +\alias{has_crlf_as_first_line_sep} +\title{Check if a string uses CRLF EOLs} +\usage{ +has_crlf_as_first_line_sep(message, initial_text) +} +\arguments{ +\item{message}{A message returned with \code{tryCatch()}.} + +\item{initial_text}{The initial text to style.} +} +\description{ +Check if a string uses CRLF EOLs +} +\keyword{internal} diff --git a/man/hash_standardize.Rd b/man/hash_standardize.Rd new file mode 100644 index 000000000..104b8e85b --- /dev/null +++ b/man/hash_standardize.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils-cache.R +\name{hash_standardize} +\alias{hash_standardize} +\title{Standardize text for hashing} +\usage{ +hash_standardize(text) +} +\arguments{ +\item{text}{A character vector.} +} +\description{ +Make sure text after styling results in the same hash as text before styling +if it is indeed identical. This function expects trailing blank lines in +\code{text} were removed prior to passing it to this function. +} +\keyword{internal} diff --git a/man/identify_raw_chunks.Rd b/man/identify_raw_chunks.Rd new file mode 100644 index 000000000..aee9260ff --- /dev/null +++ b/man/identify_raw_chunks.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/transform-code.R +\name{identify_raw_chunks} +\alias{identify_raw_chunks} +\title{Identifies raw Rmd or Rnw code chunks} +\usage{ +identify_raw_chunks(lines, filetype, engine_pattern = get_engine_pattern()) +} +\arguments{ +\item{lines}{A character vector of lines from an Rmd or Rnw file.} + +\item{filetype}{A string indicating the filetype - either 'Rmd' or 'Rnw'.} + +\item{engine_pattern}{A regular expression that must match the engine name.} +} +\description{ +Raw in the sense that these chunks don't contain pure R code, but they +contain a header and footer of markdown. Only code chunks that have an engine +whose name matches \code{engine-pattern} are considered as R code. +For every opening, we match the next closing. If there are not the same +amount of closing and openings after this matching, we throw an error. +Similarly, if there are two openings before a closing, the closing gets +matched twice, on which we throw an error. +} +\keyword{internal} diff --git a/man/identify_start_to_stop_of_roxygen_examples_from_text.Rd b/man/identify_start_to_stop_of_roxygen_examples_from_text.Rd new file mode 100644 index 000000000..7bb975d59 --- /dev/null +++ b/man/identify_start_to_stop_of_roxygen_examples_from_text.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/roxygen-examples-find.R +\name{identify_start_to_stop_of_roxygen_examples_from_text} +\alias{identify_start_to_stop_of_roxygen_examples_from_text} +\title{Figure out where code examples start and stop} +\usage{ +identify_start_to_stop_of_roxygen_examples_from_text(text) +} +\arguments{ +\item{text}{A text consisting of code and/or roxygen comments.} +} +\description{ +Finds the sequence from start to stop of the lines in \code{text} that are +code examples in roxygen comments. 
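An illustrative call to identify_raw_chunks(), documented earlier in this hunk; the toy R Markdown content is made up and the exact shape of the return value is an implementation detail:

# Locate the boundaries of the single R chunk in a toy Rmd document.
rmd_lines <- c(
  "Some text.",
  "```{r}",
  "1+1",
  "```",
  "More text."
)
styler:::identify_raw_chunks(rmd_lines, filetype = "Rmd")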
+} +\keyword{internal} diff --git a/man/if_for_while_part_requires_braces.Rd b/man/if_for_while_part_requires_braces.Rd new file mode 100644 index 000000000..a6f1d67f0 --- /dev/null +++ b/man/if_for_while_part_requires_braces.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/rules-tokens.R +\name{if_for_while_part_requires_braces} +\alias{if_for_while_part_requires_braces} +\title{Check if if, for or while loop expression require a braces.} +\usage{ +if_for_while_part_requires_braces(pd, key_token) +} +\arguments{ +\item{pd}{A parse table.} + +\item{key_token}{The token that comes right before the token that contains +the expression to be wrapped (ignoring comments). For if and while loops, +this is the closing "')'", for a for-loop it's "forcond".} +} +\description{ +This is the case if they are multi-line and not yet wrapped into curly +braces. +} +\keyword{internal} diff --git a/man/initialize_attributes.Rd b/man/initialize_attributes.Rd new file mode 100644 index 000000000..931fe7dcf --- /dev/null +++ b/man/initialize_attributes.Rd @@ -0,0 +1,46 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/initialize.R +\name{initialize_attributes} +\alias{initialize_attributes} +\alias{initialize_newlines} +\alias{initialize_spaces} +\alias{initialize_multi_line} +\alias{initialize_indention_ref_pos_id} +\alias{initialize_indent} +\alias{validate_parse_data} +\title{Initialize attributes} +\usage{ +initialize_newlines(pd_flat) + +initialize_spaces(pd_flat) + +initialize_multi_line(pd_flat) + +initialize_indention_ref_pos_id(pd_flat) + +initialize_indent(pd_flat) + +validate_parse_data(pd_flat) +} +\arguments{ +\item{pd_flat}{A parse table.} +} +\description{ +Initialize attributes +} +\section{Functions}{ +\itemize{ +\item \code{initialize_newlines()}: Initializes \code{newlines} and \code{lag_newlines}. + +\item \code{initialize_spaces()}: Initializes \code{spaces}. + +\item \code{initialize_multi_line()}: Initializes \code{multi_line}. + +\item \code{initialize_indention_ref_pos_id()}: Initializes \code{indention_ref_ind}. + +\item \code{initialize_indent()}: Initializes \code{indent}. + +\item \code{validate_parse_data()}: validates the parse data. 
+ +}} +\keyword{internal} diff --git a/man/invalid_utf8.Rd b/man/invalid_utf8.Rd new file mode 100644 index 000000000..ae94777c7 --- /dev/null +++ b/man/invalid_utf8.Rd @@ -0,0 +1,12 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/io.R +\name{invalid_utf8} +\alias{invalid_utf8} +\title{Drop-in replacement for \code{xfun:::invalid_utf8()}} +\usage{ +invalid_utf8(x) +} +\description{ +Drop-in replacement for \code{xfun:::invalid_utf8()} +} +\keyword{internal} diff --git a/man/is_cached.Rd b/man/is_cached.Rd new file mode 100644 index 000000000..1f1568aa3 --- /dev/null +++ b/man/is_cached.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils-cache.R +\name{is_cached} +\alias{is_cached} +\title{Check if text is cached} +\usage{ +is_cached(text, transformers, more_specs, cache_dir = get_cache_dir()) +} +\arguments{ +\item{text}{Passed to \code{\link[=cache_make_key]{cache_make_key()}} to generate a key.} + +\item{transformers}{Passed to \code{\link[=cache_make_key]{cache_make_key()}} to generate a key.} + +\item{more_specs}{Passed to \code{\link[=cache_make_key]{cache_make_key()}} to generate a key.} + +\item{cache_dir}{The caching directory relative to the \code{.Rcache} root to +look for a cached value.} +} +\description{ +This boils down to check if the hash exists at the caching dir as a file. +} +\keyword{internal} diff --git a/man/is_code_chunk_header_or_xaringan_or_code_output.Rd b/man/is_code_chunk_header_or_xaringan_or_code_output.Rd new file mode 100644 index 000000000..04eb145be --- /dev/null +++ b/man/is_code_chunk_header_or_xaringan_or_code_output.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/expr-is.R +\name{is_code_chunk_header_or_xaringan_or_code_output} +\alias{is_code_chunk_header_or_xaringan_or_code_output} +\title{Identify spinning code chunk header or xaringan} +\usage{ +is_code_chunk_header_or_xaringan_or_code_output(pd) +} +\arguments{ +\item{pd}{A parse table.} +} +\description{ +Wrongly identifies a comment without a preceding line break as a code chunk +header. +See https://yihui.name/knitr/demo/stitch/#spin-comment-out-texts for details. +} +\examples{ +style_text(c( + "# title", + "some_code <- function() {}", + "#+ chunk-label, opt1=value1", + "call(3, 2, c(3:2))", + "#> 99" +)) +} +\keyword{internal} diff --git a/man/is_double_indent_function_declaration.Rd b/man/is_double_indent_function_declaration.Rd new file mode 100644 index 000000000..d9a36a367 --- /dev/null +++ b/man/is_double_indent_function_declaration.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/rules-indention.R +\name{is_double_indent_function_declaration} +\alias{is_double_indent_function_declaration} +\title{Is the function declaration double indented?} +\usage{ +is_double_indent_function_declaration(pd, indent_by = 2L) +} +\arguments{ +\item{pd}{A parse table.} + +\item{indent_by}{How many spaces of indention should be inserted after +operators such as '('.} +} +\description{ +Assumes you already checked if it's a function with +\code{is_function_declaration}. It is double indented if the first token +after the first line break that is a \code{"SYMBOL_FORMALS"}. 
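For context on is_cached(), documented earlier in this hunk, this is roughly how the exported caching API exercises it; the cache name "styler-demo" is purely illustrative:

# Activate a cache, style some code (which populates the cache), then
# clean up again.
cache_activate("styler-demo")
style_text("1+1")   # styled and written to the cache
style_text("1 + 1") # already cached, so returned without re-styling
cache_deactivate()
cache_clear("styler-demo", ask = FALSE)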
+} +\keyword{internal} diff --git a/man/is_insufficiently_parsed_string.Rd b/man/is_insufficiently_parsed_string.Rd new file mode 100644 index 000000000..83cdfb67a --- /dev/null +++ b/man/is_insufficiently_parsed_string.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/parse.R +\name{is_insufficiently_parsed_string} +\alias{is_insufficiently_parsed_string} +\title{Identify strings that were not fully parsed} +\usage{ +is_insufficiently_parsed_string(pd) +} +\arguments{ +\item{pd}{A parse table.} + +\item{text}{The initial code to style.} +} +\description{ +Identifies strings that were not fully parsed due to their vast length. +} +\details{ +The meaning of the variable \code{is_problematic_string} in the source code +changes from "all strings" to "all problematic strings", is partly +misleading and this approach was chosen for performance reasons only. +} +\keyword{internal} diff --git a/man/is_shebang.Rd b/man/is_shebang.Rd new file mode 100644 index 000000000..f3c053e34 --- /dev/null +++ b/man/is_shebang.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/expr-is.R +\name{is_shebang} +\alias{is_shebang} +\title{Identify comments that are shebangs} +\usage{ +is_shebang(pd) +} +\arguments{ +\item{pd}{A parse table.} +} +\description{ +Shebangs should be preserved and no space should be inserted between +\verb{#} and \code{!}. A comment is a shebang if it is the first top-level token +(identified with \code{pos_id}) and if it starts with \verb{#!}. +} +\examples{ +style_text("#!/usr/bin/env Rscript") +} +\keyword{internal} diff --git a/man/local_test_setup.Rd b/man/local_test_setup.Rd new file mode 100644 index 000000000..ddc76702c --- /dev/null +++ b/man/local_test_setup.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/testing.R +\name{local_test_setup} +\alias{local_test_setup} +\title{Establish testing setup for current environment} +\usage{ +local_test_setup(cache = FALSE, .local_envir = parent.frame()) +} +\arguments{ +\item{cache}{Whether or not to create and activate a cache in a temporary +directory.} + +\item{.local_envir}{The environment to use for scoping.} +} +\description{ +Establish testing setup for current environment +} +\details{ +\itemize{ +\item make styler quiet. 
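A sketch of how local_test_setup() is typically used inside a testthat test; the test below is made up for illustration:

# The temporary cache and the quiet option are cleaned up automatically
# when the test environment goes out of scope.
testthat::test_that("styling is idempotent", {
  styler:::local_test_setup(cache = TRUE)
  once <- style_text("1+1")
  twice <- style_text(once)
  testthat::expect_equal(as.character(once), as.character(twice))
})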
+} +} +\keyword{internal} diff --git a/man/lookup_new_special.Rd b/man/lookup_new_special.Rd new file mode 100644 index 000000000..0dfb87fc8 --- /dev/null +++ b/man/lookup_new_special.Rd @@ -0,0 +1,15 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/token-define.R +\name{lookup_new_special} +\alias{lookup_new_special} +\title{Lookup which new tokens were created from "SPECIAL"} +\usage{ +lookup_new_special(regex = NA) +} +\arguments{ +\item{regex}{A regular expression pattern to search for.} +} +\description{ +Lookup which new tokens were created from "SPECIAL" +} +\keyword{internal} diff --git a/man/lookup_tokens.Rd b/man/lookup_tokens.Rd new file mode 100644 index 000000000..ad8c4c95b --- /dev/null +++ b/man/lookup_tokens.Rd @@ -0,0 +1,12 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/token-define.R +\name{lookup_tokens} +\alias{lookup_tokens} +\title{Lookup all tokens that have a unique token-text mapping} +\usage{ +lookup_tokens() +} +\description{ +Lookup all tokens that have a unique token-text mapping +} +\keyword{internal} diff --git a/man/make_transformer.Rd b/man/make_transformer.Rd new file mode 100644 index 000000000..70722e975 --- /dev/null +++ b/man/make_transformer.Rd @@ -0,0 +1,34 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/transform-files.R +\name{make_transformer} +\alias{make_transformer} +\title{Closure to return a transformer function} +\usage{ +make_transformer( + transformers, + include_roxygen_examples, + base_indention, + warn_empty = TRUE +) +} +\arguments{ +\item{transformers}{A list of transformer functions that operate on flat +parse tables.} + +\item{include_roxygen_examples}{Whether or not to style code in roxygen +examples.} + +\item{base_indention}{Integer scalar indicating by how many spaces the whole +output text should be indented. Note that this is not the same as splitting +by line and add a \code{base_indention} spaces before the code in the case +multi-line strings are present. See 'Examples'.} + +\item{warn_empty}{Whether or not a warning should be displayed when \code{text} +does not contain any tokens.} +} +\description{ +This function takes a list of transformer functions as input and +returns a function that can be applied to character strings +that should be transformed. 
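A minimal sketch of make_transformer() in action, assuming the tidyverse style guide; the exported styling functions normally construct and apply this transformer for you.

# Build a transformer from a style guide and apply it to a character vector.
transformer <- styler:::make_transformer(
  tidyverse_style(),
  include_roxygen_examples = TRUE,
  base_indention = 0L
)
transformer("1+1")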
+} +\keyword{internal} diff --git a/man/map_filetype_to_pattern.Rd b/man/map_filetype_to_pattern.Rd new file mode 100644 index 000000000..249ca22cd --- /dev/null +++ b/man/map_filetype_to_pattern.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils-files.R +\name{map_filetype_to_pattern} +\alias{map_filetype_to_pattern} +\title{Map the file type to a corresponding regular expression} +\usage{ +map_filetype_to_pattern(filetype) +} +\arguments{ +\item{filetype}{The file type to map to a regex.} +} +\description{ +Map the file type to a corresponding regular expression +} +\examples{ +styler:::map_filetype_to_pattern(c(".rMd", "R")) +} +\keyword{internal} diff --git a/man/match_stop_to_start.Rd b/man/match_stop_to_start.Rd new file mode 100644 index 000000000..9701156af --- /dev/null +++ b/man/match_stop_to_start.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/roxygen-examples-find.R +\name{match_stop_to_start} +\alias{match_stop_to_start} +\title{Match a stop candidate to a start} +\usage{ +match_stop_to_start(start, stop_candidates) +} +\arguments{ +\item{start}{An integer.} + +\item{stop_candidates}{Potential stop candidates.} +} +\description{ +Match a stop candidate to a start +} +\examples{ +styler:::match_stop_to_start(1, c(3, 4, 5)) +} +\keyword{internal} diff --git a/man/math_token_spacing.Rd b/man/math_token_spacing.Rd index c6417ac68..128d05211 100644 --- a/man/math_token_spacing.Rd +++ b/man/math_token_spacing.Rd @@ -1,13 +1,12 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/style_guides.R +% Please edit documentation in R/style-guides.R \name{math_token_spacing} \alias{math_token_spacing} \alias{specify_math_token_spacing} \alias{tidyverse_math_token_spacing} \title{Specify spacing around math tokens} \usage{ -specify_math_token_spacing(zero = NULL, one = c("'+'", "'-'", "'*'", "'/'", - "'^'")) +specify_math_token_spacing(zero = "'^'", one = c("'+'", "'-'", "'*'", "'/'")) tidyverse_math_token_spacing() } @@ -25,14 +24,14 @@ Helper function to create the input for the argument \code{math_token_spacing} } \section{Functions}{ \itemize{ -\item \code{specify_math_token_spacing}: Allows to fully specify the math token +\item \code{specify_math_token_spacing()}: Allows to fully specify the math token spacing. -\item \code{tidyverse_math_token_spacing}: Simple forwarder to +\item \code{tidyverse_math_token_spacing()}: Simple forwarder to \code{specify_math_token_spacing} with spacing around math tokens according to the tidyverse style guide. 
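To illustrate the updated default visible in the hunk above (zero spaces around "'^'", one space around the other math operators):

# With these settings, "^" is set tight while "+", "-", "*" and "/" get one
# space on each side.
style_text(
  "1+1 -3 ^ 2",
  math_token_spacing = specify_math_token_spacing(zero = "'^'")
)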
-}} +}} \examples{ style_text( "1+1 -3", @@ -47,7 +46,7 @@ style_text( style_text( "1+1 -3", math_token_spacing = tidyverse_math_token_spacing(), - strict = TRUE + strict = FALSE ) style_text( "1+1 -3", diff --git a/man/n_times_faster_with_cache.Rd b/man/n_times_faster_with_cache.Rd new file mode 100644 index 000000000..b79701c03 --- /dev/null +++ b/man/n_times_faster_with_cache.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/testing.R +\name{n_times_faster_with_cache} +\alias{n_times_faster_with_cache} +\title{Times two function calls with temporarily enabled cache} +\usage{ +n_times_faster_with_cache( + x1, + x2 = x1, + ..., + fun = styler::style_text, + n = 3L, + clear = "always" +) +} +\arguments{ +\item{...}{Arguments passed to \code{fun}.} + +\item{fun}{The function that should be timed.} + +\item{n}{The number of times the experiment should be repeated.} +} +\value{ +A scalar indicating the relative difference of the second compared to the +first run. +} +\description{ +This can be helpful for benchmarking. +} +\keyword{internal} diff --git a/man/needs_indention.Rd b/man/needs_indention.Rd new file mode 100644 index 000000000..6f0f7469b --- /dev/null +++ b/man/needs_indention.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/indent.R +\name{needs_indention} +\alias{needs_indention} +\title{Check whether indention is needed} +\usage{ +needs_indention(pd, potential_triggers_pos, other_trigger_tokens = NULL) +} +\arguments{ +\item{pd}{A parse table.} + +\item{potential_triggers_pos}{A vector with indices of the potential trigger +tokens in \code{pd}.} + +\item{other_trigger_tokens}{Other tokens that are going to cause indention +if on the same line as the token corresponding to \code{potential_trigger} and +directly followed by a line break.} +} +\description{ +Checks for each potential trigger token in \code{pd} whether it actually should +cause indention. +} +\keyword{internal} diff --git a/man/needs_indention_one.Rd b/man/needs_indention_one.Rd new file mode 100644 index 000000000..813e5d6f6 --- /dev/null +++ b/man/needs_indention_one.Rd @@ -0,0 +1,49 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/indent.R +\name{needs_indention_one} +\alias{needs_indention_one} +\title{Check whether indention is needed} +\usage{ +needs_indention_one(pd, potential_trigger_pos, other_trigger_tokens) +} +\arguments{ +\item{pd}{A parse table.} + +\item{potential_trigger_pos}{the index of the token in the parse table +for which it should be checked whether it should trigger indention.} + +\item{other_trigger_tokens}{Other tokens that are going to cause indention +if on the same line as the token corresponding to \code{potential_trigger} and +directly followed by a line break.} +} +\value{ +Returns \code{TRUE} if indention is needed, \code{FALSE} otherwise. + +\code{TRUE} if indention is needed, \code{FALSE} otherwise. +} +\description{ +Determine whether the tokens corresponding to \code{potential_trigger_pos} should +cause indention, considering that there might be other potential triggers +\code{other_trigger_tokens} that are going to cause indention. +Indention is needed if the two conditions apply: +} +\details{ +\itemize{ +\item there is no multi-line token between the trigger and the first line break. +\item there is no other token between the potential trigger and the first line +break that is going to cause indention. 
Note that such an other trigger +only causes indention if there is a line break after that other triggering +token, not otherwise. If it causes indention, it is said to be an active +trigger, if it does not, it is called an inactive trigger. +See 'Details' for an example where there is an other trigger token, but +since the next token is on the same line as the other trigger, +the trigger is passive. +} +} +\examples{ +style_text(c( + "call(named = c,", + "named = b)" +), strict = FALSE) +} +\keyword{internal} diff --git a/man/needs_rd_emulation.Rd b/man/needs_rd_emulation.Rd new file mode 100644 index 000000000..93f76c841 --- /dev/null +++ b/man/needs_rd_emulation.Rd @@ -0,0 +1,12 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/roxygen-examples-parse.R +\name{needs_rd_emulation} +\alias{needs_rd_emulation} +\title{Check if rd emulation is required with \code{\link[roxygen2:roc_proc_text]{roxygen2::roc_proc_text()}}} +\usage{ +needs_rd_emulation(roxygen) +} +\description{ +Check if rd emulation is required with \code{\link[roxygen2:roc_proc_text]{roxygen2::roc_proc_text()}} +} +\keyword{internal} diff --git a/man/nest_parse_data.Rd b/man/nest_parse_data.Rd new file mode 100644 index 000000000..228fe40f9 --- /dev/null +++ b/man/nest_parse_data.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/nest.R +\name{nest_parse_data} +\alias{nest_parse_data} +\title{Nest a flat parse table} +\usage{ +nest_parse_data(pd_flat) +} +\arguments{ +\item{pd_flat}{A flat parse table including both terminals and non-terminals.} +} +\value{ +A nested parse table. +} +\description{ +\code{nest_parse_data} groups \code{pd_flat} into a parse table with tokens that are +a parent to other tokens (called internal) and such that are not (called +child). Then, the token in child are joined to their parents in internal +and all token information of the children is nested into a column "child". +This is done recursively until we are only left with a nested data frame that +contains one row: The nested parse table. +} +\seealso{ +\code{\link[=compute_parse_data_nested]{compute_parse_data_nested()}} +} +\keyword{internal} diff --git a/man/next_non_comment.Rd b/man/next_non_comment.Rd new file mode 100644 index 000000000..b66b1825c --- /dev/null +++ b/man/next_non_comment.Rd @@ -0,0 +1,33 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils-navigate-nest.R +\name{next_non_comment} +\alias{next_non_comment} +\alias{previous_non_comment} +\title{Find the index of the next or previous non-comment in a parse table.} +\usage{ +next_non_comment(pd, pos) + +previous_non_comment(pd, pos) +} +\arguments{ +\item{pd}{A parse table.} + +\item{pos}{The position of the token to start the search from.} +} +\description{ +Find the index of the next or previous non-comment in a parse table. 
+} +\examples{ +code <- "a <- # hi \n x \%>\% b()" +writeLines(code) +pd <- compute_parse_data_nested(code) +child <- pd$child[[1]] +previous_non_comment(child, 4L) +next_non_comment(child, 2L) +} +\seealso{ +Other third-party style guide helpers: +\code{\link{pd_is}}, +\code{\link{scope_normalize}()} +} +\concept{third-party style guide helpers} diff --git a/man/next_terminal.Rd b/man/next_terminal.Rd new file mode 100644 index 000000000..c332197de --- /dev/null +++ b/man/next_terminal.Rd @@ -0,0 +1,48 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils-navigate-nest.R +\name{next_terminal} +\alias{next_terminal} +\title{Tell me what the next terminal is} +\usage{ +next_terminal( + pd, + stack = FALSE, + vars = c("pos_id", "token", "text"), + tokens_exclude = NULL +) +} +\arguments{ +\item{pd}{A nest.} + +\item{stack}{Whether or not to also return information on the tokens that +are between \code{pd} and the first terminal, so the returned data frame can be +understood as a transition path from \code{pd} to the next terminal, instead of +the information at the terminal only. The order is inside-out, +i.e. the first non-terminal on top, the terminal last.} + +\item{vars}{The variables to return.} + +\item{tokens_exclude}{A vector with tokens to exclude. This can be helpful if +one wants to find the next token that is not a comment for example.} +} +\value{ +Returns a data frame (which is \strong{not} a valid parse table for +\code{stack = TRUE}), with \code{vars} and another variable \code{position} that denotes +the index each element in the transition. This can be helpful in conjunction +with \code{\link[purrr:pluck]{purrr::pluck()}} or \code{\link[purrr:modify_in]{purrr::modify_in()}} to reach the terminal in the +nested structure. +} +\description{ +If the first is a terminal, return it. If not, go inside it and search the +next terminal +} +\examples{ +withr::with_options( + list(styler.cache_name = NULL), # temporarily deactivate cache + { + pd <- compute_parse_data_nested("if (TRUE) f()") + styler:::next_terminal(pd) + } +) +} +\keyword{internal} diff --git a/man/option_read.Rd b/man/option_read.Rd new file mode 100644 index 000000000..8136f4396 --- /dev/null +++ b/man/option_read.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils.R +\name{option_read} +\alias{option_read} +\title{Get the value of an option} +\usage{ +option_read(x, default = NULL, error_if_not_found = TRUE) +} +\arguments{ +\item{x}{a character string holding an option name.} + +\item{default}{if the specified option is not set in the options list, + this value is returned. This facilitates retrieving an option and + checking whether it is set and setting it separately if not.} + +\item{error_if_not_found}{Whether or not an error should be returned if the +option was not set.} +} +\description{ +Basically a \code{getOptions()} that fails fast by default. +} +\keyword{internal} diff --git a/man/parse_roxygen.Rd b/man/parse_roxygen.Rd new file mode 100644 index 000000000..1d1f2c4a1 --- /dev/null +++ b/man/parse_roxygen.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/roxygen-examples-parse.R +\name{parse_roxygen} +\alias{parse_roxygen} +\title{Parse roxygen comments into text} +\usage{ +parse_roxygen(roxygen) +} +\arguments{ +\item{roxygen}{Roxygen comments.} +} +\description{ +Used to parse roxygen code examples. 
Removes line break before +\verb{\\\dontrun{...}} and friends because it does not occur for segments other +than \verb{\\\\dont\{...\}} and friends. +} +\examples{ +styler:::parse_roxygen(c( + "#' @examples", + "#' 1+ 1" +)) +styler:::parse_roxygen(c( + "#' @examples 33", + "#'1+ 1" +)) +} +\keyword{internal} diff --git a/man/parse_safely.Rd b/man/parse_safely.Rd new file mode 100644 index 000000000..de198e2fc --- /dev/null +++ b/man/parse_safely.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/parse.R +\name{parse_safely} +\alias{parse_safely} +\title{Save parsing from text} +\usage{ +parse_safely(text, ...) +} +\arguments{ +\item{text}{Text to parse.} + +\item{...}{Parameters passed to \code{\link[base:parse]{base::parse()}}.} +} +\description{ +Parses text safely, i.e. throws an informative error if EOL style does not +match LF or indicates the exact position where the parsing failed. Note +that we can only detect wrong EOL style if it occurs on the first line +already. +} +\examples{ +try(styler:::parse_safely("a + 3 -4 -> x\r\n glück + 1")) +# This cannot be detected as a EOL style problem because the first +# line ends as expected with \n +try(styler:::parse_safely("a + 3 -4 -> x\nx + 2\r\n glück + 1")) + +styler:::parse_safely("a + 3 -4 -> \n glück + 1") +} +\keyword{internal} diff --git a/man/parse_transform_serialize_r.Rd b/man/parse_transform_serialize_r.Rd new file mode 100644 index 000000000..2f2439d6c --- /dev/null +++ b/man/parse_transform_serialize_r.Rd @@ -0,0 +1,36 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/transform-files.R +\name{parse_transform_serialize_r} +\alias{parse_transform_serialize_r} +\title{Parse, transform and serialize text} +\usage{ +parse_transform_serialize_r( + text, + transformers, + base_indention, + warn_empty = TRUE, + is_roxygen_code_example = FALSE +) +} +\arguments{ +\item{text}{The text to parse.} + +\item{transformers}{Passed to \code{\link[=cache_make_key]{cache_make_key()}} to generate a key.} + +\item{base_indention}{Integer scalar indicating by how many spaces the whole +output text should be indented. Note that this is not the same as splitting +by line and add a \code{base_indention} spaces before the code in the case +multi-line strings are present. See 'Examples'.} + +\item{warn_empty}{Whether or not a warning should be displayed when \code{text} +does not contain any tokens.} + +\item{is_roxygen_code_example}{Is code a roxygen examples block?} +} +\description{ +Wrapper function for the common three operations. 
+} +\seealso{ +\code{\link[=parse_transform_serialize_roxygen]{parse_transform_serialize_roxygen()}} +} +\keyword{internal} diff --git a/man/parse_transform_serialize_r_block.Rd b/man/parse_transform_serialize_r_block.Rd new file mode 100644 index 000000000..64d85e7d3 --- /dev/null +++ b/man/parse_transform_serialize_r_block.Rd @@ -0,0 +1,43 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/transform-block.R +\name{parse_transform_serialize_r_block} +\alias{parse_transform_serialize_r_block} +\title{Parse, transform and serialize a nested parse table} +\usage{ +parse_transform_serialize_r_block( + pd_nested, + start_line, + transformers, + base_indention +) +} +\arguments{ +\item{pd_nested}{A block of the nested parse table.} + +\item{start_line}{The line number on which the code starts.} + +\item{transformers}{A list of \emph{named} transformer functions} + +\item{base_indention}{Integer scalar indicating by how many spaces the whole +output text should be indented. Note that this is not the same as splitting +by line and add a \code{base_indention} spaces before the code in the case +multi-line strings are present. See 'Examples'.} +} +\description{ +We process blocks of nested parse tables for speed. See \code{\link[=cache_find_block]{cache_find_block()}} +for details on how a top-level nest is split into blocks. +} +\examples{ +text_in <- 'x<- function() +"here +is" +NULL +1+ 1 +' +style_text(text_in, base_indention = 3) +# not equal to the naive approach +styler:::construct_vertical( + paste0(styler:::add_spaces(3), style_text(text_in), sep = "") +) +} +\keyword{internal} diff --git a/man/parse_transform_serialize_roxygen.Rd b/man/parse_transform_serialize_roxygen.Rd new file mode 100644 index 000000000..d95780f21 --- /dev/null +++ b/man/parse_transform_serialize_roxygen.Rd @@ -0,0 +1,37 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/transform-files.R +\name{parse_transform_serialize_roxygen} +\alias{parse_transform_serialize_roxygen} +\title{Parse, transform and serialize roxygen comments} +\usage{ +parse_transform_serialize_roxygen(text, transformers, base_indention) +} +\description{ +Splits \code{text} into roxygen code examples and non-roxygen code examples and +then maps over these examples by applying +\code{\link[=style_roxygen_code_example]{style_roxygen_code_example()}}. +} +\section{Hierarchy}{ + +Styling involves splitting roxygen example code into segments, and segments +into snippets. This describes the process for input of +\code{\link[=parse_transform_serialize_roxygen]{parse_transform_serialize_roxygen()}}: +\itemize{ +\item Splitting code into roxygen example code and other code. Downstream, +we are only concerned about roxygen code. See +\code{\link[=parse_transform_serialize_roxygen]{parse_transform_serialize_roxygen()}}. +\item Every roxygen example code can have zero or more +dontrun / dontshow / donttest sequences. We next create segments of roxygen +code examples that contain at most one of these. See +\code{\link[=style_roxygen_code_example]{style_roxygen_code_example()}}. +\item We further split the segment that contains at most one dont* sequence into +snippets that are either don* or not. See +\code{\link[=style_roxygen_code_example_segment]{style_roxygen_code_example_segment()}}. 
+} + +Finally, that we have roxygen code snippets that are either dont* or not, +we style them in \code{\link[=style_roxygen_example_snippet]{style_roxygen_example_snippet()}} using +\code{\link[=parse_transform_serialize_r]{parse_transform_serialize_r()}}. +} + +\keyword{internal} diff --git a/man/parse_tree_must_be_identical.Rd b/man/parse_tree_must_be_identical.Rd new file mode 100644 index 000000000..a9204cf03 --- /dev/null +++ b/man/parse_tree_must_be_identical.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/transform-files.R +\name{parse_tree_must_be_identical} +\alias{parse_tree_must_be_identical} +\title{Check whether a round trip verification can be carried out} +\usage{ +parse_tree_must_be_identical(transformers) +} +\arguments{ +\item{transformers}{The list of transformer functions used for styling. +Needed for reverse engineering the scope.} +} +\description{ +If scope was set to "line_breaks" or lower (compare \code{\link[=tidyverse_style]{tidyverse_style()}}), +we can compare the expression before and after styling and return an error if +it is not the same. +} +\keyword{internal} diff --git a/man/parser_version_set.Rd b/man/parser_version_set.Rd new file mode 100644 index 000000000..d740077a1 --- /dev/null +++ b/man/parser_version_set.Rd @@ -0,0 +1,55 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/environments.R +\name{parser_version_set} +\alias{parser_version_set} +\alias{parser_version_get} +\alias{parser_version_find} +\title{Work with parser versions} +\usage{ +parser_version_set(version) + +parser_version_get() + +parser_version_find(pd) +} +\arguments{ +\item{version}{The version of the parser to be used.} + +\item{pd}{A parse table such as the output from +\code{utils::getParseData(parse(text = text))}.} +} +\description{ +The structure of the parse data affects many operations in styler. There was +unexpected behavior of the parser that styler was initially designed to work +around. Examples are \href{https://github.com/r-lib/styler/issues/187}{#187}, +\href{https://github.com/r-lib/styler/issues/216}{#216}, +\href{https://github.com/r-lib/styler/issues/100}{#100} and others. With +\href{https://github.com/r-lib/styler/issues/419}{#419}, the structure of the +parse data changes and we need to dispatch for older versions. As it is +inconvenient to pass a parser version down in the call stack in various +places, the environment \code{env_current} is used to store the current version +\emph{globally} but internally. +} +\details{ +We version the parser as follows: +\itemize{ +\item version 1: Before fix mentioned in #419. R < 3.6 +\item version 2: After #419. R >= 3.6 +} +} +\section{version 3: After #582. R >= 4.0}{ +The following utilities are available: +\itemize{ +\item \code{parser_version_set()} sets the parser version in the environment +\code{env_current}. +\item \code{parser_version_get()} retrieves the parser version from the +environment \code{env_current}. +\item \code{parser_version_find()} determines the version of the parser from parse +data. This does not necessarily mean that the version found is the +actual version, but it \emph{behaves} like it. For example, code that does not +contain \code{EQ_ASSIGN} is parsed the same way with version 1 and 2. If the +behavior is identical, the version is set to 1. 
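An illustrative call to parser_version_find(), documented above; it only shows the expected input shape, not which version a given R installation will report:

# Determine which parser version the parse data of a snippet behaves like.
pd <- utils::getParseData(parse(text = "x <- 1", keep.source = TRUE))
styler:::parser_version_find(pd)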
+} +} + +\keyword{internal} diff --git a/man/pd_is.Rd b/man/pd_is.Rd new file mode 100644 index 000000000..d05b1b731 --- /dev/null +++ b/man/pd_is.Rd @@ -0,0 +1,119 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/expr-is.R +\name{pd_is} +\alias{pd_is} +\alias{is_curly_expr} +\alias{is_for_expr} +\alias{is_conditional_expr} +\alias{is_while_expr} +\alias{is_function_call} +\alias{is_function_declaration} +\alias{is_comment} +\alias{is_tilde_expr} +\alias{is_asymmetric_tilde_expr} +\alias{is_symmetric_tilde_expr} +\title{What is a parse table representing?} +\usage{ +is_curly_expr(pd) + +is_for_expr(pd) + +is_conditional_expr(pd) + +is_while_expr(pd) + +is_function_call(pd) + +is_function_declaration(pd) + +is_comment(pd) + +is_tilde_expr(pd, tilde_pos = c(1L, 2L)) + +is_asymmetric_tilde_expr(pd) + +is_symmetric_tilde_expr(pd) +} +\arguments{ +\item{pd}{A parse table.} + +\item{tilde_pos}{Integer vector indicating row-indices that should be +checked for tilde. See 'Details'.} +} +\description{ +Check whether a parse table corresponds to a certain expression. +} +\details{ +A tilde is on the top row in the parse table if it is an asymmetric tilde +expression (like \code{~column}), in the second row if it is a symmetric tilde +expression (like \code{a~b}). +} +\section{Functions}{ +\itemize{ +\item \code{is_curly_expr()}: Checks whether \code{pd} contains an expression wrapped in curly brackets. + +\item \code{is_for_expr()}: Checks whether \code{pd} contains a \code{for} loop. + +\item \code{is_conditional_expr()}: Checks whether \code{pd} contains is a conditional expression. + +\item \code{is_while_expr()}: Checks whether \code{pd} contains a \code{while} loop. + +\item \code{is_function_call()}: Checks whether \code{pd} is a function call. + +\item \code{is_function_declaration()}: Checks whether \code{pd} is a function declaration. + +\item \code{is_comment()}: Checks for every token whether or not it is a comment. + +\item \code{is_tilde_expr()}: Checks whether \code{pd} contains a tilde. + +\item \code{is_asymmetric_tilde_expr()}: If \code{pd} contains a tilde, checks whether it is asymmetrical. + +\item \code{is_symmetric_tilde_expr()}: If \code{pd} contains a tilde, checks whether it is symmetrical. 
+ +}} +\examples{ +code <- "if (TRUE) { 1 }" +pd <- compute_parse_data_nested(code) +is_curly_expr(pd) +child_of_child <- pd$child[[1]]$child[[5]] +is_curly_expr(child_of_child) + +code <- "for (i in 1:5) print(1:i)" +pd <- compute_parse_data_nested(code) +is_for_expr(pd) +is_for_expr(pd$child[[1]]) + +code <- "if (TRUE) x <- 1 else x <- 0" +pd <- compute_parse_data_nested(code) +is_conditional_expr(pd) +is_conditional_expr(pd$child[[1]]) + +code <- "x <- list(1:3)" +pd <- compute_parse_data_nested(code) +is_function_call(pd) +child_of_child <- pd$child[[1]]$child[[3]] +is_function_call(child_of_child) + +code <- "foo <- function() NULL" +pd <- compute_parse_data_nested(code) +is_function_declaration(pd) +child_of_child <- pd$child[[1]]$child[[3]] +is_function_declaration(child_of_child) + +code <- "x <- 1 # TODO: check value" +pd <- compute_parse_data_nested(code) +is_comment(pd) + +code <- "lm(wt ~ mpg, mtcars)" +pd <- compute_parse_data_nested(code) +is_tilde_expr(pd$child[[1]]$child[[3]]) +is_symmetric_tilde_expr(pd$child[[1]]$child[[3]]) +is_asymmetric_tilde_expr(pd$child[[1]]$child[[3]]) + +} +\seealso{ +Other third-party style guide helpers: +\code{\link{next_non_comment}()}, +\code{\link{scope_normalize}()} +} +\concept{third-party style guide helpers} diff --git a/man/pd_is_multi_line.Rd b/man/pd_is_multi_line.Rd new file mode 100644 index 000000000..1b0a6c62d --- /dev/null +++ b/man/pd_is_multi_line.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/indent.R +\name{pd_is_multi_line} +\alias{pd_is_multi_line} +\title{Check whether a parse table is a multi-line token} +\usage{ +pd_is_multi_line(pd) +} +\arguments{ +\item{pd}{A parse table.} +} +\description{ +A token is a multi-line expression if and only if: +} +\details{ +\itemize{ +\item it contains a line break. +\item it has at least one child that is a multi-line expression itself. +} +} +\keyword{internal} diff --git a/man/post_parse_roxygen.Rd b/man/post_parse_roxygen.Rd new file mode 100644 index 000000000..35bfaf396 --- /dev/null +++ b/man/post_parse_roxygen.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/roxygen-examples-parse.R +\name{post_parse_roxygen} +\alias{post_parse_roxygen} +\title{Changing the line definition} +\usage{ +post_parse_roxygen(raw) +} +\arguments{ +\item{raw}{Raw code to post-process.} +} +\description{ +Input: New line denoted with \verb{\\\\n}. Lines can span across elements. +Output: Each element in the vector is one line. +} +\keyword{internal} diff --git a/man/prettify_any.Rd b/man/prettify_any.Rd new file mode 100644 index 000000000..c4b7fc5a1 --- /dev/null +++ b/man/prettify_any.Rd @@ -0,0 +1,55 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/ui-styling.R +\name{prettify_any} +\alias{prettify_any} +\title{Prettify R code in current working directory} +\usage{ +prettify_any( + transformers, + filetype, + recursive, + exclude_files, + exclude_dirs, + include_roxygen_examples, + base_indention = 0L, + dry +) +} +\arguments{ +\item{transformers}{A set of transformer functions. This argument is most +conveniently constructed via the \code{style} argument and \code{...}. See +'Examples'.} + +\item{filetype}{Vector of file extensions indicating which file types should +be styled. Case is ignored, and the \code{.} is optional, e.g. \code{c(".R", ".Rmd")}, or \code{c("r", "rmd")}. 
Supported values (after standardization) are: +"r", "rprofile", "rmd", "rmarkdown", "rnw", "qmd". Rmarkdown is treated as +Rmd.} + +\item{recursive}{A logical value indicating whether or not files in +subdirectories should be styled as well.} + +\item{exclude_files}{Character vector with paths to files that should be +excluded from styling.} + +\item{exclude_dirs}{Character vector with directories to exclude +(recursively). Note that the default values were set for consistency with +\code{\link[=style_dir]{style_dir()}} and as these directories are anyways not styled.} + +\item{include_roxygen_examples}{Whether or not to style code in roxygen +examples.} + +\item{base_indention}{Integer scalar indicating by how many spaces the whole +output text should be indented. Note that this is not the same as splitting +by line and add a \code{base_indention} spaces before the code in the case +multi-line strings are present. See 'Examples'.} + +\item{dry}{To indicate whether styler should run in \emph{dry} mode, i.e. refrain +from writing back to files .\code{"on"} and \code{"fail"} both don't write back, the +latter returns an error if the input code is not identical to the result +of styling. "off", the default, writes back if the input and output of +styling are not identical.} +} +\description{ +This is a helper function for style_dir. +} +\keyword{internal} diff --git a/man/prettify_pkg.Rd b/man/prettify_pkg.Rd new file mode 100644 index 000000000..4e9a40cb8 --- /dev/null +++ b/man/prettify_pkg.Rd @@ -0,0 +1,50 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/ui-styling.R +\name{prettify_pkg} +\alias{prettify_pkg} +\title{Prettify a package} +\usage{ +prettify_pkg( + transformers, + filetype, + exclude_files, + exclude_dirs, + include_roxygen_examples, + base_indention, + dry +) +} +\arguments{ +\item{transformers}{A list of transformer functions that operate on flat +parse tables.} + +\item{filetype}{Vector of file extensions indicating which file types should +be styled. Case is ignored, and the \code{.} is optional, e.g. \code{c(".R", ".Rmd")}, or \code{c("r", "rmd")}. Supported values (after standardization) are: +"r", "rprofile", "rmd", "rmarkdown", "rnw", "qmd". Rmarkdown is treated as +Rmd.} + +\item{exclude_files}{Character vector with paths to files that should be +excluded from styling.} + +\item{exclude_dirs}{Character vector with directories to exclude +(recursively). Note that the default values were set for consistency with +\code{\link[=style_dir]{style_dir()}} and as these directories are anyways not styled.} + +\item{include_roxygen_examples}{Whether or not to style code in roxygen +examples.} + +\item{base_indention}{Integer scalar indicating by how many spaces the whole +output text should be indented. Note that this is not the same as splitting +by line and add a \code{base_indention} spaces before the code in the case +multi-line strings are present. See 'Examples'.} + +\item{dry}{To indicate whether styler should run in \emph{dry} mode, i.e. refrain +from writing back to files .\code{"on"} and \code{"fail"} both don't write back, the +latter returns an error if the input code is not identical to the result +of styling. 
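The dry argument documented above is easiest to see through the exported file API; the temporary file below is purely illustrative:

# With dry = "on", styler reports what it would change but leaves the file
# untouched.
tmp <- tempfile(fileext = ".R")
writeLines("1+1", tmp)
style_file(tmp, dry = "on")
readLines(tmp) # still "1+1"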
"off", the default, writes back if the input and output of +styling are not identical.} +} +\description{ +Prettify a package +} +\keyword{internal} diff --git a/man/print.vertical.Rd b/man/print.vertical.Rd new file mode 100644 index 000000000..ee1c9051d --- /dev/null +++ b/man/print.vertical.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/vertical.R +\name{print.vertical} +\alias{print.vertical} +\title{Print styled code} +\usage{ +\method{print}{vertical}( + x, + ..., + colored = getOption("styler.colored_print.vertical"), + style = prettycode::default_style() +) +} +\arguments{ +\item{x}{A character vector, one element corresponds to one line of code.} + +\item{...}{Not currently used.} + +\item{colored}{Whether or not the output should be colored with +\code{prettycode::highlight()}.} + +\item{style}{Passed to \code{prettycode::highlight()}.} +} +\description{ +Print styled code +} diff --git a/man/read_utf8.Rd b/man/read_utf8.Rd new file mode 100644 index 000000000..96c30f640 --- /dev/null +++ b/man/read_utf8.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/io.R +\name{read_utf8} +\alias{read_utf8} +\title{Read UTF-8} +\usage{ +read_utf8(path) +} +\arguments{ +\item{path}{A path to a file to read.} +} +\description{ +Reads an UTF-8 file, returning the content and whether or not the final line +was blank. This information is required higher up in the call stack because +we should write back if contents changed or if there is no blank line at the +EOF. A perfectly styled file with no EOF blank line will gain such a line +with this implementation. +} +\keyword{internal} diff --git a/man/read_utf8_bare.Rd b/man/read_utf8_bare.Rd new file mode 100644 index 000000000..52f397af4 --- /dev/null +++ b/man/read_utf8_bare.Rd @@ -0,0 +1,14 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/io.R +\name{read_utf8_bare} +\alias{read_utf8_bare} +\title{Drop-in replacement for \code{xfun::read_utf8()}, with an optional \code{warn} +argument.} +\usage{ +read_utf8_bare(con, warn = TRUE) +} +\description{ +Drop-in replacement for \code{xfun::read_utf8()}, with an optional \code{warn} +argument. +} +\keyword{internal} diff --git a/man/reindention.Rd b/man/reindention.Rd index 7a6cdb0ed..c2dd9daf9 100644 --- a/man/reindention.Rd +++ b/man/reindention.Rd @@ -1,13 +1,12 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/style_guides.R +% Please edit documentation in R/style-guides.R \name{reindention} \alias{reindention} \alias{specify_reindention} \alias{tidyverse_reindention} \title{Specify what is re-indented how} \usage{ -specify_reindention(regex_pattern = NULL, indention = 0, - comments_only = TRUE) +specify_reindention(regex_pattern = NULL, indention = 0L, comments_only = TRUE) tidyverse_reindention() } @@ -30,17 +29,17 @@ the need of setting all arguments explicitly. } \section{Functions}{ \itemize{ -\item \code{specify_reindention}: Allows to specify which tokens are reindented and +\item \code{specify_reindention()}: Allows to specify which tokens are reindented and how. -\item \code{tidyverse_reindention}: Simple forwarder to +\item \code{tidyverse_reindention()}: Simple forwarder to \code{specify_reindention} with reindention according to the tidyverse style guide. 
-}} +}} \examples{ style_text("a <- xyz", reindention = specify_reindention( - regex_pattern = "xyz", indention = 4, comments_only = FALSE) -) + regex_pattern = "xyz", indention = 4, comments_only = FALSE +)) style_text("a <- xyz", reindention = tidyverse_reindention()) } diff --git a/man/remove_dont_mask.Rd b/man/remove_dont_mask.Rd new file mode 100644 index 000000000..616fbcb51 --- /dev/null +++ b/man/remove_dont_mask.Rd @@ -0,0 +1,15 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/roxygen-examples-add-remove.R +\name{remove_dont_mask} +\alias{remove_dont_mask} +\title{Remove dont* mask} +\usage{ +remove_dont_mask(roxygen) +} +\arguments{ +\item{roxygen}{Roxygen code examples that contains a dont* segment only.} +} +\description{ +Remove dont* mask +} +\keyword{internal} diff --git a/man/remove_roxygen_header.Rd b/man/remove_roxygen_header.Rd new file mode 100644 index 000000000..0eaa658f4 --- /dev/null +++ b/man/remove_roxygen_header.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/roxygen-examples-add-remove.R +\name{remove_roxygen_header} +\alias{remove_roxygen_header} +\title{Remove roxygen header} +\usage{ +remove_roxygen_header(text) +} +\description{ +Can't simply remove the element with the regex because it may happen that +the roxygen tag is on the same line as its contents start. +} +\examples{ +#' @examples c(1, 2) +} +\keyword{internal} diff --git a/man/rep_char.Rd b/man/rep_char.Rd new file mode 100644 index 000000000..bfed9b9ba --- /dev/null +++ b/man/rep_char.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils-strings.R +\name{rep_char} +\alias{rep_char} +\title{Repeat elements of a character vector \code{times} times and collapse it} +\usage{ +rep_char(char, times) +} +\arguments{ +\item{char}{A character vector.} + +\item{times}{an integer giving the number of repetitions.} +} +\value{ +A character vector. +} +\description{ +Repeat elements of a character vector \code{times} times and collapse it +} +\keyword{internal} diff --git a/man/roxygen_remove_extra_brace.Rd b/man/roxygen_remove_extra_brace.Rd new file mode 100644 index 000000000..7024e4218 --- /dev/null +++ b/man/roxygen_remove_extra_brace.Rd @@ -0,0 +1,37 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/roxygen-examples-parse.R +\name{roxygen_remove_extra_brace} +\alias{roxygen_remove_extra_brace} +\title{Fix \code{\link[tools:parse_Rd]{tools::parse_Rd()}} output} +\usage{ +roxygen_remove_extra_brace(parsed) +} +\description{ +Since \code{\link[tools:parse_Rd]{tools::parse_Rd()}} treats braces in quotes as literal braces when +determining brace symmetry, a brace might be added in error to the parsed +data (at the end). We'll remove one at the time, check if output is parsable +until no braces are left. If we end up with no braces left, we signal a +parsing error, otherwise, we return the initial (not parsable input due to +\emph{dont} sequence) with the trailing braces removed. 
+} +\examples{ +styler:::parse_roxygen( + c( + "#' @examples", + "#' x <- '{'", + "#' \\\\dontrun{", + "#' fu(x = 3)", + "#' }" + ) +) +styler:::parse_roxygen( + c( + "#' @examples", + "#' x <- '{'", + "#' \\\\dontrun{", + "#' c('{', \"'{{{\" ,\"[\")", + "#' }" + ) +) +} +\keyword{internal} diff --git a/man/save_after_styling_is_active.Rd b/man/save_after_styling_is_active.Rd new file mode 100644 index 000000000..3199e0b12 --- /dev/null +++ b/man/save_after_styling_is_active.Rd @@ -0,0 +1,13 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/addins.R +\name{save_after_styling_is_active} +\alias{save_after_styling_is_active} +\title{Heuristic to see if a file styled with the addin should be saved or not.} +\usage{ +save_after_styling_is_active() +} +\description{ +Using the R option \code{"styler.save_after_styling"} and if unset, checks legacy +method via environment variable \code{save_after_styling}. +} +\keyword{internal} diff --git a/man/scope_normalize.Rd b/man/scope_normalize.Rd new file mode 100644 index 000000000..bd98878e0 --- /dev/null +++ b/man/scope_normalize.Rd @@ -0,0 +1,32 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/style-guides.R +\name{scope_normalize} +\alias{scope_normalize} +\title{Convert the styling scope to its lower-level representation} +\usage{ +scope_normalize(scope, name = substitute(scope)) +} +\arguments{ +\item{scope}{A character vector of length one or a vector of class \code{AsIs}.} + +\item{name}{The name of the character vector to be displayed if the +construction of the factor fails.} +} +\description{ +If \code{scope} is of class \code{character} and of length one, the value of the +argument and all less-invasive levels are included too (e.g. +styling tokens includes styling spaces). If +\code{scope} is of class \code{AsIs}, every level to be included has to be declared +individually. See compare \code{\link[=tidyverse_style]{tidyverse_style()}} for the possible levels and +their order. +} +\examples{ +scope_normalize(I("tokens")) +scope_normalize(I(c("indention", "tokens"))) +} +\seealso{ +Other third-party style guide helpers: +\code{\link{next_non_comment}()}, +\code{\link{pd_is}} +} +\concept{third-party style guide helpers} diff --git a/man/separate_chunks.Rd b/man/separate_chunks.Rd new file mode 100644 index 000000000..82f6299c5 --- /dev/null +++ b/man/separate_chunks.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/transform-code.R +\name{separate_chunks} +\alias{separate_chunks} +\title{Separate chunks within Rmd and Rnw contents} +\usage{ +separate_chunks(lines, filetype) +} +\arguments{ +\item{lines}{A character vector of lines from an Rmd or Rnw file.} + +\item{filetype}{A string indicating the filetype - either 'Rmd' or 'Rnw'.} +} +\description{ +Identifies and separates the code and text chunks (the latter includes non-R +code) within an Rmd or Rnw file, and returns these separately. 
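To see the scope levels that scope_normalize(), documented earlier in this hunk, expands, compare two illustrative calls:

# A character scope includes all less invasive levels, so "tokens" also
# fixes spacing and replaces the assignment operator; an AsIs scope applies
# only the levels named explicitly.
style_text("a=2", scope = "tokens")
style_text("a=2", scope = I("spaces"))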
+} +\keyword{internal} diff --git a/man/serialize_parse_data_flattened.Rd b/man/serialize_parse_data_flattened.Rd new file mode 100644 index 000000000..6dc7f1f09 --- /dev/null +++ b/man/serialize_parse_data_flattened.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/serialize.R +\name{serialize_parse_data_flattened} +\alias{serialize_parse_data_flattened} +\title{Serialize flattened parse data} +\usage{ +serialize_parse_data_flattened(flattened_pd, indent_character = "") +} +\arguments{ +\item{flattened_pd}{A flattened parse table.} + +\item{indent_character}{The character that is used for indention. We strongly +advise for using spaces as indention characters.} +} +\description{ +Collapses a flattened parse table into character vector representation. +} +\keyword{internal} diff --git a/man/set_and_assert_arg_filetype.Rd b/man/set_and_assert_arg_filetype.Rd new file mode 100644 index 000000000..64d8342a9 --- /dev/null +++ b/man/set_and_assert_arg_filetype.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/set-assert-args.R +\name{set_and_assert_arg_filetype} +\alias{set_and_assert_arg_filetype} +\title{Set the file type argument} +\usage{ +set_and_assert_arg_filetype(filetype) +} +\arguments{ +\item{filetype}{A character vector with file types to convert to the internal +standard format.} +} +\description{ +Sets and asserts the file type argument to a standard format for further internal +processing. +} +\examples{ +styler:::set_and_assert_arg_filetype("rMd") +try(styler:::set_and_assert_arg_filetype("xyz")) +} +\keyword{internal} diff --git a/man/set_arg_paths.Rd b/man/set_arg_paths.Rd new file mode 100644 index 000000000..afa60e3e0 --- /dev/null +++ b/man/set_arg_paths.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/set-assert-args.R +\name{set_arg_paths} +\alias{set_arg_paths} +\title{Standardize paths in root} +\usage{ +set_arg_paths(path) +} +\arguments{ +\item{path}{A path.} +} +\description{ +Standardization required to use \code{setdiff()} with paths. +} +\examples{ +styler:::set_arg_paths(c("./file.R", "file.R", "../another-file.R")) +} +\seealso{ +dir_without_. +} +\keyword{internal} diff --git a/man/set_arg_write_tree.Rd b/man/set_arg_write_tree.Rd new file mode 100644 index 000000000..9bea02feb --- /dev/null +++ b/man/set_arg_write_tree.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/set-assert-args.R +\name{set_arg_write_tree} +\alias{set_arg_write_tree} +\title{Set the write_tree argument} +\usage{ +set_arg_write_tree(write_tree) +} +\arguments{ +\item{write_tree}{Whether or not to write tree.} +} +\description{ +Sets the argument \code{write_tree} in \code{\link[=test_collection]{test_collection()}} to be \code{TRUE} for R +versions higher or equal to 3.2, and \code{FALSE} otherwise since the second-level +dependency \code{DiagrammeR} from \code{data.tree} is not available for R < 3.2. 
+} +\keyword{internal} diff --git a/man/set_line_break_after_opening_if_call_is_multi_line.Rd b/man/set_line_break_after_opening_if_call_is_multi_line.Rd new file mode 100644 index 000000000..417c04a1a --- /dev/null +++ b/man/set_line_break_after_opening_if_call_is_multi_line.Rd @@ -0,0 +1,31 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/rules-line-breaks.R +\name{set_line_break_after_opening_if_call_is_multi_line} +\alias{set_line_break_after_opening_if_call_is_multi_line} +\title{Sets line break after opening parenthesis} +\usage{ +set_line_break_after_opening_if_call_is_multi_line( + pd, + except_token_after = NULL, + except_text_before = NULL, + force_text_before = NULL +) +} +\description{ +Sets line break after opening parenthesis +} +\details{ +In general, every call that is multi-line has a line break after the opening +parenthesis. Exceptions: +\itemize{ +\item The token right after the parenthesis is a comment, then, the line should +be broken after the comment only. Governed by \code{except_token_after}. +\item The name of the function called is \code{ifelse()} or similar, where we can +allow the condition on the same line as the function name, and we don't +impose rules on the line breaks for the subsequent arguments. Governed +by \code{except_text_before}. +\item Some calls like \code{switch()} statements are always forced to become multi- +line. Governed by \code{force_text_before}. +} +} +\keyword{internal} diff --git a/man/set_line_break_around_curly_curly.Rd b/man/set_line_break_around_curly_curly.Rd new file mode 100644 index 000000000..06fd2fa9e --- /dev/null +++ b/man/set_line_break_around_curly_curly.Rd @@ -0,0 +1,48 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/rules-line-breaks.R, R/rules-spaces.R +\name{set_line_break_around_curly_curly} +\alias{set_line_break_around_curly_curly} +\alias{set_space_in_curly_curly} +\title{Styling around \verb{\\\{\\\{}} +\usage{ +set_line_break_around_curly_curly(pd) + +set_space_in_curly_curly(pd) +} +\arguments{ +\item{pd}{A parse table.} +} +\description{ +With \{rlang\} version 0.4, a new syntactic sugar is introduced, the +curly-curly operator. It interprets this code in a special way: +\verb{call(\\\{\\\{ x \\\}\\\})}. See this +\href{https://www.tidyverse.org/blog/2019/06/rlang-0-4-0/}{blog post} +on the topic. Here, the curly-curly sugar is understood as two opening +curly braces, followed by an expression followed by two closing curly braces, +e.g. \verb{\\\{\\\{1\\\}\\\}}. \verb{\\\{\\\{1\\\} + 1\\\}} does not contain the curly-curly syntactic +sugar according to the above definition. On the other hand \verb{\\\{\\\{ x + y \\\}\\\}} +is recognized by styler as containing it (and is parsable code) +but will most likely give an error at runtime because the way the syntactic +sugar is defined in rlang is to use a single token within curly-curly. In +addition, because rlang parses \verb{\\\{\\\{} in a special way (just as \verb{!!}), the +expression \verb{\\\{\\\{ x \\\}\\\}} will give a runtime error when used outside of a +context that is capable of handling it, e.g. on the top-level (that is, not +within function call like \verb{rlang_fun(\\\{\\\{ x \\\}\\\})}) or within a base R +function such as \code{\link[=c]{c()}}. However, these differences are assumed to be +irrelevant for styling curly-curly, as much as they were for styling \verb{!!}. 
+curly-curly affects styling of line break and spaces, namely: +} +\details{ +\itemize{ +\item No line break after first or second \verb{\\\{}, before third and fourth \verb{\\\{}. +\item No space after first and third \verb{\\\{}, one space after second and before +third \verb{\\\}}. +\item No line breaks within curly-curly, e.g. \verb{\\\{\\\{ x \\\}\\\}} can only contain line +breaks after the last brace or before the first brace. But these are not +dependent on curly-curly specifically. +} +} +\seealso{ +style_text_without_curly_curly +} +\keyword{internal} diff --git a/man/set_line_break_before_curly_opening.Rd b/man/set_line_break_before_curly_opening.Rd new file mode 100644 index 000000000..c590b58fc --- /dev/null +++ b/man/set_line_break_before_curly_opening.Rd @@ -0,0 +1,66 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/rules-line-breaks.R +\name{set_line_break_before_curly_opening} +\alias{set_line_break_before_curly_opening} +\title{Set line break before a curly brace} +\usage{ +set_line_break_before_curly_opening(pd) +} +\description{ +Rule: +\itemize{ +\item Principle: Function arguments that consist of a braced expression always +need to start on a new line +\item Exception: \link{...} unless it's the last argument and all other +arguments fit on the line of the function call +\item Exception: \link{...} or they are named. +\item Extension: Also, expressions following on braced expressions also cause a +line trigger. +} +} +\examples{ +\dontshow{if (FALSE) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} +tryCatch( + { + f(8) + }, + error = function(e) NULL +) +# last-argument case +testthat("braces braces are cool", { + code(to = execute) +}) +call2( + x = 2, { + code(to = execute) + }, + c = { + # this is the named case + g(x = 7) + } +) +tryGugus( + { + g5(k = na) + }, + a + b # line break also here because + # preceded by brace expression +) + +# brace expressions go on new line if part of a pipe, in function call... +c( + data \%>\% + filter(bar) \%>\% + { + cor(.$col1, .$col2, use = "complete.obs") + } +) +# ... 
or outside +data \%>\% + filter(bar) \%>\% + { + cor(.$col1, .$col2, use = "complete.obs") + } +\dontshow{\}) # examplesIf} +} +\keyword{internal} diff --git a/man/set_line_break_if_call_is_multi_line.Rd b/man/set_line_break_if_call_is_multi_line.Rd new file mode 100644 index 000000000..a8482a378 --- /dev/null +++ b/man/set_line_break_if_call_is_multi_line.Rd @@ -0,0 +1,37 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/rules-line-breaks.R +\name{set_line_break_if_call_is_multi_line} +\alias{set_line_break_if_call_is_multi_line} +\alias{set_line_break_before_closing_call} +\alias{remove_line_break_in_fun_call} +\title{Set line break for multi-line function calls} +\usage{ +set_line_break_before_closing_call(pd, except_token_before) + +remove_line_break_in_fun_call(pd, strict) +} +\arguments{ +\item{pd}{A parse table.} + +\item{except_token_before}{A character vector with text before "')'" that do +not cause a line break before "')'".} + +\item{except_token_after}{A character vector with tokens after "'('" that do +not cause a line break after "'('".} + +\item{except_text_before}{A character vector with text before "'('" that do +not cause a line break after "'('".} + +\item{force_text_before}{A character vector with text before "'('" that +forces a line break after every argument in the call.} +} +\description{ +Set line break for multi-line function calls +} +\section{Functions}{ +\itemize{ +\item \code{set_line_break_before_closing_call()}: Sets line break before +closing parenthesis. + +}} +\keyword{internal} diff --git a/man/set_multi_line.Rd b/man/set_multi_line.Rd new file mode 100644 index 000000000..30d4ed8e3 --- /dev/null +++ b/man/set_multi_line.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/indent.R +\name{set_multi_line} +\alias{set_multi_line} +\title{Set the multi-line column} +\usage{ +set_multi_line(pd) +} +\arguments{ +\item{pd}{A parse table.} +} +\description{ +Sets the column \code{multi_line} in \code{pd} by checking row-wise whether any child +of a token is a multi-line token. +} +\keyword{internal} diff --git a/man/set_regex_indention.Rd b/man/set_regex_indention.Rd new file mode 100644 index 000000000..26f1ad16f --- /dev/null +++ b/man/set_regex_indention.Rd @@ -0,0 +1,35 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/reindent.R +\name{set_regex_indention} +\alias{set_regex_indention} +\title{Set indention of tokens that match regex} +\usage{ +set_regex_indention( + flattened_pd, + pattern, + target_indention = 0L, + comments_only = TRUE +) +} +\arguments{ +\item{flattened_pd}{A flattened parse table.} + +\item{pattern}{A character with regular expressions to match against the +token in \code{flattened_pd}.} + +\item{target_indention}{The desired level of indention of the tokens that +match \code{pattern}.} + +\item{comments_only}{Boolean indicating whether only comments should be +checked or all tokens.} +} +\value{ +A flattened parse table with indention set to \code{target_indention} for +the tokens that match \code{regex.} +} +\description{ +Force the level of indention of tokens whose text matches a regular +expression pattern to be a certain amount of spaces. The rule +is only active for the first tokens on a line. 
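The regex-based indention machinery documented above is reachable from the exported API through the reindention argument of tidyverse_style(), constructed with specify_reindention(); the pattern and target indention below are arbitrary illustrations.

styler::style_text(
  c(
    "call(",
    "      # pin this comment to column 0",
    "  1",
    ")"
  ),
  reindention = styler::specify_reindention(
    regex_pattern = "^# pin", indention = 0, comments_only = TRUE
  )
)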
+} +\keyword{internal} diff --git a/man/set_space_around_op.Rd b/man/set_space_around_op.Rd new file mode 100644 index 000000000..c3a4502e3 --- /dev/null +++ b/man/set_space_around_op.Rd @@ -0,0 +1,12 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/rules-spaces.R +\name{set_space_around_op} +\alias{set_space_around_op} +\title{Set spaces around operators} +\usage{ +set_space_around_op(pd_flat, strict) +} +\description{ +Alignment is kept, if detected. +} +\keyword{internal} diff --git a/man/set_space_between_eq_sub_and_comma.Rd b/man/set_space_between_eq_sub_and_comma.Rd new file mode 100644 index 000000000..f0a8677b0 --- /dev/null +++ b/man/set_space_between_eq_sub_and_comma.Rd @@ -0,0 +1,15 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/rules-spaces.R +\name{set_space_between_eq_sub_and_comma} +\alias{set_space_between_eq_sub_and_comma} +\title{Set space between \code{EQ_SUB} and \code{"','"}} +\usage{ +set_space_between_eq_sub_and_comma(pd) +} +\arguments{ +\item{pd}{A parse table.} +} +\description{ +Set space between \code{EQ_SUB} and \code{"','"} +} +\keyword{internal} diff --git a/man/set_space_between_levels.Rd b/man/set_space_between_levels.Rd new file mode 100644 index 000000000..7f6244b86 --- /dev/null +++ b/man/set_space_between_levels.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/rules-spaces.R +\name{set_space_between_levels} +\alias{set_space_between_levels} +\title{Set space between levels of nesting} +\usage{ +set_space_between_levels(pd_flat) +} +\arguments{ +\item{pd_flat}{A flat parse table.} +} +\description{ +With the nested approach, certain rules do not have an effect anymore because +of the nature of the nested structure. Setting spacing before curly +brackets in for / if / while statements and function declarations will be +such a case since a curly bracket is always at the first position in a parse +table, so spacing cannot be set after the previous token. +} +\keyword{internal} diff --git a/man/set_spaces.Rd b/man/set_spaces.Rd new file mode 100644 index 000000000..ecbcae9dd --- /dev/null +++ b/man/set_spaces.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/nest.R +\name{set_spaces} +\alias{set_spaces} +\title{Helper for setting spaces} +\usage{ +set_spaces(spaces_after_prefix, force_one) +} +\arguments{ +\item{spaces_after_prefix}{An integer vector with the number of spaces +after the prefix.} + +\item{force_one}{Whether spaces_after_prefix should be set to one in all +cases.} +} +\value{ +An integer vector of length spaces_after_prefix, which is either +one (if \code{force_one = TRUE}) or \code{space_after_prefix} with all values +below one set to one. + +Numeric vector indicating the number of spaces. 
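A minimal sketch of the rule stated in the value section above; set_spaces_sketch() is a hypothetical stand-in, not the internal implementation.

set_spaces_sketch <- function(spaces_after_prefix, force_one) {
  if (force_one) {
    # force exactly one space after the prefix
    rep(1L, length(spaces_after_prefix))
  } else {
    # keep existing spacing, but raise values below one to one
    pmax(spaces_after_prefix, 1L)
  }
}
set_spaces_sketch(c(0L, 4L), force_one = FALSE) # 1 4
set_spaces_sketch(c(0L, 4L), force_one = TRUE)  # 1 1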
+} +\description{ +Helper for setting spaces +} +\keyword{internal} diff --git a/man/set_style_transformers.Rd b/man/set_style_transformers.Rd new file mode 100644 index 000000000..410693bb3 --- /dev/null +++ b/man/set_style_transformers.Rd @@ -0,0 +1,12 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/addins.R +\name{set_style_transformers} +\alias{set_style_transformers} +\title{Asks the user to supply a style} +\usage{ +set_style_transformers() +} +\description{ +Asks the user to supply a style +} +\keyword{internal} diff --git a/man/set_unindention_child.Rd b/man/set_unindention_child.Rd new file mode 100644 index 000000000..b322bde13 --- /dev/null +++ b/man/set_unindention_child.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/unindent.R +\name{set_unindention_child} +\alias{set_unindention_child} +\title{Unindent a child if necessary} +\usage{ +set_unindention_child(pd, token = "')'", unindent_by) +} +\arguments{ +\item{pd}{A parse table.} + +\item{token}{The token the unindention should be based on.} + +\item{unindent_by}{By how many spaces one level of indention is reversed.} +} +\description{ +Checks whether any of the children of \code{pd} has \code{token} on the same line as the +closing \code{token} of \code{pd}. If so, unindents that token. +} +\keyword{internal} diff --git a/man/shallowify.Rd b/man/shallowify.Rd new file mode 100644 index 000000000..52d763e8d --- /dev/null +++ b/man/shallowify.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/nest.R +\name{shallowify} +\alias{shallowify} +\title{Shallowify the parse table} +\usage{ +shallowify(pd) +} +\description{ +Cached expressions don't need to be transformed with \code{transformers} in +\code{\link[=parse_transform_serialize_r_block]{parse_transform_serialize_r_block()}}; we simply return \code{text} for the +top-level token. +} +\details{ +Expressions that are cached are already styled correctly. We can make the +parse table shallow at these locations, fully relying on the \code{text} column: +\itemize{ +\item remove all children, as they are not needed anymore. +\item mark the expression as a terminal. +} +} +\section{Top-level comments}{ + +Note that we do not cache top-level comments. Because package code has a lot +of roxygen comments and each of them is a top-level expression, checking is +very expensive, even more expensive than styling, because comments are always +terminals. This will also yield large speed improvements in +\code{\link[=compute_parse_data_nested]{compute_parse_data_nested()}} because nesting is expensive and will not be +done for cached expressions. +} + +\section{Implementation}{ + +Because the structure of the parse table is not always "top-level expression +first, then children", this function creates a temporary parse table that has +this property, then extracts the ids and subsets the original parse table so +it is shallow in the right places.
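The caching behaviour that motivates this shallowification is exposed to users through the exported cache API; a short illustration (the output of cache_info() depends on the local cache state).

styler::cache_activate()
styler::style_text("1 + 1") # styled and added to the cache
styler::style_text("1 + 1") # second run: the expression is recognized as already styled
styler::cache_info()
styler::cache_deactivate()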
+} + +\keyword{internal} diff --git a/man/specify_transformers_drop.Rd b/man/specify_transformers_drop.Rd new file mode 100644 index 000000000..e9069cc33 --- /dev/null +++ b/man/specify_transformers_drop.Rd @@ -0,0 +1,74 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/style-guides.R +\name{specify_transformers_drop} +\alias{specify_transformers_drop} +\title{Specify which tokens must be absent for a transformer to be dropped} +\usage{ +specify_transformers_drop( + spaces = NULL, + indention = NULL, + line_breaks = NULL, + tokens = NULL +) +} +\arguments{ +\item{spaces, indention, line_breaks, tokens}{Each a list (or \code{NULL}) where +the name of each element is the concerning transformer and the value is an +unnamed vector with tokens that match the rule. See 'Examples'.} +} +\description{ +\code{{styler}} can safely remove transformer functions from the list of +transformers to be applied on every \emph{nest} with \code{\link[=transformers_drop]{transformers_drop()}} if the +tokens that trigger a manipulation of the parse data are absent from the text +to style. \code{specify_transformers_drop()} helps you specify these +conditions. +} +\details{ +Note that the negative formulation (must be absent in order to be dropped) +means that when you add a new rule and you forget +to add a rule for when to drop it, it will not be dropped. If we instead required +the complement to be specified (which tokens must be present for the transformer to +be kept), a forgotten rule would mean the transformer is silently removed, which is +less safe. +} +\section{Warning}{ + +It is the responsibility of the developer to ensure expected behavior, in +particular that: +\itemize{ +\item the name of the supplied dropping criteria matches the name of the +transformer function. +\item the dropping criteria (name + token) correctly reflect under which +circumstances the transformer does not have an impact on styling and can +therefore be safely removed without affecting the styling outcome. +} + +You can use the unexported function \code{\link[=test_transformers_drop]{test_transformers_drop()}} for some +checks. +} + +\examples{ +dropping <- specify_transformers_drop( + spaces = c(remove_space_after_excl = "'!'") +) +style_guide <- create_style_guide( + space = list(remove_space_after_excl = styler:::remove_space_after_excl), + transformers_drop = dropping +) +# transformers_drop() will remove the transformer when the code does not +# contain an exclamation mark +style_guide_with_some_transformers_dropped <- styler:::transformers_drop( + "x <- 3;2", style_guide +) +setdiff( + names(style_guide$space), + names(style_guide_with_some_transformers_dropped) +) +# note that dropping all transformers of a scope means that this scope +# has an empty named list for this scope +style_guide_with_some_transformers_dropped$space +# this is not the same as if this scope was never specified. +tidyverse_style(scope = "none")$space +# Hence, styler should check for length 0 to decide if a scope is present or
# not, not via `is.null()` and we can use the `is.null()` check to see if +# this scope was initially required by the user.
+} diff --git a/man/split_roxygen_segments.Rd b/man/split_roxygen_segments.Rd new file mode 100644 index 000000000..fffd8d83c --- /dev/null +++ b/man/split_roxygen_segments.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/transform-files.R +\name{split_roxygen_segments} +\alias{split_roxygen_segments} +\title{Split text into roxygen and non-roxygen example segments} +\usage{ +split_roxygen_segments(text, roxygen_examples) +} +\arguments{ +\item{text}{Roxygen comments.} + +\item{roxygen_examples}{Integer sequence that indicates which lines in \code{text} +are roxygen examples. Most conveniently obtained with +\link{identify_start_to_stop_of_roxygen_examples_from_text}.} +} +\value{ +A list with two elements: +\itemize{ +\item A list that contains elements grouped into roxygen and non-roxygen +sections. This list is named \code{separated}. +\item An integer vector with the indices that correspond to roxygen code +examples in \code{separated}. +} +} +\description{ +Split text into roxygen and non-roxygen example segments +} +\keyword{internal} diff --git a/man/start_comments_with_space.Rd b/man/start_comments_with_space.Rd new file mode 100644 index 000000000..03af11af0 --- /dev/null +++ b/man/start_comments_with_space.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/rules-spaces.R +\name{start_comments_with_space} +\alias{start_comments_with_space} +\title{Start comments with a space} +\usage{ +start_comments_with_space(pd, force_one = FALSE) +} +\arguments{ +\item{pd}{A parse table.} + +\item{force_one}{Whether or not to force one space or allow multiple spaces.} +} +\description{ +Forces comments to start with a space, that is, after the regular expression +\verb{#+['\\\\*]}, at least one space must follow if the comment is \emph{non-empty}, i.e. +it does not consist of spaces only. Multiple spaces may be legitimate for +indention in some situations. +} +\section{Exceptions}{ + +Spaces won't be added to comments when they are: +\itemize{ +\item shebangs +\item code chunk headers +\item xaringan markers +} +} + +\keyword{internal} diff --git a/man/style_active_pkg.Rd b/man/style_active_pkg.Rd new file mode 100644 index 000000000..d16e22270 --- /dev/null +++ b/man/style_active_pkg.Rd @@ -0,0 +1,12 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/addins.R +\name{style_active_pkg} +\alias{style_active_pkg} +\title{Wrapper around \code{\link[=style_pkg]{style_pkg()}} for access via Addin.} +\usage{ +style_active_pkg() +} +\description{ +Wrapper around \code{\link[=style_pkg]{style_pkg()}} for access via Addin.
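In effect, triggering that Addin amounts to something like the following call from the package root; this is an approximation, and the Addin may add RStudio-specific handling on top.

# roughly what the Addin delegates to, per the wrapper description above
styler::style_pkg()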
+} +\keyword{internal} diff --git a/man/style_dir.Rd b/man/style_dir.Rd index 8e9312c25..9f8294354 100644 --- a/man/style_dir.Rd +++ b/man/style_dir.Rd @@ -1,20 +1,31 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/ui.R +% Please edit documentation in R/ui-styling.R \name{style_dir} \alias{style_dir} \title{Prettify arbitrary R code} \usage{ -style_dir(path = ".", ..., style = tidyverse_style, - transformers = style(...), filetype = "R", recursive = TRUE, - exclude_files = NULL) +style_dir( + path = ".", + ..., + style = tidyverse_style, + transformers = style(...), + filetype = c("R", "Rprofile", "Rmd", "Rmarkdown", "Rnw", "Qmd"), + recursive = TRUE, + exclude_files = NULL, + exclude_dirs = c("packrat", "renv"), + include_roxygen_examples = TRUE, + base_indention = 0L, + dry = "off" +) } \arguments{ \item{path}{Path to a directory with files to transform.} -\item{...}{Arguments passed on to the \code{style} function.} +\item{...}{Arguments passed on to the \code{style} function, +see \code{\link[=tidyverse_style]{tidyverse_style()}} for the default argument.} \item{style}{A function that creates a style guide to use, by default -\code{\link[=tidyverse_style]{tidyverse_style()}} (without the parentheses). Not used +\code{\link{tidyverse_style}}. Not used further except to construct the argument \code{transformers}. See \code{\link[=style_guides]{style_guides()}} for details.} @@ -22,40 +33,99 @@ further except to construct the argument \code{transformers}. See conveniently constructed via the \code{style} argument and \code{...}. See 'Examples'.} -\item{filetype}{Vector of file extensions indicating which filetypes should -be styled. Case is ignored, and the \code{.} is optional, e.g. \code{c(".R", ".Rmd")} -or \code{c("r", "rmd")}.} +\item{filetype}{Vector of file extensions indicating which file types should +be styled. Case is ignored, and the \code{.} is optional, e.g. \code{c(".R", ".Rmd")}, or \code{c("r", "rmd")}. Supported values (after standardization) are: +"r", "rprofile", "rmd", "rmarkdown", "rnw", "qmd". Rmarkdown is treated as +Rmd.} -\item{recursive}{A logical value indicating whether or not files in subdirectories -of \code{path} should be styled as well.} +\item{recursive}{A logical value indicating whether or not files in +sub directories of \code{path} should be styled as well.} \item{exclude_files}{Character vector with paths to files that should be excluded from styling.} + +\item{exclude_dirs}{Character vector with directories to exclude +(recursively).} + +\item{include_roxygen_examples}{Whether or not to style code in roxygen +examples.} + +\item{base_indention}{Integer scalar indicating by how many spaces the whole +output text should be indented. Note that this is not the same as splitting +by line and add a \code{base_indention} spaces before the code in the case +multi-line strings are present. See 'Examples'.} + +\item{dry}{To indicate whether styler should run in \emph{dry} mode, i.e. refrain +from writing back to files .\code{"on"} and \code{"fail"} both don't write back, the +latter returns an error if the input code is not identical to the result +of styling. "off", the default, writes back if the input and output of +styling are not identical.} } \description{ -Performs various substitutions in all \code{.R} files in a directory. 
+Performs various substitutions in all \code{.R}, \code{.Rmd}, \code{.Rmarkdown}, \code{qmd} +and/or \code{.Rnw} files in a directory (by default only \code{.R} files are styled - +see \code{filetype} argument). Carefully examine the results after running this function! } \section{Value}{ Invisibly returns a data frame that indicates for each file considered for -styling whether or not it was actually changed. +styling whether or not it was actually changed (or would be changed when +\code{dry} is not "off"). } \section{Warning}{ This function overwrites files (if styling results in a change of the -code to be formatted). It is strongly suggested to only style files -that are under version control or to create a backup copy. +code to be formatted and \code{dry = "off"}). It is strongly suggested to only +style files that are under version control or to create a backup copy. + +We suggest to first style with \code{scope < "tokens"} and inspect and commit +changes, because these changes are guaranteed to leave the abstract syntax +tree (AST) unchanged. See section 'Round trip validation' for details. + +Then, we suggest to style with \code{scope = "tokens"} (if desired) and carefully +inspect the changes to make sure the AST is not changed in an unexpected way +that invalidates code. } -\examples{ -\dontrun{ -style_dir(file_type = "r") +\section{Round trip validation}{ + +The following section describes when and how styling is guaranteed to +yield correct code. + +If tokens are not in the styling scope (as specified with the \code{scope} +argument), no tokens are changed and the abstract syntax tree (AST) should +not change. +Hence, it is possible to validate the styling by comparing whether the parsed +expression before and after styling have the same AST. +This comparison omits roxygen code examples and comments. styler throws an +error if the AST has changed through styling. + +Note that if tokens are to be styled, such a comparison is not conducted because +the AST might well change and such a change is intended. There is no way +styler can validate styling, that is why we inform the user to carefully +inspect the changes. + +See section 'Warning' for a good strategy to apply styling safely. } + +\examples{ +\dontshow{if (FALSE) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} +style_dir("path/to/dir", filetype = c("rmd", ".R")) + +# the following is identical (because of ... 
and defaults) +# but the first is most convenient: +style_dir(strict = TRUE) +style_dir(style = tidyverse_style, strict = TRUE) +style_dir(transformers = tidyverse_style(strict = TRUE)) +\dontshow{\}) # examplesIf} } \seealso{ -Other stylers: \code{\link{style_file}}, - \code{\link{style_pkg}}, \code{\link{style_text}}, - \code{\link{styler_addins}} +Other stylers: +\code{\link{style_file}()}, +\code{\link{style_pkg}()}, +\code{\link{style_text}()}, +\code{\link{styler_addins}} } +\concept{stylers} diff --git a/man/style_file.Rd b/man/style_file.Rd index eab9de7d7..331abbdac 100644 --- a/man/style_file.Rd +++ b/man/style_file.Rd @@ -1,53 +1,125 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/ui.R +% Please edit documentation in R/ui-styling.R \name{style_file} \alias{style_file} -\title{Style \code{.R} and/or \code{.Rmd} files} +\title{Style files with R source code} \usage{ -style_file(path, ..., style = tidyverse_style, transformers = style(...)) +style_file( + path, + ..., + style = tidyverse_style, + transformers = style(...), + include_roxygen_examples = TRUE, + base_indention = 0L, + dry = "off" +) } \arguments{ -\item{path}{A character vector with paths to files to style.} +\item{path}{A character vector with paths to files to style. Supported +extensions: \code{.R}, \code{.Rmd}, \code{.Rmarkdown}, \code{.qmd} and \code{.Rnw}.} -\item{...}{Arguments passed on to the \code{style} function.} +\item{...}{Arguments passed on to the \code{style} function, +see \code{\link[=tidyverse_style]{tidyverse_style()}} for the default argument.} \item{style}{A function that creates a style guide to use, by default -\code{\link[=tidyverse_style]{tidyverse_style()}} (without the parentheses). Not used +\code{\link{tidyverse_style}}. Not used further except to construct the argument \code{transformers}. See \code{\link[=style_guides]{style_guides()}} for details.} \item{transformers}{A set of transformer functions. This argument is most conveniently constructed via the \code{style} argument and \code{...}. See 'Examples'.} + +\item{include_roxygen_examples}{Whether or not to style code in roxygen +examples.} + +\item{base_indention}{Integer scalar indicating by how many spaces the whole +output text should be indented. Note that this is not the same as splitting +by line and add a \code{base_indention} spaces before the code in the case +multi-line strings are present. See 'Examples'.} + +\item{dry}{To indicate whether styler should run in \emph{dry} mode, i.e. refrain +from writing back to files .\code{"on"} and \code{"fail"} both don't write back, the +latter returns an error if the input code is not identical to the result +of styling. "off", the default, writes back if the input and output of +styling are not identical.} } \description{ Performs various substitutions in the files specified. Carefully examine the results after running this function! } +\section{Encoding}{ + +UTF-8 encoding is assumed. Please convert your code to UTF-8 if necessary +before applying styler. +} + \section{Value}{ Invisibly returns a data frame that indicates for each file considered for -styling whether or not it was actually changed. +styling whether or not it was actually changed (or would be changed when +\code{dry} is not "off"). } \section{Warning}{ This function overwrites files (if styling results in a change of the -code to be formatted). It is strongly suggested to only style files -that are under version control or to create a backup copy. 
+code to be formatted and \code{dry = "off"}). It is strongly suggested to only +style files that are under version control or to create a backup copy. + +We suggest to first style with \code{scope < "tokens"} and inspect and commit +changes, because these changes are guaranteed to leave the abstract syntax +tree (AST) unchanged. See section 'Round trip validation' for details. + +Then, we suggest to style with \code{scope = "tokens"} (if desired) and carefully +inspect the changes to make sure the AST is not changed in an unexpected way +that invalidates code. +} + +\section{Round trip validation}{ + +The following section describes when and how styling is guaranteed to +yield correct code. + +If tokens are not in the styling scope (as specified with the \code{scope} +argument), no tokens are changed and the abstract syntax tree (AST) should +not change. +Hence, it is possible to validate the styling by comparing whether the parsed +expression before and after styling have the same AST. +This comparison omits roxygen code examples and comments. styler throws an +error if the AST has changed through styling. + +Note that if tokens are to be styled, such a comparison is not conducted because +the AST might well change and such a change is intended. There is no way +styler can validate styling, that is why we inform the user to carefully +inspect the changes. + +See section 'Warning' for a good strategy to apply styling safely. } \examples{ -# the following is identical but the former is more convenient: file <- tempfile("styler", fileext = ".R") -enc::write_lines_enc("1++1", file) +writeLines("1++1", file) + +# the following is identical (because of ... and defaults), +# but the first is most convenient: +style_file(file, strict = TRUE) style_file(file, style = tidyverse_style, strict = TRUE) style_file(file, transformers = tidyverse_style(strict = TRUE)) -enc::read_lines_enc(file) + +# only style indention and less invasive levels (i.e. spaces) +style_file(file, scope = "indention", strict = TRUE) +# name levels explicitly to not style less invasive levels +style_file(file, scope = I(c("tokens", "spaces")), strict = TRUE) + +readLines(file) unlink(file) } \seealso{ -Other stylers: \code{\link{style_dir}}, - \code{\link{style_pkg}}, \code{\link{style_text}}, - \code{\link{styler_addins}} +Other stylers: +\code{\link{style_dir}()}, +\code{\link{style_pkg}()}, +\code{\link{style_text}()}, +\code{\link{styler_addins}} } +\concept{stylers} diff --git a/man/style_guides.Rd b/man/style_guides.Rd index f53949ceb..ff8534b1e 100644 --- a/man/style_guides.Rd +++ b/man/style_guides.Rd @@ -1,5 +1,5 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/style_guides.R +% Please edit documentation in R/style-guides.R \name{style_guides} \alias{style_guides} \title{Style guides} @@ -11,3 +11,4 @@ The available style guides are: \item the tidyverse style guide (see \code{\link[=tidyverse_style]{tidyverse_style()}}). 
} } +\keyword{internal} diff --git a/man/style_pkg.Rd b/man/style_pkg.Rd index 1e39e94d7..303dc7693 100644 --- a/man/style_pkg.Rd +++ b/man/style_pkg.Rd @@ -1,20 +1,30 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/ui.R +% Please edit documentation in R/ui-styling.R \name{style_pkg} \alias{style_pkg} \title{Prettify R source code} \usage{ -style_pkg(pkg = ".", ..., style = tidyverse_style, - transformers = style(...), filetype = "R", - exclude_files = "R/RcppExports.R") +style_pkg( + pkg = ".", + ..., + style = tidyverse_style, + transformers = style(...), + filetype = c("R", "Rprofile", "Rmd", "Rmarkdown", "Rnw", "Qmd"), + exclude_files = c("R/RcppExports.R", "R/cpp11.R"), + exclude_dirs = c("packrat", "renv"), + include_roxygen_examples = TRUE, + base_indention = 0L, + dry = "off" +) } \arguments{ \item{pkg}{Path to a (subdirectory of an) R package.} -\item{...}{Arguments passed on to the \code{style} function.} +\item{...}{Arguments passed on to the \code{style} function, +see \code{\link[=tidyverse_style]{tidyverse_style()}} for the default argument.} \item{style}{A function that creates a style guide to use, by default -\code{\link[=tidyverse_style]{tidyverse_style()}} (without the parentheses). Not used +\code{\link{tidyverse_style}}. Not used further except to construct the argument \code{transformers}. See \code{\link[=style_guides]{style_guides()}} for details.} @@ -22,43 +32,104 @@ further except to construct the argument \code{transformers}. See conveniently constructed via the \code{style} argument and \code{...}. See 'Examples'.} -\item{filetype}{Vector of file extensions indicating which filetypes should -be styled. Case is ignored, and the \code{.} is optional, e.g. \code{c(".R", ".Rmd")} -or \code{c("r", "rmd")}.} +\item{filetype}{Vector of file extensions indicating which file types should +be styled. Case is ignored, and the \code{.} is optional, e.g. \code{c(".R", ".Rmd")}, or \code{c("r", "rmd")}. Supported values (after standardization) are: +"r", "rprofile", "rmd", "rmarkdown", "rnw", "qmd". Rmarkdown is treated as +Rmd.} \item{exclude_files}{Character vector with paths to files that should be excluded from styling.} + +\item{exclude_dirs}{Character vector with directories to exclude +(recursively). Note that the default values were set for consistency with +\code{\link[=style_dir]{style_dir()}} and as these directories are anyways not styled.} + +\item{include_roxygen_examples}{Whether or not to style code in roxygen +examples.} + +\item{base_indention}{Integer scalar indicating by how many spaces the whole +output text should be indented. Note that this is not the same as splitting +by line and add a \code{base_indention} spaces before the code in the case +multi-line strings are present. See 'Examples'.} + +\item{dry}{To indicate whether styler should run in \emph{dry} mode, i.e. refrain +from writing back to files .\code{"on"} and \code{"fail"} both don't write back, the +latter returns an error if the input code is not identical to the result +of styling. "off", the default, writes back if the input and output of +styling are not identical.} } \description{ Performs various substitutions in all \code{.R} files in a package -(code and tests). +(code and tests), \code{.Rmd}, \code{.Rmarkdown} and/or +\code{.qmd}, \code{.Rnw} files (vignettes and readme). Carefully examine the results after running this function! } \section{Warning}{ This function overwrites files (if styling results in a change of the -code to be formatted). 
It is strongly suggested to only style files -that are under version control or to create a backup copy. +code to be formatted and \code{dry = "off"}). It is strongly suggested to only +style files that are under version control or to create a backup copy. + +We suggest to first style with \code{scope < "tokens"} and inspect and commit +changes, because these changes are guaranteed to leave the abstract syntax +tree (AST) unchanged. See section 'Round trip validation' for details. + +Then, we suggest to style with \code{scope = "tokens"} (if desired) and carefully +inspect the changes to make sure the AST is not changed in an unexpected way +that invalidates code. +} + +\section{Round trip validation}{ + +The following section describes when and how styling is guaranteed to +yield correct code. + +If tokens are not in the styling scope (as specified with the \code{scope} +argument), no tokens are changed and the abstract syntax tree (AST) should +not change. +Hence, it is possible to validate the styling by comparing whether the parsed +expression before and after styling have the same AST. +This comparison omits roxygen code examples and comments. styler throws an +error if the AST has changed through styling. + +Note that if tokens are to be styled, such a comparison is not conducted because +the AST might well change and such a change is intended. There is no way +styler can validate styling, that is why we inform the user to carefully +inspect the changes. + +See section 'Warning' for a good strategy to apply styling safely. } \section{Value}{ Invisibly returns a data frame that indicates for each file considered for -styling whether or not it was actually changed. +styling whether or not it was actually changed (or would be changed when +\code{dry} is not "off"). } \examples{ -\dontrun{ - +\dontshow{if (FALSE) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} +# the following is identical (because of ... and defaults) +# but the first is most convenient: +style_pkg(strict = TRUE) style_pkg(style = tidyverse_style, strict = TRUE) +style_pkg(transformers = tidyverse_style(strict = TRUE)) + +# more options from `tidyverse_style()` style_pkg( scope = "line_breaks", math_token_spacing = specify_math_token_spacing(zero = "'+'") ) -} + +# don't write back and fail if input is not already styled +style_pkg("/path/to/pkg/", dry = "fail") +\dontshow{\}) # examplesIf} } \seealso{ -Other stylers: \code{\link{style_dir}}, - \code{\link{style_file}}, \code{\link{style_text}}, - \code{\link{styler_addins}} +Other stylers: +\code{\link{style_dir}()}, +\code{\link{style_file}()}, +\code{\link{style_text}()}, +\code{\link{styler_addins}} } +\concept{stylers} diff --git a/man/style_roxygen_code_example.Rd b/man/style_roxygen_code_example.Rd new file mode 100644 index 000000000..d2b9c4584 --- /dev/null +++ b/man/style_roxygen_code_example.Rd @@ -0,0 +1,47 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/roxygen-examples.R +\name{style_roxygen_code_example} +\alias{style_roxygen_code_example} +\title{Style a roxygen code example that may contain dontrun and friends} +\usage{ +style_roxygen_code_example(example, transformers, base_indention) +} +\arguments{ +\item{example}{Roxygen example code.} + +\item{transformers}{Passed to \code{\link[=cache_make_key]{cache_make_key()}} to generate a key.} + +\item{base_indention}{Integer scalar indicating by how many spaces the whole +output text should be indented. 
Note that this is not the same as splitting +by line and add a \code{base_indention} spaces before the code in the case +multi-line strings are present. See 'Examples'.} +} +\description{ +Parses roxygen2 comments into code, breaks it into dont* (dontrun, donttest, +dontshow) and run sections and processes each segment individually using +\code{\link[=style_roxygen_example_snippet]{style_roxygen_example_snippet()}}. +} +\section{Hierarchy}{ + +Styling involves splitting roxygen example code into segments, and segments +into snippets. This describes the process for input of +\code{\link[=parse_transform_serialize_roxygen]{parse_transform_serialize_roxygen()}}: +\itemize{ +\item Splitting code into roxygen example code and other code. Downstream, +we are only concerned about roxygen code. See +\code{\link[=parse_transform_serialize_roxygen]{parse_transform_serialize_roxygen()}}. +\item Every roxygen example code can have zero or more +dontrun / dontshow / donttest sequences. We next create segments of roxygen +code examples that contain at most one of these. See +\code{\link[=style_roxygen_code_example]{style_roxygen_code_example()}}. +\item We further split the segment that contains at most one dont* sequence into +snippets that are either don* or not. See +\code{\link[=style_roxygen_code_example_segment]{style_roxygen_code_example_segment()}}. +} + +Finally, that we have roxygen code snippets that are either dont* or not, +we style them in \code{\link[=style_roxygen_example_snippet]{style_roxygen_example_snippet()}} using +\code{\link[=parse_transform_serialize_r]{parse_transform_serialize_r()}}. +} + +\keyword{internal} diff --git a/man/style_roxygen_code_example_one.Rd b/man/style_roxygen_code_example_one.Rd new file mode 100644 index 000000000..f766b9a89 --- /dev/null +++ b/man/style_roxygen_code_example_one.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/roxygen-examples.R +\name{style_roxygen_code_example_one} +\alias{style_roxygen_code_example_one} +\title{Style a roxygen code example with exactly one \verb{@example} or \verb{@exampleIf}} +\usage{ +style_roxygen_code_example_one(example_one, transformers, base_indention) +} +\arguments{ +\item{example_one}{A character vector, one element per line, that contains in +total at most one example tag.} + +\item{transformers}{Passed to \code{\link[=cache_make_key]{cache_make_key()}} to generate a key.} + +\item{base_indention}{Integer scalar indicating by how many spaces the whole +output text should be indented. Note that this is not the same as splitting +by line and add a \code{base_indention} spaces before the code in the case +multi-line strings are present. 
See 'Examples'.} +} +\description{ +Style a roxygen code example with exactly one \verb{@example} or \verb{@exampleIf} +} +\keyword{internal} diff --git a/man/style_roxygen_code_example_segment.Rd b/man/style_roxygen_code_example_segment.Rd new file mode 100644 index 000000000..9112ba51f --- /dev/null +++ b/man/style_roxygen_code_example_segment.Rd @@ -0,0 +1,55 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/roxygen-examples.R +\name{style_roxygen_code_example_segment} +\alias{style_roxygen_code_example_segment} +\title{Style a roxygen code example segment} +\usage{ +style_roxygen_code_example_segment(one_dont, transformers, base_indention) +} +\arguments{ +\item{one_dont}{Bare R code containing at most one \verb{\\\dontrun{...}} or +friends.} + +\item{transformers}{Passed to \code{\link[=cache_make_key]{cache_make_key()}} to generate a key.} + +\item{base_indention}{Integer scalar indicating by how many spaces the whole +output text should be indented. Note that this is not the same as splitting +by line and add a \code{base_indention} spaces before the code in the case +multi-line strings are present. See 'Examples'.} +} +\description{ +A roxygen code example segment corresponds to roxygen example code that +contains at most one \verb{\\\dontrun{...}} or friends. +We drop all newline characters first because otherwise the code segment +passed to this function was previously parsed with \code{\link[=parse_roxygen]{parse_roxygen()}} and +line-breaks in and after the \verb{\\\dontrun{...}} are expressed with \code{"\\n"}, +which contradicts to the definition used elsewhere in this package, where +every element in a vector corresponds to a line. These line-breaks don't get +eliminated because they move to the front of a \code{code_segment} and +\code{style_text("\\n1")} gives \code{"\\n1"}, i.e. trailing newlines are not +eliminated. +} +\section{Hierarchy}{ + +Styling involves splitting roxygen example code into segments, and segments +into snippets. This describes the process for input of +\code{\link[=parse_transform_serialize_roxygen]{parse_transform_serialize_roxygen()}}: +\itemize{ +\item Splitting code into roxygen example code and other code. Downstream, +we are only concerned about roxygen code. See +\code{\link[=parse_transform_serialize_roxygen]{parse_transform_serialize_roxygen()}}. +\item Every roxygen example code can have zero or more +dontrun / dontshow / donttest sequences. We next create segments of roxygen +code examples that contain at most one of these. See +\code{\link[=style_roxygen_code_example]{style_roxygen_code_example()}}. +\item We further split the segment that contains at most one dont* sequence into +snippets that are either don* or not. See +\code{\link[=style_roxygen_code_example_segment]{style_roxygen_code_example_segment()}}. +} + +Finally, that we have roxygen code snippets that are either dont* or not, +we style them in \code{\link[=style_roxygen_example_snippet]{style_roxygen_example_snippet()}} using +\code{\link[=parse_transform_serialize_r]{parse_transform_serialize_r()}}. 
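The end-to-end effect of this hierarchy is visible through the exported API, since style_text() styles roxygen example code by default (include_roxygen_examples = TRUE); in this illustration the call inside the \dontrun section should get reformatted (e.g. f( 1,2 ) to f(1, 2)).

styler::style_text(c(
  "#' Example with a dont* section",
  "#'",
  "#' @examples",
  "#' \\dontrun{",
  "#' f( 1,2 )",
  "#' }",
  "NULL"
))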
+} + +\keyword{internal} diff --git a/man/style_roxygen_example_snippet.Rd b/man/style_roxygen_example_snippet.Rd new file mode 100644 index 000000000..e1ed4d4fd --- /dev/null +++ b/man/style_roxygen_example_snippet.Rd @@ -0,0 +1,53 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/roxygen-examples.R +\name{style_roxygen_example_snippet} +\alias{style_roxygen_example_snippet} +\title{Given a code snippet is dont* or run, style it} +\usage{ +style_roxygen_example_snippet( + code_snippet, + transformers, + is_dont, + base_indention +) +} +\arguments{ +\item{code_snippet}{A character vector with code to style.} + +\item{transformers}{Passed to \code{\link[=cache_make_key]{cache_make_key()}} to generate a key.} + +\item{is_dont}{Whether the snippet to process is a dontrun, dontshow, +donttest segment or not.} + +\item{base_indention}{Integer scalar indicating by how many spaces the whole +output text should be indented. Note that this is not the same as splitting +by line and add a \code{base_indention} spaces before the code in the case +multi-line strings are present. See 'Examples'.} +} +\description{ +Given a code snippet is dont* or run, style it +} +\section{Hierarchy}{ + +Styling involves splitting roxygen example code into segments, and segments +into snippets. This describes the process for input of +\code{\link[=parse_transform_serialize_roxygen]{parse_transform_serialize_roxygen()}}: +\itemize{ +\item Splitting code into roxygen example code and other code. Downstream, +we are only concerned about roxygen code. See +\code{\link[=parse_transform_serialize_roxygen]{parse_transform_serialize_roxygen()}}. +\item Every roxygen example code can have zero or more +dontrun / dontshow / donttest sequences. We next create segments of roxygen +code examples that contain at most one of these. See +\code{\link[=style_roxygen_code_example]{style_roxygen_code_example()}}. +\item We further split the segment that contains at most one dont* sequence into +snippets that are either don* or not. See +\code{\link[=style_roxygen_code_example_segment]{style_roxygen_code_example_segment()}}. +} + +Finally, that we have roxygen code snippets that are either dont* or not, +we style them in \code{\link[=style_roxygen_example_snippet]{style_roxygen_example_snippet()}} using +\code{\link[=parse_transform_serialize_r]{parse_transform_serialize_r()}}. +} + +\keyword{internal} diff --git a/man/style_selection.Rd b/man/style_selection.Rd new file mode 100644 index 000000000..668695a44 --- /dev/null +++ b/man/style_selection.Rd @@ -0,0 +1,12 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/addins.R +\name{style_selection} +\alias{style_selection} +\title{Styles the highlighted selection in a \code{.R} or \code{.Rmd} file.} +\usage{ +style_selection() +} +\description{ +Styles the highlighted selection in a \code{.R} or \code{.Rmd} file. 
+} +\keyword{internal} diff --git a/man/style_space_around_math_token.Rd b/man/style_space_around_math_token.Rd new file mode 100644 index 000000000..b994041ac --- /dev/null +++ b/man/style_space_around_math_token.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/rules-spaces.R +\name{style_space_around_math_token} +\alias{style_space_around_math_token} +\title{Style spacing around math tokens} +\usage{ +style_space_around_math_token(strict, zero, one, pd_flat) +} +\arguments{ +\item{strict}{Whether the rules should be applied strictly or not.} + +\item{zero}{Character vector of tokens that should be surrounded with zero +spaces.} + +\item{one}{Character vector with tokens that should be surrounded by at +least one space (depending on \code{strict = TRUE} in the styling functions +\code{\link[=style_text]{style_text()}} and friends). See 'Examples'.} + +\item{pd_flat}{A nest or a flat parse table.} +} +\description{ +Style spacing around math tokens +} +\keyword{internal} diff --git a/man/style_space_around_token.Rd b/man/style_space_around_token.Rd new file mode 100644 index 000000000..50f616320 --- /dev/null +++ b/man/style_space_around_token.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/rules-spaces.R +\name{style_space_around_token} +\alias{style_space_around_token} +\title{Set spacing of token to a certain level} +\usage{ +style_space_around_token( + pd_flat, + strict, + tokens, + level_before, + level_after = level_before +) +} +\arguments{ +\item{pd_flat}{A nest or a flat parse table.} + +\item{strict}{Whether the rules should be applied strictly or not.} + +\item{tokens}{Character vector with tokens that should be styled.} + +\item{level_before, level_after}{Scalar indicating the amount of spaces that +should be inserted around the \code{tokens} on the left and right position +respectively.} +} +\description{ +Set the spacing of all \code{tokens} in \code{pd_flat} to \code{level} if \code{strict = TRUE} or +to at least to \code{level} if \code{strict = FALSE}. +} +\keyword{internal} diff --git a/man/style_text.Rd b/man/style_text.Rd index a7e0537b2..0b79b6151 100644 --- a/man/style_text.Rd +++ b/man/style_text.Rd @@ -1,24 +1,40 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/ui.R +% Please edit documentation in R/ui-styling.R \name{style_text} \alias{style_text} \title{Style a string} \usage{ -style_text(text, ..., style = tidyverse_style, transformers = style(...)) +style_text( + text, + ..., + style = tidyverse_style, + transformers = style(...), + include_roxygen_examples = TRUE, + base_indention = 0L +) } \arguments{ \item{text}{A character vector with text to style.} -\item{...}{Arguments passed on to the \code{style} function.} +\item{...}{Arguments passed on to the \code{style} function, +see \code{\link[=tidyverse_style]{tidyverse_style()}} for the default argument.} \item{style}{A function that creates a style guide to use, by default -\code{\link[=tidyverse_style]{tidyverse_style()}} (without the parentheses). Not used +\code{\link{tidyverse_style}}. Not used further except to construct the argument \code{transformers}. See \code{\link[=style_guides]{style_guides()}} for details.} \item{transformers}{A set of transformer functions. This argument is most conveniently constructed via the \code{style} argument and \code{...}. 
See 'Examples'.} + +\item{include_roxygen_examples}{Whether or not to style code in roxygen +examples.} + +\item{base_indention}{Integer scalar indicating by how many spaces the whole +output text should be indented. Note that this is not the same as splitting +by line and add a \code{base_indention} spaces before the code in the case +multi-line strings are present. See 'Examples'.} } \description{ Styles a character vector. Each element of the character vector corresponds @@ -27,15 +43,26 @@ to one line of code. \examples{ style_text("call( 1)") style_text("1 + 1", strict = FALSE) + +# the following is identical (because of ... and defaults) +# but the first is most convenient: +style_text("a<-3++1", strict = TRUE) +style_text("a<-3++1", style = tidyverse_style, strict = TRUE) +style_text("a<-3++1", transformers = tidyverse_style(strict = TRUE)) + +# more invasive scopes include less invasive scopes by default style_text("a\%>\%b", scope = "spaces") style_text("a\%>\%b; a", scope = "line_breaks") style_text("a\%>\%b; a", scope = "tokens") -# the following is identical but the former is more convenient: -style_text("a<-3++1", style = tidyverse_style, strict = TRUE) -style_text("a<-3++1", transformers = tidyverse_style(strict = TRUE)) + +# opt out with I() to only style specific levels +style_text("a\%>\%b; a", scope = I("tokens")) } \seealso{ -Other stylers: \code{\link{style_dir}}, - \code{\link{style_file}}, \code{\link{style_pkg}}, - \code{\link{styler_addins}} +Other stylers: +\code{\link{style_dir}()}, +\code{\link{style_file}()}, +\code{\link{style_pkg}()}, +\code{\link{styler_addins}} } +\concept{stylers} diff --git a/man/style_text_without_curly_curly.Rd b/man/style_text_without_curly_curly.Rd new file mode 100644 index 000000000..6fae06410 --- /dev/null +++ b/man/style_text_without_curly_curly.Rd @@ -0,0 +1,35 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/testing-mocks.R +\name{style_text_without_curly_curly} +\alias{style_text_without_curly_curly} +\title{\code{style_text()} without rules for \verb{\\\{\\\{}} +\usage{ +style_text_without_curly_curly( + text, + ..., + style = tidyverse_style, + transformers = style(...), + include_roxygen_examples = TRUE +) +} +\description{ +This function mocks \code{\link[=style_text]{style_text()}}, but without taking into consideration the +rules for the curly-curly syntactic sugar (introduced in rlang 0.4). +This function (\code{style_text_without_curly_curly()}) is needed for testing +only, namely to test indention +with multiple curly braces in a sequence. It is important to maintain testing +for indention rules even as the curly-curly expression is always kept on the +same line in the tidyverse style guide because we should +ensure the underlying mechanics for indention work correctly. When +indention mechanisms are changed later, e.g. by simplifying +\code{\link[=compute_indent_indices]{compute_indent_indices()}}, we must have +a way of testing this without the interaction of \verb{\\\{\\\{}. 
+} +\examples{ +styler:::style_text_without_curly_curly("rlang::list2({{ x }} := 2L)") +styler:::style_text("rlang::list2({{ x }} := 3)") +} +\seealso{ +set_line_break_around_curly_curly +} +\keyword{internal} diff --git a/man/styler-package.Rd b/man/styler-package.Rd index f752e9b8b..0995015ad 100644 --- a/man/styler-package.Rd +++ b/man/styler-package.Rd @@ -1,18 +1,19 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/styler.R +% Please edit documentation in R/styler-package.R \docType{package} \name{styler-package} \alias{styler} \alias{styler-package} \title{Non-invasive pretty printing of R code} \description{ -styler allows you to format .R files, packages or entire R source trees +styler allows you to format \code{.R}, \code{.Rmd}, \code{.Rmarkdown} and/or +\code{.qmd}, \code{.Rnw} files, R packages, or entire R source trees according to a style guide. The following functions can be used for styling: \itemize{ \item \code{\link[=style_text]{style_text()}} to style a character vector. -\item \code{\link[=style_file]{style_file()}} to style a single .R file. -\item \code{\link[=style_dir]{style_dir()}} to style all .R files in a directory. +\item \code{\link[=style_file]{style_file()}} to style a single file. +\item \code{\link[=style_dir]{style_dir()}} to style all files in a directory. \item \code{\link[=style_pkg]{style_pkg()}} to style the source files of an R package. \item \link{styler_addins} (RStudio Addins) to style either selected code or the active file. @@ -29,7 +30,7 @@ style_text("a\%>\%b; a", scope = "tokens") Useful links: \itemize{ \item \url{https://github.com/r-lib/styler} - \item \url{https://r-lib.github.io/styler/} + \item \url{https://styler.r-lib.org} \item Report bugs at \url{https://github.com/r-lib/styler/issues} } @@ -39,7 +40,12 @@ Useful links: Authors: \itemize{ - \item Kirill Müller \email{krlmlr+r@mailbox.org} + \item Kirill Müller \email{kirill@cynkra.com} (\href{https://orcid.org/0000-0002-1416-3412}{ORCID}) +} + +Other contributors: +\itemize{ + \item Indrajeet Patil \email{patilindrajeet.science@gmail.com} (\href{https://orcid.org/0000-0003-1995-6531}{ORCID}) (@patilindrajeets) [contributor] } } diff --git a/man/styler_addins.Rd b/man/styler_addins.Rd index 5f4b04483..219dd4e8a 100644 --- a/man/styler_addins.Rd +++ b/man/styler_addins.Rd @@ -2,41 +2,66 @@ % Please edit documentation in R/addins.R \name{styler_addins} \alias{styler_addins} -\alias{style_active_file} -\alias{style_selection} \title{Stylers for RStudio Addins} -\usage{ -style_active_file() - -style_selection() -} \description{ Helper functions for styling via RStudio Addins. } -\section{Functions}{ -\itemize{ -\item \code{style_active_file}: Styles the active file with \code{\link[=tidyverse_style]{tidyverse_style()}} and -\code{strict = TRUE}. +\section{Addins}{ -\item \code{style_selection}: Styles the highlighted selection in a \code{.R} or -\code{.Rmd} file. -}} +\itemize{ +\item Set style: Select the style transformers to use. For flexibility, the user +input is passed to the \code{transformers} argument, not the \code{style} argument, +so entering \code{styler::tidyverse_style(scope = "spaces")} in the Addin is +equivalent to \code{styler::style_text("1+1", scope = "spaces")} and +\code{styler::style_text("1+1", transformers = styler::tidyverse_style(scope = "spaces"))} +if the text to style is \code{1+1}. 
The style transformers are memorized +within an R session via the R option \code{styler.addins_style_transformer} so +if you want it to persist over sessions, set the option +\code{styler.addins_style_transformer} in your \code{.Rprofile}. +\item Style active file: Styles the active file, by default with +\code{\link[=tidyverse_style]{tidyverse_style()}} or the value of the option +\code{styler.addins_style_transformer} if specified. +\item Style selection: Same as \emph{Style active file}, but styles the highlighted +code instead of the whole file. +} +} \section{Auto-Save Option}{ By default, both of the RStudio Addins will apply styling to the (selected) file contents without saving changes. Automatic saving can be enabled by -setting the environment variable \code{save_after_styling} to \code{TRUE}. - +setting the R option \code{styler.save_after_styling} to \code{TRUE}. Consider setting this in your \code{.Rprofile} file if you want to persist this setting across multiple sessions. Untitled files will always need to be saved manually after styling. } -\seealso{ -\code{\link[=Sys.setenv]{Sys.setenv()}} +\section{Life cycle}{ + +The way of specifying the style in the Addin as well as the auto-save option +(see below) are experimental. We are currently considering letting the user +specify the defaults for other style APIs like \code{\link[=style_text]{style_text()}}, +either via R options, config files or other ways as well. +See \href{https://github.com/r-lib/styler/issues/319}{r-lib/styler#319} for +the current status of this. +} -Other stylers: \code{\link{style_dir}}, - \code{\link{style_file}}, \code{\link{style_pkg}}, - \code{\link{style_text}} +\examples{ +\dontrun{ +# save after styling when using the Addin +options(styler.save_after_styling = TRUE) +# only style with scope = "spaces" when using the Addin +val <- "styler::tidyverse_style(scope = 'spaces')" +options( + styler.addins_style_transformer = val +) +} +} +\seealso{ +Other stylers: +\code{\link{style_dir}()}, +\code{\link{style_file}()}, +\code{\link{style_pkg}()}, +\code{\link{style_text}()} } +\concept{stylers} diff --git a/man/stylerignore.Rd b/man/stylerignore.Rd new file mode 100644 index 000000000..f43563654 --- /dev/null +++ b/man/stylerignore.Rd @@ -0,0 +1,62 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/nest.R +\name{stylerignore} +\alias{stylerignore} +\title{Turn off styling for parts of the code} +\description{ +Using stylerignore markers, you can temporarily turn off styler. Beware that +for \verb{styler > 1.2.0}, some alignment is +\href{https://styler.r-lib.org/articles/detect-alignment.html}{detected by styler}, +making stylerignore redundant. See a few illustrative examples below. +} +\details{ +Styling is on for all lines by default when you run styler. +\itemize{ +\item To mark the start of a sequence where you want to turn styling off, use +\verb{# styler: off}. +\item To mark the end of this sequence, put \verb{# styler: on} in your code. After +that line, styler will again format your code. +\item To ignore an inline statement (i.e. just one line), place \verb{# styler: off} +at the end of the line. +To use something else as start and stop markers, set the R options +\code{styler.ignore_start} and +\code{styler.ignore_stop} using \code{\link[=options]{options()}}. 
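For instance, a minimal sketch of overriding the markers via \code{options()} (the marker strings here are arbitrary examples, not defaults):
options(
  styler.ignore_start = "# begin-dont-style",
  styler.ignore_stop = "# end-dont-style"
)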
For styler version > 1.6.2, the +option supports character vectors longer than one and the markers are not +matched exactly, but as regular expressions, which means you can have +multiple markers on one line, e.g. \verb{# nolint start styler: off}. +} +} +\examples{ +# as long as the order of the markers is correct, the lines are ignored. +style_text( + " + 1+1 + # styler: off + 1+1 + # styler: on + 1+1 + " +) + +# if there is a stop marker before a start marker, styler won't be able +# to figure out which lines you want to ignore and won't ignore anything, +# issuing a warning. +\dontrun{ +style_text( + " + 1+1 + # styler: off + 1+1 + # styler: off + 1+1 + " +) +} +# some alignment of code is detected, so you don't need to use stylerignore +style_text( + "call( + xyz = 3, + x = 11 + )" +) +} diff --git a/man/stylerignore_consolidate_col.Rd b/man/stylerignore_consolidate_col.Rd new file mode 100644 index 000000000..ee50e0fff --- /dev/null +++ b/man/stylerignore_consolidate_col.Rd @@ -0,0 +1,29 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/stylerignore.R +\name{stylerignore_consolidate_col} +\alias{stylerignore_consolidate_col} +\title{Consolidate columns after a merge} +\usage{ +stylerignore_consolidate_col( + flattened_pd, + col, + col_x = paste0(col, ".x"), + col_y = paste0(col, ".y") +) +} +\arguments{ +\item{flattened_pd}{A flattened parse table.} + +\item{col}{A string indicating the name of the column that should be +consolidated.} + +\item{col_x, col_y}{The name of the column from the left (right) parent to +consolidate.} +} +\description{ +After \code{\link[base:merge]{base::merge()}}, all non-id columns that were present in \code{x} and \code{y} +get the suffix \code{.x} or \code{.y}. If the \code{y} value is missing, use the \code{x} +value (because the information for this token was not stylerignored), +otherwise the \code{y} value (i.e. the styled value). +} +\keyword{internal} diff --git a/man/test_collection.Rd b/man/test_collection.Rd new file mode 100644 index 000000000..25b545a20 --- /dev/null +++ b/man/test_collection.Rd @@ -0,0 +1,51 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/testing.R +\name{test_collection} +\alias{test_collection} +\title{Run a collection of tests} +\usage{ +test_collection( + test, + sub_test = NULL, + dry = "off", + write_tree = NA, + transformer, + ... +) +} +\arguments{ +\item{test}{The test to run. It corresponds to a folder name in +tests/testthat.} + +\item{sub_test}{A regex pattern to further reduce the number of test files +to be tested in the test. \code{sub_test} must match the beginning of file +names in tests/testthat. \code{NULL} matches all files.} + +\item{dry}{To indicate whether styler should run in \emph{dry} mode, i.e. refrain +from writing back to files. \code{"on"} and \code{"fail"} both don't write back, the +latter returns an error if the input code is not identical to the result +of styling. "off", the default, writes back if the input and output of +styling are not identical.} + +\item{write_tree}{Whether or not the tree structure of the test should be +computed and written to a file. Note that this needs R >= 3.2 +(see \code{\link[=set_arg_write_tree]{set_arg_write_tree()}}).
If the argument is set to \code{NA}, the function +determines whether R >= 3.2 is in use and if so, trees will be written.} + +\item{transformer}{A function to apply to the content of \code{in_item}.} + +\item{...}{Parameters passed to transformer function.} +} +\description{ +Run transformations on all *-in.R files in a test directory and compare them +with their *-out.R counterpart. +} +\details{ +Each file name that matches \code{test} and \code{sub_test} and ends with +"-in.R" is considered as an input to test. Its counterpart, +the reference to compare it against is the *-out.R file. It is constructed +by taking the substring of the *-in.R file before the +last dash and adding -out.R. In contrast to older versions of this +function, every *-out.R file has just one in file. +} +\keyword{internal} diff --git a/man/test_dry.Rd b/man/test_dry.Rd new file mode 100644 index 000000000..091e6e72e --- /dev/null +++ b/man/test_dry.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/testing-public-api.R +\name{test_dry} +\alias{test_dry} +\title{Test the dry argument} +\usage{ +test_dry(path, styler, styled = FALSE) +} +\arguments{ +\item{path}{A path to pass to the \code{styler}.} + +\item{styler}{A function that takes \code{path}, typically a user exposed styler +function that has side effects, like \code{\link[=style_file]{style_file()}}.} +} +\description{ +Test the dry argument +} +\keyword{internal} diff --git a/man/test_transformer.Rd b/man/test_transformer.Rd new file mode 100644 index 000000000..bb31e1382 --- /dev/null +++ b/man/test_transformer.Rd @@ -0,0 +1,33 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/testing.R +\name{test_transformer} +\alias{test_transformer} +\alias{style_empty} +\alias{style_op} +\title{Transforming test input with a transformer function} +\usage{ +style_empty(text, base_indention = 0L) + +style_op(text, base_indention = 0L) +} +\arguments{ +\item{text}{A character vector to transform.} +} +\description{ +These functions can be used as inputs for \code{\link[=test_collection]{test_collection()}} and +\code{\link[=transform_and_check]{transform_and_check()}}. +} +\details{ +As inputs for \code{\link[=test_collection]{test_collection()}}, we can also use top-level functions such +as \code{\link[=style_text]{style_text()}}. +} +\section{Functions}{ +\itemize{ +\item \code{style_empty()}: Nest and unnest \code{text} without applying any +transformations but remove EOL spaces and indention due to the way the +serialization is set up. + +\item \code{style_op()}: Transformations for indention based on operators + +}} +\keyword{internal} diff --git a/man/test_transformers_drop.Rd b/man/test_transformers_drop.Rd new file mode 100644 index 000000000..bd2bfa381 --- /dev/null +++ b/man/test_transformers_drop.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/testing.R +\name{test_transformers_drop} +\alias{test_transformers_drop} +\title{Test \code{transformers_drop} for consistency} +\usage{ +test_transformers_drop(transformers) +} +\arguments{ +\item{transformers}{The output of \code{\link[=create_style_guide]{create_style_guide()}} we want to test.} +} +\description{ +Check if the argument \code{transformers_drop} in \code{\link[=create_style_guide]{create_style_guide()}} is +consistent with the transformers specified in that function. 
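For orientation, a rough sketch of how these internal testing helpers are combined; the test folder name is hypothetical and the call is meant to be run from tests/testthat/:
# run all <name>-in.R files in the "indention" folder through style_op()
# and compare them with their <name>-out.R counterparts
styler:::test_collection("indention", transformer = styler:::style_op)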
+} +\keyword{internal} diff --git a/man/testthat_file.Rd b/man/testthat_file.Rd new file mode 100644 index 000000000..0e4885e99 --- /dev/null +++ b/man/testthat_file.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/testing.R +\name{testthat_file} +\alias{testthat_file} +\title{Create the path to a test that file} +\usage{ +testthat_file(...) +} +\arguments{ +\item{...}{Arguments passed to \code{\link[=file.path]{file.path()}} to construct the path after +".../tests/testthat/"} +} +\description{ +Create the path to a test that file +} +\keyword{internal} diff --git a/man/text_to_flat_pd.Rd b/man/text_to_flat_pd.Rd new file mode 100644 index 000000000..6b85a65dc --- /dev/null +++ b/man/text_to_flat_pd.Rd @@ -0,0 +1,31 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/nest.R +\name{text_to_flat_pd} +\alias{text_to_flat_pd} +\title{Creates a flat parse table with minimal initialization} +\usage{ +text_to_flat_pd(text, transformers, more_specs) +} +\arguments{ +\item{text}{The text to parse.} + +\item{transformers}{Passed to \code{\link[=cache_make_key]{cache_make_key()}} to generate a key.} + +\item{more_specs}{Passed to \code{\link[=cache_make_key]{cache_make_key()}} to generate a key.} +} +\description{ +Creates a flat parse table with minimal initialization and makes the parse +table shallow where appropriate. +} +\details{ +This includes: +\itemize{ +\item token before and after. +\item stylerignore attribute. +\item caching attributes. +} + +Note that the parse table might be shallow if caching is enabled and some +values are cached. +} +\keyword{internal} diff --git a/man/tidyverse_style.Rd b/man/tidyverse_style.Rd index 0cd3a6a3c..2d4fc729f 100644 --- a/man/tidyverse_style.Rd +++ b/man/tidyverse_style.Rd @@ -1,18 +1,22 @@ % Generated by roxygen2: do not edit by hand -% Please edit documentation in R/style_guides.R +% Please edit documentation in R/style-guides.R \name{tidyverse_style} \alias{tidyverse_style} \title{The tidyverse style} \usage{ -tidyverse_style(scope = "tokens", strict = TRUE, indent_by = 2, +tidyverse_style( + scope = "tokens", + strict = TRUE, + indent_by = 2L, start_comments_with_one_space = FALSE, reindention = tidyverse_reindention(), - math_token_spacing = tidyverse_math_token_spacing()) + math_token_spacing = tidyverse_math_token_spacing() +) } \arguments{ \item{scope}{The extent of manipulation. Can range from "none" (least -invasive) to "token" (most invasive). See 'Details'. This argument is a -vector of length one.} +invasive) to "tokens" (most invasive). See 'Details'. This argument is a +string or a vector of class \code{AsIs}.} \item{strict}{A logical value indicating whether a set of strict or not so strict transformer functions should be returned. Compare the @@ -26,7 +30,7 @@ See 'Examples'.} operators such as '('.} \item{start_comments_with_one_space}{Whether or not comments should start -with only one space (see \code{start_comments_with_space()}).} +with only one space (see \code{\link[=start_comments_with_space]{start_comments_with_space()}}).} \item{reindention}{A list of parameters for regex re-indention, most conveniently constructed using \code{\link[=specify_reindention]{specify_reindention()}}.} @@ -38,24 +42,36 @@ math token, conveniently constructed using \code{\link[=specify_math_token_spaci Style code according to the tidyverse style guide. } \details{ -The following options for \code{scope} are available. 
+The following levels for \code{scope} are available: \itemize{ \item "none": Performs no transformation at all. \item "spaces": Manipulates spacing between tokens on the same line. -\item "indention": In addition to "spaces", this option also manipulates the -indention level. -\item "line_breaks": In addition to "indention", this option also manipulates -line breaks. -\item "tokens": In addition to "line_breaks", this option also manipulates -tokens. +\item "indention": Manipulates the indention, i.e. number of spaces at the +beginning of each line. +\item "line_breaks": Manipulates line breaks between tokens. +\item "tokens": Manipulates tokens. } -As it becomes clear from this description, more invasive operations can only -be performed if all less invasive operations are performed too. +\code{scope} can be specified in two ways: +\itemize{ +\item As a string: In this case all less invasive scope levels are implied, e.g. +"line_breaks" includes "indention", "spaces". This is brief and what most +users need. +\item As a vector of class \code{AsIs}: Each level has to be listed explicitly by +wrapping one or more levels of the scope in \code{\link[=I]{I()}}. This offers more +granular control at the expense of more verbosity. +} + +See 'Examples' for details. } \examples{ style_text("call( 1)", style = tidyverse_style, scope = "spaces") style_text("call( 1)", transformers = tidyverse_style(strict = TRUE)) style_text(c("ab <- 3", "a <-3"), strict = FALSE) # keeps alignment of "<-" style_text(c("ab <- 3", "a <-3"), strict = TRUE) # drops alignment of "<-" + +# styling line breaks only without spaces +style_text(c("ab <- 3", "a =3"), strict = TRUE, scope = I(c("line_breaks", "tokens"))) } +\concept{obtain transformers} +\concept{style_guides} diff --git a/man/token_is_on_aligned_line.Rd b/man/token_is_on_aligned_line.Rd new file mode 100644 index 000000000..84d6d03e5 --- /dev/null +++ b/man/token_is_on_aligned_line.Rd @@ -0,0 +1,55 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/detect-alignment.R +\name{token_is_on_aligned_line} +\alias{token_is_on_aligned_line} +\title{Check if tokens are aligned} +\usage{ +token_is_on_aligned_line(pd_flat) +} +\arguments{ +\item{pd_flat}{A flat parse table.} +} +\description{ +If all tokens are aligned, \code{TRUE} is returned, otherwise \code{FALSE}. The +function only checks for alignment of function calls. This can be +recycled conveniently later if needed as a vector with length > 1. +} +\details{ +Multiple lines are called aligned if the following conditions hold for all +but the first line of the expression: +\itemize{ +\item lag spaces of column 1 must agree. +\item spacing around comma (0 before, > 1 after) and spacing around \code{=} (at least +one around). +\item all positions of commas of col > 2 must agree (needs recursive creation of +\code{text}). +} + +Because of the last requirement, this function is very expensive to run. For +this reason, the following approach is taken: +\itemize{ +\item Only invoke the function when certain that alignment is possible. +\item Check the cheap conditions first. +\item For the recursive creation of text, greedily check column by column to make +sure we can stop as soon as we find that columns are not aligned.
+} +} +\examples{ +library("magrittr") +withr::with_options( + list(styler.cache_name = NULL), # temporarily deactivate cache + { + transformers <- tidyverse_style() + pd_nested <- compute_parse_data_nested(c( + "call(", + " ab = 1L,", + " a = 2", + ")" + )) \%>\% + styler:::post_visit(transformers$initialize) + nest <- pd_nested$child[[1L]] + styler:::token_is_on_aligned_line(nest) + } +) +} +\keyword{internal} diff --git a/man/tokenize.Rd b/man/tokenize.Rd new file mode 100644 index 000000000..b9a3954dc --- /dev/null +++ b/man/tokenize.Rd @@ -0,0 +1,29 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/parse.R +\name{tokenize} +\alias{tokenize} +\title{Obtain token table from text} +\usage{ +tokenize(text) +} +\arguments{ +\item{text}{The text to parse.} +} +\value{ +A flat parse table +} +\description{ +\code{\link[utils:getParseData]{utils::getParseData()}} is used to obtain a flat parse table from \code{text}. +} +\details{ +Apart from the columns provided by \code{utils::getParseData()}, the following +columns are added: +\itemize{ +\item A column "short" with the first five characters of "text". +\item A column "pos_id" (positional id) which can be used for sorting +(because "id" cannot be used in general). Note that the nth value of this +column corresponds to n as long as no tokens are inserted. +\item A column "child" that contains \emph{nest}s. +} +} +\keyword{internal} diff --git a/man/transform_and_check.Rd b/man/transform_and_check.Rd new file mode 100644 index 000000000..4c37376ac --- /dev/null +++ b/man/transform_and_check.Rd @@ -0,0 +1,48 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/testing.R +\name{transform_and_check} +\alias{transform_and_check} +\title{Transform a file and check the result} +\usage{ +transform_and_check( + in_item, + out_item, + in_name = in_item, + out_name = out_item, + transformer, + dry, + write_tree = NA, + out_tree = "_tree", + ... +) +} +\arguments{ +\item{in_item}{A path to a file to transform.} + +\item{out_item}{The path to a file that contains the expected result.} + +\item{in_name}{The label of the in_item, defaults to \code{in_item}.} + +\item{out_name}{The label of the out_item, defaults to \code{out_item}.} + +\item{transformer}{A function to apply to the content of \code{in_item}.} + +\item{dry}{To indicate whether styler should run in \emph{dry} mode, i.e. refrain +from writing back to files. \code{"on"} and \code{"fail"} both don't write back, the +latter returns an error if the input code is not identical to the result +of styling. "off", the default, writes back if the input and output of +styling are not identical.} + +\item{write_tree}{Whether or not the tree structure of the test should be +computed and written to a file. Note that this needs R >= 3.2 +(see \code{\link[=set_arg_write_tree]{set_arg_write_tree()}}). If the argument is set to \code{NA}, the function +determines whether R >= 3.2 is in use and if so, trees will be written.} + +\item{out_tree}{Name of tree file if written out.} + +\item{...}{Parameters passed to transformer function.} +} +\description{ +Transform a file and check whether it is identical to a reference.
+} +\keyword{internal} diff --git a/man/transform_code.Rd b/man/transform_code.Rd new file mode 100644 index 000000000..cd3258734 --- /dev/null +++ b/man/transform_code.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/transform-code.R +\name{transform_code} +\alias{transform_code} +\title{Transform code from R, Rmd or Rnw files} +\usage{ +transform_code(path, fun, ..., dry) +} +\arguments{ +\item{path}{A vector with file paths to transform.} + +\item{fun}{A function that returns a character vector.} + +\item{...}{Further arguments passed to \code{\link[=transform_utf8]{transform_utf8()}}.} + +\item{dry}{To indicate whether styler should run in \emph{dry} mode, i.e. refrain +from writing back to files .\code{"on"} and \code{"fail"} both don't write back, the +latter returns an error if the input code is not identical to the result +of styling. "off", the default, writes back if the input and output of +styling are not identical.} +} +\description{ +A wrapper which initiates the styling of +either R, Rmd or Rnw files by passing the relevant transformer function for +each case. +} +\keyword{internal} diff --git a/man/transform_file.Rd b/man/transform_file.Rd new file mode 100644 index 000000000..abba7588f --- /dev/null +++ b/man/transform_file.Rd @@ -0,0 +1,44 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/transform-files.R +\name{transform_file} +\alias{transform_file} +\title{Transform a file and output a customized message} +\usage{ +transform_file( + path, + fun, + max_char_path, + message_before = "", + message_after = " [DONE]", + message_after_if_changed = " *", + ..., + dry +) +} +\arguments{ +\item{path}{A vector with file paths to transform.} + +\item{fun}{A function that returns a character vector.} + +\item{max_char_path}{The number of characters of the longest path. Determines +the indention level of \code{message_after}.} + +\item{message_before}{The message to print before the path.} + +\item{message_after}{The message to print after the path.} + +\item{message_after_if_changed}{The message to print after \code{message_after} if +any file was transformed.} + +\item{...}{Further arguments passed to \code{\link[=transform_utf8]{transform_utf8()}}.} + +\item{dry}{To indicate whether styler should run in \emph{dry} mode, i.e. refrain +from writing back to files .\code{"on"} and \code{"fail"} both don't write back, the +latter returns an error if the input code is not identical to the result +of styling. "off", the default, writes back if the input and output of +styling are not identical.} +} +\description{ +Transforms file contents and outputs customized messages. 
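The \code{dry} argument described here also surfaces in the user-facing functions; a minimal usage sketch (the file path is hypothetical):
# report whether the file would change, without writing anything back
styler::style_file("R/my-script.R", dry = "on")
# error if the file is not already styled, e.g. as a CI check
styler::style_file("R/my-script.R", dry = "fail")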
+} +\keyword{internal} diff --git a/man/transform_files.Rd b/man/transform_files.Rd new file mode 100644 index 000000000..d3a6ff2bd --- /dev/null +++ b/man/transform_files.Rd @@ -0,0 +1,47 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/transform-files.R +\name{transform_files} +\alias{transform_files} +\title{Transform files with transformer functions} +\usage{ +transform_files( + files, + transformers, + include_roxygen_examples, + base_indention, + dry +) +} +\arguments{ +\item{files}{A character vector with paths to the file that should be +transformed.} + +\item{transformers}{A list of transformer functions that operate on flat +parse tables.} + +\item{include_roxygen_examples}{Whether or not to style code in roxygen +examples.} + +\item{base_indention}{Integer scalar indicating by how many spaces the whole +output text should be indented. Note that this is not the same as splitting +by line and add a \code{base_indention} spaces before the code in the case +multi-line strings are present. See 'Examples'.} + +\item{dry}{To indicate whether styler should run in \emph{dry} mode, i.e. refrain +from writing back to files .\code{"on"} and \code{"fail"} both don't write back, the +latter returns an error if the input code is not identical to the result +of styling. "off", the default, writes back if the input and output of +styling are not identical.} +} +\description{ +\code{transform_files} applies transformations to file contents and writes back +the result. +} +\section{Value}{ + +Invisibly returns a data frame that indicates for each file considered for +styling whether or not it was actually changed (or would be changed when +\code{dry} is not "off"). +} + +\keyword{internal} diff --git a/man/transform_mixed.Rd b/man/transform_mixed.Rd new file mode 100644 index 000000000..9fae81d99 --- /dev/null +++ b/man/transform_mixed.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/transform-code.R +\name{transform_mixed} +\alias{transform_mixed} +\title{Transform mixed contents} +\usage{ +transform_mixed(lines, transformer_fun, filetype) +} +\arguments{ +\item{lines}{A character vector of lines from an Rmd or Rnw file.} + +\item{transformer_fun}{A styler transformer function.} + +\item{filetype}{A string indicating the filetype - either 'Rmd' or 'Rnw'.} +} +\description{ +Applies the supplied transformer function to code chunks identified within +an Rmd or Rnw file and recombines the resulting (styled) code chunks with the +text chunks. +} +\keyword{internal} diff --git a/man/transform_mixed_non_empty.Rd b/man/transform_mixed_non_empty.Rd new file mode 100644 index 000000000..8ceae2613 --- /dev/null +++ b/man/transform_mixed_non_empty.Rd @@ -0,0 +1,14 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/transform-code.R +\name{transform_mixed_non_empty} +\alias{transform_mixed_non_empty} +\title{Ensure for \code{.Rmd} and friends that a code chunk without code is formatted as +a code chunk without any lines.} +\usage{ +transform_mixed_non_empty(r_chunk, transformer_fun) +} +\description{ +Ensure for \code{.Rmd} and friends that a code chunk without code is formatted as +a code chunk without any lines. 
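At the user level, these mixed-content helpers are what make styling of R Markdown and Sweave documents possible; a brief sketch (the file name is hypothetical):
# only the R code chunks are re-styled, the surrounding prose is left untouched
styler::style_file("vignettes/report.Rmd", scope = "spaces")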
+} +\keyword{internal} diff --git a/man/transform_utf8.Rd b/man/transform_utf8.Rd new file mode 100644 index 000000000..f2b72ca02 --- /dev/null +++ b/man/transform_utf8.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/io.R +\name{transform_utf8} +\alias{transform_utf8} +\title{Apply a function to the contents of a file} +\usage{ +transform_utf8(path, fun, dry) +} +\arguments{ +\item{path}{A vector with file paths to transform.} + +\item{fun}{A function that returns a character vector.} + +\item{dry}{To indicate whether styler should run in \emph{dry} mode, i.e. refrain +from writing back to files .\code{"on"} and \code{"fail"} both don't write back, the +latter returns an error if the input code is not identical to the result +of styling. "off", the default, writes back if the input and output of +styling are not identical.} +} +\description{ +Transforms a file with a function. +} +\keyword{internal} diff --git a/man/transform_utf8_one.Rd b/man/transform_utf8_one.Rd new file mode 100644 index 000000000..63ff1e484 --- /dev/null +++ b/man/transform_utf8_one.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/io.R +\name{transform_utf8_one} +\alias{transform_utf8_one} +\title{Potentially transform a file} +\usage{ +transform_utf8_one(path, fun, dry) +} +\arguments{ +\item{path}{A vector with file paths to transform.} + +\item{fun}{A function that returns a character vector.} + +\item{dry}{To indicate whether styler should run in \emph{dry} mode, i.e. refrain +from writing back to files .\code{"on"} and \code{"fail"} both don't write back, the +latter returns an error if the input code is not identical to the result +of styling. "off", the default, writes back if the input and output of +styling are not identical.} +} +\description{ +Potentially transform a file +} +\keyword{internal} diff --git a/man/transformers_drop.Rd b/man/transformers_drop.Rd new file mode 100644 index 000000000..19623db73 --- /dev/null +++ b/man/transformers_drop.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/transform-files.R +\name{transformers_drop} +\alias{transformers_drop} +\title{Remove transformers that are not needed} +\usage{ +transformers_drop(text, transformers) +} +\arguments{ +\item{text}{Text to parse. Can also be the column \code{text} of the output of +\code{\link[=compute_parse_data_nested]{compute_parse_data_nested()}}, where each element is a token (instead of a +line).} + +\item{transformers}{the transformers.} +} +\description{ +The goal is to speed up styling by removing all rules that are only +applicable in contexts that don't occur often, e.g. for most code, we don't +expect ";" to be in it, so we don't need to apply \code{resolve_semicolon()} on +every \emph{nest}. 
+} +\seealso{ +specify_transformers_drop +} +\keyword{internal} diff --git a/man/try_transform_as_r_file.Rd b/man/try_transform_as_r_file.Rd new file mode 100644 index 000000000..e5a345e4c --- /dev/null +++ b/man/try_transform_as_r_file.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/addins.R +\name{try_transform_as_r_file} +\alias{try_transform_as_r_file} +\title{Style a file as if it was an .R file} +\usage{ +try_transform_as_r_file(context, transformer) +} +\arguments{ +\item{context}{The context from \code{styler:::get_rstudio_context()}.} + +\item{transformer}{A transformer function most conveniently constructed with +\code{\link[=make_transformer]{make_transformer()}}.} +} +\description{ +If not successful, the file is most +likely not a .R file, so saving the file and try styling again will work if +the file is an .Rmd file. Otherwise, we can throw an error that the file must +be a .R or .Rmd file. +} +\keyword{internal} diff --git a/man/unindent_child.Rd b/man/unindent_child.Rd new file mode 100644 index 000000000..2ab7c9f70 --- /dev/null +++ b/man/unindent_child.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/unindent.R +\name{unindent_child} +\alias{unindent_child} +\title{Unindent a child} +\usage{ +unindent_child(pd, token = c("')'", "'}'"), unindent_by = 2L) +} +\arguments{ +\item{pd}{A parse table.} + +\item{token}{The token the unindention should be based on.} + +\item{unindent_by}{By how many spaces one level of indention is reversed.} +} +\description{ +Unindent a child +} +\keyword{internal} diff --git a/man/unindent_fun_dec.Rd b/man/unindent_fun_dec.Rd new file mode 100644 index 000000000..f0209be93 --- /dev/null +++ b/man/unindent_fun_dec.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/rules-indention.R +\name{unindent_fun_dec} +\alias{unindent_fun_dec} +\title{Revert the indention of function declaration header} +\usage{ +unindent_fun_dec(pd, indent_by = 2L) +} +\arguments{ +\item{pd}{A parse table.} + +\item{indent_by}{How many spaces of indention should be inserted after +operators such as '('.} +} +\description{ +Necessary for consistent indention of the function declaration header. 
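A small sketch of what this unindention rule achieves at the user level; roughly, the continuation line of a broken function declaration header is re-indented to line up with the first argument:
styler::style_text(c(
  "long_name <- function(x,",
  "    y) {",
  "  x + y",
  "}"
))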
+} +\seealso{ +set_unindention_child update_indention_ref_fun_dec +} +\keyword{internal} diff --git a/man/update_indention.Rd b/man/update_indention.Rd new file mode 100644 index 000000000..e9cc4aa86 --- /dev/null +++ b/man/update_indention.Rd @@ -0,0 +1,64 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/indent.R, R/rules-indention.R +\name{update_indention} +\alias{update_indention} +\alias{indent_without_paren_for_while_fun} +\alias{indent_without_paren_if_else} +\alias{indent_braces} +\alias{indent_op} +\alias{indent_eq_sub} +\alias{indent_without_paren} +\title{Update indention information of parse data} +\usage{ +indent_without_paren_for_while_fun(pd, indent_by) + +indent_without_paren_if_else(pd, indent_by) + +indent_braces(pd, indent_by) + +indent_op( + pd, + indent_by, + token = c(math_token, logical_token, special_token, "PIPE", "LEFT_ASSIGN", "EQ_ASSIGN", + "'$'", "'~'") +) + +indent_eq_sub(pd, indent_by, token = c("EQ_SUB", "EQ_FORMALS")) + +indent_without_paren(pd, indent_by = 2L) +} +\arguments{ +\item{pd}{A nested or flat parse table that is already enhanced with +line break and space information via \code{\link[=default_style_guide_attributes]{default_style_guide_attributes()}}.} + +\item{indent_by}{How many spaces should be added after the token of interest.} + +\item{token}{The token the indention should be based on.} +} +\description{ +Update indention information of parse data +} +\section{Functions}{ +\itemize{ +\item \code{indent_without_paren_for_while_fun()}: Is used to indent for and statements and +function definitions without parenthesis. + +\item \code{indent_without_paren_if_else()}: Is used to indent if and if-else statements. + +\item \code{indent_braces()}: Inserts indention based on round, square and +curly brackets. + +\item \code{indent_op()}: Indents \emph{all} tokens after \code{token} - including +the last token. + +\item \code{indent_eq_sub()}: Updates indention for token EQ_SUB. Only differs +from \code{\link[=indent_op]{indent_op()}} in the sense that not all subsequent tokens in the parse +table are necessarily indented, as \code{EQ_SUB} and \code{EQ_FORMALS} can occur +multiple times in a parse table. +occurs is not indented (see\code{\link[=compute_indent_indices]{compute_indent_indices()}}) + +\item \code{indent_without_paren()}: Is used to indent for / while / if / if-else +statements that do not have curly parenthesis. + +}} +\keyword{internal} diff --git a/man/update_indention_ref.Rd b/man/update_indention_ref.Rd new file mode 100644 index 000000000..a041faead --- /dev/null +++ b/man/update_indention_ref.Rd @@ -0,0 +1,34 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/rules-indention.R +\name{update_indention_ref} +\alias{update_indention_ref} +\alias{update_indention_ref_fun_dec} +\title{Update the indention reference} +\usage{ +update_indention_ref_fun_dec(pd_nested) +} +\arguments{ +\item{pd_nested}{A nested parse table.} +} +\description{ +Update the indention reference +} +\section{Functions}{ +\itemize{ +\item \code{update_indention_ref_fun_dec()}: Updates the reference pos_id for all +tokens in \code{pd_nested} if \code{pd_nested} contains a function declaration. +Tokens inside a function declaration are are re-indented, +that is, they are indented up to the level at which the token FUNCTION +ends in terms of col2. 
+ +}} +\examples{ +\dontrun{ +a <- function(x, + y) { + x + y +} +} + +} +\keyword{internal} diff --git a/man/update_newlines.Rd b/man/update_newlines.Rd new file mode 100644 index 000000000..7efa91981 --- /dev/null +++ b/man/update_newlines.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/indent.R +\name{update_newlines} +\alias{update_newlines} +\title{Update the newlines attribute} +\usage{ +update_newlines(pd) +} +\arguments{ +\item{pd}{A parse table.} +} +\value{ +A parse table with synchronized \code{lag_newlines} and \code{newlines} +columns. +} +\description{ +As we work only with the \code{lag_newlines} attribute for setting the line +breaks (\code{R/rules-line_breaks.R}), but we need \code{newlines} to determine +whether or not to set \code{spaces} (\code{R/rules-spaces.R}), we have to update the +attribute. We cannot simply use \code{dplyr::lead(pd$lag_newlines)} since we would +lose information for the last token. \code{spaces} is left as is in +R/rules-spacing.R for tokens at the end of a line since this allows styling +without touching indention. +} +\seealso{ +choose_indention +} +\keyword{internal} diff --git a/man/validate_new_pos_ids.Rd b/man/validate_new_pos_ids.Rd new file mode 100644 index 000000000..f2b1e22a4 --- /dev/null +++ b/man/validate_new_pos_ids.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/token-create.R +\name{validate_new_pos_ids} +\alias{validate_new_pos_ids} +\title{Validate sequence of new position ids} +\usage{ +validate_new_pos_ids(new_ids, after) +} +\arguments{ +\item{new_ids}{A vector with new ids} + +\item{after}{Whether the ids are created with \code{after = TRUE} (and hence +should be in the range x.0-x.45) or not.} +} +\description{ +Ids created with \code{after = TRUE} can have \code{pos_id} values between x.0 and +x.5 and ids created with \code{after = FALSE} can have \code{pos_id} values between +1+ x.0 and 1 + x.5 where x is the \code{pos_id} integer which was used as a +reference to create the new \code{pos_ids}. +} +\seealso{ +Other token creators: +\code{\link{create_pos_ids}()}, +\code{\link{create_tokens}()} +} +\concept{token creators} +\keyword{internal} diff --git a/man/verify_roundtrip.Rd b/man/verify_roundtrip.Rd new file mode 100644 index 000000000..f6492533e --- /dev/null +++ b/man/verify_roundtrip.Rd @@ -0,0 +1,35 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/transform-files.R +\name{verify_roundtrip} +\alias{verify_roundtrip} +\title{Verify the styling} +\usage{ +verify_roundtrip(old_text, new_text, parsable_only = FALSE) +} +\arguments{ +\item{old_text}{The initial expression in its character representation.} + +\item{new_text}{The styled expression in its character representation.} + +\item{parsable_only}{If we should only check for the code to be parsable.} +} +\description{ +If scope was set to "line_breaks" or lower (compare \code{\link[=tidyverse_style]{tidyverse_style()}}), +we can compare the expression before and after styling and return an error if +it is not the same. +If that's not possible, a weaker guarantee that we want to give is that the +resulting code is parsable. +} +\section{Limitation}{ + +Note that this method ignores roxygen code examples and +comments and no verification can be conducted if tokens are in the styling +scope. 
+} + +\examples{ +styler:::verify_roundtrip("a+1", "a + 1") +styler:::verify_roundtrip("a+1", "a + 1 # comments are dropped") +try(styler:::verify_roundtrip("a+1", "b - 3")) +} +\keyword{internal} diff --git a/man/visit.Rd b/man/visit.Rd new file mode 100644 index 000000000..f2a757fc9 --- /dev/null +++ b/man/visit.Rd @@ -0,0 +1,37 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/visit.R +\name{visit} +\alias{visit} +\alias{pre_visit} +\alias{pre_visit_one} +\alias{post_visit} +\alias{post_visit_one} +\title{Visit'em all} +\usage{ +pre_visit(pd_nested, funs) + +pre_visit_one(pd_nested, fun) + +post_visit(pd_nested, funs) + +post_visit_one(pd_nested, fun) +} +\arguments{ +\item{pd_nested}{A nested parse table.} + +\item{funs}{A list of transformer functions.} +} +\description{ +Apply a list of functions to each level in a nested parse table. +\code{pre_visit()} applies \code{funs} before it proceeds to the children, +(that is, starts from the outermost level of nesting progressing +to the innermost level), \code{post_visit()} proceeds to its children +before applying the functions (meaning it first applies the functions +to the innermost level of nesting first and then going outwards). +} +\seealso{ +Other visitors: +\code{\link{visit_one}()} +} +\concept{visitors} +\keyword{internal} diff --git a/man/visit_one.Rd b/man/visit_one.Rd new file mode 100644 index 000000000..7a3bdce6b --- /dev/null +++ b/man/visit_one.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/visit.R +\name{visit_one} +\alias{visit_one} +\title{Transform a flat parse table with a list of transformers} +\usage{ +visit_one(pd_flat, funs) +} +\arguments{ +\item{pd_flat}{A flat parse table.} + +\item{funs}{A list of transformer functions.} +} +\description{ +Uses \code{\link[=Reduce]{Reduce()}} to apply each function of \code{funs} sequentially to +\code{pd_flat}. +} +\seealso{ +Other visitors: +\code{\link{visit}} +} +\concept{visitors} +\keyword{internal} diff --git a/man/wrap_else_multiline_curly.Rd b/man/wrap_else_multiline_curly.Rd new file mode 100644 index 000000000..1a9534242 --- /dev/null +++ b/man/wrap_else_multiline_curly.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/rules-tokens.R +\name{wrap_else_multiline_curly} +\alias{wrap_else_multiline_curly} +\title{Add curly braces to else} +\usage{ +wrap_else_multiline_curly(pd, indent_by = 2L, space_after = 0L) +} +\arguments{ +\item{pd}{A parse table.} + +\item{indent_by}{The amount of spaces used to indent an expression in curly +braces. Used for unindention.} + +\item{space_after}{How many spaces should be inserted after the closing brace.} +} +\description{ +Wrap the else part of a conditional expression into curly braces if not +already wrapped into a such. 
+} +\keyword{internal} diff --git a/man/wrap_expr_in_curly.Rd b/man/wrap_expr_in_curly.Rd new file mode 100644 index 000000000..6aa6fe057 --- /dev/null +++ b/man/wrap_expr_in_curly.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/token-create.R +\name{wrap_expr_in_curly} +\alias{wrap_expr_in_curly} +\title{Wrap an expression in curly braces} +\usage{ +wrap_expr_in_curly(pd, stretch_out = c(FALSE, FALSE), space_after = 1L) +} +\arguments{ +\item{pd}{A parse table.} + +\item{stretch_out}{Whether or not to create a line break after the opening +curly brace and before the closing curly brace.} + +\item{space_after}{How many spaces should be inserted after the closing brace.} +} +\description{ +Adds curly braces to an expression (represented as a parse table) if there +are none. +} +\keyword{internal} diff --git a/man/wrap_expr_in_expr.Rd b/man/wrap_expr_in_expr.Rd new file mode 100644 index 000000000..312aae585 --- /dev/null +++ b/man/wrap_expr_in_expr.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/relevel.R +\name{wrap_expr_in_expr} +\alias{wrap_expr_in_expr} +\title{Wrap an expression into an expression} +\usage{ +wrap_expr_in_expr(pd) +} +\arguments{ +\item{pd}{A parse table.} +} +\description{ +Takes a parse table and wraps it in a new parse table that contains the +expression as a child. +} +\keyword{internal} diff --git a/man/wrap_if_else_while_for_fun_multi_line_in_curly.Rd b/man/wrap_if_else_while_for_fun_multi_line_in_curly.Rd new file mode 100644 index 000000000..29b7c06e9 --- /dev/null +++ b/man/wrap_if_else_while_for_fun_multi_line_in_curly.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/rules-tokens.R +\name{wrap_if_else_while_for_fun_multi_line_in_curly} +\alias{wrap_if_else_while_for_fun_multi_line_in_curly} +\title{Wrap if-else, while and for statements in curly braces} +\usage{ +wrap_if_else_while_for_fun_multi_line_in_curly(pd, indent_by = 2L) +} +\arguments{ +\item{pd}{A parse table.} + +\item{indent_by}{The amount of spaces used to indent an expression in curly +braces. Used for unindention.} +} +\description{ +Wrap statements in curly braces if it is not already wrapped in a such. +} +\keyword{internal} diff --git a/man/wrap_multiline_curly.Rd b/man/wrap_multiline_curly.Rd new file mode 100644 index 000000000..bc9ea0104 --- /dev/null +++ b/man/wrap_multiline_curly.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/rules-tokens.R +\name{wrap_multiline_curly} +\alias{wrap_multiline_curly} +\title{Wrap a multi-line statement in curly braces} +\usage{ +wrap_multiline_curly(pd, indent_by, key_token, space_after = 1L) +} +\arguments{ +\item{pd}{A parse table.} + +\item{indent_by}{The amount of spaces used to indent an expression in curly +braces. Used for unindention.} + +\item{key_token}{The token that comes right before the token that contains +the expression to be wrapped (ignoring comments). 
For if and while loops, +this is the closing "')'", for a for-loop it's "forcond".} + +\item{space_after}{How many spaces should be inserted after the closing brace.} +} +\description{ +Wrap a multi-line statement in curly braces +} +\keyword{internal} diff --git a/man/wrap_subexpr_in_curly.Rd b/man/wrap_subexpr_in_curly.Rd new file mode 100644 index 000000000..875143ca4 --- /dev/null +++ b/man/wrap_subexpr_in_curly.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/rules-tokens.R +\name{wrap_subexpr_in_curly} +\alias{wrap_subexpr_in_curly} +\title{Wrap a sub-expression in curly braces} +\usage{ +wrap_subexpr_in_curly(pd, ind_to_be_wrapped, indent_by, space_after) +} +\arguments{ +\item{pd}{A parse table.} + +\item{ind_to_be_wrapped}{The indices of the rows that should be wrapped +into a new expression.} + +\item{space_after}{How many spaces should be inserted after the closing brace.} +} +\description{ +Wraps some rows of a parse table into a sub-expression. +} +\keyword{internal} diff --git a/man/write_utf8.Rd b/man/write_utf8.Rd new file mode 100644 index 000000000..9955e86e2 --- /dev/null +++ b/man/write_utf8.Rd @@ -0,0 +1,12 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/io.R +\name{write_utf8} +\alias{write_utf8} +\title{Drop-in replacement for \code{xfun::write_utf8()}} +\usage{ +write_utf8(text, con, ...) +} +\description{ +Drop-in replacement for \code{xfun::write_utf8()} +} +\keyword{internal} diff --git a/tests/testmanual/addins/non-r.py b/tests/testmanual/addins/non-r.py new file mode 100644 index 000000000..381f905d2 --- /dev/null +++ b/tests/testmanual/addins/non-r.py @@ -0,0 +1,2 @@ +2 + 2 +" " diff --git a/tests/testmanual/addins/r-invalid.R b/tests/testmanual/addins/r-invalid.R new file mode 100644 index 000000000..88d5c0639 --- /dev/null +++ b/tests/testmanual/addins/r-invalid.R @@ -0,0 +1 @@ +1+ /1 diff --git a/tests/testmanual/addins/r-valid.R b/tests/testmanual/addins/r-valid.R new file mode 100644 index 000000000..612dabf52 --- /dev/null +++ b/tests/testmanual/addins/r-valid.R @@ -0,0 +1 @@ +1+ 1 diff --git a/tests/testmanual/addins/rmd-invalid.Rmd b/tests/testmanual/addins/rmd-invalid.Rmd new file mode 100644 index 000000000..5b7c2f3ee --- /dev/null +++ b/tests/testmanual/addins/rmd-invalid.Rmd @@ -0,0 +1,30 @@ +--- +title: "Untitled" +author: "Lorenz Walthert" +date: "4/28/2019" +output: html_document +--- + +```{r setup, include=FALSE} +knitr::opts_chunk$setecho = -TRUE) +``` + +## R Markdown + +This is an R Markdown document. Markdown is a simple formatting syntax for authoring HTML, PDF, and MS Word documents. For more details on using R Markdown see . + +When you click the **Knit** button a document will be generated that includes both content as well as the output of any embedded R code chunks within the document. You can embed an R code chunk like this: + +```{r cars} +summary(cars ) +``` + +## Including Plots + +You can also embed plots, for example: + +```{r pressure, echo=FALSE} +plot(pressure) +``` + +Note that the `echo = FALSE` parameter was added to the code chunk to prevent printing of the R code that generated the plot. 
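The *-invalid fixtures above deliberately contain code that does not parse; a small sketch of how styler reacts to such input (not taken from the test suite):
# unparsable input makes styler throw an error instead of writing anything back
try(styler::style_text("1+ /1"))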
diff --git a/tests/testmanual/addins/rmd-valid.Rmd b/tests/testmanual/addins/rmd-valid.Rmd new file mode 100644 index 000000000..b92ffe928 --- /dev/null +++ b/tests/testmanual/addins/rmd-valid.Rmd @@ -0,0 +1,30 @@ +--- +title: "Untitled" +author: "Lorenz Walthert" +date: "4/28/2019" +output: html_document +--- + +```{r setup, include=FALSE} +knitr::opts_chunk$set(echo = -TRUE) +``` + +## R Markdown + +This is an R Markdown document. Markdown is a simple formatting syntax for authoring HTML, PDF, and MS Word documents. For more details on using R Markdown see . + +When you click the **Knit** button a document will be generated that includes both content as well as the output of any embedded R code chunks within the document. You can embed an R code chunk like this: + +```{r cars} +summary(cars) +``` + +## Including Plots + +You can also embed plots, for example: + +```{r pressure, echo=FALSE} +plot(pressure) +``` + +Note that the `echo = FALSE` parameter was added to the code chunk to prevent printing of the R code that generated the plot. diff --git a/tests/testmanual/addins/rnw-invalid.Rnw b/tests/testmanual/addins/rnw-invalid.Rnw new file mode 100644 index 000000000..046415c2c --- /dev/null +++ b/tests/testmanual/addins/rnw-invalid.Rnw @@ -0,0 +1,28 @@ +\documentclass{article} + +\begin{document} + +Some text +<>>= +# Some R code +f <- function(x) { + x +} +@ + +More text + +<<>>= +# More R code +g <- function(y) { + y +} +@ + +Final text +<<>>= +1 + 2 +@ + + +\end{document} diff --git a/tests/testmanual/addins/rnw-valid.Rnw b/tests/testmanual/addins/rnw-valid.Rnw new file mode 100644 index 000000000..e964baaf9 --- /dev/null +++ b/tests/testmanual/addins/rnw-valid.Rnw @@ -0,0 +1,28 @@ +\documentclass{article} + +\begin{document} + +Some text +<<>>= +# Some R code +f <- function(x) { + x +} +@ + +More text + +<<>>= +# More R code +g <- function(y) { + y +} +@ + +Final text +<<>>= +1 + 2 +@ + + +\end{document} diff --git a/tests/testmanual/tests b/tests/testmanual/tests new file mode 100644 index 000000000..118801f24 --- /dev/null +++ b/tests/testmanual/tests @@ -0,0 +1,25 @@ +# Manual tests + +## Addins + +* set style: + - test setting a valid style + - test setting an invalid style +* style active file: + - saved .R file (valid and invalid code) + - unsaved .R file + - saved .Rmd file (valid and invalid code) + - unsaved .Rmd file + - saved .Rnw file (valid and invalid code) + - unsaved .Rnw file + - saved non-R file + - unsaved R file +* style selection: + - saved .R file (valid and invalid code) + - unsaved .R file + - saved .Rmd file (valid and invalid code) + - unsaved .Rmd file + - saved .Rnw file (valid and invalid code) + - unsaved .Rnw file + - saved non-R file + - unsaved R file diff --git a/tests/testthat.R b/tests/testthat.R index 08ba76957..823ab1776 100644 --- a/tests/testthat.R +++ b/tests/testthat.R @@ -1,4 +1,8 @@ library(testthat) library(styler) +test_check("styler") # checks multiple files, in parallel -test_check("styler") +# checks file one by one, not parallel +Sys.setenv(STYLER_TEST_IS_TRULY_PARALLEL = FALSE) +test_file("testthat/test-cache-high-level-api.R") +test_file("testthat/tests-cache-require-serial.R") diff --git a/tests/testthat/_snaps/cache-with-r-cache.md b/tests/testthat/_snaps/cache-with-r-cache.md new file mode 100644 index 000000000..0afcab499 --- /dev/null +++ b/tests/testthat/_snaps/cache-with-r-cache.md @@ -0,0 +1,24 @@ +# cached expressions are displayed propperly + + Code + cache_info[, c("n", "size", "last_modified", "activated")] + Output + n 
size last_modified activated + 1 0 0 -Inf FALSE + +--- + + Code + cache_info[, c("n", "size", "activated")] + Output + n size activated + 1 1 0 TRUE + +--- + + Code + cache_info[, c("n", "size", "activated")] + Output + n size activated + 1 2 0 TRUE + diff --git a/tests/testthat/_snaps/helpers.md b/tests/testthat/_snaps/helpers.md new file mode 100644 index 000000000..d27821d5d --- /dev/null +++ b/tests/testthat/_snaps/helpers.md @@ -0,0 +1,15 @@ +# can construct and print vertical + + Code + construct_vertical(c("1 + 1", "nw")) + Output + 1 + 1 + nw + +# can lookup tokens + + Code + lookup_new_special() + Output + [1] "SPECIAL-PIPE" "SPECIAL-IN" "SPECIAL-OTHER" + diff --git a/tests/testthat/_snaps/public_api-1.md b/tests/testthat/_snaps/public_api-1.md new file mode 100644 index 000000000..da7b965ef --- /dev/null +++ b/tests/testthat/_snaps/public_api-1.md @@ -0,0 +1,47 @@ +# messages (via cat()) of style_file are correct + + Code + cat(catch_style_file_output(file.path("public-api", "xyzdir-dirty", + "dirty-sample-with-scope-tokens.R")), sep = "\n") + Output + Styling 1 files: + dirty-sample-with-scope-tokens.R i + ---------------------------------------- + Status Count Legend + v 0 File unchanged. + i 1 File changed. + x 0 Styling threw an error. + ---------------------------------------- + Please review the changes carefully! + +--- + + Code + cat(catch_style_file_output(file.path("public-api", "xyzdir-dirty", + "clean-sample-with-scope-tokens.R")), sep = "\n") + Output + Styling 1 files: + clean-sample-with-scope-tokens.R v + ---------------------------------------- + Status Count Legend + v 1 File unchanged. + i 0 File changed. + x 0 Styling threw an error. + ---------------------------------------- + +--- + + Code + cat(catch_style_file_output(file.path("public-api", "xyzdir-dirty", + "dirty-sample-with-scope-spaces.R")), sep = "\n") + Output + Styling 1 files: + dirty-sample-with-scope-spaces.R i + ---------------------------------------- + Status Count Legend + v 0 File unchanged. + i 1 File changed. + x 0 Styling threw an error. + ---------------------------------------- + Please review the changes carefully! + diff --git a/tests/testthat/_snaps/public_api-3.md b/tests/testthat/_snaps/public_api-3.md new file mode 100644 index 000000000..1e18d4101 --- /dev/null +++ b/tests/testthat/_snaps/public_api-3.md @@ -0,0 +1,7 @@ +# No sensitive to decimal option + + Code + style_text("1") + Output + 1 + diff --git a/tests/testthat/_snaps/public_api.md b/tests/testthat/_snaps/public_api.md new file mode 100644 index 000000000..b55a037eb --- /dev/null +++ b/tests/testthat/_snaps/public_api.md @@ -0,0 +1,54 @@ +# messages (via cat()) of style_file are correct + + Code + cat(catch_style_file_output(file.path("public-api", "xyzdir-dirty", + "dirty-sample-with-scope-tokens.R")), sep = "\n") + Output + Styling 1 files: + dirty-sample-with-scope-tokens.R i + ---------------------------------------- + Status Count Legend + v 0 File unchanged. + i 1 File changed. + x 0 Styling threw an error. + ---------------------------------------- + Please review the changes carefully! + +--- + + Code + cat(catch_style_file_output(file.path("public-api", "xyzdir-dirty", + "clean-sample-with-scope-tokens.R")), sep = "\n") + Output + Styling 1 files: + clean-sample-with-scope-tokens.R v + ---------------------------------------- + Status Count Legend + v 1 File unchanged. + i 0 File changed. + x 0 Styling threw an error. 
+ ---------------------------------------- + +--- + + Code + cat(catch_style_file_output(file.path("public-api", "xyzdir-dirty", + "dirty-sample-with-scope-spaces.R")), sep = "\n") + Output + Styling 1 files: + dirty-sample-with-scope-spaces.R i + ---------------------------------------- + Status Count Legend + v 0 File unchanged. + i 1 File changed. + x 0 Styling threw an error. + ---------------------------------------- + Please review the changes carefully! + +# No sensitive to decimal option + + Code + style_text("1") + Output + 1 + diff --git a/tests/testthat/_snaps/roundtrip.md b/tests/testthat/_snaps/roundtrip.md new file mode 100644 index 000000000..f0f66b9bc --- /dev/null +++ b/tests/testthat/_snaps/roundtrip.md @@ -0,0 +1,11 @@ +# correct styling does not give an error + + Code + verify_roundtrip("1+1", "1 + 1") + +# corrupt styling does give an error + + The expression evaluated before the styling is not the same as the expression after styling. This should not happen. + i This is an internal error that was detected in the styler package. + Please report it at with a reprex () and the full backtrace. + diff --git a/tests/testthat/_snaps/utils.md b/tests/testthat/_snaps/utils.md new file mode 100644 index 000000000..afd921aca --- /dev/null +++ b/tests/testthat/_snaps/utils.md @@ -0,0 +1,24 @@ +# files with and without blank EOF line are read correctly + + Code + read_utf8(test_path("reference-objects/missing-blank-at-EOF.R")) + Output + $text + [1] "x" + + $missing_EOF_line_break + [1] TRUE + + +--- + + Code + read_utf8(test_path("reference-objects/non-missing-blank-at-EOF.R")) + Output + $text + [1] "x" + + $missing_EOF_line_break + [1] FALSE + + diff --git a/tests/testthat/alignment/cols-with-one-row-in.R b/tests/testthat/alignment/cols-with-one-row-in.R new file mode 100644 index 000000000..a4d47da45 --- /dev/null +++ b/tests/testthat/alignment/cols-with-one-row-in.R @@ -0,0 +1,16 @@ +c( + "x", "z", + "cgjhg", "thi", "z" +) + + +c( + "x", "z", + "cgjhg", "thi", "z" +) + + +c( + "x", "y", "z", "m", "n", "o", "p", + "c", "d" +) diff --git a/tests/testthat/alignment/cols-with-one-row-in_tree b/tests/testthat/alignment/cols-with-one-row-in_tree new file mode 100644 index 000000000..f821af25e --- /dev/null +++ b/tests/testthat/alignment/cols-with-one-row-in_tree @@ -0,0 +1,73 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: c( + [0/0] {1} + ¦ ¦--expr: c [0/0] {3} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {2} + ¦ ¦--'(': ( [0/2] {4} + ¦ ¦--expr: "x" [1/0] {6} + ¦ ¦ °--STR_CONST: "x" [0/0] {5} + ¦ ¦--',': , [0/7] {7} + ¦ ¦--expr: "z" [0/0] {9} + ¦ ¦ °--STR_CONST: "z" [0/0] {8} + ¦ ¦--',': , [0/2] {10} + ¦ ¦--expr: "cgjh [1/0] {12} + ¦ ¦ °--STR_CONST: "cgjh [0/0] {11} + ¦ ¦--',': , [0/1] {13} + ¦ ¦--expr: "thi" [0/0] {15} + ¦ ¦ °--STR_CONST: "thi" [0/0] {14} + ¦ ¦--',': , [0/1] {16} + ¦ ¦--expr: "z" [0/0] {18} + ¦ ¦ °--STR_CONST: "z" [0/0] {17} + ¦ °--')': ) [1/0] {19} + ¦--expr: c( + [3/0] {20} + ¦ ¦--expr: c [0/0] {22} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {21} + ¦ ¦--'(': ( [0/2] {23} + ¦ ¦--expr: "x" [1/0] {25} + ¦ ¦ °--STR_CONST: "x" [0/0] {24} + ¦ ¦--',': , [0/7] {26} + ¦ ¦--expr: "z" [0/0] {28} + ¦ ¦ °--STR_CONST: "z" [0/0] {27} + ¦ ¦--',': , [0/2] {29} + ¦ ¦--expr: "cgjh [1/0] {31} + ¦ ¦ °--STR_CONST: "cgjh [0/0] {30} + ¦ ¦--',': , [0/2] {32} + ¦ ¦--expr: "thi" [0/0] {34} + ¦ ¦ °--STR_CONST: "thi" [0/0] {33} + ¦ ¦--',': , [0/1] {35} + ¦ ¦--expr: "z" [0/0] {37} + ¦ ¦ °--STR_CONST: "z" [0/0] {36} + ¦ °--')': ) [1/0] {38} + °--expr: c( + [3/0] {39} + ¦--expr: 
c [0/0] {41} + ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {40} + ¦--'(': ( [0/2] {42} + ¦--expr: "x" [1/0] {44} + ¦ °--STR_CONST: "x" [0/0] {43} + ¦--',': , [0/1] {45} + ¦--expr: "y" [0/0] {47} + ¦ °--STR_CONST: "y" [0/0] {46} + ¦--',': , [0/1] {48} + ¦--expr: "z" [0/0] {50} + ¦ °--STR_CONST: "z" [0/0] {49} + ¦--',': , [0/2] {51} + ¦--expr: "m" [0/0] {53} + ¦ °--STR_CONST: "m" [0/0] {52} + ¦--',': , [0/1] {54} + ¦--expr: "n" [0/0] {56} + ¦ °--STR_CONST: "n" [0/0] {55} + ¦--',': , [0/2] {57} + ¦--expr: "o" [0/0] {59} + ¦ °--STR_CONST: "o" [0/0] {58} + ¦--',': , [0/1] {60} + ¦--expr: "p" [0/0] {62} + ¦ °--STR_CONST: "p" [0/0] {61} + ¦--',': , [0/2] {63} + ¦--expr: "c" [1/0] {65} + ¦ °--STR_CONST: "c" [0/0] {64} + ¦--',': , [0/1] {66} + ¦--expr: "d" [0/0] {68} + ¦ °--STR_CONST: "d" [0/0] {67} + °--')': ) [1/0] {69} diff --git a/tests/testthat/alignment/cols-with-one-row-out.R b/tests/testthat/alignment/cols-with-one-row-out.R new file mode 100644 index 000000000..30893a93e --- /dev/null +++ b/tests/testthat/alignment/cols-with-one-row-out.R @@ -0,0 +1,16 @@ +c( + "x", "z", + "cgjhg", "thi", "z" +) + + +c( + "x", "z", + "cgjhg", "thi", "z" +) + + +c( + "x", "y", "z", "m", "n", "o", "p", + "c", "d" +) diff --git a/tests/testthat/alignment/fun-decs-in.R b/tests/testthat/alignment/fun-decs-in.R new file mode 100644 index 000000000..68b5f335f --- /dev/null +++ b/tests/testthat/alignment/fun-decs-in.R @@ -0,0 +1,38 @@ +# aligned +function(x = NULL, + tt = NULL, + ayz = NULL) {} + + +# aligned +k <- function(x = NULL, + aq = NULL, + ayz = NULL) {} + + +# aligned, eq right +function(x = 2, + tt = 1, + ayz = 99) {} + +# aligned, eq left +function(x = 2, + tt = 1, + ayz = 99) {} + + +# not aligned +k <- function(x = fish, + aq = 21, + ayz = t(322)) {} + +# aligned +k <- function(x = flus(we), + aq = x - 22, k = 22, + ayz = m(jk5), xfea = 3) {} + + +# aligned +k <- function(x = flus(we), + aq = x - 22, k = 22, + ayz = m(jk5), xfea = 3) {} diff --git a/tests/testthat/alignment/fun-decs-in_tree b/tests/testthat/alignment/fun-decs-in_tree new file mode 100644 index 000000000..4ff6219f9 --- /dev/null +++ b/tests/testthat/alignment/fun-decs-in_tree @@ -0,0 +1,224 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: # ali [0/0] {1} + ¦--expr: funct [1/0] {2} + ¦ ¦--FUNCTION: funct [0/0] {3} + ¦ ¦--'(': ( [0/0] {4} + ¦ ¦--SYMBOL_FORMALS: x [0/3] {5} + ¦ ¦--EQ_FORMALS: = [0/1] {6} + ¦ ¦--expr: NULL [0/0] {8} + ¦ ¦ °--NULL_CONST: NULL [0/0] {7} + ¦ ¦--',': , [0/9] {9} + ¦ ¦--SYMBOL_FORMALS: tt [1/2] {10} + ¦ ¦--EQ_FORMALS: = [0/1] {11} + ¦ ¦--expr: NULL [0/0] {13} + ¦ ¦ °--NULL_CONST: NULL [0/0] {12} + ¦ ¦--',': , [0/9] {14} + ¦ ¦--SYMBOL_FORMALS: ayz [1/1] {15} + ¦ ¦--EQ_FORMALS: = [0/1] {16} + ¦ ¦--expr: NULL [0/0] {18} + ¦ ¦ °--NULL_CONST: NULL [0/0] {17} + ¦ ¦--')': ) [0/1] {19} + ¦ °--expr: {} [0/0] {20} + ¦ ¦--'{': { [0/0] {21} + ¦ °--'}': } [0/0] {22} + ¦--COMMENT: # ali [3/0] {23} + ¦--expr: k <- [1/0] {24} + ¦ ¦--expr: k [0/1] {26} + ¦ ¦ °--SYMBOL: k [0/0] {25} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {27} + ¦ °--expr: funct [0/0] {28} + ¦ ¦--FUNCTION: funct [0/0] {29} + ¦ ¦--'(': ( [0/0] {30} + ¦ ¦--SYMBOL_FORMALS: x [0/3] {31} + ¦ ¦--EQ_FORMALS: = [0/1] {32} + ¦ ¦--expr: NULL [0/0] {34} + ¦ ¦ °--NULL_CONST: NULL [0/0] {33} + ¦ ¦--',': , [0/14] {35} + ¦ ¦--SYMBOL_FORMALS: aq [1/2] {36} + ¦ ¦--EQ_FORMALS: = [0/1] {37} + ¦ ¦--expr: NULL [0/0] {39} + ¦ ¦ °--NULL_CONST: NULL [0/0] {38} + ¦ ¦--',': , [0/14] {40} + ¦ ¦--SYMBOL_FORMALS: ayz [1/1] {41} + ¦ ¦--EQ_FORMALS: = [0/1] {42} + ¦ ¦--expr: NULL [0/0] 
{44} + ¦ ¦ °--NULL_CONST: NULL [0/0] {43} + ¦ ¦--')': ) [0/1] {45} + ¦ °--expr: {} [0/0] {46} + ¦ ¦--'{': { [0/0] {47} + ¦ °--'}': } [0/0] {48} + ¦--COMMENT: # ali [3/0] {49} + ¦--expr: funct [1/0] {50} + ¦ ¦--FUNCTION: funct [0/0] {51} + ¦ ¦--'(': ( [0/0] {52} + ¦ ¦--SYMBOL_FORMALS: x [0/3] {53} + ¦ ¦--EQ_FORMALS: = [0/2] {54} + ¦ ¦--expr: 2 [0/0] {56} + ¦ ¦ °--NUM_CONST: 2 [0/0] {55} + ¦ ¦--',': , [0/9] {57} + ¦ ¦--SYMBOL_FORMALS: tt [1/2] {58} + ¦ ¦--EQ_FORMALS: = [0/2] {59} + ¦ ¦--expr: 1 [0/0] {61} + ¦ ¦ °--NUM_CONST: 1 [0/0] {60} + ¦ ¦--',': , [0/9] {62} + ¦ ¦--SYMBOL_FORMALS: ayz [1/1] {63} + ¦ ¦--EQ_FORMALS: = [0/1] {64} + ¦ ¦--expr: 99 [0/0] {66} + ¦ ¦ °--NUM_CONST: 99 [0/0] {65} + ¦ ¦--')': ) [0/1] {67} + ¦ °--expr: {} [0/0] {68} + ¦ ¦--'{': { [0/0] {69} + ¦ °--'}': } [0/0] {70} + ¦--COMMENT: # ali [2/0] {71} + ¦--expr: funct [1/0] {72} + ¦ ¦--FUNCTION: funct [0/0] {73} + ¦ ¦--'(': ( [0/0] {74} + ¦ ¦--SYMBOL_FORMALS: x [0/1] {75} + ¦ ¦--EQ_FORMALS: = [0/4] {76} + ¦ ¦--expr: 2 [0/0] {78} + ¦ ¦ °--NUM_CONST: 2 [0/0] {77} + ¦ ¦--',': , [0/9] {79} + ¦ ¦--SYMBOL_FORMALS: tt [1/1] {80} + ¦ ¦--EQ_FORMALS: = [0/3] {81} + ¦ ¦--expr: 1 [0/0] {83} + ¦ ¦ °--NUM_CONST: 1 [0/0] {82} + ¦ ¦--',': , [0/9] {84} + ¦ ¦--SYMBOL_FORMALS: ayz [1/1] {85} + ¦ ¦--EQ_FORMALS: = [0/1] {86} + ¦ ¦--expr: 99 [0/0] {88} + ¦ ¦ °--NUM_CONST: 99 [0/0] {87} + ¦ ¦--')': ) [0/1] {89} + ¦ °--expr: {} [0/0] {90} + ¦ ¦--'{': { [0/0] {91} + ¦ °--'}': } [0/0] {92} + ¦--COMMENT: # not [3/0] {93} + ¦--expr: k <- [1/0] {94} + ¦ ¦--expr: k [0/1] {96} + ¦ ¦ °--SYMBOL: k [0/0] {95} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {97} + ¦ °--expr: funct [0/0] {98} + ¦ ¦--FUNCTION: funct [0/0] {99} + ¦ ¦--'(': ( [0/0] {100} + ¦ ¦--SYMBOL_FORMALS: x [0/1] {101} + ¦ ¦--EQ_FORMALS: = [0/3] {102} + ¦ ¦--expr: fish [0/0] {104} + ¦ ¦ °--SYMBOL: fish [0/0] {103} + ¦ ¦--',': , [0/14] {105} + ¦ ¦--SYMBOL_FORMALS: aq [1/1] {106} + ¦ ¦--EQ_FORMALS: = [0/5] {107} + ¦ ¦--expr: 21 [0/0] {109} + ¦ ¦ °--NUM_CONST: 21 [0/0] {108} + ¦ ¦--',': , [0/14] {110} + ¦ ¦--SYMBOL_FORMALS: ayz [1/1] {111} + ¦ ¦--EQ_FORMALS: = [0/1] {112} + ¦ ¦--expr: t(322 [0/0] {113} + ¦ ¦ ¦--expr: t [0/0] {115} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: t [0/0] {114} + ¦ ¦ ¦--'(': ( [0/0] {116} + ¦ ¦ ¦--expr: 322 [0/0] {118} + ¦ ¦ ¦ °--NUM_CONST: 322 [0/0] {117} + ¦ ¦ °--')': ) [0/0] {119} + ¦ ¦--')': ) [0/1] {120} + ¦ °--expr: {} [0/0] {121} + ¦ ¦--'{': { [0/0] {122} + ¦ °--'}': } [0/0] {123} + ¦--COMMENT: # ali [2/0] {124} + ¦--expr: k <- [1/0] {125} + ¦ ¦--expr: k [0/1] {127} + ¦ ¦ °--SYMBOL: k [0/0] {126} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {128} + ¦ °--expr: funct [0/0] {129} + ¦ ¦--FUNCTION: funct [0/0] {130} + ¦ ¦--'(': ( [0/0] {131} + ¦ ¦--SYMBOL_FORMALS: x [0/1] {132} + ¦ ¦--EQ_FORMALS: = [0/1] {133} + ¦ ¦--expr: flus( [0/0] {134} + ¦ ¦ ¦--expr: flus [0/0] {136} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: flus [0/0] {135} + ¦ ¦ ¦--'(': ( [0/0] {137} + ¦ ¦ ¦--expr: we [0/0] {139} + ¦ ¦ ¦ °--SYMBOL: we [0/0] {138} + ¦ ¦ °--')': ) [0/0] {140} + ¦ ¦--',': , [0/14] {141} + ¦ ¦--SYMBOL_FORMALS: aq [1/1] {142} + ¦ ¦--EQ_FORMALS: = [0/2] {143} + ¦ ¦--expr: x - 2 [0/0] {144} + ¦ ¦ ¦--expr: x [0/1] {146} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {145} + ¦ ¦ ¦--'-': - [0/1] {147} + ¦ ¦ °--expr: 22 [0/0] {149} + ¦ ¦ °--NUM_CONST: 22 [0/0] {148} + ¦ ¦--',': , [0/1] {150} + ¦ ¦--SYMBOL_FORMALS: k [0/1] {151} + ¦ ¦--EQ_FORMALS: = [0/1] {152} + ¦ ¦--expr: 22 [0/0] {154} + ¦ ¦ °--NUM_CONST: 22 [0/0] {153} + ¦ ¦--',': , [0/14] {155} + ¦ ¦--SYMBOL_FORMALS: ayz [1/1] {156} + ¦ ¦--EQ_FORMALS: = [0/1] {157} + ¦ ¦--expr: m(jk5 [0/0] {158} + ¦ ¦ 
¦--expr: m [0/0] {160} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: m [0/0] {159} + ¦ ¦ ¦--'(': ( [0/0] {161} + ¦ ¦ ¦--expr: jk5 [0/0] {163} + ¦ ¦ ¦ °--SYMBOL: jk5 [0/0] {162} + ¦ ¦ °--')': ) [0/0] {164} + ¦ ¦--',': , [0/1] {165} + ¦ ¦--SYMBOL_FORMALS: xfea [0/1] {166} + ¦ ¦--EQ_FORMALS: = [0/2] {167} + ¦ ¦--expr: 3 [0/0] {169} + ¦ ¦ °--NUM_CONST: 3 [0/0] {168} + ¦ ¦--')': ) [0/1] {170} + ¦ °--expr: {} [0/0] {171} + ¦ ¦--'{': { [0/0] {172} + ¦ °--'}': } [0/0] {173} + ¦--COMMENT: # ali [3/0] {174} + °--expr: k <- [1/0] {175} + ¦--expr: k [0/1] {177} + ¦ °--SYMBOL: k [0/0] {176} + ¦--LEFT_ASSIGN: <- [0/1] {178} + °--expr: funct [0/0] {179} + ¦--FUNCTION: funct [0/0] {180} + ¦--'(': ( [0/0] {181} + ¦--SYMBOL_FORMALS: x [0/1] {182} + ¦--EQ_FORMALS: = [0/1] {183} + ¦--expr: flus( [0/0] {184} + ¦ ¦--expr: flus [0/0] {186} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: flus [0/0] {185} + ¦ ¦--'(': ( [0/0] {187} + ¦ ¦--expr: we [0/0] {189} + ¦ ¦ °--SYMBOL: we [0/0] {188} + ¦ °--')': ) [0/0] {190} + ¦--',': , [0/14] {191} + ¦--SYMBOL_FORMALS: aq [1/1] {192} + ¦--EQ_FORMALS: = [0/2] {193} + ¦--expr: x - 2 [0/0] {194} + ¦ ¦--expr: x [0/1] {196} + ¦ ¦ °--SYMBOL: x [0/0] {195} + ¦ ¦--'-': - [0/1] {197} + ¦ °--expr: 22 [0/0] {199} + ¦ °--NUM_CONST: 22 [0/0] {198} + ¦--',': , [0/4] {200} + ¦--SYMBOL_FORMALS: k [0/1] {201} + ¦--EQ_FORMALS: = [0/1] {202} + ¦--expr: 22 [0/0] {204} + ¦ °--NUM_CONST: 22 [0/0] {203} + ¦--',': , [0/14] {205} + ¦--SYMBOL_FORMALS: ayz [1/1] {206} + ¦--EQ_FORMALS: = [0/1] {207} + ¦--expr: m(jk5 [0/0] {208} + ¦ ¦--expr: m [0/0] {210} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: m [0/0] {209} + ¦ ¦--'(': ( [0/0] {211} + ¦ ¦--expr: jk5 [0/0] {213} + ¦ ¦ °--SYMBOL: jk5 [0/0] {212} + ¦ °--')': ) [0/0] {214} + ¦--',': , [0/1] {215} + ¦--SYMBOL_FORMALS: xfea [0/1] {216} + ¦--EQ_FORMALS: = [0/2] {217} + ¦--expr: 3 [0/0] {219} + ¦ °--NUM_CONST: 3 [0/0] {218} + ¦--')': ) [0/1] {220} + °--expr: {} [0/0] {221} + ¦--'{': { [0/0] {222} + °--'}': } [0/0] {223} diff --git a/tests/testthat/alignment/fun-decs-out.R b/tests/testthat/alignment/fun-decs-out.R new file mode 100644 index 000000000..fcefee559 --- /dev/null +++ b/tests/testthat/alignment/fun-decs-out.R @@ -0,0 +1,38 @@ +# aligned +function(x = NULL, + tt = NULL, + ayz = NULL) {} + + +# aligned +k <- function(x = NULL, + aq = NULL, + ayz = NULL) {} + + +# aligned, eq right +function(x = 2, + tt = 1, + ayz = 99) {} + +# aligned, eq left +function(x = 2, + tt = 1, + ayz = 99) {} + + +# not aligned +k <- function(x = fish, + aq = 21, + ayz = t(322)) {} + +# aligned +k <- function(x = flus(we), + aq = x - 22, k = 22, + ayz = m(jk5), xfea = 3) {} + + +# aligned +k <- function(x = flus(we), + aq = x - 22, k = 22, + ayz = m(jk5), xfea = 3) {} diff --git a/tests/testthat/alignment/named-in.R b/tests/testthat/alignment/named-in.R new file mode 100644 index 000000000..3a4eacf4e --- /dev/null +++ b/tests/testthat/alignment/named-in.R @@ -0,0 +1,223 @@ +# algorithm: aligned. human: aligned. +call( + x = 1, kdd = 2, + xy = 2, n = 33, +) + +# without trailing comma +call( + x = 1, kdd = 2, + xy = 2, n = 33 +) + +# algorithm: aligned. human: aligned. +call( + x = 1, kdd = 2, + xy = 2, n = 33, +) + +# algorithm: aligned. human: aligned. +call( + x = 1, kdd = 2, + xy = 2, n = 33, +) + +# algorithm: not aligned (spacing around =). human: aligned (fix: spacing around =). +call( + x =1, kdd = 2, + xy =2, n = 33, +) + +# algorithm: not aligned. human: not aligned. +call( + x = 1, kdd = 2, + xy = 2, n = 33, +) + +# algorithm: not aligned. human: not aligned. 
+call( + x = 1, kdd = 2, + xy = 22, n = 33, +) + +# algorithm: not aligned. human: not aligned. +call( + x = 1, d = 2, + xy = 22, n = 33, +) + + +# algorithm: aligned. human: aligned. +call( + x = 1, kdd = 2, k = "abc", + xy = 2, n = 33, z = "333" +) + + +# algorithm: aligned. human: aligned. +call( + x = 1, + xy = 2, n = 33, z = "333" +) + +# algorithm: aligned. human: aligned. +call( + x = 1, n = 33, z = "333", + + xy = 2, +) + +# aligned. when spaces are spread accross different nests +call( + k = ff("pk"), k = 3, + b = f(-g), 22 + 1, + 44, 323 +) + +# aligned. when spaces are spread accross different nests +call( + k = ff("pk"), k = 3, + b = f(-g), 22 + 1, + 44, 323, +) + +# no trailing +call( + k = ff("pk"), k = 3, + b = f(-g), 22 + 1, + 44 +) + +# aligned: fewest arguments not on last line +call( + 44, + k = ff("pk"), k = 3, + b = f(-g), 22 + 1, +) + +# aligned: fewest arguments not on last line +call( + k = ff("pk"), k = 3, + 44, + b = f(-g), 22 + 1, +) + + + +# if all col1 arguments are named, col1 must also be aligned +# not aligned +fell( + x = 1, + y = 23, + zz = NULL +) + +# aligned +fell( + x = 1, + y = 23, + zz = NULL +) + +# aligned but comma in the wrong line +call( + a = 2, + bb = 3 +,) + + +# aligned (comments) +call( + a = 2, x = 111, + # another + bb = 3, # hi +) + +# aligned (comments) +call( + a = 2, x = 111, + bb = 3, # hi +) + +# aligned (comments) +call( + # another one + a = 2, x = 111, + bb = 3, # hi +) + +# aligned (comments) +call( + # another one + a = 2, x = 111, + bb = 3 # hi +) + +# not aligned (comments) +call( + a = 2, x = 111, + bb = 3, # hi +) + +# not aligned (comments) +call( + # another one + a = 2, x = 111, + bb = 3, + # hi +) + +# If a call is mult-line, it can't be aligned (also, it would not currently +# not be ideopotent because first bace would be moved up without alignment and +# in the second step, because all arguments are named and there is no alignment, +# the extra spaces before `=` as of 29a010064257fa1a9caf32d182e7ee62008de98a. 
+call( + x = 95232, + y = f( + ), +) + + +# aligned (left after `=`) +ca( + x = 23200, + y2 = "hi", + m = c(rm.na = 7) +) + +# not aligned (left after `=`) +ca( + x = 23200, + y2 = "hi", + m = c(rm.na = 7) +) + +# aligned =, first all named +fell( + x = 8, annoying = 3, + y = 23, # nothing in column 2 for row 2 + zz = NULL, finally = "stuff" +) + +# aligned =, first not all named +gell( + p = 2, g = gg(x), n = 3 * 3, # + 31, fds = -1, gz = f / 3 + 1, +) + +xgle( + 1212, 232, f(n = 2), + 1, 2, "kFlya" +) + +# left aligned after , +call( + x = 2, y = "another", + y = "hhjkjkbew", x = 3 +) + +call( + k = ff("pk"), k = 3, + b = f(-g), 22 + 1, + 44, 323 +) diff --git a/tests/testthat/alignment/named-in_tree b/tests/testthat/alignment/named-in_tree new file mode 100644 index 000000000..2a569ab09 --- /dev/null +++ b/tests/testthat/alignment/named-in_tree @@ -0,0 +1,966 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: # alg [0/0] {1} + ¦--expr: call( [1/0] {2} + ¦ ¦--expr: call [0/0] {4} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {3} + ¦ ¦--'(': ( [0/2] {5} + ¦ ¦--SYMBOL_SUB: x [1/3] {6} + ¦ ¦--EQ_SUB: = [0/1] {7} + ¦ ¦--expr: 1 [0/0] {9} + ¦ ¦ °--NUM_CONST: 1 [0/0] {8} + ¦ ¦--',': , [0/1] {10} + ¦ ¦--SYMBOL_SUB: kdd [0/2] {11} + ¦ ¦--EQ_SUB: = [0/2] {12} + ¦ ¦--expr: 2 [0/0] {14} + ¦ ¦ °--NUM_CONST: 2 [0/0] {13} + ¦ ¦--',': , [0/2] {15} + ¦ ¦--SYMBOL_SUB: xy [1/2] {16} + ¦ ¦--EQ_SUB: = [0/1] {17} + ¦ ¦--expr: 2 [0/0] {19} + ¦ ¦ °--NUM_CONST: 2 [0/0] {18} + ¦ ¦--',': , [0/1] {20} + ¦ ¦--SYMBOL_SUB: n [0/4] {21} + ¦ ¦--EQ_SUB: = [0/1] {22} + ¦ ¦--expr: 33 [0/0] {24} + ¦ ¦ °--NUM_CONST: 33 [0/0] {23} + ¦ ¦--',': , [0/0] {25} + ¦ °--')': ) [1/0] {26} + ¦--COMMENT: # wit [2/0] {27} + ¦--expr: call( [1/0] {28} + ¦ ¦--expr: call [0/0] {30} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {29} + ¦ ¦--'(': ( [0/2] {31} + ¦ ¦--SYMBOL_SUB: x [1/3] {32} + ¦ ¦--EQ_SUB: = [0/1] {33} + ¦ ¦--expr: 1 [0/0] {35} + ¦ ¦ °--NUM_CONST: 1 [0/0] {34} + ¦ ¦--',': , [0/1] {36} + ¦ ¦--SYMBOL_SUB: kdd [0/2] {37} + ¦ ¦--EQ_SUB: = [0/2] {38} + ¦ ¦--expr: 2 [0/0] {40} + ¦ ¦ °--NUM_CONST: 2 [0/0] {39} + ¦ ¦--',': , [0/2] {41} + ¦ ¦--SYMBOL_SUB: xy [1/2] {42} + ¦ ¦--EQ_SUB: = [0/1] {43} + ¦ ¦--expr: 2 [0/0] {45} + ¦ ¦ °--NUM_CONST: 2 [0/0] {44} + ¦ ¦--',': , [0/1] {46} + ¦ ¦--SYMBOL_SUB: n [0/4] {47} + ¦ ¦--EQ_SUB: = [0/1] {48} + ¦ ¦--expr: 33 [0/0] {50} + ¦ ¦ °--NUM_CONST: 33 [0/0] {49} + ¦ °--')': ) [1/0] {51} + ¦--COMMENT: # alg [2/0] {52} + ¦--expr: call( [1/0] {53} + ¦ ¦--expr: call [0/0] {55} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {54} + ¦ ¦--'(': ( [0/2] {56} + ¦ ¦--SYMBOL_SUB: x [1/2] {57} + ¦ ¦--EQ_SUB: = [0/1] {58} + ¦ ¦--expr: 1 [0/0] {60} + ¦ ¦ °--NUM_CONST: 1 [0/0] {59} + ¦ ¦--',': , [0/1] {61} + ¦ ¦--SYMBOL_SUB: kdd [0/2] {62} + ¦ ¦--EQ_SUB: = [0/2] {63} + ¦ ¦--expr: 2 [0/0] {65} + ¦ ¦ °--NUM_CONST: 2 [0/0] {64} + ¦ ¦--',': , [0/2] {66} + ¦ ¦--SYMBOL_SUB: xy [1/1] {67} + ¦ ¦--EQ_SUB: = [0/1] {68} + ¦ ¦--expr: 2 [0/0] {70} + ¦ ¦ °--NUM_CONST: 2 [0/0] {69} + ¦ ¦--',': , [0/1] {71} + ¦ ¦--SYMBOL_SUB: n [0/4] {72} + ¦ ¦--EQ_SUB: = [0/1] {73} + ¦ ¦--expr: 33 [0/0] {75} + ¦ ¦ °--NUM_CONST: 33 [0/0] {74} + ¦ ¦--',': , [0/0] {76} + ¦ °--')': ) [1/0] {77} + ¦--COMMENT: # alg [2/0] {78} + ¦--expr: call( [1/0] {79} + ¦ ¦--expr: call [0/0] {81} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {80} + ¦ ¦--'(': ( [0/2] {82} + ¦ ¦--SYMBOL_SUB: x [1/2] {83} + ¦ ¦--EQ_SUB: = [0/1] {84} + ¦ ¦--expr: 1 [0/0] {86} + ¦ ¦ °--NUM_CONST: 1 [0/0] {85} + ¦ ¦--',': , [0/1] {87} + ¦ ¦--SYMBOL_SUB: kdd [0/2] {88} + ¦ ¦--EQ_SUB: = 
[0/2] {89} + ¦ ¦--expr: 2 [0/0] {91} + ¦ ¦ °--NUM_CONST: 2 [0/0] {90} + ¦ ¦--',': , [0/2] {92} + ¦ ¦--SYMBOL_SUB: xy [1/1] {93} + ¦ ¦--EQ_SUB: = [0/1] {94} + ¦ ¦--expr: 2 [0/0] {96} + ¦ ¦ °--NUM_CONST: 2 [0/0] {95} + ¦ ¦--',': , [0/1] {97} + ¦ ¦--SYMBOL_SUB: n [0/4] {98} + ¦ ¦--EQ_SUB: = [0/1] {99} + ¦ ¦--expr: 33 [0/0] {101} + ¦ ¦ °--NUM_CONST: 33 [0/0] {100} + ¦ ¦--',': , [0/0] {102} + ¦ °--')': ) [1/0] {103} + ¦--COMMENT: # alg [2/0] {104} + ¦--expr: call( [1/0] {105} + ¦ ¦--expr: call [0/0] {107} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {106} + ¦ ¦--'(': ( [0/2] {108} + ¦ ¦--SYMBOL_SUB: x [1/2] {109} + ¦ ¦--EQ_SUB: = [0/0] {110} + ¦ ¦--expr: 1 [0/0] {112} + ¦ ¦ °--NUM_CONST: 1 [0/0] {111} + ¦ ¦--',': , [0/3] {113} + ¦ ¦--SYMBOL_SUB: kdd [0/2] {114} + ¦ ¦--EQ_SUB: = [0/2] {115} + ¦ ¦--expr: 2 [0/0] {117} + ¦ ¦ °--NUM_CONST: 2 [0/0] {116} + ¦ ¦--',': , [0/2] {118} + ¦ ¦--SYMBOL_SUB: xy [1/1] {119} + ¦ ¦--EQ_SUB: = [0/0] {120} + ¦ ¦--expr: 2 [0/0] {122} + ¦ ¦ °--NUM_CONST: 2 [0/0] {121} + ¦ ¦--',': , [0/3] {123} + ¦ ¦--SYMBOL_SUB: n [0/4] {124} + ¦ ¦--EQ_SUB: = [0/1] {125} + ¦ ¦--expr: 33 [0/0] {127} + ¦ ¦ °--NUM_CONST: 33 [0/0] {126} + ¦ ¦--',': , [0/0] {128} + ¦ °--')': ) [1/0] {129} + ¦--COMMENT: # alg [2/0] {130} + ¦--expr: call( [1/0] {131} + ¦ ¦--expr: call [0/0] {133} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {132} + ¦ ¦--'(': ( [0/2] {134} + ¦ ¦--SYMBOL_SUB: x [1/2] {135} + ¦ ¦--EQ_SUB: = [0/1] {136} + ¦ ¦--expr: 1 [0/0] {138} + ¦ ¦ °--NUM_CONST: 1 [0/0] {137} + ¦ ¦--',': , [0/3] {139} + ¦ ¦--SYMBOL_SUB: kdd [0/2] {140} + ¦ ¦--EQ_SUB: = [0/2] {141} + ¦ ¦--expr: 2 [0/0] {143} + ¦ ¦ °--NUM_CONST: 2 [0/0] {142} + ¦ ¦--',': , [0/2] {144} + ¦ ¦--SYMBOL_SUB: xy [1/1] {145} + ¦ ¦--EQ_SUB: = [0/1] {146} + ¦ ¦--expr: 2 [0/0] {148} + ¦ ¦ °--NUM_CONST: 2 [0/0] {147} + ¦ ¦--',': , [0/1] {149} + ¦ ¦--SYMBOL_SUB: n [0/1] {150} + ¦ ¦--EQ_SUB: = [0/1] {151} + ¦ ¦--expr: 33 [0/0] {153} + ¦ ¦ °--NUM_CONST: 33 [0/0] {152} + ¦ ¦--',': , [0/0] {154} + ¦ °--')': ) [1/0] {155} + ¦--COMMENT: # alg [2/0] {156} + ¦--expr: call( [1/0] {157} + ¦ ¦--expr: call [0/0] {159} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {158} + ¦ ¦--'(': ( [0/2] {160} + ¦ ¦--SYMBOL_SUB: x [1/2] {161} + ¦ ¦--EQ_SUB: = [0/2] {162} + ¦ ¦--expr: 1 [0/0] {164} + ¦ ¦ °--NUM_CONST: 1 [0/0] {163} + ¦ ¦--',': , [0/3] {165} + ¦ ¦--SYMBOL_SUB: kdd [0/2] {166} + ¦ ¦--EQ_SUB: = [0/2] {167} + ¦ ¦--expr: 2 [0/0] {169} + ¦ ¦ °--NUM_CONST: 2 [0/0] {168} + ¦ ¦--',': , [0/2] {170} + ¦ ¦--SYMBOL_SUB: xy [1/1] {171} + ¦ ¦--EQ_SUB: = [0/1] {172} + ¦ ¦--expr: 22 [0/0] {174} + ¦ ¦ °--NUM_CONST: 22 [0/0] {173} + ¦ ¦--',': , [0/1] {175} + ¦ ¦--SYMBOL_SUB: n [0/1] {176} + ¦ ¦--EQ_SUB: = [0/1] {177} + ¦ ¦--expr: 33 [0/0] {179} + ¦ ¦ °--NUM_CONST: 33 [0/0] {178} + ¦ ¦--',': , [0/0] {180} + ¦ °--')': ) [1/0] {181} + ¦--COMMENT: # alg [2/0] {182} + ¦--expr: call( [1/0] {183} + ¦ ¦--expr: call [0/0] {185} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {184} + ¦ ¦--'(': ( [0/2] {186} + ¦ ¦--SYMBOL_SUB: x [1/2] {187} + ¦ ¦--EQ_SUB: = [0/1] {188} + ¦ ¦--expr: 1 [0/0] {190} + ¦ ¦ °--NUM_CONST: 1 [0/0] {189} + ¦ ¦--',': , [0/1] {191} + ¦ ¦--SYMBOL_SUB: d [0/1] {192} + ¦ ¦--EQ_SUB: = [0/1] {193} + ¦ ¦--expr: 2 [0/0] {195} + ¦ ¦ °--NUM_CONST: 2 [0/0] {194} + ¦ ¦--',': , [0/2] {196} + ¦ ¦--SYMBOL_SUB: xy [1/1] {197} + ¦ ¦--EQ_SUB: = [0/1] {198} + ¦ ¦--expr: 22 [0/0] {200} + ¦ ¦ °--NUM_CONST: 22 [0/0] {199} + ¦ ¦--',': , [0/1] {201} + ¦ ¦--SYMBOL_SUB: n [0/1] {202} + ¦ ¦--EQ_SUB: = [0/1] {203} + ¦ ¦--expr: 33 [0/0] {205} + ¦ ¦ °--NUM_CONST: 33 [0/0] {204} + ¦ ¦--',': , [0/0] 
{206} + ¦ °--')': ) [1/0] {207} + ¦--COMMENT: # alg [3/0] {208} + ¦--expr: call( [1/0] {209} + ¦ ¦--expr: call [0/0] {211} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {210} + ¦ ¦--'(': ( [0/2] {212} + ¦ ¦--SYMBOL_SUB: x [1/2] {213} + ¦ ¦--EQ_SUB: = [0/1] {214} + ¦ ¦--expr: 1 [0/0] {216} + ¦ ¦ °--NUM_CONST: 1 [0/0] {215} + ¦ ¦--',': , [0/3] {217} + ¦ ¦--SYMBOL_SUB: kdd [0/2] {218} + ¦ ¦--EQ_SUB: = [0/2] {219} + ¦ ¦--expr: 2 [0/0] {221} + ¦ ¦ °--NUM_CONST: 2 [0/0] {220} + ¦ ¦--',': , [0/1] {222} + ¦ ¦--SYMBOL_SUB: k [0/1] {223} + ¦ ¦--EQ_SUB: = [0/1] {224} + ¦ ¦--expr: "abc" [0/0] {226} + ¦ ¦ °--STR_CONST: "abc" [0/0] {225} + ¦ ¦--',': , [0/2] {227} + ¦ ¦--SYMBOL_SUB: xy [1/1] {228} + ¦ ¦--EQ_SUB: = [0/1] {229} + ¦ ¦--expr: 2 [0/0] {231} + ¦ ¦ °--NUM_CONST: 2 [0/0] {230} + ¦ ¦--',': , [0/3] {232} + ¦ ¦--SYMBOL_SUB: n [0/4] {233} + ¦ ¦--EQ_SUB: = [0/1] {234} + ¦ ¦--expr: 33 [0/0] {236} + ¦ ¦ °--NUM_CONST: 33 [0/0] {235} + ¦ ¦--',': , [0/1] {237} + ¦ ¦--SYMBOL_SUB: z [0/1] {238} + ¦ ¦--EQ_SUB: = [0/1] {239} + ¦ ¦--expr: "333" [0/0] {241} + ¦ ¦ °--STR_CONST: "333" [0/0] {240} + ¦ °--')': ) [1/0] {242} + ¦--COMMENT: # alg [3/0] {243} + ¦--expr: call( [1/0] {244} + ¦ ¦--expr: call [0/0] {246} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {245} + ¦ ¦--'(': ( [0/2] {247} + ¦ ¦--SYMBOL_SUB: x [1/2] {248} + ¦ ¦--EQ_SUB: = [0/1] {249} + ¦ ¦--expr: 1 [0/0] {251} + ¦ ¦ °--NUM_CONST: 1 [0/0] {250} + ¦ ¦--',': , [0/2] {252} + ¦ ¦--SYMBOL_SUB: xy [1/1] {253} + ¦ ¦--EQ_SUB: = [0/1] {254} + ¦ ¦--expr: 2 [0/0] {256} + ¦ ¦ °--NUM_CONST: 2 [0/0] {255} + ¦ ¦--',': , [0/1] {257} + ¦ ¦--SYMBOL_SUB: n [0/1] {258} + ¦ ¦--EQ_SUB: = [0/1] {259} + ¦ ¦--expr: 33 [0/0] {261} + ¦ ¦ °--NUM_CONST: 33 [0/0] {260} + ¦ ¦--',': , [0/1] {262} + ¦ ¦--SYMBOL_SUB: z [0/1] {263} + ¦ ¦--EQ_SUB: = [0/1] {264} + ¦ ¦--expr: "333" [0/0] {266} + ¦ ¦ °--STR_CONST: "333" [0/0] {265} + ¦ °--')': ) [1/0] {267} + ¦--COMMENT: # alg [2/0] {268} + ¦--expr: call( [1/0] {269} + ¦ ¦--expr: call [0/0] {271} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {270} + ¦ ¦--'(': ( [0/2] {272} + ¦ ¦--SYMBOL_SUB: x [1/2] {273} + ¦ ¦--EQ_SUB: = [0/1] {274} + ¦ ¦--expr: 1 [0/0] {276} + ¦ ¦ °--NUM_CONST: 1 [0/0] {275} + ¦ ¦--',': , [0/1] {277} + ¦ ¦--SYMBOL_SUB: n [0/1] {278} + ¦ ¦--EQ_SUB: = [0/1] {279} + ¦ ¦--expr: 33 [0/0] {281} + ¦ ¦ °--NUM_CONST: 33 [0/0] {280} + ¦ ¦--',': , [0/1] {282} + ¦ ¦--SYMBOL_SUB: z [0/1] {283} + ¦ ¦--EQ_SUB: = [0/1] {284} + ¦ ¦--expr: "333" [0/0] {286} + ¦ ¦ °--STR_CONST: "333" [0/0] {285} + ¦ ¦--',': , [0/2] {287} + ¦ ¦--SYMBOL_SUB: xy [2/1] {288} + ¦ ¦--EQ_SUB: = [0/1] {289} + ¦ ¦--expr: 2 [0/0] {291} + ¦ ¦ °--NUM_CONST: 2 [0/0] {290} + ¦ ¦--',': , [0/0] {292} + ¦ °--')': ) [1/0] {293} + ¦--COMMENT: # ali [2/0] {294} + ¦--expr: call( [1/0] {295} + ¦ ¦--expr: call [0/0] {297} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {296} + ¦ ¦--'(': ( [0/2] {298} + ¦ ¦--SYMBOL_SUB: k [1/1] {299} + ¦ ¦--EQ_SUB: = [0/2] {300} + ¦ ¦--expr: ff("p [0/0] {301} + ¦ ¦ ¦--expr: ff [0/0] {303} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: ff [0/0] {302} + ¦ ¦ ¦--'(': ( [0/0] {304} + ¦ ¦ ¦--expr: "pk" [0/0] {306} + ¦ ¦ ¦ °--STR_CONST: "pk" [0/0] {305} + ¦ ¦ °--')': ) [0/0] {307} + ¦ ¦--',': , [0/1] {308} + ¦ ¦--SYMBOL_SUB: k [0/2] {309} + ¦ ¦--EQ_SUB: = [0/1] {310} + ¦ ¦--expr: 3 [0/0] {312} + ¦ ¦ °--NUM_CONST: 3 [0/0] {311} + ¦ ¦--',': , [0/2] {313} + ¦ ¦--SYMBOL_SUB: b [1/1] {314} + ¦ ¦--EQ_SUB: = [0/1] {315} + ¦ ¦--expr: f(-g) [0/0] {316} + ¦ ¦ ¦--expr: f [0/0] {318} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: f [0/0] {317} + ¦ ¦ ¦--'(': ( [0/0] {319} + ¦ ¦ ¦--expr: -g [0/0] {320} + ¦ ¦ ¦ 
¦--'-': - [0/0] {321} + ¦ ¦ ¦ °--expr: g [0/0] {323} + ¦ ¦ ¦ °--SYMBOL: g [0/0] {322} + ¦ ¦ °--')': ) [0/0] {324} + ¦ ¦--',': , [0/5] {325} + ¦ ¦--expr: 22 + [0/0] {326} + ¦ ¦ ¦--expr: 22 [0/1] {328} + ¦ ¦ ¦ °--NUM_CONST: 22 [0/0] {327} + ¦ ¦ ¦--'+': + [0/1] {329} + ¦ ¦ °--expr: 1 [0/0] {331} + ¦ ¦ °--NUM_CONST: 1 [0/0] {330} + ¦ ¦--',': , [0/2] {332} + ¦ ¦--expr: 44 [1/0] {334} + ¦ ¦ °--NUM_CONST: 44 [0/0] {333} + ¦ ¦--',': , [0/15] {335} + ¦ ¦--expr: 323 [0/0] {337} + ¦ ¦ °--NUM_CONST: 323 [0/0] {336} + ¦ °--')': ) [1/0] {338} + ¦--COMMENT: # ali [2/0] {339} + ¦--expr: call( [1/0] {340} + ¦ ¦--expr: call [0/0] {342} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {341} + ¦ ¦--'(': ( [0/2] {343} + ¦ ¦--SYMBOL_SUB: k [1/1] {344} + ¦ ¦--EQ_SUB: = [0/2] {345} + ¦ ¦--expr: ff("p [0/0] {346} + ¦ ¦ ¦--expr: ff [0/0] {348} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: ff [0/0] {347} + ¦ ¦ ¦--'(': ( [0/0] {349} + ¦ ¦ ¦--expr: "pk" [0/0] {351} + ¦ ¦ ¦ °--STR_CONST: "pk" [0/0] {350} + ¦ ¦ °--')': ) [0/0] {352} + ¦ ¦--',': , [0/1] {353} + ¦ ¦--SYMBOL_SUB: k [0/2] {354} + ¦ ¦--EQ_SUB: = [0/1] {355} + ¦ ¦--expr: 3 [0/0] {357} + ¦ ¦ °--NUM_CONST: 3 [0/0] {356} + ¦ ¦--',': , [0/2] {358} + ¦ ¦--SYMBOL_SUB: b [1/1] {359} + ¦ ¦--EQ_SUB: = [0/1] {360} + ¦ ¦--expr: f(-g) [0/0] {361} + ¦ ¦ ¦--expr: f [0/0] {363} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: f [0/0] {362} + ¦ ¦ ¦--'(': ( [0/0] {364} + ¦ ¦ ¦--expr: -g [0/0] {365} + ¦ ¦ ¦ ¦--'-': - [0/0] {366} + ¦ ¦ ¦ °--expr: g [0/0] {368} + ¦ ¦ ¦ °--SYMBOL: g [0/0] {367} + ¦ ¦ °--')': ) [0/0] {369} + ¦ ¦--',': , [0/5] {370} + ¦ ¦--expr: 22 + [0/0] {371} + ¦ ¦ ¦--expr: 22 [0/1] {373} + ¦ ¦ ¦ °--NUM_CONST: 22 [0/0] {372} + ¦ ¦ ¦--'+': + [0/1] {374} + ¦ ¦ °--expr: 1 [0/0] {376} + ¦ ¦ °--NUM_CONST: 1 [0/0] {375} + ¦ ¦--',': , [0/2] {377} + ¦ ¦--expr: 44 [1/0] {379} + ¦ ¦ °--NUM_CONST: 44 [0/0] {378} + ¦ ¦--',': , [0/15] {380} + ¦ ¦--expr: 323 [0/0] {382} + ¦ ¦ °--NUM_CONST: 323 [0/0] {381} + ¦ ¦--',': , [0/0] {383} + ¦ °--')': ) [1/0] {384} + ¦--COMMENT: # no [2/0] {385} + ¦--expr: call( [1/0] {386} + ¦ ¦--expr: call [0/0] {388} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {387} + ¦ ¦--'(': ( [0/2] {389} + ¦ ¦--SYMBOL_SUB: k [1/1] {390} + ¦ ¦--EQ_SUB: = [0/2] {391} + ¦ ¦--expr: ff("p [0/0] {392} + ¦ ¦ ¦--expr: ff [0/0] {394} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: ff [0/0] {393} + ¦ ¦ ¦--'(': ( [0/0] {395} + ¦ ¦ ¦--expr: "pk" [0/0] {397} + ¦ ¦ ¦ °--STR_CONST: "pk" [0/0] {396} + ¦ ¦ °--')': ) [0/0] {398} + ¦ ¦--',': , [0/1] {399} + ¦ ¦--SYMBOL_SUB: k [0/2] {400} + ¦ ¦--EQ_SUB: = [0/1] {401} + ¦ ¦--expr: 3 [0/0] {403} + ¦ ¦ °--NUM_CONST: 3 [0/0] {402} + ¦ ¦--',': , [0/2] {404} + ¦ ¦--SYMBOL_SUB: b [1/1] {405} + ¦ ¦--EQ_SUB: = [0/1] {406} + ¦ ¦--expr: f(-g) [0/0] {407} + ¦ ¦ ¦--expr: f [0/0] {409} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: f [0/0] {408} + ¦ ¦ ¦--'(': ( [0/0] {410} + ¦ ¦ ¦--expr: -g [0/0] {411} + ¦ ¦ ¦ ¦--'-': - [0/0] {412} + ¦ ¦ ¦ °--expr: g [0/0] {414} + ¦ ¦ ¦ °--SYMBOL: g [0/0] {413} + ¦ ¦ °--')': ) [0/0] {415} + ¦ ¦--',': , [0/5] {416} + ¦ ¦--expr: 22 + [0/0] {417} + ¦ ¦ ¦--expr: 22 [0/1] {419} + ¦ ¦ ¦ °--NUM_CONST: 22 [0/0] {418} + ¦ ¦ ¦--'+': + [0/1] {420} + ¦ ¦ °--expr: 1 [0/0] {422} + ¦ ¦ °--NUM_CONST: 1 [0/0] {421} + ¦ ¦--',': , [0/2] {423} + ¦ ¦--expr: 44 [1/0] {425} + ¦ ¦ °--NUM_CONST: 44 [0/0] {424} + ¦ °--')': ) [1/0] {426} + ¦--COMMENT: # ali [2/0] {427} + ¦--expr: call( [1/0] {428} + ¦ ¦--expr: call [0/0] {430} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {429} + ¦ ¦--'(': ( [0/2] {431} + ¦ ¦--expr: 44 [1/0] {433} + ¦ ¦ °--NUM_CONST: 44 [0/0] {432} + ¦ ¦--',': , [0/2] {434} + ¦ 
¦--SYMBOL_SUB: k [1/1] {435} + ¦ ¦--EQ_SUB: = [0/2] {436} + ¦ ¦--expr: ff("p [0/0] {437} + ¦ ¦ ¦--expr: ff [0/0] {439} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: ff [0/0] {438} + ¦ ¦ ¦--'(': ( [0/0] {440} + ¦ ¦ ¦--expr: "pk" [0/0] {442} + ¦ ¦ ¦ °--STR_CONST: "pk" [0/0] {441} + ¦ ¦ °--')': ) [0/0] {443} + ¦ ¦--',': , [0/1] {444} + ¦ ¦--SYMBOL_SUB: k [0/2] {445} + ¦ ¦--EQ_SUB: = [0/1] {446} + ¦ ¦--expr: 3 [0/0] {448} + ¦ ¦ °--NUM_CONST: 3 [0/0] {447} + ¦ ¦--',': , [0/2] {449} + ¦ ¦--SYMBOL_SUB: b [1/1] {450} + ¦ ¦--EQ_SUB: = [0/1] {451} + ¦ ¦--expr: f(-g) [0/0] {452} + ¦ ¦ ¦--expr: f [0/0] {454} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: f [0/0] {453} + ¦ ¦ ¦--'(': ( [0/0] {455} + ¦ ¦ ¦--expr: -g [0/0] {456} + ¦ ¦ ¦ ¦--'-': - [0/0] {457} + ¦ ¦ ¦ °--expr: g [0/0] {459} + ¦ ¦ ¦ °--SYMBOL: g [0/0] {458} + ¦ ¦ °--')': ) [0/0] {460} + ¦ ¦--',': , [0/5] {461} + ¦ ¦--expr: 22 + [0/0] {462} + ¦ ¦ ¦--expr: 22 [0/1] {464} + ¦ ¦ ¦ °--NUM_CONST: 22 [0/0] {463} + ¦ ¦ ¦--'+': + [0/1] {465} + ¦ ¦ °--expr: 1 [0/0] {467} + ¦ ¦ °--NUM_CONST: 1 [0/0] {466} + ¦ ¦--',': , [0/0] {468} + ¦ °--')': ) [1/0] {469} + ¦--COMMENT: # ali [2/0] {470} + ¦--expr: call( [1/0] {471} + ¦ ¦--expr: call [0/0] {473} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {472} + ¦ ¦--'(': ( [0/2] {474} + ¦ ¦--SYMBOL_SUB: k [1/1] {475} + ¦ ¦--EQ_SUB: = [0/2] {476} + ¦ ¦--expr: ff("p [0/0] {477} + ¦ ¦ ¦--expr: ff [0/0] {479} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: ff [0/0] {478} + ¦ ¦ ¦--'(': ( [0/0] {480} + ¦ ¦ ¦--expr: "pk" [0/0] {482} + ¦ ¦ ¦ °--STR_CONST: "pk" [0/0] {481} + ¦ ¦ °--')': ) [0/0] {483} + ¦ ¦--',': , [0/1] {484} + ¦ ¦--SYMBOL_SUB: k [0/2] {485} + ¦ ¦--EQ_SUB: = [0/1] {486} + ¦ ¦--expr: 3 [0/0] {488} + ¦ ¦ °--NUM_CONST: 3 [0/0] {487} + ¦ ¦--',': , [0/2] {489} + ¦ ¦--expr: 44 [1/0] {491} + ¦ ¦ °--NUM_CONST: 44 [0/0] {490} + ¦ ¦--',': , [0/2] {492} + ¦ ¦--SYMBOL_SUB: b [1/1] {493} + ¦ ¦--EQ_SUB: = [0/1] {494} + ¦ ¦--expr: f(-g) [0/0] {495} + ¦ ¦ ¦--expr: f [0/0] {497} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: f [0/0] {496} + ¦ ¦ ¦--'(': ( [0/0] {498} + ¦ ¦ ¦--expr: -g [0/0] {499} + ¦ ¦ ¦ ¦--'-': - [0/0] {500} + ¦ ¦ ¦ °--expr: g [0/0] {502} + ¦ ¦ ¦ °--SYMBOL: g [0/0] {501} + ¦ ¦ °--')': ) [0/0] {503} + ¦ ¦--',': , [0/5] {504} + ¦ ¦--expr: 22 + [0/0] {505} + ¦ ¦ ¦--expr: 22 [0/1] {507} + ¦ ¦ ¦ °--NUM_CONST: 22 [0/0] {506} + ¦ ¦ ¦--'+': + [0/1] {508} + ¦ ¦ °--expr: 1 [0/0] {510} + ¦ ¦ °--NUM_CONST: 1 [0/0] {509} + ¦ ¦--',': , [0/0] {511} + ¦ °--')': ) [1/0] {512} + ¦--COMMENT: # if [4/0] {513} + ¦--COMMENT: # not [1/0] {514} + ¦--expr: fell( [1/0] {515} + ¦ ¦--expr: fell [0/0] {517} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: fell [0/0] {516} + ¦ ¦--'(': ( [0/2] {518} + ¦ ¦--SYMBOL_SUB: x [1/1] {519} + ¦ ¦--EQ_SUB: = [0/3] {520} + ¦ ¦--expr: 1 [0/0] {522} + ¦ ¦ °--NUM_CONST: 1 [0/0] {521} + ¦ ¦--',': , [0/2] {523} + ¦ ¦--SYMBOL_SUB: y [1/1] {524} + ¦ ¦--EQ_SUB: = [0/2] {525} + ¦ ¦--expr: 23 [0/0] {527} + ¦ ¦ °--NUM_CONST: 23 [0/0] {526} + ¦ ¦--',': , [0/2] {528} + ¦ ¦--SYMBOL_SUB: zz [1/1] {529} + ¦ ¦--EQ_SUB: = [0/1] {530} + ¦ ¦--expr: NULL [0/0] {532} + ¦ ¦ °--NULL_CONST: NULL [0/0] {531} + ¦ °--')': ) [1/0] {533} + ¦--COMMENT: # ali [2/0] {534} + ¦--expr: fell( [1/0] {535} + ¦ ¦--expr: fell [0/0] {537} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: fell [0/0] {536} + ¦ ¦--'(': ( [0/2] {538} + ¦ ¦--SYMBOL_SUB: x [1/1] {539} + ¦ ¦--EQ_SUB: = [0/5] {540} + ¦ ¦--expr: 1 [0/0] {542} + ¦ ¦ °--NUM_CONST: 1 [0/0] {541} + ¦ ¦--',': , [0/2] {543} + ¦ ¦--SYMBOL_SUB: y [1/1] {544} + ¦ ¦--EQ_SUB: = [0/4] {545} + ¦ ¦--expr: 23 [0/0] {547} + ¦ ¦ °--NUM_CONST: 23 [0/0] {546} + ¦ ¦--',': , [0/2] {548} + ¦ 
¦--SYMBOL_SUB: zz [1/1] {549} + ¦ ¦--EQ_SUB: = [0/1] {550} + ¦ ¦--expr: NULL [0/0] {552} + ¦ ¦ °--NULL_CONST: NULL [0/0] {551} + ¦ °--')': ) [1/0] {553} + ¦--COMMENT: # ali [2/0] {554} + ¦--expr: call( [1/0] {555} + ¦ ¦--expr: call [0/0] {557} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {556} + ¦ ¦--'(': ( [0/2] {558} + ¦ ¦--SYMBOL_SUB: a [1/2] {559} + ¦ ¦--EQ_SUB: = [0/2] {560} + ¦ ¦--expr: 2 [0/0] {562} + ¦ ¦ °--NUM_CONST: 2 [0/0] {561} + ¦ ¦--',': , [0/2] {563} + ¦ ¦--SYMBOL_SUB: bb [1/1] {564} + ¦ ¦--EQ_SUB: = [0/2] {565} + ¦ ¦--expr: 3 [0/0] {567} + ¦ ¦ °--NUM_CONST: 3 [0/0] {566} + ¦ ¦--',': , [1/0] {568} + ¦ °--')': ) [0/0] {569} + ¦--COMMENT: # ali [3/0] {570} + ¦--expr: call( [1/0] {571} + ¦ ¦--expr: call [0/0] {573} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {572} + ¦ ¦--'(': ( [0/2] {574} + ¦ ¦--SYMBOL_SUB: a [1/2] {575} + ¦ ¦--EQ_SUB: = [0/2] {576} + ¦ ¦--expr: 2 [0/0] {578} + ¦ ¦ °--NUM_CONST: 2 [0/0] {577} + ¦ ¦--',': , [0/1] {579} + ¦ ¦--SYMBOL_SUB: x [0/1] {580} + ¦ ¦--EQ_SUB: = [0/1] {581} + ¦ ¦--expr: 111 [0/0] {583} + ¦ ¦ °--NUM_CONST: 111 [0/0] {582} + ¦ ¦--',': , [0/2] {584} + ¦ ¦--COMMENT: # ano [1/2] {585} + ¦ ¦--SYMBOL_SUB: bb [1/1] {586} + ¦ ¦--EQ_SUB: = [0/2] {587} + ¦ ¦--expr: 3 [0/0] {589} + ¦ ¦ °--NUM_CONST: 3 [0/0] {588} + ¦ ¦--',': , [0/1] {590} + ¦ ¦--COMMENT: # hi [0/0] {591} + ¦ °--')': ) [1/0] {592} + ¦--COMMENT: # ali [2/0] {593} + ¦--expr: call( [1/0] {594} + ¦ ¦--expr: call [0/0] {596} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {595} + ¦ ¦--'(': ( [0/2] {597} + ¦ ¦--SYMBOL_SUB: a [1/2] {598} + ¦ ¦--EQ_SUB: = [0/2] {599} + ¦ ¦--expr: 2 [0/0] {601} + ¦ ¦ °--NUM_CONST: 2 [0/0] {600} + ¦ ¦--',': , [0/1] {602} + ¦ ¦--SYMBOL_SUB: x [0/1] {603} + ¦ ¦--EQ_SUB: = [0/1] {604} + ¦ ¦--expr: 111 [0/0] {606} + ¦ ¦ °--NUM_CONST: 111 [0/0] {605} + ¦ ¦--',': , [0/2] {607} + ¦ ¦--SYMBOL_SUB: bb [1/1] {608} + ¦ ¦--EQ_SUB: = [0/2] {609} + ¦ ¦--expr: 3 [0/0] {611} + ¦ ¦ °--NUM_CONST: 3 [0/0] {610} + ¦ ¦--',': , [0/1] {612} + ¦ ¦--COMMENT: # hi [0/0] {613} + ¦ °--')': ) [1/0] {614} + ¦--COMMENT: # ali [2/0] {615} + ¦--expr: call( [1/0] {616} + ¦ ¦--expr: call [0/0] {618} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {617} + ¦ ¦--'(': ( [0/2] {619} + ¦ ¦--COMMENT: # ano [1/2] {620} + ¦ ¦--SYMBOL_SUB: a [1/2] {621} + ¦ ¦--EQ_SUB: = [0/2] {622} + ¦ ¦--expr: 2 [0/0] {624} + ¦ ¦ °--NUM_CONST: 2 [0/0] {623} + ¦ ¦--',': , [0/1] {625} + ¦ ¦--SYMBOL_SUB: x [0/1] {626} + ¦ ¦--EQ_SUB: = [0/1] {627} + ¦ ¦--expr: 111 [0/0] {629} + ¦ ¦ °--NUM_CONST: 111 [0/0] {628} + ¦ ¦--',': , [0/2] {630} + ¦ ¦--SYMBOL_SUB: bb [1/1] {631} + ¦ ¦--EQ_SUB: = [0/2] {632} + ¦ ¦--expr: 3 [0/0] {634} + ¦ ¦ °--NUM_CONST: 3 [0/0] {633} + ¦ ¦--',': , [0/1] {635} + ¦ ¦--COMMENT: # hi [0/0] {636} + ¦ °--')': ) [1/0] {637} + ¦--COMMENT: # ali [2/0] {638} + ¦--expr: call( [1/0] {639} + ¦ ¦--expr: call [0/0] {641} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {640} + ¦ ¦--'(': ( [0/2] {642} + ¦ ¦--COMMENT: # ano [1/2] {643} + ¦ ¦--SYMBOL_SUB: a [1/2] {644} + ¦ ¦--EQ_SUB: = [0/2] {645} + ¦ ¦--expr: 2 [0/0] {647} + ¦ ¦ °--NUM_CONST: 2 [0/0] {646} + ¦ ¦--',': , [0/1] {648} + ¦ ¦--SYMBOL_SUB: x [0/1] {649} + ¦ ¦--EQ_SUB: = [0/1] {650} + ¦ ¦--expr: 111 [0/0] {652} + ¦ ¦ °--NUM_CONST: 111 [0/0] {651} + ¦ ¦--',': , [0/2] {653} + ¦ ¦--SYMBOL_SUB: bb [1/1] {654} + ¦ ¦--EQ_SUB: = [0/2] {655} + ¦ ¦--expr: 3 [0/1] {657} + ¦ ¦ °--NUM_CONST: 3 [0/0] {656} + ¦ ¦--COMMENT: # hi [0/0] {658} + ¦ °--')': ) [1/0] {659} + ¦--COMMENT: # not [2/0] {660} + ¦--expr: call( [1/0] {661} + ¦ ¦--expr: call [0/0] {663} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call 
[0/0] {662} + ¦ ¦--'(': ( [0/2] {664} + ¦ ¦--SYMBOL_SUB: a [1/1] {665} + ¦ ¦--EQ_SUB: = [0/2] {666} + ¦ ¦--expr: 2 [0/0] {668} + ¦ ¦ °--NUM_CONST: 2 [0/0] {667} + ¦ ¦--',': , [0/1] {669} + ¦ ¦--SYMBOL_SUB: x [0/1] {670} + ¦ ¦--EQ_SUB: = [0/1] {671} + ¦ ¦--expr: 111 [0/0] {673} + ¦ ¦ °--NUM_CONST: 111 [0/0] {672} + ¦ ¦--',': , [0/2] {674} + ¦ ¦--SYMBOL_SUB: bb [1/1] {675} + ¦ ¦--EQ_SUB: = [0/2] {676} + ¦ ¦--expr: 3 [0/0] {678} + ¦ ¦ °--NUM_CONST: 3 [0/0] {677} + ¦ ¦--',': , [0/1] {679} + ¦ ¦--COMMENT: # hi [0/0] {680} + ¦ °--')': ) [1/0] {681} + ¦--COMMENT: # not [2/0] {682} + ¦--expr: call( [1/0] {683} + ¦ ¦--expr: call [0/0] {685} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {684} + ¦ ¦--'(': ( [0/2] {686} + ¦ ¦--COMMENT: # ano [1/2] {687} + ¦ ¦--SYMBOL_SUB: a [1/1] {688} + ¦ ¦--EQ_SUB: = [0/2] {689} + ¦ ¦--expr: 2 [0/0] {691} + ¦ ¦ °--NUM_CONST: 2 [0/0] {690} + ¦ ¦--',': , [0/1] {692} + ¦ ¦--SYMBOL_SUB: x [0/1] {693} + ¦ ¦--EQ_SUB: = [0/1] {694} + ¦ ¦--expr: 111 [0/0] {696} + ¦ ¦ °--NUM_CONST: 111 [0/0] {695} + ¦ ¦--',': , [0/2] {697} + ¦ ¦--SYMBOL_SUB: bb [1/1] {698} + ¦ ¦--EQ_SUB: = [0/2] {699} + ¦ ¦--expr: 3 [0/0] {701} + ¦ ¦ °--NUM_CONST: 3 [0/0] {700} + ¦ ¦--',': , [0/2] {702} + ¦ ¦--COMMENT: # hi [1/0] {703} + ¦ °--')': ) [1/0] {704} + ¦--COMMENT: # If [2/0] {705} + ¦--COMMENT: # not [1/0] {706} + ¦--COMMENT: # in [1/0] {707} + ¦--COMMENT: # the [1/0] {708} + ¦--expr: call( [1/0] {709} + ¦ ¦--expr: call [0/0] {711} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {710} + ¦ ¦--'(': ( [0/2] {712} + ¦ ¦--SYMBOL_SUB: x [1/2] {713} + ¦ ¦--EQ_SUB: = [0/1] {714} + ¦ ¦--expr: 95232 [0/0] {716} + ¦ ¦ °--NUM_CONST: 95232 [0/0] {715} + ¦ ¦--',': , [0/2] {717} + ¦ ¦--SYMBOL_SUB: y [1/2] {718} + ¦ ¦--EQ_SUB: = [0/1] {719} + ¦ ¦--expr: f( + [0/0] {720} + ¦ ¦ ¦--expr: f [0/0] {722} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: f [0/0] {721} + ¦ ¦ ¦--'(': ( [0/2] {723} + ¦ ¦ °--')': ) [1/0] {724} + ¦ ¦--',': , [0/0] {725} + ¦ °--')': ) [1/0] {726} + ¦--COMMENT: # ali [3/0] {727} + ¦--expr: ca( + [1/0] {728} + ¦ ¦--expr: ca [0/0] {730} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: ca [0/0] {729} + ¦ ¦--'(': ( [0/2] {731} + ¦ ¦--SYMBOL_SUB: x [1/2] {732} + ¦ ¦--EQ_SUB: = [0/1] {733} + ¦ ¦--expr: 23200 [0/0] {735} + ¦ ¦ °--NUM_CONST: 23200 [0/0] {734} + ¦ ¦--',': , [0/2] {736} + ¦ ¦--SYMBOL_SUB: y2 [1/1] {737} + ¦ ¦--EQ_SUB: = [0/1] {738} + ¦ ¦--expr: "hi" [0/0] {740} + ¦ ¦ °--STR_CONST: "hi" [0/0] {739} + ¦ ¦--',': , [0/2] {741} + ¦ ¦--SYMBOL_SUB: m [1/2] {742} + ¦ ¦--EQ_SUB: = [0/1] {743} + ¦ ¦--expr: c(rm. [0/0] {744} + ¦ ¦ ¦--expr: c [0/0] {746} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {745} + ¦ ¦ ¦--'(': ( [0/0] {747} + ¦ ¦ ¦--SYMBOL_SUB: rm.na [0/1] {748} + ¦ ¦ ¦--EQ_SUB: = [0/1] {749} + ¦ ¦ ¦--expr: 7 [0/0] {751} + ¦ ¦ ¦ °--NUM_CONST: 7 [0/0] {750} + ¦ ¦ °--')': ) [0/0] {752} + ¦ °--')': ) [1/0] {753} + ¦--COMMENT: # not [2/0] {754} + ¦--expr: ca( + [1/0] {755} + ¦ ¦--expr: ca [0/0] {757} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: ca [0/0] {756} + ¦ ¦--'(': ( [0/2] {758} + ¦ ¦--SYMBOL_SUB: x [1/2] {759} + ¦ ¦--EQ_SUB: = [0/1] {760} + ¦ ¦--expr: 23200 [0/0] {762} + ¦ ¦ °--NUM_CONST: 23200 [0/0] {761} + ¦ ¦--',': , [0/2] {763} + ¦ ¦--SYMBOL_SUB: y2 [1/1] {764} + ¦ ¦--EQ_SUB: = [0/1] {765} + ¦ ¦--expr: "hi" [0/0] {767} + ¦ ¦ °--STR_CONST: "hi" [0/0] {766} + ¦ ¦--',': , [0/2] {768} + ¦ ¦--SYMBOL_SUB: m [1/2] {769} + ¦ ¦--EQ_SUB: = [0/2] {770} + ¦ ¦--expr: c(rm. 
[0/0] {771} + ¦ ¦ ¦--expr: c [0/0] {773} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {772} + ¦ ¦ ¦--'(': ( [0/0] {774} + ¦ ¦ ¦--SYMBOL_SUB: rm.na [0/1] {775} + ¦ ¦ ¦--EQ_SUB: = [0/1] {776} + ¦ ¦ ¦--expr: 7 [0/0] {778} + ¦ ¦ ¦ °--NUM_CONST: 7 [0/0] {777} + ¦ ¦ °--')': ) [0/0] {779} + ¦ °--')': ) [1/0] {780} + ¦--COMMENT: # ali [2/0] {781} + ¦--expr: fell( [1/0] {782} + ¦ ¦--expr: fell [0/0] {784} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: fell [0/0] {783} + ¦ ¦--'(': ( [0/2] {785} + ¦ ¦--SYMBOL_SUB: x [1/2] {786} + ¦ ¦--EQ_SUB: = [0/1] {787} + ¦ ¦--expr: 8 [0/0] {789} + ¦ ¦ °--NUM_CONST: 8 [0/0] {788} + ¦ ¦--',': , [0/1] {790} + ¦ ¦--SYMBOL_SUB: annoy [0/3] {791} + ¦ ¦--EQ_SUB: = [0/1] {792} + ¦ ¦--expr: 3 [0/0] {794} + ¦ ¦ °--NUM_CONST: 3 [0/0] {793} + ¦ ¦--',': , [0/2] {795} + ¦ ¦--SYMBOL_SUB: y [1/2] {796} + ¦ ¦--EQ_SUB: = [0/1] {797} + ¦ ¦--expr: 23 [0/0] {799} + ¦ ¦ °--NUM_CONST: 23 [0/0] {798} + ¦ ¦--',': , [0/1] {800} + ¦ ¦--COMMENT: # not [0/2] {801} + ¦ ¦--SYMBOL_SUB: zz [1/1] {802} + ¦ ¦--EQ_SUB: = [0/1] {803} + ¦ ¦--expr: NULL [0/0] {805} + ¦ ¦ °--NULL_CONST: NULL [0/0] {804} + ¦ ¦--',': , [0/1] {806} + ¦ ¦--SYMBOL_SUB: final [0/1] {807} + ¦ ¦--EQ_SUB: = [0/1] {808} + ¦ ¦--expr: "stuf [0/0] {810} + ¦ ¦ °--STR_CONST: "stuf [0/0] {809} + ¦ °--')': ) [1/0] {811} + ¦--COMMENT: # ali [2/0] {812} + ¦--expr: gell( [1/0] {813} + ¦ ¦--expr: gell [0/0] {815} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: gell [0/0] {814} + ¦ ¦--'(': ( [0/2] {816} + ¦ ¦--SYMBOL_SUB: p [1/1] {817} + ¦ ¦--EQ_SUB: = [0/1] {818} + ¦ ¦--expr: 2 [0/0] {820} + ¦ ¦ °--NUM_CONST: 2 [0/0] {819} + ¦ ¦--',': , [0/3] {821} + ¦ ¦--SYMBOL_SUB: g [0/1] {822} + ¦ ¦--EQ_SUB: = [0/1] {823} + ¦ ¦--expr: gg(x) [0/0] {824} + ¦ ¦ ¦--expr: gg [0/0] {826} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: gg [0/0] {825} + ¦ ¦ ¦--'(': ( [0/0] {827} + ¦ ¦ ¦--expr: x [0/0] {829} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {828} + ¦ ¦ °--')': ) [0/0] {830} + ¦ ¦--',': , [0/1] {831} + ¦ ¦--SYMBOL_SUB: n [0/1] {832} + ¦ ¦--EQ_SUB: = [0/1] {833} + ¦ ¦--expr: 3 * 3 [0/0] {834} + ¦ ¦ ¦--expr: 3 [0/1] {836} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {835} + ¦ ¦ ¦--'*': * [0/1] {837} + ¦ ¦ °--expr: 3 [0/0] {839} + ¦ ¦ °--NUM_CONST: 3 [0/0] {838} + ¦ ¦--',': , [0/1] {840} + ¦ ¦--COMMENT: # [0/2] {841} + ¦ ¦--expr: 31 [1/0] {843} + ¦ ¦ °--NUM_CONST: 31 [0/0] {842} + ¦ ¦--',': , [0/4] {844} + ¦ ¦--SYMBOL_SUB: fds [0/1] {845} + ¦ ¦--EQ_SUB: = [0/1] {846} + ¦ ¦--expr: -1 [0/0] {847} + ¦ ¦ ¦--'-': - [0/0] {848} + ¦ ¦ °--expr: 1 [0/0] {850} + ¦ ¦ °--NUM_CONST: 1 [0/0] {849} + ¦ ¦--',': , [0/1] {851} + ¦ ¦--SYMBOL_SUB: gz [0/3] {852} + ¦ ¦--EQ_SUB: = [0/1] {853} + ¦ ¦--expr: f / 3 [0/0] {854} + ¦ ¦ ¦--expr: f [0/1] {857} + ¦ ¦ ¦ °--SYMBOL: f [0/0] {856} + ¦ ¦ ¦--'/': / [0/1] {858} + ¦ ¦ ¦--expr: 3 [0/1] {860} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {859} + ¦ ¦ ¦--'+': + [0/1] {861} + ¦ ¦ °--expr: 1 [0/0] {863} + ¦ ¦ °--NUM_CONST: 1 [0/0] {862} + ¦ ¦--',': , [0/0] {864} + ¦ °--')': ) [1/0] {865} + ¦--expr: xgle( [2/0] {866} + ¦ ¦--expr: xgle [0/0] {868} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: xgle [0/0] {867} + ¦ ¦--'(': ( [0/2] {869} + ¦ ¦--expr: 1212 [1/0] {871} + ¦ ¦ °--NUM_CONST: 1212 [0/0] {870} + ¦ ¦--',': , [0/1] {872} + ¦ ¦--expr: 232 [0/0] {874} + ¦ ¦ °--NUM_CONST: 232 [0/0] {873} + ¦ ¦--',': , [0/1] {875} + ¦ ¦--expr: f(n = [0/0] {876} + ¦ ¦ ¦--expr: f [0/0] {878} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: f [0/0] {877} + ¦ ¦ ¦--'(': ( [0/0] {879} + ¦ ¦ ¦--SYMBOL_SUB: n [0/1] {880} + ¦ ¦ ¦--EQ_SUB: = [0/1] {881} + ¦ ¦ ¦--expr: 2 [0/0] {883} + ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {882} + ¦ ¦ °--')': ) [0/0] {884} + ¦ ¦--',': , [0/2] {885} + ¦ ¦--expr: 1 
[1/0] {887} + ¦ ¦ °--NUM_CONST: 1 [0/0] {886} + ¦ ¦--',': , [0/6] {888} + ¦ ¦--expr: 2 [0/0] {890} + ¦ ¦ °--NUM_CONST: 2 [0/0] {889} + ¦ ¦--',': , [0/2] {891} + ¦ ¦--expr: "kFly [0/0] {893} + ¦ ¦ °--STR_CONST: "kFly [0/0] {892} + ¦ °--')': ) [1/0] {894} + ¦--COMMENT: # lef [2/0] {895} + ¦--expr: call( [1/0] {896} + ¦ ¦--expr: call [0/0] {898} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {897} + ¦ ¦--'(': ( [0/2] {899} + ¦ ¦--SYMBOL_SUB: x [1/1] {900} + ¦ ¦--EQ_SUB: = [0/1] {901} + ¦ ¦--expr: 2 [0/0] {903} + ¦ ¦ °--NUM_CONST: 2 [0/0] {902} + ¦ ¦--',': , [0/11] {904} + ¦ ¦--SYMBOL_SUB: y [0/1] {905} + ¦ ¦--EQ_SUB: = [0/1] {906} + ¦ ¦--expr: "anot [0/0] {908} + ¦ ¦ °--STR_CONST: "anot [0/0] {907} + ¦ ¦--',': , [0/2] {909} + ¦ ¦--SYMBOL_SUB: y [1/1] {910} + ¦ ¦--EQ_SUB: = [0/1] {911} + ¦ ¦--expr: "hhjk [0/0] {913} + ¦ ¦ °--STR_CONST: "hhjk [0/0] {912} + ¦ ¦--',': , [0/1] {914} + ¦ ¦--SYMBOL_SUB: x [0/1] {915} + ¦ ¦--EQ_SUB: = [0/1] {916} + ¦ ¦--expr: 3 [0/0] {918} + ¦ ¦ °--NUM_CONST: 3 [0/0] {917} + ¦ °--')': ) [1/0] {919} + °--expr: call( [2/0] {920} + ¦--expr: call [0/0] {922} + ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {921} + ¦--'(': ( [0/2] {923} + ¦--SYMBOL_SUB: k [1/1] {924} + ¦--EQ_SUB: = [0/1] {925} + ¦--expr: ff("p [0/0] {926} + ¦ ¦--expr: ff [0/0] {928} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: ff [0/0] {927} + ¦ ¦--'(': ( [0/0] {929} + ¦ ¦--expr: "pk" [0/0] {931} + ¦ ¦ °--STR_CONST: "pk" [0/0] {930} + ¦ °--')': ) [0/0] {932} + ¦--',': , [0/1] {933} + ¦--SYMBOL_SUB: k [0/1] {934} + ¦--EQ_SUB: = [0/1] {935} + ¦--expr: 3 [0/0] {937} + ¦ °--NUM_CONST: 3 [0/0] {936} + ¦--',': , [0/2] {938} + ¦--SYMBOL_SUB: b [1/1] {939} + ¦--EQ_SUB: = [0/1] {940} + ¦--expr: f(-g) [0/0] {941} + ¦ ¦--expr: f [0/0] {943} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: f [0/0] {942} + ¦ ¦--'(': ( [0/0] {944} + ¦ ¦--expr: -g [0/0] {945} + ¦ ¦ ¦--'-': - [0/0] {946} + ¦ ¦ °--expr: g [0/0] {948} + ¦ ¦ °--SYMBOL: g [0/0] {947} + ¦ °--')': ) [0/0] {949} + ¦--',': , [0/4] {950} + ¦--expr: 22 + [0/0] {951} + ¦ ¦--expr: 22 [0/1] {953} + ¦ ¦ °--NUM_CONST: 22 [0/0] {952} + ¦ ¦--'+': + [0/1] {954} + ¦ °--expr: 1 [0/0] {956} + ¦ °--NUM_CONST: 1 [0/0] {955} + ¦--',': , [0/2] {957} + ¦--expr: 44 [1/0] {959} + ¦ °--NUM_CONST: 44 [0/0] {958} + ¦--',': , [0/11] {960} + ¦--expr: 323 [0/0] {962} + ¦ °--NUM_CONST: 323 [0/0] {961} + °--')': ) [1/0] {963} diff --git a/tests/testthat/alignment/named-out.R b/tests/testthat/alignment/named-out.R new file mode 100644 index 000000000..5fd8e9507 --- /dev/null +++ b/tests/testthat/alignment/named-out.R @@ -0,0 +1,221 @@ +# algorithm: aligned. human: aligned. +call( + x = 1, kdd = 2, + xy = 2, n = 33, +) + +# without trailing comma +call( + x = 1, kdd = 2, + xy = 2, n = 33 +) + +# algorithm: aligned. human: aligned. +call( + x = 1, kdd = 2, + xy = 2, n = 33, +) + +# algorithm: aligned. human: aligned. +call( + x = 1, kdd = 2, + xy = 2, n = 33, +) + +# algorithm: not aligned (spacing around =). human: aligned (fix: spacing around =). +call( + x = 1, kdd = 2, + xy = 2, n = 33, +) + +# algorithm: not aligned. human: not aligned. +call( + x = 1, kdd = 2, + xy = 2, n = 33, +) + +# algorithm: not aligned. human: not aligned. +call( + x = 1, kdd = 2, + xy = 22, n = 33, +) + +# algorithm: not aligned. human: not aligned. +call( + x = 1, d = 2, + xy = 22, n = 33, +) + + +# algorithm: aligned. human: aligned. +call( + x = 1, kdd = 2, k = "abc", + xy = 2, n = 33, z = "333" +) + + +# algorithm: aligned. human: aligned. +call( + x = 1, + xy = 2, n = 33, z = "333" +) + +# algorithm: aligned. human: aligned. 
+call( + x = 1, n = 33, z = "333", + xy = 2, +) + +# aligned. when spaces are spread accross different nests +call( + k = ff("pk"), k = 3, + b = f(-g), 22 + 1, + 44, 323 +) + +# aligned. when spaces are spread accross different nests +call( + k = ff("pk"), k = 3, + b = f(-g), 22 + 1, + 44, 323, +) + +# no trailing +call( + k = ff("pk"), k = 3, + b = f(-g), 22 + 1, + 44 +) + +# aligned: fewest arguments not on last line +call( + 44, + k = ff("pk"), k = 3, + b = f(-g), 22 + 1, +) + +# aligned: fewest arguments not on last line +call( + k = ff("pk"), k = 3, + 44, + b = f(-g), 22 + 1, +) + + + +# if all col1 arguments are named, col1 must also be aligned +# not aligned +fell( + x = 1, + y = 23, + zz = NULL +) + +# aligned +fell( + x = 1, + y = 23, + zz = NULL +) + +# aligned but comma in the wrong line +call( + a = 2, + bb = 3, +) + + +# aligned (comments) +call( + a = 2, x = 111, + # another + bb = 3, # hi +) + +# aligned (comments) +call( + a = 2, x = 111, + bb = 3, # hi +) + +# aligned (comments) +call( + # another one + a = 2, x = 111, + bb = 3, # hi +) + +# aligned (comments) +call( + # another one + a = 2, x = 111, + bb = 3 # hi +) + +# not aligned (comments) +call( + a = 2, x = 111, + bb = 3, # hi +) + +# not aligned (comments) +call( + # another one + a = 2, x = 111, + bb = 3, + # hi +) + +# If a call is mult-line, it can't be aligned (also, it would not currently +# not be ideopotent because first bace would be moved up without alignment and +# in the second step, because all arguments are named and there is no alignment, +# the extra spaces before `=` as of 29a010064257fa1a9caf32d182e7ee62008de98a. +call( + x = 95232, + y = f(), +) + + +# aligned (left after `=`) +ca( + x = 23200, + y2 = "hi", + m = c(rm.na = 7) +) + +# not aligned (left after `=`) +ca( + x = 23200, + y2 = "hi", + m = c(rm.na = 7) +) + +# aligned =, first all named +fell( + x = 8, annoying = 3, + y = 23, # nothing in column 2 for row 2 + zz = NULL, finally = "stuff" +) + +# aligned =, first not all named +gell( + p = 2, g = gg(x), n = 3 * 3, # + 31, fds = -1, gz = f / 3 + 1, +) + +xgle( + 1212, 232, f(n = 2), + 1, 2, "kFlya" +) + +# left aligned after , +call( + x = 2, y = "another", + y = "hhjkjkbew", x = 3 +) + +call( + k = ff("pk"), k = 3, + b = f(-g), 22 + 1, + 44, 323 +) diff --git a/tests/testthat/alignment/one-col-some-named-in.R b/tests/testthat/alignment/one-col-some-named-in.R new file mode 100644 index 000000000..58f50e184 --- /dev/null +++ b/tests/testthat/alignment/one-col-some-named-in.R @@ -0,0 +1,14 @@ +foo( + img, + pkg = "abc", + color = "lmn", + font = "xyz" +) + + +foo( + img, # + pkg = "abc", + color = "lmn", + font = "xyz" +) diff --git a/tests/testthat/alignment/one-col-some-named-in_tree b/tests/testthat/alignment/one-col-some-named-in_tree new file mode 100644 index 000000000..392910286 --- /dev/null +++ b/tests/testthat/alignment/one-col-some-named-in_tree @@ -0,0 +1,48 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: foo( + [0/0] {1} + ¦ ¦--expr: foo [0/0] {3} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: foo [0/0] {2} + ¦ ¦--'(': ( [0/2] {4} + ¦ ¦--expr: img [1/0] {6} + ¦ ¦ °--SYMBOL: img [0/0] {5} + ¦ ¦--',': , [0/2] {7} + ¦ ¦--SYMBOL_SUB: pkg [1/1] {8} + ¦ ¦--EQ_SUB: = [0/1] {9} + ¦ ¦--expr: "abc" [0/0] {11} + ¦ ¦ °--STR_CONST: "abc" [0/0] {10} + ¦ ¦--',': , [0/2] {12} + ¦ ¦--SYMBOL_SUB: color [1/7] {13} + ¦ ¦--EQ_SUB: = [0/1] {14} + ¦ ¦--expr: "lmn" [0/0] {16} + ¦ ¦ °--STR_CONST: "lmn" [0/0] {15} + ¦ ¦--',': , [0/2] {17} + ¦ ¦--SYMBOL_SUB: font [1/2] {18} + ¦ ¦--EQ_SUB: = [0/1] 
{19} + ¦ ¦--expr: "xyz" [0/0] {21} + ¦ ¦ °--STR_CONST: "xyz" [0/0] {20} + ¦ °--')': ) [1/0] {22} + °--expr: foo( + [3/0] {23} + ¦--expr: foo [0/0] {25} + ¦ °--SYMBOL_FUNCTION_CALL: foo [0/0] {24} + ¦--'(': ( [0/2] {26} + ¦--expr: img [1/0] {28} + ¦ °--SYMBOL: img [0/0] {27} + ¦--',': , [0/1] {29} + ¦--COMMENT: # [0/2] {30} + ¦--SYMBOL_SUB: pkg [1/1] {31} + ¦--EQ_SUB: = [0/1] {32} + ¦--expr: "abc" [0/0] {34} + ¦ °--STR_CONST: "abc" [0/0] {33} + ¦--',': , [0/2] {35} + ¦--SYMBOL_SUB: color [1/7] {36} + ¦--EQ_SUB: = [0/1] {37} + ¦--expr: "lmn" [0/0] {39} + ¦ °--STR_CONST: "lmn" [0/0] {38} + ¦--',': , [0/2] {40} + ¦--SYMBOL_SUB: font [1/2] {41} + ¦--EQ_SUB: = [0/1] {42} + ¦--expr: "xyz" [0/0] {44} + ¦ °--STR_CONST: "xyz" [0/0] {43} + °--')': ) [1/0] {45} diff --git a/tests/testthat/alignment/one-col-some-named-out.R b/tests/testthat/alignment/one-col-some-named-out.R new file mode 100644 index 000000000..d19808116 --- /dev/null +++ b/tests/testthat/alignment/one-col-some-named-out.R @@ -0,0 +1,14 @@ +foo( + img, + pkg = "abc", + color = "lmn", + font = "xyz" +) + + +foo( + img, # + pkg = "abc", + color = "lmn", + font = "xyz" +) diff --git a/tests/testthat/alignment/quoted-names-in.R b/tests/testthat/alignment/quoted-names-in.R new file mode 100644 index 000000000..f221f97e6 --- /dev/null +++ b/tests/testthat/alignment/quoted-names-in.R @@ -0,0 +1,7 @@ +df <- dplyr::rename(df, + "xValues" = "Time", + "xUnit" = "TimeUnit", + "yValues" = "simulationValues", + "yUnit" = "unit", + "yDimension" = "dimension" +) diff --git a/tests/testthat/alignment/quoted-names-in_tree b/tests/testthat/alignment/quoted-names-in_tree new file mode 100644 index 000000000..0ed87be9c --- /dev/null +++ b/tests/testthat/alignment/quoted-names-in_tree @@ -0,0 +1,39 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + °--expr: df <- [0/0] {1} + ¦--expr: df [0/1] {3} + ¦ °--SYMBOL: df [0/0] {2} + ¦--LEFT_ASSIGN: <- [0/1] {4} + °--expr: dplyr [0/0] {5} + ¦--expr: dplyr [0/0] {6} + ¦ ¦--SYMBOL_PACKAGE: dplyr [0/0] {7} + ¦ ¦--NS_GET: :: [0/0] {8} + ¦ °--SYMBOL_FUNCTION_CALL: renam [0/0] {9} + ¦--'(': ( [0/0] {10} + ¦--expr: df [0/0] {12} + ¦ °--SYMBOL: df [0/0] {11} + ¦--',': , [0/2] {13} + ¦--STR_CONST: "xVal [1/1] {14} + ¦--EQ_SUB: = [0/3] {15} + ¦--expr: "Time [0/0] {17} + ¦ °--STR_CONST: "Time [0/0] {16} + ¦--',': , [0/2] {18} + ¦--STR_CONST: "xUni [1/1] {19} + ¦--EQ_SUB: = [0/1] {20} + ¦--expr: "Time [0/0] {22} + ¦ °--STR_CONST: "Time [0/0] {21} + ¦--',': , [0/2] {23} + ¦--STR_CONST: "yVal [1/3] {24} + ¦--EQ_SUB: = [0/1] {25} + ¦--expr: "simu [0/0] {27} + ¦ °--STR_CONST: "simu [0/0] {26} + ¦--',': , [0/2] {28} + ¦--STR_CONST: "yUni [1/1] {29} + ¦--EQ_SUB: = [0/1] {30} + ¦--expr: "unit [0/0] {32} + ¦ °--STR_CONST: "unit [0/0] {31} + ¦--',': , [0/2] {33} + ¦--STR_CONST: "yDim [1/2] {34} + ¦--EQ_SUB: = [0/1] {35} + ¦--expr: "dime [0/0] {37} + ¦ °--STR_CONST: "dime [0/0] {36} + °--')': ) [1/0] {38} diff --git a/tests/testthat/alignment/quoted-names-out.R b/tests/testthat/alignment/quoted-names-out.R new file mode 100644 index 000000000..1b5c481df --- /dev/null +++ b/tests/testthat/alignment/quoted-names-out.R @@ -0,0 +1,7 @@ +df <- dplyr::rename(df, + "xValues" = "Time", + "xUnit" = "TimeUnit", + "yValues" = "simulationValues", + "yUnit" = "unit", + "yDimension" = "dimension" +) diff --git a/tests/testthat/alignment/tribble-in.R b/tests/testthat/alignment/tribble-in.R new file mode 100644 index 000000000..2a9dd913c --- /dev/null +++ b/tests/testthat/alignment/tribble-in.R @@ -0,0 +1,30 @@ +tribble( + ~x, ~d, + 
"axa'fa", 1:6, + "b", 4:6 +) + +tribble( + ~x, ~d, + "axa'fa", 1:6, + "b", 4:6 +) + + +tribble( + ~x, ~d, + "axa'fa", 1:6, +"b", 4:6 +) + +tribble( + ~x, ~d, +"axa'fa", 1:6, + "b", 4:6 +) + +# has EQ_SUB which don't match, not tribble-like +mlr3misc:::rowwise_table( + x = 23, zy = 3, + y = 1, k = 1, +) diff --git a/tests/testthat/alignment/tribble-in_tree b/tests/testthat/alignment/tribble-in_tree new file mode 100644 index 000000000..cb374e0bb --- /dev/null +++ b/tests/testthat/alignment/tribble-in_tree @@ -0,0 +1,165 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: tribb [0/0] {1} + ¦ ¦--expr: tribb [0/0] {3} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: tribb [0/0] {2} + ¦ ¦--'(': ( [0/2] {4} + ¦ ¦--expr: ~x [1/0] {5} + ¦ ¦ ¦--'~': ~ [0/0] {6} + ¦ ¦ °--expr: x [0/0] {8} + ¦ ¦ °--SYMBOL: x [0/0] {7} + ¦ ¦--',': , [0/7] {9} + ¦ ¦--expr: ~d [0/0] {10} + ¦ ¦ ¦--'~': ~ [0/0] {11} + ¦ ¦ °--expr: d [0/0] {13} + ¦ ¦ °--SYMBOL: d [0/0] {12} + ¦ ¦--',': , [0/2] {14} + ¦ ¦--expr: "axa' [1/0] {16} + ¦ ¦ °--STR_CONST: "axa' [0/0] {15} + ¦ ¦--',': , [0/1] {17} + ¦ ¦--expr: 1:6 [0/0] {18} + ¦ ¦ ¦--expr: 1 [0/0] {20} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {19} + ¦ ¦ ¦--':': : [0/0] {21} + ¦ ¦ °--expr: 6 [0/0] {23} + ¦ ¦ °--NUM_CONST: 6 [0/0] {22} + ¦ ¦--',': , [0/2] {24} + ¦ ¦--expr: "b" [1/0] {26} + ¦ ¦ °--STR_CONST: "b" [0/0] {25} + ¦ ¦--',': , [0/6] {27} + ¦ ¦--expr: 4:6 [0/0] {28} + ¦ ¦ ¦--expr: 4 [0/0] {30} + ¦ ¦ ¦ °--NUM_CONST: 4 [0/0] {29} + ¦ ¦ ¦--':': : [0/0] {31} + ¦ ¦ °--expr: 6 [0/0] {33} + ¦ ¦ °--NUM_CONST: 6 [0/0] {32} + ¦ °--')': ) [1/0] {34} + ¦--expr: tribb [2/0] {35} + ¦ ¦--expr: tribb [0/0] {37} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: tribb [0/0] {36} + ¦ ¦--'(': ( [0/2] {38} + ¦ ¦--expr: ~x [1/0] {39} + ¦ ¦ ¦--'~': ~ [0/0] {40} + ¦ ¦ °--expr: x [0/0] {42} + ¦ ¦ °--SYMBOL: x [0/0] {41} + ¦ ¦--',': , [0/7] {43} + ¦ ¦--expr: ~d [0/0] {44} + ¦ ¦ ¦--'~': ~ [0/0] {45} + ¦ ¦ °--expr: d [0/0] {47} + ¦ ¦ °--SYMBOL: d [0/0] {46} + ¦ ¦--',': , [0/2] {48} + ¦ ¦--expr: "axa' [1/0] {50} + ¦ ¦ °--STR_CONST: "axa' [0/0] {49} + ¦ ¦--',': , [0/1] {51} + ¦ ¦--expr: 1:6 [0/0] {52} + ¦ ¦ ¦--expr: 1 [0/0] {54} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {53} + ¦ ¦ ¦--':': : [0/0] {55} + ¦ ¦ °--expr: 6 [0/0] {57} + ¦ ¦ °--NUM_CONST: 6 [0/0] {56} + ¦ ¦--',': , [0/2] {58} + ¦ ¦--expr: "b" [1/0] {60} + ¦ ¦ °--STR_CONST: "b" [0/0] {59} + ¦ ¦--',': , [0/4] {61} + ¦ ¦--expr: 4:6 [0/0] {62} + ¦ ¦ ¦--expr: 4 [0/0] {64} + ¦ ¦ ¦ °--NUM_CONST: 4 [0/0] {63} + ¦ ¦ ¦--':': : [0/0] {65} + ¦ ¦ °--expr: 6 [0/0] {67} + ¦ ¦ °--NUM_CONST: 6 [0/0] {66} + ¦ °--')': ) [1/0] {68} + ¦--expr: tribb [3/0] {69} + ¦ ¦--expr: tribb [0/0] {71} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: tribb [0/0] {70} + ¦ ¦--'(': ( [0/2] {72} + ¦ ¦--expr: ~x [1/0] {73} + ¦ ¦ ¦--'~': ~ [0/0] {74} + ¦ ¦ °--expr: x [0/0] {76} + ¦ ¦ °--SYMBOL: x [0/0] {75} + ¦ ¦--',': , [0/7] {77} + ¦ ¦--expr: ~d [0/0] {78} + ¦ ¦ ¦--'~': ~ [0/0] {79} + ¦ ¦ °--expr: d [0/0] {81} + ¦ ¦ °--SYMBOL: d [0/0] {80} + ¦ ¦--',': , [0/2] {82} + ¦ ¦--expr: "axa' [1/0] {84} + ¦ ¦ °--STR_CONST: "axa' [0/0] {83} + ¦ ¦--',': , [0/1] {85} + ¦ ¦--expr: 1:6 [0/0] {86} + ¦ ¦ ¦--expr: 1 [0/0] {88} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {87} + ¦ ¦ ¦--':': : [0/0] {89} + ¦ ¦ °--expr: 6 [0/0] {91} + ¦ ¦ °--NUM_CONST: 6 [0/0] {90} + ¦ ¦--',': , [0/0] {92} + ¦ ¦--expr: "b" [1/0] {94} + ¦ ¦ °--STR_CONST: "b" [0/0] {93} + ¦ ¦--',': , [0/6] {95} + ¦ ¦--expr: 4:6 [0/0] {96} + ¦ ¦ ¦--expr: 4 [0/0] {98} + ¦ ¦ ¦ °--NUM_CONST: 4 [0/0] {97} + ¦ ¦ ¦--':': : [0/0] {99} + ¦ ¦ °--expr: 6 [0/0] {101} + ¦ ¦ °--NUM_CONST: 6 [0/0] {100} + ¦ °--')': ) [1/0] 
{102} + ¦--expr: tribb [2/0] {103} + ¦ ¦--expr: tribb [0/0] {105} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: tribb [0/0] {104} + ¦ ¦--'(': ( [0/2] {106} + ¦ ¦--expr: ~x [1/0] {107} + ¦ ¦ ¦--'~': ~ [0/0] {108} + ¦ ¦ °--expr: x [0/0] {110} + ¦ ¦ °--SYMBOL: x [0/0] {109} + ¦ ¦--',': , [0/7] {111} + ¦ ¦--expr: ~d [0/0] {112} + ¦ ¦ ¦--'~': ~ [0/0] {113} + ¦ ¦ °--expr: d [0/0] {115} + ¦ ¦ °--SYMBOL: d [0/0] {114} + ¦ ¦--',': , [0/0] {116} + ¦ ¦--expr: "axa' [1/0] {118} + ¦ ¦ °--STR_CONST: "axa' [0/0] {117} + ¦ ¦--',': , [0/2] {119} + ¦ ¦--expr: 1:6 [0/0] {120} + ¦ ¦ ¦--expr: 1 [0/0] {122} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {121} + ¦ ¦ ¦--':': : [0/0] {123} + ¦ ¦ °--expr: 6 [0/0] {125} + ¦ ¦ °--NUM_CONST: 6 [0/0] {124} + ¦ ¦--',': , [0/2] {126} + ¦ ¦--expr: "b" [1/0] {128} + ¦ ¦ °--STR_CONST: "b" [0/0] {127} + ¦ ¦--',': , [0/6] {129} + ¦ ¦--expr: 4:6 [0/0] {130} + ¦ ¦ ¦--expr: 4 [0/0] {132} + ¦ ¦ ¦ °--NUM_CONST: 4 [0/0] {131} + ¦ ¦ ¦--':': : [0/0] {133} + ¦ ¦ °--expr: 6 [0/0] {135} + ¦ ¦ °--NUM_CONST: 6 [0/0] {134} + ¦ °--')': ) [1/0] {136} + ¦--COMMENT: # has [2/0] {137} + °--expr: mlr3m [1/0] {138} + ¦--expr: mlr3m [0/0] {139} + ¦ ¦--SYMBOL_PACKAGE: mlr3m [0/0] {140} + ¦ ¦--NS_GET_INT: ::: [0/0] {141} + ¦ °--SYMBOL_FUNCTION_CALL: rowwi [0/0] {142} + ¦--'(': ( [0/2] {143} + ¦--SYMBOL_SUB: x [1/1] {144} + ¦--EQ_SUB: = [0/1] {145} + ¦--expr: 23 [0/0] {147} + ¦ °--NUM_CONST: 23 [0/0] {146} + ¦--',': , [0/1] {148} + ¦--SYMBOL_SUB: zy [0/1] {149} + ¦--EQ_SUB: = [0/1] {150} + ¦--expr: 3 [0/0] {152} + ¦ °--NUM_CONST: 3 [0/0] {151} + ¦--',': , [0/2] {153} + ¦--SYMBOL_SUB: y [1/1] {154} + ¦--EQ_SUB: = [0/1] {155} + ¦--expr: 1 [0/0] {157} + ¦ °--NUM_CONST: 1 [0/0] {156} + ¦--',': , [0/2] {158} + ¦--SYMBOL_SUB: k [0/1] {159} + ¦--EQ_SUB: = [0/1] {160} + ¦--expr: 1 [0/0] {162} + ¦ °--NUM_CONST: 1 [0/0] {161} + ¦--',': , [0/0] {163} + °--')': ) [1/0] {164} diff --git a/tests/testthat/alignment/tribble-out.R b/tests/testthat/alignment/tribble-out.R new file mode 100644 index 000000000..2e618487e --- /dev/null +++ b/tests/testthat/alignment/tribble-out.R @@ -0,0 +1,30 @@ +tribble( + ~x, ~d, + "axa'fa", 1:6, + "b", 4:6 +) + +tribble( + ~x, ~d, + "axa'fa", 1:6, + "b", 4:6 +) + + +tribble( + ~x, ~d, + "axa'fa", 1:6, + "b", 4:6 +) + +tribble( + ~x, ~d, + "axa'fa", 1:6, + "b", 4:6 +) + +# has EQ_SUB which don't match, not tribble-like +mlr3misc:::rowwise_table( + x = 23, zy = 3, + y = 1, k = 1, +) diff --git a/tests/testthat/alignment/tribble-three-cols-in.R b/tests/testthat/alignment/tribble-three-cols-in.R new file mode 100644 index 000000000..958b370dc --- /dev/null +++ b/tests/testthat/alignment/tribble-three-cols-in.R @@ -0,0 +1,5 @@ +tribble( + ~x, ~y, ~z, + "one", TRUE, 1L, + "two", FALSE, 2L +) diff --git a/tests/testthat/alignment/tribble-three-cols-in_tree b/tests/testthat/alignment/tribble-three-cols-in_tree new file mode 100644 index 000000000..aa7e8e833 --- /dev/null +++ b/tests/testthat/alignment/tribble-three-cols-in_tree @@ -0,0 +1,38 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + °--expr: tribb [0/0] {1} + ¦--expr: tribb [0/0] {3} + ¦ °--SYMBOL_FUNCTION_CALL: tribb [0/0] {2} + ¦--'(': ( [0/2] {4} + ¦--expr: ~x [1/0] {5} + ¦ ¦--'~': ~ [0/0] {6} + ¦ °--expr: x [0/0] {8} + ¦ °--SYMBOL: x [0/0] {7} + ¦--',': , [0/4] {9} + ¦--expr: ~y [0/0] {10} + ¦ ¦--'~': ~ [0/0] {11} + ¦ °--expr: y [0/0] {13} + ¦ °--SYMBOL: y [0/0] {12} + ¦--',': , [0/4] {14} + ¦--expr: ~z [0/0] {15} + ¦ ¦--'~': ~ [0/0] {16} + ¦ °--expr: z [0/0] {18} + ¦ °--SYMBOL: z [0/0] {17} + ¦--',': , [0/2] {19} + ¦--expr: "one" [1/0] {21} + ¦ 
°--STR_CONST: "one" [0/0] {20} + ¦--',': , [0/1] {22} + ¦--expr: TRUE [0/0] {24} + ¦ °--NUM_CONST: TRUE [0/0] {23} + ¦--',': , [0/2] {25} + ¦--expr: 1L [0/0] {27} + ¦ °--NUM_CONST: 1L [0/0] {26} + ¦--',': , [0/2] {28} + ¦--expr: "two" [1/0] {30} + ¦ °--STR_CONST: "two" [0/0] {29} + ¦--',': , [0/1] {31} + ¦--expr: FALSE [0/0] {33} + ¦ °--NUM_CONST: FALSE [0/0] {32} + ¦--',': , [0/1] {34} + ¦--expr: 2L [0/0] {36} + ¦ °--NUM_CONST: 2L [0/0] {35} + °--')': ) [1/0] {37} diff --git a/tests/testthat/alignment/tribble-three-cols-out.R b/tests/testthat/alignment/tribble-three-cols-out.R new file mode 100644 index 000000000..958b370dc --- /dev/null +++ b/tests/testthat/alignment/tribble-three-cols-out.R @@ -0,0 +1,5 @@ +tribble( + ~x, ~y, ~z, + "one", TRUE, 1L, + "two", FALSE, 2L +) diff --git a/tests/testthat/cache-with-r-cache/mlflow-1-in.R b/tests/testthat/cache-with-r-cache/mlflow-1-in.R new file mode 100644 index 000000000..eb0ebb231 --- /dev/null +++ b/tests/testthat/cache-with-r-cache/mlflow-1-in.R @@ -0,0 +1,90 @@ +# Returns the current MLflow R package version +mlflow_version <- function() { + utils::packageVersion("mlflow") +} + +# Returns the name of a conda environment in which to install the Python MLflow package +mlflow_conda_env_name <- function() { + paste("r-mlflow", mlflow_version(), sep = "-") +} + +# Create conda env used by MLflow if it doesn't already exist +#' @importFrom reticulate conda_install conda_create conda_list +#' @param python_version Python version to use within conda environment created for +#' installing the MLflow CLI. +mlflow_maybe_create_conda_env <- function(python_version) { + packages <- c(paste("python", python_version, sep = "=")) + conda <- mlflow_conda_bin() + conda_env_name <- mlflow_conda_env_name() + if (!conda_env_name %in% conda_list(conda = conda)$name) { + conda_create(conda_env_name, conda = conda, packages = packages) + } +} + +#' Install MLflow +#' +#' Installs auxiliary dependencies of MLflow (e.g. the MLflow CLI). As a +#' one-time setup step, you must run install_mlflow() to install these +#' dependencies before calling other MLflow APIs. +#' +#' install_mlflow() requires Python and Conda to be installed. +#' See \url{https://www.python.org/getit/} and \url{https://docs.conda.io/projects/conda/en/latest/user-guide/install/}. +#' +#' Alternatively, you can set MLFLOW_PYTHON_BIN and MLFLOW_BIN environment variables +#' instead. MLFLOW_PYTHON_BIN should point to python executable and MLFLOW_BIN to mlflow cli +#' executable. These variables allow you to use custom mlflow installation. Note that there may be +#' some compatibility issues if the custom mlflow version does not match the version of the R +#' package. +#' +#' @examples +#' \dontrun{ +#' library(mlflow) +#' install_mlflow() +#' } +#' +#' @importFrom reticulate conda_install conda_create conda_list +#' @param python_version Optional Python version to use within conda environment created for +#' installing the MLflow CLI. If unspecified, defaults to using Python 3.6 +#' @export +install_mlflow <- function(python_version = "3.6") { + mlflow_maybe_create_conda_env(python_version) + # Install the Python MLflow package with version == the current R package version + packages <- c(paste("mlflow", "==", mlflow_version(), sep = "")) + conda <- mlflow_conda_bin() + conda_install(packages, envname = mlflow_conda_env_name(), pip = TRUE, conda = conda) +} + +#' Uninstall MLflow +#' +#' Uninstalls MLflow by removing the Conda environment. 
+#' +#' @examples +#' \dontrun{ +#' library(mlflow) +#' install_mlflow() +#' uninstall_mlflow() +#' } +#' +#' @importFrom reticulate conda_install conda_create conda_list +#' @export +uninstall_mlflow <- function() { + reticulate::conda_remove(envname = mlflow_conda_env_name(), conda = mlflow_conda_bin()) +} + + +mlflow_conda_bin <- function() { + conda_home <- Sys.getenv("MLFLOW_CONDA_HOME", NA) + conda <- if (!is.na(conda_home)) paste(conda_home, "bin", "conda", sep = "/") else "auto" + conda_try <- try(conda_binary(conda = conda), silent = TRUE) + if (class(conda_try) == "try-error") { + msg <- paste(attributes(conda_try)$condition$message, + paste( + " If you are not using conda, you can set the environment variable", + "MLFLOW_PYTHON_BIN to the path of your python executable." + ), + sep = "\n" + ) + stop(msg) + } + conda_try +} diff --git a/tests/testthat/cache-with-r-cache/roxygen-cache-1.R b/tests/testthat/cache-with-r-cache/roxygen-cache-1.R new file mode 100644 index 000000000..30353c2e1 --- /dev/null +++ b/tests/testthat/cache-with-r-cache/roxygen-cache-1.R @@ -0,0 +1,29 @@ +#' This shot +#' +#' @examples +#' mlflow_conda_bin <- function() { +#' conda_home <- Sys.getenv("MLFLOW_CONDA_HOME", NA) +#' conda <- if (!is.na(conda_home)) paste(conda_home, "bin", "conda", sep = "/") else "auto" +#' conda_try <- try(conda_binary(conda = conda), silent = TRUE) +#' if (class(conda_try) == "try-error") { +#' msg <- paste(attributes(conda_try)$condition$message, +#' paste( +#' " If you are not using conda, you can set the environment variable", +#' "MLFLOW_PYTHON_BIN to the path of your python executable." +#' ), +#' sep = " " +#' ) +#' stop(msg) +#' } +#' conda_try +#' } +#' +#' if (x) { +#' f(x, na.rm = 4) +#' } else { +#' 99 +#' } +#' +#' +#' xx <-f4() +NULL diff --git a/tests/testthat/curly-curly/mixed-in.R b/tests/testthat/curly-curly/mixed-in.R new file mode 100644 index 000000000..e99eb810c --- /dev/null +++ b/tests/testthat/curly-curly/mixed-in.R @@ -0,0 +1,114 @@ + +## ............................................................................ +## line breaks #### +# not inserting line breaks +call({{ x }}) + +# removing line breaks +call({{ + x +}}) + +call({ + {x +}}) + +call({ + {x}} +) + +call({ + {x} + }) + +call( + { + {x + } + } + ) + +## ............................................................................ +## spaces #### + +# not inserting spaces between braces +call({{ x }}) + +# removing spaces between braces +call({ { x }}) +call({ { x }} ) +call( { { x }}) +call( { { x } }) + +# inserting spaces within {{ +call({{x }}) +call({{x}}) +call({{ x}}) + +# not removing spaces within {{ +call({{ x }}) + + +# combine spaces and line breaks +call({{ x} + }) + +call({ + { x}}) + +# not applicable when only one curly brace +{ + y +} +{ 1 + 1} +{{1 + a} + 1} # not curly-culry! + + +## ............................................................................ 
+## multiple #### +call("test", { + 1 +}) + +call( + "test", { + 1 +}) + +call("test", + { + 1 + }) + +call("test", { + 1 } +) + +call({ + 1 +}, a + b, { 33 / f(c)}) + +call({{ x }}, {{ y}}) +call({{ x }}, {{ y} + }) +call( + {{ x }}, {{ y}}) + +call( + {{ x }}, + {{ y}} := 3, f(bk) +) + +call({{ + # + 1 +}}) + +call({{ + # +}}) + + +{{ + # +}} diff --git a/tests/testthat/curly-curly/mixed-in_tree b/tests/testthat/curly-curly/mixed-in_tree new file mode 100644 index 000000000..b7ab6ef5a --- /dev/null +++ b/tests/testthat/curly-curly/mixed-in_tree @@ -0,0 +1,505 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: ## . [0/0] {1} + ¦--COMMENT: ## l [1/0] {2} + ¦--COMMENT: # not [1/0] {3} + ¦--expr: call( [1/0] {4} + ¦ ¦--expr: call [0/0] {6} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {5} + ¦ ¦--'(': ( [0/0] {7} + ¦ ¦--expr: {{ x [0/0] {8} + ¦ ¦ ¦--'{': { [0/0] {9} + ¦ ¦ ¦--expr: { x } [0/0] {10} + ¦ ¦ ¦ ¦--'{': { [0/1] {11} + ¦ ¦ ¦ ¦--expr: x [0/1] {13} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {12} + ¦ ¦ ¦ °--'}': } [0/0] {14} + ¦ ¦ °--'}': } [0/0] {15} + ¦ °--')': ) [0/0] {16} + ¦--COMMENT: # rem [2/0] {17} + ¦--expr: call( [1/0] {18} + ¦ ¦--expr: call [0/0] {20} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {19} + ¦ ¦--'(': ( [0/0] {21} + ¦ ¦--expr: {{ + [0/0] {22} + ¦ ¦ ¦--'{': { [0/0] {23} + ¦ ¦ ¦--expr: { + x [0/0] {24} + ¦ ¦ ¦ ¦--'{': { [0/2] {25} + ¦ ¦ ¦ ¦--expr: x [1/0] {27} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {26} + ¦ ¦ ¦ °--'}': } [1/0] {28} + ¦ ¦ °--'}': } [0/0] {29} + ¦ °--')': ) [0/0] {30} + ¦--expr: call( [2/0] {31} + ¦ ¦--expr: call [0/0] {33} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {32} + ¦ ¦--'(': ( [0/0] {34} + ¦ ¦--expr: { + { [0/0] {35} + ¦ ¦ ¦--'{': { [0/2] {36} + ¦ ¦ ¦--expr: {x +} [1/0] {37} + ¦ ¦ ¦ ¦--'{': { [0/0] {38} + ¦ ¦ ¦ ¦--expr: x [0/0] {40} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {39} + ¦ ¦ ¦ °--'}': } [1/0] {41} + ¦ ¦ °--'}': } [0/0] {42} + ¦ °--')': ) [0/0] {43} + ¦--expr: call( [2/0] {44} + ¦ ¦--expr: call [0/0] {46} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {45} + ¦ ¦--'(': ( [0/0] {47} + ¦ ¦--expr: { + { [0/0] {48} + ¦ ¦ ¦--'{': { [0/2] {49} + ¦ ¦ ¦--expr: {x} [1/0] {50} + ¦ ¦ ¦ ¦--'{': { [0/0] {51} + ¦ ¦ ¦ ¦--expr: x [0/0] {53} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {52} + ¦ ¦ ¦ °--'}': } [0/0] {54} + ¦ ¦ °--'}': } [0/0] {55} + ¦ °--')': ) [1/0] {56} + ¦--expr: call( [2/0] {57} + ¦ ¦--expr: call [0/0] {59} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {58} + ¦ ¦--'(': ( [0/0] {60} + ¦ ¦--expr: { + { [0/0] {61} + ¦ ¦ ¦--'{': { [0/2] {62} + ¦ ¦ ¦--expr: {x} [1/2] {63} + ¦ ¦ ¦ ¦--'{': { [0/0] {64} + ¦ ¦ ¦ ¦--expr: x [0/0] {66} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {65} + ¦ ¦ ¦ °--'}': } [0/0] {67} + ¦ ¦ °--'}': } [1/0] {68} + ¦ °--')': ) [0/0] {69} + ¦--expr: call( [2/0] {70} + ¦ ¦--expr: call [0/0] {72} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {71} + ¦ ¦--'(': ( [0/2] {73} + ¦ ¦--expr: { + { [1/2] {74} + ¦ ¦ ¦--'{': { [0/2] {75} + ¦ ¦ ¦--expr: {x + [1/4] {76} + ¦ ¦ ¦ ¦--'{': { [0/0] {77} + ¦ ¦ ¦ ¦--expr: x [0/2] {79} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {78} + ¦ ¦ ¦ °--'}': } [1/0] {80} + ¦ ¦ °--'}': } [1/0] {81} + ¦ °--')': ) [1/0] {82} + ¦--COMMENT: ## . 
[2/0] {83} + ¦--COMMENT: ## s [1/0] {84} + ¦--COMMENT: # not [2/0] {85} + ¦--expr: call( [1/0] {86} + ¦ ¦--expr: call [0/0] {88} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {87} + ¦ ¦--'(': ( [0/0] {89} + ¦ ¦--expr: {{ x [0/0] {90} + ¦ ¦ ¦--'{': { [0/0] {91} + ¦ ¦ ¦--expr: { x } [0/0] {92} + ¦ ¦ ¦ ¦--'{': { [0/1] {93} + ¦ ¦ ¦ ¦--expr: x [0/1] {95} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {94} + ¦ ¦ ¦ °--'}': } [0/0] {96} + ¦ ¦ °--'}': } [0/0] {97} + ¦ °--')': ) [0/0] {98} + ¦--COMMENT: # rem [2/0] {99} + ¦--expr: call( [1/0] {100} + ¦ ¦--expr: call [0/0] {102} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {101} + ¦ ¦--'(': ( [0/0] {103} + ¦ ¦--expr: { { x [0/0] {104} + ¦ ¦ ¦--'{': { [0/1] {105} + ¦ ¦ ¦--expr: { x } [0/0] {106} + ¦ ¦ ¦ ¦--'{': { [0/1] {107} + ¦ ¦ ¦ ¦--expr: x [0/1] {109} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {108} + ¦ ¦ ¦ °--'}': } [0/0] {110} + ¦ ¦ °--'}': } [0/0] {111} + ¦ °--')': ) [0/0] {112} + ¦--expr: call( [1/0] {113} + ¦ ¦--expr: call [0/0] {115} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {114} + ¦ ¦--'(': ( [0/0] {116} + ¦ ¦--expr: { { x [0/1] {117} + ¦ ¦ ¦--'{': { [0/1] {118} + ¦ ¦ ¦--expr: { x } [0/0] {119} + ¦ ¦ ¦ ¦--'{': { [0/1] {120} + ¦ ¦ ¦ ¦--expr: x [0/1] {122} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {121} + ¦ ¦ ¦ °--'}': } [0/0] {123} + ¦ ¦ °--'}': } [0/0] {124} + ¦ °--')': ) [0/0] {125} + ¦--expr: call( [1/0] {126} + ¦ ¦--expr: call [0/0] {128} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {127} + ¦ ¦--'(': ( [0/1] {129} + ¦ ¦--expr: { { x [0/0] {130} + ¦ ¦ ¦--'{': { [0/1] {131} + ¦ ¦ ¦--expr: { x } [0/0] {132} + ¦ ¦ ¦ ¦--'{': { [0/1] {133} + ¦ ¦ ¦ ¦--expr: x [0/1] {135} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {134} + ¦ ¦ ¦ °--'}': } [0/0] {136} + ¦ ¦ °--'}': } [0/0] {137} + ¦ °--')': ) [0/0] {138} + ¦--expr: call( [1/0] {139} + ¦ ¦--expr: call [0/0] {141} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {140} + ¦ ¦--'(': ( [0/1] {142} + ¦ ¦--expr: { { x [0/0] {143} + ¦ ¦ ¦--'{': { [0/1] {144} + ¦ ¦ ¦--expr: { x } [0/1] {145} + ¦ ¦ ¦ ¦--'{': { [0/1] {146} + ¦ ¦ ¦ ¦--expr: x [0/1] {148} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {147} + ¦ ¦ ¦ °--'}': } [0/0] {149} + ¦ ¦ °--'}': } [0/0] {150} + ¦ °--')': ) [0/0] {151} + ¦--COMMENT: # ins [2/0] {152} + ¦--expr: call( [1/0] {153} + ¦ ¦--expr: call [0/0] {155} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {154} + ¦ ¦--'(': ( [0/0] {156} + ¦ ¦--expr: {{x } [0/0] {157} + ¦ ¦ ¦--'{': { [0/0] {158} + ¦ ¦ ¦--expr: {x } [0/0] {159} + ¦ ¦ ¦ ¦--'{': { [0/0] {160} + ¦ ¦ ¦ ¦--expr: x [0/1] {162} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {161} + ¦ ¦ ¦ °--'}': } [0/0] {163} + ¦ ¦ °--'}': } [0/0] {164} + ¦ °--')': ) [0/0] {165} + ¦--expr: call( [1/0] {166} + ¦ ¦--expr: call [0/0] {168} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {167} + ¦ ¦--'(': ( [0/0] {169} + ¦ ¦--expr: {{x}} [0/0] {170} + ¦ ¦ ¦--'{': { [0/0] {171} + ¦ ¦ ¦--expr: {x} [0/0] {172} + ¦ ¦ ¦ ¦--'{': { [0/0] {173} + ¦ ¦ ¦ ¦--expr: x [0/0] {175} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {174} + ¦ ¦ ¦ °--'}': } [0/0] {176} + ¦ ¦ °--'}': } [0/0] {177} + ¦ °--')': ) [0/0] {178} + ¦--expr: call( [1/0] {179} + ¦ ¦--expr: call [0/0] {181} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {180} + ¦ ¦--'(': ( [0/0] {182} + ¦ ¦--expr: {{ x} [0/0] {183} + ¦ ¦ ¦--'{': { [0/0] {184} + ¦ ¦ ¦--expr: { x} [0/0] {185} + ¦ ¦ ¦ ¦--'{': { [0/1] {186} + ¦ ¦ ¦ ¦--expr: x [0/0] {188} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {187} + ¦ ¦ ¦ °--'}': } [0/0] {189} + ¦ ¦ °--'}': } [0/0] {190} + ¦ °--')': ) [0/0] {191} + ¦--COMMENT: # not [2/0] {192} + ¦--expr: call( [1/0] {193} + ¦ ¦--expr: call [0/0] {195} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {194} + ¦ ¦--'(': ( [0/0] {196} + ¦ ¦--expr: {{ x [0/0] {197} + 
¦ ¦ ¦--'{': { [0/0] {198} + ¦ ¦ ¦--expr: { x } [0/0] {199} + ¦ ¦ ¦ ¦--'{': { [0/1] {200} + ¦ ¦ ¦ ¦--expr: x [0/1] {202} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {201} + ¦ ¦ ¦ °--'}': } [0/0] {203} + ¦ ¦ °--'}': } [0/0] {204} + ¦ °--')': ) [0/0] {205} + ¦--COMMENT: # com [3/0] {206} + ¦--expr: call( [1/0] {207} + ¦ ¦--expr: call [0/0] {209} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {208} + ¦ ¦--'(': ( [0/0] {210} + ¦ ¦--expr: {{ x} [0/0] {211} + ¦ ¦ ¦--'{': { [0/0] {212} + ¦ ¦ ¦--expr: { x} [0/2] {213} + ¦ ¦ ¦ ¦--'{': { [0/1] {214} + ¦ ¦ ¦ ¦--expr: x [0/0] {216} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {215} + ¦ ¦ ¦ °--'}': } [0/0] {217} + ¦ ¦ °--'}': } [1/0] {218} + ¦ °--')': ) [0/0] {219} + ¦--expr: call( [2/0] {220} + ¦ ¦--expr: call [0/0] {222} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {221} + ¦ ¦--'(': ( [0/0] {223} + ¦ ¦--expr: { + { [0/0] {224} + ¦ ¦ ¦--'{': { [0/2] {225} + ¦ ¦ ¦--expr: { x} [1/0] {226} + ¦ ¦ ¦ ¦--'{': { [0/1] {227} + ¦ ¦ ¦ ¦--expr: x [0/0] {229} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {228} + ¦ ¦ ¦ °--'}': } [0/0] {230} + ¦ ¦ °--'}': } [0/0] {231} + ¦ °--')': ) [0/0] {232} + ¦--COMMENT: # not [2/0] {233} + ¦--expr: { + y [1/0] {234} + ¦ ¦--'{': { [0/2] {235} + ¦ ¦--expr: y [1/0] {237} + ¦ ¦ °--SYMBOL: y [0/0] {236} + ¦ °--'}': } [1/0] {238} + ¦--expr: { 1 + [1/0] {239} + ¦ ¦--'{': { [0/1] {240} + ¦ ¦--expr: 1 + 1 [0/0] {241} + ¦ ¦ ¦--expr: 1 [0/1] {243} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {242} + ¦ ¦ ¦--'+': + [0/1] {244} + ¦ ¦ °--expr: 1 [0/0] {246} + ¦ ¦ °--NUM_CONST: 1 [0/0] {245} + ¦ °--'}': } [0/0] {247} + ¦--expr: {{1 + [1/1] {248} + ¦ ¦--'{': { [0/0] {249} + ¦ ¦--expr: {1 + [0/0] {250} + ¦ ¦ ¦--expr: {1 + [0/1] {251} + ¦ ¦ ¦ ¦--'{': { [0/0] {252} + ¦ ¦ ¦ ¦--expr: 1 + a [0/0] {253} + ¦ ¦ ¦ ¦ ¦--expr: 1 [0/1] {255} + ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {254} + ¦ ¦ ¦ ¦ ¦--'+': + [0/1] {256} + ¦ ¦ ¦ ¦ °--expr: a [0/0] {258} + ¦ ¦ ¦ ¦ °--SYMBOL: a [0/0] {257} + ¦ ¦ ¦ °--'}': } [0/0] {259} + ¦ ¦ ¦--'+': + [0/1] {260} + ¦ ¦ °--expr: 1 [0/0] {262} + ¦ ¦ °--NUM_CONST: 1 [0/0] {261} + ¦ °--'}': } [0/0] {263} + ¦--COMMENT: # not [0/0] {264} + ¦--COMMENT: ## . 
[3/0] {265} + ¦--COMMENT: ## m [1/0] {266} + ¦--expr: call( [1/0] {267} + ¦ ¦--expr: call [0/0] {269} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {268} + ¦ ¦--'(': ( [0/0] {270} + ¦ ¦--expr: "test [0/0] {272} + ¦ ¦ °--STR_CONST: "test [0/0] {271} + ¦ ¦--',': , [0/1] {273} + ¦ ¦--expr: { + 1 [0/0] {274} + ¦ ¦ ¦--'{': { [0/2] {275} + ¦ ¦ ¦--expr: 1 [1/0] {277} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {276} + ¦ ¦ °--'}': } [1/0] {278} + ¦ °--')': ) [0/0] {279} + ¦--expr: call( [2/0] {280} + ¦ ¦--expr: call [0/0] {282} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {281} + ¦ ¦--'(': ( [0/2] {283} + ¦ ¦--expr: "test [1/0] {285} + ¦ ¦ °--STR_CONST: "test [0/0] {284} + ¦ ¦--',': , [0/1] {286} + ¦ ¦--expr: { + 1 [0/0] {287} + ¦ ¦ ¦--'{': { [0/2] {288} + ¦ ¦ ¦--expr: 1 [1/0] {290} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {289} + ¦ ¦ °--'}': } [1/0] {291} + ¦ °--')': ) [0/0] {292} + ¦--expr: call( [2/0] {293} + ¦ ¦--expr: call [0/0] {295} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {294} + ¦ ¦--'(': ( [0/0] {296} + ¦ ¦--expr: "test [0/0] {298} + ¦ ¦ °--STR_CONST: "test [0/0] {297} + ¦ ¦--',': , [0/5] {299} + ¦ ¦--expr: { + [1/0] {300} + ¦ ¦ ¦--'{': { [0/4] {301} + ¦ ¦ ¦--expr: 1 [1/2] {303} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {302} + ¦ ¦ °--'}': } [1/0] {304} + ¦ °--')': ) [0/0] {305} + ¦--expr: call( [2/0] {306} + ¦ ¦--expr: call [0/0] {308} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {307} + ¦ ¦--'(': ( [0/0] {309} + ¦ ¦--expr: "test [0/0] {311} + ¦ ¦ °--STR_CONST: "test [0/0] {310} + ¦ ¦--',': , [0/1] {312} + ¦ ¦--expr: { + 1 [0/0] {313} + ¦ ¦ ¦--'{': { [0/2] {314} + ¦ ¦ ¦--expr: 1 [1/1] {316} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {315} + ¦ ¦ °--'}': } [0/0] {317} + ¦ °--')': ) [1/0] {318} + ¦--expr: call( [2/0] {319} + ¦ ¦--expr: call [0/0] {321} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {320} + ¦ ¦--'(': ( [0/0] {322} + ¦ ¦--expr: { + 1 [0/0] {323} + ¦ ¦ ¦--'{': { [0/2] {324} + ¦ ¦ ¦--expr: 1 [1/0] {326} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {325} + ¦ ¦ °--'}': } [1/0] {327} + ¦ ¦--',': , [0/1] {328} + ¦ ¦--expr: a + b [0/0] {329} + ¦ ¦ ¦--expr: a [0/1] {331} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {330} + ¦ ¦ ¦--'+': + [0/1] {332} + ¦ ¦ °--expr: b [0/0] {334} + ¦ ¦ °--SYMBOL: b [0/0] {333} + ¦ ¦--',': , [0/1] {335} + ¦ ¦--expr: { 33 [0/0] {336} + ¦ ¦ ¦--'{': { [0/1] {337} + ¦ ¦ ¦--expr: 33 / [0/0] {338} + ¦ ¦ ¦ ¦--expr: 33 [0/1] {340} + ¦ ¦ ¦ ¦ °--NUM_CONST: 33 [0/0] {339} + ¦ ¦ ¦ ¦--'/': / [0/1] {341} + ¦ ¦ ¦ °--expr: f(c) [0/0] {342} + ¦ ¦ ¦ ¦--expr: f [0/0] {344} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: f [0/0] {343} + ¦ ¦ ¦ ¦--'(': ( [0/0] {345} + ¦ ¦ ¦ ¦--expr: c [0/0] {347} + ¦ ¦ ¦ ¦ °--SYMBOL: c [0/0] {346} + ¦ ¦ ¦ °--')': ) [0/0] {348} + ¦ ¦ °--'}': } [0/0] {349} + ¦ °--')': ) [0/0] {350} + ¦--expr: call( [2/0] {351} + ¦ ¦--expr: call [0/0] {353} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {352} + ¦ ¦--'(': ( [0/0] {354} + ¦ ¦--expr: {{ x [0/0] {355} + ¦ ¦ ¦--'{': { [0/0] {356} + ¦ ¦ ¦--expr: { x } [0/0] {357} + ¦ ¦ ¦ ¦--'{': { [0/1] {358} + ¦ ¦ ¦ ¦--expr: x [0/1] {360} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {359} + ¦ ¦ ¦ °--'}': } [0/0] {361} + ¦ ¦ °--'}': } [0/0] {362} + ¦ ¦--',': , [0/1] {363} + ¦ ¦--expr: {{ y} [0/0] {364} + ¦ ¦ ¦--'{': { [0/0] {365} + ¦ ¦ ¦--expr: { y} [0/0] {366} + ¦ ¦ ¦ ¦--'{': { [0/1] {367} + ¦ ¦ ¦ ¦--expr: y [0/0] {369} + ¦ ¦ ¦ ¦ °--SYMBOL: y [0/0] {368} + ¦ ¦ ¦ °--'}': } [0/0] {370} + ¦ ¦ °--'}': } [0/0] {371} + ¦ °--')': ) [0/0] {372} + ¦--expr: call( [1/0] {373} + ¦ ¦--expr: call [0/0] {375} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {374} + ¦ ¦--'(': ( [0/0] {376} + ¦ ¦--expr: {{ x [0/0] {377} + ¦ ¦ ¦--'{': { [0/0] {378} + ¦ ¦ ¦--expr: { x } 
[0/0] {379} + ¦ ¦ ¦ ¦--'{': { [0/1] {380} + ¦ ¦ ¦ ¦--expr: x [0/1] {382} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {381} + ¦ ¦ ¦ °--'}': } [0/0] {383} + ¦ ¦ °--'}': } [0/0] {384} + ¦ ¦--',': , [0/1] {385} + ¦ ¦--expr: {{ y} [0/0] {386} + ¦ ¦ ¦--'{': { [0/0] {387} + ¦ ¦ ¦--expr: { y} [0/2] {388} + ¦ ¦ ¦ ¦--'{': { [0/1] {389} + ¦ ¦ ¦ ¦--expr: y [0/0] {391} + ¦ ¦ ¦ ¦ °--SYMBOL: y [0/0] {390} + ¦ ¦ ¦ °--'}': } [0/0] {392} + ¦ ¦ °--'}': } [1/0] {393} + ¦ °--')': ) [0/0] {394} + ¦--expr: call( [1/0] {395} + ¦ ¦--expr: call [0/0] {397} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {396} + ¦ ¦--'(': ( [0/2] {398} + ¦ ¦--expr: {{ x [1/0] {399} + ¦ ¦ ¦--'{': { [0/0] {400} + ¦ ¦ ¦--expr: { x } [0/0] {401} + ¦ ¦ ¦ ¦--'{': { [0/1] {402} + ¦ ¦ ¦ ¦--expr: x [0/1] {404} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {403} + ¦ ¦ ¦ °--'}': } [0/0] {405} + ¦ ¦ °--'}': } [0/0] {406} + ¦ ¦--',': , [0/1] {407} + ¦ ¦--expr: {{ y} [0/0] {408} + ¦ ¦ ¦--'{': { [0/0] {409} + ¦ ¦ ¦--expr: { y} [0/0] {410} + ¦ ¦ ¦ ¦--'{': { [0/1] {411} + ¦ ¦ ¦ ¦--expr: y [0/0] {413} + ¦ ¦ ¦ ¦ °--SYMBOL: y [0/0] {412} + ¦ ¦ ¦ °--'}': } [0/0] {414} + ¦ ¦ °--'}': } [0/0] {415} + ¦ °--')': ) [0/0] {416} + ¦--expr: call( [2/0] {417} + ¦ ¦--expr: call [0/0] {419} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {418} + ¦ ¦--'(': ( [0/2] {420} + ¦ ¦--expr: {{ x [1/0] {421} + ¦ ¦ ¦--'{': { [0/0] {422} + ¦ ¦ ¦--expr: { x } [0/0] {423} + ¦ ¦ ¦ ¦--'{': { [0/1] {424} + ¦ ¦ ¦ ¦--expr: x [0/1] {426} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {425} + ¦ ¦ ¦ °--'}': } [0/0] {427} + ¦ ¦ °--'}': } [0/0] {428} + ¦ ¦--',': , [0/2] {429} + ¦ ¦--expr: {{ y} [1/0] {430} + ¦ ¦ ¦--expr: {{ y} [0/1] {431} + ¦ ¦ ¦ ¦--'{': { [0/0] {432} + ¦ ¦ ¦ ¦--expr: { y} [0/0] {433} + ¦ ¦ ¦ ¦ ¦--'{': { [0/1] {434} + ¦ ¦ ¦ ¦ ¦--expr: y [0/0] {436} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL: y [0/0] {435} + ¦ ¦ ¦ ¦ °--'}': } [0/0] {437} + ¦ ¦ ¦ °--'}': } [0/0] {438} + ¦ ¦ ¦--LEFT_ASSIGN: := [0/1] {439} + ¦ ¦ °--expr: 3 [0/0] {441} + ¦ ¦ °--NUM_CONST: 3 [0/0] {440} + ¦ ¦--',': , [0/1] {442} + ¦ ¦--expr: f(bk) [0/0] {443} + ¦ ¦ ¦--expr: f [0/0] {445} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: f [0/0] {444} + ¦ ¦ ¦--'(': ( [0/0] {446} + ¦ ¦ ¦--expr: bk [0/0] {448} + ¦ ¦ ¦ °--SYMBOL: bk [0/0] {447} + ¦ ¦ °--')': ) [0/0] {449} + ¦ °--')': ) [1/0] {450} + ¦--expr: call( [2/0] {451} + ¦ ¦--expr: call [0/0] {453} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {452} + ¦ ¦--'(': ( [0/0] {454} + ¦ ¦--expr: {{ + [0/0] {455} + ¦ ¦ ¦--'{': { [0/0] {456} + ¦ ¦ ¦--expr: { + # [0/0] {457} + ¦ ¦ ¦ ¦--'{': { [0/2] {458} + ¦ ¦ ¦ ¦--COMMENT: # [1/2] {459} + ¦ ¦ ¦ ¦--expr: 1 [1/0] {461} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {460} + ¦ ¦ ¦ °--'}': } [1/0] {462} + ¦ ¦ °--'}': } [0/0] {463} + ¦ °--')': ) [0/0] {464} + ¦--expr: call( [2/0] {465} + ¦ ¦--expr: call [0/0] {467} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {466} + ¦ ¦--'(': ( [0/0] {468} + ¦ ¦--expr: {{ + [0/0] {469} + ¦ ¦ ¦--'{': { [0/0] {470} + ¦ ¦ ¦--expr: { + # [0/0] {471} + ¦ ¦ ¦ ¦--'{': { [0/2] {472} + ¦ ¦ ¦ ¦--COMMENT: # [1/0] {473} + ¦ ¦ ¦ °--'}': } [1/0] {474} + ¦ ¦ °--'}': } [0/0] {475} + ¦ °--')': ) [0/0] {476} + °--expr: {{ + [3/0] {477} + ¦--'{': { [0/0] {478} + ¦--expr: { + # [0/0] {479} + ¦ ¦--'{': { [0/2] {480} + ¦ ¦--COMMENT: # [1/0] {481} + ¦ °--'}': } [1/0] {482} + °--'}': } [0/0] {483} diff --git a/tests/testthat/curly-curly/mixed-out.R b/tests/testthat/curly-curly/mixed-out.R new file mode 100644 index 000000000..a18134391 --- /dev/null +++ b/tests/testthat/curly-curly/mixed-out.R @@ -0,0 +1,113 @@ +## ............................................................................ 
+## line breaks #### +# not inserting line breaks +call({{ x }}) + +# removing line breaks +call({{ x }}) + +call({{ x }}) + +call({{ x }}) + +call({{ x }}) + +call( + {{ x }} +) + +## ............................................................................ +## spaces #### + +# not inserting spaces between braces +call({{ x }}) + +# removing spaces between braces +call({{ x }}) +call({{ x }}) +call({{ x }}) +call({{ x }}) + +# inserting spaces within {{ +call({{ x }}) +call({{ x }}) +call({{ x }}) + +# not removing spaces within {{ +call({{ x }}) + + +# combine spaces and line breaks +call({{ x }}) + +call({{ x }}) + +# not applicable when only one curly brace +{ + y +} +{ + 1 + 1 +} +{ + { + 1 + a + } + 1 +} # not curly-culry! + + +## ............................................................................ +## multiple #### +call("test", { + 1 +}) + +call( + "test", + { + 1 + } +) + +call("test", { + 1 +}) + +call("test", { + 1 +}) + +call( + { + 1 + }, + a + b, + { + 33 / f(c) + } +) + +call({{ x }}, {{ y }}) +call({{ x }}, {{ y }}) +call( + {{ x }}, {{ y }} +) + +call( + {{ x }}, + {{ y }} := 3, f(bk) +) + +call({{ + # + 1 +}}) + +call({{ + # +}}) + + +{{ + # +}} diff --git a/tests/testthat/escaping/basic-escape-in.R b/tests/testthat/escaping/basic-escape-in.R new file mode 100644 index 000000000..3ceeab053 --- /dev/null +++ b/tests/testthat/escaping/basic-escape-in.R @@ -0,0 +1,51 @@ +#' things +#' +#' @examples +#' call("\\.") +NULL + + +#' things +#' +#' @examples +#' call("\n") +NULL + +#' things +#' +#' @examples +#' call("\n") +#' ano("\\.", further = X) +NULL + + +#' things +#' +#' @examples +#' call('\n') +#' ano("\\.", further = X) +NULL + +'single quotes with +embedded and \n not embedded line breaks' + +x <- ' 2' # there is a tab emebbed (created with writeLines("x <- '\t2'")) + +x <- '\001' +'\x01' + +"\001" +'\001' + +#' things +#' +#' @examplesIf N +#' call("\n") +#' ano("\\.", further = X) +NULL + +#' things +#' +#' @examplesIf call("\n") +#' ano("\\.", further = X) +NULL diff --git a/tests/testthat/escaping/basic-escape-in_tree b/tests/testthat/escaping/basic-escape-in_tree new file mode 100644 index 000000000..8083077fa --- /dev/null +++ b/tests/testthat/escaping/basic-escape-in_tree @@ -0,0 +1,61 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' th [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' @e [1/0] {3} + ¦--COMMENT: #' ca [1/0] {4} + ¦--expr: NULL [1/0] {6} + ¦ °--NULL_CONST: NULL [0/0] {5} + ¦--COMMENT: #' th [3/0] {7} + ¦--COMMENT: #' [1/0] {8} + ¦--COMMENT: #' @e [1/0] {9} + ¦--COMMENT: #' ca [1/0] {10} + ¦--expr: NULL [1/0] {12} + ¦ °--NULL_CONST: NULL [0/0] {11} + ¦--COMMENT: #' th [2/0] {13} + ¦--COMMENT: #' [1/0] {14} + ¦--COMMENT: #' @e [1/0] {15} + ¦--COMMENT: #' ca [1/0] {16} + ¦--COMMENT: #' an [1/0] {17} + ¦--expr: NULL [1/0] {19} + ¦ °--NULL_CONST: NULL [0/0] {18} + ¦--COMMENT: #' th [3/0] {20} + ¦--COMMENT: #' [1/0] {21} + ¦--COMMENT: #' @e [1/0] {22} + ¦--COMMENT: #' ca [1/0] {23} + ¦--COMMENT: #' an [1/0] {24} + ¦--expr: NULL [1/0] {26} + ¦ °--NULL_CONST: NULL [0/0] {25} + ¦--expr: 'sing [2/0] {28} + ¦ °--STR_CONST: 'sing [0/0] {27} + ¦--expr: x <- [2/1] {29} + ¦ ¦--expr: x [0/1] {31} + ¦ ¦ °--SYMBOL: x [0/0] {30} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {32} + ¦ °--expr: ' 2' [0/0] {34} + ¦ °--STR_CONST: ' 2' [0/0] {33} + ¦--COMMENT: # the [0/0] {35} + ¦--expr: x <- [2/0] {36} + ¦ ¦--expr: x [0/1] {38} + ¦ ¦ °--SYMBOL: x [0/0] {37} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {39} + ¦ °--expr: '\001 [0/0] {41} + ¦ °--STR_CONST: '\001 
[0/0] {40} + ¦--expr: '\x01 [1/0] {43} + ¦ °--STR_CONST: '\x01 [0/0] {42} + ¦--expr: "\001 [2/0] {45} + ¦ °--STR_CONST: "\001 [0/0] {44} + ¦--expr: '\001 [1/0] {47} + ¦ °--STR_CONST: '\001 [0/0] {46} + ¦--COMMENT: #' th [2/0] {48} + ¦--COMMENT: #' [1/0] {49} + ¦--COMMENT: #' @e [1/0] {50} + ¦--COMMENT: #' ca [1/0] {51} + ¦--COMMENT: #' an [1/0] {52} + ¦--expr: NULL [1/0] {54} + ¦ °--NULL_CONST: NULL [0/0] {53} + ¦--COMMENT: #' th [2/0] {55} + ¦--COMMENT: #' [1/0] {56} + ¦--COMMENT: #' @e [1/0] {57} + ¦--COMMENT: #' an [1/0] {58} + °--expr: NULL [1/0] {60} + °--NULL_CONST: NULL [0/0] {59} diff --git a/tests/testthat/escaping/basic-escape-out.R b/tests/testthat/escaping/basic-escape-out.R new file mode 100644 index 000000000..c35989e3f --- /dev/null +++ b/tests/testthat/escaping/basic-escape-out.R @@ -0,0 +1,51 @@ +#' things +#' +#' @examples +#' call("\\.") +NULL + + +#' things +#' +#' @examples +#' call("\n") +NULL + +#' things +#' +#' @examples +#' call("\n") +#' ano("\\.", further = X) +NULL + + +#' things +#' +#' @examples +#' call("\n") +#' ano("\\.", further = X) +NULL + +"single quotes with +embedded and \n not embedded line breaks" + +x <- " 2" # there is a tab emebbed (created with writeLines("x <- '\t2'")) + +x <- "\001" +"\x01" + +"\001" +"\001" + +#' things +#' +#' @examplesIf N +#' call("\n") +#' ano("\\.", further = X) +NULL + +#' things +#' +#' @examplesIf call("\n") +#' ano("\\.", further = X) +NULL diff --git a/tests/testthat/escaping/fail-parsing-1-in.R b/tests/testthat/escaping/fail-parsing-1-in.R new file mode 100644 index 000000000..7acb65002 --- /dev/null +++ b/tests/testthat/escaping/fail-parsing-1-in.R @@ -0,0 +1,7 @@ +#' Example +#' +#' @examples +#' fun() { +#' +#' } +NULL diff --git a/tests/testthat/escaping/fail-parsing-1-in_tree b/tests/testthat/escaping/fail-parsing-1-in_tree new file mode 100644 index 000000000..120f4b968 --- /dev/null +++ b/tests/testthat/escaping/fail-parsing-1-in_tree @@ -0,0 +1,9 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' Ex [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' @e [1/0] {3} + ¦--COMMENT: #' fu [1/0] {4} + ¦--COMMENT: #' [1/0] {5} + ¦--COMMENT: #' } [1/0] {6} + °--expr: NULL [1/0] {8} + °--NULL_CONST: NULL [0/0] {7} diff --git a/tests/testthat/escaping/fail-parsing-2-in.R b/tests/testthat/escaping/fail-parsing-2-in.R new file mode 100644 index 000000000..c89ec52aa --- /dev/null +++ b/tests/testthat/escaping/fail-parsing-2-in.R @@ -0,0 +1,5 @@ +#' Example +#' +#' @examples +#' x <- +NULL diff --git a/tests/testthat/escaping/fail-parsing-2-in_tree b/tests/testthat/escaping/fail-parsing-2-in_tree new file mode 100644 index 000000000..3ca043b45 --- /dev/null +++ b/tests/testthat/escaping/fail-parsing-2-in_tree @@ -0,0 +1,7 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' Ex [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' @e [1/0] {3} + ¦--COMMENT: #' x [1/0] {4} + °--expr: NULL [1/0] {6} + °--NULL_CONST: NULL [0/0] {5} diff --git a/tests/testthat/escaping/fail-parsing-3-in.R b/tests/testthat/escaping/fail-parsing-3-in.R new file mode 100644 index 000000000..aeb26e197 --- /dev/null +++ b/tests/testthat/escaping/fail-parsing-3-in.R @@ -0,0 +1,5 @@ +#' Example +#' +#' @examples +#' 1 _ +NULL diff --git a/tests/testthat/escaping/fail-parsing-3-in_tree b/tests/testthat/escaping/fail-parsing-3-in_tree new file mode 100644 index 000000000..3d33838b7 --- /dev/null +++ b/tests/testthat/escaping/fail-parsing-3-in_tree @@ -0,0 +1,7 @@ +ROOT (token: short_text 
[lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' Ex [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' @e [1/0] {3} + ¦--COMMENT: #' 1 [1/0] {4} + °--expr: NULL [1/0] {6} + °--NULL_CONST: NULL [0/0] {5} diff --git a/tests/testthat/escaping/fail-parsing-4-in.R b/tests/testthat/escaping/fail-parsing-4-in.R new file mode 100644 index 000000000..212bc7988 --- /dev/null +++ b/tests/testthat/escaping/fail-parsing-4-in.R @@ -0,0 +1,5 @@ +#' Example +#' +#' @examples +#' 1 + } +NULL diff --git a/tests/testthat/escaping/fail-parsing-4-in_tree b/tests/testthat/escaping/fail-parsing-4-in_tree new file mode 100644 index 000000000..3d33838b7 --- /dev/null +++ b/tests/testthat/escaping/fail-parsing-4-in_tree @@ -0,0 +1,7 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' Ex [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' @e [1/0] {3} + ¦--COMMENT: #' 1 [1/0] {4} + °--expr: NULL [1/0] {6} + °--NULL_CONST: NULL [0/0] {5} diff --git a/tests/testthat/fun_dec/fun_dec_scope_spaces-in.R b/tests/testthat/fun_dec/fun_dec_scope_spaces-in.R new file mode 100644 index 000000000..4b7ad4dc7 --- /dev/null +++ b/tests/testthat/fun_dec/fun_dec_scope_spaces-in.R @@ -0,0 +1,27 @@ +a <- function(x, # +y +) { + x - 1 +} + + +a <- function(x, # + y) # +{ + x +} + +function(a = + b, + c) {} + +function(a = + b, + c) { + +} + +function(a = + b, + c +) {} diff --git a/tests/testthat/fun_dec/fun_dec_scope_spaces-in_tree b/tests/testthat/fun_dec/fun_dec_scope_spaces-in_tree new file mode 100644 index 000000000..4fad90015 --- /dev/null +++ b/tests/testthat/fun_dec/fun_dec_scope_spaces-in_tree @@ -0,0 +1,83 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: a <- [0/0] {1} + ¦ ¦--expr: a [0/1] {3} + ¦ ¦ °--SYMBOL: a [0/0] {2} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {4} + ¦ °--expr: funct [0/0] {5} + ¦ ¦--FUNCTION: funct [0/0] {6} + ¦ ¦--'(': ( [0/0] {7} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {8} + ¦ ¦--',': , [0/1] {9} + ¦ ¦--COMMENT: # [0/0] {10} + ¦ ¦--SYMBOL_FORMALS: y [1/0] {11} + ¦ ¦--')': ) [1/1] {12} + ¦ °--expr: { + x [0/0] {13} + ¦ ¦--'{': { [0/2] {14} + ¦ ¦--expr: x - 1 [1/0] {15} + ¦ ¦ ¦--expr: x [0/1] {17} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {16} + ¦ ¦ ¦--'-': - [0/1] {18} + ¦ ¦ °--expr: 1 [0/0] {20} + ¦ ¦ °--NUM_CONST: 1 [0/0] {19} + ¦ °--'}': } [1/0] {21} + ¦--expr: a <- [3/0] {22} + ¦ ¦--expr: a [0/1] {24} + ¦ ¦ °--SYMBOL: a [0/0] {23} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {25} + ¦ °--expr: funct [0/0] {26} + ¦ ¦--FUNCTION: funct [0/0] {27} + ¦ ¦--'(': ( [0/0] {28} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {29} + ¦ ¦--',': , [0/1] {30} + ¦ ¦--COMMENT: # [0/2] {31} + ¦ ¦--SYMBOL_FORMALS: y [1/0] {32} + ¦ ¦--')': ) [0/1] {33} + ¦ ¦--COMMENT: # [0/0] {34} + ¦ °--expr: { + x [1/0] {35} + ¦ ¦--'{': { [0/2] {36} + ¦ ¦--expr: x [1/0] {38} + ¦ ¦ °--SYMBOL: x [0/0] {37} + ¦ °--'}': } [1/0] {39} + ¦--expr: funct [2/0] {40} + ¦ ¦--FUNCTION: funct [0/0] {41} + ¦ ¦--'(': ( [0/0] {42} + ¦ ¦--SYMBOL_FORMALS: a [0/1] {43} + ¦ ¦--EQ_FORMALS: = [0/11] {44} + ¦ ¦--expr: b [1/0] {46} + ¦ ¦ °--SYMBOL: b [0/0] {45} + ¦ ¦--',': , [0/9] {47} + ¦ ¦--SYMBOL_FORMALS: c [1/0] {48} + ¦ ¦--')': ) [0/1] {49} + ¦ °--expr: {} [0/0] {50} + ¦ ¦--'{': { [0/0] {51} + ¦ °--'}': } [0/0] {52} + ¦--expr: funct [2/0] {53} + ¦ ¦--FUNCTION: funct [0/0] {54} + ¦ ¦--'(': ( [0/0] {55} + ¦ ¦--SYMBOL_FORMALS: a [0/1] {56} + ¦ ¦--EQ_FORMALS: = [0/11] {57} + ¦ ¦--expr: b [1/0] {59} + ¦ ¦ °--SYMBOL: b [0/0] {58} + ¦ ¦--',': , [0/9] {60} + ¦ ¦--SYMBOL_FORMALS: c [1/0] {61} + ¦ ¦--')': ) [0/1] {62} + ¦ °--expr: { + +} [0/0] {63} + ¦ ¦--'{': { [0/0] {64} + ¦ 
°--'}': } [2/0] {65} + °--expr: funct [2/0] {66} + ¦--FUNCTION: funct [0/0] {67} + ¦--'(': ( [0/0] {68} + ¦--SYMBOL_FORMALS: a [0/1] {69} + ¦--EQ_FORMALS: = [0/11] {70} + ¦--expr: b [1/0] {72} + ¦ °--SYMBOL: b [0/0] {71} + ¦--',': , [0/9] {73} + ¦--SYMBOL_FORMALS: c [1/0] {74} + ¦--')': ) [1/1] {75} + °--expr: {} [0/0] {76} + ¦--'{': { [0/0] {77} + °--'}': } [0/0] {78} diff --git a/tests/testthat/fun_dec/fun_dec_scope_spaces-out.R b/tests/testthat/fun_dec/fun_dec_scope_spaces-out.R new file mode 100644 index 000000000..4b7ad4dc7 --- /dev/null +++ b/tests/testthat/fun_dec/fun_dec_scope_spaces-out.R @@ -0,0 +1,27 @@ +a <- function(x, # +y +) { + x - 1 +} + + +a <- function(x, # + y) # +{ + x +} + +function(a = + b, + c) {} + +function(a = + b, + c) { + +} + +function(a = + b, + c +) {} diff --git a/tests/testthat/fun_dec/line_break_fun_dec-in.R b/tests/testthat/fun_dec/line_break_fun_dec-in.R new file mode 100644 index 000000000..4bf36e4ac --- /dev/null +++ b/tests/testthat/fun_dec/line_break_fun_dec-in.R @@ -0,0 +1,48 @@ +a <- function(x, # + y) { + x - 1 +} + + +a <- function(x, # + y) # +{ + x +} + +a <- function(x, # + y # +) { + y +} + + +a <- function(x, + y) { + x - 1 +} + +a <- function(x, + # + y) { + x - 1 +} + +a <- function(x, + + y) { + x - 1 +} + + +a <- function( + x, + y) { + x - 1 +} + +a <- function( # + x, + y) { + x - 1 +} diff --git a/tests/testthat/fun_dec/line_break_fun_dec-in_tree b/tests/testthat/fun_dec/line_break_fun_dec-in_tree new file mode 100644 index 000000000..4c1378941 --- /dev/null +++ b/tests/testthat/fun_dec/line_break_fun_dec-in_tree @@ -0,0 +1,168 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: a <- [0/0] {1} + ¦ ¦--expr: a [0/1] {3} + ¦ ¦ °--SYMBOL: a [0/0] {2} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {4} + ¦ °--expr: funct [0/0] {5} + ¦ ¦--FUNCTION: funct [0/0] {6} + ¦ ¦--'(': ( [0/0] {7} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {8} + ¦ ¦--',': , [0/1] {9} + ¦ ¦--COMMENT: # [0/14] {10} + ¦ ¦--SYMBOL_FORMALS: y [1/0] {11} + ¦ ¦--')': ) [0/1] {12} + ¦ °--expr: { + x [0/0] {13} + ¦ ¦--'{': { [0/2] {14} + ¦ ¦--expr: x - 1 [1/0] {15} + ¦ ¦ ¦--expr: x [0/1] {17} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {16} + ¦ ¦ ¦--'-': - [0/1] {18} + ¦ ¦ °--expr: 1 [0/0] {20} + ¦ ¦ °--NUM_CONST: 1 [0/0] {19} + ¦ °--'}': } [1/0] {21} + ¦--expr: a <- [3/0] {22} + ¦ ¦--expr: a [0/1] {24} + ¦ ¦ °--SYMBOL: a [0/0] {23} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {25} + ¦ °--expr: funct [0/0] {26} + ¦ ¦--FUNCTION: funct [0/0] {27} + ¦ ¦--'(': ( [0/0] {28} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {29} + ¦ ¦--',': , [0/1] {30} + ¦ ¦--COMMENT: # [0/14] {31} + ¦ ¦--SYMBOL_FORMALS: y [1/0] {32} + ¦ ¦--')': ) [0/1] {33} + ¦ ¦--COMMENT: # [0/0] {34} + ¦ °--expr: { + x [1/0] {35} + ¦ ¦--'{': { [0/2] {36} + ¦ ¦--expr: x [1/0] {38} + ¦ ¦ °--SYMBOL: x [0/0] {37} + ¦ °--'}': } [1/0] {39} + ¦--expr: a <- [2/0] {40} + ¦ ¦--expr: a [0/1] {42} + ¦ ¦ °--SYMBOL: a [0/0] {41} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {43} + ¦ °--expr: funct [0/0] {44} + ¦ ¦--FUNCTION: funct [0/0] {45} + ¦ ¦--'(': ( [0/0] {46} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {47} + ¦ ¦--',': , [0/1] {48} + ¦ ¦--COMMENT: # [0/14] {49} + ¦ ¦--SYMBOL_FORMALS: y [1/1] {50} + ¦ ¦--COMMENT: # [0/0] {51} + ¦ ¦--')': ) [1/1] {52} + ¦ °--expr: { + y [0/0] {53} + ¦ ¦--'{': { [0/2] {54} + ¦ ¦--expr: y [1/0] {56} + ¦ ¦ °--SYMBOL: y [0/0] {55} + ¦ °--'}': } [1/0] {57} + ¦--expr: a <- [3/0] {58} + ¦ ¦--expr: a [0/1] {60} + ¦ ¦ °--SYMBOL: a [0/0] {59} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {61} + ¦ °--expr: funct [0/0] {62} + ¦ ¦--FUNCTION: funct [0/0] {63} + ¦ ¦--'(': ( [0/0] {64} + ¦ ¦--SYMBOL_FORMALS: x 
[0/0] {65} + ¦ ¦--',': , [0/14] {66} + ¦ ¦--SYMBOL_FORMALS: y [1/0] {67} + ¦ ¦--')': ) [0/1] {68} + ¦ °--expr: { + x [0/0] {69} + ¦ ¦--'{': { [0/2] {70} + ¦ ¦--expr: x - 1 [1/0] {71} + ¦ ¦ ¦--expr: x [0/1] {73} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {72} + ¦ ¦ ¦--'-': - [0/1] {74} + ¦ ¦ °--expr: 1 [0/0] {76} + ¦ ¦ °--NUM_CONST: 1 [0/0] {75} + ¦ °--'}': } [1/0] {77} + ¦--expr: a <- [2/0] {78} + ¦ ¦--expr: a [0/1] {80} + ¦ ¦ °--SYMBOL: a [0/0] {79} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {81} + ¦ °--expr: funct [0/0] {82} + ¦ ¦--FUNCTION: funct [0/0] {83} + ¦ ¦--'(': ( [0/0] {84} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {85} + ¦ ¦--',': , [0/14] {86} + ¦ ¦--COMMENT: # [1/14] {87} + ¦ ¦--SYMBOL_FORMALS: y [1/0] {88} + ¦ ¦--')': ) [0/1] {89} + ¦ °--expr: { + x [0/0] {90} + ¦ ¦--'{': { [0/2] {91} + ¦ ¦--expr: x - 1 [1/0] {92} + ¦ ¦ ¦--expr: x [0/1] {94} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {93} + ¦ ¦ ¦--'-': - [0/1] {95} + ¦ ¦ °--expr: 1 [0/0] {97} + ¦ ¦ °--NUM_CONST: 1 [0/0] {96} + ¦ °--'}': } [1/0] {98} + ¦--expr: a <- [2/0] {99} + ¦ ¦--expr: a [0/1] {101} + ¦ ¦ °--SYMBOL: a [0/0] {100} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {102} + ¦ °--expr: funct [0/0] {103} + ¦ ¦--FUNCTION: funct [0/0] {104} + ¦ ¦--'(': ( [0/0] {105} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {106} + ¦ ¦--',': , [0/14] {107} + ¦ ¦--SYMBOL_FORMALS: y [2/0] {108} + ¦ ¦--')': ) [0/1] {109} + ¦ °--expr: { + x [0/0] {110} + ¦ ¦--'{': { [0/2] {111} + ¦ ¦--expr: x - 1 [1/0] {112} + ¦ ¦ ¦--expr: x [0/1] {114} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {113} + ¦ ¦ ¦--'-': - [0/1] {115} + ¦ ¦ °--expr: 1 [0/0] {117} + ¦ ¦ °--NUM_CONST: 1 [0/0] {116} + ¦ °--'}': } [1/0] {118} + ¦--expr: a <- [3/0] {119} + ¦ ¦--expr: a [0/1] {121} + ¦ ¦ °--SYMBOL: a [0/0] {120} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {122} + ¦ °--expr: funct [0/0] {123} + ¦ ¦--FUNCTION: funct [0/0] {124} + ¦ ¦--'(': ( [0/14] {125} + ¦ ¦--SYMBOL_FORMALS: x [1/0] {126} + ¦ ¦--',': , [0/14] {127} + ¦ ¦--SYMBOL_FORMALS: y [1/0] {128} + ¦ ¦--')': ) [0/1] {129} + ¦ °--expr: { + x [0/0] {130} + ¦ ¦--'{': { [0/2] {131} + ¦ ¦--expr: x - 1 [1/0] {132} + ¦ ¦ ¦--expr: x [0/1] {134} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {133} + ¦ ¦ ¦--'-': - [0/1] {135} + ¦ ¦ °--expr: 1 [0/0] {137} + ¦ ¦ °--NUM_CONST: 1 [0/0] {136} + ¦ °--'}': } [1/0] {138} + °--expr: a <- [2/0] {139} + ¦--expr: a [0/1] {141} + ¦ °--SYMBOL: a [0/0] {140} + ¦--LEFT_ASSIGN: <- [0/1] {142} + °--expr: funct [0/0] {143} + ¦--FUNCTION: funct [0/0] {144} + ¦--'(': ( [0/1] {145} + ¦--COMMENT: # [0/2] {146} + ¦--SYMBOL_FORMALS: x [1/0] {147} + ¦--',': , [0/2] {148} + ¦--SYMBOL_FORMALS: y [1/0] {149} + ¦--')': ) [0/1] {150} + °--expr: { + x [0/0] {151} + ¦--'{': { [0/2] {152} + ¦--expr: x - 1 [1/0] {153} + ¦ ¦--expr: x [0/1] {155} + ¦ ¦ °--SYMBOL: x [0/0] {154} + ¦ ¦--'-': - [0/1] {156} + ¦ °--expr: 1 [0/0] {158} + ¦ °--NUM_CONST: 1 [0/0] {157} + °--'}': } [1/0] {159} diff --git a/tests/testthat/fun_dec/line_break_fun_dec-out.R b/tests/testthat/fun_dec/line_break_fun_dec-out.R new file mode 100644 index 000000000..f9d494561 --- /dev/null +++ b/tests/testthat/fun_dec/line_break_fun_dec-out.R @@ -0,0 +1,47 @@ +a <- function(x, # + y) { + x - 1 +} + + +a <- function(x, # + y) # +{ + x +} + +a <- function(x, # + y # +) { + y +} + + +a <- function(x, + y) { + x - 1 +} + +a <- function(x, + # + y) { + x - 1 +} + +a <- function(x, + y) { + x - 1 +} + + +a <- function(x, + y) { + x - 1 +} + +a <- function( + # + x, + y) { + x - 1 +} diff --git a/tests/testthat/helper-viridis.R b/tests/testthat/helper-viridis.R deleted file mode 100644 index 4d3108687..000000000 --- a/tests/testthat/helper-viridis.R +++ /dev/null @@ -1 +0,0 @@ 
-suppressWarnings(requireNamespace("DiagrammeR")) diff --git a/tests/testthat/helpers-devel-options.R b/tests/testthat/helpers-devel-options.R new file mode 100644 index 000000000..142f51be3 --- /dev/null +++ b/tests/testthat/helpers-devel-options.R @@ -0,0 +1,4 @@ +cat("In tests/testthat/helpers-devel-options: ") +cache_deactivate() + +styler_version <- utils::packageDescription("styler", fields = "Version") diff --git a/tests/testthat/indention_curly_brackets/custom-in.R b/tests/testthat/indention_curly_brackets/custom-in.R new file mode 100644 index 000000000..40156b509 --- /dev/null +++ b/tests/testthat/indention_curly_brackets/custom-in.R @@ -0,0 +1,10 @@ + +value <- 5 +if (value > 0) + print(value) + + + +if (value > 0) { +print(value) +} diff --git a/tests/testthat/indention_curly_brackets/custom-in_tree b/tests/testthat/indention_curly_brackets/custom-in_tree new file mode 100644 index 000000000..454cd1522 --- /dev/null +++ b/tests/testthat/indention_curly_brackets/custom-in_tree @@ -0,0 +1,45 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: value [0/0] {1} + ¦ ¦--expr: value [0/1] {3} + ¦ ¦ °--SYMBOL: value [0/0] {2} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {4} + ¦ °--expr: 5 [0/0] {6} + ¦ °--NUM_CONST: 5 [0/0] {5} + ¦--expr: if (v [1/0] {7} + ¦ ¦--IF: if [0/1] {8} + ¦ ¦--'(': ( [0/0] {9} + ¦ ¦--expr: value [0/0] {10} + ¦ ¦ ¦--expr: value [0/1] {12} + ¦ ¦ ¦ °--SYMBOL: value [0/0] {11} + ¦ ¦ ¦--GT: > [0/1] {13} + ¦ ¦ °--expr: 0 [0/0] {15} + ¦ ¦ °--NUM_CONST: 0 [0/0] {14} + ¦ ¦--')': ) [0/4] {16} + ¦ °--expr: print [1/0] {17} + ¦ ¦--expr: print [0/0] {19} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: print [0/0] {18} + ¦ ¦--'(': ( [0/0] {20} + ¦ ¦--expr: value [0/0] {22} + ¦ ¦ °--SYMBOL: value [0/0] {21} + ¦ °--')': ) [0/0] {23} + °--expr: if (v [4/0] {24} + ¦--IF: if [0/1] {25} + ¦--'(': ( [0/0] {26} + ¦--expr: value [0/0] {27} + ¦ ¦--expr: value [0/1] {29} + ¦ ¦ °--SYMBOL: value [0/0] {28} + ¦ ¦--GT: > [0/1] {30} + ¦ °--expr: 0 [0/0] {32} + ¦ °--NUM_CONST: 0 [0/0] {31} + ¦--')': ) [0/1] {33} + °--expr: { +pri [0/0] {34} + ¦--'{': { [0/0] {35} + ¦--expr: print [1/0] {36} + ¦ ¦--expr: print [0/0] {38} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: print [0/0] {37} + ¦ ¦--'(': ( [0/0] {39} + ¦ ¦--expr: value [0/0] {41} + ¦ ¦ °--SYMBOL: value [0/0] {40} + ¦ °--')': ) [0/0] {42} + °--'}': } [1/0] {43} diff --git a/tests/testthat/indention_curly_brackets/custom-out.R b/tests/testthat/indention_curly_brackets/custom-out.R new file mode 100644 index 000000000..f2612d575 --- /dev/null +++ b/tests/testthat/indention_curly_brackets/custom-out.R @@ -0,0 +1,10 @@ +value <- 5 +if (value > 0) { + print(value) +} + + + +if (value > 0) { + print(value) +} diff --git a/tests/testthat/indention_curly_brackets/multi_line_curly_only-in_tree b/tests/testthat/indention_curly_brackets/multi_line_curly_only-in_tree index 687eb554a..1205a1a3f 100644 --- a/tests/testthat/indention_curly_brackets/multi_line_curly_only-in_tree +++ b/tests/testthat/indention_curly_brackets/multi_line_curly_only-in_tree @@ -1,26 +1,27 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} + °--expr: { + [0/0] {1} ¦--'{': { [0/9] {2} - ¦--expr: [1/0] {3} + ¦--expr: {1 + [1/0] {3} ¦ ¦--'{': { [0/0] {4} - ¦ ¦--expr: [0/0] {5} - ¦ ¦ ¦--expr: [0/1] {7} + ¦ ¦--expr: 1 + 3 [0/0] {5} + ¦ ¦ ¦--expr: 1 [0/1] {7} ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {6} ¦ ¦ ¦--'+': + [0/1] {8} - ¦ ¦ °--expr: [0/0] {10} + ¦ ¦ °--expr: 3 [0/0] {10} ¦ ¦ °--NUM_CONST: 3 [0/0] {9} ¦ °--'}': } [0/0] {11} - ¦--expr: [1/6] {12} + ¦--expr: {2 + [1/6] {12} ¦ ¦--'{': { 
[0/0] {13} - ¦ ¦--expr: [0/0] {14} - ¦ ¦ ¦--expr: [0/1] {16} + ¦ ¦--expr: 2 + s [0/0] {14} + ¦ ¦ ¦--expr: 2 [0/1] {16} ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {15} ¦ ¦ ¦--'+': + [0/1] {17} - ¦ ¦ °--expr: [0/0] {18} - ¦ ¦ ¦--expr: [0/0] {20} + ¦ ¦ °--expr: sin(p [0/0] {18} + ¦ ¦ ¦--expr: sin [0/0] {20} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: sin [0/0] {19} ¦ ¦ ¦--'(': ( [0/0] {21} - ¦ ¦ ¦--expr: [0/0] {23} + ¦ ¦ ¦--expr: pi [0/0] {23} ¦ ¦ ¦ °--SYMBOL: pi [0/0] {22} ¦ ¦ °--')': ) [0/0] {24} ¦ °--'}': } [0/0] {25} diff --git a/tests/testthat/indention_curly_brackets/multi_line_curly_round_only-in_tree b/tests/testthat/indention_curly_brackets/multi_line_curly_round_only-in_tree index ba8432bc1..8c1eda02c 100644 --- a/tests/testthat/indention_curly_brackets/multi_line_curly_round_only-in_tree +++ b/tests/testthat/indention_curly_brackets/multi_line_curly_round_only-in_tree @@ -1,58 +1,61 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/1] {3} + °--expr: a <- [0/0] {1} + ¦--expr: a [0/1] {3} ¦ °--SYMBOL: a [0/0] {2} ¦--LEFT_ASSIGN: <- [0/1] {4} - °--expr: [0/0] {5} + °--expr: funct [0/0] {5} ¦--FUNCTION: funct [0/0] {6} ¦--'(': ( [0/0] {7} ¦--SYMBOL_FORMALS: x [0/0] {8} ¦--')': ) [0/1] {9} - °--expr: [0/0] {10} + °--expr: { +x < [0/0] {10} ¦--'{': { [0/0] {11} - ¦--expr: [1/0] {12} - ¦ ¦--expr: [0/1] {14} + ¦--expr: x <- [1/0] {12} + ¦ ¦--expr: x [0/1] {14} ¦ ¦ °--SYMBOL: x [0/0] {13} ¦ ¦--LEFT_ASSIGN: <- [0/1] {15} - ¦ °--expr: [0/0] {16} - ¦ ¦--expr: [0/0] {18} + ¦ °--expr: c(1, + [0/0] {16} + ¦ ¦--expr: c [0/0] {18} ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {17} ¦ ¦--'(': ( [0/0] {19} - ¦ ¦--expr: [0/0] {21} + ¦ ¦--expr: 1 [0/0] {21} ¦ ¦ °--NUM_CONST: 1 [0/0] {20} ¦ ¦--',': , [0/7] {22} - ¦ ¦--expr: [1/0] {23} - ¦ ¦ ¦--expr: [0/1] {25} + ¦ ¦--expr: 2 + 3 [1/0] {23} + ¦ ¦ ¦--expr: 2 [0/1] {25} ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {24} ¦ ¦ ¦--'+': + [0/1] {26} - ¦ ¦ °--expr: [0/0] {28} + ¦ ¦ °--expr: 3 [0/0] {28} ¦ ¦ °--NUM_CONST: 3 [0/0] {27} ¦ ¦--',': , [0/0] {29} - ¦ ¦--expr: [1/0] {30} - ¦ ¦ ¦--expr: [0/0] {32} + ¦ ¦--expr: sin(p [1/0] {30} + ¦ ¦ ¦--expr: sin [0/0] {32} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: sin [0/0] {31} ¦ ¦ ¦--'(': ( [0/0] {33} - ¦ ¦ ¦--expr: [0/0] {35} + ¦ ¦ ¦--expr: pi [0/0] {35} ¦ ¦ ¦ °--SYMBOL: pi [0/0] {34} ¦ ¦ °--')': ) [0/0] {36} ¦ °--')': ) [0/0] {37} - ¦--expr: [2/8] {38} + ¦--expr: if(x [2/8] {38} ¦ ¦--IF: if [0/0] {39} ¦ ¦--'(': ( [0/0] {40} - ¦ ¦--expr: [0/0] {41} - ¦ ¦ ¦--expr: [0/1] {43} + ¦ ¦--expr: x > 1 [0/0] {41} + ¦ ¦ ¦--expr: x [0/1] {43} ¦ ¦ ¦ °--SYMBOL: x [0/0] {42} ¦ ¦ ¦--GT: > [0/1] {44} - ¦ ¦ °--expr: [0/0] {46} + ¦ ¦ °--expr: 10 [0/0] {46} ¦ ¦ °--NUM_CONST: 10 [0/0] {45} ¦ ¦--')': ) [0/1] {47} - ¦ °--expr: [0/0] {48} + ¦ °--expr: { + [0/0] {48} ¦ ¦--'{': { [0/4] {49} - ¦ ¦--expr: [1/16] {50} - ¦ ¦ ¦--expr: [0/0] {52} + ¦ ¦--expr: retur [1/16] {50} + ¦ ¦ ¦--expr: retur [0/0] {52} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: retur [0/0] {51} ¦ ¦ ¦--'(': ( [0/0] {53} - ¦ ¦ ¦--expr: [0/0] {55} + ¦ ¦ ¦--expr: "done [0/0] {55} ¦ ¦ ¦ °--STR_CONST: "done [0/0] {54} ¦ ¦ °--')': ) [0/0] {56} ¦ °--'}': } [1/0] {57} diff --git a/tests/testthat/indention_curly_brackets/multi_line_curly_round_spacing-in_tree b/tests/testthat/indention_curly_brackets/multi_line_curly_round_spacing-in_tree index 45cc88db3..67285abef 100644 --- a/tests/testthat/indention_curly_brackets/multi_line_curly_round_spacing-in_tree +++ b/tests/testthat/indention_curly_brackets/multi_line_curly_round_spacing-in_tree @@ -1,58 +1,61 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - 
°--expr: [0/0] {1} - ¦--expr: [0/0] {3} + °--expr: b<-fu [0/0] {1} + ¦--expr: b [0/0] {3} ¦ °--SYMBOL: b [0/0] {2} ¦--LEFT_ASSIGN: <- [0/0] {4} - °--expr: [0/0] {5} + °--expr: funct [0/0] {5} ¦--FUNCTION: funct [0/0] {6} ¦--'(': ( [0/0] {7} ¦--SYMBOL_FORMALS: x [0/3] {8} ¦--')': ) [0/0] {9} - °--expr: [0/0] {10} + °--expr: { + x [0/0] {10} ¦--'{': { [0/2] {11} - ¦--expr: [1/2] {12} - ¦ ¦--expr: [0/1] {14} + ¦--expr: x <- [1/2] {12} + ¦ ¦--expr: x [0/1] {14} ¦ ¦ °--SYMBOL: x [0/0] {13} ¦ ¦--LEFT_ASSIGN: <- [0/1] {15} - ¦ °--expr: [0/0] {16} - ¦ ¦--expr: [0/0] {18} + ¦ °--expr: c(1, + [0/0] {16} + ¦ ¦--expr: c [0/0] {18} ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {17} ¦ ¦--'(': ( [0/0] {19} - ¦ ¦--expr: [0/0] {21} + ¦ ¦--expr: 1 [0/0] {21} ¦ ¦ °--NUM_CONST: 1 [0/0] {20} ¦ ¦--',': , [0/19] {22} - ¦ ¦--expr: [1/0] {23} - ¦ ¦ ¦--expr: [0/0] {25} + ¦ ¦--expr: 2+ [1/0] {23} + ¦ ¦ ¦--expr: 2 [0/0] {25} ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {24} ¦ ¦ ¦--'+': + [0/5] {26} - ¦ ¦ °--expr: [0/0] {28} + ¦ ¦ °--expr: 3 [0/0] {28} ¦ ¦ °--NUM_CONST: 3 [0/0] {27} ¦ ¦--',': , [0/9] {29} - ¦ ¦--expr: [1/1] {30} - ¦ ¦ ¦--expr: [0/0] {32} + ¦ ¦--expr: sin(p [1/1] {30} + ¦ ¦ ¦--expr: sin [0/0] {32} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: sin [0/0] {31} ¦ ¦ ¦--'(': ( [0/0] {33} - ¦ ¦ ¦--expr: [0/0] {35} + ¦ ¦ ¦--expr: pi [0/0] {35} ¦ ¦ ¦ °--SYMBOL: pi [0/0] {34} ¦ ¦ °--')': ) [0/0] {36} ¦ °--')': ) [0/0] {37} - ¦--expr: [2/20] {38} + ¦--expr: if(x [2/20] {38} ¦ ¦--IF: if [0/0] {39} ¦ ¦--'(': ( [0/0] {40} - ¦ ¦--expr: [0/0] {41} - ¦ ¦ ¦--expr: [0/1] {43} + ¦ ¦--expr: x > 1 [0/0] {41} + ¦ ¦ ¦--expr: x [0/1] {43} ¦ ¦ ¦ °--SYMBOL: x [0/0] {42} ¦ ¦ ¦--GT: > [0/1] {44} - ¦ ¦ °--expr: [0/0] {46} + ¦ ¦ °--expr: 10 [0/0] {46} ¦ ¦ °--NUM_CONST: 10 [0/0] {45} ¦ ¦--')': ) [0/0] {47} - ¦ °--expr: [0/0] {48} + ¦ °--expr: { +ret [0/0] {48} ¦ ¦--'{': { [0/0] {49} - ¦ ¦--expr: [1/2] {50} - ¦ ¦ ¦--expr: [0/0] {52} + ¦ ¦--expr: retur [1/2] {50} + ¦ ¦ ¦--expr: retur [0/0] {52} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: retur [0/0] {51} ¦ ¦ ¦--'(': ( [0/0] {53} - ¦ ¦ ¦--expr: [0/0] {55} + ¦ ¦ ¦--expr: "done [0/0] {55} ¦ ¦ ¦ °--STR_CONST: "done [0/0] {54} ¦ ¦ °--')': ) [0/0] {56} ¦ °--'}': } [1/0] {57} diff --git a/tests/testthat/indention_curly_brackets/multi_line_curly_while_for_if_fun-in_tree b/tests/testthat/indention_curly_brackets/multi_line_curly_while_for_if_fun-in_tree index 3a62a3ecc..99ff47e9c 100644 --- a/tests/testthat/indention_curly_brackets/multi_line_curly_while_for_if_fun-in_tree +++ b/tests/testthat/indention_curly_brackets/multi_line_curly_while_for_if_fun-in_tree @@ -1,9 +1,9 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/1] {3} + °--expr: a <- [0/0] {1} + ¦--expr: a [0/1] {3} ¦ °--SYMBOL: a [0/0] {2} ¦--LEFT_ASSIGN: <- [0/1] {4} - °--expr: [0/0] {5} + °--expr: funct [0/0] {5} ¦--FUNCTION: funct [0/0] {6} ¦--'(': ( [0/0] {7} ¦--SYMBOL_FORMALS: x [0/0] {8} @@ -12,74 +12,78 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) ¦--',': , [0/1] {11} ¦--SYMBOL_FORMALS: z [0/0] {12} ¦--')': ) [0/12] {13} - °--expr: [0/0] {14} + °--expr: { + w [0/0] {14} ¦--'{': { [0/2] {15} - ¦--expr: [1/6] {16} + ¦--expr: while [1/6] {16} ¦ ¦--WHILE: while [0/0] {17} ¦ ¦--'(': ( [0/0] {18} - ¦ ¦--expr: [0/0] {19} - ¦ ¦ ¦--expr: [0/0] {20} - ¦ ¦ ¦ ¦--expr: [0/0] {22} + ¦ ¦--expr: 2+2> [0/0] {19} + ¦ ¦ ¦--expr: 2+2 [0/0] {20} + ¦ ¦ ¦ ¦--expr: 2 [0/0] {22} ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {21} ¦ ¦ ¦ ¦--'+': + [0/0] {23} - ¦ ¦ ¦ °--expr: [0/0] {25} + ¦ ¦ ¦ °--expr: 2 [0/0] {25} ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {24} ¦ ¦ ¦--GT: > [0/1] 
{26} - ¦ ¦ °--expr: [0/0] {27} - ¦ ¦ ¦--expr: [0/0] {29} + ¦ ¦ °--expr: call( [0/0] {27} + ¦ ¦ ¦--expr: call [0/0] {29} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {28} ¦ ¦ ¦--'(': ( [0/0] {30} - ¦ ¦ ¦--expr: [0/0] {32} + ¦ ¦ ¦--expr: 3 [0/0] {32} ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {31} ¦ ¦ ¦--',': , [0/0] {33} - ¦ ¦ ¦--expr: [0/0] {35} + ¦ ¦ ¦--expr: 1 [0/0] {35} ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {34} ¦ ¦ °--')': ) [0/0] {36} ¦ ¦--')': ) [0/1] {37} - ¦ °--expr: [0/0] {38} + ¦ °--expr: { + [0/0] {38} ¦ ¦--'{': { [0/4] {39} - ¦ ¦--expr: [1/4] {40} + ¦ ¦--expr: if (i [1/4] {40} ¦ ¦ ¦--IF: if [0/1] {41} ¦ ¦ ¦--'(': ( [0/0] {42} - ¦ ¦ ¦--expr: [0/0] {43} - ¦ ¦ ¦ ¦--expr: [0/0] {45} + ¦ ¦ ¦--expr: isTRU [0/0] {43} + ¦ ¦ ¦ ¦--expr: isTRU [0/0] {45} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: isTRU [0/0] {44} ¦ ¦ ¦ ¦--'(': ( [0/0] {46} - ¦ ¦ ¦ ¦--expr: [0/0] {48} + ¦ ¦ ¦ ¦--expr: x [0/0] {48} ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {47} ¦ ¦ ¦ °--')': ) [0/0] {49} ¦ ¦ ¦--')': ) [0/1] {50} - ¦ ¦ °--expr: [0/0] {51} + ¦ ¦ °--expr: { + [0/0] {51} ¦ ¦ ¦--'{': { [0/6] {52} - ¦ ¦ ¦--expr: [1/0] {54} + ¦ ¦ ¦--expr: b [1/0] {54} ¦ ¦ ¦ °--SYMBOL: b [0/0] {53} ¦ ¦ °--'}': } [1/0] {55} ¦ °--'}': } [1/0] {56} - ¦--expr: [1/0] {57} + ¦--expr: for(a [1/0] {57} ¦ ¦--FOR: for [0/0] {58} - ¦ ¦--forcond: [0/0] {59} + ¦ ¦--forcond: (a in [0/0] {59} ¦ ¦ ¦--'(': ( [0/0] {60} ¦ ¦ ¦--SYMBOL: a [0/1] {61} ¦ ¦ ¦--IN: in [0/1] {62} - ¦ ¦ ¦--expr: [0/0] {63} - ¦ ¦ ¦ ¦--expr: [0/0] {65} + ¦ ¦ ¦--expr: 1:19 [0/0] {63} + ¦ ¦ ¦ ¦--expr: 1 [0/0] {65} ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {64} ¦ ¦ ¦ ¦--':': : [0/0] {66} - ¦ ¦ ¦ °--expr: [0/0] {68} + ¦ ¦ ¦ °--expr: 19 [0/0] {68} ¦ ¦ ¦ °--NUM_CONST: 19 [0/0] {67} ¦ ¦ °--')': ) [0/0] {69} - ¦ °--expr: [0/0] {70} + ¦ °--expr: { + [0/0] {70} ¦ ¦--'{': { [0/4] {71} - ¦ ¦--expr: [1/2] {72} - ¦ ¦ ¦--expr: [0/1] {73} - ¦ ¦ ¦ ¦--expr: [0/0] {75} + ¦ ¦--expr: x[i] [1/2] {72} + ¦ ¦ ¦--expr: x[i] [0/1] {73} + ¦ ¦ ¦ ¦--expr: x [0/0] {75} ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {74} ¦ ¦ ¦ ¦--'[': [ [0/0] {76} - ¦ ¦ ¦ ¦--expr: [0/0] {78} + ¦ ¦ ¦ ¦--expr: i [0/0] {78} ¦ ¦ ¦ ¦ °--SYMBOL: i [0/0] {77} ¦ ¦ ¦ °--']': ] [0/0] {79} ¦ ¦ ¦--'+': + [0/0] {80} - ¦ ¦ °--expr: [0/0] {82} + ¦ ¦ °--expr: 1 [0/0] {82} ¦ ¦ °--NUM_CONST: 1 [0/0] {81} ¦ °--'}': } [1/0] {83} °--'}': } [1/0] {84} diff --git a/tests/testthat/indention_curly_brackets/one_line_curly-in_tree b/tests/testthat/indention_curly_brackets/one_line_curly-in_tree index 7e2dba564..20ecfb8ac 100644 --- a/tests/testthat/indention_curly_brackets/one_line_curly-in_tree +++ b/tests/testthat/indention_curly_brackets/one_line_curly-in_tree @@ -1,14 +1,14 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/1] {3} + °--expr: a <- [0/0] {1} + ¦--expr: a [0/1] {3} ¦ °--SYMBOL: a [0/0] {2} ¦--LEFT_ASSIGN: <- [0/1] {4} - °--expr: [0/0] {5} + °--expr: {1+1} [0/0] {5} ¦--'{': { [0/0] {6} - ¦--expr: [0/0] {7} - ¦ ¦--expr: [0/0] {9} + ¦--expr: 1+1 [0/0] {7} + ¦ ¦--expr: 1 [0/0] {9} ¦ ¦ °--NUM_CONST: 1 [0/0] {8} ¦ ¦--'+': + [0/0] {10} - ¦ °--expr: [0/0] {12} + ¦ °--expr: 1 [0/0] {12} ¦ °--NUM_CONST: 1 [0/0] {11} °--'}': } [0/0] {13} diff --git a/tests/testthat/indention_fun_calls/non_strict_calls-in.R b/tests/testthat/indention_fun_calls/non_strict_calls-in.R new file mode 100644 index 000000000..073dbc9e7 --- /dev/null +++ b/tests/testthat/indention_fun_calls/non_strict_calls-in.R @@ -0,0 +1,37 @@ +call(a, + b) + +call(a, + b = 3) + +call(a = 1, b = + 3) + +call(a = + 1, b = 3) + +call(a = 1, + b = 3 +) + +call( + a = 1, + b = 3 +) + +call( + a = + 1, + b = 3 +) + +call( + a = + 
1, b = 3 +) + +call( + a = + 1, b = + 3 +) diff --git a/tests/testthat/indention_fun_calls/non_strict_calls-in_tree b/tests/testthat/indention_fun_calls/non_strict_calls-in_tree new file mode 100644 index 000000000..1d50cd650 --- /dev/null +++ b/tests/testthat/indention_fun_calls/non_strict_calls-in_tree @@ -0,0 +1,121 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: call( [0/0] {1} + ¦ ¦--expr: call [0/0] {3} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {2} + ¦ ¦--'(': ( [0/0] {4} + ¦ ¦--expr: a [0/0] {6} + ¦ ¦ °--SYMBOL: a [0/0] {5} + ¦ ¦--',': , [0/5] {7} + ¦ ¦--expr: b [1/0] {9} + ¦ ¦ °--SYMBOL: b [0/0] {8} + ¦ °--')': ) [0/0] {10} + ¦--expr: call( [2/0] {11} + ¦ ¦--expr: call [0/0] {13} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {12} + ¦ ¦--'(': ( [0/0] {14} + ¦ ¦--expr: a [0/0] {16} + ¦ ¦ °--SYMBOL: a [0/0] {15} + ¦ ¦--',': , [0/5] {17} + ¦ ¦--SYMBOL_SUB: b [1/1] {18} + ¦ ¦--EQ_SUB: = [0/1] {19} + ¦ ¦--expr: 3 [0/0] {21} + ¦ ¦ °--NUM_CONST: 3 [0/0] {20} + ¦ °--')': ) [0/0] {22} + ¦--expr: call( [2/0] {23} + ¦ ¦--expr: call [0/0] {25} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {24} + ¦ ¦--'(': ( [0/0] {26} + ¦ ¦--SYMBOL_SUB: a [0/1] {27} + ¦ ¦--EQ_SUB: = [0/1] {28} + ¦ ¦--expr: 1 [0/0] {30} + ¦ ¦ °--NUM_CONST: 1 [0/0] {29} + ¦ ¦--',': , [0/1] {31} + ¦ ¦--SYMBOL_SUB: b [0/1] {32} + ¦ ¦--EQ_SUB: = [0/7] {33} + ¦ ¦--expr: 3 [1/0] {35} + ¦ ¦ °--NUM_CONST: 3 [0/0] {34} + ¦ °--')': ) [0/0] {36} + ¦--expr: call( [2/0] {37} + ¦ ¦--expr: call [0/0] {39} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {38} + ¦ ¦--'(': ( [0/0] {40} + ¦ ¦--SYMBOL_SUB: a [0/1] {41} + ¦ ¦--EQ_SUB: = [0/7] {42} + ¦ ¦--expr: 1 [1/0] {44} + ¦ ¦ °--NUM_CONST: 1 [0/0] {43} + ¦ ¦--',': , [0/1] {45} + ¦ ¦--SYMBOL_SUB: b [0/1] {46} + ¦ ¦--EQ_SUB: = [0/1] {47} + ¦ ¦--expr: 3 [0/0] {49} + ¦ ¦ °--NUM_CONST: 3 [0/0] {48} + ¦ °--')': ) [0/0] {50} + ¦--expr: call( [2/0] {51} + ¦ ¦--expr: call [0/0] {53} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {52} + ¦ ¦--'(': ( [0/0] {54} + ¦ ¦--SYMBOL_SUB: a [0/1] {55} + ¦ ¦--EQ_SUB: = [0/1] {56} + ¦ ¦--expr: 1 [0/0] {58} + ¦ ¦ °--NUM_CONST: 1 [0/0] {57} + ¦ ¦--',': , [0/2] {59} + ¦ ¦--SYMBOL_SUB: b [1/1] {60} + ¦ ¦--EQ_SUB: = [0/1] {61} + ¦ ¦--expr: 3 [0/0] {63} + ¦ ¦ °--NUM_CONST: 3 [0/0] {62} + ¦ °--')': ) [1/0] {64} + ¦--expr: call( [2/0] {65} + ¦ ¦--expr: call [0/0] {67} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {66} + ¦ ¦--'(': ( [0/2] {68} + ¦ ¦--SYMBOL_SUB: a [1/1] {69} + ¦ ¦--EQ_SUB: = [0/1] {70} + ¦ ¦--expr: 1 [0/0] {72} + ¦ ¦ °--NUM_CONST: 1 [0/0] {71} + ¦ ¦--',': , [0/2] {73} + ¦ ¦--SYMBOL_SUB: b [1/1] {74} + ¦ ¦--EQ_SUB: = [0/1] {75} + ¦ ¦--expr: 3 [0/0] {77} + ¦ ¦ °--NUM_CONST: 3 [0/0] {76} + ¦ °--')': ) [1/0] {78} + ¦--expr: call( [2/0] {79} + ¦ ¦--expr: call [0/0] {81} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {80} + ¦ ¦--'(': ( [0/2] {82} + ¦ ¦--SYMBOL_SUB: a [1/1] {83} + ¦ ¦--EQ_SUB: = [0/4] {84} + ¦ ¦--expr: 1 [1/0] {86} + ¦ ¦ °--NUM_CONST: 1 [0/0] {85} + ¦ ¦--',': , [0/2] {87} + ¦ ¦--SYMBOL_SUB: b [1/1] {88} + ¦ ¦--EQ_SUB: = [0/1] {89} + ¦ ¦--expr: 3 [0/0] {91} + ¦ ¦ °--NUM_CONST: 3 [0/0] {90} + ¦ °--')': ) [1/0] {92} + ¦--expr: call( [2/0] {93} + ¦ ¦--expr: call [0/0] {95} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {94} + ¦ ¦--'(': ( [0/2] {96} + ¦ ¦--SYMBOL_SUB: a [1/1] {97} + ¦ ¦--EQ_SUB: = [0/4] {98} + ¦ ¦--expr: 1 [1/0] {100} + ¦ ¦ °--NUM_CONST: 1 [0/0] {99} + ¦ ¦--',': , [0/1] {101} + ¦ ¦--SYMBOL_SUB: b [0/1] {102} + ¦ ¦--EQ_SUB: = [0/1] {103} + ¦ ¦--expr: 3 [0/0] {105} + ¦ ¦ °--NUM_CONST: 3 [0/0] {104} + ¦ °--')': ) [1/0] {106} + °--expr: call( [2/0] {107} 
+ ¦--expr: call [0/0] {109} + ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {108} + ¦--'(': ( [0/2] {110} + ¦--SYMBOL_SUB: a [1/1] {111} + ¦--EQ_SUB: = [0/4] {112} + ¦--expr: 1 [1/0] {114} + ¦ °--NUM_CONST: 1 [0/0] {113} + ¦--',': , [0/1] {115} + ¦--SYMBOL_SUB: b [0/1] {116} + ¦--EQ_SUB: = [0/4] {117} + ¦--expr: 3 [1/0] {119} + ¦ °--NUM_CONST: 3 [0/0] {118} + °--')': ) [1/0] {120} diff --git a/tests/testthat/indention_fun_calls/non_strict_calls-out.R b/tests/testthat/indention_fun_calls/non_strict_calls-out.R new file mode 100644 index 000000000..3db1f68f7 --- /dev/null +++ b/tests/testthat/indention_fun_calls/non_strict_calls-out.R @@ -0,0 +1,37 @@ +call(a, + b) + +call(a, + b = 3) + +call(a = 1, b = + 3) + +call(a = + 1, b = 3) + +call(a = 1, + b = 3 +) + +call( + a = 1, + b = 3 +) + +call( + a = + 1, + b = 3 +) + +call( + a = + 1, b = 3 +) + +call( + a = + 1, b = + 3 +) diff --git a/tests/testthat/indention_multiple/edge_round_separate-in.R b/tests/testthat/indention_fun_calls/strict_calls-in.R similarity index 100% rename from tests/testthat/indention_multiple/edge_round_separate-in.R rename to tests/testthat/indention_fun_calls/strict_calls-in.R diff --git a/tests/testthat/indention_multiple/edge_round_separate-in_tree b/tests/testthat/indention_fun_calls/strict_calls-in_tree similarity index 63% rename from tests/testthat/indention_multiple/edge_round_separate-in_tree rename to tests/testthat/indention_fun_calls/strict_calls-in_tree index 6c2575f20..36a0c9a05 100644 --- a/tests/testthat/indention_multiple/edge_round_separate-in_tree +++ b/tests/testthat/indention_fun_calls/strict_calls-in_tree @@ -1,25 +1,25 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/0] {3} + °--expr: call( [0/0] {1} + ¦--expr: call [0/0] {3} ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {2} ¦--'(': ( [0/0] {4} - ¦--expr: [1/3] {5} - ¦ ¦--expr: [0/0] {7} + ¦--expr: call( [1/3] {5} + ¦ ¦--expr: call [0/0] {7} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {6} ¦ ¦--'(': ( [0/3] {8} - ¦ ¦--expr: [1/0] {9} - ¦ ¦ ¦--expr: [0/0] {11} + ¦ ¦--expr: call( [1/0] {9} + ¦ ¦ ¦--expr: call [0/0] {11} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {10} ¦ ¦ ¦--'(': ( [0/0] {12} - ¦ ¦ ¦--expr: [1/3] {13} - ¦ ¦ ¦ ¦--expr: [0/0] {15} + ¦ ¦ ¦--expr: call( [1/3] {13} + ¦ ¦ ¦ ¦--expr: call [0/0] {15} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {14} ¦ ¦ ¦ ¦--'(': ( [0/10] {16} - ¦ ¦ ¦ ¦--expr: [1/0] {17} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {19} + ¦ ¦ ¦ ¦--expr: call( [1/0] {17} + ¦ ¦ ¦ ¦ ¦--expr: call [0/0] {19} ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {18} ¦ ¦ ¦ ¦ ¦--'(': ( [0/5] {20} - ¦ ¦ ¦ ¦ ¦--expr: [1/3] {22} + ¦ ¦ ¦ ¦ ¦--expr: 2 [1/3] {22} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {21} ¦ ¦ ¦ ¦ °--')': ) [1/0] {23} ¦ ¦ ¦ °--')': ) [1/0] {24} diff --git a/tests/testthat/indention_multiple/edge_round_separate-out.R b/tests/testthat/indention_fun_calls/strict_calls-out.R similarity index 100% rename from tests/testthat/indention_multiple/edge_round_separate-out.R rename to tests/testthat/indention_fun_calls/strict_calls-out.R diff --git a/tests/testthat/indention_multiple/curly_and_round-in_tree b/tests/testthat/indention_multiple/curly_and_round-in_tree index cf006823a..48acfd1c0 100644 --- a/tests/testthat/indention_multiple/curly_and_round-in_tree +++ b/tests/testthat/indention_multiple/curly_and_round-in_tree @@ -1,85 +1,94 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/0] {3} + ¦--expr: test_ [0/0] {1} + ¦ ¦--expr: test_ [0/0] {3} ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {2} ¦ 
¦--'(': ( [0/0] {4} - ¦ ¦--expr: [0/0] {6} + ¦ ¦--expr: "this [0/0] {6} ¦ ¦ °--STR_CONST: "this [0/0] {5} ¦ ¦--',': , [0/1] {7} - ¦ ¦--expr: [0/0] {8} + ¦ ¦--expr: { + t [0/0] {8} ¦ ¦ ¦--'{': { [0/2] {9} - ¦ ¦ ¦--expr: [1/0] {10} - ¦ ¦ ¦ ¦--expr: [0/0] {12} + ¦ ¦ ¦--expr: test( [1/0] {10} + ¦ ¦ ¦ ¦--expr: test [0/0] {12} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: test [0/0] {11} ¦ ¦ ¦ ¦--'(': ( [0/0] {13} - ¦ ¦ ¦ ¦--expr: [0/0] {15} + ¦ ¦ ¦ ¦--expr: x [0/0] {15} ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {14} ¦ ¦ ¦ ¦--',': , [0/1] {16} - ¦ ¦ ¦ ¦--expr: [0/0] {18} + ¦ ¦ ¦ ¦--expr: y [0/0] {18} ¦ ¦ ¦ ¦ °--SYMBOL: y [0/0] {17} ¦ ¦ ¦ ¦--',': , [0/1] {19} - ¦ ¦ ¦ ¦--expr: [0/0] {20} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {22} + ¦ ¦ ¦ ¦--expr: call( [0/0] {20} + ¦ ¦ ¦ ¦ ¦--expr: call [0/0] {22} ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {21} ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {23} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {25} + ¦ ¦ ¦ ¦ ¦--expr: z [0/0] {25} ¦ ¦ ¦ ¦ ¦ °--SYMBOL: z [0/0] {24} ¦ ¦ ¦ ¦ °--')': ) [0/0] {26} ¦ ¦ ¦ °--')': ) [0/0] {27} ¦ ¦ °--'}': } [1/0] {28} ¦ °--')': ) [0/0] {29} - ¦--expr: [2/0] {30} + ¦--expr: (({{ + [2/0] {30} ¦ ¦--'(': ( [0/0] {31} - ¦ ¦--expr: [0/0] {32} + ¦ ¦--expr: ({{ + [0/0] {32} ¦ ¦ ¦--'(': ( [0/0] {33} - ¦ ¦ ¦--expr: [0/0] {34} + ¦ ¦ ¦--expr: {{ + [0/0] {34} ¦ ¦ ¦ ¦--'{': { [0/0] {35} - ¦ ¦ ¦ ¦--expr: [0/0] {36} + ¦ ¦ ¦ ¦--expr: { + c [0/0] {36} ¦ ¦ ¦ ¦ ¦--'{': { [0/2] {37} - ¦ ¦ ¦ ¦ ¦--expr: [1/0] {38} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {40} + ¦ ¦ ¦ ¦ ¦--expr: call( [1/0] {38} + ¦ ¦ ¦ ¦ ¦ ¦--expr: call [0/0] {40} ¦ ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {39} ¦ ¦ ¦ ¦ ¦ ¦--'(': ( [0/4] {41} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [1/0] {43} + ¦ ¦ ¦ ¦ ¦ ¦--expr: 12 [1/0] {43} ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 12 [0/0] {42} ¦ ¦ ¦ ¦ ¦ ¦--',': , [0/1] {44} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {45} - ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/1] {47} + ¦ ¦ ¦ ¦ ¦ ¦--expr: 1 + 1 [0/0] {45} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: 1 [0/1] {47} ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {46} ¦ ¦ ¦ ¦ ¦ ¦ ¦--'+': + [0/1] {48} - ¦ ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {50} + ¦ ¦ ¦ ¦ ¦ ¦ °--expr: 1 [0/0] {50} ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {49} ¦ ¦ ¦ ¦ ¦ ¦--',': , [0/4] {51} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [1/0] {53} + ¦ ¦ ¦ ¦ ¦ ¦--expr: 26 [1/0] {53} ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 26 [0/0] {52} ¦ ¦ ¦ ¦ ¦ °--')': ) [0/0] {54} ¦ ¦ ¦ ¦ °--'}': } [1/0] {55} ¦ ¦ ¦ °--'}': } [0/0] {56} ¦ ¦ °--')': ) [0/0] {57} ¦ °--')': ) [0/0] {58} - °--expr: [3/0] {59} + °--expr: (({{ + [3/0] {59} ¦--'(': ( [0/0] {60} - ¦--expr: [0/0] {61} + ¦--expr: ({{ + [0/0] {61} ¦ ¦--'(': ( [0/0] {62} - ¦ ¦--expr: [0/0] {63} + ¦ ¦--expr: {{ + [0/0] {63} ¦ ¦ ¦--'{': { [0/0] {64} - ¦ ¦ ¦--expr: [0/0] {65} + ¦ ¦ ¦--expr: { + c [0/0] {65} ¦ ¦ ¦ ¦--'{': { [0/2] {66} - ¦ ¦ ¦ ¦--expr: [1/0] {67} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {69} + ¦ ¦ ¦ ¦--expr: call( [1/0] {67} + ¦ ¦ ¦ ¦ ¦--expr: call [0/0] {69} ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {68} ¦ ¦ ¦ ¦ ¦--'(': ( [0/4] {70} - ¦ ¦ ¦ ¦ ¦--expr: [1/0] {72} + ¦ ¦ ¦ ¦ ¦--expr: 12 [1/0] {72} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 12 [0/0] {71} ¦ ¦ ¦ ¦ ¦--',': , [0/1] {73} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {74} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/1] {76} + ¦ ¦ ¦ ¦ ¦--expr: 1 + 1 [0/0] {74} + ¦ ¦ ¦ ¦ ¦ ¦--expr: 1 [0/1] {76} ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {75} ¦ ¦ ¦ ¦ ¦ ¦--'+': + [0/1] {77} - ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {79} + ¦ ¦ ¦ ¦ ¦ °--expr: 1 [0/0] {79} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {78} ¦ ¦ ¦ ¦ ¦--',': , [0/4] {80} - ¦ ¦ ¦ ¦ ¦--expr: [1/2] {82} + ¦ ¦ ¦ ¦ ¦--expr: 26 [1/2] {82} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 26 [0/0] {81} ¦ ¦ ¦ ¦ °--')': ) [1/0] {83} ¦ ¦ ¦ °--'}': } [1/0] {84} diff --git a/tests/testthat/indention_multiple/curly_only-in_tree 
b/tests/testthat/indention_multiple/curly_only-in_tree index 0feb6f7fb..3135ed9b6 100644 --- a/tests/testthat/indention_multiple/curly_only-in_tree +++ b/tests/testthat/indention_multiple/curly_only-in_tree @@ -1,45 +1,52 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/7] {1} + ¦--expr: { + [0/7] {1} ¦ ¦--'{': { [0/10] {2} - ¦ ¦--expr: [1/1] {3} - ¦ ¦ ¦--expr: [0/1] {5} + ¦ ¦--expr: 1 + 1 [1/1] {3} + ¦ ¦ ¦--expr: 1 [0/1] {5} ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {4} ¦ ¦ ¦--'+': + [0/1] {6} - ¦ ¦ °--expr: [0/0] {8} + ¦ ¦ °--expr: 1 [0/0] {8} ¦ ¦ °--NUM_CONST: 1 [0/0] {7} ¦ °--'}': } [1/0] {9} - ¦--expr: [2/0] {10} + ¦--expr: {{{ +2 [2/0] {10} ¦ ¦--'{': { [0/0] {11} - ¦ ¦--expr: [0/0] {12} + ¦ ¦--expr: {{ +25 [0/0] {12} ¦ ¦ ¦--'{': { [0/0] {13} - ¦ ¦ ¦--expr: [0/0] {14} + ¦ ¦ ¦--expr: { +25 [0/0] {14} ¦ ¦ ¦ ¦--'{': { [0/0] {15} - ¦ ¦ ¦ ¦--expr: [1/7] {16} - ¦ ¦ ¦ ¦ ¦--expr: [0/1] {18} + ¦ ¦ ¦ ¦--expr: 25 * [1/7] {16} + ¦ ¦ ¦ ¦ ¦--expr: 25 [0/1] {18} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 25 [0/0] {17} ¦ ¦ ¦ ¦ ¦--'*': * [0/1] {19} - ¦ ¦ ¦ ¦ °--expr: [0/0] {21} + ¦ ¦ ¦ ¦ °--expr: 4 [0/0] {21} ¦ ¦ ¦ ¦ °--NUM_CONST: 4 [0/0] {20} ¦ ¦ ¦ °--'}': } [1/0] {22} ¦ ¦ °--'}': } [0/0] {23} ¦ °--'}': } [0/0] {24} - °--expr: [2/0] {25} + °--expr: { +{ + [2/0] {25} ¦--'{': { [0/0] {26} - ¦--expr: [1/0] {27} + ¦--expr: { + 1 [1/0] {27} ¦ ¦--'{': { [0/2] {28} - ¦ ¦--expr: [1/0] {29} - ¦ ¦ ¦--expr: [0/1] {31} + ¦ ¦--expr: 1 + 1 [1/0] {29} + ¦ ¦ ¦--expr: 1 [0/1] {31} ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {30} ¦ ¦ ¦--'+': + [0/1] {32} - ¦ ¦ °--expr: [0/0] {33} - ¦ ¦ ¦--expr: [0/1] {35} + ¦ ¦ °--expr: 142 * [0/0] {33} + ¦ ¦ ¦--expr: 142 [0/1] {35} ¦ ¦ ¦ °--NUM_CONST: 142 [0/0] {34} ¦ ¦ ¦--'*': * [0/1] {36} - ¦ ¦ °--expr: [0/0] {37} - ¦ ¦ ¦--expr: [0/0] {39} + ¦ ¦ °--expr: sin(p [0/0] {37} + ¦ ¦ ¦--expr: sin [0/0] {39} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: sin [0/0] {38} ¦ ¦ ¦--'(': ( [0/0] {40} - ¦ ¦ ¦--expr: [0/0] {42} + ¦ ¦ ¦--expr: pi [0/0] {42} ¦ ¦ ¦ °--SYMBOL: pi [0/0] {41} ¦ ¦ °--')': ) [0/0] {43} ¦ °--'}': } [1/0] {44} diff --git a/tests/testthat/indention_multiple/edge_mixed-out.R b/tests/testthat/indention_multiple/edge_mixed-out.R deleted file mode 100644 index 7c1a36471..000000000 --- a/tests/testthat/indention_multiple/edge_mixed-out.R +++ /dev/null @@ -1,25 +0,0 @@ -{ - ( - (({ - { - { - c( - 99, - 1 + 1, { - "within that suff" - } - ) - } - } - })) - ) -} - - -((( - 1 + 2) * (3 + 4 -))) - - -function(x, y, z) { -} diff --git a/tests/testthat/indention_multiple/edge_mixed-in.R b/tests/testthat/indention_multiple/edge_strict_mixed-in.R similarity index 100% rename from tests/testthat/indention_multiple/edge_mixed-in.R rename to tests/testthat/indention_multiple/edge_strict_mixed-in.R diff --git a/tests/testthat/indention_multiple/edge_mixed-in_tree b/tests/testthat/indention_multiple/edge_strict_mixed-in_tree similarity index 67% rename from tests/testthat/indention_multiple/edge_mixed-in_tree rename to tests/testthat/indention_multiple/edge_strict_mixed-in_tree index 747c14c5e..a2a815bd7 100644 --- a/tests/testthat/indention_multiple/edge_mixed-in_tree +++ b/tests/testthat/indention_multiple/edge_strict_mixed-in_tree @@ -1,35 +1,45 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} + ¦--expr: { +( + [0/0] {1} ¦ ¦--'{': { [0/0] {2} - ¦ ¦--expr: [1/0] {3} + ¦ ¦--expr: ( + [1/0] {3} ¦ ¦ ¦--'(': ( [0/7] {4} - ¦ ¦ ¦--expr: [1/2] {5} + ¦ ¦ ¦--expr: (( +{{ [1/2] {5} ¦ ¦ ¦ ¦--'(': ( [0/0] {6} - ¦ ¦ ¦ ¦--expr: [0/0] {7} + ¦ ¦ ¦ ¦--expr: ( +{{ + [0/0] {7} ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {8} - ¦ ¦ ¦ ¦ 
¦--expr: [1/0] {9} + ¦ ¦ ¦ ¦ ¦--expr: {{ + [1/0] {9} ¦ ¦ ¦ ¦ ¦ ¦--'{': { [0/0] {10} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {11} + ¦ ¦ ¦ ¦ ¦ ¦--expr: { + [0/0] {11} ¦ ¦ ¦ ¦ ¦ ¦ ¦--'{': { [0/8] {12} - ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [1/4] {13} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: { + [1/4] {13} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--'{': { [0/10] {14} - ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [1/8] {15} - ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {17} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: c(99, [1/8] {15} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: c [0/0] {17} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {16} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {18} - ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {20} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: 99 [0/0] {20} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 99 [0/0] {19} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--',': , [0/9] {21} - ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [1/0] {22} - ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/1] {24} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: 1 + 1 [1/0] {22} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: 1 [0/1] {24} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {23} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--'+': + [0/1] {25} - ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {27} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--expr: 1 [0/0] {27} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {26} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--',': , [0/17] {28} - ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [1/0] {29} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: { + [1/0] {29} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--'{': { [0/4] {30} - ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [1/0] {32} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: "with [1/0] {32} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--STR_CONST: "with [0/0] {31} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--'}': } [1/0] {33} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--')': ) [0/0] {34} @@ -40,33 +50,37 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) ¦ ¦ ¦ °--')': ) [0/0] {39} ¦ ¦ °--')': ) [1/0] {40} ¦ °--'}': } [1/0] {41} - ¦--expr: [3/0] {42} + ¦--expr: ((( + [3/0] {42} ¦ ¦--'(': ( [0/0] {43} - ¦ ¦--expr: [0/0] {44} + ¦ ¦--expr: (( + [0/0] {44} ¦ ¦ ¦--'(': ( [0/0] {45} - ¦ ¦ ¦--expr: [0/0] {46} - ¦ ¦ ¦ ¦--expr: [0/1] {47} + ¦ ¦ ¦--expr: ( + 1 [0/0] {46} + ¦ ¦ ¦ ¦--expr: ( + 1 [0/1] {47} ¦ ¦ ¦ ¦ ¦--'(': ( [0/2] {48} - ¦ ¦ ¦ ¦ ¦--expr: [1/0] {49} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/1] {51} + ¦ ¦ ¦ ¦ ¦--expr: 1 + 2 [1/0] {49} + ¦ ¦ ¦ ¦ ¦ ¦--expr: 1 [0/1] {51} ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {50} ¦ ¦ ¦ ¦ ¦ ¦--'+': + [0/1] {52} - ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {54} + ¦ ¦ ¦ ¦ ¦ °--expr: 2 [0/0] {54} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {53} ¦ ¦ ¦ ¦ °--')': ) [0/0] {55} ¦ ¦ ¦ ¦--'*': * [0/1] {56} - ¦ ¦ ¦ °--expr: [0/0] {57} + ¦ ¦ ¦ °--expr: (3 + [0/0] {57} ¦ ¦ ¦ ¦--'(': ( [0/0] {58} - ¦ ¦ ¦ ¦--expr: [0/0] {59} - ¦ ¦ ¦ ¦ ¦--expr: [0/1] {61} + ¦ ¦ ¦ ¦--expr: 3 + 4 [0/0] {59} + ¦ ¦ ¦ ¦ ¦--expr: 3 [0/1] {61} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {60} ¦ ¦ ¦ ¦ ¦--'+': + [0/1] {62} - ¦ ¦ ¦ ¦ °--expr: [0/0] {64} + ¦ ¦ ¦ ¦ °--expr: 4 [0/0] {64} ¦ ¦ ¦ ¦ °--NUM_CONST: 4 [0/0] {63} ¦ ¦ ¦ °--')': ) [1/0] {65} ¦ ¦ °--')': ) [0/0] {66} ¦ °--')': ) [0/0] {67} - °--expr: [3/0] {68} + °--expr: funct [3/0] {68} ¦--FUNCTION: funct [0/0] {69} ¦--'(': ( [0/0] {70} ¦--SYMBOL_FORMALS: x [0/0] {71} @@ -75,6 +89,7 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) ¦--',': , [0/1] {74} ¦--SYMBOL_FORMALS: z [0/0] {75} ¦--')': ) [0/1] {76} - °--expr: [0/0] {77} + °--expr: { +} [0/0] {77} ¦--'{': { [0/0] {78} °--'}': } [1/0] {79} diff --git a/tests/testthat/indention_multiple/edge_strict_mixed-out.R b/tests/testthat/indention_multiple/edge_strict_mixed-out.R new file mode 100644 index 000000000..dcfe06a07 --- /dev/null +++ b/tests/testthat/indention_multiple/edge_strict_mixed-out.R @@ -0,0 +1,27 @@ +{ + ( + (( + { + { + { + c( + 99, + 1 + 1, + { + "within that suff" + } + ) + } + } + })) + ) +} + + +((( + 1 + 2) * (3 + 4 +))) + + +function(x, y, z) { +} diff --git 
a/tests/testthat/indention_multiple/edge_random-in.R b/tests/testthat/indention_multiple/edge_strict_random-in.R similarity index 99% rename from tests/testthat/indention_multiple/edge_random-in.R rename to tests/testthat/indention_multiple/edge_strict_random-in.R index cc8f8c0c5..7ec47f29d 100644 --- a/tests/testthat/indention_multiple/edge_random-in.R +++ b/tests/testthat/indention_multiple/edge_strict_random-in.R @@ -11,4 +11,3 @@ { c("x", "y", "z", "sin(x)") } - diff --git a/tests/testthat/indention_multiple/edge_random-in_tree b/tests/testthat/indention_multiple/edge_strict_random-in_tree similarity index 74% rename from tests/testthat/indention_multiple/edge_random-in_tree rename to tests/testthat/indention_multiple/edge_strict_random-in_tree index 66255318d..1f6495ff9 100644 --- a/tests/testthat/indention_multiple/edge_random-in_tree +++ b/tests/testthat/indention_multiple/edge_strict_random-in_tree @@ -1,31 +1,33 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/3] {1} + ¦--expr: {{( [0/3] {1} ¦ ¦--'{': { [0/0] {2} - ¦ ¦--expr: [0/0] {3} + ¦ ¦--expr: {( [0/0] {3} ¦ ¦ ¦--'{': { [0/0] {4} - ¦ ¦ ¦--expr: [0/0] {5} + ¦ ¦ ¦--expr: ( { [0/0] {5} ¦ ¦ ¦ ¦--'(': ( [0/3] {6} - ¦ ¦ ¦ ¦--expr: [0/0] {7} + ¦ ¦ ¦ ¦--expr: {{{{ [0/0] {7} ¦ ¦ ¦ ¦ ¦--'{': { [0/0] {8} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {9} + ¦ ¦ ¦ ¦ ¦--expr: {{{ [0/0] {9} ¦ ¦ ¦ ¦ ¦ ¦--'{': { [0/0] {10} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {11} + ¦ ¦ ¦ ¦ ¦ ¦--expr: {{ { [0/0] {11} ¦ ¦ ¦ ¦ ¦ ¦ ¦--'{': { [0/0] {12} - ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {13} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: { {{ [0/0] {13} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--'{': { [0/2] {14} - ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {15} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: {{{{ [0/0] {15} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--'{': { [0/0] {16} - ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {17} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: {{{ [0/0] {17} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--'{': { [0/0] {18} - ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {19} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: {{ [0/0] {19} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--'{': { [0/0] {20} - ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {21} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: { ( [0/0] {21} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--'{': { [0/3] {22} - ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {23} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: (( + [0/0] {23} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {24} - ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {25} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: ( + 1 [0/0] {25} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--'(': ( [0/2] {26} - ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [1/0] {28} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: 19 [1/0] {28} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 19 [0/0] {27} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--')': ) [1/0] {29} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--')': ) [0/0] {30} @@ -40,41 +42,43 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) ¦ ¦ ¦ °--')': ) [0/0] {39} ¦ ¦ °--'}': } [0/0] {40} ¦ °--'}': } [0/0] {41} - ¦--expr: [3/3] {42} + ¦--expr: ( + c [3/3] {42} ¦ ¦--'(': ( [0/2] {43} - ¦ ¦--expr: [1/3] {44} - ¦ ¦ ¦--expr: [0/0] {46} + ¦ ¦--expr: c("x" [1/3] {44} + ¦ ¦ ¦--expr: c [0/0] {46} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {45} ¦ ¦ ¦--'(': ( [0/0] {47} - ¦ ¦ ¦--expr: [0/0] {49} + ¦ ¦ ¦--expr: "x" [0/0] {49} ¦ ¦ ¦ °--STR_CONST: "x" [0/0] {48} ¦ ¦ ¦--',': , [0/1] {50} - ¦ ¦ ¦--expr: [0/0] {52} + ¦ ¦ ¦--expr: "y" [0/0] {52} ¦ ¦ ¦ °--STR_CONST: "y" [0/0] {51} ¦ ¦ ¦--',': , [0/1] {53} - ¦ ¦ ¦--expr: [0/0] {55} + ¦ ¦ ¦--expr: "z" [0/0] {55} ¦ ¦ ¦ °--STR_CONST: "z" [0/0] {54} ¦ ¦ ¦--',': , [0/1] {56} - ¦ ¦ ¦--expr: [0/0] {58} + ¦ ¦ ¦--expr: "sin( [0/0] {58} ¦ ¦ ¦ °--STR_CONST: "sin( [0/0] {57} ¦ ¦ °--')': ) [0/0] {59} ¦ °--')': ) [1/0] {60} - °--expr: [3/0] {61} + °--expr: { + c 
[3/0] {61} ¦--'{': { [0/2] {62} - ¦--expr: [1/0] {63} - ¦ ¦--expr: [0/0] {65} + ¦--expr: c("x" [1/0] {63} + ¦ ¦--expr: c [0/0] {65} ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {64} ¦ ¦--'(': ( [0/0] {66} - ¦ ¦--expr: [0/0] {68} + ¦ ¦--expr: "x" [0/0] {68} ¦ ¦ °--STR_CONST: "x" [0/0] {67} ¦ ¦--',': , [0/1] {69} - ¦ ¦--expr: [0/0] {71} + ¦ ¦--expr: "y" [0/0] {71} ¦ ¦ °--STR_CONST: "y" [0/0] {70} ¦ ¦--',': , [0/1] {72} - ¦ ¦--expr: [0/0] {74} + ¦ ¦--expr: "z" [0/0] {74} ¦ ¦ °--STR_CONST: "z" [0/0] {73} ¦ ¦--',': , [0/1] {75} - ¦ ¦--expr: [0/0] {77} + ¦ ¦--expr: "sin( [0/0] {77} ¦ ¦ °--STR_CONST: "sin( [0/0] {76} ¦ °--')': ) [0/0] {78} °--'}': } [1/0] {79} diff --git a/tests/testthat/indention_multiple/edge_random-out.R b/tests/testthat/indention_multiple/edge_strict_random-out.R similarity index 100% rename from tests/testthat/indention_multiple/edge_random-out.R rename to tests/testthat/indention_multiple/edge_strict_random-out.R diff --git a/tests/testthat/indention_multiple/fun_for_new_line-in_tree b/tests/testthat/indention_multiple/fun_for_new_line-in_tree index 9a02e8376..8ec66a854 100644 --- a/tests/testthat/indention_multiple/fun_for_new_line-in_tree +++ b/tests/testthat/indention_multiple/fun_for_new_line-in_tree @@ -1,25 +1,27 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} + ¦--expr: funct [0/0] {1} ¦ ¦--FUNCTION: funct [0/0] {2} ¦ ¦--'(': ( [0/0] {3} ¦ ¦--')': ) [0/0] {4} - ¦ °--expr: [1/0] {6} + ¦ °--expr: NULL [1/0] {6} ¦ °--NULL_CONST: NULL [0/0] {5} - °--expr: [2/0] {7} + °--expr: for ( [2/0] {7} ¦--FOR: for [0/1] {8} - ¦--forcond: [0/1] {9} + ¦--forcond: (i in [0/1] {9} ¦ ¦--'(': ( [0/0] {10} ¦ ¦--SYMBOL: i [0/1] {11} ¦ ¦--IN: in [0/1] {12} - ¦ ¦--expr: [0/0] {13} - ¦ ¦ ¦--expr: [0/0] {15} + ¦ ¦--expr: 1:3 [0/0] {13} + ¦ ¦ ¦--expr: 1 [0/0] {15} ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {14} ¦ ¦ ¦--':': : [0/0] {16} - ¦ ¦ °--expr: [0/0] {18} + ¦ ¦ °--expr: 3 [0/0] {18} ¦ ¦ °--NUM_CONST: 3 [0/0] {17} ¦ °--')': ) [0/0] {19} - °--expr: [1/0] {20} + °--expr: { +2 + [1/0] {20} ¦--'{': { [0/0] {21} - ¦--expr: [1/1] {23} + ¦--expr: 2 [1/1] {23} ¦ °--NUM_CONST: 2 [0/0] {22} °--'}': } [1/0] {24} diff --git a/tests/testthat/indention_multiple/fun_for_new_line-out.R b/tests/testthat/indention_multiple/fun_for_new_line-out.R index 43a7c5834..f9c4e703e 100644 --- a/tests/testthat/indention_multiple/fun_for_new_line-out.R +++ b/tests/testthat/indention_multiple/fun_for_new_line-out.R @@ -1,5 +1,6 @@ -function() +function() { NULL +} for (i in 1:3) { diff --git a/tests/testthat/indention_multiple/if_else_curly-in.R b/tests/testthat/indention_multiple/if_else_curly-in.R index ee3ad474c..c458ce8e6 100644 --- a/tests/testthat/indention_multiple/if_else_curly-in.R +++ b/tests/testthat/indention_multiple/if_else_curly-in.R @@ -27,3 +27,28 @@ if (TRUE) { 4 } } + +# rather space than brace thing, but +foo <- function(x) { + if (TRUE) { + 1 + } + else { + 2 + } +} + + +if (TRUE) { + 3 +} else + if (FALSE) { + 4 +} + +if (TRUE) { + 3 +} else # comment + if (FALSE) { + 4 + } diff --git a/tests/testthat/indention_multiple/if_else_curly-in_tree b/tests/testthat/indention_multiple/if_else_curly-in_tree index 1ef4bc7ac..36f58267d 100644 --- a/tests/testthat/indention_multiple/if_else_curly-in_tree +++ b/tests/testthat/indention_multiple/if_else_curly-in_tree @@ -1,69 +1,162 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} + ¦--expr: { + [0/0] {1} ¦ ¦--'{': { [0/4] {2} - ¦ ¦--expr: [1/0] {3} + ¦ ¦--expr: if (T [1/0] {3} ¦ ¦ ¦--IF: if [0/1] {4} ¦ ¦ ¦--'(': ( [0/0] 
{5} - ¦ ¦ ¦--expr: [0/0] {7} + ¦ ¦ ¦--expr: TRUE [0/0] {7} ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {6} ¦ ¦ ¦--')': ) [0/0] {8} - ¦ ¦ ¦--expr: [1/4] {10} + ¦ ¦ ¦--expr: 3 [1/4] {10} ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {9} ¦ ¦ ¦--ELSE: else [1/0] {11} - ¦ ¦ °--expr: [1/0] {13} + ¦ ¦ °--expr: 4 [1/0] {13} ¦ ¦ °--NUM_CONST: 4 [0/0] {12} ¦ °--'}': } [1/0] {14} - ¦--expr: [2/0] {15} + ¦--expr: { +if [2/0] {15} ¦ ¦--'{': { [0/0] {16} - ¦ ¦--expr: [1/0] {17} + ¦ ¦--expr: if (T [1/0] {17} ¦ ¦ ¦--IF: if [0/1] {18} ¦ ¦ ¦--'(': ( [0/0] {19} - ¦ ¦ ¦--expr: [0/0] {21} + ¦ ¦ ¦--expr: TRUE [0/0] {21} ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {20} ¦ ¦ ¦--')': ) [0/1] {22} - ¦ ¦ ¦--expr: [0/1] {23} + ¦ ¦ ¦--expr: { + [0/1] {23} ¦ ¦ ¦ ¦--'{': { [0/3] {24} - ¦ ¦ ¦ ¦--expr: [1/4] {26} + ¦ ¦ ¦ ¦--expr: 3 [1/4] {26} ¦ ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {25} ¦ ¦ ¦ °--'}': } [1/0] {27} ¦ ¦ ¦--ELSE: else [0/0] {28} - ¦ ¦ °--expr: [1/0] {30} + ¦ ¦ °--expr: 4 [1/0] {30} ¦ ¦ °--NUM_CONST: 4 [0/0] {29} ¦ °--'}': } [1/0] {31} - ¦--expr: [2/0] {32} + ¦--expr: { +if [2/0] {32} ¦ ¦--'{': { [0/0] {33} - ¦ ¦--expr: [1/0] {34} + ¦ ¦--expr: if (T [1/0] {34} ¦ ¦ ¦--IF: if [0/1] {35} ¦ ¦ ¦--'(': ( [0/0] {36} - ¦ ¦ ¦--expr: [0/0] {38} + ¦ ¦ ¦--expr: TRUE [0/0] {38} ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {37} ¦ ¦ ¦--')': ) [0/4] {39} - ¦ ¦ ¦--expr: [1/0] {41} + ¦ ¦ ¦--expr: 3 [1/0] {41} ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {40} ¦ ¦ ¦--ELSE: else [1/1] {42} - ¦ ¦ °--expr: [0/0] {43} + ¦ ¦ °--expr: { + 4 [0/0] {43} ¦ ¦ ¦--'{': { [0/2] {44} - ¦ ¦ ¦--expr: [1/0] {46} + ¦ ¦ ¦--expr: 4 [1/0] {46} ¦ ¦ ¦ °--NUM_CONST: 4 [0/0] {45} ¦ ¦ °--'}': } [1/0] {47} ¦ °--'}': } [1/0] {48} - °--expr: [2/0] {49} - ¦--'{': { [0/0] {50} - ¦--expr: [1/0] {51} - ¦ ¦--IF: if [0/1] {52} - ¦ ¦--'(': ( [0/0] {53} - ¦ ¦--expr: [0/0] {55} - ¦ ¦ °--NUM_CONST: TRUE [0/0] {54} - ¦ ¦--')': ) [0/1] {56} - ¦ ¦--expr: [0/1] {57} - ¦ ¦ ¦--'{': { [0/5] {58} - ¦ ¦ ¦--expr: [1/4] {60} - ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {59} - ¦ ¦ °--'}': } [1/0] {61} - ¦ ¦--ELSE: else [0/1] {62} - ¦ °--expr: [0/0] {63} - ¦ ¦--'{': { [0/0] {64} - ¦ ¦--expr: [1/0] {66} - ¦ ¦ °--NUM_CONST: 4 [0/0] {65} - ¦ °--'}': } [1/0] {67} - °--'}': } [1/0] {68} + ¦--expr: { +if [2/0] {49} + ¦ ¦--'{': { [0/0] {50} + ¦ ¦--expr: if (T [1/0] {51} + ¦ ¦ ¦--IF: if [0/1] {52} + ¦ ¦ ¦--'(': ( [0/0] {53} + ¦ ¦ ¦--expr: TRUE [0/0] {55} + ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {54} + ¦ ¦ ¦--')': ) [0/1] {56} + ¦ ¦ ¦--expr: { + [0/1] {57} + ¦ ¦ ¦ ¦--'{': { [0/5] {58} + ¦ ¦ ¦ ¦--expr: 3 [1/4] {60} + ¦ ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {59} + ¦ ¦ ¦ °--'}': } [1/0] {61} + ¦ ¦ ¦--ELSE: else [0/1] {62} + ¦ ¦ °--expr: { +4 +} [0/0] {63} + ¦ ¦ ¦--'{': { [0/0] {64} + ¦ ¦ ¦--expr: 4 [1/0] {66} + ¦ ¦ ¦ °--NUM_CONST: 4 [0/0] {65} + ¦ ¦ °--'}': } [1/0] {67} + ¦ °--'}': } [1/0] {68} + ¦--COMMENT: # rat [2/0] {69} + ¦--expr: foo < [1/0] {70} + ¦ ¦--expr: foo [0/1] {72} + ¦ ¦ °--SYMBOL: foo [0/0] {71} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {73} + ¦ °--expr: funct [0/0] {74} + ¦ ¦--FUNCTION: funct [0/0] {75} + ¦ ¦--'(': ( [0/0] {76} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {77} + ¦ ¦--')': ) [0/1] {78} + ¦ °--expr: { + i [0/0] {79} + ¦ ¦--'{': { [0/2] {80} + ¦ ¦--expr: if (T [1/0] {81} + ¦ ¦ ¦--IF: if [0/1] {82} + ¦ ¦ ¦--'(': ( [0/0] {83} + ¦ ¦ ¦--expr: TRUE [0/0] {85} + ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {84} + ¦ ¦ ¦--')': ) [0/1] {86} + ¦ ¦ ¦--expr: { + [0/2] {87} + ¦ ¦ ¦ ¦--'{': { [0/4] {88} + ¦ ¦ ¦ ¦--expr: 1 [1/2] {90} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {89} + ¦ ¦ ¦ °--'}': } [1/0] {91} + ¦ ¦ ¦--ELSE: else [1/1] {92} + ¦ ¦ °--expr: { + [0/0] {93} + ¦ ¦ ¦--'{': { [0/4] {94} + ¦ ¦ ¦--expr: 2 [1/2] {96} + ¦ ¦ ¦ °--NUM_CONST: 
2 [0/0] {95} + ¦ ¦ °--'}': } [1/0] {97} + ¦ °--'}': } [1/0] {98} + ¦--expr: if (T [3/0] {99} + ¦ ¦--IF: if [0/1] {100} + ¦ ¦--'(': ( [0/0] {101} + ¦ ¦--expr: TRUE [0/0] {103} + ¦ ¦ °--NUM_CONST: TRUE [0/0] {102} + ¦ ¦--')': ) [0/1] {104} + ¦ ¦--expr: { + 3 [0/1] {105} + ¦ ¦ ¦--'{': { [0/2] {106} + ¦ ¦ ¦--expr: 3 [1/0] {108} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {107} + ¦ ¦ °--'}': } [1/0] {109} + ¦ ¦--ELSE: else [0/2] {110} + ¦ °--expr: if (F [1/0] {111} + ¦ ¦--IF: if [0/1] {112} + ¦ ¦--'(': ( [0/0] {113} + ¦ ¦--expr: FALSE [0/0] {115} + ¦ ¦ °--NUM_CONST: FALSE [0/0] {114} + ¦ ¦--')': ) [0/1] {116} + ¦ °--expr: { + 4 [0/0] {117} + ¦ ¦--'{': { [0/2] {118} + ¦ ¦--expr: 4 [1/0] {120} + ¦ ¦ °--NUM_CONST: 4 [0/0] {119} + ¦ °--'}': } [1/0] {121} + °--expr: if (T [2/0] {122} + ¦--IF: if [0/1] {123} + ¦--'(': ( [0/0] {124} + ¦--expr: TRUE [0/0] {126} + ¦ °--NUM_CONST: TRUE [0/0] {125} + ¦--')': ) [0/1] {127} + ¦--expr: { + 3 [0/1] {128} + ¦ ¦--'{': { [0/2] {129} + ¦ ¦--expr: 3 [1/0] {131} + ¦ ¦ °--NUM_CONST: 3 [0/0] {130} + ¦ °--'}': } [1/0] {132} + ¦--ELSE: else [0/1] {133} + ¦--COMMENT: # com [0/2] {134} + °--expr: if (F [1/0] {135} + ¦--IF: if [0/1] {136} + ¦--'(': ( [0/0] {137} + ¦--expr: FALSE [0/0] {139} + ¦ °--NUM_CONST: FALSE [0/0] {138} + ¦--')': ) [0/1] {140} + °--expr: { + [0/0] {141} + ¦--'{': { [0/4] {142} + ¦--expr: 4 [1/2] {144} + ¦ °--NUM_CONST: 4 [0/0] {143} + °--'}': } [1/0] {145} diff --git a/tests/testthat/indention_multiple/if_else_curly-out.R b/tests/testthat/indention_multiple/if_else_curly-out.R index 6367e8c81..bcd416cb5 100644 --- a/tests/testthat/indention_multiple/if_else_curly-out.R +++ b/tests/testthat/indention_multiple/if_else_curly-out.R @@ -27,3 +27,26 @@ 4 } } + +# rather space than brace thing, but +foo <- function(x) { + if (TRUE) { + 1 + } else { + 2 + } +} + + +if (TRUE) { + 3 +} else if (FALSE) { + 4 +} + +if (TRUE) { + 3 +} else # comment +if (FALSE) { + 4 +} diff --git a/tests/testthat/indention_multiple/overall-in.R b/tests/testthat/indention_multiple/overall-in.R index 50e228294..6f9113e38 100644 --- a/tests/testthat/indention_multiple/overall-in.R +++ b/tests/testthat/indention_multiple/overall-in.R @@ -17,7 +17,7 @@ a <- function(x) { c( list(x + 2), c(c( - 26 ^ 2, # FIXME ^ operator has to be surrounded by one space (or none?!), never multiple + 26 ^ 2, 8, 7 ))) diff --git a/tests/testthat/indention_multiple/overall-in_tree b/tests/testthat/indention_multiple/overall-in_tree index b09455f30..93795a351 100644 --- a/tests/testthat/indention_multiple/overall-in_tree +++ b/tests/testthat/indention_multiple/overall-in_tree @@ -3,78 +3,82 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) ¦--COMMENT: #' [1/0] {2} ¦--COMMENT: #' @p [1/0] {3} ¦--COMMENT: #' [1/0] {4} - ¦--expr: [1/0] {5} - ¦ ¦--expr: [0/1] {7} + ¦--expr: a <- [1/0] {5} + ¦ ¦--expr: a [0/1] {7} ¦ ¦ °--SYMBOL: a [0/0] {6} ¦ ¦--LEFT_ASSIGN: <- [0/1] {8} - ¦ °--expr: [0/0] {9} + ¦ °--expr: funct [0/0] {9} ¦ ¦--FUNCTION: funct [0/0] {10} ¦ ¦--'(': ( [0/0] {11} ¦ ¦--SYMBOL_FORMALS: x [0/0] {12} ¦ ¦--')': ) [0/1] {13} - ¦ °--expr: [0/0] {14} + ¦ °--expr: { + t [0/0] {14} ¦ ¦--'{': { [0/2] {15} - ¦ ¦--expr: [1/2] {16} - ¦ ¦ ¦--expr: [0/0] {18} + ¦ ¦--expr: test_ [1/2] {16} + ¦ ¦ ¦--expr: test_ [0/0] {18} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {17} ¦ ¦ ¦--'(': ( [0/0] {19} - ¦ ¦ ¦--expr: [0/0] {21} + ¦ ¦ ¦--expr: "I wa [0/0] {21} ¦ ¦ ¦ °--STR_CONST: "I wa [0/0] {20} ¦ ¦ ¦--',': , [0/1] {22} - ¦ ¦ ¦--expr: [0/0] {23} + ¦ ¦ ¦--expr: { + [0/0] {23} ¦ ¦ ¦ ¦--'{': { [0/4] {24} - ¦ ¦ ¦ ¦--expr: [1/4] 
{25} - ¦ ¦ ¦ ¦ ¦--expr: [0/1] {27} + ¦ ¦ ¦ ¦--expr: out < [1/4] {25} + ¦ ¦ ¦ ¦ ¦--expr: out [0/1] {27} ¦ ¦ ¦ ¦ ¦ °--SYMBOL: out [0/0] {26} ¦ ¦ ¦ ¦ ¦--LEFT_ASSIGN: <- [0/1] {28} - ¦ ¦ ¦ ¦ °--expr: [0/0] {29} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {31} + ¦ ¦ ¦ ¦ °--expr: c(1, [0/0] {29} + ¦ ¦ ¦ ¦ ¦--expr: c [0/0] {31} ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {30} ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {32} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {34} + ¦ ¦ ¦ ¦ ¦--expr: 1 [0/0] {34} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {33} ¦ ¦ ¦ ¦ ¦--',': , [0/1] {35} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {36} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {38} + ¦ ¦ ¦ ¦ ¦--expr: c( + [0/0] {36} + ¦ ¦ ¦ ¦ ¦ ¦--expr: c [0/0] {38} ¦ ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {37} ¦ ¦ ¦ ¦ ¦ ¦--'(': ( [0/6] {39} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [1/4] {40} - ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/1] {42} + ¦ ¦ ¦ ¦ ¦ ¦--expr: 22 + [1/4] {40} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: 22 [0/1] {42} ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 22 [0/0] {41} ¦ ¦ ¦ ¦ ¦ ¦ ¦--'+': + [0/1] {43} - ¦ ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {45} + ¦ ¦ ¦ ¦ ¦ ¦ °--expr: 1 [0/0] {45} ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {44} ¦ ¦ ¦ ¦ ¦ °--')': ) [1/0] {46} ¦ ¦ ¦ ¦ °--')': ) [0/0] {47} - ¦ ¦ ¦ ¦--expr: [1/2] {48} + ¦ ¦ ¦ ¦--expr: if (x [1/2] {48} ¦ ¦ ¦ ¦ ¦--IF: if [0/1] {49} ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {50} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {51} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/1] {53} + ¦ ¦ ¦ ¦ ¦--expr: x > 1 [0/0] {51} + ¦ ¦ ¦ ¦ ¦ ¦--expr: x [0/1] {53} ¦ ¦ ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {52} ¦ ¦ ¦ ¦ ¦ ¦--GT: > [0/1] {54} - ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {56} + ¦ ¦ ¦ ¦ ¦ °--expr: 10 [0/0] {56} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 10 [0/0] {55} ¦ ¦ ¦ ¦ ¦--')': ) [0/1] {57} - ¦ ¦ ¦ ¦ °--expr: [0/0] {58} + ¦ ¦ ¦ ¦ °--expr: { + [0/0] {58} ¦ ¦ ¦ ¦ ¦--'{': { [0/6] {59} - ¦ ¦ ¦ ¦ ¦--expr: [1/4] {60} + ¦ ¦ ¦ ¦ ¦--expr: for ( [1/4] {60} ¦ ¦ ¦ ¦ ¦ ¦--FOR: for [0/1] {61} - ¦ ¦ ¦ ¦ ¦ ¦--forcond: [0/1] {62} + ¦ ¦ ¦ ¦ ¦ ¦--forcond: (x in [0/1] {62} ¦ ¦ ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {63} ¦ ¦ ¦ ¦ ¦ ¦ ¦--SYMBOL: x [0/1] {64} ¦ ¦ ¦ ¦ ¦ ¦ ¦--IN: in [0/1] {65} - ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {67} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: 22 [0/0] {67} ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 22 [0/0] {66} ¦ ¦ ¦ ¦ ¦ ¦ °--')': ) [0/0] {68} - ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {69} + ¦ ¦ ¦ ¦ ¦ °--expr: { # F [0/0] {69} ¦ ¦ ¦ ¦ ¦ ¦--'{': { [0/1] {70} ¦ ¦ ¦ ¦ ¦ ¦--COMMENT: # FIX [0/8] {71} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [1/6] {72} - ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {74} + ¦ ¦ ¦ ¦ ¦ ¦--expr: prin( [1/6] {72} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: prin [0/0] {74} ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: prin [0/0] {73} ¦ ¦ ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {75} - ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {77} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: x [0/0] {77} ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {76} ¦ ¦ ¦ ¦ ¦ ¦ °--')': ) [0/0] {78} ¦ ¦ ¦ ¦ ¦ °--'}': } [1/0] {79} @@ -82,73 +86,75 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) ¦ ¦ ¦ °--'}': } [1/0] {81} ¦ ¦ °--')': ) [0/0] {82} ¦ ¦--COMMENT: # we [1/2] {83} - ¦ ¦--expr: [1/2] {84} - ¦ ¦ ¦--expr: [0/0] {86} + ¦ ¦--expr: c( + [1/2] {84} + ¦ ¦ ¦--expr: c [0/0] {86} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {85} ¦ ¦ ¦--'(': ( [0/4] {87} - ¦ ¦ ¦--expr: [1/0] {88} - ¦ ¦ ¦ ¦--expr: [0/0] {90} + ¦ ¦ ¦--expr: list( [1/0] {88} + ¦ ¦ ¦ ¦--expr: list [0/0] {90} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: list [0/0] {89} ¦ ¦ ¦ ¦--'(': ( [0/0] {91} - ¦ ¦ ¦ ¦--expr: [0/0] {92} - ¦ ¦ ¦ ¦ ¦--expr: [0/1] {94} + ¦ ¦ ¦ ¦--expr: x + 2 [0/0] {92} + ¦ ¦ ¦ ¦ ¦--expr: x [0/1] {94} ¦ ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {93} ¦ ¦ ¦ ¦ ¦--'+': + [0/1] {95} - ¦ ¦ ¦ ¦ °--expr: [0/0] {97} + ¦ ¦ ¦ ¦ °--expr: 2 [0/0] {97} ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {96} ¦ ¦ ¦ °--')': ) [0/0] {98} ¦ ¦ ¦--',': , [0/4] {99} - ¦ ¦ ¦--expr: [1/0] {100} - ¦ ¦ ¦ ¦--expr: [0/0] {102} + ¦ ¦ ¦--expr: 
c(c( + [1/0] {100} + ¦ ¦ ¦ ¦--expr: c [0/0] {102} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {101} ¦ ¦ ¦ ¦--'(': ( [0/0] {103} - ¦ ¦ ¦ ¦--expr: [0/0] {104} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {106} + ¦ ¦ ¦ ¦--expr: c( + [0/0] {104} + ¦ ¦ ¦ ¦ ¦--expr: c [0/0] {106} ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {105} ¦ ¦ ¦ ¦ ¦--'(': ( [0/6] {107} - ¦ ¦ ¦ ¦ ¦--expr: [1/0] {108} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/1] {110} + ¦ ¦ ¦ ¦ ¦--expr: 26 ^ [1/0] {108} + ¦ ¦ ¦ ¦ ¦ ¦--expr: 26 [0/1] {110} ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 26 [0/0] {109} ¦ ¦ ¦ ¦ ¦ ¦--'^': ^ [0/1] {111} - ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {113} + ¦ ¦ ¦ ¦ ¦ °--expr: 2 [0/0] {113} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {112} - ¦ ¦ ¦ ¦ ¦--',': , [0/1] {114} - ¦ ¦ ¦ ¦ ¦--COMMENT: # FIX [0/6] {115} - ¦ ¦ ¦ ¦ ¦--expr: [1/0] {117} - ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 8 [0/0] {116} - ¦ ¦ ¦ ¦ ¦--',': , [0/6] {118} - ¦ ¦ ¦ ¦ ¦--expr: [1/2] {120} - ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 7 [0/0] {119} - ¦ ¦ ¦ ¦ °--')': ) [1/0] {121} - ¦ ¦ ¦ °--')': ) [0/0] {122} - ¦ ¦ °--')': ) [0/0] {123} - ¦ ¦--expr: [2/0] {124} - ¦ ¦ ¦--expr: [0/0] {126} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {125} - ¦ ¦ ¦--'(': ( [0/4] {127} - ¦ ¦ ¦--expr: [1/0] {129} - ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {128} - ¦ ¦ ¦--',': , [0/1] {130} - ¦ ¦ ¦--expr: [0/0] {132} - ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {131} - ¦ ¦ ¦--',': , [0/4] {133} - ¦ ¦ ¦--expr: [1/0] {134} - ¦ ¦ ¦ ¦--expr: [0/1] {137} - ¦ ¦ ¦ ¦ °--NUM_CONST: 23 [0/0] {136} - ¦ ¦ ¦ ¦--'+': + [0/1] {138} - ¦ ¦ ¦ ¦--expr: [0/1] {140} - ¦ ¦ ¦ ¦ °--NUM_CONST: Inf [0/0] {139} - ¦ ¦ ¦ ¦--'-': - [0/1] {141} - ¦ ¦ ¦ °--expr: [0/0] {143} - ¦ ¦ ¦ °--NUM_CONST: 99 [0/0] {142} - ¦ ¦ ¦--',': , [0/1] {144} - ¦ ¦ ¦--expr: [0/0] {145} - ¦ ¦ ¦ ¦--expr: [0/0] {147} - ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {146} - ¦ ¦ ¦ ¦--'(': ( [0/6] {148} - ¦ ¦ ¦ ¦--expr: [1/2] {150} - ¦ ¦ ¦ ¦ °--NUM_CONST: 16 [0/0] {149} - ¦ ¦ ¦ °--')': ) [1/0] {151} - ¦ ¦ °--')': ) [0/0] {152} - ¦ °--'}': } [1/0] {153} - °--COMMENT: # com [1/0] {154} + ¦ ¦ ¦ ¦ ¦--',': , [0/6] {114} + ¦ ¦ ¦ ¦ ¦--expr: 8 [1/0] {116} + ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 8 [0/0] {115} + ¦ ¦ ¦ ¦ ¦--',': , [0/6] {117} + ¦ ¦ ¦ ¦ ¦--expr: 7 [1/2] {119} + ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 7 [0/0] {118} + ¦ ¦ ¦ ¦ °--')': ) [1/0] {120} + ¦ ¦ ¦ °--')': ) [0/0] {121} + ¦ ¦ °--')': ) [0/0] {122} + ¦ ¦--expr: call( [2/0] {123} + ¦ ¦ ¦--expr: call [0/0] {125} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {124} + ¦ ¦ ¦--'(': ( [0/4] {126} + ¦ ¦ ¦--expr: 1 [1/0] {128} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {127} + ¦ ¦ ¦--',': , [0/1] {129} + ¦ ¦ ¦--expr: 2 [0/0] {131} + ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {130} + ¦ ¦ ¦--',': , [0/4] {132} + ¦ ¦ ¦--expr: 23 + [1/0] {133} + ¦ ¦ ¦ ¦--expr: 23 [0/1] {136} + ¦ ¦ ¦ ¦ °--NUM_CONST: 23 [0/0] {135} + ¦ ¦ ¦ ¦--'+': + [0/1] {137} + ¦ ¦ ¦ ¦--expr: Inf [0/1] {139} + ¦ ¦ ¦ ¦ °--NUM_CONST: Inf [0/0] {138} + ¦ ¦ ¦ ¦--'-': - [0/1] {140} + ¦ ¦ ¦ °--expr: 99 [0/0] {142} + ¦ ¦ ¦ °--NUM_CONST: 99 [0/0] {141} + ¦ ¦ ¦--',': , [0/1] {143} + ¦ ¦ ¦--expr: call( [0/0] {144} + ¦ ¦ ¦ ¦--expr: call [0/0] {146} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {145} + ¦ ¦ ¦ ¦--'(': ( [0/6] {147} + ¦ ¦ ¦ ¦--expr: 16 [1/2] {149} + ¦ ¦ ¦ ¦ °--NUM_CONST: 16 [0/0] {148} + ¦ ¦ ¦ °--')': ) [1/0] {150} + ¦ ¦ °--')': ) [0/0] {151} + ¦ °--'}': } [1/0] {152} + °--COMMENT: # com [1/0] {153} diff --git a/tests/testthat/indention_multiple/overall-out.R b/tests/testthat/indention_multiple/overall-out.R index 909933a63..4db2b90fc 100644 --- a/tests/testthat/indention_multiple/overall-out.R +++ b/tests/testthat/indention_multiple/overall-out.R @@ -17,7 +17,7 @@ a <- function(x) { c( list(x + 2), c(c( - 26 ^ 2, # 
FIXME ^ operator has to be surrounded by one space (or none?!), never multiple + 26^2, 8, 7 )) diff --git a/tests/testthat/indention_multiple/round_closing_on_same_line-in_tree b/tests/testthat/indention_multiple/round_closing_on_same_line-in_tree index 14fc50971..145b8bb50 100644 --- a/tests/testthat/indention_multiple/round_closing_on_same_line-in_tree +++ b/tests/testthat/indention_multiple/round_closing_on_same_line-in_tree @@ -1,20 +1,20 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/0] {3} + °--expr: c(cal [0/0] {1} + ¦--expr: c [0/0] {3} ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {2} ¦--'(': ( [0/0] {4} - ¦--expr: [0/0] {5} - ¦ ¦--expr: [0/0] {7} + ¦--expr: call( [0/0] {5} + ¦ ¦--expr: call [0/0] {7} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {6} ¦ ¦--'(': ( [0/0] {8} - ¦ ¦--expr: [0/0] {10} + ¦ ¦--expr: 2 [0/0] {10} ¦ ¦ °--NUM_CONST: 2 [0/0] {9} ¦ °--')': ) [0/0] {11} ¦--',': , [0/1] {12} - ¦--expr: [0/0] {14} + ¦--expr: 1 [0/0] {14} ¦ °--NUM_CONST: 1 [0/0] {13} ¦--',': , [0/1] {15} ¦--COMMENT: # com [0/0] {16} - ¦--expr: [1/1] {18} + ¦--expr: 29 [1/1] {18} ¦ °--NUM_CONST: 29 [0/0] {17} °--')': ) [1/0] {19} diff --git a/tests/testthat/indention_multiple/round_only-in_tree b/tests/testthat/indention_multiple/round_only-in_tree index 9f6c571db..70166c710 100644 --- a/tests/testthat/indention_multiple/round_only-in_tree +++ b/tests/testthat/indention_multiple/round_only-in_tree @@ -1,28 +1,39 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/3] {1} + ¦--expr: ((((( [0/3] {1} ¦ ¦--'(': ( [0/0] {2} - ¦ ¦--expr: [0/0] {3} + ¦ ¦--expr: (((( + [0/0] {3} ¦ ¦ ¦--'(': ( [0/0] {4} - ¦ ¦ ¦--expr: [0/0] {5} + ¦ ¦ ¦--expr: ((( +1 [0/0] {5} ¦ ¦ ¦ ¦--'(': ( [0/0] {6} - ¦ ¦ ¦ ¦--expr: [0/0] {7} + ¦ ¦ ¦ ¦--expr: (( +1 + [0/0] {7} ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {8} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {9} + ¦ ¦ ¦ ¦ ¦--expr: ( +1 + [0/0] {9} ¦ ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {10} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [1/7] {12} + ¦ ¦ ¦ ¦ ¦ ¦--expr: 1 [1/7] {12} ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {11} ¦ ¦ ¦ ¦ ¦ °--')': ) [1/0] {13} ¦ ¦ ¦ ¦ °--')': ) [0/0] {14} ¦ ¦ ¦ °--')': ) [0/0] {15} ¦ ¦ °--')': ) [0/0] {16} ¦ °--')': ) [0/0] {17} - °--expr: [2/0] {18} + °--expr: ((( +2 [2/0] {18} ¦--'(': ( [0/0] {19} - ¦--expr: [0/0] {20} + ¦--expr: (( +2 + [0/0] {20} ¦ ¦--'(': ( [0/0] {21} - ¦ ¦--expr: [0/0] {22} + ¦ ¦--expr: ( +2 +) [0/0] {22} ¦ ¦ ¦--'(': ( [0/0] {23} - ¦ ¦ ¦--expr: [1/0] {25} + ¦ ¦ ¦--expr: 2 [1/0] {25} ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {24} ¦ ¦ °--')': ) [1/0] {26} ¦ °--')': ) [0/0] {27} diff --git a/tests/testthat/indention_operators/base_pipe_and_assignment-in.R b/tests/testthat/indention_operators/base_pipe_and_assignment-in.R new file mode 100644 index 000000000..b78d18d72 --- /dev/null +++ b/tests/testthat/indention_operators/base_pipe_and_assignment-in.R @@ -0,0 +1,8 @@ +a <- +b() |> + q() |> + g() + +a <- b() |> + c()|> +ggg() diff --git a/tests/testthat/indention_operators/base_pipe_and_assignment-in_tree b/tests/testthat/indention_operators/base_pipe_and_assignment-in_tree new file mode 100644 index 000000000..85acfef3b --- /dev/null +++ b/tests/testthat/indention_operators/base_pipe_and_assignment-in_tree @@ -0,0 +1,43 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: a [0/0] {1} + ¦ ¦--expr: a [0/4] {3} + ¦ ¦ °--SYMBOL: a [0/0] {2} + ¦ ¦--LEFT_ASSIGN: <- [0/0] {4} + ¦ ¦--expr: b() [1/1] {7} + ¦ ¦ ¦--expr: b [0/0] {9} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {8} + ¦ ¦ ¦--'(': ( [0/0] {10} + ¦ ¦ °--')': ) [0/0] {11} + ¦ ¦--PIPE: |> [0/2] {12} + ¦ ¦--expr: 
q() [1/1] {13} + ¦ ¦ ¦--expr: q [0/0] {15} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: q [0/0] {14} + ¦ ¦ ¦--'(': ( [0/0] {16} + ¦ ¦ °--')': ) [0/0] {17} + ¦ ¦--PIPE: |> [0/5] {18} + ¦ °--expr: g() [1/0] {19} + ¦ ¦--expr: g [0/0] {21} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: g [0/0] {20} + ¦ ¦--'(': ( [0/0] {22} + ¦ °--')': ) [0/0] {23} + °--expr: a <- [2/0] {24} + ¦--expr: a [0/1] {26} + ¦ °--SYMBOL: a [0/0] {25} + ¦--LEFT_ASSIGN: <- [0/4] {27} + ¦--expr: b() [0/1] {30} + ¦ ¦--expr: b [0/0] {32} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {31} + ¦ ¦--'(': ( [0/0] {33} + ¦ °--')': ) [0/0] {34} + ¦--PIPE: |> [0/2] {35} + ¦--expr: c() [1/0] {36} + ¦ ¦--expr: c [0/0] {38} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {37} + ¦ ¦--'(': ( [0/0] {39} + ¦ °--')': ) [0/0] {40} + ¦--PIPE: |> [0/0] {41} + °--expr: ggg() [1/0] {42} + ¦--expr: ggg [0/0] {44} + ¦ °--SYMBOL_FUNCTION_CALL: ggg [0/0] {43} + ¦--'(': ( [0/0] {45} + °--')': ) [0/0] {46} diff --git a/tests/testthat/indention_operators/base_pipe_and_assignment-out.R b/tests/testthat/indention_operators/base_pipe_and_assignment-out.R new file mode 100644 index 000000000..98fd88893 --- /dev/null +++ b/tests/testthat/indention_operators/base_pipe_and_assignment-out.R @@ -0,0 +1,8 @@ +a <- + b() |> + q() |> + g() + +a <- b() |> + c() |> + ggg() diff --git a/tests/testthat/indention_operators/base_pipe_and_assignment_and_comment-in.R b/tests/testthat/indention_operators/base_pipe_and_assignment_and_comment-in.R new file mode 100644 index 000000000..723de7160 --- /dev/null +++ b/tests/testthat/indention_operators/base_pipe_and_assignment_and_comment-in.R @@ -0,0 +1,25 @@ +a <-# + b() |> + c() |> + d() + +a <- # + b() |> + c() |> + d() + + +a <- + b() |> + c() |> + d() + +a <- c |> + b()|> + c( ) |> + d() + +a <- + b() |> # + c() |> + d()# d diff --git a/tests/testthat/indention_operators/base_pipe_and_assignment_and_comment-in_tree b/tests/testthat/indention_operators/base_pipe_and_assignment_and_comment-in_tree new file mode 100644 index 000000000..baae8dbbf --- /dev/null +++ b/tests/testthat/indention_operators/base_pipe_and_assignment_and_comment-in_tree @@ -0,0 +1,115 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: a <-# [0/0] {1} + ¦ ¦--expr: a [0/1] {3} + ¦ ¦ °--SYMBOL: a [0/0] {2} + ¦ ¦--LEFT_ASSIGN: <- [0/0] {4} + ¦ ¦--COMMENT: # [0/2] {5} + ¦ ¦--expr: b() [1/1] {8} + ¦ ¦ ¦--expr: b [0/0] {10} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {9} + ¦ ¦ ¦--'(': ( [0/0] {11} + ¦ ¦ °--')': ) [0/0] {12} + ¦ ¦--PIPE: |> [0/2] {13} + ¦ ¦--expr: c() [1/1] {14} + ¦ ¦ ¦--expr: c [0/0] {16} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {15} + ¦ ¦ ¦--'(': ( [0/0] {17} + ¦ ¦ °--')': ) [0/0] {18} + ¦ ¦--PIPE: |> [0/2] {19} + ¦ °--expr: d() [1/0] {20} + ¦ ¦--expr: d [0/0] {22} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: d [0/0] {21} + ¦ ¦--'(': ( [0/0] {23} + ¦ °--')': ) [0/0] {24} + ¦--expr: a <- [2/0] {25} + ¦ ¦--expr: a [0/1] {27} + ¦ ¦ °--SYMBOL: a [0/0] {26} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {28} + ¦ ¦--COMMENT: # [0/2] {29} + ¦ ¦--expr: b() [1/1] {32} + ¦ ¦ ¦--expr: b [0/0] {34} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {33} + ¦ ¦ ¦--'(': ( [0/0] {35} + ¦ ¦ °--')': ) [0/0] {36} + ¦ ¦--PIPE: |> [0/2] {37} + ¦ ¦--expr: c() [1/1] {38} + ¦ ¦ ¦--expr: c [0/0] {40} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {39} + ¦ ¦ ¦--'(': ( [0/0] {41} + ¦ ¦ °--')': ) [0/0] {42} + ¦ ¦--PIPE: |> [0/2] {43} + ¦ °--expr: d() [1/0] {44} + ¦ ¦--expr: d [0/0] {46} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: d [0/0] {45} + ¦ ¦--'(': ( [0/0] {47} + ¦ °--')': ) [0/0] {48} + ¦--expr: a <- + [3/0] {49} + ¦ ¦--expr: a [0/1] {51} + ¦ ¦ 
°--SYMBOL: a [0/0] {50} + ¦ ¦--LEFT_ASSIGN: <- [0/2] {52} + ¦ ¦--expr: b() [1/1] {55} + ¦ ¦ ¦--expr: b [0/0] {57} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {56} + ¦ ¦ ¦--'(': ( [0/0] {58} + ¦ ¦ °--')': ) [0/0] {59} + ¦ ¦--PIPE: |> [0/2] {60} + ¦ ¦--expr: c() [1/1] {61} + ¦ ¦ ¦--expr: c [0/0] {63} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {62} + ¦ ¦ ¦--'(': ( [0/0] {64} + ¦ ¦ °--')': ) [0/0] {65} + ¦ ¦--PIPE: |> [0/2] {66} + ¦ °--expr: d() [1/0] {67} + ¦ ¦--expr: d [0/0] {69} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: d [0/0] {68} + ¦ ¦--'(': ( [0/0] {70} + ¦ °--')': ) [0/0] {71} + ¦--expr: a <- [2/0] {72} + ¦ ¦--expr: a [0/1] {74} + ¦ ¦ °--SYMBOL: a [0/0] {73} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {75} + ¦ ¦--expr: c [0/1] {80} + ¦ ¦ °--SYMBOL: c [0/0] {79} + ¦ ¦--PIPE: |> [0/2] {81} + ¦ ¦--expr: b() [1/0] {82} + ¦ ¦ ¦--expr: b [0/0] {84} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {83} + ¦ ¦ ¦--'(': ( [0/0] {85} + ¦ ¦ °--')': ) [0/0] {86} + ¦ ¦--PIPE: |> [0/2] {87} + ¦ ¦--expr: c( ) [1/1] {88} + ¦ ¦ ¦--expr: c [0/0] {90} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {89} + ¦ ¦ ¦--'(': ( [0/1] {91} + ¦ ¦ °--')': ) [0/0] {92} + ¦ ¦--PIPE: |> [0/2] {93} + ¦ °--expr: d() [1/0] {94} + ¦ ¦--expr: d [0/0] {96} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: d [0/0] {95} + ¦ ¦--'(': ( [0/0] {97} + ¦ °--')': ) [0/0] {98} + ¦--expr: a <- + [2/0] {99} + ¦ ¦--expr: a [0/1] {101} + ¦ ¦ °--SYMBOL: a [0/0] {100} + ¦ ¦--LEFT_ASSIGN: <- [0/2] {102} + ¦ ¦--expr: b() [1/1] {105} + ¦ ¦ ¦--expr: b [0/0] {107} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {106} + ¦ ¦ ¦--'(': ( [0/0] {108} + ¦ ¦ °--')': ) [0/0] {109} + ¦ ¦--PIPE: |> [0/1] {110} + ¦ ¦--COMMENT: # [0/2] {111} + ¦ ¦--expr: c() [1/1] {112} + ¦ ¦ ¦--expr: c [0/0] {114} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {113} + ¦ ¦ ¦--'(': ( [0/0] {115} + ¦ ¦ °--')': ) [0/0] {116} + ¦ ¦--PIPE: |> [0/2] {117} + ¦ °--expr: d() [1/0] {118} + ¦ ¦--expr: d [0/0] {120} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: d [0/0] {119} + ¦ ¦--'(': ( [0/0] {121} + ¦ °--')': ) [0/0] {122} + °--COMMENT: # d [0/0] {123} diff --git a/tests/testthat/indention_operators/base_pipe_and_assignment_and_comment-out.R b/tests/testthat/indention_operators/base_pipe_and_assignment_and_comment-out.R new file mode 100644 index 000000000..4ebdd39b0 --- /dev/null +++ b/tests/testthat/indention_operators/base_pipe_and_assignment_and_comment-out.R @@ -0,0 +1,25 @@ +a <- # + b() |> + c() |> + d() + +a <- # + b() |> + c() |> + d() + + +a <- + b() |> + c() |> + d() + +a <- c |> + b() |> + c() |> + d() + +a <- + b() |> # + c() |> + d() # d diff --git a/tests/testthat/indention_operators/base_pipe_and_assignment_and_math-in.R b/tests/testthat/indention_operators/base_pipe_and_assignment_and_math-in.R new file mode 100644 index 000000000..53ad74073 --- /dev/null +++ b/tests/testthat/indention_operators/base_pipe_and_assignment_and_math-in.R @@ -0,0 +1,5 @@ +q <- a+ + - 3 + +2+ +g()|> + k() diff --git a/tests/testthat/indention_operators/base_pipe_and_assignment_and_math-in_tree b/tests/testthat/indention_operators/base_pipe_and_assignment_and_math-in_tree new file mode 100644 index 000000000..422b0c21e --- /dev/null +++ b/tests/testthat/indention_operators/base_pipe_and_assignment_and_math-in_tree @@ -0,0 +1,27 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + °--expr: q <- [0/0] {1} + ¦--expr: q [0/1] {3} + ¦ °--SYMBOL: q [0/0] {2} + ¦--LEFT_ASSIGN: <- [0/2] {4} + ¦--expr: a [0/0] {9} + ¦ °--SYMBOL: a [0/0] {8} + ¦--'+': + [0/2] {10} + ¦--expr: - 3 [1/1] {11} + ¦ ¦--'-': - [0/1] {12} + ¦ °--expr: 3 [0/0] {14} + ¦ °--NUM_CONST: 3 [0/0] {13} + ¦--'+': + 
[0/0] {15} + ¦--expr: 2 [1/0] {17} + ¦ °--NUM_CONST: 2 [0/0] {16} + ¦--'+': + [0/0] {18} + ¦--expr: g() [1/0] {20} + ¦ ¦--expr: g [0/0] {22} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: g [0/0] {21} + ¦ ¦--'(': ( [0/0] {23} + ¦ °--')': ) [0/0] {24} + ¦--PIPE: |> [0/3] {25} + °--expr: k() [1/0] {26} + ¦--expr: k [0/0] {28} + ¦ °--SYMBOL_FUNCTION_CALL: k [0/0] {27} + ¦--'(': ( [0/0] {29} + °--')': ) [0/0] {30} diff --git a/tests/testthat/indention_operators/base_pipe_and_assignment_and_math-out.R b/tests/testthat/indention_operators/base_pipe_and_assignment_and_math-out.R new file mode 100644 index 000000000..c8ed58735 --- /dev/null +++ b/tests/testthat/indention_operators/base_pipe_and_assignment_and_math-out.R @@ -0,0 +1,5 @@ +q <- a + + -3 + + 2 + + g() |> + k() diff --git a/tests/testthat/indention_operators/base_pipe_simple-in.R b/tests/testthat/indention_operators/base_pipe_simple-in.R new file mode 100644 index 000000000..d52452414 --- /dev/null +++ b/tests/testthat/indention_operators/base_pipe_simple-in.R @@ -0,0 +1,18 @@ + a |> +b() |> +c() |> + d(1 + e (sin(f))) |> + g_out() + +a <- function(jon_the_pipe) {} + +x |> + + # break + call() + + +y |> + + + call() # mor diff --git a/tests/testthat/indention_operators/base_pipe_simple-in_tree b/tests/testthat/indention_operators/base_pipe_simple-in_tree new file mode 100644 index 000000000..e30c30168 --- /dev/null +++ b/tests/testthat/indention_operators/base_pipe_simple-in_tree @@ -0,0 +1,79 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: a |> + [0/0] {1} + ¦ ¦--expr: a [0/1] {6} + ¦ ¦ °--SYMBOL: a [0/0] {5} + ¦ ¦--PIPE: |> [0/0] {7} + ¦ ¦--expr: b() [1/1] {8} + ¦ ¦ ¦--expr: b [0/0] {10} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {9} + ¦ ¦ ¦--'(': ( [0/0] {11} + ¦ ¦ °--')': ) [0/0] {12} + ¦ ¦--PIPE: |> [0/0] {13} + ¦ ¦--expr: c() [1/1] {14} + ¦ ¦ ¦--expr: c [0/0] {16} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {15} + ¦ ¦ ¦--'(': ( [0/0] {17} + ¦ ¦ °--')': ) [0/0] {18} + ¦ ¦--PIPE: |> [0/10] {19} + ¦ ¦--expr: d(1 + [1/1] {20} + ¦ ¦ ¦--expr: d [0/0] {22} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: d [0/0] {21} + ¦ ¦ ¦--'(': ( [0/0] {23} + ¦ ¦ ¦--expr: 1 + e [0/0] {24} + ¦ ¦ ¦ ¦--expr: 1 [0/1] {26} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {25} + ¦ ¦ ¦ ¦--'+': + [0/1] {27} + ¦ ¦ ¦ °--expr: e (si [0/0] {28} + ¦ ¦ ¦ ¦--expr: e [0/1] {30} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: e [0/0] {29} + ¦ ¦ ¦ ¦--'(': ( [0/0] {31} + ¦ ¦ ¦ ¦--expr: sin(f [0/0] {32} + ¦ ¦ ¦ ¦ ¦--expr: sin [0/0] {34} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: sin [0/0] {33} + ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {35} + ¦ ¦ ¦ ¦ ¦--expr: f [0/0] {37} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL: f [0/0] {36} + ¦ ¦ ¦ ¦ °--')': ) [0/0] {38} + ¦ ¦ ¦ °--')': ) [0/0] {39} + ¦ ¦ °--')': ) [0/0] {40} + ¦ ¦--PIPE: |> [0/33] {41} + ¦ °--expr: g_out [1/0] {42} + ¦ ¦--expr: g_out [0/0] {44} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: g_out [0/0] {43} + ¦ ¦--'(': ( [0/0] {45} + ¦ °--')': ) [0/0] {46} + ¦--expr: a <- [2/0] {47} + ¦ ¦--expr: a [0/1] {49} + ¦ ¦ °--SYMBOL: a [0/0] {48} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {50} + ¦ °--expr: funct [0/0] {51} + ¦ ¦--FUNCTION: funct [0/0] {52} + ¦ ¦--'(': ( [0/0] {53} + ¦ ¦--SYMBOL_FORMALS: jon_t [0/0] {54} + ¦ ¦--')': ) [0/1] {55} + ¦ °--expr: {} [0/0] {56} + ¦ ¦--'{': { [0/0] {57} + ¦ °--'}': } [0/0] {58} + ¦--expr: x |> + [2/0] {59} + ¦ ¦--expr: x [0/1] {61} + ¦ ¦ °--SYMBOL: x [0/0] {60} + ¦ ¦--PIPE: |> [0/2] {62} + ¦ ¦--COMMENT: # bre [2/2] {63} + ¦ °--expr: call( [1/0] {64} + ¦ ¦--expr: call [0/0] {66} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {65} + ¦ ¦--'(': ( [0/0] {67} + ¦ °--')': ) [0/0] {68} + ¦--expr: y |> + [3/1] 
{69} + ¦ ¦--expr: y [0/1] {71} + ¦ ¦ °--SYMBOL: y [0/0] {70} + ¦ ¦--PIPE: |> [0/2] {72} + ¦ °--expr: call( [3/0] {73} + ¦ ¦--expr: call [0/0] {75} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {74} + ¦ ¦--'(': ( [0/0] {76} + ¦ °--')': ) [0/0] {77} + °--COMMENT: # mor [0/0] {78} diff --git a/tests/testthat/indention_operators/base_pipe_simple-out.R b/tests/testthat/indention_operators/base_pipe_simple-out.R new file mode 100644 index 000000000..4257a7917 --- /dev/null +++ b/tests/testthat/indention_operators/base_pipe_simple-out.R @@ -0,0 +1,15 @@ +a |> + b() |> + c() |> + d(1 + e(sin(f))) |> + g_out() + +a <- function(jon_the_pipe) {} + +x |> + # break + call() + + +y |> + call() # mor diff --git a/tests/testthat/indention_operators/dollar_R6-in_tree b/tests/testthat/indention_operators/dollar_R6-in_tree index 54eedb3b1..31105cc5a 100644 --- a/tests/testthat/indention_operators/dollar_R6-in_tree +++ b/tests/testthat/indention_operators/dollar_R6-in_tree @@ -1,25 +1,30 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/0] {3} - ¦ ¦--expr: [0/0] {4} - ¦ ¦ ¦--expr: [0/2] {5} - ¦ ¦ ¦ ¦--expr: [0/0] {6} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {8} + °--expr: x$ + [0/0] {1} + ¦--expr: x$ + [0/0] {3} + ¦ ¦--expr: x$ + [0/0] {4} + ¦ ¦ ¦--expr: x$ + [0/2] {5} + ¦ ¦ ¦ ¦--expr: x$ + [0/0] {6} + ¦ ¦ ¦ ¦ ¦--expr: x [0/0] {8} ¦ ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {7} ¦ ¦ ¦ ¦ ¦--'$': $ [0/3] {9} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: add [1/0] {10} ¦ ¦ ¦ ¦--'(': ( [0/0] {11} - ¦ ¦ ¦ ¦--expr: [0/0] {13} + ¦ ¦ ¦ ¦--expr: 10 [0/0] {13} ¦ ¦ ¦ ¦ °--NUM_CONST: 10 [0/0] {12} ¦ ¦ ¦ °--')': ) [0/0] {14} ¦ ¦ ¦--'$': $ [0/0] {15} ¦ ¦ °--SYMBOL_FUNCTION_CALL: add [1/0] {16} ¦ ¦--'(': ( [0/0] {17} - ¦ ¦--expr: [0/0] {19} + ¦ ¦--expr: 10 [0/0] {19} ¦ ¦ °--NUM_CONST: 10 [0/0] {18} ¦ °--')': ) [0/0] {20} ¦--'$': $ [0/0] {21} ¦--SYMBOL: sum [0/1] {22} ¦--'+': + [0/0] {23} - °--expr: [1/0] {25} + °--expr: 3 [1/0] {25} °--NUM_CONST: 3 [0/0] {24} diff --git a/tests/testthat/indention_operators/eq_assign-in_tree b/tests/testthat/indention_operators/eq_assign-in_tree index 46bc3780f..c11954b1d 100644 --- a/tests/testthat/indention_operators/eq_assign-in_tree +++ b/tests/testthat/indention_operators/eq_assign-in_tree @@ -1,42 +1,47 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/0] {3} + ¦--expr: switc [0/0] {1} + ¦ ¦--expr: switc [0/0] {3} ¦ ¦ °--SYMBOL_FUNCTION_CALL: switc [0/0] {2} ¦ ¦--'(': ( [0/0] {4} - ¦ ¦--expr: [0/0] {6} + ¦ ¦--expr: engin [0/0] {6} ¦ ¦ °--SYMBOL: engin [0/0] {5} ¦ ¦--',': , [0/4] {7} ¦ ¦--SYMBOL_SUB: pdfte [1/1] {8} ¦ ¦--EQ_SUB: = [0/1] {9} - ¦ ¦--expr: [0/0] {10} + ¦ ¦--expr: { + [0/0] {10} ¦ ¦ ¦--'{': { [0/5] {11} - ¦ ¦ ¦--expr: [1/9] {12} + ¦ ¦ ¦--expr: if (a [1/9] {12} ¦ ¦ ¦ ¦--IF: if [0/1] {13} ¦ ¦ ¦ ¦--'(': ( [0/0] {14} - ¦ ¦ ¦ ¦--expr: [0/0] {16} + ¦ ¦ ¦ ¦--expr: any [0/0] {16} ¦ ¦ ¦ ¦ °--SYMBOL: any [0/0] {15} ¦ ¦ ¦ ¦--')': ) [0/1] {17} - ¦ ¦ ¦ °--expr: [0/0] {18} + ¦ ¦ ¦ °--expr: { + [0/0] {18} ¦ ¦ ¦ ¦--'{': { [0/14] {19} - ¦ ¦ ¦ ¦--expr: [1/12] {21} + ¦ ¦ ¦ ¦--expr: x [1/12] {21} ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {20} ¦ ¦ ¦ °--'}': } [1/0] {22} ¦ ¦ °--'}': } [1/0] {23} ¦ ¦--',': , [0/7] {24} ¦ ¦--SYMBOL_SUB: new [1/0] {25} ¦ ¦--EQ_SUB: = [0/0] {26} - ¦ ¦--expr: [0/3] {27} + ¦ ¦--expr: ( + [0/3] {27} ¦ ¦ ¦--'(': ( [0/6] {28} - ¦ ¦ ¦--expr: [1/7] {30} + ¦ ¦ ¦--expr: 2 [1/7] {30} ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {29} ¦ ¦ °--')': ) [1/0] {31} ¦ °--')': ) [0/0] {32} - °--expr: [2/0] {33} + °--expr: { + a [2/0] {33} ¦--'{': { [0/2] {34} - ¦--expr: [1/0] 
{35} - ¦ ¦--expr: [0/1] {37} + ¦--expr: a <- + [1/0] {35} + ¦ ¦--expr: a [0/1] {37} ¦ ¦ °--SYMBOL: a [0/0] {36} ¦ ¦--LEFT_ASSIGN: <- [0/4] {38} - ¦ °--expr: [1/0] {40} + ¦ °--expr: 3 [1/0] {40} ¦ °--NUM_CONST: 3 [0/0] {39} °--'}': } [1/0] {41} diff --git a/tests/testthat/indention_operators/eq_formal_simple-in.R b/tests/testthat/indention_operators/eq_formal_simple-in.R new file mode 100644 index 000000000..5c799d1cc --- /dev/null +++ b/tests/testthat/indention_operators/eq_formal_simple-in.R @@ -0,0 +1,8 @@ +abbbb <- function(x = + 22 + ) { + data_frame( + x = + long_long_long * x + ) +} diff --git a/tests/testthat/indention_operators/eq_formal_simple-in_tree b/tests/testthat/indention_operators/eq_formal_simple-in_tree new file mode 100644 index 000000000..ce4a97830 --- /dev/null +++ b/tests/testthat/indention_operators/eq_formal_simple-in_tree @@ -0,0 +1,30 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + °--expr: abbbb [0/0] {1} + ¦--expr: abbbb [0/1] {3} + ¦ °--SYMBOL: abbbb [0/0] {2} + ¦--LEFT_ASSIGN: <- [0/1] {4} + °--expr: funct [0/0] {5} + ¦--FUNCTION: funct [0/0] {6} + ¦--'(': ( [0/0] {7} + ¦--SYMBOL_FORMALS: x [0/1] {8} + ¦--EQ_FORMALS: = [0/18] {9} + ¦--expr: 22 [1/18] {11} + ¦ °--NUM_CONST: 22 [0/0] {10} + ¦--')': ) [1/1] {12} + °--expr: { + d [0/0] {13} + ¦--'{': { [0/2] {14} + ¦--expr: data_ [1/0] {15} + ¦ ¦--expr: data_ [0/0] {17} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: data_ [0/0] {16} + ¦ ¦--'(': ( [0/4] {18} + ¦ ¦--SYMBOL_SUB: x [1/1] {19} + ¦ ¦--EQ_SUB: = [0/6] {20} + ¦ ¦--expr: long_ [1/2] {21} + ¦ ¦ ¦--expr: long_ [0/1] {23} + ¦ ¦ ¦ °--SYMBOL: long_ [0/0] {22} + ¦ ¦ ¦--'*': * [0/1] {24} + ¦ ¦ °--expr: x [0/0] {26} + ¦ ¦ °--SYMBOL: x [0/0] {25} + ¦ °--')': ) [1/0] {27} + °--'}': } [1/0] {28} diff --git a/tests/testthat/indention_operators/eq_formal_simple-out.R b/tests/testthat/indention_operators/eq_formal_simple-out.R new file mode 100644 index 000000000..01554a116 --- /dev/null +++ b/tests/testthat/indention_operators/eq_formal_simple-out.R @@ -0,0 +1,7 @@ +abbbb <- function(x = + 22) { + data_frame( + x = + long_long_long * x + ) +} diff --git a/tests/testthat/indention_operators/eq_formals_complex_indention-in.R b/tests/testthat/indention_operators/eq_formals_complex_indention-in.R new file mode 100644 index 000000000..d05f6b0b6 --- /dev/null +++ b/tests/testthat/indention_operators/eq_formals_complex_indention-in.R @@ -0,0 +1,60 @@ +function(a = +33, + b + ) {} + +function(a = + 33, + b ) {} + +function(a , + b, +c + ) {} + +function(a, + b, +c) {} + +function(ss, + a = +3, + er = + 4 + ) {} + +function(a = + b, + f = + d, c = + 3, d = + 4) { + +} + + +# classical +function(a = + 33, + b +) {} + +function(a = + 33, + b ) {} + +function(a , + b, +c +) {} + +function(a, + b, + c) {} + +function(ss, + a = + 3, + er = + 4 +) {} diff --git a/tests/testthat/indention_operators/eq_formals_complex_indention-in_tree b/tests/testthat/indention_operators/eq_formals_complex_indention-in_tree new file mode 100644 index 000000000..210e47a12 --- /dev/null +++ b/tests/testthat/indention_operators/eq_formals_complex_indention-in_tree @@ -0,0 +1,166 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: funct [0/0] {1} + ¦ ¦--FUNCTION: funct [0/0] {2} + ¦ ¦--'(': ( [0/0] {3} + ¦ ¦--SYMBOL_FORMALS: a [0/1] {4} + ¦ ¦--EQ_FORMALS: = [0/0] {5} + ¦ ¦--expr: 33 [1/0] {7} + ¦ ¦ °--NUM_CONST: 33 [0/0] {6} + ¦ ¦--',': , [0/2] {8} + ¦ ¦--SYMBOL_FORMALS: b [1/2] {9} + ¦ ¦--')': ) [1/1] {10} + ¦ °--expr: {} [0/0] {11} + ¦ ¦--'{': { [0/0] {12} + ¦ °--'}': } [0/0] {13} + 
¦--expr: funct [2/0] {14} + ¦ ¦--FUNCTION: funct [0/0] {15} + ¦ ¦--'(': ( [0/0] {16} + ¦ ¦--SYMBOL_FORMALS: a [0/1] {17} + ¦ ¦--EQ_FORMALS: = [0/4] {18} + ¦ ¦--expr: 33 [1/0] {20} + ¦ ¦ °--NUM_CONST: 33 [0/0] {19} + ¦ ¦--',': , [0/2] {21} + ¦ ¦--SYMBOL_FORMALS: b [1/2] {22} + ¦ ¦--')': ) [0/1] {23} + ¦ °--expr: {} [0/0] {24} + ¦ ¦--'{': { [0/0] {25} + ¦ °--'}': } [0/0] {26} + ¦--expr: funct [2/0] {27} + ¦ ¦--FUNCTION: funct [0/0] {28} + ¦ ¦--'(': ( [0/0] {29} + ¦ ¦--SYMBOL_FORMALS: a [0/1] {30} + ¦ ¦--',': , [0/4] {31} + ¦ ¦--SYMBOL_FORMALS: b [1/0] {32} + ¦ ¦--',': , [0/0] {33} + ¦ ¦--SYMBOL_FORMALS: c [1/2] {34} + ¦ ¦--')': ) [1/1] {35} + ¦ °--expr: {} [0/0] {36} + ¦ ¦--'{': { [0/0] {37} + ¦ °--'}': } [0/0] {38} + ¦--expr: funct [2/0] {39} + ¦ ¦--FUNCTION: funct [0/0] {40} + ¦ ¦--'(': ( [0/0] {41} + ¦ ¦--SYMBOL_FORMALS: a [0/0] {42} + ¦ ¦--',': , [0/2] {43} + ¦ ¦--SYMBOL_FORMALS: b [1/0] {44} + ¦ ¦--',': , [0/0] {45} + ¦ ¦--SYMBOL_FORMALS: c [1/0] {46} + ¦ ¦--')': ) [0/1] {47} + ¦ °--expr: {} [0/0] {48} + ¦ ¦--'{': { [0/0] {49} + ¦ °--'}': } [0/0] {50} + ¦--expr: funct [2/0] {51} + ¦ ¦--FUNCTION: funct [0/0] {52} + ¦ ¦--'(': ( [0/0] {53} + ¦ ¦--SYMBOL_FORMALS: ss [0/0] {54} + ¦ ¦--',': , [0/3] {55} + ¦ ¦--SYMBOL_FORMALS: a [1/1] {56} + ¦ ¦--EQ_FORMALS: = [0/0] {57} + ¦ ¦--expr: 3 [1/0] {59} + ¦ ¦ °--NUM_CONST: 3 [0/0] {58} + ¦ ¦--',': , [0/3] {60} + ¦ ¦--SYMBOL_FORMALS: er [1/1] {61} + ¦ ¦--EQ_FORMALS: = [0/2] {62} + ¦ ¦--expr: 4 [1/1] {64} + ¦ ¦ °--NUM_CONST: 4 [0/0] {63} + ¦ ¦--')': ) [1/1] {65} + ¦ °--expr: {} [0/0] {66} + ¦ ¦--'{': { [0/0] {67} + ¦ °--'}': } [0/0] {68} + ¦--expr: funct [2/0] {69} + ¦ ¦--FUNCTION: funct [0/0] {70} + ¦ ¦--'(': ( [0/0] {71} + ¦ ¦--SYMBOL_FORMALS: a [0/1] {72} + ¦ ¦--EQ_FORMALS: = [0/11] {73} + ¦ ¦--expr: b [1/0] {75} + ¦ ¦ °--SYMBOL: b [0/0] {74} + ¦ ¦--',': , [0/9] {76} + ¦ ¦--SYMBOL_FORMALS: f [1/1] {77} + ¦ ¦--EQ_FORMALS: = [0/11] {78} + ¦ ¦--expr: d [1/0] {80} + ¦ ¦ °--SYMBOL: d [0/0] {79} + ¦ ¦--',': , [0/1] {81} + ¦ ¦--SYMBOL_FORMALS: c [0/1] {82} + ¦ ¦--EQ_FORMALS: = [0/11] {83} + ¦ ¦--expr: 3 [1/0] {85} + ¦ ¦ °--NUM_CONST: 3 [0/0] {84} + ¦ ¦--',': , [0/1] {86} + ¦ ¦--SYMBOL_FORMALS: d [0/1] {87} + ¦ ¦--EQ_FORMALS: = [0/11] {88} + ¦ ¦--expr: 4 [1/0] {90} + ¦ ¦ °--NUM_CONST: 4 [0/0] {89} + ¦ ¦--')': ) [0/1] {91} + ¦ °--expr: { + +} [0/0] {92} + ¦ ¦--'{': { [0/0] {93} + ¦ °--'}': } [2/0] {94} + ¦--COMMENT: # cla [3/0] {95} + ¦--expr: funct [1/0] {96} + ¦ ¦--FUNCTION: funct [0/0] {97} + ¦ ¦--'(': ( [0/0] {98} + ¦ ¦--SYMBOL_FORMALS: a [0/1] {99} + ¦ ¦--EQ_FORMALS: = [0/13] {100} + ¦ ¦--expr: 33 [1/0] {102} + ¦ ¦ °--NUM_CONST: 33 [0/0] {101} + ¦ ¦--',': , [0/9] {103} + ¦ ¦--SYMBOL_FORMALS: b [1/0] {104} + ¦ ¦--')': ) [1/1] {105} + ¦ °--expr: {} [0/0] {106} + ¦ ¦--'{': { [0/0] {107} + ¦ °--'}': } [0/0] {108} + ¦--expr: funct [2/0] {109} + ¦ ¦--FUNCTION: funct [0/0] {110} + ¦ ¦--'(': ( [0/0] {111} + ¦ ¦--SYMBOL_FORMALS: a [0/1] {112} + ¦ ¦--EQ_FORMALS: = [0/11] {113} + ¦ ¦--expr: 33 [1/0] {115} + ¦ ¦ °--NUM_CONST: 33 [0/0] {114} + ¦ ¦--',': , [0/12] {116} + ¦ ¦--SYMBOL_FORMALS: b [1/2] {117} + ¦ ¦--')': ) [0/1] {118} + ¦ °--expr: {} [0/0] {119} + ¦ ¦--'{': { [0/0] {120} + ¦ °--'}': } [0/0] {121} + ¦--expr: funct [2/0] {122} + ¦ ¦--FUNCTION: funct [0/0] {123} + ¦ ¦--'(': ( [0/0] {124} + ¦ ¦--SYMBOL_FORMALS: a [0/1] {125} + ¦ ¦--',': , [0/9] {126} + ¦ ¦--SYMBOL_FORMALS: b [1/0] {127} + ¦ ¦--',': , [0/0] {128} + ¦ ¦--SYMBOL_FORMALS: c [1/0] {129} + ¦ ¦--')': ) [1/1] {130} + ¦ °--expr: {} [0/0] {131} + ¦ ¦--'{': { [0/0] {132} + ¦ °--'}': } [0/0] {133} + 
¦--expr: funct [2/0] {134} + ¦ ¦--FUNCTION: funct [0/0] {135} + ¦ ¦--'(': ( [0/0] {136} + ¦ ¦--SYMBOL_FORMALS: a [0/0] {137} + ¦ ¦--',': , [0/12] {138} + ¦ ¦--SYMBOL_FORMALS: b [1/0] {139} + ¦ ¦--',': , [0/9] {140} + ¦ ¦--SYMBOL_FORMALS: c [1/0] {141} + ¦ ¦--')': ) [0/1] {142} + ¦ °--expr: {} [0/0] {143} + ¦ ¦--'{': { [0/0] {144} + ¦ °--'}': } [0/0] {145} + °--expr: funct [2/0] {146} + ¦--FUNCTION: funct [0/0] {147} + ¦--'(': ( [0/0] {148} + ¦--SYMBOL_FORMALS: ss [0/0] {149} + ¦--',': , [0/11] {150} + ¦--SYMBOL_FORMALS: a [1/1] {151} + ¦--EQ_FORMALS: = [0/13] {152} + ¦--expr: 3 [1/0] {154} + ¦ °--NUM_CONST: 3 [0/0] {153} + ¦--',': , [0/9] {155} + ¦--SYMBOL_FORMALS: er [1/1] {156} + ¦--EQ_FORMALS: = [0/11] {157} + ¦--expr: 4 [1/0] {159} + ¦ °--NUM_CONST: 4 [0/0] {158} + ¦--')': ) [1/1] {160} + °--expr: {} [0/0] {161} + ¦--'{': { [0/0] {162} + °--'}': } [0/0] {163} diff --git a/tests/testthat/indention_operators/eq_formals_complex_indention-out.R b/tests/testthat/indention_operators/eq_formals_complex_indention-out.R new file mode 100644 index 000000000..305fe6e1c --- /dev/null +++ b/tests/testthat/indention_operators/eq_formals_complex_indention-out.R @@ -0,0 +1,60 @@ +function(a = + 33, + b + ) {} + +function(a = + 33, + b) {} + +function(a, + b, + c + ) {} + +function(a, + b, + c) {} + +function(ss, + a = + 3, + er = + 4 + ) {} + +function(a = + b, + f = + d, c = + 3, d = + 4) { + +} + + +# classical +function(a = + 33, + b +) {} + +function(a = + 33, + b) {} + +function(a, + b, + c +) {} + +function(a, + b, + c) {} + +function(ss, + a = + 3, + er = + 4 +) {} diff --git a/tests/testthat/indention_operators/eq_formals_complex_tokens-in.R b/tests/testthat/indention_operators/eq_formals_complex_tokens-in.R new file mode 100644 index 000000000..0710288de --- /dev/null +++ b/tests/testthat/indention_operators/eq_formals_complex_tokens-in.R @@ -0,0 +1,33 @@ +function(a = +33, + b + ) {} + +function(a = + 33, + b ) {} + +function(a , + b, +c + ) {} + +function(a, + b, +c) {} + +function(ss, + a = +3, + er = + 4 + ) {} + +function(a = + b, + f = + d, c = + 3, d = + 4) { + +} diff --git a/tests/testthat/indention_operators/eq_formals_complex_tokens-in_tree b/tests/testthat/indention_operators/eq_formals_complex_tokens-in_tree new file mode 100644 index 000000000..dde5f33df --- /dev/null +++ b/tests/testthat/indention_operators/eq_formals_complex_tokens-in_tree @@ -0,0 +1,97 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: funct [0/0] {1} + ¦ ¦--FUNCTION: funct [0/0] {2} + ¦ ¦--'(': ( [0/0] {3} + ¦ ¦--SYMBOL_FORMALS: a [0/1] {4} + ¦ ¦--EQ_FORMALS: = [0/0] {5} + ¦ ¦--expr: 33 [1/0] {7} + ¦ ¦ °--NUM_CONST: 33 [0/0] {6} + ¦ ¦--',': , [0/2] {8} + ¦ ¦--SYMBOL_FORMALS: b [1/2] {9} + ¦ ¦--')': ) [1/1] {10} + ¦ °--expr: {} [0/0] {11} + ¦ ¦--'{': { [0/0] {12} + ¦ °--'}': } [0/0] {13} + ¦--expr: funct [2/0] {14} + ¦ ¦--FUNCTION: funct [0/0] {15} + ¦ ¦--'(': ( [0/0] {16} + ¦ ¦--SYMBOL_FORMALS: a [0/1] {17} + ¦ ¦--EQ_FORMALS: = [0/4] {18} + ¦ ¦--expr: 33 [1/0] {20} + ¦ ¦ °--NUM_CONST: 33 [0/0] {19} + ¦ ¦--',': , [0/2] {21} + ¦ ¦--SYMBOL_FORMALS: b [1/2] {22} + ¦ ¦--')': ) [0/1] {23} + ¦ °--expr: {} [0/0] {24} + ¦ ¦--'{': { [0/0] {25} + ¦ °--'}': } [0/0] {26} + ¦--expr: funct [2/0] {27} + ¦ ¦--FUNCTION: funct [0/0] {28} + ¦ ¦--'(': ( [0/0] {29} + ¦ ¦--SYMBOL_FORMALS: a [0/1] {30} + ¦ ¦--',': , [0/4] {31} + ¦ ¦--SYMBOL_FORMALS: b [1/0] {32} + ¦ ¦--',': , [0/0] {33} + ¦ ¦--SYMBOL_FORMALS: c [1/2] {34} + ¦ ¦--')': ) [1/1] {35} + ¦ °--expr: {} [0/0] {36} + ¦ ¦--'{': { [0/0] {37} + ¦ °--'}': } 
[0/0] {38} + ¦--expr: funct [2/0] {39} + ¦ ¦--FUNCTION: funct [0/0] {40} + ¦ ¦--'(': ( [0/0] {41} + ¦ ¦--SYMBOL_FORMALS: a [0/0] {42} + ¦ ¦--',': , [0/2] {43} + ¦ ¦--SYMBOL_FORMALS: b [1/0] {44} + ¦ ¦--',': , [0/0] {45} + ¦ ¦--SYMBOL_FORMALS: c [1/0] {46} + ¦ ¦--')': ) [0/1] {47} + ¦ °--expr: {} [0/0] {48} + ¦ ¦--'{': { [0/0] {49} + ¦ °--'}': } [0/0] {50} + ¦--expr: funct [2/0] {51} + ¦ ¦--FUNCTION: funct [0/0] {52} + ¦ ¦--'(': ( [0/0] {53} + ¦ ¦--SYMBOL_FORMALS: ss [0/0] {54} + ¦ ¦--',': , [0/3] {55} + ¦ ¦--SYMBOL_FORMALS: a [1/1] {56} + ¦ ¦--EQ_FORMALS: = [0/0] {57} + ¦ ¦--expr: 3 [1/0] {59} + ¦ ¦ °--NUM_CONST: 3 [0/0] {58} + ¦ ¦--',': , [0/3] {60} + ¦ ¦--SYMBOL_FORMALS: er [1/1] {61} + ¦ ¦--EQ_FORMALS: = [0/2] {62} + ¦ ¦--expr: 4 [1/1] {64} + ¦ ¦ °--NUM_CONST: 4 [0/0] {63} + ¦ ¦--')': ) [1/1] {65} + ¦ °--expr: {} [0/0] {66} + ¦ ¦--'{': { [0/0] {67} + ¦ °--'}': } [0/0] {68} + °--expr: funct [2/0] {69} + ¦--FUNCTION: funct [0/0] {70} + ¦--'(': ( [0/0] {71} + ¦--SYMBOL_FORMALS: a [0/1] {72} + ¦--EQ_FORMALS: = [0/11] {73} + ¦--expr: b [1/0] {75} + ¦ °--SYMBOL: b [0/0] {74} + ¦--',': , [0/9] {76} + ¦--SYMBOL_FORMALS: f [1/1] {77} + ¦--EQ_FORMALS: = [0/11] {78} + ¦--expr: d [1/0] {80} + ¦ °--SYMBOL: d [0/0] {79} + ¦--',': , [0/1] {81} + ¦--SYMBOL_FORMALS: c [0/1] {82} + ¦--EQ_FORMALS: = [0/11] {83} + ¦--expr: 3 [1/0] {85} + ¦ °--NUM_CONST: 3 [0/0] {84} + ¦--',': , [0/1] {86} + ¦--SYMBOL_FORMALS: d [0/1] {87} + ¦--EQ_FORMALS: = [0/11] {88} + ¦--expr: 4 [1/0] {90} + ¦ °--NUM_CONST: 4 [0/0] {89} + ¦--')': ) [0/1] {91} + °--expr: { + +} [0/0] {92} + ¦--'{': { [0/0] {93} + °--'}': } [2/0] {94} diff --git a/tests/testthat/indention_operators/eq_formals_complex_tokens-out.R b/tests/testthat/indention_operators/eq_formals_complex_tokens-out.R new file mode 100644 index 000000000..612a4beed --- /dev/null +++ b/tests/testthat/indention_operators/eq_formals_complex_tokens-out.R @@ -0,0 +1,35 @@ +function( + a = + 33, + b) {} + +function( + a = + 33, + b) {} + +function( + a, + b, + c) {} + +function( + a, + b, + c) {} + +function( + ss, + a = + 3, + er = + 4) {} + +function(a = + b, + f = + d, c = + 3, d = + 4) { + +} diff --git a/tests/testthat/indention_operators/eq_sub_complex_indention-in.R b/tests/testthat/indention_operators/eq_sub_complex_indention-in.R new file mode 100644 index 000000000..b2e301665 --- /dev/null +++ b/tests/testthat/indention_operators/eq_sub_complex_indention-in.R @@ -0,0 +1,32 @@ +call(a = + 5, + b) + +call(a = + 5, + b + ) + +# multiple nested levels +{ + v <- function(x = + 122, + y) { + } +} + + +{ + v <- function(x = 122, + y) { + } +} + +MyClass <- R6::R6Class( + "MyClass", + public = list(initialize = function(my_arg, + my_named_arg = 1) { + return(invisible()) + } + ), +) diff --git a/tests/testthat/indention_operators/eq_sub_complex_indention-in_tree b/tests/testthat/indention_operators/eq_sub_complex_indention-in_tree new file mode 100644 index 000000000..51e45b4fa --- /dev/null +++ b/tests/testthat/indention_operators/eq_sub_complex_indention-in_tree @@ -0,0 +1,118 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: call( [0/0] {1} + ¦ ¦--expr: call [0/0] {3} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {2} + ¦ ¦--'(': ( [0/0] {4} + ¦ ¦--SYMBOL_SUB: a [0/1] {5} + ¦ ¦--EQ_SUB: = [0/7] {6} + ¦ ¦--expr: 5 [1/0] {8} + ¦ ¦ °--NUM_CONST: 5 [0/0] {7} + ¦ ¦--',': , [0/5] {9} + ¦ ¦--expr: b [1/0] {11} + ¦ ¦ °--SYMBOL: b [0/0] {10} + ¦ °--')': ) [0/0] {12} + ¦--expr: call( [2/0] {13} + ¦ ¦--expr: call [0/0] {15} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {14} + 
¦ ¦--'(': ( [0/0] {16} + ¦ ¦--SYMBOL_SUB: a [0/1] {17} + ¦ ¦--EQ_SUB: = [0/7] {18} + ¦ ¦--expr: 5 [1/0] {20} + ¦ ¦ °--NUM_CONST: 5 [0/0] {19} + ¦ ¦--',': , [0/5] {21} + ¦ ¦--expr: b [1/5] {23} + ¦ ¦ °--SYMBOL: b [0/0] {22} + ¦ °--')': ) [1/0] {24} + ¦--COMMENT: # mul [2/0] {25} + ¦--expr: { + v [1/0] {26} + ¦ ¦--'{': { [0/2] {27} + ¦ ¦--expr: v <- [1/0] {28} + ¦ ¦ ¦--expr: v [0/1] {30} + ¦ ¦ ¦ °--SYMBOL: v [0/0] {29} + ¦ ¦ ¦--LEFT_ASSIGN: <- [0/1] {31} + ¦ ¦ °--expr: funct [0/0] {32} + ¦ ¦ ¦--FUNCTION: funct [0/0] {33} + ¦ ¦ ¦--'(': ( [0/0] {34} + ¦ ¦ ¦--SYMBOL_FORMALS: x [0/1] {35} + ¦ ¦ ¦--EQ_FORMALS: = [0/2] {36} + ¦ ¦ ¦--expr: 122 [1/0] {38} + ¦ ¦ ¦ °--NUM_CONST: 122 [0/0] {37} + ¦ ¦ ¦--',': , [0/2] {39} + ¦ ¦ ¦--SYMBOL_FORMALS: y [1/0] {40} + ¦ ¦ ¦--')': ) [0/1] {41} + ¦ ¦ °--expr: { + [0/0] {42} + ¦ ¦ ¦--'{': { [0/7] {43} + ¦ ¦ °--'}': } [1/0] {44} + ¦ °--'}': } [1/0] {45} + ¦--expr: { + [3/0] {46} + ¦ ¦--'{': { [0/8] {47} + ¦ ¦--expr: v <- [1/0] {48} + ¦ ¦ ¦--expr: v [0/1] {50} + ¦ ¦ ¦ °--SYMBOL: v [0/0] {49} + ¦ ¦ ¦--LEFT_ASSIGN: <- [0/1] {51} + ¦ ¦ °--expr: funct [0/0] {52} + ¦ ¦ ¦--FUNCTION: funct [0/0] {53} + ¦ ¦ ¦--'(': ( [0/0] {54} + ¦ ¦ ¦--SYMBOL_FORMALS: x [0/1] {55} + ¦ ¦ ¦--EQ_FORMALS: = [0/1] {56} + ¦ ¦ ¦--expr: 122 [0/0] {58} + ¦ ¦ ¦ °--NUM_CONST: 122 [0/0] {57} + ¦ ¦ ¦--',': , [0/22] {59} + ¦ ¦ ¦--SYMBOL_FORMALS: y [1/0] {60} + ¦ ¦ ¦--')': ) [0/1] {61} + ¦ ¦ °--expr: { + [0/0] {62} + ¦ ¦ ¦--'{': { [0/8] {63} + ¦ ¦ °--'}': } [1/0] {64} + ¦ °--'}': } [1/0] {65} + °--expr: MyCla [2/0] {66} + ¦--expr: MyCla [0/1] {68} + ¦ °--SYMBOL: MyCla [0/0] {67} + ¦--LEFT_ASSIGN: <- [0/1] {69} + °--expr: R6::R [0/0] {70} + ¦--expr: R6::R [0/0] {71} + ¦ ¦--SYMBOL_PACKAGE: R6 [0/0] {72} + ¦ ¦--NS_GET: :: [0/0] {73} + ¦ °--SYMBOL_FUNCTION_CALL: R6Cla [0/0] {74} + ¦--'(': ( [0/8] {75} + ¦--expr: "MyCl [1/0] {77} + ¦ °--STR_CONST: "MyCl [0/0] {76} + ¦--',': , [0/8] {78} + ¦--SYMBOL_SUB: publi [1/1] {79} + ¦--EQ_SUB: = [0/1] {80} + ¦--expr: list( [0/0] {81} + ¦ ¦--expr: list [0/0] {83} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: list [0/0] {82} + ¦ ¦--'(': ( [0/0] {84} + ¦ ¦--SYMBOL_SUB: initi [0/1] {85} + ¦ ¦--EQ_SUB: = [0/1] {86} + ¦ ¦--expr: funct [0/8] {87} + ¦ ¦ ¦--FUNCTION: funct [0/0] {88} + ¦ ¦ ¦--'(': ( [0/0] {89} + ¦ ¦ ¦--SYMBOL_FORMALS: my_ar [0/0] {90} + ¦ ¦ ¦--',': , [0/44] {91} + ¦ ¦ ¦--SYMBOL_FORMALS: my_na [1/1] {92} + ¦ ¦ ¦--EQ_FORMALS: = [0/1] {93} + ¦ ¦ ¦--expr: 1 [0/0] {95} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {94} + ¦ ¦ ¦--')': ) [0/1] {96} + ¦ ¦ °--expr: { + [0/0] {97} + ¦ ¦ ¦--'{': { [0/16] {98} + ¦ ¦ ¦--expr: retur [1/8] {99} + ¦ ¦ ¦ ¦--expr: retur [0/0] {101} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: retur [0/0] {100} + ¦ ¦ ¦ ¦--'(': ( [0/0] {102} + ¦ ¦ ¦ ¦--expr: invis [0/0] {103} + ¦ ¦ ¦ ¦ ¦--expr: invis [0/0] {105} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: invis [0/0] {104} + ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {106} + ¦ ¦ ¦ ¦ °--')': ) [0/0] {107} + ¦ ¦ ¦ °--')': ) [0/0] {108} + ¦ ¦ °--'}': } [1/0] {109} + ¦ °--')': ) [1/0] {110} + ¦--',': , [0/0] {111} + °--')': ) [1/0] {112} diff --git a/tests/testthat/indention_operators/eq_sub_complex_indention-out.R b/tests/testthat/indention_operators/eq_sub_complex_indention-out.R new file mode 100644 index 000000000..02b322973 --- /dev/null +++ b/tests/testthat/indention_operators/eq_sub_complex_indention-out.R @@ -0,0 +1,32 @@ +call(a = + 5, +b) + +call(a = + 5, +b +) + +# multiple nested levels +{ + v <- function(x = + 122, + y) { + } +} + + +{ + v <- function(x = 122, + y) { + } +} + +MyClass <- R6::R6Class( + "MyClass", + public = list(initialize = 
function(my_arg, + my_named_arg = 1) { + return(invisible()) + } + ), +) diff --git a/tests/testthat/indention_operators/eq_sub_complex_tokens-in.R b/tests/testthat/indention_operators/eq_sub_complex_tokens-in.R new file mode 100644 index 000000000..739c340c8 --- /dev/null +++ b/tests/testthat/indention_operators/eq_sub_complex_tokens-in.R @@ -0,0 +1,15 @@ +call(a = + 5, + b) + +call(a = + 5, + b + ) + +c( + a = + 1, + b = # comment here + 2 +) diff --git a/tests/testthat/indention_operators/eq_sub_complex_tokens-in_tree b/tests/testthat/indention_operators/eq_sub_complex_tokens-in_tree new file mode 100644 index 000000000..f9605b5f9 --- /dev/null +++ b/tests/testthat/indention_operators/eq_sub_complex_tokens-in_tree @@ -0,0 +1,41 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: call( [0/0] {1} + ¦ ¦--expr: call [0/0] {3} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {2} + ¦ ¦--'(': ( [0/0] {4} + ¦ ¦--SYMBOL_SUB: a [0/1] {5} + ¦ ¦--EQ_SUB: = [0/7] {6} + ¦ ¦--expr: 5 [1/0] {8} + ¦ ¦ °--NUM_CONST: 5 [0/0] {7} + ¦ ¦--',': , [0/5] {9} + ¦ ¦--expr: b [1/0] {11} + ¦ ¦ °--SYMBOL: b [0/0] {10} + ¦ °--')': ) [0/0] {12} + ¦--expr: call( [2/0] {13} + ¦ ¦--expr: call [0/0] {15} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {14} + ¦ ¦--'(': ( [0/0] {16} + ¦ ¦--SYMBOL_SUB: a [0/1] {17} + ¦ ¦--EQ_SUB: = [0/7] {18} + ¦ ¦--expr: 5 [1/0] {20} + ¦ ¦ °--NUM_CONST: 5 [0/0] {19} + ¦ ¦--',': , [0/5] {21} + ¦ ¦--expr: b [1/5] {23} + ¦ ¦ °--SYMBOL: b [0/0] {22} + ¦ °--')': ) [1/0] {24} + °--expr: c( + [2/0] {25} + ¦--expr: c [0/0] {27} + ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {26} + ¦--'(': ( [0/8] {28} + ¦--SYMBOL_SUB: a [1/1] {29} + ¦--EQ_SUB: = [0/16] {30} + ¦--expr: 1 [1/0] {32} + ¦ °--NUM_CONST: 1 [0/0] {31} + ¦--',': , [0/8] {33} + ¦--SYMBOL_SUB: b [1/1] {34} + ¦--EQ_SUB: = [0/1] {35} + ¦--COMMENT: # com [0/16] {36} + ¦--expr: 2 [1/0] {38} + ¦ °--NUM_CONST: 2 [0/0] {37} + °--')': ) [1/0] {39} diff --git a/tests/testthat/indention_operators/eq_sub_complex_tokens-out.R b/tests/testthat/indention_operators/eq_sub_complex_tokens-out.R new file mode 100644 index 000000000..717a8a610 --- /dev/null +++ b/tests/testthat/indention_operators/eq_sub_complex_tokens-out.R @@ -0,0 +1,18 @@ +call( + a = + 5, + b +) + +call( + a = + 5, + b +) + +c( + a = + 1, + b = # comment here + 2 +) diff --git a/tests/testthat/indention_operators/function-multiline-no-braces-in_tree b/tests/testthat/indention_operators/function-multiline-no-braces-in_tree new file mode 100644 index 000000000..767984697 --- /dev/null +++ b/tests/testthat/indention_operators/function-multiline-no-braces-in_tree @@ -0,0 +1,102 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: [0/0] {1} + ¦ ¦--expr: [0/1] {3} + ¦ ¦ °--SYMBOL: g [0/0] {2} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {4} + ¦ °--expr: [0/0] {5} + ¦ ¦--FUNCTION: funct [0/0] {6} + ¦ ¦--'(': ( [0/0] {7} + ¦ ¦--SYMBOL_FORMALS: k [0/0] {8} + ¦ ¦--')': ) [0/2] {9} + ¦ °--expr: [1/0] {11} + ¦ °--NULL_CONST: NULL [0/0] {10} + ¦--expr: [3/0] {12} + ¦ ¦--expr: [0/1] {14} + ¦ ¦ °--SYMBOL: g [0/0] {13} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {15} + ¦ °--expr: [0/0] {16} + ¦ ¦--FUNCTION: funct [0/0] {17} + ¦ ¦--'(': ( [0/0] {18} + ¦ ¦--SYMBOL_FORMALS: k [0/0] {19} + ¦ ¦--')': ) [0/1] {20} + ¦ °--expr: [0/0] {21} + ¦ ¦--expr: [0/0] {23} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: h [0/0] {22} + ¦ ¦--'(': ( [0/2] {24} + ¦ ¦--expr: [1/0] {26} + ¦ ¦ °--NULL_CONST: NULL [0/0] {25} + ¦ °--')': ) [1/0] {27} + ¦--expr: [3/0] {28} + ¦ ¦--expr: [0/1] {30} + ¦ ¦ °--SYMBOL: g [0/0] {29} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {31} + ¦ 
°--expr: [0/0] {32} + ¦ ¦--FUNCTION: funct [0/0] {33} + ¦ ¦--'(': ( [0/0] {34} + ¦ ¦--SYMBOL_FORMALS: k [0/0] {35} + ¦ ¦--')': ) [0/1] {36} + ¦ °--expr: [0/0] {37} + ¦ ¦--expr: [0/0] {39} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: h [0/0] {38} + ¦ ¦--'(': ( [0/1] {40} + ¦ ¦--COMMENT: # y [0/2] {41} + ¦ ¦--expr: [1/1] {43} + ¦ ¦ °--NULL_CONST: NULL [0/0] {42} + ¦ ¦--COMMENT: # x [0/0] {44} + ¦ °--')': ) [1/0] {45} + ¦--expr: [2/0] {46} + ¦ ¦--expr: [0/1] {48} + ¦ ¦ °--SYMBOL: g [0/0] {47} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {49} + ¦ °--expr: [0/0] {50} + ¦ ¦--FUNCTION: funct [0/0] {51} + ¦ ¦--'(': ( [0/0] {52} + ¦ ¦--SYMBOL_FORMALS: k [0/0] {53} + ¦ ¦--')': ) [0/1] {54} + ¦ °--expr: [0/0] {55} + ¦ ¦--expr: [0/0] {57} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: h [0/0] {56} + ¦ ¦--'(': ( [0/1] {58} + ¦ ¦--COMMENT: # y [0/2] {59} + ¦ ¦--expr: [1/0] {61} + ¦ ¦ °--NULL_CONST: NULL [0/0] {60} + ¦ °--')': ) [1/0] {62} + ¦--expr: [3/0] {63} + ¦ ¦--expr: [0/1] {65} + ¦ ¦ °--SYMBOL: g [0/0] {64} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {66} + ¦ °--expr: [0/0] {67} + ¦ ¦--FUNCTION: funct [0/0] {68} + ¦ ¦--'(': ( [0/0] {69} + ¦ ¦--SYMBOL_FORMALS: k [0/0] {70} + ¦ ¦--')': ) [0/1] {71} + ¦ °--expr: [0/0] {72} + ¦ ¦--expr: [0/0] {74} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: h [0/0] {73} + ¦ ¦--'(': ( [0/2] {75} + ¦ ¦--expr: [1/1] {77} + ¦ ¦ °--NULL_CONST: NULL [0/0] {76} + ¦ ¦--COMMENT: # 3jk [0/0] {78} + ¦ °--')': ) [1/0] {79} + °--expr: [2/0] {80} + ¦--expr: [0/1] {82} + ¦ °--SYMBOL: g [0/0] {81} + ¦--LEFT_ASSIGN: <- [0/1] {83} + °--expr: [0/0] {84} + ¦--FUNCTION: funct [0/0] {85} + ¦--'(': ( [0/0] {86} + ¦--SYMBOL_FORMALS: k [0/0] {87} + ¦--')': ) [0/1] {88} + °--expr: [0/0] {89} + ¦--expr: [0/0] {91} + ¦ °--SYMBOL_FUNCTION_CALL: h [0/0] {90} + ¦--'(': ( [0/2] {92} + ¦--expr: [1/0] {93} + ¦ ¦--IF: if [0/1] {94} + ¦ ¦--'(': ( [0/0] {95} + ¦ ¦--expr: [0/0] {97} + ¦ ¦ °--NUM_CONST: TRUE [0/0] {96} + ¦ ¦--')': ) [0/4] {98} + ¦ °--expr: [1/0] {100} + ¦ °--SYMBOL: x [0/0] {99} + °--')': ) [1/0] {101} diff --git a/tests/testthat/indention_operators/function-multiline-no-braces-non-strict-in.R b/tests/testthat/indention_operators/function-multiline-no-braces-non-strict-in.R new file mode 100644 index 000000000..79d9acb95 --- /dev/null +++ b/tests/testthat/indention_operators/function-multiline-no-braces-non-strict-in.R @@ -0,0 +1,26 @@ +g <- function(k) + NULL + + +g <- function(k) h( + NULL +) + + +g <- function(k) h( # y + NULL # x +) + +g <- function(k) h( # y + NULL +) + + +g <- function(k) h( + NULL # 3jkö +) + +g <- function(k) h( + if (TRUE) + x +) diff --git a/tests/testthat/indention_operators/function-multiline-no-braces-non-strict-in_tree b/tests/testthat/indention_operators/function-multiline-no-braces-non-strict-in_tree new file mode 100644 index 000000000..aa5df2234 --- /dev/null +++ b/tests/testthat/indention_operators/function-multiline-no-braces-non-strict-in_tree @@ -0,0 +1,105 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: g <- [0/0] {1} + ¦ ¦--expr: g [0/1] {3} + ¦ ¦ °--SYMBOL: g [0/0] {2} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {4} + ¦ °--expr: funct [0/0] {5} + ¦ ¦--FUNCTION: funct [0/0] {6} + ¦ ¦--'(': ( [0/0] {7} + ¦ ¦--SYMBOL_FORMALS: k [0/0] {8} + ¦ ¦--')': ) [0/2] {9} + ¦ °--expr: NULL [1/0] {11} + ¦ °--NULL_CONST: NULL [0/0] {10} + ¦--expr: g <- [3/0] {12} + ¦ ¦--expr: g [0/1] {14} + ¦ ¦ °--SYMBOL: g [0/0] {13} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {15} + ¦ °--expr: funct [0/0] {16} + ¦ ¦--FUNCTION: funct [0/0] {17} + ¦ ¦--'(': ( [0/0] {18} + ¦ ¦--SYMBOL_FORMALS: k [0/0] {19} + ¦ ¦--')': ) [0/1] {20} + ¦ °--expr: h( + [0/0] {21} + 
¦ ¦--expr: h [0/0] {23} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: h [0/0] {22} + ¦ ¦--'(': ( [0/2] {24} + ¦ ¦--expr: NULL [1/0] {26} + ¦ ¦ °--NULL_CONST: NULL [0/0] {25} + ¦ °--')': ) [1/0] {27} + ¦--expr: g <- [3/0] {28} + ¦ ¦--expr: g [0/1] {30} + ¦ ¦ °--SYMBOL: g [0/0] {29} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {31} + ¦ °--expr: funct [0/0] {32} + ¦ ¦--FUNCTION: funct [0/0] {33} + ¦ ¦--'(': ( [0/0] {34} + ¦ ¦--SYMBOL_FORMALS: k [0/0] {35} + ¦ ¦--')': ) [0/1] {36} + ¦ °--expr: h( # [0/0] {37} + ¦ ¦--expr: h [0/0] {39} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: h [0/0] {38} + ¦ ¦--'(': ( [0/1] {40} + ¦ ¦--COMMENT: # y [0/2] {41} + ¦ ¦--expr: NULL [1/1] {43} + ¦ ¦ °--NULL_CONST: NULL [0/0] {42} + ¦ ¦--COMMENT: # x [0/0] {44} + ¦ °--')': ) [1/0] {45} + ¦--expr: g <- [2/0] {46} + ¦ ¦--expr: g [0/1] {48} + ¦ ¦ °--SYMBOL: g [0/0] {47} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {49} + ¦ °--expr: funct [0/0] {50} + ¦ ¦--FUNCTION: funct [0/0] {51} + ¦ ¦--'(': ( [0/0] {52} + ¦ ¦--SYMBOL_FORMALS: k [0/0] {53} + ¦ ¦--')': ) [0/1] {54} + ¦ °--expr: h( # [0/0] {55} + ¦ ¦--expr: h [0/0] {57} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: h [0/0] {56} + ¦ ¦--'(': ( [0/1] {58} + ¦ ¦--COMMENT: # y [0/2] {59} + ¦ ¦--expr: NULL [1/0] {61} + ¦ ¦ °--NULL_CONST: NULL [0/0] {60} + ¦ °--')': ) [1/0] {62} + ¦--expr: g <- [3/0] {63} + ¦ ¦--expr: g [0/1] {65} + ¦ ¦ °--SYMBOL: g [0/0] {64} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {66} + ¦ °--expr: funct [0/0] {67} + ¦ ¦--FUNCTION: funct [0/0] {68} + ¦ ¦--'(': ( [0/0] {69} + ¦ ¦--SYMBOL_FORMALS: k [0/0] {70} + ¦ ¦--')': ) [0/1] {71} + ¦ °--expr: h( + [0/0] {72} + ¦ ¦--expr: h [0/0] {74} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: h [0/0] {73} + ¦ ¦--'(': ( [0/2] {75} + ¦ ¦--expr: NULL [1/1] {77} + ¦ ¦ °--NULL_CONST: NULL [0/0] {76} + ¦ ¦--COMMENT: # 3jk [0/0] {78} + ¦ °--')': ) [1/0] {79} + °--expr: g <- [2/0] {80} + ¦--expr: g [0/1] {82} + ¦ °--SYMBOL: g [0/0] {81} + ¦--LEFT_ASSIGN: <- [0/1] {83} + °--expr: funct [0/0] {84} + ¦--FUNCTION: funct [0/0] {85} + ¦--'(': ( [0/0] {86} + ¦--SYMBOL_FORMALS: k [0/0] {87} + ¦--')': ) [0/1] {88} + °--expr: h( + [0/0] {89} + ¦--expr: h [0/0] {91} + ¦ °--SYMBOL_FUNCTION_CALL: h [0/0] {90} + ¦--'(': ( [0/2] {92} + ¦--expr: if (T [1/0] {93} + ¦ ¦--IF: if [0/1] {94} + ¦ ¦--'(': ( [0/0] {95} + ¦ ¦--expr: TRUE [0/0] {97} + ¦ ¦ °--NUM_CONST: TRUE [0/0] {96} + ¦ ¦--')': ) [0/4] {98} + ¦ °--expr: x [1/0] {100} + ¦ °--SYMBOL: x [0/0] {99} + °--')': ) [1/0] {101} diff --git a/tests/testthat/indention_operators/function-multiline-no-braces-non-strict-out.R b/tests/testthat/indention_operators/function-multiline-no-braces-non-strict-out.R new file mode 100644 index 000000000..79d9acb95 --- /dev/null +++ b/tests/testthat/indention_operators/function-multiline-no-braces-non-strict-out.R @@ -0,0 +1,26 @@ +g <- function(k) + NULL + + +g <- function(k) h( + NULL +) + + +g <- function(k) h( # y + NULL # x +) + +g <- function(k) h( # y + NULL +) + + +g <- function(k) h( + NULL # 3jkö +) + +g <- function(k) h( + if (TRUE) + x +) diff --git a/tests/testthat/indention_operators/function-multiline-no-braces-strict-in.R b/tests/testthat/indention_operators/function-multiline-no-braces-strict-in.R new file mode 100644 index 000000000..79d9acb95 --- /dev/null +++ b/tests/testthat/indention_operators/function-multiline-no-braces-strict-in.R @@ -0,0 +1,26 @@ +g <- function(k) + NULL + + +g <- function(k) h( + NULL +) + + +g <- function(k) h( # y + NULL # x +) + +g <- function(k) h( # y + NULL +) + + +g <- function(k) h( + NULL # 3jkö +) + +g <- function(k) h( + if (TRUE) + x +) diff --git 
a/tests/testthat/indention_operators/function-multiline-no-braces-strict-in_tree b/tests/testthat/indention_operators/function-multiline-no-braces-strict-in_tree new file mode 100644 index 000000000..aa5df2234 --- /dev/null +++ b/tests/testthat/indention_operators/function-multiline-no-braces-strict-in_tree @@ -0,0 +1,105 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: g <- [0/0] {1} + ¦ ¦--expr: g [0/1] {3} + ¦ ¦ °--SYMBOL: g [0/0] {2} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {4} + ¦ °--expr: funct [0/0] {5} + ¦ ¦--FUNCTION: funct [0/0] {6} + ¦ ¦--'(': ( [0/0] {7} + ¦ ¦--SYMBOL_FORMALS: k [0/0] {8} + ¦ ¦--')': ) [0/2] {9} + ¦ °--expr: NULL [1/0] {11} + ¦ °--NULL_CONST: NULL [0/0] {10} + ¦--expr: g <- [3/0] {12} + ¦ ¦--expr: g [0/1] {14} + ¦ ¦ °--SYMBOL: g [0/0] {13} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {15} + ¦ °--expr: funct [0/0] {16} + ¦ ¦--FUNCTION: funct [0/0] {17} + ¦ ¦--'(': ( [0/0] {18} + ¦ ¦--SYMBOL_FORMALS: k [0/0] {19} + ¦ ¦--')': ) [0/1] {20} + ¦ °--expr: h( + [0/0] {21} + ¦ ¦--expr: h [0/0] {23} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: h [0/0] {22} + ¦ ¦--'(': ( [0/2] {24} + ¦ ¦--expr: NULL [1/0] {26} + ¦ ¦ °--NULL_CONST: NULL [0/0] {25} + ¦ °--')': ) [1/0] {27} + ¦--expr: g <- [3/0] {28} + ¦ ¦--expr: g [0/1] {30} + ¦ ¦ °--SYMBOL: g [0/0] {29} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {31} + ¦ °--expr: funct [0/0] {32} + ¦ ¦--FUNCTION: funct [0/0] {33} + ¦ ¦--'(': ( [0/0] {34} + ¦ ¦--SYMBOL_FORMALS: k [0/0] {35} + ¦ ¦--')': ) [0/1] {36} + ¦ °--expr: h( # [0/0] {37} + ¦ ¦--expr: h [0/0] {39} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: h [0/0] {38} + ¦ ¦--'(': ( [0/1] {40} + ¦ ¦--COMMENT: # y [0/2] {41} + ¦ ¦--expr: NULL [1/1] {43} + ¦ ¦ °--NULL_CONST: NULL [0/0] {42} + ¦ ¦--COMMENT: # x [0/0] {44} + ¦ °--')': ) [1/0] {45} + ¦--expr: g <- [2/0] {46} + ¦ ¦--expr: g [0/1] {48} + ¦ ¦ °--SYMBOL: g [0/0] {47} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {49} + ¦ °--expr: funct [0/0] {50} + ¦ ¦--FUNCTION: funct [0/0] {51} + ¦ ¦--'(': ( [0/0] {52} + ¦ ¦--SYMBOL_FORMALS: k [0/0] {53} + ¦ ¦--')': ) [0/1] {54} + ¦ °--expr: h( # [0/0] {55} + ¦ ¦--expr: h [0/0] {57} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: h [0/0] {56} + ¦ ¦--'(': ( [0/1] {58} + ¦ ¦--COMMENT: # y [0/2] {59} + ¦ ¦--expr: NULL [1/0] {61} + ¦ ¦ °--NULL_CONST: NULL [0/0] {60} + ¦ °--')': ) [1/0] {62} + ¦--expr: g <- [3/0] {63} + ¦ ¦--expr: g [0/1] {65} + ¦ ¦ °--SYMBOL: g [0/0] {64} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {66} + ¦ °--expr: funct [0/0] {67} + ¦ ¦--FUNCTION: funct [0/0] {68} + ¦ ¦--'(': ( [0/0] {69} + ¦ ¦--SYMBOL_FORMALS: k [0/0] {70} + ¦ ¦--')': ) [0/1] {71} + ¦ °--expr: h( + [0/0] {72} + ¦ ¦--expr: h [0/0] {74} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: h [0/0] {73} + ¦ ¦--'(': ( [0/2] {75} + ¦ ¦--expr: NULL [1/1] {77} + ¦ ¦ °--NULL_CONST: NULL [0/0] {76} + ¦ ¦--COMMENT: # 3jk [0/0] {78} + ¦ °--')': ) [1/0] {79} + °--expr: g <- [2/0] {80} + ¦--expr: g [0/1] {82} + ¦ °--SYMBOL: g [0/0] {81} + ¦--LEFT_ASSIGN: <- [0/1] {83} + °--expr: funct [0/0] {84} + ¦--FUNCTION: funct [0/0] {85} + ¦--'(': ( [0/0] {86} + ¦--SYMBOL_FORMALS: k [0/0] {87} + ¦--')': ) [0/1] {88} + °--expr: h( + [0/0] {89} + ¦--expr: h [0/0] {91} + ¦ °--SYMBOL_FUNCTION_CALL: h [0/0] {90} + ¦--'(': ( [0/2] {92} + ¦--expr: if (T [1/0] {93} + ¦ ¦--IF: if [0/1] {94} + ¦ ¦--'(': ( [0/0] {95} + ¦ ¦--expr: TRUE [0/0] {97} + ¦ ¦ °--NUM_CONST: TRUE [0/0] {96} + ¦ ¦--')': ) [0/4] {98} + ¦ °--expr: x [1/0] {100} + ¦ °--SYMBOL: x [0/0] {99} + °--')': ) [1/0] {101} diff --git a/tests/testthat/indention_operators/function-multiline-no-braces-strict-out.R b/tests/testthat/indention_operators/function-multiline-no-braces-strict-out.R new file 
mode 100644 index 000000000..d4ed77f59 --- /dev/null +++ b/tests/testthat/indention_operators/function-multiline-no-braces-strict-out.R @@ -0,0 +1,38 @@ +g <- function(k) { + NULL +} + + +g <- function(k) { + h( + NULL + ) +} + + +g <- function(k) { + h( # y + NULL # x + ) +} + +g <- function(k) { + h( # y + NULL + ) +} + + +g <- function(k) { + h( + NULL # 3jkö + ) +} + +g <- function(k) { + h( + if (TRUE) { + x + } + ) +} diff --git a/tests/testthat/indention_operators/if-else-no-braces-not-strict-in.R b/tests/testthat/indention_operators/if-else-no-braces-not-strict-in.R new file mode 100644 index 000000000..acddf4348 --- /dev/null +++ b/tests/testthat/indention_operators/if-else-no-braces-not-strict-in.R @@ -0,0 +1,17 @@ +if (TRUE) c( + 2 +) else c( + 1 +) + +if (TRUE) c( + 2 +) else c( # nothing + 1 +) + +if (TRUE) c( + 2 # also nothing +) else c( + 1 +) diff --git a/tests/testthat/indention_operators/if-else-no-braces-not-strict-in_tree b/tests/testthat/indention_operators/if-else-no-braces-not-strict-in_tree new file mode 100644 index 000000000..450857878 --- /dev/null +++ b/tests/testthat/indention_operators/if-else-no-braces-not-strict-in_tree @@ -0,0 +1,71 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: if (T [0/0] {1} + ¦ ¦--IF: if [0/1] {2} + ¦ ¦--'(': ( [0/0] {3} + ¦ ¦--expr: TRUE [0/0] {5} + ¦ ¦ °--NUM_CONST: TRUE [0/0] {4} + ¦ ¦--')': ) [0/1] {6} + ¦ ¦--expr: c( + [0/1] {7} + ¦ ¦ ¦--expr: c [0/0] {9} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {8} + ¦ ¦ ¦--'(': ( [0/2] {10} + ¦ ¦ ¦--expr: 2 [1/0] {12} + ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {11} + ¦ ¦ °--')': ) [1/0] {13} + ¦ ¦--ELSE: else [0/1] {14} + ¦ °--expr: c( + [0/0] {15} + ¦ ¦--expr: c [0/0] {17} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {16} + ¦ ¦--'(': ( [0/2] {18} + ¦ ¦--expr: 1 [1/0] {20} + ¦ ¦ °--NUM_CONST: 1 [0/0] {19} + ¦ °--')': ) [1/0] {21} + ¦--expr: if (T [2/0] {22} + ¦ ¦--IF: if [0/1] {23} + ¦ ¦--'(': ( [0/0] {24} + ¦ ¦--expr: TRUE [0/0] {26} + ¦ ¦ °--NUM_CONST: TRUE [0/0] {25} + ¦ ¦--')': ) [0/1] {27} + ¦ ¦--expr: c( + [0/1] {28} + ¦ ¦ ¦--expr: c [0/0] {30} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {29} + ¦ ¦ ¦--'(': ( [0/2] {31} + ¦ ¦ ¦--expr: 2 [1/0] {33} + ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {32} + ¦ ¦ °--')': ) [1/0] {34} + ¦ ¦--ELSE: else [0/1] {35} + ¦ °--expr: c( # [0/0] {36} + ¦ ¦--expr: c [0/0] {38} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {37} + ¦ ¦--'(': ( [0/1] {39} + ¦ ¦--COMMENT: # not [0/2] {40} + ¦ ¦--expr: 1 [1/0] {42} + ¦ ¦ °--NUM_CONST: 1 [0/0] {41} + ¦ °--')': ) [1/0] {43} + °--expr: if (T [2/0] {44} + ¦--IF: if [0/1] {45} + ¦--'(': ( [0/0] {46} + ¦--expr: TRUE [0/0] {48} + ¦ °--NUM_CONST: TRUE [0/0] {47} + ¦--')': ) [0/1] {49} + ¦--expr: c( + [0/1] {50} + ¦ ¦--expr: c [0/0] {52} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {51} + ¦ ¦--'(': ( [0/2] {53} + ¦ ¦--expr: 2 [1/1] {55} + ¦ ¦ °--NUM_CONST: 2 [0/0] {54} + ¦ ¦--COMMENT: # als [0/0] {56} + ¦ °--')': ) [1/0] {57} + ¦--ELSE: else [0/1] {58} + °--expr: c( + [0/0] {59} + ¦--expr: c [0/0] {61} + ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {60} + ¦--'(': ( [0/2] {62} + ¦--expr: 1 [1/0] {64} + ¦ °--NUM_CONST: 1 [0/0] {63} + °--')': ) [1/0] {65} diff --git a/tests/testthat/indention_operators/if-else-no-braces-not-strict-out.R b/tests/testthat/indention_operators/if-else-no-braces-not-strict-out.R new file mode 100644 index 000000000..acddf4348 --- /dev/null +++ b/tests/testthat/indention_operators/if-else-no-braces-not-strict-out.R @@ -0,0 +1,17 @@ +if (TRUE) c( + 2 +) else c( + 1 +) + +if (TRUE) c( + 2 +) else c( # nothing + 1 +) + +if (TRUE) c( + 2 # 
also nothing +) else c( + 1 +) diff --git a/tests/testthat/indention_operators/logical_special_eq_sub-in_tree b/tests/testthat/indention_operators/logical_special_eq_sub-in_tree index 14aff20bb..79e0333ae 100644 --- a/tests/testthat/indention_operators/logical_special_eq_sub-in_tree +++ b/tests/testthat/indention_operators/logical_special_eq_sub-in_tree @@ -1,43 +1,47 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/2] {1} - ¦ ¦--expr: [0/1] {3} + ¦--expr: a || + [0/2] {1} + ¦ ¦--expr: a [0/1] {3} ¦ ¦ °--SYMBOL: a [0/0] {2} ¦ ¦--OR2: || [0/0] {4} - ¦ °--expr: [1/0] {6} + ¦ °--expr: b [1/0] {6} ¦ °--SYMBOL: b [0/0] {5} - ¦--expr: [2/0] {7} - ¦ ¦--expr: [0/1] {9} + ¦--expr: a > +4 [2/0] {7} + ¦ ¦--expr: a [0/1] {9} ¦ ¦ °--SYMBOL: a [0/0] {8} ¦ ¦--GT: > [0/0] {10} - ¦ °--expr: [1/0] {12} + ¦ °--expr: 4 [1/0] {12} ¦ °--NUM_CONST: 4 [0/0] {11} - ¦--expr: [2/0] {13} - ¦ ¦--expr: [0/0] {15} + ¦--expr: a& +3 [2/0] {13} + ¦ ¦--expr: a [0/0] {15} ¦ ¦ °--SYMBOL: a [0/0] {14} ¦ ¦--AND: & [0/0] {16} - ¦ °--expr: [1/0] {18} + ¦ °--expr: 3 [1/0] {18} ¦ °--NUM_CONST: 3 [0/0] {17} - ¦--expr: [2/0] {19} - ¦ ¦--expr: [0/1] {21} + ¦--expr: b %in [2/0] {19} + ¦ ¦--expr: b [0/1] {21} ¦ ¦ °--SYMBOL: b [0/0] {20} ¦ ¦--SPECIAL-IN: %in% [0/1] {22} - ¦ °--expr: [1/0] {24} + ¦ °--expr: c [1/0] {24} ¦ °--SYMBOL: c [0/0] {23} - ¦--expr: [2/0] {25} - ¦ ¦--expr: [0/0] {27} + ¦--expr: data_ [2/0] {25} + ¦ ¦--expr: data_ [0/0] {27} ¦ ¦ °--SYMBOL_FUNCTION_CALL: data_ [0/0] {26} ¦ ¦--'(': ( [0/0] {28} ¦ ¦--SYMBOL_SUB: a [1/5] {29} ¦ ¦--EQ_SUB: = [0/6] {30} - ¦ ¦--expr: [1/1] {31} - ¦ ¦ ¦--expr: [0/0] {33} + ¦ ¦--expr: list( [1/1] {31} + ¦ ¦ ¦--expr: list [0/0] {33} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: list [0/0] {32} ¦ ¦ ¦--'(': ( [0/0] {34} ¦ ¦ °--')': ) [0/0] {35} ¦ °--')': ) [1/0] {36} - °--expr: [1/0] {36.9} - ¦--expr: [0/1] {38} - ¦ °--SYMBOL: b [0/0] {37} - ¦--EQ_ASSIGN: = [0/0] {39} - °--expr: [1/0] {41} - °--NUM_CONST: 3 [0/0] {40} + °--expr_or_assign_or_help: b = +3 [1/0] {37} + ¦--expr: b [0/1] {39} + ¦ °--SYMBOL: b [0/0] {38} + ¦--EQ_ASSIGN: = [0/0] {40} + °--expr: 3 [1/0] {42} + °--NUM_CONST: 3 [0/0] {41} diff --git a/tests/testthat/indention_operators/multiply_divide-in_tree b/tests/testthat/indention_operators/multiply_divide-in_tree index c148d6da2..22a0e338c 100644 --- a/tests/testthat/indention_operators/multiply_divide-in_tree +++ b/tests/testthat/indention_operators/multiply_divide-in_tree @@ -1,31 +1,33 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/3] {1} - ¦ ¦--expr: [0/1] {4} + ¦--expr: 1 / +2 [0/3] {1} + ¦ ¦--expr: 1 [0/1] {4} ¦ ¦ °--NUM_CONST: 1 [0/0] {3} ¦ ¦--'/': / [0/0] {5} - ¦ ¦--expr: [1/1] {7} + ¦ ¦--expr: 2 [1/1] {7} ¦ ¦ °--NUM_CONST: 2 [0/0] {6} ¦ ¦--'+': + [0/1] {8} - ¦ °--expr: [0/0] {9} - ¦ ¦--expr: [0/1] {14} + ¦ °--expr: 3 * +1 [0/0] {9} + ¦ ¦--expr: 3 [0/1] {14} ¦ ¦ °--NUM_CONST: 3 [0/0] {13} ¦ ¦--'*': * [0/0] {15} - ¦ ¦--expr: [1/1] {17} + ¦ ¦--expr: 17 [1/1] {17} ¦ ¦ °--NUM_CONST: 17 [0/0] {16} ¦ ¦--'*': * [0/0] {18} - ¦ ¦--expr: [1/1] {20} + ¦ ¦--expr: 22222 [1/1] {20} ¦ ¦ °--NUM_CONST: 22222 [0/0] {19} ¦ ¦--'/': / [0/6] {21} - ¦ ¦--expr: [1/1] {23} + ¦ ¦--expr: 19 [1/1] {23} ¦ ¦ °--NUM_CONST: 19 [0/0] {22} ¦ ¦--'*': * [0/6] {24} - ¦ °--expr: [1/0] {25} + ¦ °--expr: -1 [1/0] {25} ¦ ¦--'-': - [0/0] {26} - ¦ °--expr: [0/0] {28} + ¦ °--expr: 1 [0/0] {28} ¦ °--NUM_CONST: 1 [0/0] {27} - °--expr: [2/0] {29} - ¦--expr: [0/1] {31} + °--expr: 3 * 2 [2/0] {29} + ¦--expr: 3 [0/1] {31} ¦ °--NUM_CONST: 3 [0/0] {30} ¦--'*': * [0/1] {32} - °--expr: [0/0] 
{34} + °--expr: 22 [0/0] {34} °--NUM_CONST: 22 [0/0] {33} diff --git a/tests/testthat/indention_operators/nested-for-spacing-scope-indention-in.R b/tests/testthat/indention_operators/nested-for-spacing-scope-indention-in.R new file mode 100644 index 000000000..24b939fd5 --- /dev/null +++ b/tests/testthat/indention_operators/nested-for-spacing-scope-indention-in.R @@ -0,0 +1,11 @@ +for (x in 1){ +x +for (x in k ) +3 +} + +for (x in 1) { + x + for (x in k ) + 3 +} diff --git a/tests/testthat/indention_operators/nested-for-spacing-scope-indention-in_tree b/tests/testthat/indention_operators/nested-for-spacing-scope-indention-in_tree new file mode 100644 index 000000000..a6579de76 --- /dev/null +++ b/tests/testthat/indention_operators/nested-for-spacing-scope-indention-in_tree @@ -0,0 +1,54 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: for ( [0/0] {1} + ¦ ¦--FOR: for [0/1] {2} + ¦ ¦--forcond: (x in [0/0] {3} + ¦ ¦ ¦--'(': ( [0/0] {4} + ¦ ¦ ¦--SYMBOL: x [0/1] {5} + ¦ ¦ ¦--IN: in [0/1] {6} + ¦ ¦ ¦--expr: 1 [0/0] {8} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {7} + ¦ ¦ °--')': ) [0/0] {9} + ¦ °--expr: { +x +f [0/0] {10} + ¦ ¦--'{': { [0/0] {11} + ¦ ¦--expr: x [1/0] {13} + ¦ ¦ °--SYMBOL: x [0/0] {12} + ¦ ¦--expr: for ( [1/0] {14} + ¦ ¦ ¦--FOR: for [0/1] {15} + ¦ ¦ ¦--forcond: (x in [0/0] {16} + ¦ ¦ ¦ ¦--'(': ( [0/0] {17} + ¦ ¦ ¦ ¦--SYMBOL: x [0/1] {18} + ¦ ¦ ¦ ¦--IN: in [0/1] {19} + ¦ ¦ ¦ ¦--expr: k [0/1] {21} + ¦ ¦ ¦ ¦ °--SYMBOL: k [0/0] {20} + ¦ ¦ ¦ °--')': ) [0/0] {22} + ¦ ¦ °--expr: 3 [1/0] {24} + ¦ ¦ °--NUM_CONST: 3 [0/0] {23} + ¦ °--'}': } [1/0] {25} + °--expr: for ( [2/0] {26} + ¦--FOR: for [0/1] {27} + ¦--forcond: (x in [0/1] {28} + ¦ ¦--'(': ( [0/0] {29} + ¦ ¦--SYMBOL: x [0/1] {30} + ¦ ¦--IN: in [0/1] {31} + ¦ ¦--expr: 1 [0/0] {33} + ¦ ¦ °--NUM_CONST: 1 [0/0] {32} + ¦ °--')': ) [0/0] {34} + °--expr: { + x [0/0] {35} + ¦--'{': { [0/2] {36} + ¦--expr: x [1/2] {38} + ¦ °--SYMBOL: x [0/0] {37} + ¦--expr: for ( [1/0] {39} + ¦ ¦--FOR: for [0/1] {40} + ¦ ¦--forcond: (x in [0/2] {41} + ¦ ¦ ¦--'(': ( [0/0] {42} + ¦ ¦ ¦--SYMBOL: x [0/1] {43} + ¦ ¦ ¦--IN: in [0/1] {44} + ¦ ¦ ¦--expr: k [0/1] {46} + ¦ ¦ ¦ °--SYMBOL: k [0/0] {45} + ¦ ¦ °--')': ) [0/0] {47} + ¦ °--expr: 3 [1/0] {49} + ¦ °--NUM_CONST: 3 [0/0] {48} + °--'}': } [1/0] {50} diff --git a/tests/testthat/indention_operators/nested-for-spacing-scope-indention-out.R b/tests/testthat/indention_operators/nested-for-spacing-scope-indention-out.R new file mode 100644 index 000000000..585942ace --- /dev/null +++ b/tests/testthat/indention_operators/nested-for-spacing-scope-indention-out.R @@ -0,0 +1,11 @@ +for (x in 1) { + x + for (x in k) + 3 +} + +for (x in 1) { + x + for (x in k) + 3 +} diff --git a/tests/testthat/indention_operators/nested-for-spacing-scope-spaces-in.R b/tests/testthat/indention_operators/nested-for-spacing-scope-spaces-in.R new file mode 100644 index 000000000..8e33e7295 --- /dev/null +++ b/tests/testthat/indention_operators/nested-for-spacing-scope-spaces-in.R @@ -0,0 +1,11 @@ +for (x in 1) { + x + for (x in k ) + 3 +} + +for (x in 1) { + x + for (x in k ) + 3 +} diff --git a/tests/testthat/indention_operators/nested-for-spacing-scope-spaces-in_tree b/tests/testthat/indention_operators/nested-for-spacing-scope-spaces-in_tree new file mode 100644 index 000000000..2f0f75e20 --- /dev/null +++ b/tests/testthat/indention_operators/nested-for-spacing-scope-spaces-in_tree @@ -0,0 +1,53 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: for ( [0/0] {1} + ¦ ¦--FOR: for [0/1] {2} + ¦ ¦--forcond: (x in 
[0/1] {3} + ¦ ¦ ¦--'(': ( [0/0] {4} + ¦ ¦ ¦--SYMBOL: x [0/1] {5} + ¦ ¦ ¦--IN: in [0/1] {6} + ¦ ¦ ¦--expr: 1 [0/0] {8} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {7} + ¦ ¦ °--')': ) [0/0] {9} + ¦ °--expr: { + x [0/0] {10} + ¦ ¦--'{': { [0/2] {11} + ¦ ¦--expr: x [1/2] {13} + ¦ ¦ °--SYMBOL: x [0/0] {12} + ¦ ¦--expr: for ( [1/0] {14} + ¦ ¦ ¦--FOR: for [0/1] {15} + ¦ ¦ ¦--forcond: (x in [0/4] {16} + ¦ ¦ ¦ ¦--'(': ( [0/0] {17} + ¦ ¦ ¦ ¦--SYMBOL: x [0/1] {18} + ¦ ¦ ¦ ¦--IN: in [0/1] {19} + ¦ ¦ ¦ ¦--expr: k [0/1] {21} + ¦ ¦ ¦ ¦ °--SYMBOL: k [0/0] {20} + ¦ ¦ ¦ °--')': ) [0/0] {22} + ¦ ¦ °--expr: 3 [1/0] {24} + ¦ ¦ °--NUM_CONST: 3 [0/0] {23} + ¦ °--'}': } [1/0] {25} + °--expr: for ( [2/0] {26} + ¦--FOR: for [0/1] {27} + ¦--forcond: (x in [0/1] {28} + ¦ ¦--'(': ( [0/0] {29} + ¦ ¦--SYMBOL: x [0/1] {30} + ¦ ¦--IN: in [0/1] {31} + ¦ ¦--expr: 1 [0/0] {33} + ¦ ¦ °--NUM_CONST: 1 [0/0] {32} + ¦ °--')': ) [0/0] {34} + °--expr: { + x [0/0] {35} + ¦--'{': { [0/2] {36} + ¦--expr: x [1/2] {38} + ¦ °--SYMBOL: x [0/0] {37} + ¦--expr: for ( [1/0] {39} + ¦ ¦--FOR: for [0/1] {40} + ¦ ¦--forcond: (x in [0/2] {41} + ¦ ¦ ¦--'(': ( [0/0] {42} + ¦ ¦ ¦--SYMBOL: x [0/1] {43} + ¦ ¦ ¦--IN: in [0/1] {44} + ¦ ¦ ¦--expr: k [0/1] {46} + ¦ ¦ ¦ °--SYMBOL: k [0/0] {45} + ¦ ¦ °--')': ) [0/0] {47} + ¦ °--expr: 3 [1/0] {49} + ¦ °--NUM_CONST: 3 [0/0] {48} + °--'}': } [1/0] {50} diff --git a/tests/testthat/indention_operators/nested-for-spacing-scope-spaces-out.R b/tests/testthat/indention_operators/nested-for-spacing-scope-spaces-out.R new file mode 100644 index 000000000..4016945fe --- /dev/null +++ b/tests/testthat/indention_operators/nested-for-spacing-scope-spaces-out.R @@ -0,0 +1,11 @@ +for (x in 1) { + x + for (x in k) + 3 +} + +for (x in 1) { + x + for (x in k) + 3 +} diff --git a/tests/testthat/indention_operators/not_first_trigger-in_tree b/tests/testthat/indention_operators/not_first_trigger-in_tree index af98853e3..a25d4081f 100644 --- a/tests/testthat/indention_operators/not_first_trigger-in_tree +++ b/tests/testthat/indention_operators/not_first_trigger-in_tree @@ -1,65 +1,70 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/0] {3} + ¦--expr: 1+ ( + [0/0] {1} + ¦ ¦--expr: 1 [0/0] {3} ¦ ¦ °--NUM_CONST: 1 [0/0] {2} ¦ ¦--'+': + [0/1] {4} - ¦ ¦--expr: [0/1] {6} + ¦ ¦--expr: ( + 3 [0/1] {6} ¦ ¦ ¦--'(': ( [0/2] {7} - ¦ ¦ ¦--expr: [1/0] {9} + ¦ ¦ ¦--expr: 3 [1/0] {9} ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {8} ¦ ¦ °--')': ) [1/0] {10} ¦ ¦--SPECIAL-PIPE: %>% [0/0] {11} - ¦ °--expr: [1/0] {12} - ¦ ¦--expr: [0/0] {14} + ¦ °--expr: j() [1/0] {12} + ¦ ¦--expr: j [0/0] {14} ¦ ¦ °--SYMBOL_FUNCTION_CALL: j [0/0] {13} ¦ ¦--'(': ( [0/0] {15} ¦ °--')': ) [0/0] {16} - ¦--expr: [2/0] {17} - ¦ ¦--expr: [0/1] {19} + ¦--expr: a <- [2/0] {17} + ¦ ¦--expr: a [0/1] {19} ¦ ¦ °--SYMBOL: a [0/0] {18} ¦ ¦--LEFT_ASSIGN: <- [0/1] {20} - ¦ ¦--expr: [0/1] {22} - ¦ ¦ ¦--expr: [0/0] {24} + ¦ ¦--expr: c(x, [0/1] {22} + ¦ ¦ ¦--expr: c [0/0] {24} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {23} ¦ ¦ ¦--'(': ( [0/0] {25} - ¦ ¦ ¦--expr: [0/0] {27} + ¦ ¦ ¦--expr: x [0/0] {27} ¦ ¦ ¦ °--SYMBOL: x [0/0] {26} ¦ ¦ ¦--',': , [0/1] {28} - ¦ ¦ ¦--expr: [0/0] {30} + ¦ ¦ ¦--expr: y [0/0] {30} ¦ ¦ ¦ °--SYMBOL: y [0/0] {29} ¦ ¦ ¦--',': , [0/7] {31} - ¦ ¦ ¦--expr: [1/0] {33} + ¦ ¦ ¦--expr: z [1/0] {33} ¦ ¦ ¦ °--SYMBOL: z [0/0] {32} ¦ ¦ °--')': ) [0/0] {34} ¦ ¦--SPECIAL-PIPE: %>% [0/0] {35} - ¦ °--expr: [1/0] {36} - ¦ ¦--expr: [0/0] {38} + ¦ °--expr: k() [1/0] {36} + ¦ ¦--expr: k [0/0] {38} ¦ ¦ °--SYMBOL_FUNCTION_CALL: k [0/0] {37} ¦ ¦--'(': ( [0/0] {39} ¦ °--')': 
) [0/0] {40} - °--expr: [2/0] {41} - ¦--expr: [0/1] {44} + °--expr: a + ( [2/0] {41} + ¦--expr: a [0/1] {44} ¦ °--SYMBOL: a [0/0] {43} ¦--'+': + [0/1] {45} - ¦--expr: [0/3] {46} + ¦--expr: ( + c [0/3] {46} ¦ ¦--'(': ( [0/2] {47} - ¦ ¦--expr: [1/0] {49} + ¦ ¦--expr: c [1/0] {49} ¦ ¦ °--SYMBOL: c [0/0] {48} ¦ °--')': ) [1/0] {50} ¦--'+': + [0/1] {51} - ¦--expr: [0/0] {53} + ¦--expr: ( + c( [0/0] {53} ¦ ¦--'(': ( [0/1] {54} - ¦ ¦--expr: [1/0] {55} - ¦ ¦ ¦--expr: [0/0] {57} + ¦ ¦--expr: c( + [1/0] {55} + ¦ ¦ ¦--expr: c [0/0] {57} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {56} ¦ ¦ ¦--'(': ( [0/2] {58} - ¦ ¦ ¦--expr: [1/5] {60} + ¦ ¦ ¦--expr: 2 [1/5] {60} ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {59} ¦ ¦ °--')': ) [1/0] {61} ¦ °--')': ) [1/0] {62} ¦--SPECIAL-PIPE: %>% [0/0] {63} - °--expr: [1/0] {64} - ¦--expr: [0/0] {66} + °--expr: j() [1/0] {64} + ¦--expr: j [0/0] {66} ¦ °--SYMBOL_FUNCTION_CALL: j [0/0] {65} ¦--'(': ( [0/0] {67} °--')': ) [0/0] {68} diff --git a/tests/testthat/indention_operators/overall-in_tree b/tests/testthat/indention_operators/overall-in_tree index 0cbd13843..9f04d8e5d 100644 --- a/tests/testthat/indention_operators/overall-in_tree +++ b/tests/testthat/indention_operators/overall-in_tree @@ -1,180 +1,186 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/0] {6} + ¦--expr: pd%>% [0/0] {1} + ¦ ¦--expr: pd [0/0] {6} ¦ ¦ °--SYMBOL: pd [0/0] {5} ¦ ¦--SPECIAL-PIPE: %>% [0/0] {7} - ¦ ¦--expr: [1/1] {8} - ¦ ¦ ¦--expr: [0/0] {10} + ¦ ¦--expr: mutat [1/1] {8} + ¦ ¦ ¦--expr: mutat [0/0] {10} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: mutat [0/0] {9} ¦ ¦ ¦--'(': ( [0/0] {11} ¦ ¦ ¦--SYMBOL_SUB: x [0/1] {12} ¦ ¦ ¦--EQ_SUB: = [0/0] {13} - ¦ ¦ ¦--expr: [0/0] {14} - ¦ ¦ ¦ ¦--expr: [0/0] {16} + ¦ ¦ ¦--expr: devid [0/0] {14} + ¦ ¦ ¦ ¦--expr: devid [0/0] {16} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: devid [0/0] {15} ¦ ¦ ¦ ¦--'(': ( [0/0] {17} - ¦ ¦ ¦ ¦--expr: [0/0] {18} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {20} + ¦ ¦ ¦ ¦--expr: call3 [0/0] {18} + ¦ ¦ ¦ ¦ ¦--expr: call3 [0/0] {20} ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call3 [0/0] {19} ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {21} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {23} + ¦ ¦ ¦ ¦ ¦--expr: a [0/0] {23} ¦ ¦ ¦ ¦ ¦ °--SYMBOL: a [0/0] {22} ¦ ¦ ¦ ¦ ¦--',': , [0/1] {24} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {26} + ¦ ¦ ¦ ¦ ¦--expr: b [0/0] {26} ¦ ¦ ¦ ¦ ¦ °--SYMBOL: b [0/0] {25} ¦ ¦ ¦ ¦ ¦--',': , [0/1] {27} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {28} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/1] {30} + ¦ ¦ ¦ ¦ ¦--expr: 1 + q [0/0] {28} + ¦ ¦ ¦ ¦ ¦ ¦--expr: 1 [0/1] {30} ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {29} ¦ ¦ ¦ ¦ ¦ ¦--'+': + [0/1] {31} - ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {33} + ¦ ¦ ¦ ¦ ¦ °--expr: q [0/0] {33} ¦ ¦ ¦ ¦ ¦ °--SYMBOL: q [0/0] {32} ¦ ¦ ¦ ¦ °--')': ) [0/0] {34} ¦ ¦ ¦ °--')': ) [0/0] {35} ¦ ¦ °--')': ) [0/0] {36} ¦ ¦--SPECIAL-PIPE: %>% [0/5] {37} - ¦ ¦--expr: [1/1] {38} - ¦ ¦ ¦--expr: [0/0] {40} + ¦ ¦--expr: filte [1/1] {38} + ¦ ¦ ¦--expr: filte [0/0] {40} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: filte [0/0] {39} ¦ ¦ ¦--'(': ( [0/1] {41} - ¦ ¦ ¦--expr: [0/0] {42} + ¦ ¦ ¦--expr: !term [0/0] {42} ¦ ¦ ¦ ¦--'!': ! 
[0/0] {43} - ¦ ¦ ¦ °--expr: [0/0] {45} + ¦ ¦ ¦ °--expr: termi [0/0] {45} ¦ ¦ ¦ °--SYMBOL: termi [0/0] {44} ¦ ¦ °--')': ) [0/0] {46} ¦ ¦--SPECIAL-PIPE: %>% [0/2] {47} - ¦ ¦--expr: [1/0] {48} - ¦ ¦ ¦--expr: [0/0] {50} + ¦ ¦--expr: ggplo [1/0] {48} + ¦ ¦ ¦--expr: ggplo [0/0] {50} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: ggplo [0/0] {49} ¦ ¦ ¦--'(': ( [0/0] {51} - ¦ ¦ ¦--expr: [0/0] {52} - ¦ ¦ ¦ ¦--expr: [0/0] {54} + ¦ ¦ ¦--expr: aes(x [0/0] {52} + ¦ ¦ ¦ ¦--expr: aes [0/0] {54} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: aes [0/0] {53} ¦ ¦ ¦ ¦--'(': ( [0/0] {55} ¦ ¦ ¦ ¦--SYMBOL_SUB: x [0/1] {56} ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {57} - ¦ ¦ ¦ ¦--expr: [0/0] {59} + ¦ ¦ ¦ ¦--expr: new [0/0] {59} ¦ ¦ ¦ ¦ °--SYMBOL: new [0/0] {58} ¦ ¦ ¦ ¦--',': , [0/1] {60} ¦ ¦ ¦ ¦--SYMBOL_SUB: y [0/1] {61} ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {62} - ¦ ¦ ¦ ¦--expr: [0/0] {64} + ¦ ¦ ¦ ¦--expr: old [0/0] {64} ¦ ¦ ¦ ¦ °--SYMBOL: old [0/0] {63} ¦ ¦ ¦ °--')': ) [0/0] {65} ¦ ¦ °--')': ) [0/0] {66} ¦ ¦--'+': + [0/0] {67} - ¦ °--expr: [1/0] {68} - ¦ ¦--expr: [0/0] {70} + ¦ °--expr: geom_ [1/0] {68} + ¦ ¦--expr: geom_ [0/0] {70} ¦ ¦ °--SYMBOL_FUNCTION_CALL: geom_ [0/0] {69} ¦ ¦--'(': ( [0/0] {71} ¦ °--')': ) [0/0] {72} - ¦--expr: [2/0] {73} - ¦ ¦--expr: [0/0] {75} + ¦--expr: 1+( +2 [2/0] {73} + ¦ ¦--expr: 1 [0/0] {75} ¦ ¦ °--NUM_CONST: 1 [0/0] {74} ¦ ¦--'+': + [0/0] {76} - ¦ °--expr: [0/0] {77} + ¦ °--expr: ( +22- [0/0] {77} ¦ ¦--'(': ( [0/0] {78} - ¦ ¦--expr: [1/2] {79} - ¦ ¦ ¦--expr: [0/0] {83} + ¦ ¦--expr: 22- ( [1/2] {79} + ¦ ¦ ¦--expr: 22 [0/0] {83} ¦ ¦ ¦ °--NUM_CONST: 22 [0/0] {82} ¦ ¦ ¦--'-': - [0/1] {84} - ¦ ¦ ¦--expr: [0/1] {85} + ¦ ¦ ¦--expr: (1/ + [0/1] {85} ¦ ¦ ¦ ¦--'(': ( [0/0] {86} - ¦ ¦ ¦ ¦--expr: [0/0] {87} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {93} + ¦ ¦ ¦ ¦--expr: 1/ + [0/0] {87} + ¦ ¦ ¦ ¦ ¦--expr: 1 [0/0] {93} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {92} ¦ ¦ ¦ ¦ ¦--'/': / [0/2] {94} - ¦ ¦ ¦ ¦ ¦--expr: [1/1] {96} + ¦ ¦ ¦ ¦ ¦--expr: 2718 [1/1] {96} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 2718 [0/0] {95} ¦ ¦ ¦ ¦ ¦--'/': / [0/4] {97} - ¦ ¦ ¦ ¦ ¦--expr: [1/0] {99} + ¦ ¦ ¦ ¦ ¦--expr: 23 [1/0] {99} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 23 [0/0] {98} ¦ ¦ ¦ ¦ ¦--'*': * [0/1] {100} - ¦ ¦ ¦ ¦ ¦--expr: [0/1] {102} + ¦ ¦ ¦ ¦ ¦--expr: 29 [0/1] {102} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 29 [0/0] {101} ¦ ¦ ¦ ¦ ¦--'*': * [0/1] {103} - ¦ ¦ ¦ ¦ ¦--expr: [0/5] {104} + ¦ ¦ ¦ ¦ ¦--expr: ( + [0/5] {104} ¦ ¦ ¦ ¦ ¦ ¦--'(': ( [0/12] {105} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [1/0] {106} - ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/5] {109} + ¦ ¦ ¦ ¦ ¦ ¦--expr: 2 [1/0] {106} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: 2 [0/5] {109} ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {108} ¦ ¦ ¦ ¦ ¦ ¦ ¦--'*': * [0/1] {110} - ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/1] {111} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: (22*- [0/1] {111} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {112} - ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {113} - ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {115} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: 22*-1 [0/0] {113} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: 22 [0/0] {115} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 22 [0/0] {114} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--'*': * [0/0] {116} - ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {117} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--expr: -1 [0/0] {117} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--'-': - [0/0] {118} - ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {120} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--expr: 1 [0/0] {120} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {119} ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--')': ) [0/0] {121} ¦ ¦ ¦ ¦ ¦ ¦ ¦--'+': + [0/14] {122} - ¦ ¦ ¦ ¦ ¦ ¦ °--expr: [1/0] {124} + ¦ ¦ ¦ ¦ ¦ ¦ °--expr: 1 [1/0] {124} ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {123} ¦ ¦ ¦ ¦ ¦ °--')': ) [0/0] {125} ¦ ¦ ¦ ¦ ¦--'-': - [0/10] {126} - ¦ ¦ ¦ ¦ °--expr: [1/0] {128} + ¦ ¦ ¦ ¦ °--expr: 18 [1/0] {128} ¦ ¦ ¦ ¦ °--NUM_CONST: 18 [0/0] {127} ¦ ¦ ¦ °--')': ) [0/0] {129} ¦ ¦ ¦--'+': + [0/4] {130} 
- ¦ ¦ ¦--expr: [1/1] {131} - ¦ ¦ ¦ ¦--expr: [0/0] {133} + ¦ ¦ ¦--expr: sin( [1/1] {131} + ¦ ¦ ¦ ¦--expr: sin [0/0] {133} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: sin [0/0] {132} ¦ ¦ ¦ ¦--'(': ( [0/4] {134} - ¦ ¦ ¦ ¦--expr: [0/0] {136} + ¦ ¦ ¦ ¦--expr: pi [0/0] {136} ¦ ¦ ¦ ¦ °--SYMBOL: pi [0/0] {135} ¦ ¦ ¦ °--')': ) [0/0] {137} ¦ ¦ ¦--'-': - [0/0] {138} - ¦ ¦ °--expr: [1/0] {140} + ¦ ¦ °--expr: 2 [1/0] {140} ¦ ¦ °--NUM_CONST: 2 [0/0] {139} ¦ °--')': ) [1/0] {141} - ¦--expr: [2/1] {142} - ¦ ¦--expr: [0/1] {144} + ¦--expr: a <- [2/1] {142} + ¦ ¦--expr: a [0/1] {144} ¦ ¦ °--SYMBOL: a [0/0] {143} ¦ ¦--LEFT_ASSIGN: <- [0/1] {145} - ¦ °--expr: [0/0] {146} + ¦ °--expr: funct [0/0] {146} ¦ ¦--FUNCTION: funct [0/0] {147} ¦ ¦--'(': ( [0/0] {148} ¦ ¦--SYMBOL_FORMALS: z [0/0] {149} ¦ ¦--')': ) [0/1] {150} - ¦ °--expr: [0/0] {151} + ¦ °--expr: { + a [0/0] {151} ¦ ¦--'{': { [0/2] {152} - ¦ ¦--expr: [1/0] {153} - ¦ ¦ ¦--expr: [0/1] {156} + ¦ ¦--expr: a %>% [1/0] {153} + ¦ ¦ ¦--expr: a [0/1] {156} ¦ ¦ ¦ °--SYMBOL: a [0/0] {155} ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {157} - ¦ ¦ ¦--expr: [1/1] {158} - ¦ ¦ ¦ ¦--expr: [0/0] {160} + ¦ ¦ ¦--expr: q() [1/1] {158} + ¦ ¦ ¦ ¦--expr: q [0/0] {160} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: q [0/0] {159} ¦ ¦ ¦ ¦--'(': ( [0/0] {161} ¦ ¦ ¦ °--')': ) [0/0] {162} ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {163} - ¦ ¦ °--expr: [1/0] {164} - ¦ ¦ ¦--expr: [0/0] {166} + ¦ ¦ °--expr: n() [1/0] {164} + ¦ ¦ ¦--expr: n [0/0] {166} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: n [0/0] {165} ¦ ¦ ¦--'(': ( [0/0] {167} ¦ ¦ °--')': ) [0/0] {168} ¦ °--'}': } [1/0] {169} - °--expr: [2/0] {170} - ¦--expr: [0/1] {174} + °--expr: a %>% [2/0] {170} + ¦--expr: a [0/1] {174} ¦ °--SYMBOL: a [0/0] {173} ¦--SPECIAL-PIPE: %>% [0/0] {175} - ¦--expr: [1/0] {176} - ¦ ¦--expr: [0/0] {178} + ¦--expr: b() [1/0] {176} + ¦ ¦--expr: b [0/0] {178} ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {177} ¦ ¦--'(': ( [0/0] {179} ¦ °--')': ) [0/0] {180} ¦--SPECIAL-PIPE: %>% [0/0] {181} - ¦--expr: [1/0] {182} - ¦ ¦--expr: [0/0] {184} + ¦--expr: c() [1/0] {182} + ¦ ¦--expr: c [0/0] {184} ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {183} ¦ ¦--'(': ( [0/0] {185} ¦ °--')': ) [0/0] {186} ¦--SPECIAL-PIPE: %>% [0/0] {187} - °--expr: [1/0] {188} - ¦--expr: [0/0] {190} + °--expr: k() [1/0] {188} + ¦--expr: k [0/0] {190} ¦ °--SYMBOL_FUNCTION_CALL: k [0/0] {189} ¦--'(': ( [0/0] {191} °--')': ) [0/0] {192} diff --git a/tests/testthat/indention_operators/pipe_and_assignment-in_tree b/tests/testthat/indention_operators/pipe_and_assignment-in_tree index b873fc151..f7586ded7 100644 --- a/tests/testthat/indention_operators/pipe_and_assignment-in_tree +++ b/tests/testthat/indention_operators/pipe_and_assignment-in_tree @@ -1,43 +1,43 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/4] {3} + ¦--expr: a [0/0] {1} + ¦ ¦--expr: a [0/4] {3} ¦ ¦ °--SYMBOL: a [0/0] {2} ¦ ¦--LEFT_ASSIGN: <- [0/0] {4} - ¦ ¦--expr: [1/1] {7} - ¦ ¦ ¦--expr: [0/0] {9} + ¦ ¦--expr: b() [1/1] {7} + ¦ ¦ ¦--expr: b [0/0] {9} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {8} ¦ ¦ ¦--'(': ( [0/0] {10} ¦ ¦ °--')': ) [0/0] {11} ¦ ¦--SPECIAL-PIPE: %>% [0/2] {12} - ¦ ¦--expr: [1/1] {13} - ¦ ¦ ¦--expr: [0/0] {15} + ¦ ¦--expr: q() [1/1] {13} + ¦ ¦ ¦--expr: q [0/0] {15} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: q [0/0] {14} ¦ ¦ ¦--'(': ( [0/0] {16} ¦ ¦ °--')': ) [0/0] {17} ¦ ¦--SPECIAL-PIPE: %>% [0/5] {18} - ¦ °--expr: [1/0] {19} - ¦ ¦--expr: [0/0] {21} + ¦ °--expr: g() [1/0] {19} + ¦ ¦--expr: g [0/0] {21} ¦ ¦ °--SYMBOL_FUNCTION_CALL: g [0/0] {20} ¦ ¦--'(': ( [0/0] {22} ¦ °--')': ) [0/0] {23} - °--expr: [2/0] 
{24} - ¦--expr: [0/1] {26} + °--expr: a <- [2/0] {24} + ¦--expr: a [0/1] {26} ¦ °--SYMBOL: a [0/0] {25} ¦--LEFT_ASSIGN: <- [0/4] {27} - ¦--expr: [0/1] {30} - ¦ ¦--expr: [0/0] {32} + ¦--expr: b() [0/1] {30} + ¦ ¦--expr: b [0/0] {32} ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {31} ¦ ¦--'(': ( [0/0] {33} ¦ °--')': ) [0/0] {34} ¦--SPECIAL-PIPE: %>% [0/2] {35} - ¦--expr: [1/0] {36} - ¦ ¦--expr: [0/0] {38} + ¦--expr: c() [1/0] {36} + ¦ ¦--expr: c [0/0] {38} ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {37} ¦ ¦--'(': ( [0/0] {39} ¦ °--')': ) [0/0] {40} ¦--SPECIAL-PIPE: %>% [0/0] {41} - °--expr: [1/0] {42} - ¦--expr: [0/0] {44} + °--expr: ggg() [1/0] {42} + ¦--expr: ggg [0/0] {44} ¦ °--SYMBOL_FUNCTION_CALL: ggg [0/0] {43} ¦--'(': ( [0/0] {45} °--')': ) [0/0] {46} diff --git a/tests/testthat/indention_operators/pipe_and_assignment_and_comment-in.R b/tests/testthat/indention_operators/pipe_and_assignment_and_comment-in.R new file mode 100644 index 000000000..ab24b9e47 --- /dev/null +++ b/tests/testthat/indention_operators/pipe_and_assignment_and_comment-in.R @@ -0,0 +1,25 @@ +a <-# + b() %>% + c() %>% + d() + +a <- # + b() %>% + c() %>% + d() + + +a <- + b() %>% + c() %>% + d() + +a <- c %>% + b()%>% + c( ) %>% + d() + +a <- + b() %>% # + c() %>% + d()# d diff --git a/tests/testthat/indention_operators/pipe_and_assignment_and_comment-in_tree b/tests/testthat/indention_operators/pipe_and_assignment_and_comment-in_tree new file mode 100644 index 000000000..b88ba53ae --- /dev/null +++ b/tests/testthat/indention_operators/pipe_and_assignment_and_comment-in_tree @@ -0,0 +1,115 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: a <-# [0/0] {1} + ¦ ¦--expr: a [0/1] {3} + ¦ ¦ °--SYMBOL: a [0/0] {2} + ¦ ¦--LEFT_ASSIGN: <- [0/0] {4} + ¦ ¦--COMMENT: # [0/2] {5} + ¦ ¦--expr: b() [1/1] {8} + ¦ ¦ ¦--expr: b [0/0] {10} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {9} + ¦ ¦ ¦--'(': ( [0/0] {11} + ¦ ¦ °--')': ) [0/0] {12} + ¦ ¦--SPECIAL-PIPE: %>% [0/2] {13} + ¦ ¦--expr: c() [1/1] {14} + ¦ ¦ ¦--expr: c [0/0] {16} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {15} + ¦ ¦ ¦--'(': ( [0/0] {17} + ¦ ¦ °--')': ) [0/0] {18} + ¦ ¦--SPECIAL-PIPE: %>% [0/2] {19} + ¦ °--expr: d() [1/0] {20} + ¦ ¦--expr: d [0/0] {22} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: d [0/0] {21} + ¦ ¦--'(': ( [0/0] {23} + ¦ °--')': ) [0/0] {24} + ¦--expr: a <- [2/0] {25} + ¦ ¦--expr: a [0/1] {27} + ¦ ¦ °--SYMBOL: a [0/0] {26} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {28} + ¦ ¦--COMMENT: # [0/2] {29} + ¦ ¦--expr: b() [1/1] {32} + ¦ ¦ ¦--expr: b [0/0] {34} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {33} + ¦ ¦ ¦--'(': ( [0/0] {35} + ¦ ¦ °--')': ) [0/0] {36} + ¦ ¦--SPECIAL-PIPE: %>% [0/2] {37} + ¦ ¦--expr: c() [1/1] {38} + ¦ ¦ ¦--expr: c [0/0] {40} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {39} + ¦ ¦ ¦--'(': ( [0/0] {41} + ¦ ¦ °--')': ) [0/0] {42} + ¦ ¦--SPECIAL-PIPE: %>% [0/2] {43} + ¦ °--expr: d() [1/0] {44} + ¦ ¦--expr: d [0/0] {46} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: d [0/0] {45} + ¦ ¦--'(': ( [0/0] {47} + ¦ °--')': ) [0/0] {48} + ¦--expr: a <- + [3/0] {49} + ¦ ¦--expr: a [0/1] {51} + ¦ ¦ °--SYMBOL: a [0/0] {50} + ¦ ¦--LEFT_ASSIGN: <- [0/2] {52} + ¦ ¦--expr: b() [1/1] {55} + ¦ ¦ ¦--expr: b [0/0] {57} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {56} + ¦ ¦ ¦--'(': ( [0/0] {58} + ¦ ¦ °--')': ) [0/0] {59} + ¦ ¦--SPECIAL-PIPE: %>% [0/2] {60} + ¦ ¦--expr: c() [1/1] {61} + ¦ ¦ ¦--expr: c [0/0] {63} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {62} + ¦ ¦ ¦--'(': ( [0/0] {64} + ¦ ¦ °--')': ) [0/0] {65} + ¦ ¦--SPECIAL-PIPE: %>% [0/2] {66} + ¦ °--expr: d() [1/0] {67} + ¦ ¦--expr: d [0/0] {69} + ¦ ¦ 
°--SYMBOL_FUNCTION_CALL: d [0/0] {68} + ¦ ¦--'(': ( [0/0] {70} + ¦ °--')': ) [0/0] {71} + ¦--expr: a <- [2/0] {72} + ¦ ¦--expr: a [0/1] {74} + ¦ ¦ °--SYMBOL: a [0/0] {73} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {75} + ¦ ¦--expr: c [0/1] {80} + ¦ ¦ °--SYMBOL: c [0/0] {79} + ¦ ¦--SPECIAL-PIPE: %>% [0/2] {81} + ¦ ¦--expr: b() [1/0] {82} + ¦ ¦ ¦--expr: b [0/0] {84} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {83} + ¦ ¦ ¦--'(': ( [0/0] {85} + ¦ ¦ °--')': ) [0/0] {86} + ¦ ¦--SPECIAL-PIPE: %>% [0/2] {87} + ¦ ¦--expr: c( ) [1/1] {88} + ¦ ¦ ¦--expr: c [0/0] {90} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {89} + ¦ ¦ ¦--'(': ( [0/1] {91} + ¦ ¦ °--')': ) [0/0] {92} + ¦ ¦--SPECIAL-PIPE: %>% [0/2] {93} + ¦ °--expr: d() [1/0] {94} + ¦ ¦--expr: d [0/0] {96} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: d [0/0] {95} + ¦ ¦--'(': ( [0/0] {97} + ¦ °--')': ) [0/0] {98} + ¦--expr: a <- + [2/0] {99} + ¦ ¦--expr: a [0/1] {101} + ¦ ¦ °--SYMBOL: a [0/0] {100} + ¦ ¦--LEFT_ASSIGN: <- [0/2] {102} + ¦ ¦--expr: b() [1/1] {105} + ¦ ¦ ¦--expr: b [0/0] {107} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {106} + ¦ ¦ ¦--'(': ( [0/0] {108} + ¦ ¦ °--')': ) [0/0] {109} + ¦ ¦--SPECIAL-PIPE: %>% [0/1] {110} + ¦ ¦--COMMENT: # [0/2] {111} + ¦ ¦--expr: c() [1/1] {112} + ¦ ¦ ¦--expr: c [0/0] {114} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {113} + ¦ ¦ ¦--'(': ( [0/0] {115} + ¦ ¦ °--')': ) [0/0] {116} + ¦ ¦--SPECIAL-PIPE: %>% [0/2] {117} + ¦ °--expr: d() [1/0] {118} + ¦ ¦--expr: d [0/0] {120} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: d [0/0] {119} + ¦ ¦--'(': ( [0/0] {121} + ¦ °--')': ) [0/0] {122} + °--COMMENT: # d [0/0] {123} diff --git a/tests/testthat/indention_operators/pipe_and_assignment_and_comment-out.R b/tests/testthat/indention_operators/pipe_and_assignment_and_comment-out.R new file mode 100644 index 000000000..4a8a97ac6 --- /dev/null +++ b/tests/testthat/indention_operators/pipe_and_assignment_and_comment-out.R @@ -0,0 +1,25 @@ +a <- # + b() %>% + c() %>% + d() + +a <- # + b() %>% + c() %>% + d() + + +a <- + b() %>% + c() %>% + d() + +a <- c %>% + b() %>% + c() %>% + d() + +a <- + b() %>% # + c() %>% + d() # d diff --git a/tests/testthat/indention_operators/pipe_and_assignment_and_math-in_tree b/tests/testthat/indention_operators/pipe_and_assignment_and_math-in_tree index c096ad5df..ecbf9d3ab 100644 --- a/tests/testthat/indention_operators/pipe_and_assignment_and_math-in_tree +++ b/tests/testthat/indention_operators/pipe_and_assignment_and_math-in_tree @@ -1,27 +1,27 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/1] {3} + °--expr: q <- [0/0] {1} + ¦--expr: q [0/1] {3} ¦ °--SYMBOL: q [0/0] {2} ¦--LEFT_ASSIGN: <- [0/2] {4} - ¦--expr: [0/0] {9} + ¦--expr: a [0/0] {9} ¦ °--SYMBOL: a [0/0] {8} ¦--'+': + [0/2] {10} - ¦--expr: [1/1] {11} + ¦--expr: - 3 [1/1] {11} ¦ ¦--'-': - [0/1] {12} - ¦ °--expr: [0/0] {14} + ¦ °--expr: 3 [0/0] {14} ¦ °--NUM_CONST: 3 [0/0] {13} ¦--'+': + [0/0] {15} - ¦--expr: [1/0] {17} + ¦--expr: 2 [1/0] {17} ¦ °--NUM_CONST: 2 [0/0] {16} ¦--'+': + [0/0] {18} - ¦--expr: [1/0] {20} - ¦ ¦--expr: [0/0] {22} + ¦--expr: g() [1/0] {20} + ¦ ¦--expr: g [0/0] {22} ¦ ¦ °--SYMBOL_FUNCTION_CALL: g [0/0] {21} ¦ ¦--'(': ( [0/0] {23} ¦ °--')': ) [0/0] {24} ¦--SPECIAL-PIPE: %>% [0/3] {25} - °--expr: [1/0] {26} - ¦--expr: [0/0] {28} + °--expr: k() [1/0] {26} + ¦--expr: k [0/0] {28} ¦ °--SYMBOL_FUNCTION_CALL: k [0/0] {27} ¦--'(': ( [0/0] {29} °--')': ) [0/0] {30} diff --git a/tests/testthat/indention_operators/pipe_simple-in.R b/tests/testthat/indention_operators/pipe_simple-in.R index 7c50917e2..fe3cd3535 100644 --- 
a/tests/testthat/indention_operators/pipe_simple-in.R +++ b/tests/testthat/indention_operators/pipe_simple-in.R @@ -5,3 +5,14 @@ c() %>% g_out() a <- function(jon_the_pipe) {} + +x %>% + + # break + call() + + +y %>% + + + call() # mor diff --git a/tests/testthat/indention_operators/pipe_simple-in_tree b/tests/testthat/indention_operators/pipe_simple-in_tree index 91e09c012..9cd38e1cd 100644 --- a/tests/testthat/indention_operators/pipe_simple-in_tree +++ b/tests/testthat/indention_operators/pipe_simple-in_tree @@ -1,56 +1,76 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/1] {6} + ¦--expr: a %>% [0/0] {1} + ¦ ¦--expr: a [0/1] {6} ¦ ¦ °--SYMBOL: a [0/0] {5} ¦ ¦--SPECIAL-PIPE: %>% [0/0] {7} - ¦ ¦--expr: [1/1] {8} - ¦ ¦ ¦--expr: [0/0] {10} + ¦ ¦--expr: b() [1/1] {8} + ¦ ¦ ¦--expr: b [0/0] {10} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {9} ¦ ¦ ¦--'(': ( [0/0] {11} ¦ ¦ °--')': ) [0/0] {12} ¦ ¦--SPECIAL-PIPE: %>% [0/0] {13} - ¦ ¦--expr: [1/1] {14} - ¦ ¦ ¦--expr: [0/0] {16} + ¦ ¦--expr: c() [1/1] {14} + ¦ ¦ ¦--expr: c [0/0] {16} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {15} ¦ ¦ ¦--'(': ( [0/0] {17} ¦ ¦ °--')': ) [0/0] {18} ¦ ¦--SPECIAL-PIPE: %>% [0/10] {19} - ¦ ¦--expr: [1/1] {20} - ¦ ¦ ¦--expr: [0/0] {22} + ¦ ¦--expr: d(1 + [1/1] {20} + ¦ ¦ ¦--expr: d [0/0] {22} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: d [0/0] {21} ¦ ¦ ¦--'(': ( [0/0] {23} - ¦ ¦ ¦--expr: [0/0] {24} - ¦ ¦ ¦ ¦--expr: [0/1] {26} + ¦ ¦ ¦--expr: 1 + e [0/0] {24} + ¦ ¦ ¦ ¦--expr: 1 [0/1] {26} ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {25} ¦ ¦ ¦ ¦--'+': + [0/1] {27} - ¦ ¦ ¦ °--expr: [0/0] {28} - ¦ ¦ ¦ ¦--expr: [0/1] {30} + ¦ ¦ ¦ °--expr: e (si [0/0] {28} + ¦ ¦ ¦ ¦--expr: e [0/1] {30} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: e [0/0] {29} ¦ ¦ ¦ ¦--'(': ( [0/0] {31} - ¦ ¦ ¦ ¦--expr: [0/0] {32} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {34} + ¦ ¦ ¦ ¦--expr: sin(f [0/0] {32} + ¦ ¦ ¦ ¦ ¦--expr: sin [0/0] {34} ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: sin [0/0] {33} ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {35} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {37} + ¦ ¦ ¦ ¦ ¦--expr: f [0/0] {37} ¦ ¦ ¦ ¦ ¦ °--SYMBOL: f [0/0] {36} ¦ ¦ ¦ ¦ °--')': ) [0/0] {38} ¦ ¦ ¦ °--')': ) [0/0] {39} ¦ ¦ °--')': ) [0/0] {40} ¦ ¦--SPECIAL-PIPE: %>% [0/33] {41} - ¦ °--expr: [1/0] {42} - ¦ ¦--expr: [0/0] {44} + ¦ °--expr: g_out [1/0] {42} + ¦ ¦--expr: g_out [0/0] {44} ¦ ¦ °--SYMBOL_FUNCTION_CALL: g_out [0/0] {43} ¦ ¦--'(': ( [0/0] {45} ¦ °--')': ) [0/0] {46} - °--expr: [2/0] {47} - ¦--expr: [0/1] {49} - ¦ °--SYMBOL: a [0/0] {48} - ¦--LEFT_ASSIGN: <- [0/1] {50} - °--expr: [0/0] {51} - ¦--FUNCTION: funct [0/0] {52} - ¦--'(': ( [0/0] {53} - ¦--SYMBOL_FORMALS: jon_t [0/0] {54} - ¦--')': ) [0/1] {55} - °--expr: [0/0] {56} - ¦--'{': { [0/0] {57} - °--'}': } [0/0] {58} + ¦--expr: a <- [2/0] {47} + ¦ ¦--expr: a [0/1] {49} + ¦ ¦ °--SYMBOL: a [0/0] {48} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {50} + ¦ °--expr: funct [0/0] {51} + ¦ ¦--FUNCTION: funct [0/0] {52} + ¦ ¦--'(': ( [0/0] {53} + ¦ ¦--SYMBOL_FORMALS: jon_t [0/0] {54} + ¦ ¦--')': ) [0/1] {55} + ¦ °--expr: {} [0/0] {56} + ¦ ¦--'{': { [0/0] {57} + ¦ °--'}': } [0/0] {58} + ¦--expr: x %>% [2/0] {59} + ¦ ¦--expr: x [0/1] {61} + ¦ ¦ °--SYMBOL: x [0/0] {60} + ¦ ¦--SPECIAL-PIPE: %>% [0/2] {62} + ¦ ¦--COMMENT: # bre [2/2] {63} + ¦ °--expr: call( [1/0] {64} + ¦ ¦--expr: call [0/0] {66} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {65} + ¦ ¦--'(': ( [0/0] {67} + ¦ °--')': ) [0/0] {68} + ¦--expr: y %>% [3/1] {69} + ¦ ¦--expr: y [0/1] {71} + ¦ ¦ °--SYMBOL: y [0/0] {70} + ¦ ¦--SPECIAL-PIPE: %>% [0/2] {72} + ¦ °--expr: call( [3/0] {73} + ¦ ¦--expr: call [0/0] {75} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: 
call [0/0] {74} + ¦ ¦--'(': ( [0/0] {76} + ¦ °--')': ) [0/0] {77} + °--COMMENT: # mor [0/0] {78} diff --git a/tests/testthat/indention_operators/pipe_simple-out.R b/tests/testthat/indention_operators/pipe_simple-out.R index 518fc83e3..bb3ff2a43 100644 --- a/tests/testthat/indention_operators/pipe_simple-out.R +++ b/tests/testthat/indention_operators/pipe_simple-out.R @@ -5,3 +5,11 @@ a %>% g_out() a <- function(jon_the_pipe) {} + +x %>% + # break + call() + + +y %>% + call() # mor diff --git a/tests/testthat/indention_operators/pipe_with_dot-in_tree b/tests/testthat/indention_operators/pipe_with_dot-in_tree index 21719b4bd..61a900661 100644 --- a/tests/testthat/indention_operators/pipe_with_dot-in_tree +++ b/tests/testthat/indention_operators/pipe_with_dot-in_tree @@ -1,23 +1,23 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/1] {2} - ¦ ¦--expr: [0/0] {4} + °--expr: strsp [0/0] {1} + ¦--expr: strsp [0/1] {2} + ¦ ¦--expr: strsp [0/0] {4} ¦ ¦ °--SYMBOL_FUNCTION_CALL: strsp [0/0] {3} ¦ ¦--'(': ( [0/0] {5} - ¦ ¦--expr: [0/0] {7} + ¦ ¦--expr: "\n" [0/0] {7} ¦ ¦ °--STR_CONST: "\n" [0/0] {6} ¦ ¦--',': , [0/1] {8} ¦ ¦--SYMBOL_SUB: fixed [0/1] {9} ¦ ¦--EQ_SUB: = [0/1] {10} - ¦ ¦--expr: [0/0] {12} + ¦ ¦--expr: TRUE [0/0] {12} ¦ ¦ °--NUM_CONST: TRUE [0/0] {11} ¦ °--')': ) [0/0] {13} ¦--SPECIAL-PIPE: %>% [0/2] {14} - °--expr: [1/0] {15} - ¦--expr: [0/0] {17} + °--expr: .[[1L [1/0] {15} + ¦--expr: . [0/0] {17} ¦ °--SYMBOL: . [0/0] {16} ¦--LBB: [[ [0/0] {18} - ¦--expr: [0/0] {20} + ¦--expr: 1L [0/0] {20} ¦ °--NUM_CONST: 1L [0/0] {19} ¦--']': ] [0/0] {21} °--']': ] [0/0] {22} diff --git a/tests/testthat/indention_operators/plus_minus-in.R b/tests/testthat/indention_operators/plus_minus-in.R index a543addce..079b0918f 100644 --- a/tests/testthat/indention_operators/plus_minus-in.R +++ b/tests/testthat/indention_operators/plus_minus-in.R @@ -5,4 +5,3 @@ 5 1 + 1 - diff --git a/tests/testthat/indention_operators/plus_minus-in_tree b/tests/testthat/indention_operators/plus_minus-in_tree index b1ba42e09..d805fc5a7 100644 --- a/tests/testthat/indention_operators/plus_minus-in_tree +++ b/tests/testthat/indention_operators/plus_minus-in_tree @@ -1,22 +1,23 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/1] {6} + ¦--expr: 1 + + [0/0] {1} + ¦ ¦--expr: 1 [0/1] {6} ¦ ¦ °--NUM_CONST: 1 [0/0] {5} ¦ ¦--'+': + [0/7] {7} - ¦ ¦--expr: [1/1] {9} + ¦ ¦--expr: 2 [1/1] {9} ¦ ¦ °--NUM_CONST: 2 [0/0] {8} ¦ ¦--'+': + [0/0] {10} - ¦ ¦--expr: [1/1] {12} + ¦ ¦--expr: 3 [1/1] {12} ¦ ¦ °--NUM_CONST: 3 [0/0] {11} ¦ ¦--'+': + [0/7] {13} - ¦ ¦--expr: [1/1] {15} + ¦ ¦--expr: 4 [1/1] {15} ¦ ¦ °--NUM_CONST: 4 [0/0] {14} ¦ ¦--'-': - [0/2] {16} - ¦ °--expr: [1/0] {18} + ¦ °--expr: 5 [1/0] {18} ¦ °--NUM_CONST: 5 [0/0] {17} - °--expr: [2/0] {19} - ¦--expr: [0/1] {21} + °--expr: 1 + 1 [2/0] {19} + ¦--expr: 1 [0/1] {21} ¦ °--NUM_CONST: 1 [0/0] {20} ¦--'+': + [0/1] {22} - °--expr: [0/0] {24} + °--expr: 1 [0/0] {24} °--NUM_CONST: 1 [0/0] {23} diff --git a/tests/testthat/indention_operators/tilde-in.R b/tests/testthat/indention_operators/tilde-in.R new file mode 100644 index 000000000..c74602490 --- /dev/null +++ b/tests/testthat/indention_operators/tilde-in.R @@ -0,0 +1,7 @@ +y ~ +x+ +y + + +x ~ + 1 + (x|b) diff --git a/tests/testthat/indention_operators/tilde-in_tree b/tests/testthat/indention_operators/tilde-in_tree new file mode 100644 index 000000000..7f3155dd1 --- /dev/null +++ b/tests/testthat/indention_operators/tilde-in_tree @@ -0,0 +1,28 @@ +ROOT 
(token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: y ~ +x [0/0] {1} + ¦ ¦--expr: y [0/1] {3} + ¦ ¦ °--SYMBOL: y [0/0] {2} + ¦ ¦--'~': ~ [0/0] {4} + ¦ ¦--expr: x [1/0] {7} + ¦ ¦ °--SYMBOL: x [0/0] {6} + ¦ ¦--'+': + [0/0] {8} + ¦ °--expr: y [1/0] {10} + ¦ °--SYMBOL: y [0/0] {9} + °--expr: x ~ + [3/0] {11} + ¦--expr: x [0/1] {13} + ¦ °--SYMBOL: x [0/0] {12} + ¦--'~': ~ [0/2] {14} + ¦--expr: 1 [1/1] {17} + ¦ °--NUM_CONST: 1 [0/0] {16} + ¦--'+': + [0/1] {18} + °--expr: (x|b) [0/0] {19} + ¦--'(': ( [0/0] {20} + ¦--expr: x|b [0/0] {21} + ¦ ¦--expr: x [0/0] {23} + ¦ ¦ °--SYMBOL: x [0/0] {22} + ¦ ¦--OR: | [0/0] {24} + ¦ °--expr: b [0/0] {26} + ¦ °--SYMBOL: b [0/0] {25} + °--')': ) [0/0] {27} diff --git a/tests/testthat/indention_operators/tilde-out.R b/tests/testthat/indention_operators/tilde-out.R new file mode 100644 index 000000000..094eee463 --- /dev/null +++ b/tests/testthat/indention_operators/tilde-out.R @@ -0,0 +1,7 @@ +y ~ + x + + y + + +x ~ + 1 + (x | b) diff --git a/tests/testthat/indention_operators/while_for_if_without_curly-in.R b/tests/testthat/indention_operators/while_for_if_without_curly-in.R deleted file mode 100644 index 9e53c11b9..000000000 --- a/tests/testthat/indention_operators/while_for_if_without_curly-in.R +++ /dev/null @@ -1,9 +0,0 @@ -while (x > 3) -return(FALSE) - -for (i in 1:3) -print(i) - -if (x) -call2(3) - diff --git a/tests/testthat/indention_operators/while_for_if_without_curly-in_tree b/tests/testthat/indention_operators/while_for_if_without_curly-in_tree deleted file mode 100644 index bdf79075c..000000000 --- a/tests/testthat/indention_operators/while_for_if_without_curly-in_tree +++ /dev/null @@ -1,51 +0,0 @@ -ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--WHILE: while [0/1] {2} - ¦ ¦--'(': ( [0/0] {3} - ¦ ¦--expr: [0/0] {4} - ¦ ¦ ¦--expr: [0/1] {6} - ¦ ¦ ¦ °--SYMBOL: x [0/0] {5} - ¦ ¦ ¦--GT: > [0/1] {7} - ¦ ¦ °--expr: [0/0] {9} - ¦ ¦ °--NUM_CONST: 3 [0/0] {8} - ¦ ¦--')': ) [0/0] {10} - ¦ °--expr: [1/0] {11} - ¦ ¦--expr: [0/0] {13} - ¦ ¦ °--SYMBOL_FUNCTION_CALL: retur [0/0] {12} - ¦ ¦--'(': ( [0/0] {14} - ¦ ¦--expr: [0/0] {16} - ¦ ¦ °--NUM_CONST: FALSE [0/0] {15} - ¦ °--')': ) [0/0] {17} - ¦--expr: [2/0] {18} - ¦ ¦--FOR: for [0/1] {19} - ¦ ¦--forcond: [0/0] {20} - ¦ ¦ ¦--'(': ( [0/0] {21} - ¦ ¦ ¦--SYMBOL: i [0/1] {22} - ¦ ¦ ¦--IN: in [0/1] {23} - ¦ ¦ ¦--expr: [0/0] {24} - ¦ ¦ ¦ ¦--expr: [0/0] {26} - ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {25} - ¦ ¦ ¦ ¦--':': : [0/0] {27} - ¦ ¦ ¦ °--expr: [0/0] {29} - ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {28} - ¦ ¦ °--')': ) [0/0] {30} - ¦ °--expr: [1/0] {31} - ¦ ¦--expr: [0/0] {33} - ¦ ¦ °--SYMBOL_FUNCTION_CALL: print [0/0] {32} - ¦ ¦--'(': ( [0/0] {34} - ¦ ¦--expr: [0/0] {36} - ¦ ¦ °--SYMBOL: i [0/0] {35} - ¦ °--')': ) [0/0] {37} - °--expr: [2/0] {38} - ¦--IF: if [0/1] {39} - ¦--'(': ( [0/0] {40} - ¦--expr: [0/0] {42} - ¦ °--SYMBOL: x [0/0] {41} - ¦--')': ) [0/0] {43} - °--expr: [1/0] {44} - ¦--expr: [0/0] {46} - ¦ °--SYMBOL_FUNCTION_CALL: call2 [0/0] {45} - ¦--'(': ( [0/0] {47} - ¦--expr: [0/0] {49} - ¦ °--NUM_CONST: 3 [0/0] {48} - °--')': ) [0/0] {50} diff --git a/tests/testthat/indention_operators/while_for_if_without_curly-out.R b/tests/testthat/indention_operators/while_for_if_without_curly-out.R deleted file mode 100644 index b6e06fa83..000000000 --- a/tests/testthat/indention_operators/while_for_if_without_curly-out.R +++ /dev/null @@ -1,8 +0,0 @@ -while (x > 3) - return(FALSE) - -for (i in 1:3) - print(i) - -if (x) - call2(3) diff --git 
a/tests/testthat/indention_operators/while_for_if_without_curly_non_strict-in.R b/tests/testthat/indention_operators/while_for_if_without_curly_non_strict-in.R new file mode 100644 index 000000000..43f1cd48e --- /dev/null +++ b/tests/testthat/indention_operators/while_for_if_without_curly_non_strict-in.R @@ -0,0 +1,61 @@ +while (x > 3) + return(FALSE) + +for (i in 1:3) + print(i) + +if (x) + call2(3) + +for (i in 1:3) # + print(i) + +for (i in + 1:3) # + print(i) + +for (i in # + 1:3) # + print(i) + +for (# + i in # + 1:3# +) # + print(i) + + +while (x > 3) # + return(FALSE) + +while (x > 3 # +) + return(FALSE) + +while ( # test + x > 3) # another + return(FALSE) + +while ( + 2 > #here + 3 # +) # + FALSE + +while ( + 2 > #here + 3 # +) + FALSE + +while ( + 2 > #here + 3 +) # + FALSE + +while (# + 2 > + 3 +) # + FALSE diff --git a/tests/testthat/indention_operators/while_for_if_without_curly_non_strict-in_tree b/tests/testthat/indention_operators/while_for_if_without_curly_non_strict-in_tree new file mode 100644 index 000000000..375209df0 --- /dev/null +++ b/tests/testthat/indention_operators/while_for_if_without_curly_non_strict-in_tree @@ -0,0 +1,253 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: while [0/0] {1} + ¦ ¦--WHILE: while [0/1] {2} + ¦ ¦--'(': ( [0/0] {3} + ¦ ¦--expr: x > 3 [0/0] {4} + ¦ ¦ ¦--expr: x [0/1] {6} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {5} + ¦ ¦ ¦--GT: > [0/1] {7} + ¦ ¦ °--expr: 3 [0/0] {9} + ¦ ¦ °--NUM_CONST: 3 [0/0] {8} + ¦ ¦--')': ) [0/2] {10} + ¦ °--expr: retur [1/0] {11} + ¦ ¦--expr: retur [0/0] {13} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: retur [0/0] {12} + ¦ ¦--'(': ( [0/0] {14} + ¦ ¦--expr: FALSE [0/0] {16} + ¦ ¦ °--NUM_CONST: FALSE [0/0] {15} + ¦ °--')': ) [0/0] {17} + ¦--expr: for ( [2/0] {18} + ¦ ¦--FOR: for [0/1] {19} + ¦ ¦--forcond: (i in [0/2] {20} + ¦ ¦ ¦--'(': ( [0/0] {21} + ¦ ¦ ¦--SYMBOL: i [0/1] {22} + ¦ ¦ ¦--IN: in [0/1] {23} + ¦ ¦ ¦--expr: 1:3 [0/0] {24} + ¦ ¦ ¦ ¦--expr: 1 [0/0] {26} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {25} + ¦ ¦ ¦ ¦--':': : [0/0] {27} + ¦ ¦ ¦ °--expr: 3 [0/0] {29} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {28} + ¦ ¦ °--')': ) [0/0] {30} + ¦ °--expr: print [1/0] {31} + ¦ ¦--expr: print [0/0] {33} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: print [0/0] {32} + ¦ ¦--'(': ( [0/0] {34} + ¦ ¦--expr: i [0/0] {36} + ¦ ¦ °--SYMBOL: i [0/0] {35} + ¦ °--')': ) [0/0] {37} + ¦--expr: if (x [2/0] {38} + ¦ ¦--IF: if [0/1] {39} + ¦ ¦--'(': ( [0/0] {40} + ¦ ¦--expr: x [0/0] {42} + ¦ ¦ °--SYMBOL: x [0/0] {41} + ¦ ¦--')': ) [0/2] {43} + ¦ °--expr: call2 [1/0] {44} + ¦ ¦--expr: call2 [0/0] {46} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call2 [0/0] {45} + ¦ ¦--'(': ( [0/0] {47} + ¦ ¦--expr: 3 [0/0] {49} + ¦ ¦ °--NUM_CONST: 3 [0/0] {48} + ¦ °--')': ) [0/0] {50} + ¦--expr: for ( [2/0] {51} + ¦ ¦--FOR: for [0/1] {52} + ¦ ¦--forcond: (i in [0/1] {53} + ¦ ¦ ¦--'(': ( [0/0] {54} + ¦ ¦ ¦--SYMBOL: i [0/1] {55} + ¦ ¦ ¦--IN: in [0/1] {56} + ¦ ¦ ¦--expr: 1:3 [0/0] {57} + ¦ ¦ ¦ ¦--expr: 1 [0/0] {59} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {58} + ¦ ¦ ¦ ¦--':': : [0/0] {60} + ¦ ¦ ¦ °--expr: 3 [0/0] {62} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {61} + ¦ ¦ °--')': ) [0/0] {63} + ¦ ¦--COMMENT: # [0/2] {64} + ¦ °--expr: print [1/0] {65} + ¦ ¦--expr: print [0/0] {67} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: print [0/0] {66} + ¦ ¦--'(': ( [0/0] {68} + ¦ ¦--expr: i [0/0] {70} + ¦ ¦ °--SYMBOL: i [0/0] {69} + ¦ °--')': ) [0/0] {71} + ¦--expr: for ( [2/0] {72} + ¦ ¦--FOR: for [0/1] {73} + ¦ ¦--forcond: (i in [0/1] {74} + ¦ ¦ ¦--'(': ( [0/0] {75} + ¦ ¦ ¦--SYMBOL: i [0/1] {76} + ¦ ¦ ¦--IN: in [0/5] {77} + ¦ ¦ ¦--expr: 1:3 [1/0] {78} + ¦ ¦ ¦ 
¦--expr: 1 [0/0] {80} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {79} + ¦ ¦ ¦ ¦--':': : [0/0] {81} + ¦ ¦ ¦ °--expr: 3 [0/0] {83} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {82} + ¦ ¦ °--')': ) [0/0] {84} + ¦ ¦--COMMENT: # [0/2] {85} + ¦ °--expr: print [1/0] {86} + ¦ ¦--expr: print [0/0] {88} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: print [0/0] {87} + ¦ ¦--'(': ( [0/0] {89} + ¦ ¦--expr: i [0/0] {91} + ¦ ¦ °--SYMBOL: i [0/0] {90} + ¦ °--')': ) [0/0] {92} + ¦--expr: for ( [2/0] {93} + ¦ ¦--FOR: for [0/1] {94} + ¦ ¦--forcond: (i in [0/1] {95} + ¦ ¦ ¦--'(': ( [0/0] {96} + ¦ ¦ ¦--SYMBOL: i [0/1] {97} + ¦ ¦ ¦--IN: in [0/1] {98} + ¦ ¦ ¦--COMMENT: # [0/5] {99} + ¦ ¦ ¦--expr: 1:3 [1/0] {100} + ¦ ¦ ¦ ¦--expr: 1 [0/0] {102} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {101} + ¦ ¦ ¦ ¦--':': : [0/0] {103} + ¦ ¦ ¦ °--expr: 3 [0/0] {105} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {104} + ¦ ¦ °--')': ) [0/0] {106} + ¦ ¦--COMMENT: # [0/2] {107} + ¦ °--expr: print [1/0] {108} + ¦ ¦--expr: print [0/0] {110} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: print [0/0] {109} + ¦ ¦--'(': ( [0/0] {111} + ¦ ¦--expr: i [0/0] {113} + ¦ ¦ °--SYMBOL: i [0/0] {112} + ¦ °--')': ) [0/0] {114} + ¦--expr: for ( [2/0] {115} + ¦ ¦--FOR: for [0/1] {116} + ¦ ¦--forcond: (# + [0/1] {117} + ¦ ¦ ¦--'(': ( [0/0] {118} + ¦ ¦ ¦--COMMENT: # [0/2] {119} + ¦ ¦ ¦--SYMBOL: i [1/1] {120} + ¦ ¦ ¦--IN: in [0/1] {121} + ¦ ¦ ¦--COMMENT: # [0/2] {122} + ¦ ¦ ¦--expr: 1:3 [1/0] {123} + ¦ ¦ ¦ ¦--expr: 1 [0/0] {125} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {124} + ¦ ¦ ¦ ¦--':': : [0/0] {126} + ¦ ¦ ¦ °--expr: 3 [0/0] {128} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {127} + ¦ ¦ ¦--COMMENT: # [0/0] {129} + ¦ ¦ °--')': ) [1/0] {130} + ¦ ¦--COMMENT: # [0/2] {131} + ¦ °--expr: print [1/0] {132} + ¦ ¦--expr: print [0/0] {134} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: print [0/0] {133} + ¦ ¦--'(': ( [0/0] {135} + ¦ ¦--expr: i [0/0] {137} + ¦ ¦ °--SYMBOL: i [0/0] {136} + ¦ °--')': ) [0/0] {138} + ¦--expr: while [3/0] {139} + ¦ ¦--WHILE: while [0/1] {140} + ¦ ¦--'(': ( [0/0] {141} + ¦ ¦--expr: x > 3 [0/0] {142} + ¦ ¦ ¦--expr: x [0/1] {144} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {143} + ¦ ¦ ¦--GT: > [0/1] {145} + ¦ ¦ °--expr: 3 [0/0] {147} + ¦ ¦ °--NUM_CONST: 3 [0/0] {146} + ¦ ¦--')': ) [0/1] {148} + ¦ ¦--COMMENT: # [0/2] {149} + ¦ °--expr: retur [1/0] {150} + ¦ ¦--expr: retur [0/0] {152} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: retur [0/0] {151} + ¦ ¦--'(': ( [0/0] {153} + ¦ ¦--expr: FALSE [0/0] {155} + ¦ ¦ °--NUM_CONST: FALSE [0/0] {154} + ¦ °--')': ) [0/0] {156} + ¦--expr: while [2/0] {157} + ¦ ¦--WHILE: while [0/1] {158} + ¦ ¦--'(': ( [0/0] {159} + ¦ ¦--expr: x > 3 [0/1] {160} + ¦ ¦ ¦--expr: x [0/1] {162} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {161} + ¦ ¦ ¦--GT: > [0/1] {163} + ¦ ¦ °--expr: 3 [0/0] {165} + ¦ ¦ °--NUM_CONST: 3 [0/0] {164} + ¦ ¦--COMMENT: # [0/0] {166} + ¦ ¦--')': ) [1/2] {167} + ¦ °--expr: retur [1/0] {168} + ¦ ¦--expr: retur [0/0] {170} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: retur [0/0] {169} + ¦ ¦--'(': ( [0/0] {171} + ¦ ¦--expr: FALSE [0/0] {173} + ¦ ¦ °--NUM_CONST: FALSE [0/0] {172} + ¦ °--')': ) [0/0] {174} + ¦--expr: while [2/0] {175} + ¦ ¦--WHILE: while [0/1] {176} + ¦ ¦--'(': ( [0/1] {177} + ¦ ¦--COMMENT: # tes [0/2] {178} + ¦ ¦--expr: x > 3 [1/0] {179} + ¦ ¦ ¦--expr: x [0/1] {181} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {180} + ¦ ¦ ¦--GT: > [0/1] {182} + ¦ ¦ °--expr: 3 [0/0] {184} + ¦ ¦ °--NUM_CONST: 3 [0/0] {183} + ¦ ¦--')': ) [0/1] {185} + ¦ ¦--COMMENT: # ano [0/2] {186} + ¦ °--expr: retur [1/0] {187} + ¦ ¦--expr: retur [0/0] {189} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: retur [0/0] {188} + ¦ ¦--'(': ( [0/0] {190} + ¦ ¦--expr: FALSE [0/0] {192} + ¦ ¦ °--NUM_CONST: FALSE [0/0] {191} + ¦ °--')': ) 
[0/0] {193} + ¦--expr: while [2/0] {194} + ¦ ¦--WHILE: while [0/1] {195} + ¦ ¦--'(': ( [0/2] {196} + ¦ ¦--expr: 2 > # [1/1] {197} + ¦ ¦ ¦--expr: 2 [0/1] {199} + ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {198} + ¦ ¦ ¦--GT: > [0/1] {200} + ¦ ¦ ¦--COMMENT: #here [0/2] {201} + ¦ ¦ °--expr: 3 [1/0] {203} + ¦ ¦ °--NUM_CONST: 3 [0/0] {202} + ¦ ¦--COMMENT: # [0/0] {204} + ¦ ¦--')': ) [1/1] {205} + ¦ ¦--COMMENT: # [0/2] {206} + ¦ °--expr: FALSE [1/0] {208} + ¦ °--NUM_CONST: FALSE [0/0] {207} + ¦--expr: while [2/0] {209} + ¦ ¦--WHILE: while [0/1] {210} + ¦ ¦--'(': ( [0/2] {211} + ¦ ¦--expr: 2 > # [1/1] {212} + ¦ ¦ ¦--expr: 2 [0/1] {214} + ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {213} + ¦ ¦ ¦--GT: > [0/1] {215} + ¦ ¦ ¦--COMMENT: #here [0/2] {216} + ¦ ¦ °--expr: 3 [1/0] {218} + ¦ ¦ °--NUM_CONST: 3 [0/0] {217} + ¦ ¦--COMMENT: # [0/0] {219} + ¦ ¦--')': ) [1/2] {220} + ¦ °--expr: FALSE [1/0] {222} + ¦ °--NUM_CONST: FALSE [0/0] {221} + ¦--expr: while [2/0] {223} + ¦ ¦--WHILE: while [0/1] {224} + ¦ ¦--'(': ( [0/2] {225} + ¦ ¦--expr: 2 > # [1/0] {226} + ¦ ¦ ¦--expr: 2 [0/1] {228} + ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {227} + ¦ ¦ ¦--GT: > [0/1] {229} + ¦ ¦ ¦--COMMENT: #here [0/2] {230} + ¦ ¦ °--expr: 3 [1/0] {232} + ¦ ¦ °--NUM_CONST: 3 [0/0] {231} + ¦ ¦--')': ) [1/1] {233} + ¦ ¦--COMMENT: # [0/2] {234} + ¦ °--expr: FALSE [1/0] {236} + ¦ °--NUM_CONST: FALSE [0/0] {235} + °--expr: while [2/0] {237} + ¦--WHILE: while [0/1] {238} + ¦--'(': ( [0/0] {239} + ¦--COMMENT: # [0/2] {240} + ¦--expr: 2 > + [1/0] {241} + ¦ ¦--expr: 2 [0/1] {243} + ¦ ¦ °--NUM_CONST: 2 [0/0] {242} + ¦ ¦--GT: > [0/2] {244} + ¦ °--expr: 3 [1/0] {246} + ¦ °--NUM_CONST: 3 [0/0] {245} + ¦--')': ) [1/1] {247} + ¦--COMMENT: # [0/2] {248} + °--expr: FALSE [1/0] {250} + °--NUM_CONST: FALSE [0/0] {249} diff --git a/tests/testthat/indention_operators/while_for_if_without_curly_non_strict-out.R b/tests/testthat/indention_operators/while_for_if_without_curly_non_strict-out.R new file mode 100644 index 000000000..00490d0db --- /dev/null +++ b/tests/testthat/indention_operators/while_for_if_without_curly_non_strict-out.R @@ -0,0 +1,61 @@ +while (x > 3) + return(FALSE) + +for (i in 1:3) + print(i) + +if (x) + call2(3) + +for (i in 1:3) # + print(i) + +for (i in + 1:3) # + print(i) + +for (i in # + 1:3) # + print(i) + +for ( # + i in # + 1:3 # +) # + print(i) + + +while (x > 3) # + return(FALSE) + +while (x > 3 # +) + return(FALSE) + +while ( # test + x > 3) # another + return(FALSE) + +while ( + 2 > # here + 3 # +) # + FALSE + +while ( + 2 > # here + 3 # +) + FALSE + +while ( + 2 > # here + 3 +) # + FALSE + +while ( # + 2 > + 3 +) # + FALSE diff --git a/tests/testthat/indention_operators/while_for_if_without_curly_strict-in.R b/tests/testthat/indention_operators/while_for_if_without_curly_strict-in.R new file mode 100644 index 000000000..3bda095e9 --- /dev/null +++ b/tests/testthat/indention_operators/while_for_if_without_curly_strict-in.R @@ -0,0 +1,61 @@ +while (x > 3) +return(FALSE) + +for (i in 1:3) +print(i) + +if (x) +call2(3) + +for (i in 1:3) # + print(i) + +for (i in + 1:3) # + print(i) + +for (i in # + 1:3) # + print(i) + +for (# + i in # + 1:3# + ) # + print(i) + + +while (x > 3) # + return(FALSE) + +while (x > 3 # + ) + return(FALSE) + +while ( # test + x > 3) # another + return(FALSE) + +while ( + 2 > #here + 3 # + ) # + FALSE + +while ( + 2 > #here + 3 # +) + FALSE + +while ( + 2 > #here + 3 +) # + FALSE + +while (# + 2 > + 3 +) # + FALSE diff --git a/tests/testthat/indention_operators/while_for_if_without_curly_strict-in_tree 
b/tests/testthat/indention_operators/while_for_if_without_curly_strict-in_tree new file mode 100644 index 000000000..3375e43cc --- /dev/null +++ b/tests/testthat/indention_operators/while_for_if_without_curly_strict-in_tree @@ -0,0 +1,253 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: while [0/0] {1} + ¦ ¦--WHILE: while [0/1] {2} + ¦ ¦--'(': ( [0/0] {3} + ¦ ¦--expr: x > 3 [0/0] {4} + ¦ ¦ ¦--expr: x [0/1] {6} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {5} + ¦ ¦ ¦--GT: > [0/1] {7} + ¦ ¦ °--expr: 3 [0/0] {9} + ¦ ¦ °--NUM_CONST: 3 [0/0] {8} + ¦ ¦--')': ) [0/0] {10} + ¦ °--expr: retur [1/0] {11} + ¦ ¦--expr: retur [0/0] {13} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: retur [0/0] {12} + ¦ ¦--'(': ( [0/0] {14} + ¦ ¦--expr: FALSE [0/0] {16} + ¦ ¦ °--NUM_CONST: FALSE [0/0] {15} + ¦ °--')': ) [0/0] {17} + ¦--expr: for ( [2/0] {18} + ¦ ¦--FOR: for [0/1] {19} + ¦ ¦--forcond: (i in [0/0] {20} + ¦ ¦ ¦--'(': ( [0/0] {21} + ¦ ¦ ¦--SYMBOL: i [0/1] {22} + ¦ ¦ ¦--IN: in [0/1] {23} + ¦ ¦ ¦--expr: 1:3 [0/0] {24} + ¦ ¦ ¦ ¦--expr: 1 [0/0] {26} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {25} + ¦ ¦ ¦ ¦--':': : [0/0] {27} + ¦ ¦ ¦ °--expr: 3 [0/0] {29} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {28} + ¦ ¦ °--')': ) [0/0] {30} + ¦ °--expr: print [1/0] {31} + ¦ ¦--expr: print [0/0] {33} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: print [0/0] {32} + ¦ ¦--'(': ( [0/0] {34} + ¦ ¦--expr: i [0/0] {36} + ¦ ¦ °--SYMBOL: i [0/0] {35} + ¦ °--')': ) [0/0] {37} + ¦--expr: if (x [2/0] {38} + ¦ ¦--IF: if [0/1] {39} + ¦ ¦--'(': ( [0/0] {40} + ¦ ¦--expr: x [0/0] {42} + ¦ ¦ °--SYMBOL: x [0/0] {41} + ¦ ¦--')': ) [0/0] {43} + ¦ °--expr: call2 [1/0] {44} + ¦ ¦--expr: call2 [0/0] {46} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call2 [0/0] {45} + ¦ ¦--'(': ( [0/0] {47} + ¦ ¦--expr: 3 [0/0] {49} + ¦ ¦ °--NUM_CONST: 3 [0/0] {48} + ¦ °--')': ) [0/0] {50} + ¦--expr: for ( [2/0] {51} + ¦ ¦--FOR: for [0/1] {52} + ¦ ¦--forcond: (i in [0/1] {53} + ¦ ¦ ¦--'(': ( [0/0] {54} + ¦ ¦ ¦--SYMBOL: i [0/1] {55} + ¦ ¦ ¦--IN: in [0/1] {56} + ¦ ¦ ¦--expr: 1:3 [0/0] {57} + ¦ ¦ ¦ ¦--expr: 1 [0/0] {59} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {58} + ¦ ¦ ¦ ¦--':': : [0/0] {60} + ¦ ¦ ¦ °--expr: 3 [0/0] {62} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {61} + ¦ ¦ °--')': ) [0/0] {63} + ¦ ¦--COMMENT: # [0/2] {64} + ¦ °--expr: print [1/0] {65} + ¦ ¦--expr: print [0/0] {67} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: print [0/0] {66} + ¦ ¦--'(': ( [0/0] {68} + ¦ ¦--expr: i [0/0] {70} + ¦ ¦ °--SYMBOL: i [0/0] {69} + ¦ °--')': ) [0/0] {71} + ¦--expr: for ( [2/0] {72} + ¦ ¦--FOR: for [0/1] {73} + ¦ ¦--forcond: (i in [0/1] {74} + ¦ ¦ ¦--'(': ( [0/0] {75} + ¦ ¦ ¦--SYMBOL: i [0/1] {76} + ¦ ¦ ¦--IN: in [0/5] {77} + ¦ ¦ ¦--expr: 1:3 [1/0] {78} + ¦ ¦ ¦ ¦--expr: 1 [0/0] {80} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {79} + ¦ ¦ ¦ ¦--':': : [0/0] {81} + ¦ ¦ ¦ °--expr: 3 [0/0] {83} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {82} + ¦ ¦ °--')': ) [0/0] {84} + ¦ ¦--COMMENT: # [0/2] {85} + ¦ °--expr: print [1/0] {86} + ¦ ¦--expr: print [0/0] {88} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: print [0/0] {87} + ¦ ¦--'(': ( [0/0] {89} + ¦ ¦--expr: i [0/0] {91} + ¦ ¦ °--SYMBOL: i [0/0] {90} + ¦ °--')': ) [0/0] {92} + ¦--expr: for ( [2/0] {93} + ¦ ¦--FOR: for [0/1] {94} + ¦ ¦--forcond: (i in [0/1] {95} + ¦ ¦ ¦--'(': ( [0/0] {96} + ¦ ¦ ¦--SYMBOL: i [0/1] {97} + ¦ ¦ ¦--IN: in [0/1] {98} + ¦ ¦ ¦--COMMENT: # [0/5] {99} + ¦ ¦ ¦--expr: 1:3 [1/0] {100} + ¦ ¦ ¦ ¦--expr: 1 [0/0] {102} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {101} + ¦ ¦ ¦ ¦--':': : [0/0] {103} + ¦ ¦ ¦ °--expr: 3 [0/0] {105} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {104} + ¦ ¦ °--')': ) [0/0] {106} + ¦ ¦--COMMENT: # [0/2] {107} + ¦ °--expr: print [1/0] {108} + ¦ ¦--expr: print 
[0/0] {110} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: print [0/0] {109} + ¦ ¦--'(': ( [0/0] {111} + ¦ ¦--expr: i [0/0] {113} + ¦ ¦ °--SYMBOL: i [0/0] {112} + ¦ °--')': ) [0/0] {114} + ¦--expr: for ( [2/0] {115} + ¦ ¦--FOR: for [0/1] {116} + ¦ ¦--forcond: (# + [0/1] {117} + ¦ ¦ ¦--'(': ( [0/0] {118} + ¦ ¦ ¦--COMMENT: # [0/2] {119} + ¦ ¦ ¦--SYMBOL: i [1/1] {120} + ¦ ¦ ¦--IN: in [0/1] {121} + ¦ ¦ ¦--COMMENT: # [0/5] {122} + ¦ ¦ ¦--expr: 1:3 [1/0] {123} + ¦ ¦ ¦ ¦--expr: 1 [0/0] {125} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {124} + ¦ ¦ ¦ ¦--':': : [0/0] {126} + ¦ ¦ ¦ °--expr: 3 [0/0] {128} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {127} + ¦ ¦ ¦--COMMENT: # [0/2] {129} + ¦ ¦ °--')': ) [1/0] {130} + ¦ ¦--COMMENT: # [0/2] {131} + ¦ °--expr: print [1/0] {132} + ¦ ¦--expr: print [0/0] {134} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: print [0/0] {133} + ¦ ¦--'(': ( [0/0] {135} + ¦ ¦--expr: i [0/0] {137} + ¦ ¦ °--SYMBOL: i [0/0] {136} + ¦ °--')': ) [0/0] {138} + ¦--expr: while [3/0] {139} + ¦ ¦--WHILE: while [0/1] {140} + ¦ ¦--'(': ( [0/0] {141} + ¦ ¦--expr: x > 3 [0/0] {142} + ¦ ¦ ¦--expr: x [0/1] {144} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {143} + ¦ ¦ ¦--GT: > [0/1] {145} + ¦ ¦ °--expr: 3 [0/0] {147} + ¦ ¦ °--NUM_CONST: 3 [0/0] {146} + ¦ ¦--')': ) [0/1] {148} + ¦ ¦--COMMENT: # [0/2] {149} + ¦ °--expr: retur [1/0] {150} + ¦ ¦--expr: retur [0/0] {152} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: retur [0/0] {151} + ¦ ¦--'(': ( [0/0] {153} + ¦ ¦--expr: FALSE [0/0] {155} + ¦ ¦ °--NUM_CONST: FALSE [0/0] {154} + ¦ °--')': ) [0/0] {156} + ¦--expr: while [2/0] {157} + ¦ ¦--WHILE: while [0/1] {158} + ¦ ¦--'(': ( [0/0] {159} + ¦ ¦--expr: x > 3 [0/1] {160} + ¦ ¦ ¦--expr: x [0/1] {162} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {161} + ¦ ¦ ¦--GT: > [0/1] {163} + ¦ ¦ °--expr: 3 [0/0] {165} + ¦ ¦ °--NUM_CONST: 3 [0/0] {164} + ¦ ¦--COMMENT: # [0/7] {166} + ¦ ¦--')': ) [1/2] {167} + ¦ °--expr: retur [1/0] {168} + ¦ ¦--expr: retur [0/0] {170} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: retur [0/0] {169} + ¦ ¦--'(': ( [0/0] {171} + ¦ ¦--expr: FALSE [0/0] {173} + ¦ ¦ °--NUM_CONST: FALSE [0/0] {172} + ¦ °--')': ) [0/0] {174} + ¦--expr: while [2/0] {175} + ¦ ¦--WHILE: while [0/1] {176} + ¦ ¦--'(': ( [0/1] {177} + ¦ ¦--COMMENT: # tes [0/2] {178} + ¦ ¦--expr: x > 3 [1/0] {179} + ¦ ¦ ¦--expr: x [0/1] {181} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {180} + ¦ ¦ ¦--GT: > [0/1] {182} + ¦ ¦ °--expr: 3 [0/0] {184} + ¦ ¦ °--NUM_CONST: 3 [0/0] {183} + ¦ ¦--')': ) [0/1] {185} + ¦ ¦--COMMENT: # ano [0/2] {186} + ¦ °--expr: retur [1/0] {187} + ¦ ¦--expr: retur [0/0] {189} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: retur [0/0] {188} + ¦ ¦--'(': ( [0/0] {190} + ¦ ¦--expr: FALSE [0/0] {192} + ¦ ¦ °--NUM_CONST: FALSE [0/0] {191} + ¦ °--')': ) [0/0] {193} + ¦--expr: while [2/0] {194} + ¦ ¦--WHILE: while [0/1] {195} + ¦ ¦--'(': ( [0/2] {196} + ¦ ¦--expr: 2 > # [1/1] {197} + ¦ ¦ ¦--expr: 2 [0/1] {199} + ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {198} + ¦ ¦ ¦--GT: > [0/1] {200} + ¦ ¦ ¦--COMMENT: #here [0/2] {201} + ¦ ¦ °--expr: 3 [1/0] {203} + ¦ ¦ °--NUM_CONST: 3 [0/0] {202} + ¦ ¦--COMMENT: # [0/2] {204} + ¦ ¦--')': ) [1/1] {205} + ¦ ¦--COMMENT: # [0/2] {206} + ¦ °--expr: FALSE [1/0] {208} + ¦ °--NUM_CONST: FALSE [0/0] {207} + ¦--expr: while [2/0] {209} + ¦ ¦--WHILE: while [0/1] {210} + ¦ ¦--'(': ( [0/2] {211} + ¦ ¦--expr: 2 > # [1/1] {212} + ¦ ¦ ¦--expr: 2 [0/1] {214} + ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {213} + ¦ ¦ ¦--GT: > [0/1] {215} + ¦ ¦ ¦--COMMENT: #here [0/2] {216} + ¦ ¦ °--expr: 3 [1/0] {218} + ¦ ¦ °--NUM_CONST: 3 [0/0] {217} + ¦ ¦--COMMENT: # [0/0] {219} + ¦ ¦--')': ) [1/2] {220} + ¦ °--expr: FALSE [1/0] {222} + ¦ °--NUM_CONST: FALSE [0/0] {221} + ¦--expr: while [2/0] {223} 
+ ¦ ¦--WHILE: while [0/1] {224} + ¦ ¦--'(': ( [0/2] {225} + ¦ ¦--expr: 2 > # [1/0] {226} + ¦ ¦ ¦--expr: 2 [0/1] {228} + ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {227} + ¦ ¦ ¦--GT: > [0/1] {229} + ¦ ¦ ¦--COMMENT: #here [0/2] {230} + ¦ ¦ °--expr: 3 [1/0] {232} + ¦ ¦ °--NUM_CONST: 3 [0/0] {231} + ¦ ¦--')': ) [1/1] {233} + ¦ ¦--COMMENT: # [0/2] {234} + ¦ °--expr: FALSE [1/0] {236} + ¦ °--NUM_CONST: FALSE [0/0] {235} + °--expr: while [2/0] {237} + ¦--WHILE: while [0/1] {238} + ¦--'(': ( [0/0] {239} + ¦--COMMENT: # [0/2] {240} + ¦--expr: 2 > + [1/0] {241} + ¦ ¦--expr: 2 [0/1] {243} + ¦ ¦ °--NUM_CONST: 2 [0/0] {242} + ¦ ¦--GT: > [0/2] {244} + ¦ °--expr: 3 [1/0] {246} + ¦ °--NUM_CONST: 3 [0/0] {245} + ¦--')': ) [1/1] {247} + ¦--COMMENT: # [0/2] {248} + °--expr: FALSE [1/0] {250} + °--NUM_CONST: FALSE [0/0] {249} diff --git a/tests/testthat/indention_operators/while_for_if_without_curly_strict-out.R b/tests/testthat/indention_operators/while_for_if_without_curly_strict-out.R new file mode 100644 index 000000000..e759975fc --- /dev/null +++ b/tests/testthat/indention_operators/while_for_if_without_curly_strict-out.R @@ -0,0 +1,75 @@ +while (x > 3) { + return(FALSE) +} + +for (i in 1:3) { + print(i) +} + +if (x) { + call2(3) +} + +for (i in 1:3) { # + print(i) +} + +for (i in + 1:3) { # + print(i) +} + +for (i in # + 1:3) { # + print(i) +} + +for ( # + i in # + 1:3 # +) { # + print(i) +} + + +while (x > 3) { # + return(FALSE) +} + +while (x > 3 # +) { + return(FALSE) +} + +while ( # test + x > 3) { # another + return(FALSE) +} + +while ( + 2 > # here + 3 # +) { # + FALSE +} + +while ( + 2 > # here + 3 # +) { + FALSE +} + +while ( + 2 > # here + 3 +) { # + FALSE +} + +while ( # + 2 > + 3 +) { # + FALSE +} diff --git a/tests/testthat/indention_operators/while_for_without_curly_same_line_non_strict-in.R b/tests/testthat/indention_operators/while_for_without_curly_same_line_non_strict-in.R new file mode 100644 index 000000000..b26d46bc8 --- /dev/null +++ b/tests/testthat/indention_operators/while_for_without_curly_same_line_non_strict-in.R @@ -0,0 +1,31 @@ +while(x == 2) h( + 2 +) + +while(x == 2) h( # comment + 2 +) + +while(x == 2 && + 2 + 2 == 2) h( + 2 +) + + +for(x in 1:22) h( + 2 +) + +for(x in 1:22) h( # comment + 2 +) + +for(k in f( + 2:22 +)) h( + 2 + ) + +for(k in f( + 2:22 # comment +)) h(2) diff --git a/tests/testthat/indention_operators/while_for_without_curly_same_line_non_strict-in_tree b/tests/testthat/indention_operators/while_for_without_curly_same_line_non_strict-in_tree new file mode 100644 index 000000000..95ae7014b --- /dev/null +++ b/tests/testthat/indention_operators/while_for_without_curly_same_line_non_strict-in_tree @@ -0,0 +1,163 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: while [0/0] {1} + ¦ ¦--WHILE: while [0/0] {2} + ¦ ¦--'(': ( [0/0] {3} + ¦ ¦--expr: x == [0/0] {4} + ¦ ¦ ¦--expr: x [0/1] {6} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {5} + ¦ ¦ ¦--EQ: == [0/1] {7} + ¦ ¦ °--expr: 2 [0/0] {9} + ¦ ¦ °--NUM_CONST: 2 [0/0] {8} + ¦ ¦--')': ) [0/1] {10} + ¦ °--expr: h( + [0/0] {11} + ¦ ¦--expr: h [0/0] {13} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: h [0/0] {12} + ¦ ¦--'(': ( [0/2] {14} + ¦ ¦--expr: 2 [1/0] {16} + ¦ ¦ °--NUM_CONST: 2 [0/0] {15} + ¦ °--')': ) [1/0] {17} + ¦--expr: while [2/0] {18} + ¦ ¦--WHILE: while [0/0] {19} + ¦ ¦--'(': ( [0/0] {20} + ¦ ¦--expr: x == [0/0] {21} + ¦ ¦ ¦--expr: x [0/1] {23} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {22} + ¦ ¦ ¦--EQ: == [0/1] {24} + ¦ ¦ °--expr: 2 [0/0] {26} + ¦ ¦ °--NUM_CONST: 2 [0/0] {25} + ¦ ¦--')': ) [0/1] {27} + ¦ °--expr: h( # [0/0] {28} + ¦ ¦--expr: h 
[0/0] {30} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: h [0/0] {29} + ¦ ¦--'(': ( [0/1] {31} + ¦ ¦--COMMENT: # com [0/2] {32} + ¦ ¦--expr: 2 [1/0] {34} + ¦ ¦ °--NUM_CONST: 2 [0/0] {33} + ¦ °--')': ) [1/0] {35} + ¦--expr: while [2/0] {36} + ¦ ¦--WHILE: while [0/0] {37} + ¦ ¦--'(': ( [0/0] {38} + ¦ ¦--expr: x == [0/0] {39} + ¦ ¦ ¦--expr: x == [0/1] {40} + ¦ ¦ ¦ ¦--expr: x [0/1] {42} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {41} + ¦ ¦ ¦ ¦--EQ: == [0/1] {43} + ¦ ¦ ¦ °--expr: 2 [0/0] {45} + ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {44} + ¦ ¦ ¦--AND2: && [0/6] {46} + ¦ ¦ °--expr: 2 + 2 [1/0] {47} + ¦ ¦ ¦--expr: 2 + 2 [0/1] {48} + ¦ ¦ ¦ ¦--expr: 2 [0/1] {50} + ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {49} + ¦ ¦ ¦ ¦--'+': + [0/1] {51} + ¦ ¦ ¦ °--expr: 2 [0/0] {53} + ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {52} + ¦ ¦ ¦--EQ: == [0/1] {54} + ¦ ¦ °--expr: 2 [0/0] {56} + ¦ ¦ °--NUM_CONST: 2 [0/0] {55} + ¦ ¦--')': ) [0/1] {57} + ¦ °--expr: h( + [0/0] {58} + ¦ ¦--expr: h [0/0] {60} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: h [0/0] {59} + ¦ ¦--'(': ( [0/2] {61} + ¦ ¦--expr: 2 [1/0] {63} + ¦ ¦ °--NUM_CONST: 2 [0/0] {62} + ¦ °--')': ) [1/0] {64} + ¦--expr: for(x [3/0] {65} + ¦ ¦--FOR: for [0/0] {66} + ¦ ¦--forcond: (x in [0/1] {67} + ¦ ¦ ¦--'(': ( [0/0] {68} + ¦ ¦ ¦--SYMBOL: x [0/1] {69} + ¦ ¦ ¦--IN: in [0/1] {70} + ¦ ¦ ¦--expr: 1:22 [0/0] {71} + ¦ ¦ ¦ ¦--expr: 1 [0/0] {73} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {72} + ¦ ¦ ¦ ¦--':': : [0/0] {74} + ¦ ¦ ¦ °--expr: 22 [0/0] {76} + ¦ ¦ ¦ °--NUM_CONST: 22 [0/0] {75} + ¦ ¦ °--')': ) [0/0] {77} + ¦ °--expr: h( + [0/0] {78} + ¦ ¦--expr: h [0/0] {80} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: h [0/0] {79} + ¦ ¦--'(': ( [0/2] {81} + ¦ ¦--expr: 2 [1/0] {83} + ¦ ¦ °--NUM_CONST: 2 [0/0] {82} + ¦ °--')': ) [1/0] {84} + ¦--expr: for(x [2/0] {85} + ¦ ¦--FOR: for [0/0] {86} + ¦ ¦--forcond: (x in [0/1] {87} + ¦ ¦ ¦--'(': ( [0/0] {88} + ¦ ¦ ¦--SYMBOL: x [0/1] {89} + ¦ ¦ ¦--IN: in [0/1] {90} + ¦ ¦ ¦--expr: 1:22 [0/0] {91} + ¦ ¦ ¦ ¦--expr: 1 [0/0] {93} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {92} + ¦ ¦ ¦ ¦--':': : [0/0] {94} + ¦ ¦ ¦ °--expr: 22 [0/0] {96} + ¦ ¦ ¦ °--NUM_CONST: 22 [0/0] {95} + ¦ ¦ °--')': ) [0/0] {97} + ¦ °--expr: h( # [0/0] {98} + ¦ ¦--expr: h [0/0] {100} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: h [0/0] {99} + ¦ ¦--'(': ( [0/1] {101} + ¦ ¦--COMMENT: # com [0/2] {102} + ¦ ¦--expr: 2 [1/0] {104} + ¦ ¦ °--NUM_CONST: 2 [0/0] {103} + ¦ °--')': ) [1/0] {105} + ¦--expr: for(k [2/0] {106} + ¦ ¦--FOR: for [0/0] {107} + ¦ ¦--forcond: (k in [0/1] {108} + ¦ ¦ ¦--'(': ( [0/0] {109} + ¦ ¦ ¦--SYMBOL: k [0/1] {110} + ¦ ¦ ¦--IN: in [0/1] {111} + ¦ ¦ ¦--expr: f( + [0/0] {112} + ¦ ¦ ¦ ¦--expr: f [0/0] {114} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: f [0/0] {113} + ¦ ¦ ¦ ¦--'(': ( [0/2] {115} + ¦ ¦ ¦ ¦--expr: 2:22 [1/0] {116} + ¦ ¦ ¦ ¦ ¦--expr: 2 [0/0] {118} + ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {117} + ¦ ¦ ¦ ¦ ¦--':': : [0/0] {119} + ¦ ¦ ¦ ¦ °--expr: 22 [0/0] {121} + ¦ ¦ ¦ ¦ °--NUM_CONST: 22 [0/0] {120} + ¦ ¦ ¦ °--')': ) [1/0] {122} + ¦ ¦ °--')': ) [0/0] {123} + ¦ °--expr: h( + [0/0] {124} + ¦ ¦--expr: h [0/0] {126} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: h [0/0] {125} + ¦ ¦--'(': ( [0/8] {127} + ¦ ¦--expr: 2 [1/6] {129} + ¦ ¦ °--NUM_CONST: 2 [0/0] {128} + ¦ °--')': ) [1/0] {130} + °--expr: for(k [2/0] {131} + ¦--FOR: for [0/0] {132} + ¦--forcond: (k in [0/1] {133} + ¦ ¦--'(': ( [0/0] {134} + ¦ ¦--SYMBOL: k [0/1] {135} + ¦ ¦--IN: in [0/1] {136} + ¦ ¦--expr: f( + [0/0] {137} + ¦ ¦ ¦--expr: f [0/0] {139} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: f [0/0] {138} + ¦ ¦ ¦--'(': ( [0/2] {140} + ¦ ¦ ¦--expr: 2:22 [1/1] {141} + ¦ ¦ ¦ ¦--expr: 2 [0/0] {143} + ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {142} + ¦ ¦ ¦ ¦--':': : [0/0] {144} + 
¦ ¦ ¦ °--expr: 22 [0/0] {146} + ¦ ¦ ¦ °--NUM_CONST: 22 [0/0] {145} + ¦ ¦ ¦--COMMENT: # com [0/0] {147} + ¦ ¦ °--')': ) [1/0] {148} + ¦ °--')': ) [0/0] {149} + °--expr: h(2) [0/0] {150} + ¦--expr: h [0/0] {152} + ¦ °--SYMBOL_FUNCTION_CALL: h [0/0] {151} + ¦--'(': ( [0/0] {153} + ¦--expr: 2 [0/0] {155} + ¦ °--NUM_CONST: 2 [0/0] {154} + °--')': ) [0/0] {156} diff --git a/tests/testthat/indention_operators/while_for_without_curly_same_line_non_strict-out.R b/tests/testthat/indention_operators/while_for_without_curly_same_line_non_strict-out.R new file mode 100644 index 000000000..7342282c8 --- /dev/null +++ b/tests/testthat/indention_operators/while_for_without_curly_same_line_non_strict-out.R @@ -0,0 +1,31 @@ +while (x == 2) h( + 2 +) + +while (x == 2) h( # comment + 2 +) + +while (x == 2 && + 2 + 2 == 2) h( + 2 +) + + +for (x in 1:22) h( + 2 +) + +for (x in 1:22) h( # comment + 2 +) + +for (k in f( + 2:22 +)) h( + 2 +) + +for (k in f( + 2:22 # comment +)) h(2) diff --git a/tests/testthat/indention_round_brackets/arithmetic_no_start-in.R b/tests/testthat/indention_round_brackets/arithmetic_no_start-in.R index e4d2a565a..01e7fb8e3 100644 --- a/tests/testthat/indention_round_brackets/arithmetic_no_start-in.R +++ b/tests/testthat/indention_round_brackets/arithmetic_no_start-in.R @@ -1,4 +1,3 @@ 1 + 2 + ( 3 + 4) - diff --git a/tests/testthat/indention_round_brackets/arithmetic_no_start-in_tree b/tests/testthat/indention_round_brackets/arithmetic_no_start-in_tree index 28c23a4b2..4f3aa7dc8 100644 --- a/tests/testthat/indention_round_brackets/arithmetic_no_start-in_tree +++ b/tests/testthat/indention_round_brackets/arithmetic_no_start-in_tree @@ -1,17 +1,19 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/1] {4} + °--expr: 1 + + [0/0] {1} + ¦--expr: 1 [0/1] {4} ¦ °--NUM_CONST: 1 [0/0] {3} ¦--'+': + [0/13] {5} - ¦--expr: [1/1] {7} + ¦--expr: 2 [1/1] {7} ¦ °--NUM_CONST: 2 [0/0] {6} ¦--'+': + [0/1] {8} - °--expr: [0/0] {9} + °--expr: ( +3 + [0/0] {9} ¦--'(': ( [0/0] {10} - ¦--expr: [1/0] {11} - ¦ ¦--expr: [0/1] {13} + ¦--expr: 3 + 4 [1/0] {11} + ¦ ¦--expr: 3 [0/1] {13} ¦ ¦ °--NUM_CONST: 3 [0/0] {12} ¦ ¦--'+': + [0/1] {14} - ¦ °--expr: [0/0] {16} + ¦ °--expr: 4 [0/0] {16} ¦ °--NUM_CONST: 4 [0/0] {15} °--')': ) [0/0] {17} diff --git a/tests/testthat/indention_round_brackets/arithmetic_start-in_tree b/tests/testthat/indention_round_brackets/arithmetic_start-in_tree index cd0af42a3..b4c868352 100644 --- a/tests/testthat/indention_round_brackets/arithmetic_start-in_tree +++ b/tests/testthat/indention_round_brackets/arithmetic_start-in_tree @@ -1,20 +1,23 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} + °--expr: (1 + + [0/0] {1} ¦--'(': ( [0/0] {2} - ¦--expr: [0/0] {3} - ¦ ¦--expr: [0/1] {6} + ¦--expr: 1 + +2 [0/0] {3} + ¦ ¦--expr: 1 [0/1] {6} ¦ ¦ °--NUM_CONST: 1 [0/0] {5} ¦ ¦--'+': + [0/0] {7} - ¦ ¦--expr: [1/1] {9} + ¦ ¦--expr: 2 [1/1] {9} ¦ ¦ °--NUM_CONST: 2 [0/0] {8} ¦ ¦--'+': + [0/1] {10} - ¦ °--expr: [0/0] {11} + ¦ °--expr: ( +3 + [0/0] {11} ¦ ¦--'(': ( [0/0] {12} - ¦ ¦--expr: [1/2] {13} - ¦ ¦ ¦--expr: [0/1] {15} + ¦ ¦--expr: 3 + 4 [1/2] {13} + ¦ ¦ ¦--expr: 3 [0/1] {15} ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {14} ¦ ¦ ¦--'+': + [0/1] {16} - ¦ ¦ °--expr: [0/0] {18} + ¦ ¦ °--expr: 4 [0/0] {18} ¦ ¦ °--NUM_CONST: 4 [0/0] {17} ¦ °--')': ) [1/0] {19} °--')': ) [1/0] {20} diff --git a/tests/testthat/indention_round_brackets/multi_line-no-indention-in_tree b/tests/testthat/indention_round_brackets/multi_line-no-indention-in_tree index 
4d1a98f90..cdf8dcd24 100644 --- a/tests/testthat/indention_round_brackets/multi_line-no-indention-in_tree +++ b/tests/testthat/indention_round_brackets/multi_line-no-indention-in_tree @@ -1,39 +1,39 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/0] {3} + °--expr: call( [0/0] {1} + ¦--expr: call [0/0] {3} ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {2} ¦--'(': ( [0/0] {4} - ¦--expr: [1/0] {6} + ¦--expr: 1 [1/0] {6} ¦ °--NUM_CONST: 1 [0/0] {5} ¦--',': , [0/0] {7} - ¦--expr: [1/0] {8} - ¦ ¦--expr: [0/0] {10} + ¦--expr: call2 [1/0] {8} + ¦ ¦--expr: call2 [0/0] {10} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call2 [0/0] {9} ¦ ¦--'(': ( [0/0] {11} - ¦ ¦--expr: [1/0] {13} + ¦ ¦--expr: 2 [1/0] {13} ¦ ¦ °--NUM_CONST: 2 [0/0] {12} ¦ ¦--',': , [0/1] {14} - ¦ ¦--expr: [0/0] {16} + ¦ ¦--expr: 3 [0/0] {16} ¦ ¦ °--NUM_CONST: 3 [0/0] {15} ¦ ¦--',': , [0/0] {17} - ¦ ¦--expr: [1/0] {18} - ¦ ¦ ¦--expr: [0/0] {20} + ¦ ¦--expr: call3 [1/0] {18} + ¦ ¦ ¦--expr: call3 [0/0] {20} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call3 [0/0] {19} ¦ ¦ ¦--'(': ( [0/0] {21} - ¦ ¦ ¦--expr: [0/0] {23} + ¦ ¦ ¦--expr: 1 [0/0] {23} ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {22} ¦ ¦ ¦--',': , [0/1] {24} - ¦ ¦ ¦--expr: [0/0] {26} + ¦ ¦ ¦--expr: 2 [0/0] {26} ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {25} ¦ ¦ ¦--',': , [0/1] {27} - ¦ ¦ ¦--expr: [0/0] {29} + ¦ ¦ ¦--expr: 22 [0/0] {29} ¦ ¦ ¦ °--NUM_CONST: 22 [0/0] {28} ¦ ¦ °--')': ) [0/0] {30} ¦ ¦--',': , [0/0] {31} - ¦ ¦--expr: [1/0] {33} + ¦ ¦--expr: 5 [1/0] {33} ¦ ¦ °--NUM_CONST: 5 [0/0] {32} ¦ °--')': ) [1/0] {34} ¦--',': , [0/0] {35} - ¦--expr: [1/0] {37} + ¦--expr: 144 [1/0] {37} ¦ °--NUM_CONST: 144 [0/0] {36} °--')': ) [1/0] {38} diff --git a/tests/testthat/indention_round_brackets/multi_line-out.R b/tests/testthat/indention_round_brackets/multi_line-no-indention-out.R similarity index 100% rename from tests/testthat/indention_round_brackets/multi_line-out.R rename to tests/testthat/indention_round_brackets/multi_line-no-indention-out.R diff --git a/tests/testthat/indention_round_brackets/multi_line-random-in_tree b/tests/testthat/indention_round_brackets/multi_line-random-in_tree index b87ae9a02..def8bc3ab 100644 --- a/tests/testthat/indention_round_brackets/multi_line-random-in_tree +++ b/tests/testthat/indention_round_brackets/multi_line-random-in_tree @@ -1,39 +1,39 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/0] {3} + °--expr: call( [0/0] {1} + ¦--expr: call [0/0] {3} ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {2} ¦--'(': ( [0/0] {4} - ¦--expr: [1/0] {6} + ¦--expr: 1 [1/0] {6} ¦ °--NUM_CONST: 1 [0/0] {5} ¦--',': , [0/2] {7} - ¦--expr: [1/0] {8} - ¦ ¦--expr: [0/0] {10} + ¦--expr: call2 [1/0] {8} + ¦ ¦--expr: call2 [0/0] {10} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call2 [0/0] {9} ¦ ¦--'(': ( [0/4] {11} - ¦ ¦--expr: [1/0] {13} + ¦ ¦--expr: 2 [1/0] {13} ¦ ¦ °--NUM_CONST: 2 [0/0] {12} ¦ ¦--',': , [0/1] {14} - ¦ ¦--expr: [0/0] {16} + ¦ ¦--expr: 3 [0/0] {16} ¦ ¦ °--NUM_CONST: 3 [0/0] {15} ¦ ¦--',': , [0/0] {17} - ¦ ¦--expr: [1/0] {18} - ¦ ¦ ¦--expr: [0/0] {20} + ¦ ¦--expr: call3 [1/0] {18} + ¦ ¦ ¦--expr: call3 [0/0] {20} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call3 [0/0] {19} ¦ ¦ ¦--'(': ( [0/0] {21} - ¦ ¦ ¦--expr: [0/0] {23} + ¦ ¦ ¦--expr: 1 [0/0] {23} ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {22} ¦ ¦ ¦--',': , [0/1] {24} - ¦ ¦ ¦--expr: [0/0] {26} + ¦ ¦ ¦--expr: 2 [0/0] {26} ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {25} ¦ ¦ ¦--',': , [0/1] {27} - ¦ ¦ ¦--expr: [0/0] {29} + ¦ ¦ ¦--expr: 22 [0/0] {29} ¦ ¦ ¦ °--NUM_CONST: 22 [0/0] {28} ¦ ¦ °--')': ) [0/0] {30} ¦ ¦--',': , [0/13] {31} - ¦ ¦--expr: 
[1/2] {33} + ¦ ¦--expr: 5 [1/2] {33} ¦ ¦ °--NUM_CONST: 5 [0/0] {32} ¦ °--')': ) [1/0] {34} ¦--',': , [0/2] {35} - ¦--expr: [1/12] {37} + ¦--expr: 144 [1/12] {37} ¦ °--NUM_CONST: 144 [0/0] {36} °--')': ) [1/0] {38} diff --git a/tests/testthat/indention_round_brackets/multi_line-random-out.R b/tests/testthat/indention_round_brackets/multi_line-random-out.R new file mode 100644 index 000000000..e960a3d0d --- /dev/null +++ b/tests/testthat/indention_round_brackets/multi_line-random-out.R @@ -0,0 +1,9 @@ +call( + 1, + call2( + 2, 3, + call3(1, 2, 22), + 5 + ), + 144 +) diff --git a/tests/testthat/indention_round_brackets/one_line-in_tree b/tests/testthat/indention_round_brackets/one_line-in_tree index dff115ebd..89d3695f6 100644 --- a/tests/testthat/indention_round_brackets/one_line-in_tree +++ b/tests/testthat/indention_round_brackets/one_line-in_tree @@ -1,31 +1,31 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/1] {3} + °--expr: a <- [0/0] {1} + ¦--expr: a [0/1] {3} ¦ °--SYMBOL: a [0/0] {2} ¦--LEFT_ASSIGN: <- [0/1] {4} - °--expr: [0/0] {5} - ¦--expr: [0/0] {7} + °--expr: xyz(x [0/0] {5} + ¦--expr: xyz [0/0] {7} ¦ °--SYMBOL_FUNCTION_CALL: xyz [0/0] {6} ¦--'(': ( [0/0] {8} - ¦--expr: [0/0] {10} + ¦--expr: x [0/0] {10} ¦ °--SYMBOL: x [0/0] {9} ¦--',': , [0/1] {11} - ¦--expr: [0/0] {13} + ¦--expr: 22 [0/0] {13} ¦ °--NUM_CONST: 22 [0/0] {12} ¦--',': , [0/1] {14} - ¦--expr: [0/0] {15} + ¦--expr: if(x [0/0] {15} ¦ ¦--IF: if [0/0] {16} ¦ ¦--'(': ( [0/0] {17} - ¦ ¦--expr: [0/0] {18} - ¦ ¦ ¦--expr: [0/1] {20} + ¦ ¦--expr: x > 1 [0/0] {18} + ¦ ¦ ¦--expr: x [0/1] {20} ¦ ¦ ¦ °--SYMBOL: x [0/0] {19} ¦ ¦ ¦--GT: > [0/1] {21} - ¦ ¦ °--expr: [0/0] {23} + ¦ ¦ °--expr: 1 [0/0] {23} ¦ ¦ °--NUM_CONST: 1 [0/0] {22} ¦ ¦--')': ) [0/1] {24} - ¦ ¦--expr: [0/1] {26} + ¦ ¦--expr: 33 [0/1] {26} ¦ ¦ °--NUM_CONST: 33 [0/0] {25} ¦ ¦--ELSE: else [0/1] {27} - ¦ °--expr: [0/0] {29} + ¦ °--expr: 4 [0/0] {29} ¦ °--NUM_CONST: 4 [0/0] {28} °--')': ) [0/0] {30} diff --git a/tests/testthat/indention_round_brackets/one_line-nested-in_tree b/tests/testthat/indention_round_brackets/one_line-nested-in_tree index dff115ebd..89d3695f6 100644 --- a/tests/testthat/indention_round_brackets/one_line-nested-in_tree +++ b/tests/testthat/indention_round_brackets/one_line-nested-in_tree @@ -1,31 +1,31 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/1] {3} + °--expr: a <- [0/0] {1} + ¦--expr: a [0/1] {3} ¦ °--SYMBOL: a [0/0] {2} ¦--LEFT_ASSIGN: <- [0/1] {4} - °--expr: [0/0] {5} - ¦--expr: [0/0] {7} + °--expr: xyz(x [0/0] {5} + ¦--expr: xyz [0/0] {7} ¦ °--SYMBOL_FUNCTION_CALL: xyz [0/0] {6} ¦--'(': ( [0/0] {8} - ¦--expr: [0/0] {10} + ¦--expr: x [0/0] {10} ¦ °--SYMBOL: x [0/0] {9} ¦--',': , [0/1] {11} - ¦--expr: [0/0] {13} + ¦--expr: 22 [0/0] {13} ¦ °--NUM_CONST: 22 [0/0] {12} ¦--',': , [0/1] {14} - ¦--expr: [0/0] {15} + ¦--expr: if(x [0/0] {15} ¦ ¦--IF: if [0/0] {16} ¦ ¦--'(': ( [0/0] {17} - ¦ ¦--expr: [0/0] {18} - ¦ ¦ ¦--expr: [0/1] {20} + ¦ ¦--expr: x > 1 [0/0] {18} + ¦ ¦ ¦--expr: x [0/1] {20} ¦ ¦ ¦ °--SYMBOL: x [0/0] {19} ¦ ¦ ¦--GT: > [0/1] {21} - ¦ ¦ °--expr: [0/0] {23} + ¦ ¦ °--expr: 1 [0/0] {23} ¦ ¦ °--NUM_CONST: 1 [0/0] {22} ¦ ¦--')': ) [0/1] {24} - ¦ ¦--expr: [0/1] {26} + ¦ ¦--expr: 33 [0/1] {26} ¦ ¦ °--NUM_CONST: 33 [0/0] {25} ¦ ¦--ELSE: else [0/1] {27} - ¦ °--expr: [0/0] {29} + ¦ °--expr: 4 [0/0] {29} ¦ °--NUM_CONST: 4 [0/0] {28} °--')': ) [0/0] {30} diff --git a/tests/testthat/indention_round_brackets/one_line-nested-out.R 
b/tests/testthat/indention_round_brackets/one_line-nested-out.R new file mode 100644 index 000000000..1f0bb9616 --- /dev/null +++ b/tests/testthat/indention_round_brackets/one_line-nested-out.R @@ -0,0 +1 @@ +a <- xyz(x, 22, if (x > 1) 33 else 4) diff --git a/tests/testthat/indention_square_brackets/square_brackets_double_line_break-in.R b/tests/testthat/indention_square_brackets/square_brackets_double_line_break-in.R new file mode 100644 index 000000000..f5a2fc449 --- /dev/null +++ b/tests/testthat/indention_square_brackets/square_brackets_double_line_break-in.R @@ -0,0 +1,22 @@ +a[[b]] + + +a[[ + 2 +] +] + +a[[ + 2 +]] + + +a[[ +2 + ]] + + +a[[ + 2 +] # +] diff --git a/tests/testthat/indention_square_brackets/square_brackets_double_line_break-in_tree b/tests/testthat/indention_square_brackets/square_brackets_double_line_break-in_tree new file mode 100644 index 000000000..f6ae4068d --- /dev/null +++ b/tests/testthat/indention_square_brackets/square_brackets_double_line_break-in_tree @@ -0,0 +1,46 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: a[[b] [0/0] {1} + ¦ ¦--expr: a [0/0] {3} + ¦ ¦ °--SYMBOL: a [0/0] {2} + ¦ ¦--LBB: [[ [0/0] {4} + ¦ ¦--expr: b [0/0] {6} + ¦ ¦ °--SYMBOL: b [0/0] {5} + ¦ ¦--']': ] [0/0] {7} + ¦ °--']': ] [0/0] {8} + ¦--expr: a[[ + [3/0] {9} + ¦ ¦--expr: a [0/0] {11} + ¦ ¦ °--SYMBOL: a [0/0] {10} + ¦ ¦--LBB: [[ [0/2] {12} + ¦ ¦--expr: 2 [1/0] {14} + ¦ ¦ °--NUM_CONST: 2 [0/0] {13} + ¦ ¦--']': ] [1/0] {15} + ¦ °--']': ] [1/0] {16} + ¦--expr: a[[ + [2/0] {17} + ¦ ¦--expr: a [0/0] {19} + ¦ ¦ °--SYMBOL: a [0/0] {18} + ¦ ¦--LBB: [[ [0/2] {20} + ¦ ¦--expr: 2 [1/0] {22} + ¦ ¦ °--NUM_CONST: 2 [0/0] {21} + ¦ ¦--']': ] [1/0] {23} + ¦ °--']': ] [0/0] {24} + ¦--expr: a[[ +2 [3/0] {25} + ¦ ¦--expr: a [0/0] {27} + ¦ ¦ °--SYMBOL: a [0/0] {26} + ¦ ¦--LBB: [[ [0/0] {28} + ¦ ¦--expr: 2 [1/2] {30} + ¦ ¦ °--NUM_CONST: 2 [0/0] {29} + ¦ ¦--']': ] [1/0] {31} + ¦ °--']': ] [0/0] {32} + °--expr: a[[ + [3/0] {33} + ¦--expr: a [0/0] {35} + ¦ °--SYMBOL: a [0/0] {34} + ¦--LBB: [[ [0/2] {36} + ¦--expr: 2 [1/0] {38} + ¦ °--NUM_CONST: 2 [0/0] {37} + ¦--']': ] [1/1] {39} + ¦--COMMENT: # [0/0] {40} + °--']': ] [1/0] {41} diff --git a/tests/testthat/indention_square_brackets/square_brackets_double_line_break-out.R b/tests/testthat/indention_square_brackets/square_brackets_double_line_break-out.R new file mode 100644 index 000000000..2b5225958 --- /dev/null +++ b/tests/testthat/indention_square_brackets/square_brackets_double_line_break-out.R @@ -0,0 +1,22 @@ +a[[b]] + + +a[[ + 2 +] +] + +a[[ + 2 +]] + + +a[[ + 2 +]] + + +a[[ + 2 + ] # +] diff --git a/tests/testthat/indention_square_brackets/square_brackets_line_break-in.R b/tests/testthat/indention_square_brackets/square_brackets_line_break-in.R index bdfcd50e4..6dd33a894 100644 --- a/tests/testthat/indention_square_brackets/square_brackets_line_break-in.R +++ b/tests/testthat/indention_square_brackets/square_brackets_line_break-in.R @@ -5,7 +5,42 @@ fak[a, b] fac[a, b] + fac[ a, b ] + +fac[ + , `:`(a = b)] + +fac[ + , `:`(a = b) +] + +fac[, `:`(a = c) +] + +x[a ==3 | + b == v,] + +x[a ==3 + | b == v,] + +x[a ==3 || + b == v,] + +x[a ==3 + || b == v,] + +x[a ==3 + && b == v,] + +x[a ==3 + & b == v,] + +x[a ==3 && + b == v,] + +x[a ==3 & + b == v,] diff --git a/tests/testthat/indention_square_brackets/square_brackets_line_break-in_tree b/tests/testthat/indention_square_brackets/square_brackets_line_break-in_tree index a309d6b7d..834f2095e 100644 --- a/tests/testthat/indention_square_brackets/square_brackets_line_break-in_tree +++ 
b/tests/testthat/indention_square_brackets/square_brackets_line_break-in_tree @@ -1,72 +1,280 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/0] {3} + ¦--expr: range [0/0] {1} + ¦ ¦--expr: range [0/0] {3} ¦ ¦ °--SYMBOL: range [0/0] {2} ¦ ¦--'[': [ [0/0] {4} - ¦ ¦--expr: [0/0] {5} - ¦ ¦ ¦--expr: [0/1] {6} - ¦ ¦ ¦ ¦--expr: [0/1] {8} + ¦ ¦--expr: tag = [0/0] {5} + ¦ ¦ ¦--expr: tag = [0/1] {6} + ¦ ¦ ¦ ¦--expr: tag [0/1] {8} ¦ ¦ ¦ ¦ °--SYMBOL: tag [0/0] {7} ¦ ¦ ¦ ¦--EQ: == [0/1] {9} - ¦ ¦ ¦ °--expr: [0/0] {11} + ¦ ¦ ¦ °--expr: "non_ [0/0] {11} ¦ ¦ ¦ °--STR_CONST: "non_ [0/0] {10} ¦ ¦ ¦--AND: & [0/1] {12} - ¦ ¦ °--expr: [0/0] {13} - ¦ ¦ ¦--expr: [0/0] {15} + ¦ ¦ °--expr: str_d [0/0] {13} + ¦ ¦ ¦--expr: str_d [0/0] {15} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: str_d [0/0] {14} ¦ ¦ ¦--'(': ( [0/0] {16} - ¦ ¦ ¦--expr: [0/0] {18} + ¦ ¦ ¦--expr: text [0/0] {18} ¦ ¦ ¦ °--SYMBOL: text [0/0] {17} ¦ ¦ ¦--',': , [0/1] {19} - ¦ ¦ ¦--expr: [0/0] {21} + ¦ ¦ ¦--expr: ";" [0/0] {21} ¦ ¦ ¦ °--STR_CONST: ";" [0/0] {20} ¦ ¦ °--')': ) [0/0] {22} ¦ ¦--',': , [0/0] {23} - ¦ ¦--expr: [1/0] {24} - ¦ ¦ ¦--expr: [0/1] {26} + ¦ ¦--expr: text [1/0] {24} + ¦ ¦ ¦--expr: text [0/1] {26} ¦ ¦ ¦ °--SYMBOL: text [0/0] {25} ¦ ¦ ¦--LEFT_ASSIGN: := [0/1] {27} - ¦ ¦ °--expr: [0/0] {28} - ¦ ¦ ¦--expr: [0/0] {30} + ¦ ¦ °--expr: str_r [0/0] {28} + ¦ ¦ ¦--expr: str_r [0/0] {30} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: str_r [0/0] {29} ¦ ¦ ¦--'(': ( [0/0] {31} - ¦ ¦ ¦--expr: [0/0] {33} + ¦ ¦ ¦--expr: text [0/0] {33} ¦ ¦ ¦ °--SYMBOL: text [0/0] {32} ¦ ¦ ¦--',': , [0/1] {34} - ¦ ¦ ¦--expr: [0/0] {36} + ¦ ¦ ¦--expr: ";" [0/0] {36} ¦ ¦ ¦ °--STR_CONST: ";" [0/0] {35} ¦ ¦ ¦--',': , [0/1] {37} - ¦ ¦ ¦--expr: [0/0] {39} + ¦ ¦ ¦--expr: "\n" [0/0] {39} ¦ ¦ ¦ °--STR_CONST: "\n" [0/0] {38} ¦ ¦ °--')': ) [0/0] {40} ¦ °--']': ] [0/0] {41} - ¦--expr: [2/0] {42} - ¦ ¦--expr: [0/0] {44} + ¦--expr: fak[a [2/0] {42} + ¦ ¦--expr: fak [0/0] {44} ¦ ¦ °--SYMBOL: fak [0/0] {43} ¦ ¦--'[': [ [0/0] {45} - ¦ ¦--expr: [0/0] {47} + ¦ ¦--expr: a [0/0] {47} ¦ ¦ °--SYMBOL: a [0/0] {46} ¦ ¦--',': , [0/1] {48} - ¦ ¦--expr: [0/0] {50} + ¦ ¦--expr: b [0/0] {50} ¦ ¦ °--SYMBOL: b [0/0] {49} ¦ °--']': ] [0/0] {51} - ¦--expr: [2/0] {52} - ¦ ¦--expr: [0/0] {54} + ¦--expr: fac[a [2/0] {52} + ¦ ¦--expr: fac [0/0] {54} ¦ ¦ °--SYMBOL: fac [0/0] {53} ¦ ¦--'[': [ [0/0] {55} - ¦ ¦--expr: [0/0] {57} + ¦ ¦--expr: a [0/0] {57} ¦ ¦ °--SYMBOL: a [0/0] {56} ¦ ¦--',': , [0/4] {58} - ¦ ¦--expr: [1/0] {60} + ¦ ¦--expr: b [1/0] {60} ¦ ¦ °--SYMBOL: b [0/0] {59} ¦ °--']': ] [0/0] {61} - °--expr: [1/0] {62} - ¦--expr: [0/0] {64} - ¦ °--SYMBOL: fac [0/0] {63} - ¦--'[': [ [0/2] {65} - ¦--expr: [1/0] {67} - ¦ °--SYMBOL: a [0/0] {66} - ¦--',': , [0/2] {68} - ¦--expr: [1/2] {70} - ¦ °--SYMBOL: b [0/0] {69} - °--']': ] [1/0] {71} + ¦--expr: fac[ + [2/0] {62} + ¦ ¦--expr: fac [0/0] {64} + ¦ ¦ °--SYMBOL: fac [0/0] {63} + ¦ ¦--'[': [ [0/2] {65} + ¦ ¦--expr: a [1/0] {67} + ¦ ¦ °--SYMBOL: a [0/0] {66} + ¦ ¦--',': , [0/2] {68} + ¦ ¦--expr: b [1/2] {70} + ¦ ¦ °--SYMBOL: b [0/0] {69} + ¦ °--']': ] [1/0] {71} + ¦--expr: fac[ + [2/0] {72} + ¦ ¦--expr: fac [0/0] {74} + ¦ ¦ °--SYMBOL: fac [0/0] {73} + ¦ ¦--'[': [ [0/2] {75} + ¦ ¦--',': , [1/1] {76} + ¦ ¦--expr: `:`(a [0/0] {77} + ¦ ¦ ¦--expr: `:` [0/0] {79} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: `:` [0/0] {78} + ¦ ¦ ¦--'(': ( [0/0] {80} + ¦ ¦ ¦--SYMBOL_SUB: a [0/1] {81} + ¦ ¦ ¦--EQ_SUB: = [0/1] {82} + ¦ ¦ ¦--expr: b [0/0] {84} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {83} + ¦ ¦ °--')': ) [0/0] {85} + ¦ °--']': ] [0/0] {86} + ¦--expr: fac[ + [2/0] {87} + ¦ 
¦--expr: fac [0/0] {89} + ¦ ¦ °--SYMBOL: fac [0/0] {88} + ¦ ¦--'[': [ [0/2] {90} + ¦ ¦--',': , [1/1] {91} + ¦ ¦--expr: `:`(a [0/0] {92} + ¦ ¦ ¦--expr: `:` [0/0] {94} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: `:` [0/0] {93} + ¦ ¦ ¦--'(': ( [0/0] {95} + ¦ ¦ ¦--SYMBOL_SUB: a [0/1] {96} + ¦ ¦ ¦--EQ_SUB: = [0/1] {97} + ¦ ¦ ¦--expr: b [0/0] {99} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {98} + ¦ ¦ °--')': ) [0/0] {100} + ¦ °--']': ] [1/0] {101} + ¦--expr: fac[, [2/0] {102} + ¦ ¦--expr: fac [0/0] {104} + ¦ ¦ °--SYMBOL: fac [0/0] {103} + ¦ ¦--'[': [ [0/0] {105} + ¦ ¦--',': , [0/1] {106} + ¦ ¦--expr: `:`(a [0/0] {107} + ¦ ¦ ¦--expr: `:` [0/0] {109} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: `:` [0/0] {108} + ¦ ¦ ¦--'(': ( [0/0] {110} + ¦ ¦ ¦--SYMBOL_SUB: a [0/1] {111} + ¦ ¦ ¦--EQ_SUB: = [0/1] {112} + ¦ ¦ ¦--expr: c [0/0] {114} + ¦ ¦ ¦ °--SYMBOL: c [0/0] {113} + ¦ ¦ °--')': ) [0/0] {115} + ¦ °--']': ] [1/0] {116} + ¦--expr: x[a = [2/0] {117} + ¦ ¦--expr: x [0/0] {119} + ¦ ¦ °--SYMBOL: x [0/0] {118} + ¦ ¦--'[': [ [0/0] {120} + ¦ ¦--expr: a ==3 [0/0] {121} + ¦ ¦ ¦--expr: a ==3 [0/1] {122} + ¦ ¦ ¦ ¦--expr: a [0/1] {124} + ¦ ¦ ¦ ¦ °--SYMBOL: a [0/0] {123} + ¦ ¦ ¦ ¦--EQ: == [0/0] {125} + ¦ ¦ ¦ °--expr: 3 [0/0] {127} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {126} + ¦ ¦ ¦--OR: | [0/2] {128} + ¦ ¦ °--expr: b == [1/0] {129} + ¦ ¦ ¦--expr: b [0/1] {131} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {130} + ¦ ¦ ¦--EQ: == [0/1] {132} + ¦ ¦ °--expr: v [0/0] {134} + ¦ ¦ °--SYMBOL: v [0/0] {133} + ¦ ¦--',': , [0/0] {135} + ¦ °--']': ] [0/0] {136} + ¦--expr: x[a = [2/0] {137} + ¦ ¦--expr: x [0/0] {139} + ¦ ¦ °--SYMBOL: x [0/0] {138} + ¦ ¦--'[': [ [0/0] {140} + ¦ ¦--expr: a ==3 [0/0] {141} + ¦ ¦ ¦--expr: a ==3 [0/2] {142} + ¦ ¦ ¦ ¦--expr: a [0/1] {144} + ¦ ¦ ¦ ¦ °--SYMBOL: a [0/0] {143} + ¦ ¦ ¦ ¦--EQ: == [0/0] {145} + ¦ ¦ ¦ °--expr: 3 [0/0] {147} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {146} + ¦ ¦ ¦--OR: | [1/2] {148} + ¦ ¦ °--expr: b == [0/0] {149} + ¦ ¦ ¦--expr: b [0/1] {151} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {150} + ¦ ¦ ¦--EQ: == [0/1] {152} + ¦ ¦ °--expr: v [0/0] {154} + ¦ ¦ °--SYMBOL: v [0/0] {153} + ¦ ¦--',': , [0/0] {155} + ¦ °--']': ] [0/0] {156} + ¦--expr: x[a = [2/0] {157} + ¦ ¦--expr: x [0/0] {159} + ¦ ¦ °--SYMBOL: x [0/0] {158} + ¦ ¦--'[': [ [0/0] {160} + ¦ ¦--expr: a ==3 [0/0] {161} + ¦ ¦ ¦--expr: a ==3 [0/1] {162} + ¦ ¦ ¦ ¦--expr: a [0/1] {164} + ¦ ¦ ¦ ¦ °--SYMBOL: a [0/0] {163} + ¦ ¦ ¦ ¦--EQ: == [0/0] {165} + ¦ ¦ ¦ °--expr: 3 [0/0] {167} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {166} + ¦ ¦ ¦--OR2: || [0/4] {168} + ¦ ¦ °--expr: b == [1/0] {169} + ¦ ¦ ¦--expr: b [0/1] {171} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {170} + ¦ ¦ ¦--EQ: == [0/1] {172} + ¦ ¦ °--expr: v [0/0] {174} + ¦ ¦ °--SYMBOL: v [0/0] {173} + ¦ ¦--',': , [0/0] {175} + ¦ °--']': ] [0/0] {176} + ¦--expr: x[a = [2/0] {177} + ¦ ¦--expr: x [0/0] {179} + ¦ ¦ °--SYMBOL: x [0/0] {178} + ¦ ¦--'[': [ [0/0] {180} + ¦ ¦--expr: a ==3 [0/0] {181} + ¦ ¦ ¦--expr: a ==3 [0/2] {182} + ¦ ¦ ¦ ¦--expr: a [0/1] {184} + ¦ ¦ ¦ ¦ °--SYMBOL: a [0/0] {183} + ¦ ¦ ¦ ¦--EQ: == [0/0] {185} + ¦ ¦ ¦ °--expr: 3 [0/0] {187} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {186} + ¦ ¦ ¦--OR2: || [1/2] {188} + ¦ ¦ °--expr: b == [0/0] {189} + ¦ ¦ ¦--expr: b [0/1] {191} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {190} + ¦ ¦ ¦--EQ: == [0/1] {192} + ¦ ¦ °--expr: v [0/0] {194} + ¦ ¦ °--SYMBOL: v [0/0] {193} + ¦ ¦--',': , [0/0] {195} + ¦ °--']': ] [0/0] {196} + ¦--expr: x[a = [2/0] {197} + ¦ ¦--expr: x [0/0] {199} + ¦ ¦ °--SYMBOL: x [0/0] {198} + ¦ ¦--'[': [ [0/0] {200} + ¦ ¦--expr: a ==3 [0/0] {201} + ¦ ¦ ¦--expr: a ==3 [0/2] {202} + ¦ ¦ ¦ ¦--expr: a [0/1] {204} + ¦ ¦ ¦ ¦ °--SYMBOL: a [0/0] {203} + ¦ ¦ ¦ ¦--EQ: == 
[0/0] {205} + ¦ ¦ ¦ °--expr: 3 [0/0] {207} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {206} + ¦ ¦ ¦--AND2: && [1/2] {208} + ¦ ¦ °--expr: b == [0/0] {209} + ¦ ¦ ¦--expr: b [0/1] {211} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {210} + ¦ ¦ ¦--EQ: == [0/1] {212} + ¦ ¦ °--expr: v [0/0] {214} + ¦ ¦ °--SYMBOL: v [0/0] {213} + ¦ ¦--',': , [0/0] {215} + ¦ °--']': ] [0/0] {216} + ¦--expr: x[a = [2/0] {217} + ¦ ¦--expr: x [0/0] {219} + ¦ ¦ °--SYMBOL: x [0/0] {218} + ¦ ¦--'[': [ [0/0] {220} + ¦ ¦--expr: a ==3 [0/0] {221} + ¦ ¦ ¦--expr: a ==3 [0/2] {222} + ¦ ¦ ¦ ¦--expr: a [0/1] {224} + ¦ ¦ ¦ ¦ °--SYMBOL: a [0/0] {223} + ¦ ¦ ¦ ¦--EQ: == [0/0] {225} + ¦ ¦ ¦ °--expr: 3 [0/0] {227} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {226} + ¦ ¦ ¦--AND: & [1/2] {228} + ¦ ¦ °--expr: b == [0/0] {229} + ¦ ¦ ¦--expr: b [0/1] {231} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {230} + ¦ ¦ ¦--EQ: == [0/1] {232} + ¦ ¦ °--expr: v [0/0] {234} + ¦ ¦ °--SYMBOL: v [0/0] {233} + ¦ ¦--',': , [0/0] {235} + ¦ °--']': ] [0/0] {236} + ¦--expr: x[a = [2/0] {237} + ¦ ¦--expr: x [0/0] {239} + ¦ ¦ °--SYMBOL: x [0/0] {238} + ¦ ¦--'[': [ [0/0] {240} + ¦ ¦--expr: a ==3 [0/0] {241} + ¦ ¦ ¦--expr: a ==3 [0/1] {242} + ¦ ¦ ¦ ¦--expr: a [0/1] {244} + ¦ ¦ ¦ ¦ °--SYMBOL: a [0/0] {243} + ¦ ¦ ¦ ¦--EQ: == [0/0] {245} + ¦ ¦ ¦ °--expr: 3 [0/0] {247} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {246} + ¦ ¦ ¦--AND2: && [0/4] {248} + ¦ ¦ °--expr: b == [1/0] {249} + ¦ ¦ ¦--expr: b [0/1] {251} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {250} + ¦ ¦ ¦--EQ: == [0/1] {252} + ¦ ¦ °--expr: v [0/0] {254} + ¦ ¦ °--SYMBOL: v [0/0] {253} + ¦ ¦--',': , [0/0] {255} + ¦ °--']': ] [0/0] {256} + °--expr: x[a = [2/0] {257} + ¦--expr: x [0/0] {259} + ¦ °--SYMBOL: x [0/0] {258} + ¦--'[': [ [0/0] {260} + ¦--expr: a ==3 [0/0] {261} + ¦ ¦--expr: a ==3 [0/1] {262} + ¦ ¦ ¦--expr: a [0/1] {264} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {263} + ¦ ¦ ¦--EQ: == [0/0] {265} + ¦ ¦ °--expr: 3 [0/0] {267} + ¦ ¦ °--NUM_CONST: 3 [0/0] {266} + ¦ ¦--AND: & [0/3] {268} + ¦ °--expr: b == [1/0] {269} + ¦ ¦--expr: b [0/1] {271} + ¦ ¦ °--SYMBOL: b [0/0] {270} + ¦ ¦--EQ: == [0/1] {272} + ¦ °--expr: v [0/0] {274} + ¦ °--SYMBOL: v [0/0] {273} + ¦--',': , [0/0] {275} + °--']': ] [0/0] {276} diff --git a/tests/testthat/indention_square_brackets/square_brackets_line_break-out.R b/tests/testthat/indention_square_brackets/square_brackets_line_break-out.R index 69e9cea51..ae6be434b 100644 --- a/tests/testthat/indention_square_brackets/square_brackets_line_break-out.R +++ b/tests/testthat/indention_square_brackets/square_brackets_line_break-out.R @@ -9,7 +9,42 @@ fac[ a, b ] + fac[ a, b ] + +fac[ + , `:`(a = b) +] + +fac[ + , `:`(a = b) +] + +fac[, `:`(a = c)] + +x[a == 3 | + b == v, ] + +x[a == 3 | + b == v, ] + +x[a == 3 || + b == v, ] + +x[a == 3 || + b == v, ] + +x[a == 3 && + b == v, ] + +x[a == 3 & + b == v, ] + +x[a == 3 && + b == v, ] + +x[a == 3 & + b == v, ] diff --git a/tests/testthat/insertion_comment_interaction/if_else_if_else_non_strict-in.R b/tests/testthat/insertion_comment_interaction/if_else_if_else_non_strict-in.R index 7296968b6..7699633d2 100644 --- a/tests/testthat/insertion_comment_interaction/if_else_if_else_non_strict-in.R +++ b/tests/testthat/insertion_comment_interaction/if_else_if_else_non_strict-in.R @@ -38,4 +38,3 @@ TRUE)NULL else if(FALSE)NULL else NULL if # comment (TRUE)NULL else if(FALSE)NULL else NULL - diff --git a/tests/testthat/insertion_comment_interaction/if_else_if_else_non_strict-in_tree b/tests/testthat/insertion_comment_interaction/if_else_if_else_non_strict-in_tree index 2588b2bfe..8604c95f4 100644 --- 
a/tests/testthat/insertion_comment_interaction/if_else_if_else_non_strict-in_tree +++ b/tests/testthat/insertion_comment_interaction/if_else_if_else_non_strict-in_tree @@ -1,256 +1,256 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} + ¦--expr: if(TR [0/0] {1} ¦ ¦--IF: if [0/0] {2} ¦ ¦--'(': ( [0/0] {3} - ¦ ¦--expr: [0/0] {5} + ¦ ¦--expr: TRUE [0/0] {5} ¦ ¦ °--NUM_CONST: TRUE [0/0] {4} ¦ ¦--')': ) [0/0] {6} - ¦ ¦--expr: [0/1] {8} + ¦ ¦--expr: NULL [0/1] {8} ¦ ¦ °--NULL_CONST: NULL [0/0] {7} ¦ ¦--ELSE: else [0/1] {9} - ¦ °--expr: [0/0] {10} + ¦ °--expr: if(FA [0/0] {10} ¦ ¦--IF: if [0/0] {11} ¦ ¦--'(': ( [0/0] {12} - ¦ ¦--expr: [0/0] {14} + ¦ ¦--expr: FALSE [0/0] {14} ¦ ¦ °--NUM_CONST: FALSE [0/0] {13} ¦ ¦--')': ) [0/0] {15} - ¦ ¦--expr: [0/1] {17} + ¦ ¦--expr: NULL [0/1] {17} ¦ ¦ °--NULL_CONST: NULL [0/0] {16} ¦ ¦--ELSE: else [0/1] {18} - ¦ °--expr: [0/0] {20} + ¦ °--expr: NULL [0/0] {20} ¦ °--NULL_CONST: NULL [0/0] {19} - ¦--expr: [2/1] {21} + ¦--expr: if(TR [2/1] {21} ¦ ¦--IF: if [0/0] {22} ¦ ¦--'(': ( [0/0] {23} - ¦ ¦--expr: [0/0] {25} + ¦ ¦--expr: TRUE [0/0] {25} ¦ ¦ °--NUM_CONST: TRUE [0/0] {24} ¦ ¦--')': ) [0/0] {26} - ¦ ¦--expr: [0/1] {28} + ¦ ¦--expr: NULL [0/1] {28} ¦ ¦ °--NULL_CONST: NULL [0/0] {27} ¦ ¦--ELSE: else [0/1] {29} - ¦ °--expr: [0/0] {30} + ¦ °--expr: if(FA [0/0] {30} ¦ ¦--IF: if [0/0] {31} ¦ ¦--'(': ( [0/0] {32} - ¦ ¦--expr: [0/0] {34} + ¦ ¦--expr: FALSE [0/0] {34} ¦ ¦ °--NUM_CONST: FALSE [0/0] {33} ¦ ¦--')': ) [0/0] {35} - ¦ ¦--expr: [0/1] {37} + ¦ ¦--expr: NULL [0/1] {37} ¦ ¦ °--NULL_CONST: NULL [0/0] {36} ¦ ¦--ELSE: else [0/1] {38} - ¦ °--expr: [0/0] {40} + ¦ °--expr: NULL [0/0] {40} ¦ °--NULL_CONST: NULL [0/0] {39} ¦--COMMENT: # com [0/0] {41} - ¦--expr: [3/0] {42} + ¦--expr: if(TR [3/0] {42} ¦ ¦--IF: if [0/0] {43} ¦ ¦--'(': ( [0/0] {44} - ¦ ¦--expr: [0/0] {46} + ¦ ¦--expr: TRUE [0/0] {46} ¦ ¦ °--NUM_CONST: TRUE [0/0] {45} ¦ ¦--')': ) [0/0] {47} - ¦ ¦--expr: [0/1] {49} + ¦ ¦--expr: NULL [0/1] {49} ¦ ¦ °--NULL_CONST: NULL [0/0] {48} ¦ ¦--ELSE: else [0/1] {50} - ¦ °--expr: [0/0] {51} + ¦ °--expr: if(FA [0/0] {51} ¦ ¦--IF: if [0/0] {52} ¦ ¦--'(': ( [0/0] {53} - ¦ ¦--expr: [0/0] {55} + ¦ ¦--expr: FALSE [0/0] {55} ¦ ¦ °--NUM_CONST: FALSE [0/0] {54} ¦ ¦--')': ) [0/0] {56} - ¦ ¦--expr: [0/1] {58} + ¦ ¦--expr: NULL [0/1] {58} ¦ ¦ °--NULL_CONST: NULL [0/0] {57} ¦ ¦--ELSE: else [0/1] {59} ¦ ¦--COMMENT: # com [0/1] {60} - ¦ °--expr: [1/0] {62} + ¦ °--expr: NULL [1/0] {62} ¦ °--NULL_CONST: NULL [0/0] {61} ¦--COMMENT: # if( [2/0] {63} ¦--COMMENT: # el [1/0] {64} - ¦--expr: [2/0] {65} + ¦--expr: if(TR [2/0] {65} ¦ ¦--IF: if [0/0] {66} ¦ ¦--'(': ( [0/0] {67} - ¦ ¦--expr: [0/0] {69} + ¦ ¦--expr: TRUE [0/0] {69} ¦ ¦ °--NUM_CONST: TRUE [0/0] {68} ¦ ¦--')': ) [0/0] {70} - ¦ ¦--expr: [0/1] {72} + ¦ ¦--expr: NULL [0/1] {72} ¦ ¦ °--NULL_CONST: NULL [0/0] {71} ¦ ¦--ELSE: else [0/1] {73} - ¦ °--expr: [0/0] {74} + ¦ °--expr: if(FA [0/0] {74} ¦ ¦--IF: if [0/0] {75} ¦ ¦--'(': ( [0/0] {76} - ¦ ¦--expr: [0/0] {78} + ¦ ¦--expr: FALSE [0/0] {78} ¦ ¦ °--NUM_CONST: FALSE [0/0] {77} ¦ ¦--')': ) [0/1] {79} ¦ ¦--COMMENT: # com [0/0] {80} - ¦ ¦--expr: [1/1] {82} + ¦ ¦--expr: NULL [1/1] {82} ¦ ¦ °--NULL_CONST: NULL [0/0] {81} ¦ ¦--ELSE: else [0/1] {83} - ¦ °--expr: [0/0] {85} + ¦ °--expr: NULL [0/0] {85} ¦ °--NULL_CONST: NULL [0/0] {84} - ¦--expr: [2/0] {86} + ¦--expr: if(TR [2/0] {86} ¦ ¦--IF: if [0/0] {87} ¦ ¦--'(': ( [0/0] {88} - ¦ ¦--expr: [0/0] {90} + ¦ ¦--expr: TRUE [0/0] {90} ¦ ¦ °--NUM_CONST: TRUE [0/0] {89} ¦ ¦--')': ) [0/0] {91} - ¦ ¦--expr: [0/1] {93} + ¦ 
¦--expr: NULL [0/1] {93} ¦ ¦ °--NULL_CONST: NULL [0/0] {92} ¦ ¦--ELSE: else [0/1] {94} - ¦ °--expr: [0/0] {95} + ¦ °--expr: if(FA [0/0] {95} ¦ ¦--IF: if [0/0] {96} ¦ ¦--'(': ( [0/0] {97} - ¦ ¦--expr: [0/1] {99} + ¦ ¦--expr: FALSE [0/1] {99} ¦ ¦ °--NUM_CONST: FALSE [0/0] {98} ¦ ¦--COMMENT: # com [0/0] {100} ¦ ¦--')': ) [1/0] {101} - ¦ ¦--expr: [0/1] {103} + ¦ ¦--expr: NULL [0/1] {103} ¦ ¦ °--NULL_CONST: NULL [0/0] {102} ¦ ¦--ELSE: else [0/1] {104} - ¦ °--expr: [0/0] {106} + ¦ °--expr: NULL [0/0] {106} ¦ °--NULL_CONST: NULL [0/0] {105} - ¦--expr: [2/0] {107} + ¦--expr: if(TR [2/0] {107} ¦ ¦--IF: if [0/0] {108} ¦ ¦--'(': ( [0/0] {109} - ¦ ¦--expr: [0/0] {111} + ¦ ¦--expr: TRUE [0/0] {111} ¦ ¦ °--NUM_CONST: TRUE [0/0] {110} ¦ ¦--')': ) [0/0] {112} - ¦ ¦--expr: [0/1] {114} + ¦ ¦--expr: NULL [0/1] {114} ¦ ¦ °--NULL_CONST: NULL [0/0] {113} ¦ ¦--ELSE: else [0/1] {115} - ¦ °--expr: [0/0] {116} + ¦ °--expr: if( # [0/0] {116} ¦ ¦--IF: if [0/0] {117} ¦ ¦--'(': ( [0/1] {118} ¦ ¦--COMMENT: # com [0/0] {119} - ¦ ¦--expr: [1/0] {121} + ¦ ¦--expr: FALSE [1/0] {121} ¦ ¦ °--NUM_CONST: FALSE [0/0] {120} ¦ ¦--')': ) [0/0] {122} - ¦ ¦--expr: [0/1] {124} + ¦ ¦--expr: NULL [0/1] {124} ¦ ¦ °--NULL_CONST: NULL [0/0] {123} ¦ ¦--ELSE: else [0/1] {125} - ¦ °--expr: [0/0] {127} + ¦ °--expr: NULL [0/0] {127} ¦ °--NULL_CONST: NULL [0/0] {126} - ¦--expr: [2/0] {128} + ¦--expr: if(TR [2/0] {128} ¦ ¦--IF: if [0/0] {129} ¦ ¦--'(': ( [0/0] {130} - ¦ ¦--expr: [0/0] {132} + ¦ ¦--expr: TRUE [0/0] {132} ¦ ¦ °--NUM_CONST: TRUE [0/0] {131} ¦ ¦--')': ) [0/0] {133} - ¦ ¦--expr: [0/1] {135} + ¦ ¦--expr: NULL [0/1] {135} ¦ ¦ °--NULL_CONST: NULL [0/0] {134} ¦ ¦--ELSE: else [0/1] {136} - ¦ °--expr: [0/0] {137} + ¦ °--expr: if # [0/0] {137} ¦ ¦--IF: if [0/1] {138} ¦ ¦--COMMENT: # com [0/0] {139} ¦ ¦--'(': ( [1/0] {140} - ¦ ¦--expr: [0/0] {142} + ¦ ¦--expr: FALSE [0/0] {142} ¦ ¦ °--NUM_CONST: FALSE [0/0] {141} ¦ ¦--')': ) [0/0] {143} - ¦ ¦--expr: [0/1] {145} + ¦ ¦--expr: NULL [0/1] {145} ¦ ¦ °--NULL_CONST: NULL [0/0] {144} ¦ ¦--ELSE: else [0/1] {146} - ¦ °--expr: [0/0] {148} + ¦ °--expr: NULL [0/0] {148} ¦ °--NULL_CONST: NULL [0/0] {147} - ¦--expr: [2/0] {149} + ¦--expr: if(TR [2/0] {149} ¦ ¦--IF: if [0/0] {150} ¦ ¦--'(': ( [0/0] {151} - ¦ ¦--expr: [0/0] {153} + ¦ ¦--expr: TRUE [0/0] {153} ¦ ¦ °--NUM_CONST: TRUE [0/0] {152} ¦ ¦--')': ) [0/0] {154} - ¦ ¦--expr: [0/1] {156} + ¦ ¦--expr: NULL [0/1] {156} ¦ ¦ °--NULL_CONST: NULL [0/0] {155} ¦ ¦--ELSE: else [0/1] {157} ¦ ¦--COMMENT: # com [0/1] {158} - ¦ °--expr: [1/0] {159} + ¦ °--expr: if(FA [1/0] {159} ¦ ¦--IF: if [0/0] {160} ¦ ¦--'(': ( [0/0] {161} - ¦ ¦--expr: [0/0] {163} + ¦ ¦--expr: FALSE [0/0] {163} ¦ ¦ °--NUM_CONST: FALSE [0/0] {162} ¦ ¦--')': ) [0/0] {164} - ¦ ¦--expr: [0/1] {166} + ¦ ¦--expr: NULL [0/1] {166} ¦ ¦ °--NULL_CONST: NULL [0/0] {165} ¦ ¦--ELSE: else [0/1] {167} - ¦ °--expr: [0/0] {169} + ¦ °--expr: NULL [0/0] {169} ¦ °--NULL_CONST: NULL [0/0] {168} ¦--COMMENT: # if( [2/0] {170} ¦--COMMENT: # el [1/0] {171} - ¦--expr: [2/0] {172} + ¦--expr: if(TR [2/0] {172} ¦ ¦--IF: if [0/0] {173} ¦ ¦--'(': ( [0/0] {174} - ¦ ¦--expr: [0/0] {176} + ¦ ¦--expr: TRUE [0/0] {176} ¦ ¦ °--NUM_CONST: TRUE [0/0] {175} ¦ ¦--')': ) [0/1] {177} ¦ ¦--COMMENT: # com [0/0] {178} - ¦ ¦--expr: [1/1] {180} + ¦ ¦--expr: NULL [1/1] {180} ¦ ¦ °--NULL_CONST: NULL [0/0] {179} ¦ ¦--ELSE: else [0/1] {181} - ¦ °--expr: [0/0] {182} + ¦ °--expr: if(FA [0/0] {182} ¦ ¦--IF: if [0/0] {183} ¦ ¦--'(': ( [0/0] {184} - ¦ ¦--expr: [0/0] {186} + ¦ ¦--expr: FALSE [0/0] {186} ¦ ¦ °--NUM_CONST: FALSE [0/0] {185} ¦ ¦--')': ) 
[0/0] {187} - ¦ ¦--expr: [0/1] {189} + ¦ ¦--expr: NULL [0/1] {189} ¦ ¦ °--NULL_CONST: NULL [0/0] {188} ¦ ¦--ELSE: else [0/1] {190} - ¦ °--expr: [0/0] {192} + ¦ °--expr: NULL [0/0] {192} ¦ °--NULL_CONST: NULL [0/0] {191} - ¦--expr: [2/0] {193} + ¦--expr: if(TR [2/0] {193} ¦ ¦--IF: if [0/0] {194} ¦ ¦--'(': ( [0/0] {195} - ¦ ¦--expr: [0/1] {197} + ¦ ¦--expr: TRUE [0/1] {197} ¦ ¦ °--NUM_CONST: TRUE [0/0] {196} ¦ ¦--COMMENT: # com [0/0] {198} ¦ ¦--')': ) [1/0] {199} - ¦ ¦--expr: [0/1] {201} + ¦ ¦--expr: NULL [0/1] {201} ¦ ¦ °--NULL_CONST: NULL [0/0] {200} ¦ ¦--ELSE: else [0/1] {202} - ¦ °--expr: [0/0] {203} + ¦ °--expr: if(FA [0/0] {203} ¦ ¦--IF: if [0/0] {204} ¦ ¦--'(': ( [0/0] {205} - ¦ ¦--expr: [0/0] {207} + ¦ ¦--expr: FALSE [0/0] {207} ¦ ¦ °--NUM_CONST: FALSE [0/0] {206} ¦ ¦--')': ) [0/0] {208} - ¦ ¦--expr: [0/1] {210} + ¦ ¦--expr: NULL [0/1] {210} ¦ ¦ °--NULL_CONST: NULL [0/0] {209} ¦ ¦--ELSE: else [0/1] {211} - ¦ °--expr: [0/0] {213} + ¦ °--expr: NULL [0/0] {213} ¦ °--NULL_CONST: NULL [0/0] {212} - ¦--expr: [2/0] {214} + ¦--expr: if( # [2/0] {214} ¦ ¦--IF: if [0/0] {215} ¦ ¦--'(': ( [0/1] {216} ¦ ¦--COMMENT: # com [0/0] {217} - ¦ ¦--expr: [1/0] {219} + ¦ ¦--expr: TRUE [1/0] {219} ¦ ¦ °--NUM_CONST: TRUE [0/0] {218} ¦ ¦--')': ) [0/0] {220} - ¦ ¦--expr: [0/1] {222} + ¦ ¦--expr: NULL [0/1] {222} ¦ ¦ °--NULL_CONST: NULL [0/0] {221} ¦ ¦--ELSE: else [0/1] {223} - ¦ °--expr: [0/0] {224} + ¦ °--expr: if(FA [0/0] {224} ¦ ¦--IF: if [0/0] {225} ¦ ¦--'(': ( [0/0] {226} - ¦ ¦--expr: [0/0] {228} + ¦ ¦--expr: FALSE [0/0] {228} ¦ ¦ °--NUM_CONST: FALSE [0/0] {227} ¦ ¦--')': ) [0/0] {229} - ¦ ¦--expr: [0/1] {231} + ¦ ¦--expr: NULL [0/1] {231} ¦ ¦ °--NULL_CONST: NULL [0/0] {230} ¦ ¦--ELSE: else [0/1] {232} - ¦ °--expr: [0/0] {234} + ¦ °--expr: NULL [0/0] {234} ¦ °--NULL_CONST: NULL [0/0] {233} - °--expr: [2/0] {235} + °--expr: if # [2/0] {235} ¦--IF: if [0/1] {236} ¦--COMMENT: # com [0/0] {237} ¦--'(': ( [1/0] {238} - ¦--expr: [0/0] {240} + ¦--expr: TRUE [0/0] {240} ¦ °--NUM_CONST: TRUE [0/0] {239} ¦--')': ) [0/0] {241} - ¦--expr: [0/1] {243} + ¦--expr: NULL [0/1] {243} ¦ °--NULL_CONST: NULL [0/0] {242} ¦--ELSE: else [0/1] {244} - °--expr: [0/0] {245} + °--expr: if(FA [0/0] {245} ¦--IF: if [0/0] {246} ¦--'(': ( [0/0] {247} - ¦--expr: [0/0] {249} + ¦--expr: FALSE [0/0] {249} ¦ °--NUM_CONST: FALSE [0/0] {248} ¦--')': ) [0/0] {250} - ¦--expr: [0/1] {252} + ¦--expr: NULL [0/1] {252} ¦ °--NULL_CONST: NULL [0/0] {251} ¦--ELSE: else [0/1] {253} - °--expr: [0/0] {255} + °--expr: NULL [0/0] {255} °--NULL_CONST: NULL [0/0] {254} diff --git a/tests/testthat/insertion_comment_interaction/if_else_if_else_strict-in.R b/tests/testthat/insertion_comment_interaction/if_else_if_else_strict-in.R index 7296968b6..7699633d2 100644 --- a/tests/testthat/insertion_comment_interaction/if_else_if_else_strict-in.R +++ b/tests/testthat/insertion_comment_interaction/if_else_if_else_strict-in.R @@ -38,4 +38,3 @@ TRUE)NULL else if(FALSE)NULL else NULL if # comment (TRUE)NULL else if(FALSE)NULL else NULL - diff --git a/tests/testthat/insertion_comment_interaction/if_else_if_else_strict-in_tree b/tests/testthat/insertion_comment_interaction/if_else_if_else_strict-in_tree index 2588b2bfe..8604c95f4 100644 --- a/tests/testthat/insertion_comment_interaction/if_else_if_else_strict-in_tree +++ b/tests/testthat/insertion_comment_interaction/if_else_if_else_strict-in_tree @@ -1,256 +1,256 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} + ¦--expr: if(TR [0/0] {1} ¦ ¦--IF: if [0/0] {2} ¦ ¦--'(': ( [0/0] {3} - ¦ 
¦--expr: [0/0] {5} + ¦ ¦--expr: TRUE [0/0] {5} ¦ ¦ °--NUM_CONST: TRUE [0/0] {4} ¦ ¦--')': ) [0/0] {6} - ¦ ¦--expr: [0/1] {8} + ¦ ¦--expr: NULL [0/1] {8} ¦ ¦ °--NULL_CONST: NULL [0/0] {7} ¦ ¦--ELSE: else [0/1] {9} - ¦ °--expr: [0/0] {10} + ¦ °--expr: if(FA [0/0] {10} ¦ ¦--IF: if [0/0] {11} ¦ ¦--'(': ( [0/0] {12} - ¦ ¦--expr: [0/0] {14} + ¦ ¦--expr: FALSE [0/0] {14} ¦ ¦ °--NUM_CONST: FALSE [0/0] {13} ¦ ¦--')': ) [0/0] {15} - ¦ ¦--expr: [0/1] {17} + ¦ ¦--expr: NULL [0/1] {17} ¦ ¦ °--NULL_CONST: NULL [0/0] {16} ¦ ¦--ELSE: else [0/1] {18} - ¦ °--expr: [0/0] {20} + ¦ °--expr: NULL [0/0] {20} ¦ °--NULL_CONST: NULL [0/0] {19} - ¦--expr: [2/1] {21} + ¦--expr: if(TR [2/1] {21} ¦ ¦--IF: if [0/0] {22} ¦ ¦--'(': ( [0/0] {23} - ¦ ¦--expr: [0/0] {25} + ¦ ¦--expr: TRUE [0/0] {25} ¦ ¦ °--NUM_CONST: TRUE [0/0] {24} ¦ ¦--')': ) [0/0] {26} - ¦ ¦--expr: [0/1] {28} + ¦ ¦--expr: NULL [0/1] {28} ¦ ¦ °--NULL_CONST: NULL [0/0] {27} ¦ ¦--ELSE: else [0/1] {29} - ¦ °--expr: [0/0] {30} + ¦ °--expr: if(FA [0/0] {30} ¦ ¦--IF: if [0/0] {31} ¦ ¦--'(': ( [0/0] {32} - ¦ ¦--expr: [0/0] {34} + ¦ ¦--expr: FALSE [0/0] {34} ¦ ¦ °--NUM_CONST: FALSE [0/0] {33} ¦ ¦--')': ) [0/0] {35} - ¦ ¦--expr: [0/1] {37} + ¦ ¦--expr: NULL [0/1] {37} ¦ ¦ °--NULL_CONST: NULL [0/0] {36} ¦ ¦--ELSE: else [0/1] {38} - ¦ °--expr: [0/0] {40} + ¦ °--expr: NULL [0/0] {40} ¦ °--NULL_CONST: NULL [0/0] {39} ¦--COMMENT: # com [0/0] {41} - ¦--expr: [3/0] {42} + ¦--expr: if(TR [3/0] {42} ¦ ¦--IF: if [0/0] {43} ¦ ¦--'(': ( [0/0] {44} - ¦ ¦--expr: [0/0] {46} + ¦ ¦--expr: TRUE [0/0] {46} ¦ ¦ °--NUM_CONST: TRUE [0/0] {45} ¦ ¦--')': ) [0/0] {47} - ¦ ¦--expr: [0/1] {49} + ¦ ¦--expr: NULL [0/1] {49} ¦ ¦ °--NULL_CONST: NULL [0/0] {48} ¦ ¦--ELSE: else [0/1] {50} - ¦ °--expr: [0/0] {51} + ¦ °--expr: if(FA [0/0] {51} ¦ ¦--IF: if [0/0] {52} ¦ ¦--'(': ( [0/0] {53} - ¦ ¦--expr: [0/0] {55} + ¦ ¦--expr: FALSE [0/0] {55} ¦ ¦ °--NUM_CONST: FALSE [0/0] {54} ¦ ¦--')': ) [0/0] {56} - ¦ ¦--expr: [0/1] {58} + ¦ ¦--expr: NULL [0/1] {58} ¦ ¦ °--NULL_CONST: NULL [0/0] {57} ¦ ¦--ELSE: else [0/1] {59} ¦ ¦--COMMENT: # com [0/1] {60} - ¦ °--expr: [1/0] {62} + ¦ °--expr: NULL [1/0] {62} ¦ °--NULL_CONST: NULL [0/0] {61} ¦--COMMENT: # if( [2/0] {63} ¦--COMMENT: # el [1/0] {64} - ¦--expr: [2/0] {65} + ¦--expr: if(TR [2/0] {65} ¦ ¦--IF: if [0/0] {66} ¦ ¦--'(': ( [0/0] {67} - ¦ ¦--expr: [0/0] {69} + ¦ ¦--expr: TRUE [0/0] {69} ¦ ¦ °--NUM_CONST: TRUE [0/0] {68} ¦ ¦--')': ) [0/0] {70} - ¦ ¦--expr: [0/1] {72} + ¦ ¦--expr: NULL [0/1] {72} ¦ ¦ °--NULL_CONST: NULL [0/0] {71} ¦ ¦--ELSE: else [0/1] {73} - ¦ °--expr: [0/0] {74} + ¦ °--expr: if(FA [0/0] {74} ¦ ¦--IF: if [0/0] {75} ¦ ¦--'(': ( [0/0] {76} - ¦ ¦--expr: [0/0] {78} + ¦ ¦--expr: FALSE [0/0] {78} ¦ ¦ °--NUM_CONST: FALSE [0/0] {77} ¦ ¦--')': ) [0/1] {79} ¦ ¦--COMMENT: # com [0/0] {80} - ¦ ¦--expr: [1/1] {82} + ¦ ¦--expr: NULL [1/1] {82} ¦ ¦ °--NULL_CONST: NULL [0/0] {81} ¦ ¦--ELSE: else [0/1] {83} - ¦ °--expr: [0/0] {85} + ¦ °--expr: NULL [0/0] {85} ¦ °--NULL_CONST: NULL [0/0] {84} - ¦--expr: [2/0] {86} + ¦--expr: if(TR [2/0] {86} ¦ ¦--IF: if [0/0] {87} ¦ ¦--'(': ( [0/0] {88} - ¦ ¦--expr: [0/0] {90} + ¦ ¦--expr: TRUE [0/0] {90} ¦ ¦ °--NUM_CONST: TRUE [0/0] {89} ¦ ¦--')': ) [0/0] {91} - ¦ ¦--expr: [0/1] {93} + ¦ ¦--expr: NULL [0/1] {93} ¦ ¦ °--NULL_CONST: NULL [0/0] {92} ¦ ¦--ELSE: else [0/1] {94} - ¦ °--expr: [0/0] {95} + ¦ °--expr: if(FA [0/0] {95} ¦ ¦--IF: if [0/0] {96} ¦ ¦--'(': ( [0/0] {97} - ¦ ¦--expr: [0/1] {99} + ¦ ¦--expr: FALSE [0/1] {99} ¦ ¦ °--NUM_CONST: FALSE [0/0] {98} ¦ ¦--COMMENT: # com [0/0] {100} ¦ ¦--')': ) [1/0] {101} - ¦ ¦--expr: 
[0/1] {103} + ¦ ¦--expr: NULL [0/1] {103} ¦ ¦ °--NULL_CONST: NULL [0/0] {102} ¦ ¦--ELSE: else [0/1] {104} - ¦ °--expr: [0/0] {106} + ¦ °--expr: NULL [0/0] {106} ¦ °--NULL_CONST: NULL [0/0] {105} - ¦--expr: [2/0] {107} + ¦--expr: if(TR [2/0] {107} ¦ ¦--IF: if [0/0] {108} ¦ ¦--'(': ( [0/0] {109} - ¦ ¦--expr: [0/0] {111} + ¦ ¦--expr: TRUE [0/0] {111} ¦ ¦ °--NUM_CONST: TRUE [0/0] {110} ¦ ¦--')': ) [0/0] {112} - ¦ ¦--expr: [0/1] {114} + ¦ ¦--expr: NULL [0/1] {114} ¦ ¦ °--NULL_CONST: NULL [0/0] {113} ¦ ¦--ELSE: else [0/1] {115} - ¦ °--expr: [0/0] {116} + ¦ °--expr: if( # [0/0] {116} ¦ ¦--IF: if [0/0] {117} ¦ ¦--'(': ( [0/1] {118} ¦ ¦--COMMENT: # com [0/0] {119} - ¦ ¦--expr: [1/0] {121} + ¦ ¦--expr: FALSE [1/0] {121} ¦ ¦ °--NUM_CONST: FALSE [0/0] {120} ¦ ¦--')': ) [0/0] {122} - ¦ ¦--expr: [0/1] {124} + ¦ ¦--expr: NULL [0/1] {124} ¦ ¦ °--NULL_CONST: NULL [0/0] {123} ¦ ¦--ELSE: else [0/1] {125} - ¦ °--expr: [0/0] {127} + ¦ °--expr: NULL [0/0] {127} ¦ °--NULL_CONST: NULL [0/0] {126} - ¦--expr: [2/0] {128} + ¦--expr: if(TR [2/0] {128} ¦ ¦--IF: if [0/0] {129} ¦ ¦--'(': ( [0/0] {130} - ¦ ¦--expr: [0/0] {132} + ¦ ¦--expr: TRUE [0/0] {132} ¦ ¦ °--NUM_CONST: TRUE [0/0] {131} ¦ ¦--')': ) [0/0] {133} - ¦ ¦--expr: [0/1] {135} + ¦ ¦--expr: NULL [0/1] {135} ¦ ¦ °--NULL_CONST: NULL [0/0] {134} ¦ ¦--ELSE: else [0/1] {136} - ¦ °--expr: [0/0] {137} + ¦ °--expr: if # [0/0] {137} ¦ ¦--IF: if [0/1] {138} ¦ ¦--COMMENT: # com [0/0] {139} ¦ ¦--'(': ( [1/0] {140} - ¦ ¦--expr: [0/0] {142} + ¦ ¦--expr: FALSE [0/0] {142} ¦ ¦ °--NUM_CONST: FALSE [0/0] {141} ¦ ¦--')': ) [0/0] {143} - ¦ ¦--expr: [0/1] {145} + ¦ ¦--expr: NULL [0/1] {145} ¦ ¦ °--NULL_CONST: NULL [0/0] {144} ¦ ¦--ELSE: else [0/1] {146} - ¦ °--expr: [0/0] {148} + ¦ °--expr: NULL [0/0] {148} ¦ °--NULL_CONST: NULL [0/0] {147} - ¦--expr: [2/0] {149} + ¦--expr: if(TR [2/0] {149} ¦ ¦--IF: if [0/0] {150} ¦ ¦--'(': ( [0/0] {151} - ¦ ¦--expr: [0/0] {153} + ¦ ¦--expr: TRUE [0/0] {153} ¦ ¦ °--NUM_CONST: TRUE [0/0] {152} ¦ ¦--')': ) [0/0] {154} - ¦ ¦--expr: [0/1] {156} + ¦ ¦--expr: NULL [0/1] {156} ¦ ¦ °--NULL_CONST: NULL [0/0] {155} ¦ ¦--ELSE: else [0/1] {157} ¦ ¦--COMMENT: # com [0/1] {158} - ¦ °--expr: [1/0] {159} + ¦ °--expr: if(FA [1/0] {159} ¦ ¦--IF: if [0/0] {160} ¦ ¦--'(': ( [0/0] {161} - ¦ ¦--expr: [0/0] {163} + ¦ ¦--expr: FALSE [0/0] {163} ¦ ¦ °--NUM_CONST: FALSE [0/0] {162} ¦ ¦--')': ) [0/0] {164} - ¦ ¦--expr: [0/1] {166} + ¦ ¦--expr: NULL [0/1] {166} ¦ ¦ °--NULL_CONST: NULL [0/0] {165} ¦ ¦--ELSE: else [0/1] {167} - ¦ °--expr: [0/0] {169} + ¦ °--expr: NULL [0/0] {169} ¦ °--NULL_CONST: NULL [0/0] {168} ¦--COMMENT: # if( [2/0] {170} ¦--COMMENT: # el [1/0] {171} - ¦--expr: [2/0] {172} + ¦--expr: if(TR [2/0] {172} ¦ ¦--IF: if [0/0] {173} ¦ ¦--'(': ( [0/0] {174} - ¦ ¦--expr: [0/0] {176} + ¦ ¦--expr: TRUE [0/0] {176} ¦ ¦ °--NUM_CONST: TRUE [0/0] {175} ¦ ¦--')': ) [0/1] {177} ¦ ¦--COMMENT: # com [0/0] {178} - ¦ ¦--expr: [1/1] {180} + ¦ ¦--expr: NULL [1/1] {180} ¦ ¦ °--NULL_CONST: NULL [0/0] {179} ¦ ¦--ELSE: else [0/1] {181} - ¦ °--expr: [0/0] {182} + ¦ °--expr: if(FA [0/0] {182} ¦ ¦--IF: if [0/0] {183} ¦ ¦--'(': ( [0/0] {184} - ¦ ¦--expr: [0/0] {186} + ¦ ¦--expr: FALSE [0/0] {186} ¦ ¦ °--NUM_CONST: FALSE [0/0] {185} ¦ ¦--')': ) [0/0] {187} - ¦ ¦--expr: [0/1] {189} + ¦ ¦--expr: NULL [0/1] {189} ¦ ¦ °--NULL_CONST: NULL [0/0] {188} ¦ ¦--ELSE: else [0/1] {190} - ¦ °--expr: [0/0] {192} + ¦ °--expr: NULL [0/0] {192} ¦ °--NULL_CONST: NULL [0/0] {191} - ¦--expr: [2/0] {193} + ¦--expr: if(TR [2/0] {193} ¦ ¦--IF: if [0/0] {194} ¦ ¦--'(': ( [0/0] {195} - ¦ ¦--expr: [0/1] {197} + ¦ 
¦--expr: TRUE [0/1] {197} ¦ ¦ °--NUM_CONST: TRUE [0/0] {196} ¦ ¦--COMMENT: # com [0/0] {198} ¦ ¦--')': ) [1/0] {199} - ¦ ¦--expr: [0/1] {201} + ¦ ¦--expr: NULL [0/1] {201} ¦ ¦ °--NULL_CONST: NULL [0/0] {200} ¦ ¦--ELSE: else [0/1] {202} - ¦ °--expr: [0/0] {203} + ¦ °--expr: if(FA [0/0] {203} ¦ ¦--IF: if [0/0] {204} ¦ ¦--'(': ( [0/0] {205} - ¦ ¦--expr: [0/0] {207} + ¦ ¦--expr: FALSE [0/0] {207} ¦ ¦ °--NUM_CONST: FALSE [0/0] {206} ¦ ¦--')': ) [0/0] {208} - ¦ ¦--expr: [0/1] {210} + ¦ ¦--expr: NULL [0/1] {210} ¦ ¦ °--NULL_CONST: NULL [0/0] {209} ¦ ¦--ELSE: else [0/1] {211} - ¦ °--expr: [0/0] {213} + ¦ °--expr: NULL [0/0] {213} ¦ °--NULL_CONST: NULL [0/0] {212} - ¦--expr: [2/0] {214} + ¦--expr: if( # [2/0] {214} ¦ ¦--IF: if [0/0] {215} ¦ ¦--'(': ( [0/1] {216} ¦ ¦--COMMENT: # com [0/0] {217} - ¦ ¦--expr: [1/0] {219} + ¦ ¦--expr: TRUE [1/0] {219} ¦ ¦ °--NUM_CONST: TRUE [0/0] {218} ¦ ¦--')': ) [0/0] {220} - ¦ ¦--expr: [0/1] {222} + ¦ ¦--expr: NULL [0/1] {222} ¦ ¦ °--NULL_CONST: NULL [0/0] {221} ¦ ¦--ELSE: else [0/1] {223} - ¦ °--expr: [0/0] {224} + ¦ °--expr: if(FA [0/0] {224} ¦ ¦--IF: if [0/0] {225} ¦ ¦--'(': ( [0/0] {226} - ¦ ¦--expr: [0/0] {228} + ¦ ¦--expr: FALSE [0/0] {228} ¦ ¦ °--NUM_CONST: FALSE [0/0] {227} ¦ ¦--')': ) [0/0] {229} - ¦ ¦--expr: [0/1] {231} + ¦ ¦--expr: NULL [0/1] {231} ¦ ¦ °--NULL_CONST: NULL [0/0] {230} ¦ ¦--ELSE: else [0/1] {232} - ¦ °--expr: [0/0] {234} + ¦ °--expr: NULL [0/0] {234} ¦ °--NULL_CONST: NULL [0/0] {233} - °--expr: [2/0] {235} + °--expr: if # [2/0] {235} ¦--IF: if [0/1] {236} ¦--COMMENT: # com [0/0] {237} ¦--'(': ( [1/0] {238} - ¦--expr: [0/0] {240} + ¦--expr: TRUE [0/0] {240} ¦ °--NUM_CONST: TRUE [0/0] {239} ¦--')': ) [0/0] {241} - ¦--expr: [0/1] {243} + ¦--expr: NULL [0/1] {243} ¦ °--NULL_CONST: NULL [0/0] {242} ¦--ELSE: else [0/1] {244} - °--expr: [0/0] {245} + °--expr: if(FA [0/0] {245} ¦--IF: if [0/0] {246} ¦--'(': ( [0/0] {247} - ¦--expr: [0/0] {249} + ¦--expr: FALSE [0/0] {249} ¦ °--NUM_CONST: FALSE [0/0] {248} ¦--')': ) [0/0] {250} - ¦--expr: [0/1] {252} + ¦--expr: NULL [0/1] {252} ¦ °--NULL_CONST: NULL [0/0] {251} ¦--ELSE: else [0/1] {253} - °--expr: [0/0] {255} + °--expr: NULL [0/0] {255} °--NULL_CONST: NULL [0/0] {254} diff --git a/tests/testthat/insertion_comment_interaction/if_else_non_strict-in_tree b/tests/testthat/insertion_comment_interaction/if_else_non_strict-in_tree index 3dbeca382..7d277ba18 100644 --- a/tests/testthat/insertion_comment_interaction/if_else_non_strict-in_tree +++ b/tests/testthat/insertion_comment_interaction/if_else_non_strict-in_tree @@ -1,86 +1,86 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} + ¦--expr: if(TR [0/0] {1} ¦ ¦--IF: if [0/0] {2} ¦ ¦--'(': ( [0/0] {3} - ¦ ¦--expr: [0/0] {5} + ¦ ¦--expr: TRUE [0/0] {5} ¦ ¦ °--NUM_CONST: TRUE [0/0] {4} ¦ ¦--')': ) [0/0] {6} - ¦ ¦--expr: [0/1] {8} + ¦ ¦--expr: NULL [0/1] {8} ¦ ¦ °--NULL_CONST: NULL [0/0] {7} ¦ ¦--ELSE: else [0/1] {9} - ¦ °--expr: [0/0] {11} + ¦ °--expr: NULL [0/0] {11} ¦ °--NULL_CONST: NULL [0/0] {10} - ¦--expr: [2/1] {12} + ¦--expr: if(TR [2/1] {12} ¦ ¦--IF: if [0/0] {13} ¦ ¦--'(': ( [0/0] {14} - ¦ ¦--expr: [0/0] {16} + ¦ ¦--expr: TRUE [0/0] {16} ¦ ¦ °--NUM_CONST: TRUE [0/0] {15} ¦ ¦--')': ) [0/0] {17} - ¦ ¦--expr: [0/1] {19} + ¦ ¦--expr: NULL [0/1] {19} ¦ ¦ °--NULL_CONST: NULL [0/0] {18} ¦ ¦--ELSE: else [0/1] {20} - ¦ °--expr: [0/0] {22} + ¦ °--expr: NULL [0/0] {22} ¦ °--NULL_CONST: NULL [0/0] {21} ¦--COMMENT: # com [0/0] {23} - ¦--expr: [3/0] {24} + ¦--expr: if(TR [3/0] {24} ¦ ¦--IF: if [0/0] {25} ¦ ¦--'(': ( [0/0] {26} - ¦ 
¦--expr: [0/0] {28} + ¦ ¦--expr: TRUE [0/0] {28} ¦ ¦ °--NUM_CONST: TRUE [0/0] {27} ¦ ¦--')': ) [0/0] {29} - ¦ ¦--expr: [0/1] {31} + ¦ ¦--expr: NULL [0/1] {31} ¦ ¦ °--NULL_CONST: NULL [0/0] {30} ¦ ¦--ELSE: else [0/1] {32} ¦ ¦--COMMENT: # com [0/1] {33} - ¦ °--expr: [1/0] {35} + ¦ °--expr: NULL [1/0] {35} ¦ °--NULL_CONST: NULL [0/0] {34} ¦--COMMENT: # if( [2/0] {36} ¦--COMMENT: # el [1/0] {37} - ¦--expr: [2/0] {38} + ¦--expr: if(TR [2/0] {38} ¦ ¦--IF: if [0/0] {39} ¦ ¦--'(': ( [0/0] {40} - ¦ ¦--expr: [0/0] {42} + ¦ ¦--expr: TRUE [0/0] {42} ¦ ¦ °--NUM_CONST: TRUE [0/0] {41} ¦ ¦--')': ) [0/1] {43} ¦ ¦--COMMENT: # com [0/0] {44} - ¦ ¦--expr: [1/1] {46} + ¦ ¦--expr: NULL [1/1] {46} ¦ ¦ °--NULL_CONST: NULL [0/0] {45} ¦ ¦--ELSE: else [0/1] {47} - ¦ °--expr: [0/0] {49} + ¦ °--expr: NULL [0/0] {49} ¦ °--NULL_CONST: NULL [0/0] {48} - ¦--expr: [2/0] {50} + ¦--expr: if(TR [2/0] {50} ¦ ¦--IF: if [0/0] {51} ¦ ¦--'(': ( [0/0] {52} - ¦ ¦--expr: [0/1] {54} + ¦ ¦--expr: TRUE [0/1] {54} ¦ ¦ °--NUM_CONST: TRUE [0/0] {53} ¦ ¦--COMMENT: # com [0/0] {55} ¦ ¦--')': ) [1/0] {56} - ¦ ¦--expr: [0/1] {58} + ¦ ¦--expr: NULL [0/1] {58} ¦ ¦ °--NULL_CONST: NULL [0/0] {57} ¦ ¦--ELSE: else [0/1] {59} - ¦ °--expr: [0/0] {61} + ¦ °--expr: NULL [0/0] {61} ¦ °--NULL_CONST: NULL [0/0] {60} - ¦--expr: [2/0] {62} + ¦--expr: if( # [2/0] {62} ¦ ¦--IF: if [0/0] {63} ¦ ¦--'(': ( [0/1] {64} ¦ ¦--COMMENT: # com [0/0] {65} - ¦ ¦--expr: [1/0] {67} + ¦ ¦--expr: TRUE [1/0] {67} ¦ ¦ °--NUM_CONST: TRUE [0/0] {66} ¦ ¦--')': ) [0/0] {68} - ¦ ¦--expr: [0/1] {70} + ¦ ¦--expr: NULL [0/1] {70} ¦ ¦ °--NULL_CONST: NULL [0/0] {69} ¦ ¦--ELSE: else [0/1] {71} - ¦ °--expr: [0/0] {73} + ¦ °--expr: NULL [0/0] {73} ¦ °--NULL_CONST: NULL [0/0] {72} - °--expr: [2/0] {74} + °--expr: if # [2/0] {74} ¦--IF: if [0/1] {75} ¦--COMMENT: # com [0/0] {76} ¦--'(': ( [1/0] {77} - ¦--expr: [0/0] {79} + ¦--expr: TRUE [0/0] {79} ¦ °--NUM_CONST: TRUE [0/0] {78} ¦--')': ) [0/0] {80} - ¦--expr: [0/1] {82} + ¦--expr: NULL [0/1] {82} ¦ °--NULL_CONST: NULL [0/0] {81} ¦--ELSE: else [0/1] {83} - °--expr: [0/0] {85} + °--expr: NULL [0/0] {85} °--NULL_CONST: NULL [0/0] {84} diff --git a/tests/testthat/insertion_comment_interaction/if_else_strict-in_tree b/tests/testthat/insertion_comment_interaction/if_else_strict-in_tree index 3dbeca382..7d277ba18 100644 --- a/tests/testthat/insertion_comment_interaction/if_else_strict-in_tree +++ b/tests/testthat/insertion_comment_interaction/if_else_strict-in_tree @@ -1,86 +1,86 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} + ¦--expr: if(TR [0/0] {1} ¦ ¦--IF: if [0/0] {2} ¦ ¦--'(': ( [0/0] {3} - ¦ ¦--expr: [0/0] {5} + ¦ ¦--expr: TRUE [0/0] {5} ¦ ¦ °--NUM_CONST: TRUE [0/0] {4} ¦ ¦--')': ) [0/0] {6} - ¦ ¦--expr: [0/1] {8} + ¦ ¦--expr: NULL [0/1] {8} ¦ ¦ °--NULL_CONST: NULL [0/0] {7} ¦ ¦--ELSE: else [0/1] {9} - ¦ °--expr: [0/0] {11} + ¦ °--expr: NULL [0/0] {11} ¦ °--NULL_CONST: NULL [0/0] {10} - ¦--expr: [2/1] {12} + ¦--expr: if(TR [2/1] {12} ¦ ¦--IF: if [0/0] {13} ¦ ¦--'(': ( [0/0] {14} - ¦ ¦--expr: [0/0] {16} + ¦ ¦--expr: TRUE [0/0] {16} ¦ ¦ °--NUM_CONST: TRUE [0/0] {15} ¦ ¦--')': ) [0/0] {17} - ¦ ¦--expr: [0/1] {19} + ¦ ¦--expr: NULL [0/1] {19} ¦ ¦ °--NULL_CONST: NULL [0/0] {18} ¦ ¦--ELSE: else [0/1] {20} - ¦ °--expr: [0/0] {22} + ¦ °--expr: NULL [0/0] {22} ¦ °--NULL_CONST: NULL [0/0] {21} ¦--COMMENT: # com [0/0] {23} - ¦--expr: [3/0] {24} + ¦--expr: if(TR [3/0] {24} ¦ ¦--IF: if [0/0] {25} ¦ ¦--'(': ( [0/0] {26} - ¦ ¦--expr: [0/0] {28} + ¦ ¦--expr: TRUE [0/0] {28} ¦ ¦ °--NUM_CONST: TRUE [0/0] {27} ¦ ¦--')': ) 
[0/0] {29} - ¦ ¦--expr: [0/1] {31} + ¦ ¦--expr: NULL [0/1] {31} ¦ ¦ °--NULL_CONST: NULL [0/0] {30} ¦ ¦--ELSE: else [0/1] {32} ¦ ¦--COMMENT: # com [0/1] {33} - ¦ °--expr: [1/0] {35} + ¦ °--expr: NULL [1/0] {35} ¦ °--NULL_CONST: NULL [0/0] {34} ¦--COMMENT: # if( [2/0] {36} ¦--COMMENT: # el [1/0] {37} - ¦--expr: [2/0] {38} + ¦--expr: if(TR [2/0] {38} ¦ ¦--IF: if [0/0] {39} ¦ ¦--'(': ( [0/0] {40} - ¦ ¦--expr: [0/0] {42} + ¦ ¦--expr: TRUE [0/0] {42} ¦ ¦ °--NUM_CONST: TRUE [0/0] {41} ¦ ¦--')': ) [0/1] {43} ¦ ¦--COMMENT: # com [0/0] {44} - ¦ ¦--expr: [1/1] {46} + ¦ ¦--expr: NULL [1/1] {46} ¦ ¦ °--NULL_CONST: NULL [0/0] {45} ¦ ¦--ELSE: else [0/1] {47} - ¦ °--expr: [0/0] {49} + ¦ °--expr: NULL [0/0] {49} ¦ °--NULL_CONST: NULL [0/0] {48} - ¦--expr: [2/0] {50} + ¦--expr: if(TR [2/0] {50} ¦ ¦--IF: if [0/0] {51} ¦ ¦--'(': ( [0/0] {52} - ¦ ¦--expr: [0/1] {54} + ¦ ¦--expr: TRUE [0/1] {54} ¦ ¦ °--NUM_CONST: TRUE [0/0] {53} ¦ ¦--COMMENT: # com [0/0] {55} ¦ ¦--')': ) [1/0] {56} - ¦ ¦--expr: [0/1] {58} + ¦ ¦--expr: NULL [0/1] {58} ¦ ¦ °--NULL_CONST: NULL [0/0] {57} ¦ ¦--ELSE: else [0/1] {59} - ¦ °--expr: [0/0] {61} + ¦ °--expr: NULL [0/0] {61} ¦ °--NULL_CONST: NULL [0/0] {60} - ¦--expr: [2/0] {62} + ¦--expr: if( # [2/0] {62} ¦ ¦--IF: if [0/0] {63} ¦ ¦--'(': ( [0/1] {64} ¦ ¦--COMMENT: # com [0/0] {65} - ¦ ¦--expr: [1/0] {67} + ¦ ¦--expr: TRUE [1/0] {67} ¦ ¦ °--NUM_CONST: TRUE [0/0] {66} ¦ ¦--')': ) [0/0] {68} - ¦ ¦--expr: [0/1] {70} + ¦ ¦--expr: NULL [0/1] {70} ¦ ¦ °--NULL_CONST: NULL [0/0] {69} ¦ ¦--ELSE: else [0/1] {71} - ¦ °--expr: [0/0] {73} + ¦ °--expr: NULL [0/0] {73} ¦ °--NULL_CONST: NULL [0/0] {72} - °--expr: [2/0] {74} + °--expr: if # [2/0] {74} ¦--IF: if [0/1] {75} ¦--COMMENT: # com [0/0] {76} ¦--'(': ( [1/0] {77} - ¦--expr: [0/0] {79} + ¦--expr: TRUE [0/0] {79} ¦ °--NUM_CONST: TRUE [0/0] {78} ¦--')': ) [0/0] {80} - ¦--expr: [0/1] {82} + ¦--expr: NULL [0/1] {82} ¦ °--NULL_CONST: NULL [0/0] {81} ¦--ELSE: else [0/1] {83} - °--expr: [0/0] {85} + °--expr: NULL [0/0] {85} °--NULL_CONST: NULL [0/0] {84} diff --git a/tests/testthat/insertion_comment_interaction/just_if_non_strict-in_tree b/tests/testthat/insertion_comment_interaction/just_if_non_strict-in_tree index e7a382077..cc8154b6e 100644 --- a/tests/testthat/insertion_comment_interaction/just_if_non_strict-in_tree +++ b/tests/testthat/insertion_comment_interaction/just_if_non_strict-in_tree @@ -1,54 +1,54 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} + ¦--expr: if(TR [0/0] {1} ¦ ¦--IF: if [0/0] {2} ¦ ¦--'(': ( [0/0] {3} - ¦ ¦--expr: [0/0] {5} + ¦ ¦--expr: TRUE [0/0] {5} ¦ ¦ °--NUM_CONST: TRUE [0/0] {4} ¦ ¦--')': ) [0/0] {6} - ¦ °--expr: [0/0] {8} + ¦ °--expr: NULL [0/0] {8} ¦ °--NULL_CONST: NULL [0/0] {7} - ¦--expr: [2/1] {9} + ¦--expr: if(TR [2/1] {9} ¦ ¦--IF: if [0/0] {10} ¦ ¦--'(': ( [0/0] {11} - ¦ ¦--expr: [0/0] {13} + ¦ ¦--expr: TRUE [0/0] {13} ¦ ¦ °--NUM_CONST: TRUE [0/0] {12} ¦ ¦--')': ) [0/0] {14} - ¦ °--expr: [0/0] {16} + ¦ °--expr: NULL [0/0] {16} ¦ °--NULL_CONST: NULL [0/0] {15} ¦--COMMENT: # com [0/0] {17} - ¦--expr: [3/0] {18} + ¦--expr: if(TR [3/0] {18} ¦ ¦--IF: if [0/0] {19} ¦ ¦--'(': ( [0/0] {20} - ¦ ¦--expr: [0/0] {22} + ¦ ¦--expr: TRUE [0/0] {22} ¦ ¦ °--NUM_CONST: TRUE [0/0] {21} ¦ ¦--')': ) [0/1] {23} ¦ ¦--COMMENT: # com [0/0] {24} - ¦ °--expr: [1/0] {26} + ¦ °--expr: NULL [1/0] {26} ¦ °--NULL_CONST: NULL [0/0] {25} - ¦--expr: [2/0] {27} + ¦--expr: if(TR [2/0] {27} ¦ ¦--IF: if [0/0] {28} ¦ ¦--'(': ( [0/0] {29} - ¦ ¦--expr: [0/1] {31} + ¦ ¦--expr: TRUE [0/1] {31} ¦ ¦ °--NUM_CONST: TRUE [0/0] {30} ¦ 
¦--COMMENT: # com [0/0] {32} ¦ ¦--')': ) [1/0] {33} - ¦ °--expr: [0/0] {35} + ¦ °--expr: NULL [0/0] {35} ¦ °--NULL_CONST: NULL [0/0] {34} - ¦--expr: [2/0] {36} + ¦--expr: if( # [2/0] {36} ¦ ¦--IF: if [0/0] {37} ¦ ¦--'(': ( [0/1] {38} ¦ ¦--COMMENT: # com [0/0] {39} - ¦ ¦--expr: [1/0] {41} + ¦ ¦--expr: TRUE [1/0] {41} ¦ ¦ °--NUM_CONST: TRUE [0/0] {40} ¦ ¦--')': ) [0/0] {42} - ¦ °--expr: [0/0] {44} + ¦ °--expr: NULL [0/0] {44} ¦ °--NULL_CONST: NULL [0/0] {43} - °--expr: [2/0] {45} + °--expr: if # [2/0] {45} ¦--IF: if [0/1] {46} ¦--COMMENT: # com [0/0] {47} ¦--'(': ( [1/0] {48} - ¦--expr: [0/0] {50} + ¦--expr: TRUE [0/0] {50} ¦ °--NUM_CONST: TRUE [0/0] {49} ¦--')': ) [0/0] {51} - °--expr: [0/0] {53} + °--expr: NULL [0/0] {53} °--NULL_CONST: NULL [0/0] {52} diff --git a/tests/testthat/insertion_comment_interaction/just_if_strict-in.R b/tests/testthat/insertion_comment_interaction/just_if_strict-in.R index 643a9de03..5fbaf5d47 100644 --- a/tests/testthat/insertion_comment_interaction/just_if_strict-in.R +++ b/tests/testthat/insertion_comment_interaction/just_if_strict-in.R @@ -13,4 +13,4 @@ if( # comment TRUE)NULL if # comment -(TRUE)NULL \ No newline at end of file +(TRUE)NULL diff --git a/tests/testthat/insertion_comment_interaction/just_if_strict-in_tree b/tests/testthat/insertion_comment_interaction/just_if_strict-in_tree index e7a382077..cc8154b6e 100644 --- a/tests/testthat/insertion_comment_interaction/just_if_strict-in_tree +++ b/tests/testthat/insertion_comment_interaction/just_if_strict-in_tree @@ -1,54 +1,54 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} + ¦--expr: if(TR [0/0] {1} ¦ ¦--IF: if [0/0] {2} ¦ ¦--'(': ( [0/0] {3} - ¦ ¦--expr: [0/0] {5} + ¦ ¦--expr: TRUE [0/0] {5} ¦ ¦ °--NUM_CONST: TRUE [0/0] {4} ¦ ¦--')': ) [0/0] {6} - ¦ °--expr: [0/0] {8} + ¦ °--expr: NULL [0/0] {8} ¦ °--NULL_CONST: NULL [0/0] {7} - ¦--expr: [2/1] {9} + ¦--expr: if(TR [2/1] {9} ¦ ¦--IF: if [0/0] {10} ¦ ¦--'(': ( [0/0] {11} - ¦ ¦--expr: [0/0] {13} + ¦ ¦--expr: TRUE [0/0] {13} ¦ ¦ °--NUM_CONST: TRUE [0/0] {12} ¦ ¦--')': ) [0/0] {14} - ¦ °--expr: [0/0] {16} + ¦ °--expr: NULL [0/0] {16} ¦ °--NULL_CONST: NULL [0/0] {15} ¦--COMMENT: # com [0/0] {17} - ¦--expr: [3/0] {18} + ¦--expr: if(TR [3/0] {18} ¦ ¦--IF: if [0/0] {19} ¦ ¦--'(': ( [0/0] {20} - ¦ ¦--expr: [0/0] {22} + ¦ ¦--expr: TRUE [0/0] {22} ¦ ¦ °--NUM_CONST: TRUE [0/0] {21} ¦ ¦--')': ) [0/1] {23} ¦ ¦--COMMENT: # com [0/0] {24} - ¦ °--expr: [1/0] {26} + ¦ °--expr: NULL [1/0] {26} ¦ °--NULL_CONST: NULL [0/0] {25} - ¦--expr: [2/0] {27} + ¦--expr: if(TR [2/0] {27} ¦ ¦--IF: if [0/0] {28} ¦ ¦--'(': ( [0/0] {29} - ¦ ¦--expr: [0/1] {31} + ¦ ¦--expr: TRUE [0/1] {31} ¦ ¦ °--NUM_CONST: TRUE [0/0] {30} ¦ ¦--COMMENT: # com [0/0] {32} ¦ ¦--')': ) [1/0] {33} - ¦ °--expr: [0/0] {35} + ¦ °--expr: NULL [0/0] {35} ¦ °--NULL_CONST: NULL [0/0] {34} - ¦--expr: [2/0] {36} + ¦--expr: if( # [2/0] {36} ¦ ¦--IF: if [0/0] {37} ¦ ¦--'(': ( [0/1] {38} ¦ ¦--COMMENT: # com [0/0] {39} - ¦ ¦--expr: [1/0] {41} + ¦ ¦--expr: TRUE [1/0] {41} ¦ ¦ °--NUM_CONST: TRUE [0/0] {40} ¦ ¦--')': ) [0/0] {42} - ¦ °--expr: [0/0] {44} + ¦ °--expr: NULL [0/0] {44} ¦ °--NULL_CONST: NULL [0/0] {43} - °--expr: [2/0] {45} + °--expr: if # [2/0] {45} ¦--IF: if [0/1] {46} ¦--COMMENT: # com [0/0] {47} ¦--'(': ( [1/0] {48} - ¦--expr: [0/0] {50} + ¦--expr: TRUE [0/0] {50} ¦ °--NUM_CONST: TRUE [0/0] {49} ¦--')': ) [0/0] {51} - °--expr: [0/0] {53} + °--expr: NULL [0/0] {53} °--NULL_CONST: NULL [0/0] {52} diff --git a/tests/testthat/line_breaks_and_other/around-eq-sub-in.R 
b/tests/testthat/line_breaks_and_other/around-eq-sub-in.R new file mode 100644 index 000000000..2d186d774 --- /dev/null +++ b/tests/testthat/line_breaks_and_other/around-eq-sub-in.R @@ -0,0 +1,27 @@ +c(x = 2) + +c(x = + 2) + +c( + x = 2) + +c(x + = 2) + +c(x = 2, a + = + 1) + + +c(x = 2, a + = # stuff + 1) + + +c(b=4, x # comment + = 2) + + +c(x =# comment + 2, c=) diff --git a/tests/testthat/line_breaks_and_other/around-eq-sub-in_tree b/tests/testthat/line_breaks_and_other/around-eq-sub-in_tree new file mode 100644 index 000000000..095d86fc9 --- /dev/null +++ b/tests/testthat/line_breaks_and_other/around-eq-sub-in_tree @@ -0,0 +1,96 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: c(x = [0/0] {1} + ¦ ¦--expr: c [0/0] {3} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {2} + ¦ ¦--'(': ( [0/0] {4} + ¦ ¦--SYMBOL_SUB: x [0/1] {5} + ¦ ¦--EQ_SUB: = [0/1] {6} + ¦ ¦--expr: 2 [0/0] {8} + ¦ ¦ °--NUM_CONST: 2 [0/0] {7} + ¦ °--')': ) [0/0] {9} + ¦--expr: c(x = [2/0] {10} + ¦ ¦--expr: c [0/0] {12} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {11} + ¦ ¦--'(': ( [0/0] {13} + ¦ ¦--SYMBOL_SUB: x [0/1] {14} + ¦ ¦--EQ_SUB: = [0/4] {15} + ¦ ¦--expr: 2 [1/0] {17} + ¦ ¦ °--NUM_CONST: 2 [0/0] {16} + ¦ °--')': ) [0/0] {18} + ¦--expr: c( + [2/0] {19} + ¦ ¦--expr: c [0/0] {21} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {20} + ¦ ¦--'(': ( [0/2] {22} + ¦ ¦--SYMBOL_SUB: x [1/1] {23} + ¦ ¦--EQ_SUB: = [0/1] {24} + ¦ ¦--expr: 2 [0/0] {26} + ¦ ¦ °--NUM_CONST: 2 [0/0] {25} + ¦ °--')': ) [0/0] {27} + ¦--expr: c(x + [2/0] {28} + ¦ ¦--expr: c [0/0] {30} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {29} + ¦ ¦--'(': ( [0/0] {31} + ¦ ¦--SYMBOL_SUB: x [0/2] {32} + ¦ ¦--EQ_SUB: = [1/1] {33} + ¦ ¦--expr: 2 [0/0] {35} + ¦ ¦ °--NUM_CONST: 2 [0/0] {34} + ¦ °--')': ) [0/0] {36} + ¦--expr: c(x = [2/0] {37} + ¦ ¦--expr: c [0/0] {39} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {38} + ¦ ¦--'(': ( [0/0] {40} + ¦ ¦--SYMBOL_SUB: x [0/1] {41} + ¦ ¦--EQ_SUB: = [0/1] {42} + ¦ ¦--expr: 2 [0/0] {44} + ¦ ¦ °--NUM_CONST: 2 [0/0] {43} + ¦ ¦--',': , [0/1] {45} + ¦ ¦--SYMBOL_SUB: a [0/2] {46} + ¦ ¦--EQ_SUB: = [1/4] {47} + ¦ ¦--expr: 1 [1/0] {49} + ¦ ¦ °--NUM_CONST: 1 [0/0] {48} + ¦ °--')': ) [0/0] {50} + ¦--expr: c(x = [3/0] {51} + ¦ ¦--expr: c [0/0] {53} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {52} + ¦ ¦--'(': ( [0/0] {54} + ¦ ¦--SYMBOL_SUB: x [0/1] {55} + ¦ ¦--EQ_SUB: = [0/1] {56} + ¦ ¦--expr: 2 [0/0] {58} + ¦ ¦ °--NUM_CONST: 2 [0/0] {57} + ¦ ¦--',': , [0/1] {59} + ¦ ¦--SYMBOL_SUB: a [0/2] {60} + ¦ ¦--EQ_SUB: = [1/1] {61} + ¦ ¦--COMMENT: # stu [0/4] {62} + ¦ ¦--expr: 1 [1/0] {64} + ¦ ¦ °--NUM_CONST: 1 [0/0] {63} + ¦ °--')': ) [0/0] {65} + ¦--expr: c(b=4 [3/0] {66} + ¦ ¦--expr: c [0/0] {68} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {67} + ¦ ¦--'(': ( [0/0] {69} + ¦ ¦--SYMBOL_SUB: b [0/0] {70} + ¦ ¦--EQ_SUB: = [0/0] {71} + ¦ ¦--expr: 4 [0/0] {73} + ¦ ¦ °--NUM_CONST: 4 [0/0] {72} + ¦ ¦--',': , [0/1] {74} + ¦ ¦--SYMBOL_SUB: x [0/1] {75} + ¦ ¦--COMMENT: # com [0/2] {76} + ¦ ¦--EQ_SUB: = [1/1] {77} + ¦ ¦--expr: 2 [0/0] {79} + ¦ ¦ °--NUM_CONST: 2 [0/0] {78} + ¦ °--')': ) [0/0] {80} + °--expr: c(x = [3/0] {81} + ¦--expr: c [0/0] {83} + ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {82} + ¦--'(': ( [0/0] {84} + ¦--SYMBOL_SUB: x [0/1] {85} + ¦--EQ_SUB: = [0/0] {86} + ¦--COMMENT: # com [0/2] {87} + ¦--expr: 2 [1/0] {89} + ¦ °--NUM_CONST: 2 [0/0] {88} + ¦--',': , [0/1] {90} + ¦--SYMBOL_SUB: c [0/0] {91} + ¦--EQ_SUB: = [0/0] {92} + °--')': ) [0/0] {93} diff --git a/tests/testthat/line_breaks_and_other/around-eq-sub-out.R b/tests/testthat/line_breaks_and_other/around-eq-sub-out.R new 
file mode 100644 index 000000000..29fe9894c --- /dev/null +++ b/tests/testthat/line_breaks_and_other/around-eq-sub-out.R @@ -0,0 +1,39 @@ +c(x = 2) + +c( + x = + 2 +) + +c( + x = 2 +) + +c( + x = 2 +) + +c( + x = 2, + a = + 1 +) + + +c( + x = 2, + a = # stuff + 1 +) + + +c( + b = 4, x # comment + = 2 +) + + +c( + x = # comment + 2, c = + ) diff --git a/tests/testthat/line_breaks_and_other/assignment-in.R b/tests/testthat/line_breaks_and_other/assignment-in.R new file mode 100644 index 000000000..00180be41 --- /dev/null +++ b/tests/testthat/line_breaks_and_other/assignment-in.R @@ -0,0 +1,40 @@ +x <- + 2 + + +x <- 3 + +# FIXME: edge case not working for R < 3.6: Problem: most likely, comment is +# not moved to the right nest with relocate_eq_assign. +x <- + # the culprit + + 3 + + +x = # + 2 + + +x = 3 + +x = + + # comment + 3 + + + +ImportantDataFrame$ImportantColumn1 <- + ImportantDataFrame$ImportantColumn2 <- + ComplicatedFunction(ImportantDataFrame$InputColumn) + + +ImportantDataFrame$ImportantColumn1 <- + ImportantDataFrame$ImportantColumn2 <- ComplicatedFunction(ImportantDataFrame$InputColumn) + + + +ImportantDataFrame$ImportantColumn1 <- + + ImportantDataFrame$ImportantColumn2 <- ComplicatedFunction(ImportantDataFrame$InputColumn) diff --git a/tests/testthat/line_breaks_and_other/assignment-in_tree b/tests/testthat/line_breaks_and_other/assignment-in_tree new file mode 100644 index 000000000..22eaa0017 --- /dev/null +++ b/tests/testthat/line_breaks_and_other/assignment-in_tree @@ -0,0 +1,115 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: x <- + [0/0] {1} + ¦ ¦--expr: x [0/1] {3} + ¦ ¦ °--SYMBOL: x [0/0] {2} + ¦ ¦--LEFT_ASSIGN: <- [0/2] {4} + ¦ °--expr: 2 [1/0] {6} + ¦ °--NUM_CONST: 2 [0/0] {5} + ¦--expr: x <- [3/0] {7} + ¦ ¦--expr: x [0/1] {9} + ¦ ¦ °--SYMBOL: x [0/0] {8} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {10} + ¦ °--expr: 3 [0/0] {12} + ¦ °--NUM_CONST: 3 [0/0] {11} + ¦--COMMENT: # FIX [2/0] {13} + ¦--COMMENT: # not [1/0] {14} + ¦--expr: x <- + [1/0] {15} + ¦ ¦--expr: x [0/1] {17} + ¦ ¦ °--SYMBOL: x [0/0] {16} + ¦ ¦--LEFT_ASSIGN: <- [0/2] {18} + ¦ ¦--COMMENT: # the [1/2] {19} + ¦ °--expr: 3 [2/0] {21} + ¦ °--NUM_CONST: 3 [0/0] {20} + ¦--expr_or_assign_or_help: x = # [3/0] {22} + ¦ ¦--expr: x [0/1] {24} + ¦ ¦ °--SYMBOL: x [0/0] {23} + ¦ ¦--EQ_ASSIGN: = [0/1] {25} + ¦ ¦--COMMENT: # [0/2] {26} + ¦ °--expr: 2 [1/0] {28} + ¦ °--NUM_CONST: 2 [0/0] {27} + ¦--expr_or_assign_or_help: x = 3 [3/0] {29} + ¦ ¦--expr: x [0/1] {31} + ¦ ¦ °--SYMBOL: x [0/0] {30} + ¦ ¦--EQ_ASSIGN: = [0/1] {32} + ¦ °--expr: 3 [0/0] {34} + ¦ °--NUM_CONST: 3 [0/0] {33} + ¦--expr_or_assign_or_help: x = + + [2/0] {35} + ¦ ¦--expr: x [0/1] {37} + ¦ ¦ °--SYMBOL: x [0/0] {36} + ¦ ¦--EQ_ASSIGN: = [0/2] {38} + ¦ ¦--COMMENT: # com [2/2] {39} + ¦ °--expr: 3 [1/0] {41} + ¦ °--NUM_CONST: 3 [0/0] {40} + ¦--expr: Impor [4/0] {42} + ¦ ¦--expr: Impor [0/1] {43} + ¦ ¦ ¦--expr: Impor [0/0] {45} + ¦ ¦ ¦ °--SYMBOL: Impor [0/0] {44} + ¦ ¦ ¦--'$': $ [0/0] {46} + ¦ ¦ °--SYMBOL: Impor [0/0] {47} + ¦ ¦--LEFT_ASSIGN: <- [0/2] {48} + ¦ ¦--expr: Impor [1/1] {50} + ¦ ¦ ¦--expr: Impor [0/0] {52} + ¦ ¦ ¦ °--SYMBOL: Impor [0/0] {51} + ¦ ¦ ¦--'$': $ [0/0] {53} + ¦ ¦ °--SYMBOL: Impor [0/0] {54} + ¦ ¦--LEFT_ASSIGN: <- [0/2] {55} + ¦ °--expr: Compl [1/0] {56} + ¦ ¦--expr: Compl [0/0] {58} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: Compl [0/0] {57} + ¦ ¦--'(': ( [0/0] {59} + ¦ ¦--expr: Impor [0/0] {60} + ¦ ¦ ¦--expr: Impor [0/0] {62} + ¦ ¦ ¦ °--SYMBOL: Impor [0/0] {61} + ¦ ¦ ¦--'$': $ [0/0] {63} + ¦ ¦ °--SYMBOL: Input [0/0] {64} + ¦ °--')': ) [0/0] 
{65} + ¦--expr: Impor [3/0] {66} + ¦ ¦--expr: Impor [0/1] {67} + ¦ ¦ ¦--expr: Impor [0/0] {69} + ¦ ¦ ¦ °--SYMBOL: Impor [0/0] {68} + ¦ ¦ ¦--'$': $ [0/0] {70} + ¦ ¦ °--SYMBOL: Impor [0/0] {71} + ¦ ¦--LEFT_ASSIGN: <- [0/2] {72} + ¦ ¦--expr: Impor [1/1] {74} + ¦ ¦ ¦--expr: Impor [0/0] {76} + ¦ ¦ ¦ °--SYMBOL: Impor [0/0] {75} + ¦ ¦ ¦--'$': $ [0/0] {77} + ¦ ¦ °--SYMBOL: Impor [0/0] {78} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {79} + ¦ °--expr: Compl [0/0] {80} + ¦ ¦--expr: Compl [0/0] {82} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: Compl [0/0] {81} + ¦ ¦--'(': ( [0/0] {83} + ¦ ¦--expr: Impor [0/0] {84} + ¦ ¦ ¦--expr: Impor [0/0] {86} + ¦ ¦ ¦ °--SYMBOL: Impor [0/0] {85} + ¦ ¦ ¦--'$': $ [0/0] {87} + ¦ ¦ °--SYMBOL: Input [0/0] {88} + ¦ °--')': ) [0/0] {89} + °--expr: Impor [4/0] {90} + ¦--expr: Impor [0/1] {91} + ¦ ¦--expr: Impor [0/0] {93} + ¦ ¦ °--SYMBOL: Impor [0/0] {92} + ¦ ¦--'$': $ [0/0] {94} + ¦ °--SYMBOL: Impor [0/0] {95} + ¦--LEFT_ASSIGN: <- [0/2] {96} + ¦--expr: Impor [2/1] {98} + ¦ ¦--expr: Impor [0/0] {100} + ¦ ¦ °--SYMBOL: Impor [0/0] {99} + ¦ ¦--'$': $ [0/0] {101} + ¦ °--SYMBOL: Impor [0/0] {102} + ¦--LEFT_ASSIGN: <- [0/1] {103} + °--expr: Compl [0/0] {104} + ¦--expr: Compl [0/0] {106} + ¦ °--SYMBOL_FUNCTION_CALL: Compl [0/0] {105} + ¦--'(': ( [0/0] {107} + ¦--expr: Impor [0/0] {108} + ¦ ¦--expr: Impor [0/0] {110} + ¦ ¦ °--SYMBOL: Impor [0/0] {109} + ¦ ¦--'$': $ [0/0] {111} + ¦ °--SYMBOL: Input [0/0] {112} + °--')': ) [0/0] {113} diff --git a/tests/testthat/line_breaks_and_other/assignment-out.R b/tests/testthat/line_breaks_and_other/assignment-out.R new file mode 100644 index 000000000..681ded9e5 --- /dev/null +++ b/tests/testthat/line_breaks_and_other/assignment-out.R @@ -0,0 +1,38 @@ +x <- + 2 + + +x <- 3 + +# FIXME: edge case not working for R < 3.6: Problem: most likely, comment is +# not moved to the right nest with relocate_eq_assign. +x <- + # the culprit + + 3 + + +x <- # + 2 + + +x <- 3 + +x <- + # comment + 3 + + + +ImportantDataFrame$ImportantColumn1 <- + ImportantDataFrame$ImportantColumn2 <- + ComplicatedFunction(ImportantDataFrame$InputColumn) + + +ImportantDataFrame$ImportantColumn1 <- + ImportantDataFrame$ImportantColumn2 <- ComplicatedFunction(ImportantDataFrame$InputColumn) + + + +ImportantDataFrame$ImportantColumn1 <- + ImportantDataFrame$ImportantColumn2 <- ComplicatedFunction(ImportantDataFrame$InputColumn) diff --git a/tests/testthat/line_breaks_and_other/base-pipe-line-breaks-in.R b/tests/testthat/line_breaks_and_other/base-pipe-line-breaks-in.R new file mode 100644 index 000000000..880f22cf0 --- /dev/null +++ b/tests/testthat/line_breaks_and_other/base-pipe-line-breaks-in.R @@ -0,0 +1,88 @@ + +c(a |> b()) + +c(a + b |> c()) + + +c(a |> b() +) + +c(a |> b() # 33 +) + +c( + a + b |> c() + ) + +c( + a + b |> + c()) + +c(a + b |> + c() +) + +c( + a + b |> # 654 + c() +) + +c( # rr + a + b |> + c() +) + +c( + a + + b |> c() +) + +c(a + + b |> c() +) + +a |> b( +) + +a |> b( +) |> q() + +a |> + b() + +a |> b() |> c() + +# short pipes < 2 can stay on one line +a |> b() + +fun(x, + a |> b()) + +fun(x, + gg = a |> b(), + tt |> q()) + +fun(x, gg = a |> b(), tt |> q()) + +z = a |> b() + +fun( s = g(x), + gg = a(n == 2) |> b(), + tt |> q(r = 3)) + +# FIXME closing brace could go on ntext line. Alternative: remove lin breaks completely. +blew(x |> + + c(), y = 2) + +# FIXME closing brace could go on ntext line. Alternative: move c() up. 
+blew(y = 2, x |> + c()) + + +{a |> c() +1} + +b |> + f() |> # never move comment to next line as it can be styler: off or nolint + k() |> + x() diff --git a/tests/testthat/line_breaks_and_other/base-pipe-line-breaks-in_tree b/tests/testthat/line_breaks_and_other/base-pipe-line-breaks-in_tree new file mode 100644 index 000000000..c685ecc97 --- /dev/null +++ b/tests/testthat/line_breaks_and_other/base-pipe-line-breaks-in_tree @@ -0,0 +1,465 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: c(a | [0/0] {1} + ¦ ¦--expr: c [0/0] {3} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {2} + ¦ ¦--'(': ( [0/0] {4} + ¦ ¦--expr: a |> [0/0] {5} + ¦ ¦ ¦--expr: a [0/1] {7} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {6} + ¦ ¦ ¦--PIPE: |> [0/1] {8} + ¦ ¦ °--expr: b() [0/0] {9} + ¦ ¦ ¦--expr: b [0/0] {11} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {10} + ¦ ¦ ¦--'(': ( [0/0] {12} + ¦ ¦ °--')': ) [0/0] {13} + ¦ °--')': ) [0/0] {14} + ¦--expr: c(a + [2/0] {15} + ¦ ¦--expr: c [0/0] {17} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {16} + ¦ ¦--'(': ( [0/0] {18} + ¦ ¦--expr: a + b [0/0] {19} + ¦ ¦ ¦--expr: a [0/1] {21} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {20} + ¦ ¦ ¦--'+': + [0/1] {22} + ¦ ¦ ¦--expr: b [0/1] {25} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {24} + ¦ ¦ ¦--PIPE: |> [0/1] {26} + ¦ ¦ °--expr: c() [0/0] {27} + ¦ ¦ ¦--expr: c [0/0] {29} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {28} + ¦ ¦ ¦--'(': ( [0/0] {30} + ¦ ¦ °--')': ) [0/0] {31} + ¦ °--')': ) [0/0] {32} + ¦--expr: c(a | [3/0] {33} + ¦ ¦--expr: c [0/0] {35} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {34} + ¦ ¦--'(': ( [0/0] {36} + ¦ ¦--expr: a |> [0/0] {37} + ¦ ¦ ¦--expr: a [0/1] {39} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {38} + ¦ ¦ ¦--PIPE: |> [0/1] {40} + ¦ ¦ °--expr: b() [0/0] {41} + ¦ ¦ ¦--expr: b [0/0] {43} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {42} + ¦ ¦ ¦--'(': ( [0/0] {44} + ¦ ¦ °--')': ) [0/0] {45} + ¦ °--')': ) [1/0] {46} + ¦--expr: c(a | [2/0] {47} + ¦ ¦--expr: c [0/0] {49} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {48} + ¦ ¦--'(': ( [0/0] {50} + ¦ ¦--expr: a |> [0/1] {51} + ¦ ¦ ¦--expr: a [0/1] {53} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {52} + ¦ ¦ ¦--PIPE: |> [0/1] {54} + ¦ ¦ °--expr: b() [0/0] {55} + ¦ ¦ ¦--expr: b [0/0] {57} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {56} + ¦ ¦ ¦--'(': ( [0/0] {58} + ¦ ¦ °--')': ) [0/0] {59} + ¦ ¦--COMMENT: # 33 [0/0] {60} + ¦ °--')': ) [1/0] {61} + ¦--expr: c( + [2/0] {62} + ¦ ¦--expr: c [0/0] {64} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {63} + ¦ ¦--'(': ( [0/2] {65} + ¦ ¦--expr: a + b [1/2] {66} + ¦ ¦ ¦--expr: a [0/1] {68} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {67} + ¦ ¦ ¦--'+': + [0/1] {69} + ¦ ¦ ¦--expr: b [0/1] {72} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {71} + ¦ ¦ ¦--PIPE: |> [0/1] {73} + ¦ ¦ °--expr: c() [0/0] {74} + ¦ ¦ ¦--expr: c [0/0] {76} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {75} + ¦ ¦ ¦--'(': ( [0/0] {77} + ¦ ¦ °--')': ) [0/0] {78} + ¦ °--')': ) [1/0] {79} + ¦--expr: c( + [2/0] {80} + ¦ ¦--expr: c [0/0] {82} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {81} + ¦ ¦--'(': ( [0/2] {83} + ¦ ¦--expr: a + b [1/0] {84} + ¦ ¦ ¦--expr: a [0/1] {86} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {85} + ¦ ¦ ¦--'+': + [0/1] {87} + ¦ ¦ ¦--expr: b [0/1] {90} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {89} + ¦ ¦ ¦--PIPE: |> [0/4] {91} + ¦ ¦ °--expr: c() [1/0] {92} + ¦ ¦ ¦--expr: c [0/0] {94} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {93} + ¦ ¦ ¦--'(': ( [0/0] {95} + ¦ ¦ °--')': ) [0/0] {96} + ¦ °--')': ) [0/0] {97} + ¦--expr: c(a + [2/0] {98} + ¦ ¦--expr: c [0/0] {100} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {99} + ¦ ¦--'(': ( [0/0] {101} + ¦ ¦--expr: a + b [0/0] {102} + ¦ ¦ ¦--expr: a [0/1] {104} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {103} + ¦ 
¦ ¦--'+': + [0/1] {105} + ¦ ¦ ¦--expr: b [0/1] {108} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {107} + ¦ ¦ ¦--PIPE: |> [0/4] {109} + ¦ ¦ °--expr: c() [1/0] {110} + ¦ ¦ ¦--expr: c [0/0] {112} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {111} + ¦ ¦ ¦--'(': ( [0/0] {113} + ¦ ¦ °--')': ) [0/0] {114} + ¦ °--')': ) [1/0] {115} + ¦--expr: c( + [2/0] {116} + ¦ ¦--expr: c [0/0] {118} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {117} + ¦ ¦--'(': ( [0/2] {119} + ¦ ¦--expr: a + b [1/0] {120} + ¦ ¦ ¦--expr: a [0/1] {122} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {121} + ¦ ¦ ¦--'+': + [0/1] {123} + ¦ ¦ ¦--expr: b [0/1] {126} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {125} + ¦ ¦ ¦--PIPE: |> [0/1] {127} + ¦ ¦ ¦--COMMENT: # 654 [0/4] {128} + ¦ ¦ °--expr: c() [1/0] {129} + ¦ ¦ ¦--expr: c [0/0] {131} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {130} + ¦ ¦ ¦--'(': ( [0/0] {132} + ¦ ¦ °--')': ) [0/0] {133} + ¦ °--')': ) [1/0] {134} + ¦--expr: c( # [2/0] {135} + ¦ ¦--expr: c [0/0] {137} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {136} + ¦ ¦--'(': ( [0/1] {138} + ¦ ¦--COMMENT: # rr [0/2] {139} + ¦ ¦--expr: a + b [1/0] {140} + ¦ ¦ ¦--expr: a [0/1] {142} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {141} + ¦ ¦ ¦--'+': + [0/1] {143} + ¦ ¦ ¦--expr: b [0/1] {146} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {145} + ¦ ¦ ¦--PIPE: |> [0/4] {147} + ¦ ¦ °--expr: c() [1/0] {148} + ¦ ¦ ¦--expr: c [0/0] {150} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {149} + ¦ ¦ ¦--'(': ( [0/0] {151} + ¦ ¦ °--')': ) [0/0] {152} + ¦ °--')': ) [1/0] {153} + ¦--expr: c( + [2/0] {154} + ¦ ¦--expr: c [0/0] {156} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {155} + ¦ ¦--'(': ( [0/2] {157} + ¦ ¦--expr: a + + [1/0] {158} + ¦ ¦ ¦--expr: a [0/1] {160} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {159} + ¦ ¦ ¦--'+': + [0/4] {161} + ¦ ¦ ¦--expr: b [1/1] {164} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {163} + ¦ ¦ ¦--PIPE: |> [0/1] {165} + ¦ ¦ °--expr: c() [0/0] {166} + ¦ ¦ ¦--expr: c [0/0] {168} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {167} + ¦ ¦ ¦--'(': ( [0/0] {169} + ¦ ¦ °--')': ) [0/0] {170} + ¦ °--')': ) [1/0] {171} + ¦--expr: c(a + [2/0] {172} + ¦ ¦--expr: c [0/0] {174} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {173} + ¦ ¦--'(': ( [0/0] {175} + ¦ ¦--expr: a + + [0/0] {176} + ¦ ¦ ¦--expr: a [0/1] {178} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {177} + ¦ ¦ ¦--'+': + [0/4] {179} + ¦ ¦ ¦--expr: b [1/1] {182} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {181} + ¦ ¦ ¦--PIPE: |> [0/1] {183} + ¦ ¦ °--expr: c() [0/0] {184} + ¦ ¦ ¦--expr: c [0/0] {186} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {185} + ¦ ¦ ¦--'(': ( [0/0] {187} + ¦ ¦ °--')': ) [0/0] {188} + ¦ °--')': ) [1/0] {189} + ¦--expr: a |> [2/0] {190} + ¦ ¦--expr: a [0/1] {192} + ¦ ¦ °--SYMBOL: a [0/0] {191} + ¦ ¦--PIPE: |> [0/1] {193} + ¦ °--expr: b( +) [0/0] {194} + ¦ ¦--expr: b [0/0] {196} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {195} + ¦ ¦--'(': ( [0/0] {197} + ¦ °--')': ) [1/0] {198} + ¦--expr: a |> [2/0] {199} + ¦ ¦--expr: a [0/1] {202} + ¦ ¦ °--SYMBOL: a [0/0] {201} + ¦ ¦--PIPE: |> [0/1] {203} + ¦ ¦--expr: b( +) [0/1] {204} + ¦ ¦ ¦--expr: b [0/0] {206} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {205} + ¦ ¦ ¦--'(': ( [0/0] {207} + ¦ ¦ °--')': ) [1/0] {208} + ¦ ¦--PIPE: |> [0/1] {209} + ¦ °--expr: q() [0/0] {210} + ¦ ¦--expr: q [0/0] {212} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: q [0/0] {211} + ¦ ¦--'(': ( [0/0] {213} + ¦ °--')': ) [0/0] {214} + ¦--expr: a |> + [2/0] {215} + ¦ ¦--expr: a [0/1] {217} + ¦ ¦ °--SYMBOL: a [0/0] {216} + ¦ ¦--PIPE: |> [0/2] {218} + ¦ °--expr: b() [1/0] {219} + ¦ ¦--expr: b [0/0] {221} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {220} + ¦ ¦--'(': ( [0/0] {222} + ¦ °--')': ) [0/0] {223} + ¦--expr: a |> [2/0] {224} + ¦ ¦--expr: a [0/1] {227} + ¦ ¦ °--SYMBOL: 
a [0/0] {226} + ¦ ¦--PIPE: |> [0/1] {228} + ¦ ¦--expr: b() [0/1] {229} + ¦ ¦ ¦--expr: b [0/0] {231} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {230} + ¦ ¦ ¦--'(': ( [0/0] {232} + ¦ ¦ °--')': ) [0/0] {233} + ¦ ¦--PIPE: |> [0/1] {234} + ¦ °--expr: c() [0/0] {235} + ¦ ¦--expr: c [0/0] {237} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {236} + ¦ ¦--'(': ( [0/0] {238} + ¦ °--')': ) [0/0] {239} + ¦--COMMENT: # sho [2/0] {240} + ¦--expr: a |> [1/0] {241} + ¦ ¦--expr: a [0/1] {243} + ¦ ¦ °--SYMBOL: a [0/0] {242} + ¦ ¦--PIPE: |> [0/1] {244} + ¦ °--expr: b() [0/0] {245} + ¦ ¦--expr: b [0/0] {247} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {246} + ¦ ¦--'(': ( [0/0] {248} + ¦ °--')': ) [0/0] {249} + ¦--expr: fun(x [2/0] {250} + ¦ ¦--expr: fun [0/0] {252} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: fun [0/0] {251} + ¦ ¦--'(': ( [0/0] {253} + ¦ ¦--expr: x [0/0] {255} + ¦ ¦ °--SYMBOL: x [0/0] {254} + ¦ ¦--',': , [0/2] {256} + ¦ ¦--expr: a |> [1/0] {257} + ¦ ¦ ¦--expr: a [0/1] {259} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {258} + ¦ ¦ ¦--PIPE: |> [0/1] {260} + ¦ ¦ °--expr: b() [0/0] {261} + ¦ ¦ ¦--expr: b [0/0] {263} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {262} + ¦ ¦ ¦--'(': ( [0/0] {264} + ¦ ¦ °--')': ) [0/0] {265} + ¦ °--')': ) [0/0] {266} + ¦--expr: fun(x [2/0] {267} + ¦ ¦--expr: fun [0/0] {269} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: fun [0/0] {268} + ¦ ¦--'(': ( [0/0] {270} + ¦ ¦--expr: x [0/0] {272} + ¦ ¦ °--SYMBOL: x [0/0] {271} + ¦ ¦--',': , [0/4] {273} + ¦ ¦--SYMBOL_SUB: gg [1/1] {274} + ¦ ¦--EQ_SUB: = [0/1] {275} + ¦ ¦--expr: a |> [0/0] {276} + ¦ ¦ ¦--expr: a [0/1] {278} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {277} + ¦ ¦ ¦--PIPE: |> [0/1] {279} + ¦ ¦ °--expr: b() [0/0] {280} + ¦ ¦ ¦--expr: b [0/0] {282} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {281} + ¦ ¦ ¦--'(': ( [0/0] {283} + ¦ ¦ °--')': ) [0/0] {284} + ¦ ¦--',': , [0/4] {285} + ¦ ¦--expr: tt |> [1/0] {286} + ¦ ¦ ¦--expr: tt [0/1] {288} + ¦ ¦ ¦ °--SYMBOL: tt [0/0] {287} + ¦ ¦ ¦--PIPE: |> [0/1] {289} + ¦ ¦ °--expr: q() [0/0] {290} + ¦ ¦ ¦--expr: q [0/0] {292} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: q [0/0] {291} + ¦ ¦ ¦--'(': ( [0/0] {293} + ¦ ¦ °--')': ) [0/0] {294} + ¦ °--')': ) [0/0] {295} + ¦--expr: fun(x [2/0] {296} + ¦ ¦--expr: fun [0/0] {298} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: fun [0/0] {297} + ¦ ¦--'(': ( [0/0] {299} + ¦ ¦--expr: x [0/0] {301} + ¦ ¦ °--SYMBOL: x [0/0] {300} + ¦ ¦--',': , [0/1] {302} + ¦ ¦--SYMBOL_SUB: gg [0/1] {303} + ¦ ¦--EQ_SUB: = [0/1] {304} + ¦ ¦--expr: a |> [0/0] {305} + ¦ ¦ ¦--expr: a [0/1] {307} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {306} + ¦ ¦ ¦--PIPE: |> [0/1] {308} + ¦ ¦ °--expr: b() [0/0] {309} + ¦ ¦ ¦--expr: b [0/0] {311} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {310} + ¦ ¦ ¦--'(': ( [0/0] {312} + ¦ ¦ °--')': ) [0/0] {313} + ¦ ¦--',': , [0/1] {314} + ¦ ¦--expr: tt |> [0/0] {315} + ¦ ¦ ¦--expr: tt [0/1] {317} + ¦ ¦ ¦ °--SYMBOL: tt [0/0] {316} + ¦ ¦ ¦--PIPE: |> [0/1] {318} + ¦ ¦ °--expr: q() [0/0] {319} + ¦ ¦ ¦--expr: q [0/0] {321} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: q [0/0] {320} + ¦ ¦ ¦--'(': ( [0/0] {322} + ¦ ¦ °--')': ) [0/0] {323} + ¦ °--')': ) [0/0] {324} + ¦--expr_or_assign_or_help: z = a [2/0] {325} + ¦ ¦--expr: z [0/1] {327} + ¦ ¦ °--SYMBOL: z [0/0] {326} + ¦ ¦--EQ_ASSIGN: = [0/1] {328} + ¦ ¦--expr: a [0/1] {331} + ¦ ¦ °--SYMBOL: a [0/0] {330} + ¦ ¦--PIPE: |> [0/1] {332} + ¦ °--expr: b() [0/0] {333} + ¦ ¦--expr: b [0/0] {335} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {334} + ¦ ¦--'(': ( [0/0] {336} + ¦ °--')': ) [0/0] {337} + ¦--expr: fun( [2/0] {338} + ¦ ¦--expr: fun [0/0] {340} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: fun [0/0] {339} + ¦ ¦--'(': ( [0/1] {341} + ¦ ¦--SYMBOL_SUB: s [0/1] {342} 
+ ¦ ¦--EQ_SUB: = [0/1] {343} + ¦ ¦--expr: g(x) [0/0] {344} + ¦ ¦ ¦--expr: g [0/0] {346} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: g [0/0] {345} + ¦ ¦ ¦--'(': ( [0/0] {347} + ¦ ¦ ¦--expr: x [0/0] {349} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {348} + ¦ ¦ °--')': ) [0/0] {350} + ¦ ¦--',': , [0/4] {351} + ¦ ¦--SYMBOL_SUB: gg [1/1] {352} + ¦ ¦--EQ_SUB: = [0/1] {353} + ¦ ¦--expr: a(n = [0/0] {354} + ¦ ¦ ¦--expr: a(n = [0/1] {355} + ¦ ¦ ¦ ¦--expr: a [0/0] {357} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: a [0/0] {356} + ¦ ¦ ¦ ¦--'(': ( [0/0] {358} + ¦ ¦ ¦ ¦--expr: n == [0/0] {359} + ¦ ¦ ¦ ¦ ¦--expr: n [0/1] {361} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL: n [0/0] {360} + ¦ ¦ ¦ ¦ ¦--EQ: == [0/1] {362} + ¦ ¦ ¦ ¦ °--expr: 2 [0/0] {364} + ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {363} + ¦ ¦ ¦ °--')': ) [0/0] {365} + ¦ ¦ ¦--PIPE: |> [0/1] {366} + ¦ ¦ °--expr: b() [0/0] {367} + ¦ ¦ ¦--expr: b [0/0] {369} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {368} + ¦ ¦ ¦--'(': ( [0/0] {370} + ¦ ¦ °--')': ) [0/0] {371} + ¦ ¦--',': , [0/4] {372} + ¦ ¦--expr: tt |> [1/0] {373} + ¦ ¦ ¦--expr: tt [0/1] {375} + ¦ ¦ ¦ °--SYMBOL: tt [0/0] {374} + ¦ ¦ ¦--PIPE: |> [0/1] {376} + ¦ ¦ °--expr: q(r = [0/0] {377} + ¦ ¦ ¦--expr: q [0/0] {379} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: q [0/0] {378} + ¦ ¦ ¦--'(': ( [0/0] {380} + ¦ ¦ ¦--SYMBOL_SUB: r [0/1] {381} + ¦ ¦ ¦--EQ_SUB: = [0/1] {382} + ¦ ¦ ¦--expr: 3 [0/0] {384} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {383} + ¦ ¦ °--')': ) [0/0] {385} + ¦ °--')': ) [0/0] {386} + ¦--COMMENT: # FIX [2/0] {387} + ¦--expr: blew( [1/0] {388} + ¦ ¦--expr: blew [0/0] {390} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: blew [0/0] {389} + ¦ ¦--'(': ( [0/0] {391} + ¦ ¦--expr: x |> + [0/0] {392} + ¦ ¦ ¦--expr: x [0/1] {394} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {393} + ¦ ¦ ¦--PIPE: |> [0/7] {395} + ¦ ¦ °--expr: c() [2/0] {396} + ¦ ¦ ¦--expr: c [0/0] {398} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {397} + ¦ ¦ ¦--'(': ( [0/0] {399} + ¦ ¦ °--')': ) [0/0] {400} + ¦ ¦--',': , [0/1] {401} + ¦ ¦--SYMBOL_SUB: y [0/1] {402} + ¦ ¦--EQ_SUB: = [0/1] {403} + ¦ ¦--expr: 2 [0/0] {405} + ¦ ¦ °--NUM_CONST: 2 [0/0] {404} + ¦ °--')': ) [0/0] {406} + ¦--COMMENT: # FIX [2/0] {407} + ¦--expr: blew( [1/0] {408} + ¦ ¦--expr: blew [0/0] {410} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: blew [0/0] {409} + ¦ ¦--'(': ( [0/0] {411} + ¦ ¦--SYMBOL_SUB: y [0/1] {412} + ¦ ¦--EQ_SUB: = [0/1] {413} + ¦ ¦--expr: 2 [0/0] {415} + ¦ ¦ °--NUM_CONST: 2 [0/0] {414} + ¦ ¦--',': , [0/1] {416} + ¦ ¦--expr: x |> + [0/0] {417} + ¦ ¦ ¦--expr: x [0/1] {419} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {418} + ¦ ¦ ¦--PIPE: |> [0/7] {420} + ¦ ¦ °--expr: c() [1/0] {421} + ¦ ¦ ¦--expr: c [0/0] {423} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {422} + ¦ ¦ ¦--'(': ( [0/0] {424} + ¦ ¦ °--')': ) [0/0] {425} + ¦ °--')': ) [0/0] {426} + ¦--expr: {a |> [3/0] {427} + ¦ ¦--'{': { [0/0] {428} + ¦ ¦--expr: a |> [0/0] {429} + ¦ ¦ ¦--expr: a [0/1] {432} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {431} + ¦ ¦ ¦--PIPE: |> [0/1] {433} + ¦ ¦ ¦--expr: c() [0/1] {434} + ¦ ¦ ¦ ¦--expr: c [0/0] {436} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {435} + ¦ ¦ ¦ ¦--'(': ( [0/0] {437} + ¦ ¦ ¦ °--')': ) [0/0] {438} + ¦ ¦ ¦--'+': + [0/0] {439} + ¦ ¦ °--expr: 1 [0/0] {441} + ¦ ¦ °--NUM_CONST: 1 [0/0] {440} + ¦ °--'}': } [0/0] {442} + °--expr: b |> + [2/0] {443} + ¦--expr: b [0/1] {447} + ¦ °--SYMBOL: b [0/0] {446} + ¦--PIPE: |> [0/2] {448} + ¦--expr: f() [1/1] {449} + ¦ ¦--expr: f [0/0] {451} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: f [0/0] {450} + ¦ ¦--'(': ( [0/0] {452} + ¦ °--')': ) [0/0] {453} + ¦--PIPE: |> [0/1] {454} + ¦--COMMENT: # nev [0/2] {455} + ¦--expr: k() [1/1] {456} + ¦ ¦--expr: k [0/0] {458} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: k [0/0] 
{457} + ¦ ¦--'(': ( [0/0] {459} + ¦ °--')': ) [0/0] {460} + ¦--PIPE: |> [0/2] {461} + °--expr: x() [1/0] {462} + ¦--expr: x [0/0] {464} + ¦ °--SYMBOL_FUNCTION_CALL: x [0/0] {463} + ¦--'(': ( [0/0] {465} + °--')': ) [0/0] {466} diff --git a/tests/testthat/line_breaks_and_other/base-pipe-line-breaks-out.R b/tests/testthat/line_breaks_and_other/base-pipe-line-breaks-out.R new file mode 100644 index 000000000..0fd45f0b5 --- /dev/null +++ b/tests/testthat/line_breaks_and_other/base-pipe-line-breaks-out.R @@ -0,0 +1,94 @@ +c(a |> b()) + +c(a + b |> c()) + + +c(a |> b()) + +c( + a |> b() # 33 +) + +c( + a + b |> c() +) + +c( + a + b |> + c() +) + +c(a + b |> + c()) + +c( + a + b |> # 654 + c() +) + +c( # rr + a + b |> + c() +) + +c( + a + + b |> c() +) + +c(a + + b |> c()) + +a |> b() + +a |> + b() |> + q() + +a |> + b() + +a |> + b() |> + c() + +# short pipes < 2 can stay on one line +a |> b() + +fun( + x, + a |> b() +) + +fun(x, + gg = a |> b(), + tt |> q() +) + +fun(x, gg = a |> b(), tt |> q()) + +z <- a |> b() + +fun( + s = g(x), + gg = a(n == 2) |> b(), + tt |> q(r = 3) +) + +# FIXME closing brace could go on ntext line. Alternative: remove lin breaks completely. +blew(x |> + c(), y = 2) + +# FIXME closing brace could go on ntext line. Alternative: move c() up. +blew(y = 2, x |> + c()) + + +{ + a |> c() + 1 +} + +b |> + f() |> # never move comment to next line as it can be styler: off or nolint + k() |> + x() diff --git a/tests/testthat/line_breaks_and_other/braces-fun-calls1-in.R b/tests/testthat/line_breaks_and_other/braces-fun-calls1-in.R new file mode 100644 index 000000000..362094575 --- /dev/null +++ b/tests/testthat/line_breaks_and_other/braces-fun-calls1-in.R @@ -0,0 +1,35 @@ +# the brace expression is the last argument (classical testthat case) +test_that(x, { + hh +}) + +test_that(x, + { + hh + } +) + + +# there are multiple brace expressions that spread over multiple lines +# (classical tryCatch) +tryCatch({ + exp(x) +}, error = function(x) x) + +tryCatch( + { + exp(x) + }, + error = function(x) x +) + +call({ + blibla +}, { + blublo +}) + +# curly-curly is respected +fio({{x}}) + +test_that("x", {{ k }}) diff --git a/tests/testthat/line_breaks_and_other/braces-fun-calls1-in_tree b/tests/testthat/line_breaks_and_other/braces-fun-calls1-in_tree new file mode 100644 index 000000000..a62afee1c --- /dev/null +++ b/tests/testthat/line_breaks_and_other/braces-fun-calls1-in_tree @@ -0,0 +1,132 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: # the [0/0] {1} + ¦--expr: test_ [1/0] {2} + ¦ ¦--expr: test_ [0/0] {4} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {3} + ¦ ¦--'(': ( [0/0] {5} + ¦ ¦--expr: x [0/0] {7} + ¦ ¦ °--SYMBOL: x [0/0] {6} + ¦ ¦--',': , [0/1] {8} + ¦ ¦--expr: { + h [0/0] {9} + ¦ ¦ ¦--'{': { [0/2] {10} + ¦ ¦ ¦--expr: hh [1/0] {12} + ¦ ¦ ¦ °--SYMBOL: hh [0/0] {11} + ¦ ¦ °--'}': } [1/0] {13} + ¦ °--')': ) [0/0] {14} + ¦--expr: test_ [2/0] {15} + ¦ ¦--expr: test_ [0/0] {17} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {16} + ¦ ¦--'(': ( [0/0] {18} + ¦ ¦--expr: x [0/0] {20} + ¦ ¦ °--SYMBOL: x [0/0] {19} + ¦ ¦--',': , [0/2] {21} + ¦ ¦--expr: { + [1/0] {22} + ¦ ¦ ¦--'{': { [0/4] {23} + ¦ ¦ ¦--expr: hh [1/2] {25} + ¦ ¦ ¦ °--SYMBOL: hh [0/0] {24} + ¦ ¦ °--'}': } [1/0] {26} + ¦ °--')': ) [1/0] {27} + ¦--COMMENT: # the [3/0] {28} + ¦--COMMENT: # (cl [1/0] {29} + ¦--expr: tryCa [1/0] {30} + ¦ ¦--expr: tryCa [0/0] {32} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: tryCa [0/0] {31} + ¦ ¦--'(': ( [0/0] {33} + ¦ ¦--expr: { + e [0/0] {34} + ¦ ¦ ¦--'{': { [0/2] {35} + ¦ ¦ ¦--expr: 
exp(x [1/0] {36} + ¦ ¦ ¦ ¦--expr: exp [0/0] {38} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: exp [0/0] {37} + ¦ ¦ ¦ ¦--'(': ( [0/0] {39} + ¦ ¦ ¦ ¦--expr: x [0/0] {41} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {40} + ¦ ¦ ¦ °--')': ) [0/0] {42} + ¦ ¦ °--'}': } [1/0] {43} + ¦ ¦--',': , [0/1] {44} + ¦ ¦--SYMBOL_SUB: error [0/1] {45} + ¦ ¦--EQ_SUB: = [0/1] {46} + ¦ ¦--expr: funct [0/0] {47} + ¦ ¦ ¦--FUNCTION: funct [0/0] {48} + ¦ ¦ ¦--'(': ( [0/0] {49} + ¦ ¦ ¦--SYMBOL_FORMALS: x [0/0] {50} + ¦ ¦ ¦--')': ) [0/1] {51} + ¦ ¦ °--expr: x [0/0] {53} + ¦ ¦ °--SYMBOL: x [0/0] {52} + ¦ °--')': ) [0/0] {54} + ¦--expr: tryCa [2/0] {55} + ¦ ¦--expr: tryCa [0/0] {57} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: tryCa [0/0] {56} + ¦ ¦--'(': ( [0/2] {58} + ¦ ¦--expr: { + [1/0] {59} + ¦ ¦ ¦--'{': { [0/4] {60} + ¦ ¦ ¦--expr: exp(x [1/2] {61} + ¦ ¦ ¦ ¦--expr: exp [0/0] {63} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: exp [0/0] {62} + ¦ ¦ ¦ ¦--'(': ( [0/0] {64} + ¦ ¦ ¦ ¦--expr: x [0/0] {66} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {65} + ¦ ¦ ¦ °--')': ) [0/0] {67} + ¦ ¦ °--'}': } [1/0] {68} + ¦ ¦--',': , [0/2] {69} + ¦ ¦--SYMBOL_SUB: error [1/1] {70} + ¦ ¦--EQ_SUB: = [0/1] {71} + ¦ ¦--expr: funct [0/0] {72} + ¦ ¦ ¦--FUNCTION: funct [0/0] {73} + ¦ ¦ ¦--'(': ( [0/0] {74} + ¦ ¦ ¦--SYMBOL_FORMALS: x [0/0] {75} + ¦ ¦ ¦--')': ) [0/1] {76} + ¦ ¦ °--expr: x [0/0] {78} + ¦ ¦ °--SYMBOL: x [0/0] {77} + ¦ °--')': ) [1/0] {79} + ¦--expr: call( [2/0] {80} + ¦ ¦--expr: call [0/0] {82} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {81} + ¦ ¦--'(': ( [0/0] {83} + ¦ ¦--expr: { + b [0/0] {84} + ¦ ¦ ¦--'{': { [0/2] {85} + ¦ ¦ ¦--expr: blibl [1/0] {87} + ¦ ¦ ¦ °--SYMBOL: blibl [0/0] {86} + ¦ ¦ °--'}': } [1/0] {88} + ¦ ¦--',': , [0/1] {89} + ¦ ¦--expr: { + b [0/0] {90} + ¦ ¦ ¦--'{': { [0/2] {91} + ¦ ¦ ¦--expr: blubl [1/0] {93} + ¦ ¦ ¦ °--SYMBOL: blubl [0/0] {92} + ¦ ¦ °--'}': } [1/0] {94} + ¦ °--')': ) [0/0] {95} + ¦--COMMENT: # cur [2/0] {96} + ¦--expr: fio({ [1/0] {97} + ¦ ¦--expr: fio [0/0] {99} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: fio [0/0] {98} + ¦ ¦--'(': ( [0/0] {100} + ¦ ¦--expr: {{x}} [0/0] {101} + ¦ ¦ ¦--'{': { [0/0] {102} + ¦ ¦ ¦--expr: {x} [0/0] {103} + ¦ ¦ ¦ ¦--'{': { [0/0] {104} + ¦ ¦ ¦ ¦--expr: x [0/0] {106} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {105} + ¦ ¦ ¦ °--'}': } [0/0] {107} + ¦ ¦ °--'}': } [0/0] {108} + ¦ °--')': ) [0/0] {109} + °--expr: test_ [2/0] {110} + ¦--expr: test_ [0/0] {112} + ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {111} + ¦--'(': ( [0/0] {113} + ¦--expr: "x" [0/0] {115} + ¦ °--STR_CONST: "x" [0/0] {114} + ¦--',': , [0/1] {116} + ¦--expr: {{ k [0/0] {117} + ¦ ¦--'{': { [0/0] {118} + ¦ ¦--expr: { k } [0/0] {119} + ¦ ¦ ¦--'{': { [0/1] {120} + ¦ ¦ ¦--expr: k [0/1] {122} + ¦ ¦ ¦ °--SYMBOL: k [0/0] {121} + ¦ ¦ °--'}': } [0/0] {123} + ¦ °--'}': } [0/0] {124} + °--')': ) [0/0] {125} diff --git a/tests/testthat/line_breaks_and_other/braces-fun-calls1-out.R b/tests/testthat/line_breaks_and_other/braces-fun-calls1-out.R new file mode 100644 index 000000000..df9b2137d --- /dev/null +++ b/tests/testthat/line_breaks_and_other/braces-fun-calls1-out.R @@ -0,0 +1,39 @@ +# the brace expression is the last argument (classical testthat case) +test_that(x, { + hh +}) + +test_that(x, { + hh +}) + + +# there are multiple brace expressions that spread over multiple lines +# (classical tryCatch) +tryCatch( + { + exp(x) + }, + error = function(x) x +) + +tryCatch( + { + exp(x) + }, + error = function(x) x +) + +call( + { + blibla + }, + { + blublo + } +) + +# curly-curly is respected +fio({{ x }}) + +test_that("x", {{ k }}) diff --git a/tests/testthat/line_breaks_and_other/braces-fun-calls2-in.R 
b/tests/testthat/line_breaks_and_other/braces-fun-calls2-in.R new file mode 100644 index 000000000..fe6003476 --- /dev/null +++ b/tests/testthat/line_breaks_and_other/braces-fun-calls2-in.R @@ -0,0 +1,104 @@ +test( + "x", + { + + }, a + b, { + s(x = sd) + } +) + +test( + "x", { + + }, a + b, { + s(x = sd) + } +) + +test( + "x", + { + + }, + a + b, { + s(x = sd) + } +) + + +test( + "x", + { + + }, + a + b, + { + s(x = sd) + } +) + +test( + "x", + { + + }, # h + a + b, { + s(x = sd) + } +) + +test( + "x", + { + + }, # h + a + b, + # k + { + s(x = sd) + } +) + +test( + "x", + { + + }, + a + b, # k + { + s(x = sd) + } +) + +tetst( + "x", + { + x + }, 1 + +1 +) + +while ({ + x +}) { + f() +} + +while ({ + x +} +) { + f() +} + +while ( + { + x +}) { + f() +} + +while ( + {x +}) { + f() +} diff --git a/tests/testthat/line_breaks_and_other/braces-fun-calls2-in_tree b/tests/testthat/line_breaks_and_other/braces-fun-calls2-in_tree new file mode 100644 index 000000000..2a73d4886 --- /dev/null +++ b/tests/testthat/line_breaks_and_other/braces-fun-calls2-in_tree @@ -0,0 +1,342 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: test( [0/0] {1} + ¦ ¦--expr: test [0/0] {3} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: test [0/0] {2} + ¦ ¦--'(': ( [0/2] {4} + ¦ ¦--expr: "x" [1/0] {6} + ¦ ¦ °--STR_CONST: "x" [0/0] {5} + ¦ ¦--',': , [0/2] {7} + ¦ ¦--expr: { + + [1/0] {8} + ¦ ¦ ¦--'{': { [0/2] {9} + ¦ ¦ °--'}': } [2/0] {10} + ¦ ¦--',': , [0/1] {11} + ¦ ¦--expr: a + b [0/0] {12} + ¦ ¦ ¦--expr: a [0/1] {14} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {13} + ¦ ¦ ¦--'+': + [0/1] {15} + ¦ ¦ °--expr: b [0/0] {17} + ¦ ¦ °--SYMBOL: b [0/0] {16} + ¦ ¦--',': , [0/1] {18} + ¦ ¦--expr: { + [0/0] {19} + ¦ ¦ ¦--'{': { [0/4] {20} + ¦ ¦ ¦--expr: s(x = [1/2] {21} + ¦ ¦ ¦ ¦--expr: s [0/0] {23} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: s [0/0] {22} + ¦ ¦ ¦ ¦--'(': ( [0/0] {24} + ¦ ¦ ¦ ¦--SYMBOL_SUB: x [0/1] {25} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {26} + ¦ ¦ ¦ ¦--expr: sd [0/0] {28} + ¦ ¦ ¦ ¦ °--SYMBOL: sd [0/0] {27} + ¦ ¦ ¦ °--')': ) [0/0] {29} + ¦ ¦ °--'}': } [1/0] {30} + ¦ °--')': ) [1/0] {31} + ¦--expr: test( [2/0] {32} + ¦ ¦--expr: test [0/0] {34} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: test [0/0] {33} + ¦ ¦--'(': ( [0/2] {35} + ¦ ¦--expr: "x" [1/0] {37} + ¦ ¦ °--STR_CONST: "x" [0/0] {36} + ¦ ¦--',': , [0/1] {38} + ¦ ¦--expr: { + + [0/0] {39} + ¦ ¦ ¦--'{': { [0/2] {40} + ¦ ¦ °--'}': } [2/0] {41} + ¦ ¦--',': , [0/1] {42} + ¦ ¦--expr: a + b [0/0] {43} + ¦ ¦ ¦--expr: a [0/1] {45} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {44} + ¦ ¦ ¦--'+': + [0/1] {46} + ¦ ¦ °--expr: b [0/0] {48} + ¦ ¦ °--SYMBOL: b [0/0] {47} + ¦ ¦--',': , [0/1] {49} + ¦ ¦--expr: { + [0/0] {50} + ¦ ¦ ¦--'{': { [0/4] {51} + ¦ ¦ ¦--expr: s(x = [1/2] {52} + ¦ ¦ ¦ ¦--expr: s [0/0] {54} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: s [0/0] {53} + ¦ ¦ ¦ ¦--'(': ( [0/0] {55} + ¦ ¦ ¦ ¦--SYMBOL_SUB: x [0/1] {56} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {57} + ¦ ¦ ¦ ¦--expr: sd [0/0] {59} + ¦ ¦ ¦ ¦ °--SYMBOL: sd [0/0] {58} + ¦ ¦ ¦ °--')': ) [0/0] {60} + ¦ ¦ °--'}': } [1/0] {61} + ¦ °--')': ) [1/0] {62} + ¦--expr: test( [2/0] {63} + ¦ ¦--expr: test [0/0] {65} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: test [0/0] {64} + ¦ ¦--'(': ( [0/2] {66} + ¦ ¦--expr: "x" [1/0] {68} + ¦ ¦ °--STR_CONST: "x" [0/0] {67} + ¦ ¦--',': , [0/2] {69} + ¦ ¦--expr: { + + [1/0] {70} + ¦ ¦ ¦--'{': { [0/2] {71} + ¦ ¦ °--'}': } [2/0] {72} + ¦ ¦--',': , [0/2] {73} + ¦ ¦--expr: a + b [1/0] {74} + ¦ ¦ ¦--expr: a [0/1] {76} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {75} + ¦ ¦ ¦--'+': + [0/1] {77} + ¦ ¦ °--expr: b [0/0] {79} + ¦ ¦ °--SYMBOL: b [0/0] {78} + ¦ ¦--',': , [0/1] {80} + ¦ ¦--expr: { + [0/0] 
{81} + ¦ ¦ ¦--'{': { [0/4] {82} + ¦ ¦ ¦--expr: s(x = [1/2] {83} + ¦ ¦ ¦ ¦--expr: s [0/0] {85} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: s [0/0] {84} + ¦ ¦ ¦ ¦--'(': ( [0/0] {86} + ¦ ¦ ¦ ¦--SYMBOL_SUB: x [0/1] {87} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {88} + ¦ ¦ ¦ ¦--expr: sd [0/0] {90} + ¦ ¦ ¦ ¦ °--SYMBOL: sd [0/0] {89} + ¦ ¦ ¦ °--')': ) [0/0] {91} + ¦ ¦ °--'}': } [1/0] {92} + ¦ °--')': ) [1/0] {93} + ¦--expr: test( [3/0] {94} + ¦ ¦--expr: test [0/0] {96} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: test [0/0] {95} + ¦ ¦--'(': ( [0/2] {97} + ¦ ¦--expr: "x" [1/0] {99} + ¦ ¦ °--STR_CONST: "x" [0/0] {98} + ¦ ¦--',': , [0/2] {100} + ¦ ¦--expr: { + + [1/0] {101} + ¦ ¦ ¦--'{': { [0/2] {102} + ¦ ¦ °--'}': } [2/0] {103} + ¦ ¦--',': , [0/2] {104} + ¦ ¦--expr: a + b [1/0] {105} + ¦ ¦ ¦--expr: a [0/1] {107} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {106} + ¦ ¦ ¦--'+': + [0/1] {108} + ¦ ¦ °--expr: b [0/0] {110} + ¦ ¦ °--SYMBOL: b [0/0] {109} + ¦ ¦--',': , [0/2] {111} + ¦ ¦--expr: { + [1/0] {112} + ¦ ¦ ¦--'{': { [0/4] {113} + ¦ ¦ ¦--expr: s(x = [1/2] {114} + ¦ ¦ ¦ ¦--expr: s [0/0] {116} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: s [0/0] {115} + ¦ ¦ ¦ ¦--'(': ( [0/0] {117} + ¦ ¦ ¦ ¦--SYMBOL_SUB: x [0/1] {118} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {119} + ¦ ¦ ¦ ¦--expr: sd [0/0] {121} + ¦ ¦ ¦ ¦ °--SYMBOL: sd [0/0] {120} + ¦ ¦ ¦ °--')': ) [0/0] {122} + ¦ ¦ °--'}': } [1/0] {123} + ¦ °--')': ) [1/0] {124} + ¦--expr: test( [2/0] {125} + ¦ ¦--expr: test [0/0] {127} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: test [0/0] {126} + ¦ ¦--'(': ( [0/2] {128} + ¦ ¦--expr: "x" [1/0] {130} + ¦ ¦ °--STR_CONST: "x" [0/0] {129} + ¦ ¦--',': , [0/2] {131} + ¦ ¦--expr: { + + [1/0] {132} + ¦ ¦ ¦--'{': { [0/2] {133} + ¦ ¦ °--'}': } [2/0] {134} + ¦ ¦--',': , [0/1] {135} + ¦ ¦--COMMENT: # h [0/2] {136} + ¦ ¦--expr: a + b [1/0] {137} + ¦ ¦ ¦--expr: a [0/1] {139} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {138} + ¦ ¦ ¦--'+': + [0/1] {140} + ¦ ¦ °--expr: b [0/0] {142} + ¦ ¦ °--SYMBOL: b [0/0] {141} + ¦ ¦--',': , [0/1] {143} + ¦ ¦--expr: { + [0/0] {144} + ¦ ¦ ¦--'{': { [0/4] {145} + ¦ ¦ ¦--expr: s(x = [1/2] {146} + ¦ ¦ ¦ ¦--expr: s [0/0] {148} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: s [0/0] {147} + ¦ ¦ ¦ ¦--'(': ( [0/0] {149} + ¦ ¦ ¦ ¦--SYMBOL_SUB: x [0/1] {150} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {151} + ¦ ¦ ¦ ¦--expr: sd [0/0] {153} + ¦ ¦ ¦ ¦ °--SYMBOL: sd [0/0] {152} + ¦ ¦ ¦ °--')': ) [0/0] {154} + ¦ ¦ °--'}': } [1/0] {155} + ¦ °--')': ) [1/0] {156} + ¦--expr: test( [2/0] {157} + ¦ ¦--expr: test [0/0] {159} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: test [0/0] {158} + ¦ ¦--'(': ( [0/2] {160} + ¦ ¦--expr: "x" [1/0] {162} + ¦ ¦ °--STR_CONST: "x" [0/0] {161} + ¦ ¦--',': , [0/2] {163} + ¦ ¦--expr: { + + [1/0] {164} + ¦ ¦ ¦--'{': { [0/2] {165} + ¦ ¦ °--'}': } [2/0] {166} + ¦ ¦--',': , [0/1] {167} + ¦ ¦--COMMENT: # h [0/2] {168} + ¦ ¦--expr: a + b [1/0] {169} + ¦ ¦ ¦--expr: a [0/1] {171} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {170} + ¦ ¦ ¦--'+': + [0/1] {172} + ¦ ¦ °--expr: b [0/0] {174} + ¦ ¦ °--SYMBOL: b [0/0] {173} + ¦ ¦--',': , [0/2] {175} + ¦ ¦--COMMENT: # k [1/2] {176} + ¦ ¦--expr: { + [1/0] {177} + ¦ ¦ ¦--'{': { [0/4] {178} + ¦ ¦ ¦--expr: s(x = [1/2] {179} + ¦ ¦ ¦ ¦--expr: s [0/0] {181} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: s [0/0] {180} + ¦ ¦ ¦ ¦--'(': ( [0/0] {182} + ¦ ¦ ¦ ¦--SYMBOL_SUB: x [0/1] {183} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {184} + ¦ ¦ ¦ ¦--expr: sd [0/0] {186} + ¦ ¦ ¦ ¦ °--SYMBOL: sd [0/0] {185} + ¦ ¦ ¦ °--')': ) [0/0] {187} + ¦ ¦ °--'}': } [1/0] {188} + ¦ °--')': ) [1/0] {189} + ¦--expr: test( [2/0] {190} + ¦ ¦--expr: test [0/0] {192} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: test [0/0] {191} + ¦ ¦--'(': ( [0/2] {193} + ¦ ¦--expr: "x" [1/0] {195} + ¦ ¦ 
°--STR_CONST: "x" [0/0] {194} + ¦ ¦--',': , [0/2] {196} + ¦ ¦--expr: { + + [1/0] {197} + ¦ ¦ ¦--'{': { [0/2] {198} + ¦ ¦ °--'}': } [2/0] {199} + ¦ ¦--',': , [0/2] {200} + ¦ ¦--expr: a + b [1/0] {201} + ¦ ¦ ¦--expr: a [0/1] {203} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {202} + ¦ ¦ ¦--'+': + [0/1] {204} + ¦ ¦ °--expr: b [0/0] {206} + ¦ ¦ °--SYMBOL: b [0/0] {205} + ¦ ¦--',': , [0/2] {207} + ¦ ¦--COMMENT: # k [0/2] {208} + ¦ ¦--expr: { + [1/0] {209} + ¦ ¦ ¦--'{': { [0/4] {210} + ¦ ¦ ¦--expr: s(x = [1/2] {211} + ¦ ¦ ¦ ¦--expr: s [0/0] {213} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: s [0/0] {212} + ¦ ¦ ¦ ¦--'(': ( [0/0] {214} + ¦ ¦ ¦ ¦--SYMBOL_SUB: x [0/1] {215} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {216} + ¦ ¦ ¦ ¦--expr: sd [0/0] {218} + ¦ ¦ ¦ ¦ °--SYMBOL: sd [0/0] {217} + ¦ ¦ ¦ °--')': ) [0/0] {219} + ¦ ¦ °--'}': } [1/0] {220} + ¦ °--')': ) [1/0] {221} + ¦--expr: tetst [2/0] {222} + ¦ ¦--expr: tetst [0/0] {224} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: tetst [0/0] {223} + ¦ ¦--'(': ( [0/2] {225} + ¦ ¦--expr: "x" [1/0] {227} + ¦ ¦ °--STR_CONST: "x" [0/0] {226} + ¦ ¦--',': , [0/2] {228} + ¦ ¦--expr: { + [1/0] {229} + ¦ ¦ ¦--'{': { [0/4] {230} + ¦ ¦ ¦--expr: x [1/2] {232} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {231} + ¦ ¦ °--'}': } [1/0] {233} + ¦ ¦--',': , [0/1] {234} + ¦ ¦--expr: 1 + + [0/0] {235} + ¦ ¦ ¦--expr: 1 [0/1] {237} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {236} + ¦ ¦ ¦--'+': + [0/1] {238} + ¦ ¦ °--expr: +1 [0/0] {239} + ¦ ¦ ¦--'+': + [0/0] {240} + ¦ ¦ °--expr: 1 [0/0] {242} + ¦ ¦ °--NUM_CONST: 1 [0/0] {241} + ¦ °--')': ) [1/0] {243} + ¦--expr: while [2/0] {244} + ¦ ¦--WHILE: while [0/1] {245} + ¦ ¦--'(': ( [0/0] {246} + ¦ ¦--expr: { + x [0/0] {247} + ¦ ¦ ¦--'{': { [0/2] {248} + ¦ ¦ ¦--expr: x [1/0] {250} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {249} + ¦ ¦ °--'}': } [1/0] {251} + ¦ ¦--')': ) [0/1] {252} + ¦ °--expr: { + f [0/0] {253} + ¦ ¦--'{': { [0/2] {254} + ¦ ¦--expr: f() [1/0] {255} + ¦ ¦ ¦--expr: f [0/0] {257} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: f [0/0] {256} + ¦ ¦ ¦--'(': ( [0/0] {258} + ¦ ¦ °--')': ) [0/0] {259} + ¦ °--'}': } [1/0] {260} + ¦--expr: while [2/0] {261} + ¦ ¦--WHILE: while [0/1] {262} + ¦ ¦--'(': ( [0/0] {263} + ¦ ¦--expr: { + x [0/0] {264} + ¦ ¦ ¦--'{': { [0/2] {265} + ¦ ¦ ¦--expr: x [1/0] {267} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {266} + ¦ ¦ °--'}': } [1/0] {268} + ¦ ¦--')': ) [1/1] {269} + ¦ °--expr: { + f [0/0] {270} + ¦ ¦--'{': { [0/2] {271} + ¦ ¦--expr: f() [1/0] {272} + ¦ ¦ ¦--expr: f [0/0] {274} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: f [0/0] {273} + ¦ ¦ ¦--'(': ( [0/0] {275} + ¦ ¦ °--')': ) [0/0] {276} + ¦ °--'}': } [1/0] {277} + ¦--expr: while [2/0] {278} + ¦ ¦--WHILE: while [0/1] {279} + ¦ ¦--'(': ( [0/2] {280} + ¦ ¦--expr: { + x [1/0] {281} + ¦ ¦ ¦--'{': { [0/2] {282} + ¦ ¦ ¦--expr: x [1/0] {284} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {283} + ¦ ¦ °--'}': } [1/0] {285} + ¦ ¦--')': ) [0/1] {286} + ¦ °--expr: { + f [0/0] {287} + ¦ ¦--'{': { [0/2] {288} + ¦ ¦--expr: f() [1/0] {289} + ¦ ¦ ¦--expr: f [0/0] {291} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: f [0/0] {290} + ¦ ¦ ¦--'(': ( [0/0] {292} + ¦ ¦ °--')': ) [0/0] {293} + ¦ °--'}': } [1/0] {294} + °--expr: while [2/0] {295} + ¦--WHILE: while [0/1] {296} + ¦--'(': ( [0/2] {297} + ¦--expr: {x +} [1/0] {298} + ¦ ¦--'{': { [0/0] {299} + ¦ ¦--expr: x [0/0] {301} + ¦ ¦ °--SYMBOL: x [0/0] {300} + ¦ °--'}': } [1/0] {302} + ¦--')': ) [0/1] {303} + °--expr: { + f [0/0] {304} + ¦--'{': { [0/2] {305} + ¦--expr: f() [1/0] {306} + ¦ ¦--expr: f [0/0] {308} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: f [0/0] {307} + ¦ ¦--'(': ( [0/0] {309} + ¦ °--')': ) [0/0] {310} + °--'}': } [1/0] {311} diff --git 
a/tests/testthat/line_breaks_and_other/braces-fun-calls2-out.R b/tests/testthat/line_breaks_and_other/braces-fun-calls2-out.R new file mode 100644 index 000000000..14e0fadc3 --- /dev/null +++ b/tests/testthat/line_breaks_and_other/braces-fun-calls2-out.R @@ -0,0 +1,110 @@ +test( + "x", + { + + }, + a + b, + { + s(x = sd) + } +) + +test( + "x", + { + + }, + a + b, + { + s(x = sd) + } +) + +test( + "x", + { + + }, + a + b, + { + s(x = sd) + } +) + + +test( + "x", + { + + }, + a + b, + { + s(x = sd) + } +) + +test( + "x", + { + + }, # h + a + b, + { + s(x = sd) + } +) + +test( + "x", + { + + }, # h + a + b, + # k + { + s(x = sd) + } +) + +test( + "x", + { + + }, + a + b, # k + { + s(x = sd) + } +) + +tetst( + "x", + { + x + }, + 1 + +1 +) + +while ({ + x +}) { + f() +} + +while ({ + x +}) { + f() +} + +while ({ + x +}) { + f() +} + +while ({ + x +}) { + f() +} diff --git a/tests/testthat/line_breaks_and_other/comma-in.R b/tests/testthat/line_breaks_and_other/comma-in.R new file mode 100644 index 000000000..dba179386 --- /dev/null +++ b/tests/testthat/line_breaks_and_other/comma-in.R @@ -0,0 +1,20 @@ +call(a, + b + , c) + +call(a, b + , + c) + +call(a,) +call(a, +) + +call(a + ,) + +mpg %>% + summarise(avg_cty = mean(cty) +, avg_hwy = mean(hwy) +, n = n() +, n_class = n_distinct(class)) diff --git a/tests/testthat/line_breaks_and_other/comma-in_tree b/tests/testthat/line_breaks_and_other/comma-in_tree new file mode 100644 index 000000000..92771fe7c --- /dev/null +++ b/tests/testthat/line_breaks_and_other/comma-in_tree @@ -0,0 +1,97 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: call( [0/0] {1} + ¦ ¦--expr: call [0/0] {3} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {2} + ¦ ¦--'(': ( [0/0] {4} + ¦ ¦--expr: a [0/0] {6} + ¦ ¦ °--SYMBOL: a [0/0] {5} + ¦ ¦--',': , [0/5] {7} + ¦ ¦--expr: b [1/5] {9} + ¦ ¦ °--SYMBOL: b [0/0] {8} + ¦ ¦--',': , [1/1] {10} + ¦ ¦--expr: c [0/0] {12} + ¦ ¦ °--SYMBOL: c [0/0] {11} + ¦ °--')': ) [0/0] {13} + ¦--expr: call( [2/0] {14} + ¦ ¦--expr: call [0/0] {16} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {15} + ¦ ¦--'(': ( [0/0] {17} + ¦ ¦--expr: a [0/0] {19} + ¦ ¦ °--SYMBOL: a [0/0] {18} + ¦ ¦--',': , [0/1] {20} + ¦ ¦--expr: b [0/5] {22} + ¦ ¦ °--SYMBOL: b [0/0] {21} + ¦ ¦--',': , [1/5] {23} + ¦ ¦--expr: c [1/0] {25} + ¦ ¦ °--SYMBOL: c [0/0] {24} + ¦ °--')': ) [0/0] {26} + ¦--expr: call( [2/0] {27} + ¦ ¦--expr: call [0/0] {29} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {28} + ¦ ¦--'(': ( [0/0] {30} + ¦ ¦--expr: a [0/0] {32} + ¦ ¦ °--SYMBOL: a [0/0] {31} + ¦ ¦--',': , [0/0] {33} + ¦ °--')': ) [0/0] {34} + ¦--expr: call( [1/0] {35} + ¦ ¦--expr: call [0/0] {37} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {36} + ¦ ¦--'(': ( [0/0] {38} + ¦ ¦--expr: a [0/0] {40} + ¦ ¦ °--SYMBOL: a [0/0] {39} + ¦ ¦--',': , [0/0] {41} + ¦ °--')': ) [1/0] {42} + ¦--expr: call( [2/0] {43} + ¦ ¦--expr: call [0/0] {45} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {44} + ¦ ¦--'(': ( [0/0] {46} + ¦ ¦--expr: a [0/5] {48} + ¦ ¦ °--SYMBOL: a [0/0] {47} + ¦ ¦--',': , [1/0] {49} + ¦ °--')': ) [0/0] {50} + °--expr: mpg % [2/0] {51} + ¦--expr: mpg [0/1] {53} + ¦ °--SYMBOL: mpg [0/0] {52} + ¦--SPECIAL-PIPE: %>% [0/4] {54} + °--expr: summa [1/0] {55} + ¦--expr: summa [0/0] {57} + ¦ °--SYMBOL_FUNCTION_CALL: summa [0/0] {56} + ¦--'(': ( [0/0] {58} + ¦--SYMBOL_SUB: avg_c [0/1] {59} + ¦--EQ_SUB: = [0/1] {60} + ¦--expr: mean( [0/0] {61} + ¦ ¦--expr: mean [0/0] {63} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: mean [0/0] {62} + ¦ ¦--'(': ( [0/0] {64} + ¦ ¦--expr: cty [0/0] {66} + ¦ ¦ °--SYMBOL: cty [0/0] {65} + ¦ 
°--')': ) [0/0] {67} + ¦--',': , [1/1] {68} + ¦--SYMBOL_SUB: avg_h [0/1] {69} + ¦--EQ_SUB: = [0/1] {70} + ¦--expr: mean( [0/0] {71} + ¦ ¦--expr: mean [0/0] {73} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: mean [0/0] {72} + ¦ ¦--'(': ( [0/0] {74} + ¦ ¦--expr: hwy [0/0] {76} + ¦ ¦ °--SYMBOL: hwy [0/0] {75} + ¦ °--')': ) [0/0] {77} + ¦--',': , [1/1] {78} + ¦--SYMBOL_SUB: n [0/1] {79} + ¦--EQ_SUB: = [0/1] {80} + ¦--expr: n() [0/0] {81} + ¦ ¦--expr: n [0/0] {83} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: n [0/0] {82} + ¦ ¦--'(': ( [0/0] {84} + ¦ °--')': ) [0/0] {85} + ¦--',': , [1/1] {86} + ¦--SYMBOL_SUB: n_cla [0/1] {87} + ¦--EQ_SUB: = [0/1] {88} + ¦--expr: n_dis [0/0] {89} + ¦ ¦--expr: n_dis [0/0] {91} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: n_dis [0/0] {90} + ¦ ¦--'(': ( [0/0] {92} + ¦ ¦--expr: class [0/0] {94} + ¦ ¦ °--SYMBOL: class [0/0] {93} + ¦ °--')': ) [0/0] {95} + °--')': ) [0/0] {96} diff --git a/tests/testthat/line_breaks_and_other/comma-out.R b/tests/testthat/line_breaks_and_other/comma-out.R new file mode 100644 index 000000000..826ef5d41 --- /dev/null +++ b/tests/testthat/line_breaks_and_other/comma-out.R @@ -0,0 +1,23 @@ +call( + a, + b, + c +) + +call( + a, b, + c +) + +call(a, ) +call(a, ) + +call(a, ) + +mpg %>% + summarise( + avg_cty = mean(cty), + avg_hwy = mean(hwy), + n = n(), + n_class = n_distinct(class) + ) diff --git a/tests/testthat/line_breaks_and_other/comment-around-curly-in.R b/tests/testthat/line_breaks_and_other/comment-around-curly-in.R new file mode 100644 index 000000000..c664d7ba2 --- /dev/null +++ b/tests/testthat/line_breaks_and_other/comment-around-curly-in.R @@ -0,0 +1,2 @@ +X_and_F_symbol_linter <- function() { # nolint: object_name. +} diff --git a/tests/testthat/line_breaks_and_other/comment-around-curly-in_tree b/tests/testthat/line_breaks_and_other/comment-around-curly-in_tree new file mode 100644 index 000000000..7da00a24f --- /dev/null +++ b/tests/testthat/line_breaks_and_other/comment-around-curly-in_tree @@ -0,0 +1,13 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + °--expr: X_and [0/0] {1} + ¦--expr: X_and [0/1] {3} + ¦ °--SYMBOL: X_and [0/0] {2} + ¦--LEFT_ASSIGN: <- [0/1] {4} + °--expr: funct [0/0] {5} + ¦--FUNCTION: funct [0/0] {6} + ¦--'(': ( [0/0] {7} + ¦--')': ) [0/1] {8} + °--expr: { # n [0/0] {9} + ¦--'{': { [0/1] {10} + ¦--COMMENT: # nol [0/0] {11} + °--'}': } [1/0] {12} diff --git a/tests/testthat/line_breaks_and_other/comment-around-curly-out.R b/tests/testthat/line_breaks_and_other/comment-around-curly-out.R new file mode 100644 index 000000000..c664d7ba2 --- /dev/null +++ b/tests/testthat/line_breaks_and_other/comment-around-curly-out.R @@ -0,0 +1,2 @@ +X_and_F_symbol_linter <- function() { # nolint: object_name. 
+} diff --git a/tests/testthat/line_breaks_and_other/curly-brace-edge-in.R b/tests/testthat/line_breaks_and_other/curly-brace-edge-in.R new file mode 100644 index 000000000..80d006469 --- /dev/null +++ b/tests/testthat/line_breaks_and_other/curly-brace-edge-in.R @@ -0,0 +1,17 @@ +function(y = {}) NULL + +function(y = + {}) NULL + +function(yyy= {1}) { + 1 +} + +function(yyy= {1 + }) { + 1 +} + +f1 <- function(x = {1}, y = 0) { + c(x, y) + } diff --git a/tests/testthat/line_breaks_and_other/curly-brace-edge-in_tree b/tests/testthat/line_breaks_and_other/curly-brace-edge-in_tree new file mode 100644 index 000000000..a8086c1ab --- /dev/null +++ b/tests/testthat/line_breaks_and_other/curly-brace-edge-in_tree @@ -0,0 +1,92 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: funct [0/0] {1} + ¦ ¦--FUNCTION: funct [0/0] {2} + ¦ ¦--'(': ( [0/0] {3} + ¦ ¦--SYMBOL_FORMALS: y [0/1] {4} + ¦ ¦--EQ_FORMALS: = [0/1] {5} + ¦ ¦--expr: {} [0/0] {6} + ¦ ¦ ¦--'{': { [0/0] {7} + ¦ ¦ °--'}': } [0/0] {8} + ¦ ¦--')': ) [0/1] {9} + ¦ °--expr: NULL [0/0] {11} + ¦ °--NULL_CONST: NULL [0/0] {10} + ¦--expr: funct [2/0] {12} + ¦ ¦--FUNCTION: funct [0/0] {13} + ¦ ¦--'(': ( [0/0] {14} + ¦ ¦--SYMBOL_FORMALS: y [0/1] {15} + ¦ ¦--EQ_FORMALS: = [0/11] {16} + ¦ ¦--expr: {} [1/0] {17} + ¦ ¦ ¦--'{': { [0/0] {18} + ¦ ¦ °--'}': } [0/0] {19} + ¦ ¦--')': ) [0/1] {20} + ¦ °--expr: NULL [0/0] {22} + ¦ °--NULL_CONST: NULL [0/0] {21} + ¦--expr: funct [2/0] {23} + ¦ ¦--FUNCTION: funct [0/0] {24} + ¦ ¦--'(': ( [0/0] {25} + ¦ ¦--SYMBOL_FORMALS: yyy [0/0] {26} + ¦ ¦--EQ_FORMALS: = [0/1] {27} + ¦ ¦--expr: {1} [0/0] {28} + ¦ ¦ ¦--'{': { [0/0] {29} + ¦ ¦ ¦--expr: 1 [0/0] {31} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {30} + ¦ ¦ °--'}': } [0/0] {32} + ¦ ¦--')': ) [0/1] {33} + ¦ °--expr: { + 1 [0/0] {34} + ¦ ¦--'{': { [0/2] {35} + ¦ ¦--expr: 1 [1/0] {37} + ¦ ¦ °--NUM_CONST: 1 [0/0] {36} + ¦ °--'}': } [1/0] {38} + ¦--expr: funct [2/0] {39} + ¦ ¦--FUNCTION: funct [0/0] {40} + ¦ ¦--'(': ( [0/0] {41} + ¦ ¦--SYMBOL_FORMALS: yyy [0/0] {42} + ¦ ¦--EQ_FORMALS: = [0/1] {43} + ¦ ¦--expr: {1 + [0/0] {44} + ¦ ¦ ¦--'{': { [0/0] {45} + ¦ ¦ ¦--expr: 1 [0/2] {47} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {46} + ¦ ¦ °--'}': } [1/0] {48} + ¦ ¦--')': ) [0/1] {49} + ¦ °--expr: { + 1 [0/0] {50} + ¦ ¦--'{': { [0/2] {51} + ¦ ¦--expr: 1 [1/0] {53} + ¦ ¦ °--NUM_CONST: 1 [0/0] {52} + ¦ °--'}': } [1/0] {54} + °--expr: f1 <- [2/0] {55} + ¦--expr: f1 [0/1] {57} + ¦ °--SYMBOL: f1 [0/0] {56} + ¦--LEFT_ASSIGN: <- [0/1] {58} + °--expr: funct [0/0] {59} + ¦--FUNCTION: funct [0/0] {60} + ¦--'(': ( [0/0] {61} + ¦--SYMBOL_FORMALS: x [0/1] {62} + ¦--EQ_FORMALS: = [0/1] {63} + ¦--expr: {1} [0/0] {64} + ¦ ¦--'{': { [0/0] {65} + ¦ ¦--expr: 1 [0/0] {67} + ¦ ¦ °--NUM_CONST: 1 [0/0] {66} + ¦ °--'}': } [0/0] {68} + ¦--',': , [0/1] {69} + ¦--SYMBOL_FORMALS: y [0/1] {70} + ¦--EQ_FORMALS: = [0/1] {71} + ¦--expr: 0 [0/0] {73} + ¦ °--NUM_CONST: 0 [0/0] {72} + ¦--')': ) [0/1] {74} + °--expr: { + [0/0] {75} + ¦--'{': { [0/4] {76} + ¦--expr: c(x, [1/3] {77} + ¦ ¦--expr: c [0/0] {79} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {78} + ¦ ¦--'(': ( [0/0] {80} + ¦ ¦--expr: x [0/0] {82} + ¦ ¦ °--SYMBOL: x [0/0] {81} + ¦ ¦--',': , [0/1] {83} + ¦ ¦--expr: y [0/0] {85} + ¦ ¦ °--SYMBOL: y [0/0] {84} + ¦ °--')': ) [0/0] {86} + °--'}': } [1/0] {87} diff --git a/tests/testthat/line_breaks_and_other/curly-brace-edge-out.R b/tests/testthat/line_breaks_and_other/curly-brace-edge-out.R new file mode 100644 index 000000000..4e88ef2ec --- /dev/null +++ b/tests/testthat/line_breaks_and_other/curly-brace-edge-out.R @@ -0,0 
+1,24 @@ +function(y = {}) NULL + +function(y = + {}) { + NULL +} + +function(yyy = { + 1 + }) { + 1 +} + +function(yyy = { + 1 + }) { + 1 +} + +f1 <- function(x = { + 1 + }, y = 0) { + c(x, y) +} diff --git a/tests/testthat/line_breaks_and_other/curly-in.R b/tests/testthat/line_breaks_and_other/curly-in.R index 9780a787f..9f44b5716 100644 --- a/tests/testthat/line_breaks_and_other/curly-in.R +++ b/tests/testthat/line_breaks_and_other/curly-in.R @@ -16,7 +16,7 @@ test_that("I am here", if (x > 3) { "x" } -# A } should always go on its own line, unless it’s followed by else or ). +# A } should always go on its own line, unless it's followed by else or ). if (x > 3) { "x"} @@ -31,3 +31,13 @@ test_that("I am here", { a_test(x) } ) + +test_that( + desc = "bla", + code = { + + + + # comment + expect_equal(1 + 1, 2) + }) diff --git a/tests/testthat/line_breaks_and_other/curly-in_tree b/tests/testthat/line_breaks_and_other/curly-in_tree index 6ee2720ba..ca61fa15b 100644 --- a/tests/testthat/line_breaks_and_other/curly-in_tree +++ b/tests/testthat/line_breaks_and_other/curly-in_tree @@ -1,113 +1,154 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) ¦--COMMENT: # { n [0/0] {1} - ¦--expr: [1/0] {2} + ¦--expr: if (y [1/0] {2} ¦ ¦--IF: if [0/1] {3} ¦ ¦--'(': ( [0/0] {4} - ¦ ¦--expr: [0/0] {5} - ¦ ¦ ¦--expr: [0/1] {7} + ¦ ¦--expr: y == [0/0] {5} + ¦ ¦ ¦--expr: y [0/1] {7} ¦ ¦ ¦ °--SYMBOL: y [0/0] {6} ¦ ¦ ¦--EQ: == [0/1] {8} - ¦ ¦ °--expr: [0/0] {10} + ¦ ¦ °--expr: 0 [0/0] {10} ¦ ¦ °--NUM_CONST: 0 [0/0] {9} ¦ ¦--')': ) [0/0] {11} - ¦ ¦--expr: [1/1] {12} + ¦ ¦--expr: { + 1 [1/1] {12} ¦ ¦ ¦--'{': { [0/2] {13} - ¦ ¦ ¦--expr: [1/0] {15} + ¦ ¦ ¦--expr: 1 [1/0] {15} ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {14} ¦ ¦ °--'}': } [1/0] {16} ¦ ¦--ELSE: else [0/1] {17} - ¦ °--expr: [0/0] {18} + ¦ °--expr: { + 2 [0/0] {18} ¦ ¦--'{': { [0/2] {19} - ¦ ¦--expr: [1/0] {21} + ¦ ¦--expr: 2 [1/0] {21} ¦ ¦ °--NUM_CONST: 2 [0/0] {20} ¦ °--'}': } [1/0] {22} - ¦--expr: [2/0] {23} - ¦ ¦--expr: [0/0] {25} + ¦--expr: test_ [2/0] {23} + ¦ ¦--expr: test_ [0/0] {25} ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {24} ¦ ¦--'(': ( [0/0] {26} - ¦ ¦--expr: [0/0] {28} + ¦ ¦--expr: "I am [0/0] {28} ¦ ¦ °--STR_CONST: "I am [0/0] {27} ¦ ¦--',': , [0/10] {29} - ¦ ¦--expr: [1/0] {30} + ¦ ¦--expr: { + [1/0] {30} ¦ ¦ ¦--'{': { [0/12] {31} - ¦ ¦ ¦--expr: [1/10] {32} - ¦ ¦ ¦ ¦--expr: [0/0] {34} + ¦ ¦ ¦--expr: a_tes [1/10] {32} + ¦ ¦ ¦ ¦--expr: a_tes [0/0] {34} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: a_tes [0/0] {33} ¦ ¦ ¦ ¦--'(': ( [0/0] {35} - ¦ ¦ ¦ ¦--expr: [0/0] {37} + ¦ ¦ ¦ ¦--expr: x [0/0] {37} ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {36} ¦ ¦ ¦ °--')': ) [0/0] {38} ¦ ¦ °--'}': } [1/0] {39} ¦ °--')': ) [0/0] {40} ¦--COMMENT: # A { [3/0] {41} - ¦--expr: [1/0] {42} + ¦--expr: if (x [1/0] {42} ¦ ¦--IF: if [0/1] {43} ¦ ¦--'(': ( [0/0] {44} - ¦ ¦--expr: [0/0] {45} - ¦ ¦ ¦--expr: [0/1] {47} + ¦ ¦--expr: x > 3 [0/0] {45} + ¦ ¦ ¦--expr: x [0/1] {47} ¦ ¦ ¦ °--SYMBOL: x [0/0] {46} ¦ ¦ ¦--GT: > [0/1] {48} - ¦ ¦ °--expr: [0/0] {50} + ¦ ¦ °--expr: 3 [0/0] {50} ¦ ¦ °--NUM_CONST: 3 [0/0] {49} ¦ ¦--')': ) [0/1] {51} - ¦ °--expr: [0/0] {52} + ¦ °--expr: { "x" [0/0] {52} ¦ ¦--'{': { [0/1] {53} - ¦ ¦--expr: [0/0] {55} + ¦ ¦--expr: "x" [0/0] {55} ¦ ¦ °--STR_CONST: "x" [0/0] {54} ¦ °--'}': } [1/0] {56} ¦--COMMENT: # A } [2/0] {57} - ¦--expr: [1/0] {58} + ¦--expr: if (x [1/0] {58} ¦ ¦--IF: if [0/1] {59} ¦ ¦--'(': ( [0/0] {60} - ¦ ¦--expr: [0/0] {61} - ¦ ¦ ¦--expr: [0/1] {63} + ¦ ¦--expr: x > 3 [0/0] {61} + ¦ ¦ ¦--expr: x [0/1] {63} ¦ ¦ ¦ °--SYMBOL: x [0/0] {62} ¦ ¦ ¦--GT: > [0/1] {64} - ¦ ¦ 
°--expr: [0/0] {66} + ¦ ¦ °--expr: 3 [0/0] {66} ¦ ¦ °--NUM_CONST: 3 [0/0] {65} ¦ ¦--')': ) [0/1] {67} - ¦ °--expr: [0/0] {68} + ¦ °--expr: { + " [0/0] {68} ¦ ¦--'{': { [0/2] {69} - ¦ ¦--expr: [1/0] {71} + ¦ ¦--expr: "x" [1/0] {71} ¦ ¦ °--STR_CONST: "x" [0/0] {70} ¦ °--'}': } [0/0] {72} ¦--COMMENT: # ELS [2/0] {73} - ¦--expr: [1/0] {74} + ¦--expr: if (1 [1/0] {74} ¦ ¦--IF: if [0/1] {75} ¦ ¦--'(': ( [0/0] {76} - ¦ ¦--expr: [0/0] {77} - ¦ ¦ ¦--expr: [0/1] {79} + ¦ ¦--expr: 1 > 3 [0/0] {77} + ¦ ¦ ¦--expr: 1 [0/1] {79} ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {78} ¦ ¦ ¦--GT: > [0/1] {80} - ¦ ¦ °--expr: [0/0] {82} + ¦ ¦ °--expr: 3 [0/0] {82} ¦ ¦ °--NUM_CONST: 3 [0/0] {81} ¦ ¦--')': ) [0/1] {83} - ¦ ¦--expr: [0/1] {84} + ¦ ¦--expr: { + " [0/1] {84} ¦ ¦ ¦--'{': { [0/2] {85} - ¦ ¦ ¦--expr: [1/0] {87} + ¦ ¦ ¦--expr: "x" [1/0] {87} ¦ ¦ ¦ °--STR_CONST: "x" [0/0] {86} ¦ ¦ °--'}': } [1/0] {88} ¦ ¦--ELSE: else [0/1] {89} - ¦ °--expr: [0/0] {90} + ¦ °--expr: { + " [0/0] {90} ¦ ¦--'{': { [0/2] {91} - ¦ ¦--expr: [1/0] {93} + ¦ ¦--expr: "y" [1/0] {93} ¦ ¦ °--STR_CONST: "y" [0/0] {92} ¦ °--'}': } [1/0] {94} - °--expr: [2/0] {95} - ¦--expr: [0/0] {97} - ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {96} - ¦--'(': ( [0/0] {98} - ¦--expr: [0/0] {100} - ¦ °--STR_CONST: "I am [0/0] {99} - ¦--',': , [0/1] {101} - ¦--expr: [0/0] {102} - ¦ ¦--'{': { [0/2] {103} - ¦ ¦--expr: [1/0] {104} - ¦ ¦ ¦--expr: [0/0] {106} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: a_tes [0/0] {105} - ¦ ¦ ¦--'(': ( [0/0] {107} - ¦ ¦ ¦--expr: [0/0] {109} - ¦ ¦ ¦ °--SYMBOL: x [0/0] {108} - ¦ ¦ °--')': ) [0/0] {110} - ¦ °--'}': } [1/0] {111} - °--')': ) [1/0] {112} + ¦--expr: test_ [2/0] {95} + ¦ ¦--expr: test_ [0/0] {97} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {96} + ¦ ¦--'(': ( [0/0] {98} + ¦ ¦--expr: "I am [0/0] {100} + ¦ ¦ °--STR_CONST: "I am [0/0] {99} + ¦ ¦--',': , [0/1] {101} + ¦ ¦--expr: { + a [0/0] {102} + ¦ ¦ ¦--'{': { [0/2] {103} + ¦ ¦ ¦--expr: a_tes [1/0] {104} + ¦ ¦ ¦ ¦--expr: a_tes [0/0] {106} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: a_tes [0/0] {105} + ¦ ¦ ¦ ¦--'(': ( [0/0] {107} + ¦ ¦ ¦ ¦--expr: x [0/0] {109} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {108} + ¦ ¦ ¦ °--')': ) [0/0] {110} + ¦ ¦ °--'}': } [1/0] {111} + ¦ °--')': ) [1/0] {112} + °--expr: test_ [2/0] {113} + ¦--expr: test_ [0/0] {115} + ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {114} + ¦--'(': ( [0/2] {116} + ¦--SYMBOL_SUB: desc [1/1] {117} + ¦--EQ_SUB: = [0/1] {118} + ¦--expr: "bla" [0/0] {120} + ¦ °--STR_CONST: "bla" [0/0] {119} + ¦--',': , [0/2] {121} + ¦--SYMBOL_SUB: code [1/1] {122} + ¦--EQ_SUB: = [0/1] {123} + ¦--expr: { + + + + [0/0] {124} + ¦ ¦--'{': { [0/4] {125} + ¦ ¦--COMMENT: # com [4/4] {126} + ¦ ¦--expr: expec [1/2] {127} + ¦ ¦ ¦--expr: expec [0/0] {129} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: expec [0/0] {128} + ¦ ¦ ¦--'(': ( [0/0] {130} + ¦ ¦ ¦--expr: 1 + 1 [0/0] {131} + ¦ ¦ ¦ ¦--expr: 1 [0/1] {133} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {132} + ¦ ¦ ¦ ¦--'+': + [0/1] {134} + ¦ ¦ ¦ °--expr: 1 [0/0] {136} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {135} + ¦ ¦ ¦--',': , [0/1] {137} + ¦ ¦ ¦--expr: 2 [0/0] {139} + ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {138} + ¦ ¦ °--')': ) [0/0] {140} + ¦ °--'}': } [1/0] {141} + °--')': ) [0/0] {142} diff --git a/tests/testthat/line_breaks_and_other/curly-out.R b/tests/testthat/line_breaks_and_other/curly-out.R index dd3ff4d23..c55508290 100644 --- a/tests/testthat/line_breaks_and_other/curly-out.R +++ b/tests/testthat/line_breaks_and_other/curly-out.R @@ -15,7 +15,7 @@ if (x > 3) { "x" } -# A } should always go on its own line, unless it’s followed by else or ). 
+# A } should always go on its own line, unless it's followed by else or ). if (x > 3) { "x" } @@ -30,3 +30,11 @@ if (1 > 3) { test_that("I am here", { a_test(x) }) + +test_that( + desc = "bla", + code = { + # comment + expect_equal(1 + 1, 2) + } +) diff --git a/tests/testthat/line_breaks_and_other/edge_comment_and_curly-in_tree b/tests/testthat/line_breaks_and_other/edge_comment_and_curly-in_tree index e6bcca9e6..46de8acd2 100644 --- a/tests/testthat/line_breaks_and_other/edge_comment_and_curly-in_tree +++ b/tests/testthat/line_breaks_and_other/edge_comment_and_curly-in_tree @@ -1,16 +1,17 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/1] {3} + °--expr: a <- [0/0] {1} + ¦--expr: a [0/1] {3} ¦ °--SYMBOL: a [0/0] {2} ¦--LEFT_ASSIGN: <- [0/1] {4} - °--expr: [0/0] {5} + °--expr: funct [0/0] {5} ¦--FUNCTION: funct [0/0] {6} ¦--'(': ( [0/0] {7} ¦--SYMBOL_FORMALS: x [0/0] {8} ¦--')': ) [0/1] {9} ¦--COMMENT: # thi [0/0] {10} - °--expr: [1/0] {11} + °--expr: { + x [1/0] {11} ¦--'{': { [0/2] {12} - ¦--expr: [1/0] {14} + ¦--expr: x [1/0] {14} ¦ °--SYMBOL: x [0/0] {13} °--'}': } [1/0] {15} diff --git a/tests/testthat/line_breaks_and_other/ggplot2-in.R b/tests/testthat/line_breaks_and_other/ggplot2-in.R new file mode 100644 index 000000000..c767dca7a --- /dev/null +++ b/tests/testthat/line_breaks_and_other/ggplot2-in.R @@ -0,0 +1,51 @@ +# don't remove line break +ggplot(data = mtcars, mapping = aes(x = mpg, y = vs)) + + geom_point() + + +# add when unmasked +ggplot(data = mtcars, mapping = aes(x = mpg, y = vs)) + geom_point() + + +# add when masked +ggplot2::ggplot(data = mtcars, mapping = aes(x = mpg, y = vs)) + geom_point() + +# add when masked +ggplot(data = mtcars, mapping = aes(x = mpg, y = vs)) + ggplot2::geom_point() + +# add when comment +ggplot(data = mtcars, mapping = aes(x = mpg, y = vs)) + # comment + ggplot2::geom_point() + g() + + +# add when comment +ggplot(data = mtcars, mapping = aes(x = mpg, y = vs)) + + ggplot2::geom_point() + g() # comment + +# add when comment +ggplot(data = mtcars, mapping = aes(x = mpg, y = vs)) + ggplot2::geom_point() + g() # comment + + +# add when comment +ggplot(data = mtcars, mapping = aes(x = mpg, y = vs)) + + ggplot2::geom_point() + g() + geom_oint() # comment + +# when subsetted involved +x[1]+ c() + +g() + x[1] + +g()[2] + x[1] + +# don't do anything on unary + and function call ++sin(x) + +# within function call +qqjflk( + log(y + 1) + + # sqrt(x1) + + sqrt(x2) + + # sqrt(x3) + + x4 + + sqrt(x5) +) diff --git a/tests/testthat/line_breaks_and_other/ggplot2-in_tree b/tests/testthat/line_breaks_and_other/ggplot2-in_tree new file mode 100644 index 000000000..dd9f48f8a --- /dev/null +++ b/tests/testthat/line_breaks_and_other/ggplot2-in_tree @@ -0,0 +1,416 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: # don [0/0] {1} + ¦--expr: ggplo [1/0] {2} + ¦ ¦--expr: ggplo [0/1] {3} + ¦ ¦ ¦--expr: ggplo [0/0] {5} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: ggplo [0/0] {4} + ¦ ¦ ¦--'(': ( [0/0] {6} + ¦ ¦ ¦--SYMBOL_SUB: data [0/1] {7} + ¦ ¦ ¦--EQ_SUB: = [0/1] {8} + ¦ ¦ ¦--expr: mtcar [0/0] {10} + ¦ ¦ ¦ °--SYMBOL: mtcar [0/0] {9} + ¦ ¦ ¦--',': , [0/1] {11} + ¦ ¦ ¦--SYMBOL_SUB: mappi [0/1] {12} + ¦ ¦ ¦--EQ_SUB: = [0/1] {13} + ¦ ¦ ¦--expr: aes(x [0/0] {14} + ¦ ¦ ¦ ¦--expr: aes [0/0] {16} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: aes [0/0] {15} + ¦ ¦ ¦ ¦--'(': ( [0/0] {17} + ¦ ¦ ¦ ¦--SYMBOL_SUB: x [0/1] {18} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {19} + ¦ ¦ ¦ ¦--expr: mpg [0/0] {21} + ¦ ¦ ¦ ¦ °--SYMBOL: mpg [0/0] {20} + ¦ ¦ ¦ 
¦--',': , [0/1] {22} + ¦ ¦ ¦ ¦--SYMBOL_SUB: y [0/1] {23} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {24} + ¦ ¦ ¦ ¦--expr: vs [0/0] {26} + ¦ ¦ ¦ ¦ °--SYMBOL: vs [0/0] {25} + ¦ ¦ ¦ °--')': ) [0/0] {27} + ¦ ¦ °--')': ) [0/0] {28} + ¦ ¦--'+': + [0/2] {29} + ¦ °--expr: geom_ [1/0] {30} + ¦ ¦--expr: geom_ [0/0] {32} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: geom_ [0/0] {31} + ¦ ¦--'(': ( [0/0] {33} + ¦ °--')': ) [0/0] {34} + ¦--COMMENT: # add [3/0] {35} + ¦--expr: ggplo [1/0] {36} + ¦ ¦--expr: ggplo [0/1] {37} + ¦ ¦ ¦--expr: ggplo [0/0] {39} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: ggplo [0/0] {38} + ¦ ¦ ¦--'(': ( [0/0] {40} + ¦ ¦ ¦--SYMBOL_SUB: data [0/1] {41} + ¦ ¦ ¦--EQ_SUB: = [0/1] {42} + ¦ ¦ ¦--expr: mtcar [0/0] {44} + ¦ ¦ ¦ °--SYMBOL: mtcar [0/0] {43} + ¦ ¦ ¦--',': , [0/1] {45} + ¦ ¦ ¦--SYMBOL_SUB: mappi [0/1] {46} + ¦ ¦ ¦--EQ_SUB: = [0/1] {47} + ¦ ¦ ¦--expr: aes(x [0/0] {48} + ¦ ¦ ¦ ¦--expr: aes [0/0] {50} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: aes [0/0] {49} + ¦ ¦ ¦ ¦--'(': ( [0/0] {51} + ¦ ¦ ¦ ¦--SYMBOL_SUB: x [0/1] {52} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {53} + ¦ ¦ ¦ ¦--expr: mpg [0/0] {55} + ¦ ¦ ¦ ¦ °--SYMBOL: mpg [0/0] {54} + ¦ ¦ ¦ ¦--',': , [0/1] {56} + ¦ ¦ ¦ ¦--SYMBOL_SUB: y [0/1] {57} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {58} + ¦ ¦ ¦ ¦--expr: vs [0/0] {60} + ¦ ¦ ¦ ¦ °--SYMBOL: vs [0/0] {59} + ¦ ¦ ¦ °--')': ) [0/0] {61} + ¦ ¦ °--')': ) [0/0] {62} + ¦ ¦--'+': + [0/1] {63} + ¦ °--expr: geom_ [0/0] {64} + ¦ ¦--expr: geom_ [0/0] {66} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: geom_ [0/0] {65} + ¦ ¦--'(': ( [0/0] {67} + ¦ °--')': ) [0/0] {68} + ¦--COMMENT: # add [3/0] {69} + ¦--expr: ggplo [1/0] {70} + ¦ ¦--expr: ggplo [0/1] {71} + ¦ ¦ ¦--expr: ggplo [0/0] {72} + ¦ ¦ ¦ ¦--SYMBOL_PACKAGE: ggplo [0/0] {73} + ¦ ¦ ¦ ¦--NS_GET: :: [0/0] {74} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: ggplo [0/0] {75} + ¦ ¦ ¦--'(': ( [0/0] {76} + ¦ ¦ ¦--SYMBOL_SUB: data [0/1] {77} + ¦ ¦ ¦--EQ_SUB: = [0/1] {78} + ¦ ¦ ¦--expr: mtcar [0/0] {80} + ¦ ¦ ¦ °--SYMBOL: mtcar [0/0] {79} + ¦ ¦ ¦--',': , [0/1] {81} + ¦ ¦ ¦--SYMBOL_SUB: mappi [0/1] {82} + ¦ ¦ ¦--EQ_SUB: = [0/1] {83} + ¦ ¦ ¦--expr: aes(x [0/0] {84} + ¦ ¦ ¦ ¦--expr: aes [0/0] {86} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: aes [0/0] {85} + ¦ ¦ ¦ ¦--'(': ( [0/0] {87} + ¦ ¦ ¦ ¦--SYMBOL_SUB: x [0/1] {88} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {89} + ¦ ¦ ¦ ¦--expr: mpg [0/0] {91} + ¦ ¦ ¦ ¦ °--SYMBOL: mpg [0/0] {90} + ¦ ¦ ¦ ¦--',': , [0/1] {92} + ¦ ¦ ¦ ¦--SYMBOL_SUB: y [0/1] {93} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {94} + ¦ ¦ ¦ ¦--expr: vs [0/0] {96} + ¦ ¦ ¦ ¦ °--SYMBOL: vs [0/0] {95} + ¦ ¦ ¦ °--')': ) [0/0] {97} + ¦ ¦ °--')': ) [0/0] {98} + ¦ ¦--'+': + [0/1] {99} + ¦ °--expr: geom_ [0/0] {100} + ¦ ¦--expr: geom_ [0/0] {102} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: geom_ [0/0] {101} + ¦ ¦--'(': ( [0/0] {103} + ¦ °--')': ) [0/0] {104} + ¦--COMMENT: # add [2/0] {105} + ¦--expr: ggplo [1/0] {106} + ¦ ¦--expr: ggplo [0/1] {107} + ¦ ¦ ¦--expr: ggplo [0/0] {109} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: ggplo [0/0] {108} + ¦ ¦ ¦--'(': ( [0/0] {110} + ¦ ¦ ¦--SYMBOL_SUB: data [0/1] {111} + ¦ ¦ ¦--EQ_SUB: = [0/1] {112} + ¦ ¦ ¦--expr: mtcar [0/0] {114} + ¦ ¦ ¦ °--SYMBOL: mtcar [0/0] {113} + ¦ ¦ ¦--',': , [0/1] {115} + ¦ ¦ ¦--SYMBOL_SUB: mappi [0/1] {116} + ¦ ¦ ¦--EQ_SUB: = [0/1] {117} + ¦ ¦ ¦--expr: aes(x [0/0] {118} + ¦ ¦ ¦ ¦--expr: aes [0/0] {120} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: aes [0/0] {119} + ¦ ¦ ¦ ¦--'(': ( [0/0] {121} + ¦ ¦ ¦ ¦--SYMBOL_SUB: x [0/1] {122} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {123} + ¦ ¦ ¦ ¦--expr: mpg [0/0] {125} + ¦ ¦ ¦ ¦ °--SYMBOL: mpg [0/0] {124} + ¦ ¦ ¦ ¦--',': , [0/1] {126} + ¦ ¦ ¦ ¦--SYMBOL_SUB: y [0/1] {127} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {128} + ¦ ¦ ¦ ¦--expr: vs [0/0] {130} 
+ ¦ ¦ ¦ ¦ °--SYMBOL: vs [0/0] {129} + ¦ ¦ ¦ °--')': ) [0/0] {131} + ¦ ¦ °--')': ) [0/0] {132} + ¦ ¦--'+': + [0/1] {133} + ¦ °--expr: ggplo [0/0] {134} + ¦ ¦--expr: ggplo [0/0] {135} + ¦ ¦ ¦--SYMBOL_PACKAGE: ggplo [0/0] {136} + ¦ ¦ ¦--NS_GET: :: [0/0] {137} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: geom_ [0/0] {138} + ¦ ¦--'(': ( [0/0] {139} + ¦ °--')': ) [0/0] {140} + ¦--COMMENT: # add [2/0] {141} + ¦--expr: ggplo [1/0] {142} + ¦ ¦--expr: ggplo [0/1] {144} + ¦ ¦ ¦--expr: ggplo [0/0] {146} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: ggplo [0/0] {145} + ¦ ¦ ¦--'(': ( [0/0] {147} + ¦ ¦ ¦--SYMBOL_SUB: data [0/1] {148} + ¦ ¦ ¦--EQ_SUB: = [0/1] {149} + ¦ ¦ ¦--expr: mtcar [0/0] {151} + ¦ ¦ ¦ °--SYMBOL: mtcar [0/0] {150} + ¦ ¦ ¦--',': , [0/1] {152} + ¦ ¦ ¦--SYMBOL_SUB: mappi [0/1] {153} + ¦ ¦ ¦--EQ_SUB: = [0/1] {154} + ¦ ¦ ¦--expr: aes(x [0/0] {155} + ¦ ¦ ¦ ¦--expr: aes [0/0] {157} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: aes [0/0] {156} + ¦ ¦ ¦ ¦--'(': ( [0/0] {158} + ¦ ¦ ¦ ¦--SYMBOL_SUB: x [0/1] {159} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {160} + ¦ ¦ ¦ ¦--expr: mpg [0/0] {162} + ¦ ¦ ¦ ¦ °--SYMBOL: mpg [0/0] {161} + ¦ ¦ ¦ ¦--',': , [0/1] {163} + ¦ ¦ ¦ ¦--SYMBOL_SUB: y [0/1] {164} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {165} + ¦ ¦ ¦ ¦--expr: vs [0/0] {167} + ¦ ¦ ¦ ¦ °--SYMBOL: vs [0/0] {166} + ¦ ¦ ¦ °--')': ) [0/0] {168} + ¦ ¦ °--')': ) [0/0] {169} + ¦ ¦--'+': + [0/1] {170} + ¦ ¦--COMMENT: # com [0/2] {171} + ¦ ¦--expr: ggplo [1/1] {172} + ¦ ¦ ¦--expr: ggplo [0/0] {173} + ¦ ¦ ¦ ¦--SYMBOL_PACKAGE: ggplo [0/0] {174} + ¦ ¦ ¦ ¦--NS_GET: :: [0/0] {175} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: geom_ [0/0] {176} + ¦ ¦ ¦--'(': ( [0/0] {177} + ¦ ¦ °--')': ) [0/0] {178} + ¦ ¦--'+': + [0/1] {179} + ¦ °--expr: g() [0/0] {180} + ¦ ¦--expr: g [0/0] {182} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: g [0/0] {181} + ¦ ¦--'(': ( [0/0] {183} + ¦ °--')': ) [0/0] {184} + ¦--COMMENT: # add [3/0] {185} + ¦--expr: ggplo [1/1] {186} + ¦ ¦--expr: ggplo [0/1] {188} + ¦ ¦ ¦--expr: ggplo [0/0] {190} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: ggplo [0/0] {189} + ¦ ¦ ¦--'(': ( [0/0] {191} + ¦ ¦ ¦--SYMBOL_SUB: data [0/1] {192} + ¦ ¦ ¦--EQ_SUB: = [0/1] {193} + ¦ ¦ ¦--expr: mtcar [0/0] {195} + ¦ ¦ ¦ °--SYMBOL: mtcar [0/0] {194} + ¦ ¦ ¦--',': , [0/1] {196} + ¦ ¦ ¦--SYMBOL_SUB: mappi [0/1] {197} + ¦ ¦ ¦--EQ_SUB: = [0/1] {198} + ¦ ¦ ¦--expr: aes(x [0/0] {199} + ¦ ¦ ¦ ¦--expr: aes [0/0] {201} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: aes [0/0] {200} + ¦ ¦ ¦ ¦--'(': ( [0/0] {202} + ¦ ¦ ¦ ¦--SYMBOL_SUB: x [0/1] {203} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {204} + ¦ ¦ ¦ ¦--expr: mpg [0/0] {206} + ¦ ¦ ¦ ¦ °--SYMBOL: mpg [0/0] {205} + ¦ ¦ ¦ ¦--',': , [0/1] {207} + ¦ ¦ ¦ ¦--SYMBOL_SUB: y [0/1] {208} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {209} + ¦ ¦ ¦ ¦--expr: vs [0/0] {211} + ¦ ¦ ¦ ¦ °--SYMBOL: vs [0/0] {210} + ¦ ¦ ¦ °--')': ) [0/0] {212} + ¦ ¦ °--')': ) [0/0] {213} + ¦ ¦--'+': + [0/2] {214} + ¦ ¦--expr: ggplo [1/1] {215} + ¦ ¦ ¦--expr: ggplo [0/0] {216} + ¦ ¦ ¦ ¦--SYMBOL_PACKAGE: ggplo [0/0] {217} + ¦ ¦ ¦ ¦--NS_GET: :: [0/0] {218} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: geom_ [0/0] {219} + ¦ ¦ ¦--'(': ( [0/0] {220} + ¦ ¦ °--')': ) [0/0] {221} + ¦ ¦--'+': + [0/1] {222} + ¦ °--expr: g() [0/0] {223} + ¦ ¦--expr: g [0/0] {225} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: g [0/0] {224} + ¦ ¦--'(': ( [0/0] {226} + ¦ °--')': ) [0/0] {227} + ¦--COMMENT: # com [0/0] {228} + ¦--COMMENT: # add [2/0] {229} + ¦--expr: ggplo [1/1] {230} + ¦ ¦--expr: ggplo [0/1] {232} + ¦ ¦ ¦--expr: ggplo [0/0] {234} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: ggplo [0/0] {233} + ¦ ¦ ¦--'(': ( [0/0] {235} + ¦ ¦ ¦--SYMBOL_SUB: data [0/1] {236} + ¦ ¦ ¦--EQ_SUB: = [0/1] {237} + ¦ ¦ ¦--expr: mtcar [0/0] {239} + ¦ ¦ 
¦ °--SYMBOL: mtcar [0/0] {238} + ¦ ¦ ¦--',': , [0/1] {240} + ¦ ¦ ¦--SYMBOL_SUB: mappi [0/1] {241} + ¦ ¦ ¦--EQ_SUB: = [0/1] {242} + ¦ ¦ ¦--expr: aes(x [0/0] {243} + ¦ ¦ ¦ ¦--expr: aes [0/0] {245} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: aes [0/0] {244} + ¦ ¦ ¦ ¦--'(': ( [0/0] {246} + ¦ ¦ ¦ ¦--SYMBOL_SUB: x [0/1] {247} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {248} + ¦ ¦ ¦ ¦--expr: mpg [0/0] {250} + ¦ ¦ ¦ ¦ °--SYMBOL: mpg [0/0] {249} + ¦ ¦ ¦ ¦--',': , [0/1] {251} + ¦ ¦ ¦ ¦--SYMBOL_SUB: y [0/1] {252} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {253} + ¦ ¦ ¦ ¦--expr: vs [0/0] {255} + ¦ ¦ ¦ ¦ °--SYMBOL: vs [0/0] {254} + ¦ ¦ ¦ °--')': ) [0/0] {256} + ¦ ¦ °--')': ) [0/0] {257} + ¦ ¦--'+': + [0/1] {258} + ¦ ¦--expr: ggplo [0/1] {259} + ¦ ¦ ¦--expr: ggplo [0/0] {260} + ¦ ¦ ¦ ¦--SYMBOL_PACKAGE: ggplo [0/0] {261} + ¦ ¦ ¦ ¦--NS_GET: :: [0/0] {262} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: geom_ [0/0] {263} + ¦ ¦ ¦--'(': ( [0/0] {264} + ¦ ¦ °--')': ) [0/0] {265} + ¦ ¦--'+': + [0/1] {266} + ¦ °--expr: g() [0/0] {267} + ¦ ¦--expr: g [0/0] {269} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: g [0/0] {268} + ¦ ¦--'(': ( [0/0] {270} + ¦ °--')': ) [0/0] {271} + ¦--COMMENT: # com [0/0] {272} + ¦--COMMENT: # add [3/0] {273} + ¦--expr: ggplo [1/1] {274} + ¦ ¦--expr: ggplo [0/1] {277} + ¦ ¦ ¦--expr: ggplo [0/0] {279} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: ggplo [0/0] {278} + ¦ ¦ ¦--'(': ( [0/0] {280} + ¦ ¦ ¦--SYMBOL_SUB: data [0/1] {281} + ¦ ¦ ¦--EQ_SUB: = [0/1] {282} + ¦ ¦ ¦--expr: mtcar [0/0] {284} + ¦ ¦ ¦ °--SYMBOL: mtcar [0/0] {283} + ¦ ¦ ¦--',': , [0/1] {285} + ¦ ¦ ¦--SYMBOL_SUB: mappi [0/1] {286} + ¦ ¦ ¦--EQ_SUB: = [0/1] {287} + ¦ ¦ ¦--expr: aes(x [0/0] {288} + ¦ ¦ ¦ ¦--expr: aes [0/0] {290} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: aes [0/0] {289} + ¦ ¦ ¦ ¦--'(': ( [0/0] {291} + ¦ ¦ ¦ ¦--SYMBOL_SUB: x [0/1] {292} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {293} + ¦ ¦ ¦ ¦--expr: mpg [0/0] {295} + ¦ ¦ ¦ ¦ °--SYMBOL: mpg [0/0] {294} + ¦ ¦ ¦ ¦--',': , [0/1] {296} + ¦ ¦ ¦ ¦--SYMBOL_SUB: y [0/1] {297} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {298} + ¦ ¦ ¦ ¦--expr: vs [0/0] {300} + ¦ ¦ ¦ ¦ °--SYMBOL: vs [0/0] {299} + ¦ ¦ ¦ °--')': ) [0/0] {301} + ¦ ¦ °--')': ) [0/0] {302} + ¦ ¦--'+': + [0/2] {303} + ¦ ¦--expr: ggplo [1/1] {304} + ¦ ¦ ¦--expr: ggplo [0/0] {305} + ¦ ¦ ¦ ¦--SYMBOL_PACKAGE: ggplo [0/0] {306} + ¦ ¦ ¦ ¦--NS_GET: :: [0/0] {307} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: geom_ [0/0] {308} + ¦ ¦ ¦--'(': ( [0/0] {309} + ¦ ¦ °--')': ) [0/0] {310} + ¦ ¦--'+': + [0/1] {311} + ¦ ¦--expr: g() [0/2] {312} + ¦ ¦ ¦--expr: g [0/0] {314} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: g [0/0] {313} + ¦ ¦ ¦--'(': ( [0/0] {315} + ¦ ¦ °--')': ) [0/0] {316} + ¦ ¦--'+': + [0/1] {317} + ¦ °--expr: geom_ [0/0] {318} + ¦ ¦--expr: geom_ [0/0] {320} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: geom_ [0/0] {319} + ¦ ¦--'(': ( [0/0] {321} + ¦ °--')': ) [0/0] {322} + ¦--COMMENT: # com [0/0] {323} + ¦--COMMENT: # whe [2/0] {324} + ¦--expr: x[1]+ [1/0] {325} + ¦ ¦--expr: x[1] [0/0] {326} + ¦ ¦ ¦--expr: x [0/0] {328} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {327} + ¦ ¦ ¦--'[': [ [0/0] {329} + ¦ ¦ ¦--expr: 1 [0/0] {331} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {330} + ¦ ¦ °--']': ] [0/0] {332} + ¦ ¦--'+': + [0/1] {333} + ¦ °--expr: c() [0/0] {334} + ¦ ¦--expr: c [0/0] {336} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {335} + ¦ ¦--'(': ( [0/0] {337} + ¦ °--')': ) [0/0] {338} + ¦--expr: g() + [2/0] {339} + ¦ ¦--expr: g() [0/1] {340} + ¦ ¦ ¦--expr: g [0/0] {342} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: g [0/0] {341} + ¦ ¦ ¦--'(': ( [0/0] {343} + ¦ ¦ °--')': ) [0/0] {344} + ¦ ¦--'+': + [0/1] {345} + ¦ °--expr: x[1] [0/0] {346} + ¦ ¦--expr: x [0/0] {348} + ¦ ¦ °--SYMBOL: x [0/0] {347} + ¦ ¦--'[': [ [0/0] {349} + ¦ 
¦--expr: 1 [0/0] {351} + ¦ ¦ °--NUM_CONST: 1 [0/0] {350} + ¦ °--']': ] [0/0] {352} + ¦--expr: g()[2 [2/0] {353} + ¦ ¦--expr: g()[2 [0/1] {354} + ¦ ¦ ¦--expr: g() [0/0] {355} + ¦ ¦ ¦ ¦--expr: g [0/0] {357} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: g [0/0] {356} + ¦ ¦ ¦ ¦--'(': ( [0/0] {358} + ¦ ¦ ¦ °--')': ) [0/0] {359} + ¦ ¦ ¦--'[': [ [0/0] {360} + ¦ ¦ ¦--expr: 2 [0/0] {362} + ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {361} + ¦ ¦ °--']': ] [0/0] {363} + ¦ ¦--'+': + [0/1] {364} + ¦ °--expr: x[1] [0/0] {365} + ¦ ¦--expr: x [0/0] {367} + ¦ ¦ °--SYMBOL: x [0/0] {366} + ¦ ¦--'[': [ [0/0] {368} + ¦ ¦--expr: 1 [0/0] {370} + ¦ ¦ °--NUM_CONST: 1 [0/0] {369} + ¦ °--']': ] [0/0] {371} + ¦--COMMENT: # don [2/0] {372} + ¦--expr: +sin( [1/0] {373} + ¦ ¦--'+': + [0/0] {374} + ¦ °--expr: sin(x [0/0] {375} + ¦ ¦--expr: sin [0/0] {377} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: sin [0/0] {376} + ¦ ¦--'(': ( [0/0] {378} + ¦ ¦--expr: x [0/0] {380} + ¦ ¦ °--SYMBOL: x [0/0] {379} + ¦ °--')': ) [0/0] {381} + ¦--COMMENT: # wit [2/0] {382} + °--expr: qqjfl [1/0] {383} + ¦--expr: qqjfl [0/0] {385} + ¦ °--SYMBOL_FUNCTION_CALL: qqjfl [0/0] {384} + ¦--'(': ( [0/2] {386} + ¦--expr: log(y [1/0] {387} + ¦ ¦--expr: log(y [0/1] {390} + ¦ ¦ ¦--expr: log [0/0] {392} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: log [0/0] {391} + ¦ ¦ ¦--'(': ( [0/0] {393} + ¦ ¦ ¦--expr: y + 1 [0/0] {394} + ¦ ¦ ¦ ¦--expr: y [0/1] {396} + ¦ ¦ ¦ ¦ °--SYMBOL: y [0/0] {395} + ¦ ¦ ¦ ¦--'+': + [0/1] {397} + ¦ ¦ ¦ °--expr: 1 [0/0] {399} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {398} + ¦ ¦ °--')': ) [0/0] {400} + ¦ ¦--'+': + [0/4] {401} + ¦ ¦--COMMENT: # sqr [1/4] {402} + ¦ ¦--expr: sqrt( [1/1] {403} + ¦ ¦ ¦--expr: sqrt [0/0] {405} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: sqrt [0/0] {404} + ¦ ¦ ¦--'(': ( [0/0] {406} + ¦ ¦ ¦--expr: x2 [0/0] {408} + ¦ ¦ ¦ °--SYMBOL: x2 [0/0] {407} + ¦ ¦ °--')': ) [0/0] {409} + ¦ ¦--'+': + [0/4] {410} + ¦ ¦--COMMENT: # sqr [1/4] {411} + ¦ ¦--expr: x4 [1/1] {413} + ¦ ¦ °--SYMBOL: x4 [0/0] {412} + ¦ ¦--'+': + [0/4] {414} + ¦ °--expr: sqrt( [1/0] {415} + ¦ ¦--expr: sqrt [0/0] {417} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: sqrt [0/0] {416} + ¦ ¦--'(': ( [0/0] {418} + ¦ ¦--expr: x5 [0/0] {420} + ¦ ¦ °--SYMBOL: x5 [0/0] {419} + ¦ °--')': ) [0/0] {421} + °--')': ) [1/0] {422} diff --git a/tests/testthat/line_breaks_and_other/ggplot2-out.R b/tests/testthat/line_breaks_and_other/ggplot2-out.R new file mode 100644 index 000000000..9a46ec2b4 --- /dev/null +++ b/tests/testthat/line_breaks_and_other/ggplot2-out.R @@ -0,0 +1,60 @@ +# don't remove line break +ggplot(data = mtcars, mapping = aes(x = mpg, y = vs)) + + geom_point() + + +# add when unmasked +ggplot(data = mtcars, mapping = aes(x = mpg, y = vs)) + + geom_point() + + +# add when masked +ggplot2::ggplot(data = mtcars, mapping = aes(x = mpg, y = vs)) + + geom_point() + +# add when masked +ggplot(data = mtcars, mapping = aes(x = mpg, y = vs)) + + ggplot2::geom_point() + +# add when comment +ggplot(data = mtcars, mapping = aes(x = mpg, y = vs)) + # comment + ggplot2::geom_point() + + g() + + +# add when comment +ggplot(data = mtcars, mapping = aes(x = mpg, y = vs)) + + ggplot2::geom_point() + + g() # comment + +# add when comment +ggplot(data = mtcars, mapping = aes(x = mpg, y = vs)) + + ggplot2::geom_point() + + g() # comment + + +# add when comment +ggplot(data = mtcars, mapping = aes(x = mpg, y = vs)) + + ggplot2::geom_point() + + g() + + geom_oint() # comment + +# when subsetted involved +x[1] + c() + +g() + x[1] + +g()[2] + x[1] + +# don't do anything on unary + and function call ++sin(x) + +# within function call +qqjflk( + log(y + 1) + + # 
sqrt(x1) + + sqrt(x2) + + # sqrt(x3) + + x4 + + sqrt(x5) +) diff --git a/tests/testthat/line_breaks_and_other/if_with_line_break_indention-in_tree b/tests/testthat/line_breaks_and_other/if_with_line_break_indention-in_tree index 941df8abc..939eee536 100644 --- a/tests/testthat/line_breaks_and_other/if_with_line_break_indention-in_tree +++ b/tests/testthat/line_breaks_and_other/if_with_line_break_indention-in_tree @@ -1,46 +1,47 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) ¦--COMMENT: # add [0/0] {1} - ¦--expr: [1/0] {2} + ¦--expr: if (x [1/0] {2} ¦ ¦--IF: if [0/1] {3} ¦ ¦--'(': ( [0/0] {4} - ¦ ¦--expr: [0/0] {6} + ¦ ¦--expr: x [0/0] {6} ¦ ¦ °--SYMBOL: x [0/0] {5} ¦ ¦--')': ) [0/1] {7} - ¦ ¦--expr: [0/1] {8} + ¦ ¦--expr: {1+1+ [0/1] {8} ¦ ¦ ¦--'{': { [0/0] {9} - ¦ ¦ ¦--expr: [0/0] {10} - ¦ ¦ ¦ ¦--expr: [0/0] {13} + ¦ ¦ ¦--expr: 1+1++ [0/0] {10} + ¦ ¦ ¦ ¦--expr: 1 [0/0] {13} ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {12} ¦ ¦ ¦ ¦--'+': + [0/0] {14} - ¦ ¦ ¦ ¦--expr: [0/0] {16} + ¦ ¦ ¦ ¦--expr: 1 [0/0] {16} ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {15} ¦ ¦ ¦ ¦--'+': + [0/0] {17} - ¦ ¦ ¦ °--expr: [0/0] {18} + ¦ ¦ ¦ °--expr: +1 [0/0] {18} ¦ ¦ ¦ ¦--'+': + [0/0] {19} - ¦ ¦ ¦ °--expr: [0/0] {21} + ¦ ¦ ¦ °--expr: 1 [0/0] {21} ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {20} ¦ ¦ °--'}': } [0/0] {22} ¦ ¦--ELSE: else [0/0] {23} - ¦ °--expr: [0/0] {24} + ¦ °--expr: {3} [0/0] {24} ¦ ¦--'{': { [0/0] {25} - ¦ ¦--expr: [0/0] {27} + ¦ ¦--expr: 3 [0/0] {27} ¦ ¦ °--NUM_CONST: 3 [0/0] {26} ¦ °--'}': } [0/0] {28} ¦--COMMENT: # rem [2/0] {29} - °--expr: [1/0] {30} - ¦--expr: [0/0] {32} + °--expr: test_ [1/0] {30} + ¦--expr: test_ [0/0] {32} ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {31} ¦--'(': ( [0/0] {33} - ¦--expr: [0/0] {35} + ¦--expr: "x" [0/0] {35} ¦ °--STR_CONST: "x" [0/0] {34} ¦--',': , [0/0] {36} - ¦--expr: [1/0] {37} + ¦--expr: { + m [1/0] {37} ¦ ¦--'{': { [0/2] {38} - ¦ ¦--expr: [1/0] {39} - ¦ ¦ ¦--expr: [0/0] {41} + ¦ ¦--expr: my_te [1/0] {39} + ¦ ¦ ¦--expr: my_te [0/0] {41} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: my_te [0/0] {40} ¦ ¦ ¦--'(': ( [0/0] {42} - ¦ ¦ ¦--expr: [0/0] {44} + ¦ ¦ ¦--expr: call [0/0] {44} ¦ ¦ ¦ °--SYMBOL: call [0/0] {43} ¦ ¦ °--')': ) [0/0] {45} ¦ °--'}': } [1/0] {46} diff --git a/tests/testthat/line_breaks_and_other/line_break_fun_dec-in.R b/tests/testthat/line_breaks_and_other/line_break_fun_dec-in.R deleted file mode 100644 index 140de7339..000000000 --- a/tests/testthat/line_breaks_and_other/line_break_fun_dec-in.R +++ /dev/null @@ -1,18 +0,0 @@ -a <- function(x, # - y - ) { - x - 1 -} - - -a <- function(x, # - y) # -{ - x -} - -a <- function(x, # - y # - ) { # FIXME: Move to the same indention level as a - y -} diff --git a/tests/testthat/line_breaks_and_other/line_break_fun_dec-in_tree b/tests/testthat/line_breaks_and_other/line_break_fun_dec-in_tree deleted file mode 100644 index 9fddeb747..000000000 --- a/tests/testthat/line_breaks_and_other/line_break_fun_dec-in_tree +++ /dev/null @@ -1,59 +0,0 @@ -ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/1] {3} - ¦ ¦ °--SYMBOL: a [0/0] {2} - ¦ ¦--LEFT_ASSIGN: <- [0/1] {4} - ¦ °--expr: [0/0] {5} - ¦ ¦--FUNCTION: funct [0/0] {6} - ¦ ¦--'(': ( [0/0] {7} - ¦ ¦--SYMBOL_FORMALS: x [0/0] {8} - ¦ ¦--',': , [0/1] {9} - ¦ ¦--COMMENT: # [0/14] {10} - ¦ ¦--SYMBOL_FORMALS: y [1/14] {11} - ¦ ¦--')': ) [1/1] {12} - ¦ °--expr: [0/0] {13} - ¦ ¦--'{': { [0/2] {14} - ¦ ¦--expr: [1/0] {15} - ¦ ¦ ¦--expr: [0/1] {17} - ¦ ¦ ¦ °--SYMBOL: x [0/0] {16} - ¦ ¦ ¦--'-': - [0/1] {18} - ¦ ¦ °--expr: [0/0] {20} - ¦ ¦ °--NUM_CONST: 1 [0/0] {19} - ¦ 
°--'}': } [1/0] {21} - ¦--expr: [3/0] {22} - ¦ ¦--expr: [0/1] {24} - ¦ ¦ °--SYMBOL: a [0/0] {23} - ¦ ¦--LEFT_ASSIGN: <- [0/1] {25} - ¦ °--expr: [0/0] {26} - ¦ ¦--FUNCTION: funct [0/0] {27} - ¦ ¦--'(': ( [0/0] {28} - ¦ ¦--SYMBOL_FORMALS: x [0/0] {29} - ¦ ¦--',': , [0/1] {30} - ¦ ¦--COMMENT: # [0/14] {31} - ¦ ¦--SYMBOL_FORMALS: y [1/0] {32} - ¦ ¦--')': ) [0/1] {33} - ¦ ¦--COMMENT: # [0/0] {34} - ¦ °--expr: [1/0] {35} - ¦ ¦--'{': { [0/2] {36} - ¦ ¦--expr: [1/0] {38} - ¦ ¦ °--SYMBOL: x [0/0] {37} - ¦ °--'}': } [1/0] {39} - °--expr: [2/0] {40} - ¦--expr: [0/1] {42} - ¦ °--SYMBOL: a [0/0] {41} - ¦--LEFT_ASSIGN: <- [0/1] {43} - °--expr: [0/0] {44} - ¦--FUNCTION: funct [0/0] {45} - ¦--'(': ( [0/0] {46} - ¦--SYMBOL_FORMALS: x [0/0] {47} - ¦--',': , [0/1] {48} - ¦--COMMENT: # [0/14] {49} - ¦--SYMBOL_FORMALS: y [1/1] {50} - ¦--COMMENT: # [0/12] {51} - ¦--')': ) [1/1] {52} - °--expr: [0/0] {53} - ¦--'{': { [0/1] {54} - ¦--COMMENT: # FIX [0/2] {55} - ¦--expr: [1/0] {57} - ¦ °--SYMBOL: y [0/0] {56} - °--'}': } [1/0] {58} diff --git a/tests/testthat/line_breaks_and_other/line_break_fun_dec-out.R b/tests/testthat/line_breaks_and_other/line_break_fun_dec-out.R deleted file mode 100644 index 459d70ce1..000000000 --- a/tests/testthat/line_breaks_and_other/line_break_fun_dec-out.R +++ /dev/null @@ -1,17 +0,0 @@ -a <- function(x, # - y) { - x - 1 -} - - -a <- function(x, # - y) # -{ - x -} - -a <- function(x, # - y # - ) { # FIXME: Move to the same indention level as a - y -} diff --git a/tests/testthat/line_breaks_and_other/pipe-line-breaks-in.R b/tests/testthat/line_breaks_and_other/pipe-line-breaks-in.R new file mode 100644 index 000000000..a60021669 --- /dev/null +++ b/tests/testthat/line_breaks_and_other/pipe-line-breaks-in.R @@ -0,0 +1,156 @@ +c(a %>% b) + +c(a %>% b()) + +c(a + b %>% c) + +c( + a %>% b) + +c(a %>% b() +) + +c(a %>% b() # 33 +) + +c( + a + b %>% c + ) + +c( + a + b %>% + c) + +c(a + b %>% + c) + +c( + a + b %>% # 654 + c +) + +c( # rr + a + b %>% + c +) + +c( + a + + b %>% c +) + +c(a + + b %>% c +) + +a %>% b( +) + +a %>% b( +) %>% q + +a %>% + b() + +a %>% b() %>% c + +# short pipes < 2 can stay on one line +a %>% b() + +fun(x, + a %>% b) + +fun(x, + gg = a %>% b, + tt %>% q) + +fun(x, gg = a %>% b, tt %>% q) + +z = a %>% b() + +fun( s = g(x), + gg = a(n == 2) %>% b, + tt %>% q(r = 3)) + +# FIXME closing brace could go on ntext line. Alternative: remove lin breaks completely. +blew(x %>% + + c(), y = 2) + +# FIXME closing brace could go on ntext line. Alternative: move c() up. 
+blew(y = 2, x %>% + c()) + + +{a %>% c +1} + + +b %>% + f() %>% # never move comment to next line as it can be styler: off or nolint + k() %>% + x() + + +# line break before { inserted inside and outside function calls +c( +data %>% + filter(bar) %>% { + cor(.$col1, .$col2, use = "complete.obs") + } +) + +data %>% + filter(bar) %>% { + cor(.$col1, .$col2, use = "complete.obs") + } + +# line break before { kept inside and outside function calls +c( + data %>% + filter(bar) %>% + { + cor(.$col1, .$col2, use = "complete.obs") + } +) + +data %>% + filter(bar) %>% + { + cor(.$col1, .$col2, use = "complete.obs") + } + +# redundant blank lines removed +c( + data %>% + filter(bar) %>% + + { + cor(.$col1, .$col2, use = "complete.obs") + } +) + +data %>% + filter(bar) %>% + + { + cor(.$col1, .$col2, use = "complete.obs") + } + +# blank lines kept when around comment +c( + data %>% + filter(bar) %>% + # comment + + { + cor(.$col1, .$col2, use = "complete.obs") + } +) + +data %>% + filter(bar) %>% + # comment + + { + cor(.$col1, .$col2, use = "complete.obs") + } diff --git a/tests/testthat/line_breaks_and_other/pipe-line-breaks-in_tree b/tests/testthat/line_breaks_and_other/pipe-line-breaks-in_tree new file mode 100644 index 000000000..9ed90d2f0 --- /dev/null +++ b/tests/testthat/line_breaks_and_other/pipe-line-breaks-in_tree @@ -0,0 +1,759 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: c(a % [0/0] {1} + ¦ ¦--expr: c [0/0] {3} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {2} + ¦ ¦--'(': ( [0/0] {4} + ¦ ¦--expr: a %>% [0/0] {5} + ¦ ¦ ¦--expr: a [0/1] {7} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {6} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {8} + ¦ ¦ °--expr: b [0/0] {10} + ¦ ¦ °--SYMBOL: b [0/0] {9} + ¦ °--')': ) [0/0] {11} + ¦--expr: c(a % [2/0] {12} + ¦ ¦--expr: c [0/0] {14} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {13} + ¦ ¦--'(': ( [0/0] {15} + ¦ ¦--expr: a %>% [0/0] {16} + ¦ ¦ ¦--expr: a [0/1] {18} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {17} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {19} + ¦ ¦ °--expr: b() [0/0] {20} + ¦ ¦ ¦--expr: b [0/0] {22} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {21} + ¦ ¦ ¦--'(': ( [0/0] {23} + ¦ ¦ °--')': ) [0/0] {24} + ¦ °--')': ) [0/0] {25} + ¦--expr: c(a + [2/0] {26} + ¦ ¦--expr: c [0/0] {28} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {27} + ¦ ¦--'(': ( [0/0] {29} + ¦ ¦--expr: a + b [0/0] {30} + ¦ ¦ ¦--expr: a [0/1] {32} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {31} + ¦ ¦ ¦--'+': + [0/1] {33} + ¦ ¦ ¦--expr: b [0/1] {36} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {35} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {37} + ¦ ¦ °--expr: c [0/0] {39} + ¦ ¦ °--SYMBOL: c [0/0] {38} + ¦ °--')': ) [0/0] {40} + ¦--expr: c( + [2/0] {41} + ¦ ¦--expr: c [0/0] {43} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {42} + ¦ ¦--'(': ( [0/2] {44} + ¦ ¦--expr: a %>% [1/0] {45} + ¦ ¦ ¦--expr: a [0/1] {47} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {46} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {48} + ¦ ¦ °--expr: b [0/0] {50} + ¦ ¦ °--SYMBOL: b [0/0] {49} + ¦ °--')': ) [0/0] {51} + ¦--expr: c(a % [2/0] {52} + ¦ ¦--expr: c [0/0] {54} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {53} + ¦ ¦--'(': ( [0/0] {55} + ¦ ¦--expr: a %>% [0/0] {56} + ¦ ¦ ¦--expr: a [0/1] {58} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {57} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {59} + ¦ ¦ °--expr: b() [0/0] {60} + ¦ ¦ ¦--expr: b [0/0] {62} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {61} + ¦ ¦ ¦--'(': ( [0/0] {63} + ¦ ¦ °--')': ) [0/0] {64} + ¦ °--')': ) [1/0] {65} + ¦--expr: c(a % [2/0] {66} + ¦ ¦--expr: c [0/0] {68} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {67} + ¦ ¦--'(': ( [0/0] {69} + ¦ ¦--expr: a %>% [0/1] {70} + ¦ ¦ ¦--expr: a [0/1] {72} + ¦ ¦ ¦ 
°--SYMBOL: a [0/0] {71} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {73} + ¦ ¦ °--expr: b() [0/0] {74} + ¦ ¦ ¦--expr: b [0/0] {76} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {75} + ¦ ¦ ¦--'(': ( [0/0] {77} + ¦ ¦ °--')': ) [0/0] {78} + ¦ ¦--COMMENT: # 33 [0/0] {79} + ¦ °--')': ) [1/0] {80} + ¦--expr: c( + [2/0] {81} + ¦ ¦--expr: c [0/0] {83} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {82} + ¦ ¦--'(': ( [0/2] {84} + ¦ ¦--expr: a + b [1/2] {85} + ¦ ¦ ¦--expr: a [0/1] {87} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {86} + ¦ ¦ ¦--'+': + [0/1] {88} + ¦ ¦ ¦--expr: b [0/1] {91} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {90} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {92} + ¦ ¦ °--expr: c [0/0] {94} + ¦ ¦ °--SYMBOL: c [0/0] {93} + ¦ °--')': ) [1/0] {95} + ¦--expr: c( + [2/0] {96} + ¦ ¦--expr: c [0/0] {98} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {97} + ¦ ¦--'(': ( [0/2] {99} + ¦ ¦--expr: a + b [1/0] {100} + ¦ ¦ ¦--expr: a [0/1] {102} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {101} + ¦ ¦ ¦--'+': + [0/1] {103} + ¦ ¦ ¦--expr: b [0/1] {106} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {105} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {107} + ¦ ¦ °--expr: c [1/0] {109} + ¦ ¦ °--SYMBOL: c [0/0] {108} + ¦ °--')': ) [0/0] {110} + ¦--expr: c(a + [2/0] {111} + ¦ ¦--expr: c [0/0] {113} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {112} + ¦ ¦--'(': ( [0/0] {114} + ¦ ¦--expr: a + b [0/0] {115} + ¦ ¦ ¦--expr: a [0/1] {117} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {116} + ¦ ¦ ¦--'+': + [0/1] {118} + ¦ ¦ ¦--expr: b [0/1] {121} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {120} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {122} + ¦ ¦ °--expr: c [1/0] {124} + ¦ ¦ °--SYMBOL: c [0/0] {123} + ¦ °--')': ) [0/0] {125} + ¦--expr: c( + [2/0] {126} + ¦ ¦--expr: c [0/0] {128} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {127} + ¦ ¦--'(': ( [0/2] {129} + ¦ ¦--expr: a + b [1/0] {130} + ¦ ¦ ¦--expr: a [0/1] {132} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {131} + ¦ ¦ ¦--'+': + [0/1] {133} + ¦ ¦ ¦--expr: b [0/1] {136} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {135} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {137} + ¦ ¦ ¦--COMMENT: # 654 [0/4] {138} + ¦ ¦ °--expr: c [1/0] {140} + ¦ ¦ °--SYMBOL: c [0/0] {139} + ¦ °--')': ) [1/0] {141} + ¦--expr: c( # [2/0] {142} + ¦ ¦--expr: c [0/0] {144} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {143} + ¦ ¦--'(': ( [0/1] {145} + ¦ ¦--COMMENT: # rr [0/2] {146} + ¦ ¦--expr: a + b [1/0] {147} + ¦ ¦ ¦--expr: a [0/1] {149} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {148} + ¦ ¦ ¦--'+': + [0/1] {150} + ¦ ¦ ¦--expr: b [0/1] {153} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {152} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {154} + ¦ ¦ °--expr: c [1/0] {156} + ¦ ¦ °--SYMBOL: c [0/0] {155} + ¦ °--')': ) [1/0] {157} + ¦--expr: c( + [2/0] {158} + ¦ ¦--expr: c [0/0] {160} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {159} + ¦ ¦--'(': ( [0/2] {161} + ¦ ¦--expr: a + + [1/0] {162} + ¦ ¦ ¦--expr: a [0/1] {164} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {163} + ¦ ¦ ¦--'+': + [0/4] {165} + ¦ ¦ ¦--expr: b [1/1] {168} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {167} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {169} + ¦ ¦ °--expr: c [0/0] {171} + ¦ ¦ °--SYMBOL: c [0/0] {170} + ¦ °--')': ) [1/0] {172} + ¦--expr: c(a + [2/0] {173} + ¦ ¦--expr: c [0/0] {175} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {174} + ¦ ¦--'(': ( [0/0] {176} + ¦ ¦--expr: a + + [0/0] {177} + ¦ ¦ ¦--expr: a [0/1] {179} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {178} + ¦ ¦ ¦--'+': + [0/4] {180} + ¦ ¦ ¦--expr: b [1/1] {183} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {182} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {184} + ¦ ¦ °--expr: c [0/0] {186} + ¦ ¦ °--SYMBOL: c [0/0] {185} + ¦ °--')': ) [1/0] {187} + ¦--expr: a %>% [2/0] {188} + ¦ ¦--expr: a [0/1] {190} + ¦ ¦ °--SYMBOL: a [0/0] {189} + ¦ ¦--SPECIAL-PIPE: %>% [0/1] {191} + ¦ °--expr: b( +) [0/0] {192} + ¦ ¦--expr: b [0/0] {194} + ¦ 
¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {193} + ¦ ¦--'(': ( [0/0] {195} + ¦ °--')': ) [1/0] {196} + ¦--expr: a %>% [2/0] {197} + ¦ ¦--expr: a [0/1] {200} + ¦ ¦ °--SYMBOL: a [0/0] {199} + ¦ ¦--SPECIAL-PIPE: %>% [0/1] {201} + ¦ ¦--expr: b( +) [0/1] {202} + ¦ ¦ ¦--expr: b [0/0] {204} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {203} + ¦ ¦ ¦--'(': ( [0/0] {205} + ¦ ¦ °--')': ) [1/0] {206} + ¦ ¦--SPECIAL-PIPE: %>% [0/1] {207} + ¦ °--expr: q [0/0] {209} + ¦ °--SYMBOL: q [0/0] {208} + ¦--expr: a %>% [2/0] {210} + ¦ ¦--expr: a [0/1] {212} + ¦ ¦ °--SYMBOL: a [0/0] {211} + ¦ ¦--SPECIAL-PIPE: %>% [0/2] {213} + ¦ °--expr: b() [1/0] {214} + ¦ ¦--expr: b [0/0] {216} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {215} + ¦ ¦--'(': ( [0/0] {217} + ¦ °--')': ) [0/0] {218} + ¦--expr: a %>% [2/0] {219} + ¦ ¦--expr: a [0/1] {222} + ¦ ¦ °--SYMBOL: a [0/0] {221} + ¦ ¦--SPECIAL-PIPE: %>% [0/1] {223} + ¦ ¦--expr: b() [0/1] {224} + ¦ ¦ ¦--expr: b [0/0] {226} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {225} + ¦ ¦ ¦--'(': ( [0/0] {227} + ¦ ¦ °--')': ) [0/0] {228} + ¦ ¦--SPECIAL-PIPE: %>% [0/1] {229} + ¦ °--expr: c [0/0] {231} + ¦ °--SYMBOL: c [0/0] {230} + ¦--COMMENT: # sho [2/0] {232} + ¦--expr: a %>% [1/0] {233} + ¦ ¦--expr: a [0/1] {235} + ¦ ¦ °--SYMBOL: a [0/0] {234} + ¦ ¦--SPECIAL-PIPE: %>% [0/1] {236} + ¦ °--expr: b() [0/0] {237} + ¦ ¦--expr: b [0/0] {239} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {238} + ¦ ¦--'(': ( [0/0] {240} + ¦ °--')': ) [0/0] {241} + ¦--expr: fun(x [2/0] {242} + ¦ ¦--expr: fun [0/0] {244} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: fun [0/0] {243} + ¦ ¦--'(': ( [0/0] {245} + ¦ ¦--expr: x [0/0] {247} + ¦ ¦ °--SYMBOL: x [0/0] {246} + ¦ ¦--',': , [0/2] {248} + ¦ ¦--expr: a %>% [1/0] {249} + ¦ ¦ ¦--expr: a [0/1] {251} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {250} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {252} + ¦ ¦ °--expr: b [0/0] {254} + ¦ ¦ °--SYMBOL: b [0/0] {253} + ¦ °--')': ) [0/0] {255} + ¦--expr: fun(x [2/0] {256} + ¦ ¦--expr: fun [0/0] {258} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: fun [0/0] {257} + ¦ ¦--'(': ( [0/0] {259} + ¦ ¦--expr: x [0/0] {261} + ¦ ¦ °--SYMBOL: x [0/0] {260} + ¦ ¦--',': , [0/4] {262} + ¦ ¦--SYMBOL_SUB: gg [1/1] {263} + ¦ ¦--EQ_SUB: = [0/1] {264} + ¦ ¦--expr: a %>% [0/0] {265} + ¦ ¦ ¦--expr: a [0/1] {267} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {266} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {268} + ¦ ¦ °--expr: b [0/0] {270} + ¦ ¦ °--SYMBOL: b [0/0] {269} + ¦ ¦--',': , [0/4] {271} + ¦ ¦--expr: tt %> [1/0] {272} + ¦ ¦ ¦--expr: tt [0/1] {274} + ¦ ¦ ¦ °--SYMBOL: tt [0/0] {273} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {275} + ¦ ¦ °--expr: q [0/0] {277} + ¦ ¦ °--SYMBOL: q [0/0] {276} + ¦ °--')': ) [0/0] {278} + ¦--expr: fun(x [2/0] {279} + ¦ ¦--expr: fun [0/0] {281} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: fun [0/0] {280} + ¦ ¦--'(': ( [0/0] {282} + ¦ ¦--expr: x [0/0] {284} + ¦ ¦ °--SYMBOL: x [0/0] {283} + ¦ ¦--',': , [0/1] {285} + ¦ ¦--SYMBOL_SUB: gg [0/1] {286} + ¦ ¦--EQ_SUB: = [0/1] {287} + ¦ ¦--expr: a %>% [0/0] {288} + ¦ ¦ ¦--expr: a [0/1] {290} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {289} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {291} + ¦ ¦ °--expr: b [0/0] {293} + ¦ ¦ °--SYMBOL: b [0/0] {292} + ¦ ¦--',': , [0/1] {294} + ¦ ¦--expr: tt %> [0/0] {295} + ¦ ¦ ¦--expr: tt [0/1] {297} + ¦ ¦ ¦ °--SYMBOL: tt [0/0] {296} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {298} + ¦ ¦ °--expr: q [0/0] {300} + ¦ ¦ °--SYMBOL: q [0/0] {299} + ¦ °--')': ) [0/0] {301} + ¦--expr_or_assign_or_help: z = a [2/0] {302} + ¦ ¦--expr: z [0/1] {304} + ¦ ¦ °--SYMBOL: z [0/0] {303} + ¦ ¦--EQ_ASSIGN: = [0/1] {305} + ¦ ¦--expr: a [0/1] {308} + ¦ ¦ °--SYMBOL: a [0/0] {307} + ¦ ¦--SPECIAL-PIPE: %>% [0/1] {309} + ¦ °--expr: b() 
[0/0] {310} + ¦ ¦--expr: b [0/0] {312} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {311} + ¦ ¦--'(': ( [0/0] {313} + ¦ °--')': ) [0/0] {314} + ¦--expr: fun( [2/0] {315} + ¦ ¦--expr: fun [0/0] {317} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: fun [0/0] {316} + ¦ ¦--'(': ( [0/1] {318} + ¦ ¦--SYMBOL_SUB: s [0/1] {319} + ¦ ¦--EQ_SUB: = [0/1] {320} + ¦ ¦--expr: g(x) [0/0] {321} + ¦ ¦ ¦--expr: g [0/0] {323} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: g [0/0] {322} + ¦ ¦ ¦--'(': ( [0/0] {324} + ¦ ¦ ¦--expr: x [0/0] {326} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {325} + ¦ ¦ °--')': ) [0/0] {327} + ¦ ¦--',': , [0/4] {328} + ¦ ¦--SYMBOL_SUB: gg [1/1] {329} + ¦ ¦--EQ_SUB: = [0/1] {330} + ¦ ¦--expr: a(n = [0/0] {331} + ¦ ¦ ¦--expr: a(n = [0/1] {332} + ¦ ¦ ¦ ¦--expr: a [0/0] {334} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: a [0/0] {333} + ¦ ¦ ¦ ¦--'(': ( [0/0] {335} + ¦ ¦ ¦ ¦--expr: n == [0/0] {336} + ¦ ¦ ¦ ¦ ¦--expr: n [0/1] {338} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL: n [0/0] {337} + ¦ ¦ ¦ ¦ ¦--EQ: == [0/1] {339} + ¦ ¦ ¦ ¦ °--expr: 2 [0/0] {341} + ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {340} + ¦ ¦ ¦ °--')': ) [0/0] {342} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {343} + ¦ ¦ °--expr: b [0/0] {345} + ¦ ¦ °--SYMBOL: b [0/0] {344} + ¦ ¦--',': , [0/4] {346} + ¦ ¦--expr: tt %> [1/0] {347} + ¦ ¦ ¦--expr: tt [0/1] {349} + ¦ ¦ ¦ °--SYMBOL: tt [0/0] {348} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {350} + ¦ ¦ °--expr: q(r = [0/0] {351} + ¦ ¦ ¦--expr: q [0/0] {353} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: q [0/0] {352} + ¦ ¦ ¦--'(': ( [0/0] {354} + ¦ ¦ ¦--SYMBOL_SUB: r [0/1] {355} + ¦ ¦ ¦--EQ_SUB: = [0/1] {356} + ¦ ¦ ¦--expr: 3 [0/0] {358} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {357} + ¦ ¦ °--')': ) [0/0] {359} + ¦ °--')': ) [0/0] {360} + ¦--COMMENT: # FIX [2/0] {361} + ¦--expr: blew( [1/0] {362} + ¦ ¦--expr: blew [0/0] {364} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: blew [0/0] {363} + ¦ ¦--'(': ( [0/0] {365} + ¦ ¦--expr: x %>% [0/0] {366} + ¦ ¦ ¦--expr: x [0/1] {368} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {367} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/7] {369} + ¦ ¦ °--expr: c() [2/0] {370} + ¦ ¦ ¦--expr: c [0/0] {372} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {371} + ¦ ¦ ¦--'(': ( [0/0] {373} + ¦ ¦ °--')': ) [0/0] {374} + ¦ ¦--',': , [0/1] {375} + ¦ ¦--SYMBOL_SUB: y [0/1] {376} + ¦ ¦--EQ_SUB: = [0/1] {377} + ¦ ¦--expr: 2 [0/0] {379} + ¦ ¦ °--NUM_CONST: 2 [0/0] {378} + ¦ °--')': ) [0/0] {380} + ¦--COMMENT: # FIX [2/0] {381} + ¦--expr: blew( [1/0] {382} + ¦ ¦--expr: blew [0/0] {384} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: blew [0/0] {383} + ¦ ¦--'(': ( [0/0] {385} + ¦ ¦--SYMBOL_SUB: y [0/1] {386} + ¦ ¦--EQ_SUB: = [0/1] {387} + ¦ ¦--expr: 2 [0/0] {389} + ¦ ¦ °--NUM_CONST: 2 [0/0] {388} + ¦ ¦--',': , [0/1] {390} + ¦ ¦--expr: x %>% [0/0] {391} + ¦ ¦ ¦--expr: x [0/1] {393} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {392} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/7] {394} + ¦ ¦ °--expr: c() [1/0] {395} + ¦ ¦ ¦--expr: c [0/0] {397} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {396} + ¦ ¦ ¦--'(': ( [0/0] {398} + ¦ ¦ °--')': ) [0/0] {399} + ¦ °--')': ) [0/0] {400} + ¦--expr: {a %> [3/0] {401} + ¦ ¦--'{': { [0/0] {402} + ¦ ¦--expr: a %>% [0/0] {403} + ¦ ¦ ¦--expr: a [0/1] {406} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {405} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {407} + ¦ ¦ ¦--expr: c [0/1] {409} + ¦ ¦ ¦ °--SYMBOL: c [0/0] {408} + ¦ ¦ ¦--'+': + [0/0] {410} + ¦ ¦ °--expr: 1 [0/0] {412} + ¦ ¦ °--NUM_CONST: 1 [0/0] {411} + ¦ °--'}': } [0/0] {413} + ¦--expr: b %>% [3/0] {414} + ¦ ¦--expr: b [0/1] {418} + ¦ ¦ °--SYMBOL: b [0/0] {417} + ¦ ¦--SPECIAL-PIPE: %>% [0/2] {419} + ¦ ¦--expr: f() [1/1] {420} + ¦ ¦ ¦--expr: f [0/0] {422} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: f [0/0] {421} + ¦ ¦ ¦--'(': ( [0/0] {423} + ¦ ¦ °--')': ) [0/0] {424} 
+ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {425} + ¦ ¦--COMMENT: # nev [0/2] {426} + ¦ ¦--expr: k() [1/1] {427} + ¦ ¦ ¦--expr: k [0/0] {429} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: k [0/0] {428} + ¦ ¦ ¦--'(': ( [0/0] {430} + ¦ ¦ °--')': ) [0/0] {431} + ¦ ¦--SPECIAL-PIPE: %>% [0/2] {432} + ¦ °--expr: x() [1/0] {433} + ¦ ¦--expr: x [0/0] {435} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: x [0/0] {434} + ¦ ¦--'(': ( [0/0] {436} + ¦ °--')': ) [0/0] {437} + ¦--COMMENT: # lin [3/0] {438} + ¦--expr: c( +da [1/0] {439} + ¦ ¦--expr: c [0/0] {441} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {440} + ¦ ¦--'(': ( [0/0] {442} + ¦ ¦--expr: data [1/0] {443} + ¦ ¦ ¦--expr: data [0/1] {446} + ¦ ¦ ¦ °--SYMBOL: data [0/0] {445} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/2] {447} + ¦ ¦ ¦--expr: filte [1/1] {448} + ¦ ¦ ¦ ¦--expr: filte [0/0] {450} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: filte [0/0] {449} + ¦ ¦ ¦ ¦--'(': ( [0/0] {451} + ¦ ¦ ¦ ¦--expr: bar [0/0] {453} + ¦ ¦ ¦ ¦ °--SYMBOL: bar [0/0] {452} + ¦ ¦ ¦ °--')': ) [0/0] {454} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {455} + ¦ ¦ °--expr: { + [0/0] {456} + ¦ ¦ ¦--'{': { [0/4] {457} + ¦ ¦ ¦--expr: cor(. [1/2] {458} + ¦ ¦ ¦ ¦--expr: cor [0/0] {460} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: cor [0/0] {459} + ¦ ¦ ¦ ¦--'(': ( [0/0] {461} + ¦ ¦ ¦ ¦--expr: .$col [0/0] {462} + ¦ ¦ ¦ ¦ ¦--expr: . [0/0] {464} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL: . [0/0] {463} + ¦ ¦ ¦ ¦ ¦--'$': $ [0/0] {465} + ¦ ¦ ¦ ¦ °--SYMBOL: col1 [0/0] {466} + ¦ ¦ ¦ ¦--',': , [0/1] {467} + ¦ ¦ ¦ ¦--expr: .$col [0/0] {468} + ¦ ¦ ¦ ¦ ¦--expr: . [0/0] {470} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL: . [0/0] {469} + ¦ ¦ ¦ ¦ ¦--'$': $ [0/0] {471} + ¦ ¦ ¦ ¦ °--SYMBOL: col2 [0/0] {472} + ¦ ¦ ¦ ¦--',': , [0/1] {473} + ¦ ¦ ¦ ¦--SYMBOL_SUB: use [0/1] {474} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {475} + ¦ ¦ ¦ ¦--expr: "comp [0/0] {477} + ¦ ¦ ¦ ¦ °--STR_CONST: "comp [0/0] {476} + ¦ ¦ ¦ °--')': ) [0/0] {478} + ¦ ¦ °--'}': } [1/0] {479} + ¦ °--')': ) [1/0] {480} + ¦--expr: data [2/0] {481} + ¦ ¦--expr: data [0/1] {484} + ¦ ¦ °--SYMBOL: data [0/0] {483} + ¦ ¦--SPECIAL-PIPE: %>% [0/2] {485} + ¦ ¦--expr: filte [1/1] {486} + ¦ ¦ ¦--expr: filte [0/0] {488} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: filte [0/0] {487} + ¦ ¦ ¦--'(': ( [0/0] {489} + ¦ ¦ ¦--expr: bar [0/0] {491} + ¦ ¦ ¦ °--SYMBOL: bar [0/0] {490} + ¦ ¦ °--')': ) [0/0] {492} + ¦ ¦--SPECIAL-PIPE: %>% [0/1] {493} + ¦ °--expr: { + [0/0] {494} + ¦ ¦--'{': { [0/4] {495} + ¦ ¦--expr: cor(. [1/2] {496} + ¦ ¦ ¦--expr: cor [0/0] {498} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: cor [0/0] {497} + ¦ ¦ ¦--'(': ( [0/0] {499} + ¦ ¦ ¦--expr: .$col [0/0] {500} + ¦ ¦ ¦ ¦--expr: . [0/0] {502} + ¦ ¦ ¦ ¦ °--SYMBOL: . [0/0] {501} + ¦ ¦ ¦ ¦--'$': $ [0/0] {503} + ¦ ¦ ¦ °--SYMBOL: col1 [0/0] {504} + ¦ ¦ ¦--',': , [0/1] {505} + ¦ ¦ ¦--expr: .$col [0/0] {506} + ¦ ¦ ¦ ¦--expr: . [0/0] {508} + ¦ ¦ ¦ ¦ °--SYMBOL: . 
[0/0] {507} + ¦ ¦ ¦ ¦--'$': $ [0/0] {509} + ¦ ¦ ¦ °--SYMBOL: col2 [0/0] {510} + ¦ ¦ ¦--',': , [0/1] {511} + ¦ ¦ ¦--SYMBOL_SUB: use [0/1] {512} + ¦ ¦ ¦--EQ_SUB: = [0/1] {513} + ¦ ¦ ¦--expr: "comp [0/0] {515} + ¦ ¦ ¦ °--STR_CONST: "comp [0/0] {514} + ¦ ¦ °--')': ) [0/0] {516} + ¦ °--'}': } [1/0] {517} + ¦--COMMENT: # lin [2/0] {518} + ¦--expr: c( + [1/0] {519} + ¦ ¦--expr: c [0/0] {521} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {520} + ¦ ¦--'(': ( [0/2] {522} + ¦ ¦--expr: data [1/0] {523} + ¦ ¦ ¦--expr: data [0/1] {526} + ¦ ¦ ¦ °--SYMBOL: data [0/0] {525} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {527} + ¦ ¦ ¦--expr: filte [1/1] {528} + ¦ ¦ ¦ ¦--expr: filte [0/0] {530} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: filte [0/0] {529} + ¦ ¦ ¦ ¦--'(': ( [0/0] {531} + ¦ ¦ ¦ ¦--expr: bar [0/0] {533} + ¦ ¦ ¦ ¦ °--SYMBOL: bar [0/0] {532} + ¦ ¦ ¦ °--')': ) [0/0] {534} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {535} + ¦ ¦ °--expr: { + [1/0] {536} + ¦ ¦ ¦--'{': { [0/6] {537} + ¦ ¦ ¦--expr: cor(. [1/4] {538} + ¦ ¦ ¦ ¦--expr: cor [0/0] {540} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: cor [0/0] {539} + ¦ ¦ ¦ ¦--'(': ( [0/0] {541} + ¦ ¦ ¦ ¦--expr: .$col [0/0] {542} + ¦ ¦ ¦ ¦ ¦--expr: . [0/0] {544} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL: . [0/0] {543} + ¦ ¦ ¦ ¦ ¦--'$': $ [0/0] {545} + ¦ ¦ ¦ ¦ °--SYMBOL: col1 [0/0] {546} + ¦ ¦ ¦ ¦--',': , [0/1] {547} + ¦ ¦ ¦ ¦--expr: .$col [0/0] {548} + ¦ ¦ ¦ ¦ ¦--expr: . [0/0] {550} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL: . [0/0] {549} + ¦ ¦ ¦ ¦ ¦--'$': $ [0/0] {551} + ¦ ¦ ¦ ¦ °--SYMBOL: col2 [0/0] {552} + ¦ ¦ ¦ ¦--',': , [0/1] {553} + ¦ ¦ ¦ ¦--SYMBOL_SUB: use [0/1] {554} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {555} + ¦ ¦ ¦ ¦--expr: "comp [0/0] {557} + ¦ ¦ ¦ ¦ °--STR_CONST: "comp [0/0] {556} + ¦ ¦ ¦ °--')': ) [0/0] {558} + ¦ ¦ °--'}': } [1/0] {559} + ¦ °--')': ) [1/0] {560} + ¦--expr: data [2/0] {561} + ¦ ¦--expr: data [0/1] {564} + ¦ ¦ °--SYMBOL: data [0/0] {563} + ¦ ¦--SPECIAL-PIPE: %>% [0/2] {565} + ¦ ¦--expr: filte [1/1] {566} + ¦ ¦ ¦--expr: filte [0/0] {568} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: filte [0/0] {567} + ¦ ¦ ¦--'(': ( [0/0] {569} + ¦ ¦ ¦--expr: bar [0/0] {571} + ¦ ¦ ¦ °--SYMBOL: bar [0/0] {570} + ¦ ¦ °--')': ) [0/0] {572} + ¦ ¦--SPECIAL-PIPE: %>% [0/2] {573} + ¦ °--expr: { + [1/0] {574} + ¦ ¦--'{': { [0/4] {575} + ¦ ¦--expr: cor(. [1/2] {576} + ¦ ¦ ¦--expr: cor [0/0] {578} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: cor [0/0] {577} + ¦ ¦ ¦--'(': ( [0/0] {579} + ¦ ¦ ¦--expr: .$col [0/0] {580} + ¦ ¦ ¦ ¦--expr: . [0/0] {582} + ¦ ¦ ¦ ¦ °--SYMBOL: . [0/0] {581} + ¦ ¦ ¦ ¦--'$': $ [0/0] {583} + ¦ ¦ ¦ °--SYMBOL: col1 [0/0] {584} + ¦ ¦ ¦--',': , [0/1] {585} + ¦ ¦ ¦--expr: .$col [0/0] {586} + ¦ ¦ ¦ ¦--expr: . [0/0] {588} + ¦ ¦ ¦ ¦ °--SYMBOL: . 
[0/0] {587} + ¦ ¦ ¦ ¦--'$': $ [0/0] {589} + ¦ ¦ ¦ °--SYMBOL: col2 [0/0] {590} + ¦ ¦ ¦--',': , [0/1] {591} + ¦ ¦ ¦--SYMBOL_SUB: use [0/1] {592} + ¦ ¦ ¦--EQ_SUB: = [0/1] {593} + ¦ ¦ ¦--expr: "comp [0/0] {595} + ¦ ¦ ¦ °--STR_CONST: "comp [0/0] {594} + ¦ ¦ °--')': ) [0/0] {596} + ¦ °--'}': } [1/0] {597} + ¦--COMMENT: # red [2/0] {598} + ¦--expr: c( + [1/0] {599} + ¦ ¦--expr: c [0/0] {601} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {600} + ¦ ¦--'(': ( [0/2] {602} + ¦ ¦--expr: data [1/0] {603} + ¦ ¦ ¦--expr: data [0/1] {606} + ¦ ¦ ¦ °--SYMBOL: data [0/0] {605} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {607} + ¦ ¦ ¦--expr: filte [1/1] {608} + ¦ ¦ ¦ ¦--expr: filte [0/0] {610} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: filte [0/0] {609} + ¦ ¦ ¦ ¦--'(': ( [0/0] {611} + ¦ ¦ ¦ ¦--expr: bar [0/0] {613} + ¦ ¦ ¦ ¦ °--SYMBOL: bar [0/0] {612} + ¦ ¦ ¦ °--')': ) [0/0] {614} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {615} + ¦ ¦ °--expr: { + [2/0] {616} + ¦ ¦ ¦--'{': { [0/6] {617} + ¦ ¦ ¦--expr: cor(. [1/4] {618} + ¦ ¦ ¦ ¦--expr: cor [0/0] {620} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: cor [0/0] {619} + ¦ ¦ ¦ ¦--'(': ( [0/0] {621} + ¦ ¦ ¦ ¦--expr: .$col [0/0] {622} + ¦ ¦ ¦ ¦ ¦--expr: . [0/0] {624} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL: . [0/0] {623} + ¦ ¦ ¦ ¦ ¦--'$': $ [0/0] {625} + ¦ ¦ ¦ ¦ °--SYMBOL: col1 [0/0] {626} + ¦ ¦ ¦ ¦--',': , [0/1] {627} + ¦ ¦ ¦ ¦--expr: .$col [0/0] {628} + ¦ ¦ ¦ ¦ ¦--expr: . [0/0] {630} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL: . [0/0] {629} + ¦ ¦ ¦ ¦ ¦--'$': $ [0/0] {631} + ¦ ¦ ¦ ¦ °--SYMBOL: col2 [0/0] {632} + ¦ ¦ ¦ ¦--',': , [0/1] {633} + ¦ ¦ ¦ ¦--SYMBOL_SUB: use [0/1] {634} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {635} + ¦ ¦ ¦ ¦--expr: "comp [0/0] {637} + ¦ ¦ ¦ ¦ °--STR_CONST: "comp [0/0] {636} + ¦ ¦ ¦ °--')': ) [0/0] {638} + ¦ ¦ °--'}': } [1/0] {639} + ¦ °--')': ) [1/0] {640} + ¦--expr: data [2/0] {641} + ¦ ¦--expr: data [0/1] {644} + ¦ ¦ °--SYMBOL: data [0/0] {643} + ¦ ¦--SPECIAL-PIPE: %>% [0/2] {645} + ¦ ¦--expr: filte [1/1] {646} + ¦ ¦ ¦--expr: filte [0/0] {648} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: filte [0/0] {647} + ¦ ¦ ¦--'(': ( [0/0] {649} + ¦ ¦ ¦--expr: bar [0/0] {651} + ¦ ¦ ¦ °--SYMBOL: bar [0/0] {650} + ¦ ¦ °--')': ) [0/0] {652} + ¦ ¦--SPECIAL-PIPE: %>% [0/2] {653} + ¦ °--expr: { + [2/0] {654} + ¦ ¦--'{': { [0/4] {655} + ¦ ¦--expr: cor(. [1/2] {656} + ¦ ¦ ¦--expr: cor [0/0] {658} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: cor [0/0] {657} + ¦ ¦ ¦--'(': ( [0/0] {659} + ¦ ¦ ¦--expr: .$col [0/0] {660} + ¦ ¦ ¦ ¦--expr: . [0/0] {662} + ¦ ¦ ¦ ¦ °--SYMBOL: . [0/0] {661} + ¦ ¦ ¦ ¦--'$': $ [0/0] {663} + ¦ ¦ ¦ °--SYMBOL: col1 [0/0] {664} + ¦ ¦ ¦--',': , [0/1] {665} + ¦ ¦ ¦--expr: .$col [0/0] {666} + ¦ ¦ ¦ ¦--expr: . [0/0] {668} + ¦ ¦ ¦ ¦ °--SYMBOL: . 
[0/0] {667} + ¦ ¦ ¦ ¦--'$': $ [0/0] {669} + ¦ ¦ ¦ °--SYMBOL: col2 [0/0] {670} + ¦ ¦ ¦--',': , [0/1] {671} + ¦ ¦ ¦--SYMBOL_SUB: use [0/1] {672} + ¦ ¦ ¦--EQ_SUB: = [0/1] {673} + ¦ ¦ ¦--expr: "comp [0/0] {675} + ¦ ¦ ¦ °--STR_CONST: "comp [0/0] {674} + ¦ ¦ °--')': ) [0/0] {676} + ¦ °--'}': } [1/0] {677} + ¦--COMMENT: # bla [2/0] {678} + ¦--expr: c( + [1/0] {679} + ¦ ¦--expr: c [0/0] {681} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {680} + ¦ ¦--'(': ( [0/2] {682} + ¦ ¦--expr: data [1/0] {683} + ¦ ¦ ¦--expr: data [0/1] {686} + ¦ ¦ ¦ °--SYMBOL: data [0/0] {685} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {687} + ¦ ¦ ¦--expr: filte [1/1] {688} + ¦ ¦ ¦ ¦--expr: filte [0/0] {690} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: filte [0/0] {689} + ¦ ¦ ¦ ¦--'(': ( [0/0] {691} + ¦ ¦ ¦ ¦--expr: bar [0/0] {693} + ¦ ¦ ¦ ¦ °--SYMBOL: bar [0/0] {692} + ¦ ¦ ¦ °--')': ) [0/0] {694} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {695} + ¦ ¦ ¦--COMMENT: # com [1/4] {696} + ¦ ¦ °--expr: { + [2/0] {697} + ¦ ¦ ¦--'{': { [0/6] {698} + ¦ ¦ ¦--expr: cor(. [1/4] {699} + ¦ ¦ ¦ ¦--expr: cor [0/0] {701} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: cor [0/0] {700} + ¦ ¦ ¦ ¦--'(': ( [0/0] {702} + ¦ ¦ ¦ ¦--expr: .$col [0/0] {703} + ¦ ¦ ¦ ¦ ¦--expr: . [0/0] {705} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL: . [0/0] {704} + ¦ ¦ ¦ ¦ ¦--'$': $ [0/0] {706} + ¦ ¦ ¦ ¦ °--SYMBOL: col1 [0/0] {707} + ¦ ¦ ¦ ¦--',': , [0/1] {708} + ¦ ¦ ¦ ¦--expr: .$col [0/0] {709} + ¦ ¦ ¦ ¦ ¦--expr: . [0/0] {711} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL: . [0/0] {710} + ¦ ¦ ¦ ¦ ¦--'$': $ [0/0] {712} + ¦ ¦ ¦ ¦ °--SYMBOL: col2 [0/0] {713} + ¦ ¦ ¦ ¦--',': , [0/1] {714} + ¦ ¦ ¦ ¦--SYMBOL_SUB: use [0/1] {715} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {716} + ¦ ¦ ¦ ¦--expr: "comp [0/0] {718} + ¦ ¦ ¦ ¦ °--STR_CONST: "comp [0/0] {717} + ¦ ¦ ¦ °--')': ) [0/0] {719} + ¦ ¦ °--'}': } [1/0] {720} + ¦ °--')': ) [1/0] {721} + °--expr: data [2/0] {722} + ¦--expr: data [0/1] {725} + ¦ °--SYMBOL: data [0/0] {724} + ¦--SPECIAL-PIPE: %>% [0/2] {726} + ¦--expr: filte [1/1] {727} + ¦ ¦--expr: filte [0/0] {729} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: filte [0/0] {728} + ¦ ¦--'(': ( [0/0] {730} + ¦ ¦--expr: bar [0/0] {732} + ¦ ¦ °--SYMBOL: bar [0/0] {731} + ¦ °--')': ) [0/0] {733} + ¦--SPECIAL-PIPE: %>% [0/2] {734} + ¦--COMMENT: # com [1/2] {735} + °--expr: { + [2/0] {736} + ¦--'{': { [0/4] {737} + ¦--expr: cor(. [1/2] {738} + ¦ ¦--expr: cor [0/0] {740} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: cor [0/0] {739} + ¦ ¦--'(': ( [0/0] {741} + ¦ ¦--expr: .$col [0/0] {742} + ¦ ¦ ¦--expr: . [0/0] {744} + ¦ ¦ ¦ °--SYMBOL: . [0/0] {743} + ¦ ¦ ¦--'$': $ [0/0] {745} + ¦ ¦ °--SYMBOL: col1 [0/0] {746} + ¦ ¦--',': , [0/1] {747} + ¦ ¦--expr: .$col [0/0] {748} + ¦ ¦ ¦--expr: . [0/0] {750} + ¦ ¦ ¦ °--SYMBOL: . 
[0/0] {749} + ¦ ¦ ¦--'$': $ [0/0] {751} + ¦ ¦ °--SYMBOL: col2 [0/0] {752} + ¦ ¦--',': , [0/1] {753} + ¦ ¦--SYMBOL_SUB: use [0/1] {754} + ¦ ¦--EQ_SUB: = [0/1] {755} + ¦ ¦--expr: "comp [0/0] {757} + ¦ ¦ °--STR_CONST: "comp [0/0] {756} + ¦ °--')': ) [0/0] {758} + °--'}': } [1/0] {759} diff --git a/tests/testthat/line_breaks_and_other/pipe-line-breaks-out.R b/tests/testthat/line_breaks_and_other/pipe-line-breaks-out.R new file mode 100644 index 000000000..093655a80 --- /dev/null +++ b/tests/testthat/line_breaks_and_other/pipe-line-breaks-out.R @@ -0,0 +1,165 @@ +c(a %>% b()) + +c(a %>% b()) + +c(a + b %>% c()) + +c( + a %>% b() +) + +c(a %>% b()) + +c( + a %>% b() # 33 +) + +c( + a + b %>% c() +) + +c( + a + b %>% + c() +) + +c(a + b %>% + c()) + +c( + a + b %>% # 654 + c() +) + +c( # rr + a + b %>% + c() +) + +c( + a + + b %>% c() +) + +c(a + + b %>% c()) + +a %>% b() + +a %>% + b() %>% + q() + +a %>% + b() + +a %>% + b() %>% + c() + +# short pipes < 2 can stay on one line +a %>% b() + +fun( + x, + a %>% b() +) + +fun(x, + gg = a %>% b(), + tt %>% q() +) + +fun(x, gg = a %>% b(), tt %>% q()) + +z <- a %>% b() + +fun( + s = g(x), + gg = a(n == 2) %>% b(), + tt %>% q(r = 3) +) + +# FIXME closing brace could go on ntext line. Alternative: remove lin breaks completely. +blew(x %>% + c(), y = 2) + +# FIXME closing brace could go on ntext line. Alternative: move c() up. +blew(y = 2, x %>% + c()) + + +{ + a %>% c() + 1 +} + + +b %>% + f() %>% # never move comment to next line as it can be styler: off or nolint + k() %>% + x() + + +# line break before { inserted inside and outside function calls +c( + data %>% + filter(bar) %>% + { + cor(.$col1, .$col2, use = "complete.obs") + } +) + +data %>% + filter(bar) %>% + { + cor(.$col1, .$col2, use = "complete.obs") + } + +# line break before { kept inside and outside function calls +c( + data %>% + filter(bar) %>% + { + cor(.$col1, .$col2, use = "complete.obs") + } +) + +data %>% + filter(bar) %>% + { + cor(.$col1, .$col2, use = "complete.obs") + } + +# redundant blank lines removed +c( + data %>% + filter(bar) %>% + { + cor(.$col1, .$col2, use = "complete.obs") + } +) + +data %>% + filter(bar) %>% + { + cor(.$col1, .$col2, use = "complete.obs") + } + +# blank lines kept when around comment +c( + data %>% + filter(bar) %>% + # comment + + { + cor(.$col1, .$col2, use = "complete.obs") + } +) + +data %>% + filter(bar) %>% + # comment + + { + cor(.$col1, .$col2, use = "complete.obs") + } diff --git a/tests/testthat/line_breaks_and_other/pipe_and_comment-in_tree b/tests/testthat/line_breaks_and_other/pipe_and_comment-in_tree index ab7ec0e93..f46b15c2a 100644 --- a/tests/testthat/line_breaks_and_other/pipe_and_comment-in_tree +++ b/tests/testthat/line_breaks_and_other/pipe_and_comment-in_tree @@ -1,15 +1,15 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/1] {2} - ¦ ¦--expr: [0/0] {4} + °--expr: 1:10 [0/0] {1} + ¦--expr: 1:10 [0/1] {2} + ¦ ¦--expr: 1 [0/0] {4} ¦ ¦ °--NUM_CONST: 1 [0/0] {3} ¦ ¦--':': : [0/0] {5} - ¦ °--expr: [0/0] {7} + ¦ °--expr: 10 [0/0] {7} ¦ °--NUM_CONST: 10 [0/0] {6} ¦--SPECIAL-PIPE: %>% [0/1] {8} ¦--COMMENT: # sum [0/2] {9} - °--expr: [1/0] {10} - ¦--expr: [0/0] {12} + °--expr: sum() [1/0] {10} + ¦--expr: sum [0/0] {12} ¦ °--SYMBOL_FUNCTION_CALL: sum [0/0] {11} ¦--'(': ( [0/0] {13} °--')': ) [0/0] {14} diff --git a/tests/testthat/line_breaks_fun_call/blank-non-strict-in.R b/tests/testthat/line_breaks_fun_call/blank-non-strict-in.R new file mode 100644 index 000000000..21225f5f8 --- /dev/null +++ 
b/tests/testthat/line_breaks_fun_call/blank-non-strict-in.R @@ -0,0 +1,18 @@ +call( + + + 1 +) + +call( + # comment + + 1 +) + +call( + x = 2, + 1, + + "w" +) diff --git a/tests/testthat/line_breaks_fun_call/blank-non-strict-in_tree b/tests/testthat/line_breaks_fun_call/blank-non-strict-in_tree new file mode 100644 index 000000000..9632f334c --- /dev/null +++ b/tests/testthat/line_breaks_fun_call/blank-non-strict-in_tree @@ -0,0 +1,31 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: call( [0/0] {1} + ¦ ¦--expr: call [0/0] {3} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {2} + ¦ ¦--'(': ( [0/2] {4} + ¦ ¦--expr: 1 [3/0] {6} + ¦ ¦ °--NUM_CONST: 1 [0/0] {5} + ¦ °--')': ) [1/0] {7} + ¦--expr: call( [2/0] {8} + ¦ ¦--expr: call [0/0] {10} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {9} + ¦ ¦--'(': ( [0/2] {11} + ¦ ¦--COMMENT: # com [1/2] {12} + ¦ ¦--expr: 1 [2/0] {14} + ¦ ¦ °--NUM_CONST: 1 [0/0] {13} + ¦ °--')': ) [1/0] {15} + °--expr: call( [2/0] {16} + ¦--expr: call [0/0] {18} + ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {17} + ¦--'(': ( [0/2] {19} + ¦--SYMBOL_SUB: x [1/1] {20} + ¦--EQ_SUB: = [0/1] {21} + ¦--expr: 2 [0/0] {23} + ¦ °--NUM_CONST: 2 [0/0] {22} + ¦--',': , [0/2] {24} + ¦--expr: 1 [1/0] {26} + ¦ °--NUM_CONST: 1 [0/0] {25} + ¦--',': , [0/2] {27} + ¦--expr: "w" [2/0] {29} + ¦ °--STR_CONST: "w" [0/0] {28} + °--')': ) [1/0] {30} diff --git a/tests/testthat/line_breaks_fun_call/blank-non-strict-out.R b/tests/testthat/line_breaks_fun_call/blank-non-strict-out.R new file mode 100644 index 000000000..21225f5f8 --- /dev/null +++ b/tests/testthat/line_breaks_fun_call/blank-non-strict-out.R @@ -0,0 +1,18 @@ +call( + + + 1 +) + +call( + # comment + + 1 +) + +call( + x = 2, + 1, + + "w" +) diff --git a/tests/testthat/line_breaks_fun_call/blank-strict-in.R b/tests/testthat/line_breaks_fun_call/blank-strict-in.R new file mode 100644 index 000000000..5f67ed329 --- /dev/null +++ b/tests/testthat/line_breaks_fun_call/blank-strict-in.R @@ -0,0 +1,32 @@ +call( + + + 1 +) + +call( + # comment + + 1 +) + +call( + x = 2, + 1, + + "w" +) + +call( + 1, + 2, + + + + # comment + + 1, + 2, + + 3 +) diff --git a/tests/testthat/line_breaks_fun_call/blank-strict-in_tree b/tests/testthat/line_breaks_fun_call/blank-strict-in_tree new file mode 100644 index 000000000..9f5ef0916 --- /dev/null +++ b/tests/testthat/line_breaks_fun_call/blank-strict-in_tree @@ -0,0 +1,51 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: call( [0/0] {1} + ¦ ¦--expr: call [0/0] {3} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {2} + ¦ ¦--'(': ( [0/2] {4} + ¦ ¦--expr: 1 [3/0] {6} + ¦ ¦ °--NUM_CONST: 1 [0/0] {5} + ¦ °--')': ) [1/0] {7} + ¦--expr: call( [2/0] {8} + ¦ ¦--expr: call [0/0] {10} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {9} + ¦ ¦--'(': ( [0/2] {11} + ¦ ¦--COMMENT: # com [1/2] {12} + ¦ ¦--expr: 1 [2/0] {14} + ¦ ¦ °--NUM_CONST: 1 [0/0] {13} + ¦ °--')': ) [1/0] {15} + ¦--expr: call( [2/0] {16} + ¦ ¦--expr: call [0/0] {18} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {17} + ¦ ¦--'(': ( [0/2] {19} + ¦ ¦--SYMBOL_SUB: x [1/1] {20} + ¦ ¦--EQ_SUB: = [0/1] {21} + ¦ ¦--expr: 2 [0/0] {23} + ¦ ¦ °--NUM_CONST: 2 [0/0] {22} + ¦ ¦--',': , [0/2] {24} + ¦ ¦--expr: 1 [1/0] {26} + ¦ ¦ °--NUM_CONST: 1 [0/0] {25} + ¦ ¦--',': , [0/2] {27} + ¦ ¦--expr: "w" [2/0] {29} + ¦ ¦ °--STR_CONST: "w" [0/0] {28} + ¦ °--')': ) [1/0] {30} + °--expr: call( [2/0] {31} + ¦--expr: call [0/0] {33} + ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {32} + ¦--'(': ( [0/2] {34} + ¦--expr: 1 [1/0] {36} + ¦ °--NUM_CONST: 1 [0/0] {35} + ¦--',': , [0/2] {37} + 
¦--expr: 2 [1/0] {39} + ¦ °--NUM_CONST: 2 [0/0] {38} + ¦--',': , [0/2] {40} + ¦--COMMENT: # com [4/2] {41} + ¦--expr: 1 [2/0] {43} + ¦ °--NUM_CONST: 1 [0/0] {42} + ¦--',': , [0/2] {44} + ¦--expr: 2 [1/0] {46} + ¦ °--NUM_CONST: 2 [0/0] {45} + ¦--',': , [0/2] {47} + ¦--expr: 3 [2/0] {49} + ¦ °--NUM_CONST: 3 [0/0] {48} + °--')': ) [1/0] {50} diff --git a/tests/testthat/line_breaks_fun_call/blank-strict-out.R b/tests/testthat/line_breaks_fun_call/blank-strict-out.R new file mode 100644 index 000000000..cb9acea09 --- /dev/null +++ b/tests/testthat/line_breaks_fun_call/blank-strict-out.R @@ -0,0 +1,28 @@ +call( + 1 +) + +call( + # comment + + 1 +) + +call( + x = 2, + 1, + "w" +) + +call( + 1, + 2, + + + + # comment + + 1, + 2, + 3 +) diff --git a/tests/testthat/line_breaks_fun_call/line_breaks_and_comments-in_tree b/tests/testthat/line_breaks_fun_call/line_breaks_and_comments-in_tree index e121d7623..899d5d938 100644 --- a/tests/testthat/line_breaks_fun_call/line_breaks_and_comments-in_tree +++ b/tests/testthat/line_breaks_fun_call/line_breaks_and_comments-in_tree @@ -1,80 +1,80 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/0] {3} + ¦--expr: call( [0/0] {1} + ¦ ¦--expr: call [0/0] {3} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {2} ¦ ¦--'(': ( [0/1] {4} ¦ ¦--COMMENT: # com [0/2] {5} - ¦ ¦--expr: [1/0] {7} + ¦ ¦--expr: am [1/0] {7} ¦ ¦ °--SYMBOL: am [0/0] {6} ¦ °--')': ) [1/0] {8} - ¦--expr: [2/0] {9} - ¦ ¦--expr: [0/0] {11} + ¦--expr: call( [2/0] {9} + ¦ ¦--expr: call [0/0] {11} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {10} ¦ ¦--'(': ( [0/2] {12} ¦ ¦--COMMENT: # com [1/2] {13} - ¦ ¦--expr: [1/0] {15} + ¦ ¦--expr: am [1/0] {15} ¦ ¦ °--SYMBOL: am [0/0] {14} ¦ °--')': ) [1/0] {16} - ¦--expr: [2/0] {17} - ¦ ¦--expr: [0/0] {19} + ¦--expr: call( [2/0] {17} + ¦ ¦--expr: call [0/0] {19} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {18} ¦ ¦--'(': ( [0/0] {20} - ¦ ¦--expr: [0/1] {22} + ¦ ¦--expr: am [0/1] {22} ¦ ¦ °--SYMBOL: am [0/0] {21} ¦ ¦--COMMENT: # com [0/0] {23} ¦ °--')': ) [1/0] {24} - ¦--expr: [2/0] {25} - ¦ ¦--expr: [0/0] {27} + ¦--expr: call( [2/0] {25} + ¦ ¦--expr: call [0/0] {27} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {26} ¦ ¦--'(': ( [0/0] {28} - ¦ ¦--expr: [0/0] {30} + ¦ ¦--expr: am [0/0] {30} ¦ ¦ °--SYMBOL: am [0/0] {29} ¦ ¦--',': , [0/1] {31} ¦ ¦--COMMENT: # com [0/2] {32} - ¦ ¦--expr: [1/0] {34} + ¦ ¦--expr: pm [1/0] {34} ¦ ¦ °--SYMBOL: pm [0/0] {33} ¦ °--')': ) [1/0] {35} - ¦--expr: [3/0] {36} - ¦ ¦--expr: [0/0] {38} + ¦--expr: call( [3/0] {36} + ¦ ¦--expr: call [0/0] {38} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {37} ¦ ¦--'(': ( [0/0] {39} - ¦ ¦--expr: [0/0] {41} + ¦ ¦--expr: b [0/0] {41} ¦ ¦ °--SYMBOL: b [0/0] {40} ¦ °--')': ) [1/0] {42} - ¦--expr: [2/0] {43} - ¦ ¦--expr: [0/0] {45} + ¦--expr: call( [2/0] {43} + ¦ ¦--expr: call [0/0] {45} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {44} ¦ ¦--'(': ( [0/2] {46} - ¦ ¦--expr: [1/0] {48} + ¦ ¦--expr: a [1/0] {48} ¦ ¦ °--SYMBOL: a [0/0] {47} ¦ °--')': ) [1/0] {49} - ¦--expr: [2/0] {50} - ¦ ¦--expr: [0/0] {52} + ¦--expr: call( [2/0] {50} + ¦ ¦--expr: call [0/0] {52} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {51} ¦ ¦--'(': ( [0/2] {53} - ¦ ¦--expr: [1/1] {55} + ¦ ¦--expr: a [1/1] {55} ¦ ¦ °--SYMBOL: a [0/0] {54} ¦ ¦--COMMENT: # b [0/0] {56} ¦ °--')': ) [1/0] {57} - ¦--expr: [2/0] {58} - ¦ ¦--expr: [0/0] {60} + ¦--expr: call( [2/0] {58} + ¦ ¦--expr: call [0/0] {60} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {59} ¦ ¦--'(': ( [0/1] {61} ¦ ¦--COMMENT: # [0/0] {62} ¦ °--')': ) [1/0] {63} - ¦--expr: [2/0] {64} - ¦ 
¦--expr: [0/0] {66} + ¦--expr: call( [2/0] {64} + ¦ ¦--expr: call [0/0] {66} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {65} ¦ ¦--'(': ( [0/0] {67} - ¦ ¦--expr: [0/1] {69} + ¦ ¦--expr: a [0/1] {69} ¦ ¦ °--SYMBOL: a [0/0] {68} ¦ ¦--COMMENT: # b [0/0] {70} ¦ °--')': ) [1/0] {71} - °--expr: [1/0] {72} - ¦--expr: [0/0] {74} + °--expr: call( [1/0] {72} + ¦--expr: call [0/0] {74} ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {73} ¦--'(': ( [0/0] {75} ¦--COMMENT: # b [0/2] {76} - ¦--expr: [1/0] {78} + ¦--expr: a [1/0] {78} ¦ °--SYMBOL: a [0/0] {77} °--')': ) [0/0] {79} diff --git a/tests/testthat/line_breaks_fun_call/named_arguments-in.R b/tests/testthat/line_breaks_fun_call/named_arguments-in.R new file mode 100644 index 000000000..f9c70a872 --- /dev/null +++ b/tests/testthat/line_breaks_fun_call/named_arguments-in.R @@ -0,0 +1,18 @@ +call(3, + b = 2, c +) + +gs(3, b = 2, + c) + +call(3, b = 2, c) + +map(data, fun, + x = 3, z = 33) + +map2(dat1, data2, fun, x, y, + z) + +map2(dat1, data2, fun, x = 1, y = 2, + z +) diff --git a/tests/testthat/line_breaks_fun_call/named_arguments-in_tree b/tests/testthat/line_breaks_fun_call/named_arguments-in_tree new file mode 100644 index 000000000..0d1a9212a --- /dev/null +++ b/tests/testthat/line_breaks_fun_call/named_arguments-in_tree @@ -0,0 +1,114 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: call( [0/0] {1} + ¦ ¦--expr: call [0/0] {3} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {2} + ¦ ¦--'(': ( [0/0] {4} + ¦ ¦--expr: 3 [0/0] {6} + ¦ ¦ °--NUM_CONST: 3 [0/0] {5} + ¦ ¦--',': , [0/5] {7} + ¦ ¦--SYMBOL_SUB: b [1/1] {8} + ¦ ¦--EQ_SUB: = [0/1] {9} + ¦ ¦--expr: 2 [0/0] {11} + ¦ ¦ °--NUM_CONST: 2 [0/0] {10} + ¦ ¦--',': , [0/1] {12} + ¦ ¦--expr: c [0/0] {14} + ¦ ¦ °--SYMBOL: c [0/0] {13} + ¦ °--')': ) [1/0] {15} + ¦--expr: gs(3, [2/0] {16} + ¦ ¦--expr: gs [0/0] {18} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: gs [0/0] {17} + ¦ ¦--'(': ( [0/0] {19} + ¦ ¦--expr: 3 [0/0] {21} + ¦ ¦ °--NUM_CONST: 3 [0/0] {20} + ¦ ¦--',': , [0/1] {22} + ¦ ¦--SYMBOL_SUB: b [0/1] {23} + ¦ ¦--EQ_SUB: = [0/1] {24} + ¦ ¦--expr: 2 [0/0] {26} + ¦ ¦ °--NUM_CONST: 2 [0/0] {25} + ¦ ¦--',': , [0/3] {27} + ¦ ¦--expr: c [1/0] {29} + ¦ ¦ °--SYMBOL: c [0/0] {28} + ¦ °--')': ) [0/0] {30} + ¦--expr: call( [2/0] {31} + ¦ ¦--expr: call [0/0] {33} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {32} + ¦ ¦--'(': ( [0/0] {34} + ¦ ¦--expr: 3 [0/0] {36} + ¦ ¦ °--NUM_CONST: 3 [0/0] {35} + ¦ ¦--',': , [0/1] {37} + ¦ ¦--SYMBOL_SUB: b [0/1] {38} + ¦ ¦--EQ_SUB: = [0/1] {39} + ¦ ¦--expr: 2 [0/0] {41} + ¦ ¦ °--NUM_CONST: 2 [0/0] {40} + ¦ ¦--',': , [0/1] {42} + ¦ ¦--expr: c [0/0] {44} + ¦ ¦ °--SYMBOL: c [0/0] {43} + ¦ °--')': ) [0/0] {45} + ¦--expr: map(d [2/0] {46} + ¦ ¦--expr: map [0/0] {48} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: map [0/0] {47} + ¦ ¦--'(': ( [0/0] {49} + ¦ ¦--expr: data [0/0] {51} + ¦ ¦ °--SYMBOL: data [0/0] {50} + ¦ ¦--',': , [0/1] {52} + ¦ ¦--expr: fun [0/0] {54} + ¦ ¦ °--SYMBOL: fun [0/0] {53} + ¦ ¦--',': , [0/4] {55} + ¦ ¦--SYMBOL_SUB: x [1/1] {56} + ¦ ¦--EQ_SUB: = [0/1] {57} + ¦ ¦--expr: 3 [0/0] {59} + ¦ ¦ °--NUM_CONST: 3 [0/0] {58} + ¦ ¦--',': , [0/1] {60} + ¦ ¦--SYMBOL_SUB: z [0/1] {61} + ¦ ¦--EQ_SUB: = [0/1] {62} + ¦ ¦--expr: 33 [0/0] {64} + ¦ ¦ °--NUM_CONST: 33 [0/0] {63} + ¦ °--')': ) [0/0] {65} + ¦--expr: map2( [2/0] {66} + ¦ ¦--expr: map2 [0/0] {68} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: map2 [0/0] {67} + ¦ ¦--'(': ( [0/0] {69} + ¦ ¦--expr: dat1 [0/0] {71} + ¦ ¦ °--SYMBOL: dat1 [0/0] {70} + ¦ ¦--',': , [0/1] {72} + ¦ ¦--expr: data2 [0/0] {74} + ¦ ¦ °--SYMBOL: data2 [0/0] {73} + ¦ ¦--',': , [0/1] {75} + ¦ 
¦--expr: fun [0/0] {77} + ¦ ¦ °--SYMBOL: fun [0/0] {76} + ¦ ¦--',': , [0/1] {78} + ¦ ¦--expr: x [0/0] {80} + ¦ ¦ °--SYMBOL: x [0/0] {79} + ¦ ¦--',': , [0/1] {81} + ¦ ¦--expr: y [0/0] {83} + ¦ ¦ °--SYMBOL: y [0/0] {82} + ¦ ¦--',': , [0/5] {84} + ¦ ¦--expr: z [1/0] {86} + ¦ ¦ °--SYMBOL: z [0/0] {85} + ¦ °--')': ) [0/0] {87} + °--expr: map2( [2/0] {88} + ¦--expr: map2 [0/0] {90} + ¦ °--SYMBOL_FUNCTION_CALL: map2 [0/0] {89} + ¦--'(': ( [0/0] {91} + ¦--expr: dat1 [0/0] {93} + ¦ °--SYMBOL: dat1 [0/0] {92} + ¦--',': , [0/1] {94} + ¦--expr: data2 [0/0] {96} + ¦ °--SYMBOL: data2 [0/0] {95} + ¦--',': , [0/1] {97} + ¦--expr: fun [0/0] {99} + ¦ °--SYMBOL: fun [0/0] {98} + ¦--',': , [0/1] {100} + ¦--SYMBOL_SUB: x [0/1] {101} + ¦--EQ_SUB: = [0/1] {102} + ¦--expr: 1 [0/0] {104} + ¦ °--NUM_CONST: 1 [0/0] {103} + ¦--',': , [0/1] {105} + ¦--SYMBOL_SUB: y [0/1] {106} + ¦--EQ_SUB: = [0/1] {107} + ¦--expr: 2 [0/0] {109} + ¦ °--NUM_CONST: 2 [0/0] {108} + ¦--',': , [0/2] {110} + ¦--expr: z [1/0] {112} + ¦ °--SYMBOL: z [0/0] {111} + °--')': ) [1/0] {113} diff --git a/tests/testthat/line_breaks_fun_call/named_arguments-out.R b/tests/testthat/line_breaks_fun_call/named_arguments-out.R new file mode 100644 index 000000000..485e4d609 --- /dev/null +++ b/tests/testthat/line_breaks_fun_call/named_arguments-out.R @@ -0,0 +1,24 @@ +call(3, + b = 2, c +) + +gs(3, + b = 2, + c +) + +call(3, b = 2, c) + +map(data, fun, + x = 3, z = 33 +) + +map2( + dat1, data2, fun, x, y, + z +) + +map2(dat1, data2, fun, + x = 1, y = 2, + z +) diff --git a/tests/testthat/line_breaks_fun_call/switch_ifelse_etc_no_line_break-in.R b/tests/testthat/line_breaks_fun_call/switch_ifelse_etc_no_line_break-in.R index f64954e3c..db6ecd18f 100644 --- a/tests/testthat/line_breaks_fun_call/switch_ifelse_etc_no_line_break-in.R +++ b/tests/testthat/line_breaks_fun_call/switch_ifelse_etc_no_line_break-in.R @@ -3,12 +3,45 @@ call( 3 ) -switch(abc, - wei9 +switch( + x, + a = 2, + y = 3 ) -switch(abc, - wei9 + +switch( # + x, + a = 2, + y = 3 +) + + + +switch( + x, + a = 2, # + + + y = 3 +) + + +switch( + x,a = 2, + y = 3 +) + +switch(x,a = 2, + y = 3 +) + +switch(x,a = 2, y = 3) + +switch(x,a = 2, y = 3 +) # + +switch(x,a = 2, y = 3 # ) if_else(a, @@ -18,3 +51,25 @@ if_else(a, ifelse(x, y, z ) + + +# namespacing +base::switch(f, + x = 2, + y = 3 +) + +base::switch( + f, + x = 2, + y = 3 +) + +dplyr::ifelse(x, + 1, 32 +) + +dplyr::ifelse( + x, + 1, 32 +) diff --git a/tests/testthat/line_breaks_fun_call/switch_ifelse_etc_no_line_break-in_tree b/tests/testthat/line_breaks_fun_call/switch_ifelse_etc_no_line_break-in_tree index 7912c96f4..995abb3c5 100644 --- a/tests/testthat/line_breaks_fun_call/switch_ifelse_etc_no_line_break-in_tree +++ b/tests/testthat/line_breaks_fun_call/switch_ifelse_etc_no_line_break-in_tree @@ -1,57 +1,246 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/0] {3} + ¦--expr: call( [0/0] {1} + ¦ ¦--expr: call [0/0] {3} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {2} ¦ ¦--'(': ( [0/2] {4} - ¦ ¦--expr: [1/0] {6} + ¦ ¦--expr: 2 [1/0] {6} ¦ ¦ °--NUM_CONST: 2 [0/0] {5} ¦ ¦--',': , [0/2] {7} - ¦ ¦--expr: [1/0] {9} + ¦ ¦--expr: 3 [1/0] {9} ¦ ¦ °--NUM_CONST: 3 [0/0] {8} ¦ °--')': ) [1/0] {10} - ¦--expr: [2/0] {11} - ¦ ¦--expr: [0/0] {13} + ¦--expr: switc [2/0] {11} + ¦ ¦--expr: switc [0/0] {13} ¦ ¦ °--SYMBOL_FUNCTION_CALL: switc [0/0] {12} - ¦ ¦--'(': ( [0/0] {14} - ¦ ¦--expr: [0/0] {16} - ¦ ¦ °--SYMBOL: abc [0/0] {15} + ¦ ¦--'(': ( [0/2] {14} + ¦ ¦--expr: x [1/0] {16} + ¦ ¦ °--SYMBOL: x [0/0] {15} ¦ ¦--',': 
, [0/2] {17} - ¦ ¦--expr: [1/0] {19} - ¦ ¦ °--SYMBOL: wei9 [0/0] {18} - ¦ °--')': ) [1/0] {20} - ¦--expr: [2/0] {21} - ¦ ¦--expr: [0/0] {23} - ¦ ¦ °--SYMBOL_FUNCTION_CALL: switc [0/0] {22} - ¦ ¦--'(': ( [0/0] {24} - ¦ ¦--expr: [0/0] {26} - ¦ ¦ °--SYMBOL: abc [0/0] {25} - ¦ ¦--',': , [0/2] {27} - ¦ ¦--expr: [1/0] {29} - ¦ ¦ °--SYMBOL: wei9 [0/0] {28} - ¦ °--')': ) [1/0] {30} - ¦--expr: [2/0] {31} - ¦ ¦--expr: [0/0] {33} - ¦ ¦ °--SYMBOL_FUNCTION_CALL: if_el [0/0] {32} - ¦ ¦--'(': ( [0/0] {34} - ¦ ¦--expr: [0/0] {36} - ¦ ¦ °--SYMBOL: a [0/0] {35} - ¦ ¦--',': , [0/2] {37} - ¦ ¦--expr: [1/0] {39} - ¦ ¦ °--SYMBOL: c [0/0] {38} - ¦ ¦--',': , [0/1] {40} - ¦ ¦--expr: [0/0] {42} - ¦ ¦ °--SYMBOL: v [0/0] {41} - ¦ °--')': ) [1/0] {43} - °--expr: [2/0] {44} - ¦--expr: [0/0] {46} - ¦ °--SYMBOL_FUNCTION_CALL: ifels [0/0] {45} - ¦--'(': ( [0/0] {47} - ¦--expr: [0/0] {49} - ¦ °--SYMBOL: x [0/0] {48} - ¦--',': , [0/2] {50} - ¦--expr: [1/0] {52} - ¦ °--SYMBOL: y [0/0] {51} - ¦--',': , [0/1] {53} - ¦--expr: [0/0] {55} - ¦ °--SYMBOL: z [0/0] {54} - °--')': ) [1/0] {56} + ¦ ¦--SYMBOL_SUB: a [1/1] {18} + ¦ ¦--EQ_SUB: = [0/1] {19} + ¦ ¦--expr: 2 [0/0] {21} + ¦ ¦ °--NUM_CONST: 2 [0/0] {20} + ¦ ¦--',': , [0/2] {22} + ¦ ¦--SYMBOL_SUB: y [1/1] {23} + ¦ ¦--EQ_SUB: = [0/1] {24} + ¦ ¦--expr: 3 [0/0] {26} + ¦ ¦ °--NUM_CONST: 3 [0/0] {25} + ¦ °--')': ) [1/0] {27} + ¦--expr: switc [3/0] {28} + ¦ ¦--expr: switc [0/0] {30} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: switc [0/0] {29} + ¦ ¦--'(': ( [0/1] {31} + ¦ ¦--COMMENT: # [0/2] {32} + ¦ ¦--expr: x [1/0] {34} + ¦ ¦ °--SYMBOL: x [0/0] {33} + ¦ ¦--',': , [0/2] {35} + ¦ ¦--SYMBOL_SUB: a [1/1] {36} + ¦ ¦--EQ_SUB: = [0/1] {37} + ¦ ¦--expr: 2 [0/0] {39} + ¦ ¦ °--NUM_CONST: 2 [0/0] {38} + ¦ ¦--',': , [0/2] {40} + ¦ ¦--SYMBOL_SUB: y [1/1] {41} + ¦ ¦--EQ_SUB: = [0/1] {42} + ¦ ¦--expr: 3 [0/0] {44} + ¦ ¦ °--NUM_CONST: 3 [0/0] {43} + ¦ °--')': ) [1/0] {45} + ¦--expr: switc [4/0] {46} + ¦ ¦--expr: switc [0/0] {48} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: switc [0/0] {47} + ¦ ¦--'(': ( [0/2] {49} + ¦ ¦--expr: x [1/0] {51} + ¦ ¦ °--SYMBOL: x [0/0] {50} + ¦ ¦--',': , [0/2] {52} + ¦ ¦--SYMBOL_SUB: a [1/1] {53} + ¦ ¦--EQ_SUB: = [0/1] {54} + ¦ ¦--expr: 2 [0/0] {56} + ¦ ¦ °--NUM_CONST: 2 [0/0] {55} + ¦ ¦--',': , [0/1] {57} + ¦ ¦--COMMENT: # [0/2] {58} + ¦ ¦--SYMBOL_SUB: y [3/1] {59} + ¦ ¦--EQ_SUB: = [0/1] {60} + ¦ ¦--expr: 3 [0/0] {62} + ¦ ¦ °--NUM_CONST: 3 [0/0] {61} + ¦ °--')': ) [1/0] {63} + ¦--expr: switc [3/0] {64} + ¦ ¦--expr: switc [0/0] {66} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: switc [0/0] {65} + ¦ ¦--'(': ( [0/7] {67} + ¦ ¦--expr: x [1/0] {69} + ¦ ¦ °--SYMBOL: x [0/0] {68} + ¦ ¦--',': , [0/0] {70} + ¦ ¦--SYMBOL_SUB: a [0/1] {71} + ¦ ¦--EQ_SUB: = [0/1] {72} + ¦ ¦--expr: 2 [0/0] {74} + ¦ ¦ °--NUM_CONST: 2 [0/0] {73} + ¦ ¦--',': , [0/7] {75} + ¦ ¦--SYMBOL_SUB: y [1/1] {76} + ¦ ¦--EQ_SUB: = [0/1] {77} + ¦ ¦--expr: 3 [0/0] {79} + ¦ ¦ °--NUM_CONST: 3 [0/0] {78} + ¦ °--')': ) [1/0] {80} + ¦--expr: switc [2/0] {81} + ¦ ¦--expr: switc [0/0] {83} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: switc [0/0] {82} + ¦ ¦--'(': ( [0/0] {84} + ¦ ¦--expr: x [0/0] {86} + ¦ ¦ °--SYMBOL: x [0/0] {85} + ¦ ¦--',': , [0/0] {87} + ¦ ¦--SYMBOL_SUB: a [0/1] {88} + ¦ ¦--EQ_SUB: = [0/1] {89} + ¦ ¦--expr: 2 [0/0] {91} + ¦ ¦ °--NUM_CONST: 2 [0/0] {90} + ¦ ¦--',': , [0/2] {92} + ¦ ¦--SYMBOL_SUB: y [1/1] {93} + ¦ ¦--EQ_SUB: = [0/1] {94} + ¦ ¦--expr: 3 [0/0] {96} + ¦ ¦ °--NUM_CONST: 3 [0/0] {95} + ¦ °--')': ) [1/0] {97} + ¦--expr: switc [2/0] {98} + ¦ ¦--expr: switc [0/0] {100} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: switc [0/0] {99} + ¦ ¦--'(': ( [0/0] {101} + ¦ 
¦--expr: x [0/0] {103} + ¦ ¦ °--SYMBOL: x [0/0] {102} + ¦ ¦--',': , [0/0] {104} + ¦ ¦--SYMBOL_SUB: a [0/1] {105} + ¦ ¦--EQ_SUB: = [0/1] {106} + ¦ ¦--expr: 2 [0/0] {108} + ¦ ¦ °--NUM_CONST: 2 [0/0] {107} + ¦ ¦--',': , [0/1] {109} + ¦ ¦--SYMBOL_SUB: y [0/1] {110} + ¦ ¦--EQ_SUB: = [0/1] {111} + ¦ ¦--expr: 3 [0/0] {113} + ¦ ¦ °--NUM_CONST: 3 [0/0] {112} + ¦ °--')': ) [0/0] {114} + ¦--expr: switc [2/1] {115} + ¦ ¦--expr: switc [0/0] {117} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: switc [0/0] {116} + ¦ ¦--'(': ( [0/0] {118} + ¦ ¦--expr: x [0/0] {120} + ¦ ¦ °--SYMBOL: x [0/0] {119} + ¦ ¦--',': , [0/0] {121} + ¦ ¦--SYMBOL_SUB: a [0/1] {122} + ¦ ¦--EQ_SUB: = [0/1] {123} + ¦ ¦--expr: 2 [0/0] {125} + ¦ ¦ °--NUM_CONST: 2 [0/0] {124} + ¦ ¦--',': , [0/1] {126} + ¦ ¦--SYMBOL_SUB: y [0/1] {127} + ¦ ¦--EQ_SUB: = [0/1] {128} + ¦ ¦--expr: 3 [0/0] {130} + ¦ ¦ °--NUM_CONST: 3 [0/0] {129} + ¦ °--')': ) [1/0] {131} + ¦--COMMENT: # [0/0] {132} + ¦--expr: switc [2/0] {133} + ¦ ¦--expr: switc [0/0] {135} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: switc [0/0] {134} + ¦ ¦--'(': ( [0/0] {136} + ¦ ¦--expr: x [0/0] {138} + ¦ ¦ °--SYMBOL: x [0/0] {137} + ¦ ¦--',': , [0/0] {139} + ¦ ¦--SYMBOL_SUB: a [0/1] {140} + ¦ ¦--EQ_SUB: = [0/1] {141} + ¦ ¦--expr: 2 [0/0] {143} + ¦ ¦ °--NUM_CONST: 2 [0/0] {142} + ¦ ¦--',': , [0/1] {144} + ¦ ¦--SYMBOL_SUB: y [0/1] {145} + ¦ ¦--EQ_SUB: = [0/1] {146} + ¦ ¦--expr: 3 [0/1] {148} + ¦ ¦ °--NUM_CONST: 3 [0/0] {147} + ¦ ¦--COMMENT: # [0/0] {149} + ¦ °--')': ) [1/0] {150} + ¦--expr: if_el [2/0] {151} + ¦ ¦--expr: if_el [0/0] {153} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: if_el [0/0] {152} + ¦ ¦--'(': ( [0/0] {154} + ¦ ¦--expr: a [0/0] {156} + ¦ ¦ °--SYMBOL: a [0/0] {155} + ¦ ¦--',': , [0/2] {157} + ¦ ¦--expr: c [1/0] {159} + ¦ ¦ °--SYMBOL: c [0/0] {158} + ¦ ¦--',': , [0/1] {160} + ¦ ¦--expr: v [0/0] {162} + ¦ ¦ °--SYMBOL: v [0/0] {161} + ¦ °--')': ) [1/0] {163} + ¦--expr: ifels [2/0] {164} + ¦ ¦--expr: ifels [0/0] {166} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: ifels [0/0] {165} + ¦ ¦--'(': ( [0/0] {167} + ¦ ¦--expr: x [0/0] {169} + ¦ ¦ °--SYMBOL: x [0/0] {168} + ¦ ¦--',': , [0/2] {170} + ¦ ¦--expr: y [1/0] {172} + ¦ ¦ °--SYMBOL: y [0/0] {171} + ¦ ¦--',': , [0/1] {173} + ¦ ¦--expr: z [0/0] {175} + ¦ ¦ °--SYMBOL: z [0/0] {174} + ¦ °--')': ) [1/0] {176} + ¦--COMMENT: # nam [3/0] {177} + ¦--expr: base: [1/0] {178} + ¦ ¦--expr: base: [0/0] {179} + ¦ ¦ ¦--SYMBOL_PACKAGE: base [0/0] {180} + ¦ ¦ ¦--NS_GET: :: [0/0] {181} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: switc [0/0] {182} + ¦ ¦--'(': ( [0/0] {183} + ¦ ¦--expr: f [0/0] {185} + ¦ ¦ °--SYMBOL: f [0/0] {184} + ¦ ¦--',': , [0/13] {186} + ¦ ¦--SYMBOL_SUB: x [1/1] {187} + ¦ ¦--EQ_SUB: = [0/1] {188} + ¦ ¦--expr: 2 [0/0] {190} + ¦ ¦ °--NUM_CONST: 2 [0/0] {189} + ¦ ¦--',': , [0/13] {191} + ¦ ¦--SYMBOL_SUB: y [1/1] {192} + ¦ ¦--EQ_SUB: = [0/1] {193} + ¦ ¦--expr: 3 [0/0] {195} + ¦ ¦ °--NUM_CONST: 3 [0/0] {194} + ¦ °--')': ) [1/0] {196} + ¦--expr: base: [2/0] {197} + ¦ ¦--expr: base: [0/0] {198} + ¦ ¦ ¦--SYMBOL_PACKAGE: base [0/0] {199} + ¦ ¦ ¦--NS_GET: :: [0/0] {200} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: switc [0/0] {201} + ¦ ¦--'(': ( [0/2] {202} + ¦ ¦--expr: f [1/0] {204} + ¦ ¦ °--SYMBOL: f [0/0] {203} + ¦ ¦--',': , [0/13] {205} + ¦ ¦--SYMBOL_SUB: x [1/1] {206} + ¦ ¦--EQ_SUB: = [0/1] {207} + ¦ ¦--expr: 2 [0/0] {209} + ¦ ¦ °--NUM_CONST: 2 [0/0] {208} + ¦ ¦--',': , [0/13] {210} + ¦ ¦--SYMBOL_SUB: y [1/1] {211} + ¦ ¦--EQ_SUB: = [0/1] {212} + ¦ ¦--expr: 3 [0/0] {214} + ¦ ¦ °--NUM_CONST: 3 [0/0] {213} + ¦ °--')': ) [1/0] {215} + ¦--expr: dplyr [2/0] {216} + ¦ ¦--expr: dplyr [0/0] {217} + ¦ ¦ ¦--SYMBOL_PACKAGE: dplyr 
[0/0] {218} + ¦ ¦ ¦--NS_GET: :: [0/0] {219} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: ifels [0/0] {220} + ¦ ¦--'(': ( [0/0] {221} + ¦ ¦--expr: x [0/0] {223} + ¦ ¦ °--SYMBOL: x [0/0] {222} + ¦ ¦--',': , [0/14] {224} + ¦ ¦--expr: 1 [1/0] {226} + ¦ ¦ °--NUM_CONST: 1 [0/0] {225} + ¦ ¦--',': , [0/1] {227} + ¦ ¦--expr: 32 [0/0] {229} + ¦ ¦ °--NUM_CONST: 32 [0/0] {228} + ¦ °--')': ) [1/0] {230} + °--expr: dplyr [2/0] {231} + ¦--expr: dplyr [0/0] {232} + ¦ ¦--SYMBOL_PACKAGE: dplyr [0/0] {233} + ¦ ¦--NS_GET: :: [0/0] {234} + ¦ °--SYMBOL_FUNCTION_CALL: ifels [0/0] {235} + ¦--'(': ( [0/2] {236} + ¦--expr: x [1/0] {238} + ¦ °--SYMBOL: x [0/0] {237} + ¦--',': , [0/14] {239} + ¦--expr: 1 [1/0] {241} + ¦ °--NUM_CONST: 1 [0/0] {240} + ¦--',': , [0/1] {242} + ¦--expr: 32 [0/0] {244} + ¦ °--NUM_CONST: 32 [0/0] {243} + °--')': ) [1/0] {245} diff --git a/tests/testthat/line_breaks_fun_call/switch_ifelse_etc_no_line_break-out.R b/tests/testthat/line_breaks_fun_call/switch_ifelse_etc_no_line_break-out.R index f64954e3c..e6cfeee64 100644 --- a/tests/testthat/line_breaks_fun_call/switch_ifelse_etc_no_line_break-out.R +++ b/tests/testthat/line_breaks_fun_call/switch_ifelse_etc_no_line_break-out.R @@ -3,12 +3,49 @@ call( 3 ) -switch(abc, - wei9 +switch(x, + a = 2, + y = 3 ) -switch(abc, - wei9 + +switch( # + x, + a = 2, + y = 3 +) + + + +switch(x, + a = 2, # + y = 3 +) + + +switch(x, + a = 2, + y = 3 +) + +switch(x, + a = 2, + y = 3 +) + +switch(x, + a = 2, + y = 3 +) + +switch(x, + a = 2, + y = 3 +) # + +switch(x, + a = 2, + y = 3 # ) if_else(a, @@ -18,3 +55,24 @@ if_else(a, ifelse(x, y, z ) + + +# namespacing +base::switch(f, + x = 2, + y = 3 +) + +base::switch(f, + x = 2, + y = 3 +) + +dplyr::ifelse(x, + 1, 32 +) + +dplyr::ifelse( + x, + 1, 32 +) diff --git a/tests/testthat/line_breaks_fun_call/token_dependent_comments-in_tree b/tests/testthat/line_breaks_fun_call/token_dependent_comments-in_tree index 0a20815a8..519525676 100644 --- a/tests/testthat/line_breaks_fun_call/token_dependent_comments-in_tree +++ b/tests/testthat/line_breaks_fun_call/token_dependent_comments-in_tree @@ -1,33 +1,33 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/0] {3} + ¦--expr: call( [0/0] {1} + ¦ ¦--expr: call [0/0] {3} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {2} ¦ ¦--'(': ( [0/0] {4} - ¦ ¦--expr: [0/0] {5} - ¦ ¦ ¦--expr: [0/0] {7} + ¦ ¦--expr: call( [0/0] {5} + ¦ ¦ ¦--expr: call [0/0] {7} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {6} ¦ ¦ ¦--'(': ( [0/6] {8} ¦ ¦ ¦--COMMENT: # com [0/0] {9} - ¦ ¦ ¦--expr: [1/3] {11} + ¦ ¦ ¦--expr: 3 [1/3] {11} ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {10} ¦ ¦ ¦--',': , [0/1] {12} - ¦ ¦ ¦--expr: [0/2] {14} + ¦ ¦ ¦--expr: 4 [0/2] {14} ¦ ¦ ¦ °--NUM_CONST: 4 [0/0] {13} ¦ ¦ °--')': ) [1/0] {15} ¦ °--')': ) [0/0] {16} - °--expr: [2/0] {17} - ¦--expr: [0/0] {19} + °--expr: call( [2/0] {17} + ¦--expr: call [0/0] {19} ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {18} ¦--'(': ( [0/3] {20} - ¦--expr: [0/0] {21} - ¦ ¦--expr: [0/0] {23} + ¦--expr: call( [0/0] {21} + ¦ ¦--expr: call [0/0] {23} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {22} ¦ ¦--'(': ( [0/0] {24} - ¦ ¦--expr: [0/0] {26} + ¦ ¦--expr: 1 [0/0] {26} ¦ ¦ °--NUM_CONST: 1 [0/0] {25} ¦ ¦--',': , [0/1] {27} ¦ ¦--COMMENT: # com [0/4] {28} - ¦ ¦--expr: [1/0] {30} + ¦ ¦--expr: 3 [1/0] {30} ¦ ¦ °--NUM_CONST: 3 [0/0] {29} ¦ °--')': ) [1/0] {31} °--')': ) [0/0] {32} diff --git a/tests/testthat/line_breaks_fun_call/token_dependent_complex_non_strict-in_tree b/tests/testthat/line_breaks_fun_call/token_dependent_complex_non_strict-in_tree index 
76e7330c7..40de220ba 100644 --- a/tests/testthat/line_breaks_fun_call/token_dependent_complex_non_strict-in_tree +++ b/tests/testthat/line_breaks_fun_call/token_dependent_complex_non_strict-in_tree @@ -1,135 +1,138 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/0] {3} + ¦--expr: call( [0/0] {1} + ¦ ¦--expr: call [0/0] {3} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {2} ¦ ¦--'(': ( [0/0] {4} - ¦ ¦--expr: [0/0] {5} - ¦ ¦ ¦--expr: [0/0] {7} + ¦ ¦--expr: call( [0/0] {5} + ¦ ¦ ¦--expr: call [0/0] {7} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {6} ¦ ¦ ¦--'(': ( [0/2] {8} - ¦ ¦ ¦--expr: [1/0] {10} + ¦ ¦ ¦--expr: 2 [1/0] {10} ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {9} ¦ ¦ °--')': ) [1/0] {11} ¦ °--')': ) [0/0] {12} - ¦--expr: [2/0] {13} - ¦ ¦--expr: [0/0] {15} + ¦--expr: call( [2/0] {13} + ¦ ¦--expr: call [0/0] {15} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {14} ¦ ¦--'(': ( [0/0] {16} - ¦ ¦--expr: [0/0] {17} - ¦ ¦ ¦--expr: [0/0] {19} + ¦ ¦--expr: call( [0/0] {17} + ¦ ¦ ¦--expr: call [0/0] {19} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {18} ¦ ¦ ¦--'(': ( [0/0] {20} - ¦ ¦ ¦--expr: [0/0] {22} + ¦ ¦ ¦--expr: 1 [0/0] {22} ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {21} ¦ ¦ ¦--',': , [0/10] {23} - ¦ ¦ ¦--expr: [1/0] {25} + ¦ ¦ ¦--expr: 2 [1/0] {25} ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {24} ¦ ¦ °--')': ) [0/0] {26} ¦ °--')': ) [0/0] {27} ¦--COMMENT: # mul [1/0] {28} - ¦--expr: [1/0] {29} - ¦ ¦--expr: [0/0] {31} + ¦--expr: call( [1/0] {29} + ¦ ¦--expr: call [0/0] {31} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {30} ¦ ¦--'(': ( [0/0] {32} - ¦ ¦--expr: [0/0] {33} - ¦ ¦ ¦--expr: [0/0] {35} + ¦ ¦--expr: a(b(c [0/0] {33} + ¦ ¦ ¦--expr: a [0/0] {35} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: a [0/0] {34} ¦ ¦ ¦--'(': ( [0/0] {36} - ¦ ¦ ¦--expr: [0/0] {37} - ¦ ¦ ¦ ¦--expr: [0/0] {39} + ¦ ¦ ¦--expr: b(c({ [0/0] {37} + ¦ ¦ ¦ ¦--expr: b [0/0] {39} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {38} ¦ ¦ ¦ ¦--'(': ( [0/0] {40} - ¦ ¦ ¦ ¦--expr: [0/0] {41} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {43} + ¦ ¦ ¦ ¦--expr: c({ +} [0/0] {41} + ¦ ¦ ¦ ¦ ¦--expr: c [0/0] {43} ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {42} ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {44} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {45} + ¦ ¦ ¦ ¦ ¦--expr: { +} [0/0] {45} ¦ ¦ ¦ ¦ ¦ ¦--'{': { [0/0] {46} ¦ ¦ ¦ ¦ ¦ °--'}': } [1/0] {47} ¦ ¦ ¦ ¦ °--')': ) [0/0] {48} ¦ ¦ ¦ °--')': ) [0/0] {49} ¦ ¦ °--')': ) [0/0] {50} ¦ °--')': ) [0/0] {51} - ¦--expr: [2/0] {52} - ¦ ¦--expr: [0/0] {54} + ¦--expr: call( [2/0] {52} + ¦ ¦--expr: call [0/0] {54} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {53} ¦ ¦--'(': ( [0/0] {55} - ¦ ¦--expr: [0/1] {56} - ¦ ¦ ¦--expr: [0/0] {58} + ¦ ¦--expr: call( [0/1] {56} + ¦ ¦ ¦--expr: call [0/0] {58} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {57} ¦ ¦ ¦--'(': ( [0/2] {59} - ¦ ¦ ¦--expr: [1/1] {61} + ¦ ¦ ¦--expr: 2 [1/1] {61} ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {60} ¦ ¦ °--')': ) [0/0] {62} ¦ ¦--',': , [0/2] {63} - ¦ ¦--expr: [1/1] {65} + ¦ ¦--expr: 5 [1/1] {65} ¦ ¦ °--NUM_CONST: 5 [0/0] {64} ¦ °--')': ) [0/0] {66} - ¦--expr: [3/0] {67} - ¦ ¦--expr: [0/0] {69} + ¦--expr: call( [3/0] {67} + ¦ ¦--expr: call [0/0] {69} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {68} ¦ ¦--'(': ( [0/0] {70} - ¦ ¦--expr: [0/0] {71} - ¦ ¦ ¦--expr: [0/0] {73} + ¦ ¦--expr: call( [0/0] {71} + ¦ ¦ ¦--expr: call [0/0] {73} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {72} ¦ ¦ ¦--'(': ( [0/0] {74} - ¦ ¦ ¦--expr: [0/0] {76} + ¦ ¦ ¦--expr: 1 [0/0] {76} ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {75} ¦ ¦ ¦--',': , [0/10] {77} - ¦ ¦ ¦--expr: [1/0] {79} + ¦ ¦ ¦--expr: 2 [1/0] {79} ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {78} ¦ ¦ ¦--',': , [0/1] {80} - ¦ ¦ ¦--expr: [0/0] {81} - ¦ ¦ 
¦ ¦--expr: [0/0] {83} + ¦ ¦ ¦--expr: c( + [0/0] {81} + ¦ ¦ ¦ ¦--expr: c [0/0] {83} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {82} ¦ ¦ ¦ ¦--'(': ( [0/12] {84} - ¦ ¦ ¦ ¦--expr: [1/10] {86} + ¦ ¦ ¦ ¦--expr: 3 [1/10] {86} ¦ ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {85} ¦ ¦ ¦ °--')': ) [1/0] {87} ¦ ¦ °--')': ) [0/0] {88} ¦ °--')': ) [0/0] {89} - ¦--expr: [2/0] {90} - ¦ ¦--expr: [0/0] {92} + ¦--expr: call( [2/0] {90} + ¦ ¦--expr: call [0/0] {92} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {91} ¦ ¦--'(': ( [0/0] {93} - ¦ ¦--expr: [0/0] {95} + ¦ ¦--expr: 1 [0/0] {95} ¦ ¦ °--NUM_CONST: 1 [0/0] {94} ¦ ¦--',': , [0/5] {96} - ¦ ¦--expr: [1/0] {97} - ¦ ¦ ¦--expr: [0/0] {99} + ¦ ¦--expr: call2 [1/0] {97} + ¦ ¦ ¦--expr: call2 [0/0] {99} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call2 [0/0] {98} ¦ ¦ ¦--'(': ( [0/0] {100} - ¦ ¦ ¦--expr: [0/0] {102} + ¦ ¦ ¦--expr: 3 [0/0] {102} ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {101} ¦ ¦ ¦--',': , [0/1] {103} - ¦ ¦ ¦--expr: [0/0] {105} + ¦ ¦ ¦--expr: 4 [0/0] {105} ¦ ¦ ¦ °--NUM_CONST: 4 [0/0] {104} ¦ ¦ ¦--',': , [0/1] {106} - ¦ ¦ ¦--expr: [0/5] {107} - ¦ ¦ ¦ ¦--expr: [0/0] {109} + ¦ ¦ ¦--expr: call( [0/5] {107} + ¦ ¦ ¦ ¦--expr: call [0/0] {109} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {108} ¦ ¦ ¦ ¦--'(': ( [0/0] {110} - ¦ ¦ ¦ ¦--expr: [0/0] {112} + ¦ ¦ ¦ ¦--expr: 3 [0/0] {112} ¦ ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {111} ¦ ¦ ¦ ¦--',': , [0/22] {113} - ¦ ¦ ¦ ¦--expr: [1/0] {115} + ¦ ¦ ¦ ¦--expr: 4 [1/0] {115} ¦ ¦ ¦ ¦ °--NUM_CONST: 4 [0/0] {114} ¦ ¦ ¦ ¦--',': , [0/1] {116} - ¦ ¦ ¦ ¦--expr: [0/5] {117} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {119} + ¦ ¦ ¦ ¦--expr: call( [0/5] {117} + ¦ ¦ ¦ ¦ ¦--expr: call [0/0] {119} ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {118} ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {120} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {122} + ¦ ¦ ¦ ¦ ¦--expr: 5 [0/0] {122} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 5 [0/0] {121} ¦ ¦ ¦ ¦ ¦--',': , [0/1] {123} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {125} + ¦ ¦ ¦ ¦ ¦--expr: 6 [0/0] {125} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 6 [0/0] {124} ¦ ¦ ¦ ¦ ¦--',': , [0/1] {126} - ¦ ¦ ¦ ¦ ¦--expr: [0/22] {127} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {129} + ¦ ¦ ¦ ¦ ¦--expr: call( [0/22] {127} + ¦ ¦ ¦ ¦ ¦ ¦--expr: call [0/0] {129} ¦ ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {128} ¦ ¦ ¦ ¦ ¦ ¦--'(': ( [0/24] {130} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [1/22] {132} + ¦ ¦ ¦ ¦ ¦ ¦--expr: 2 [1/22] {132} ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {131} ¦ ¦ ¦ ¦ ¦ °--')': ) [1/0] {133} ¦ ¦ ¦ ¦ °--')': ) [1/0] {134} @@ -137,30 +140,30 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) ¦ ¦ °--')': ) [1/0] {136} ¦ °--')': ) [1/0] {137} ¦--COMMENT: # com [2/0] {138} - ¦--expr: [2/0] {139} - ¦ ¦--expr: [0/0] {141} + ¦--expr: call( [2/0] {139} + ¦ ¦--expr: call [0/0] {141} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {140} ¦ ¦--'(': ( [0/0] {142} - ¦ ¦--expr: [0/0] {143} - ¦ ¦ ¦--expr: [0/0] {145} + ¦ ¦--expr: call( [0/0] {143} + ¦ ¦ ¦--expr: call [0/0] {145} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {144} ¦ ¦ ¦--'(': ( [0/2] {146} - ¦ ¦ ¦--expr: [1/0] {148} + ¦ ¦ ¦--expr: 2 [1/0] {148} ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {147} ¦ ¦ °--')': ) [1/0] {149} ¦ °--')': ) [0/0] {150} - °--expr: [2/0] {151} - ¦--expr: [0/0] {153} + °--expr: call( [2/0] {151} + ¦--expr: call [0/0] {153} ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {152} ¦--'(': ( [0/0] {154} - ¦--expr: [0/0] {156} + ¦--expr: 1 [0/0] {156} ¦ °--NUM_CONST: 1 [0/0] {155} ¦--',': , [0/1] {157} - ¦--expr: [0/0] {158} - ¦ ¦--expr: [0/0] {160} + ¦--expr: call( [0/0] {158} + ¦ ¦--expr: call [0/0] {160} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {159} ¦ ¦--'(': ( [0/2] {161} - ¦ ¦--expr: [1/0] {163} + ¦ ¦--expr: 23 [1/0] {163} ¦ ¦ °--NUM_CONST: 23 [0/0] {162} ¦ °--')': ) 
[1/0] {164} °--')': ) [0/0] {165} diff --git a/tests/testthat/line_breaks_fun_call/token_dependent_complex_strict-in_tree b/tests/testthat/line_breaks_fun_call/token_dependent_complex_strict-in_tree index 69fa8e6ba..427acba96 100644 --- a/tests/testthat/line_breaks_fun_call/token_dependent_complex_strict-in_tree +++ b/tests/testthat/line_breaks_fun_call/token_dependent_complex_strict-in_tree @@ -1,135 +1,138 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/0] {3} + ¦--expr: call( [0/0] {1} + ¦ ¦--expr: call [0/0] {3} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {2} ¦ ¦--'(': ( [0/0] {4} - ¦ ¦--expr: [0/0] {5} - ¦ ¦ ¦--expr: [0/0] {7} + ¦ ¦--expr: call( [0/0] {5} + ¦ ¦ ¦--expr: call [0/0] {7} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {6} ¦ ¦ ¦--'(': ( [0/0] {8} - ¦ ¦ ¦--expr: [1/2] {10} + ¦ ¦ ¦--expr: 2 [1/2] {10} ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {9} ¦ ¦ °--')': ) [1/0] {11} ¦ °--')': ) [0/0] {12} - ¦--expr: [2/0] {13} - ¦ ¦--expr: [0/0] {15} + ¦--expr: call( [2/0] {13} + ¦ ¦--expr: call [0/0] {15} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {14} ¦ ¦--'(': ( [0/2] {16} - ¦ ¦--expr: [0/2] {17} - ¦ ¦ ¦--expr: [0/0] {19} + ¦ ¦--expr: call( [0/2] {17} + ¦ ¦ ¦--expr: call [0/0] {19} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {18} ¦ ¦ ¦--'(': ( [0/0] {20} - ¦ ¦ ¦--expr: [0/0] {22} + ¦ ¦ ¦--expr: 1 [0/0] {22} ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {21} ¦ ¦ ¦--',': , [0/6] {23} - ¦ ¦ ¦--expr: [1/0] {25} + ¦ ¦ ¦--expr: 2 [1/0] {25} ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {24} ¦ ¦ °--')': ) [0/0] {26} ¦ °--')': ) [0/0] {27} ¦--COMMENT: # mul [1/0] {28} - ¦--expr: [1/3] {29} - ¦ ¦--expr: [0/0] {31} + ¦--expr: call( [1/3] {29} + ¦ ¦--expr: call [0/0] {31} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {30} ¦ ¦--'(': ( [0/0] {32} - ¦ ¦--expr: [0/0] {33} - ¦ ¦ ¦--expr: [0/0] {35} + ¦ ¦--expr: a(b( [0/0] {33} + ¦ ¦ ¦--expr: a [0/0] {35} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: a [0/0] {34} ¦ ¦ ¦--'(': ( [0/0] {36} - ¦ ¦ ¦--expr: [0/0] {37} - ¦ ¦ ¦ ¦--expr: [0/0] {39} + ¦ ¦ ¦--expr: b( c [0/0] {37} + ¦ ¦ ¦ ¦--expr: b [0/0] {39} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {38} ¦ ¦ ¦ ¦--'(': ( [0/2] {40} - ¦ ¦ ¦ ¦--expr: [0/0] {41} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {43} + ¦ ¦ ¦ ¦--expr: c({ + [0/0] {41} + ¦ ¦ ¦ ¦ ¦--expr: c [0/0] {43} ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {42} ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {44} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {45} + ¦ ¦ ¦ ¦ ¦--expr: { + [0/0] {45} ¦ ¦ ¦ ¦ ¦ ¦--'{': { [0/3] {46} ¦ ¦ ¦ ¦ ¦ °--'}': } [1/0] {47} ¦ ¦ ¦ ¦ °--')': ) [0/0] {48} ¦ ¦ ¦ °--')': ) [0/0] {49} ¦ ¦ °--')': ) [0/0] {50} ¦ °--')': ) [0/0] {51} - ¦--expr: [2/2] {52} - ¦ ¦--expr: [0/0] {54} + ¦--expr: call( [2/2] {52} + ¦ ¦--expr: call [0/0] {54} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {53} ¦ ¦--'(': ( [0/0] {55} - ¦ ¦--expr: [0/1] {56} - ¦ ¦ ¦--expr: [0/0] {58} + ¦ ¦--expr: call( [0/1] {56} + ¦ ¦ ¦--expr: call [0/0] {58} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {57} ¦ ¦ ¦--'(': ( [0/5] {59} - ¦ ¦ ¦--expr: [1/1] {61} + ¦ ¦ ¦--expr: 2 [1/1] {61} ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {60} ¦ ¦ °--')': ) [0/0] {62} ¦ ¦--',': , [0/0] {63} - ¦ ¦--expr: [1/1] {65} + ¦ ¦--expr: 5 [1/1] {65} ¦ ¦ °--NUM_CONST: 5 [0/0] {64} ¦ °--')': ) [0/0] {66} - ¦--expr: [3/0] {67} - ¦ ¦--expr: [0/0] {69} + ¦--expr: call( [3/0] {67} + ¦ ¦--expr: call [0/0] {69} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {68} ¦ ¦--'(': ( [0/0] {70} - ¦ ¦--expr: [0/0] {71} - ¦ ¦ ¦--expr: [0/0] {73} + ¦ ¦--expr: call( [0/0] {71} + ¦ ¦ ¦--expr: call [0/0] {73} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {72} ¦ ¦ ¦--'(': ( [0/0] {74} - ¦ ¦ ¦--expr: [0/0] {76} + ¦ ¦ ¦--expr: 1 [0/0] {76} ¦ ¦ ¦ 
°--NUM_CONST: 1 [0/0] {75} ¦ ¦ ¦--',': , [0/14] {77} - ¦ ¦ ¦--expr: [1/0] {79} + ¦ ¦ ¦--expr: 2 [1/0] {79} ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {78} ¦ ¦ ¦--',': , [0/1] {80} - ¦ ¦ ¦--expr: [0/0] {81} - ¦ ¦ ¦ ¦--expr: [0/0] {83} + ¦ ¦ ¦--expr: c( + [0/0] {81} + ¦ ¦ ¦ ¦--expr: c [0/0] {83} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {82} ¦ ¦ ¦ ¦--'(': ( [0/15] {84} - ¦ ¦ ¦ ¦--expr: [1/11] {86} + ¦ ¦ ¦ ¦--expr: 3 [1/11] {86} ¦ ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {85} ¦ ¦ ¦ °--')': ) [1/0] {87} ¦ ¦ °--')': ) [0/0] {88} ¦ °--')': ) [0/0] {89} - ¦--expr: [2/0] {90} - ¦ ¦--expr: [0/0] {92} + ¦--expr: call( [2/0] {90} + ¦ ¦--expr: call [0/0] {92} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {91} ¦ ¦--'(': ( [0/0] {93} - ¦ ¦--expr: [0/0] {95} + ¦ ¦--expr: 1 [0/0] {95} ¦ ¦ °--NUM_CONST: 1 [0/0] {94} ¦ ¦--',': , [0/3] {96} - ¦ ¦--expr: [1/0] {97} - ¦ ¦ ¦--expr: [0/0] {99} + ¦ ¦--expr: call2 [1/0] {97} + ¦ ¦ ¦--expr: call2 [0/0] {99} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call2 [0/0] {98} ¦ ¦ ¦--'(': ( [0/0] {100} - ¦ ¦ ¦--expr: [0/0] {102} + ¦ ¦ ¦--expr: 3 [0/0] {102} ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {101} ¦ ¦ ¦--',': , [0/1] {103} - ¦ ¦ ¦--expr: [0/0] {105} + ¦ ¦ ¦--expr: 4 [0/0] {105} ¦ ¦ ¦ °--NUM_CONST: 4 [0/0] {104} ¦ ¦ ¦--',': , [0/1] {106} - ¦ ¦ ¦--expr: [0/0] {107} - ¦ ¦ ¦ ¦--expr: [0/0] {109} + ¦ ¦ ¦--expr: call( [0/0] {107} + ¦ ¦ ¦ ¦--expr: call [0/0] {109} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {108} ¦ ¦ ¦ ¦--'(': ( [0/0] {110} - ¦ ¦ ¦ ¦--expr: [0/0] {112} + ¦ ¦ ¦ ¦--expr: 3 [0/0] {112} ¦ ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {111} ¦ ¦ ¦ ¦--',': , [0/0] {113} - ¦ ¦ ¦ ¦--expr: [1/0] {115} + ¦ ¦ ¦ ¦--expr: 4 [1/0] {115} ¦ ¦ ¦ ¦ °--NUM_CONST: 4 [0/0] {114} ¦ ¦ ¦ ¦--',': , [0/1] {116} - ¦ ¦ ¦ ¦--expr: [0/0] {117} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {119} + ¦ ¦ ¦ ¦--expr: call( [0/0] {117} + ¦ ¦ ¦ ¦ ¦--expr: call [0/0] {119} ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {118} ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {120} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {122} + ¦ ¦ ¦ ¦ ¦--expr: 5 [0/0] {122} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 5 [0/0] {121} ¦ ¦ ¦ ¦ ¦--',': , [0/1] {123} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {125} + ¦ ¦ ¦ ¦ ¦--expr: 6 [0/0] {125} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 6 [0/0] {124} ¦ ¦ ¦ ¦ ¦--',': , [0/1] {126} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {127} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {129} + ¦ ¦ ¦ ¦ ¦--expr: call( [0/0] {127} + ¦ ¦ ¦ ¦ ¦ ¦--expr: call [0/0] {129} ¦ ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {128} ¦ ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {130} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [1/0] {132} + ¦ ¦ ¦ ¦ ¦ ¦--expr: 2 [1/0] {132} ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {131} ¦ ¦ ¦ ¦ ¦ °--')': ) [1/0] {133} ¦ ¦ ¦ ¦ °--')': ) [1/0] {134} @@ -137,30 +140,30 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) ¦ ¦ °--')': ) [1/0] {136} ¦ °--')': ) [1/0] {137} ¦--COMMENT: # com [2/0] {138} - ¦--expr: [2/2] {139} - ¦ ¦--expr: [0/0] {141} + ¦--expr: call( [2/2] {139} + ¦ ¦--expr: call [0/0] {141} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {140} ¦ ¦--'(': ( [0/0] {142} - ¦ ¦--expr: [0/0] {143} - ¦ ¦ ¦--expr: [0/0] {145} + ¦ ¦--expr: call( [0/0] {143} + ¦ ¦ ¦--expr: call [0/0] {145} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {144} ¦ ¦ ¦--'(': ( [0/0] {146} - ¦ ¦ ¦--expr: [1/0] {148} + ¦ ¦ ¦--expr: 2 [1/0] {148} ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {147} ¦ ¦ °--')': ) [1/0] {149} ¦ °--')': ) [0/0] {150} - °--expr: [2/0] {151} - ¦--expr: [0/0] {153} + °--expr: call( [2/0] {151} + ¦--expr: call [0/0] {153} ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {152} ¦--'(': ( [0/0] {154} - ¦--expr: [0/0] {156} + ¦--expr: 1 [0/0] {156} ¦ °--NUM_CONST: 1 [0/0] {155} ¦--',': , [0/1] {157} - ¦--expr: [0/0] {158} - ¦ ¦--expr: [0/0] {160} + ¦--expr: call( [0/0] {158} + ¦ 
¦--expr: call [0/0] {160} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {159} ¦ ¦--'(': ( [0/2] {161} - ¦ ¦--expr: [1/0] {163} + ¦ ¦--expr: 23 [1/0] {163} ¦ ¦ °--NUM_CONST: 23 [0/0] {162} ¦ °--')': ) [1/0] {164} °--')': ) [0/0] {165} diff --git a/tests/testthat/line_breaks_fun_call/token_dependent_mixed-in_tree b/tests/testthat/line_breaks_fun_call/token_dependent_mixed-in_tree index 1dc1b027d..c9c77f628 100644 --- a/tests/testthat/line_breaks_fun_call/token_dependent_mixed-in_tree +++ b/tests/testthat/line_breaks_fun_call/token_dependent_mixed-in_tree @@ -1,237 +1,238 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/0] {3} + ¦--expr: call( [0/0] {1} + ¦ ¦--expr: call [0/0] {3} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {2} ¦ ¦--'(': ( [0/0] {4} - ¦ ¦--expr: [0/0] {5} - ¦ ¦ ¦--expr: [0/0] {7} + ¦ ¦--expr: call( [0/0] {5} + ¦ ¦ ¦--expr: call [0/0] {7} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {6} ¦ ¦ ¦--'(': ( [0/0] {8} - ¦ ¦ ¦--expr: [0/0] {9} - ¦ ¦ ¦ ¦--expr: [0/0] {11} + ¦ ¦ ¦--expr: call3 [0/0] {9} + ¦ ¦ ¦ ¦--expr: call3 [0/0] {11} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call3 [0/0] {10} ¦ ¦ ¦ ¦--'(': ( [0/0] {12} ¦ ¦ ¦ °--')': ) [0/0] {13} ¦ ¦ ¦--',': , [0/1] {14} - ¦ ¦ ¦--expr: [0/0] {16} + ¦ ¦ ¦--expr: call [0/0] {16} ¦ ¦ ¦ °--SYMBOL: call [0/0] {15} ¦ ¦ ¦--',': , [0/6] {17} - ¦ ¦ ¦--expr: [1/0] {19} + ¦ ¦ ¦--expr: 4433 [1/0] {19} ¦ ¦ ¦ °--NUM_CONST: 4433 [0/0] {18} ¦ ¦ ¦--',': , [0/8] {20} - ¦ ¦ ¦--expr: [1/0] {22} + ¦ ¦ ¦--expr: 55 [1/0] {22} ¦ ¦ ¦ °--NUM_CONST: 55 [0/0] {21} ¦ ¦ °--')': ) [0/0] {23} ¦ °--')': ) [0/0] {24} - ¦--expr: [2/0] {25} - ¦ ¦--expr: [0/0] {27} + ¦--expr: call( [2/0] {25} + ¦ ¦--expr: call [0/0] {27} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {26} ¦ ¦--'(': ( [0/0] {28} - ¦ ¦--expr: [0/0] {29} - ¦ ¦ ¦--expr: [0/0] {31} + ¦ ¦--expr: call( [0/0] {29} + ¦ ¦ ¦--expr: call [0/0] {31} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {30} ¦ ¦ ¦--'(': ( [0/0] {32} - ¦ ¦ ¦--expr: [0/0] {33} - ¦ ¦ ¦ ¦--expr: [0/0] {35} + ¦ ¦ ¦--expr: call3 [0/0] {33} + ¦ ¦ ¦ ¦--expr: call3 [0/0] {35} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call3 [0/0] {34} ¦ ¦ ¦ ¦--'(': ( [0/0] {36} ¦ ¦ ¦ °--')': ) [0/0] {37} ¦ ¦ ¦--',': , [0/1] {38} - ¦ ¦ ¦--expr: [0/0] {40} + ¦ ¦ ¦--expr: call [0/0] {40} ¦ ¦ ¦ °--SYMBOL: call [0/0] {39} ¦ ¦ ¦--',': , [0/4] {41} - ¦ ¦ ¦--expr: [1/0] {43} + ¦ ¦ ¦--expr: 4433 [1/0] {43} ¦ ¦ ¦ °--NUM_CONST: 4433 [0/0] {42} ¦ ¦ ¦--',': , [0/10] {44} - ¦ ¦ ¦--expr: [1/0] {46} + ¦ ¦ ¦--expr: 55 [1/0] {46} ¦ ¦ ¦ °--NUM_CONST: 55 [0/0] {45} ¦ ¦ °--')': ) [1/0] {47} ¦ °--')': ) [0/0] {48} - ¦--expr: [1/0] {49} - ¦ ¦--expr: [0/0] {51} + ¦--expr: call( [1/0] {49} + ¦ ¦--expr: call [0/0] {51} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {50} ¦ ¦--'(': ( [0/0] {52} - ¦ ¦--expr: [0/0] {53} - ¦ ¦ ¦--expr: [0/0] {55} + ¦ ¦--expr: call( [0/0] {53} + ¦ ¦ ¦--expr: call [0/0] {55} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {54} ¦ ¦ ¦--'(': ( [0/0] {56} - ¦ ¦ ¦--expr: [0/0] {57} - ¦ ¦ ¦ ¦--expr: [0/0] {59} + ¦ ¦ ¦--expr: call3 [0/0] {57} + ¦ ¦ ¦ ¦--expr: call3 [0/0] {59} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call3 [0/0] {58} ¦ ¦ ¦ ¦--'(': ( [0/0] {60} ¦ ¦ ¦ °--')': ) [0/0] {61} ¦ ¦ ¦--',': , [0/1] {62} - ¦ ¦ ¦--expr: [0/0] {64} + ¦ ¦ ¦--expr: call [0/0] {64} ¦ ¦ ¦ °--SYMBOL: call [0/0] {63} ¦ ¦ ¦--',': , [0/13] {65} - ¦ ¦ ¦--expr: [1/0] {67} + ¦ ¦ ¦--expr: 4433 [1/0] {67} ¦ ¦ ¦ °--NUM_CONST: 4433 [0/0] {66} ¦ ¦ ¦--',': , [0/10] {68} - ¦ ¦ ¦--expr: [1/0] {70} + ¦ ¦ ¦--expr: 55 [1/0] {70} ¦ ¦ ¦ °--NUM_CONST: 55 [0/0] {69} ¦ ¦ °--')': ) [0/0] {71} ¦ °--')': ) [1/0] {72} ¦--COMMENT: # no 
[4/0] {73} - ¦--expr: [1/0] {74} - ¦ ¦--expr: [0/0] {76} + ¦--expr: call( [1/0] {74} + ¦ ¦--expr: call [0/0] {76} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {75} ¦ ¦--'(': ( [0/0] {77} - ¦ ¦--expr: [0/0] {78} - ¦ ¦ ¦--expr: [0/0] {80} + ¦ ¦--expr: call( [0/0] {78} + ¦ ¦ ¦--expr: call [0/0] {80} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {79} ¦ ¦ ¦--'(': ( [0/2] {81} - ¦ ¦ ¦--expr: [1/0] {83} + ¦ ¦ ¦--expr: 3 [1/0] {83} ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {82} ¦ ¦ ¦--',': , [0/1] {84} - ¦ ¦ ¦--expr: [0/0] {86} + ¦ ¦ ¦--expr: 4 [0/0] {86} ¦ ¦ ¦ °--NUM_CONST: 4 [0/0] {85} ¦ ¦ °--')': ) [1/0] {87} ¦ °--')': ) [0/0] {88} - ¦--expr: [3/0] {89} - ¦ ¦--expr: [0/0] {91} + ¦--expr: call( [3/0] {89} + ¦ ¦--expr: call [0/0] {91} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {90} ¦ ¦--'(': ( [0/0] {92} - ¦ ¦--expr: [0/4] {94} + ¦ ¦--expr: 3 [0/4] {94} ¦ ¦ °--NUM_CONST: 3 [0/0] {93} ¦ ¦--',': , [0/5] {95} - ¦ ¦--expr: [1/0] {97} + ¦ ¦--expr: 3 [1/0] {97} ¦ ¦ °--NUM_CONST: 3 [0/0] {96} ¦ °--')': ) [1/0] {98} - ¦--expr: [3/0] {99} - ¦ ¦--expr: [0/0] {101} + ¦--expr: call( [3/0] {99} + ¦ ¦--expr: call [0/0] {101} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {100} ¦ ¦--'(': ( [0/0] {102} - ¦ ¦--expr: [0/0] {103} - ¦ ¦ ¦--expr: [0/0] {105} + ¦ ¦--expr: call( [0/0] {103} + ¦ ¦ ¦--expr: call [0/0] {105} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {104} ¦ ¦ ¦--'(': ( [0/0] {106} - ¦ ¦ ¦--expr: [0/0] {107} - ¦ ¦ ¦ ¦--expr: [0/0] {109} + ¦ ¦ ¦--expr: call3 [0/0] {107} + ¦ ¦ ¦ ¦--expr: call3 [0/0] {109} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call3 [0/0] {108} ¦ ¦ ¦ ¦--'(': ( [0/0] {110} ¦ ¦ ¦ °--')': ) [0/0] {111} ¦ ¦ ¦--',': , [0/1] {112} - ¦ ¦ ¦--expr: [0/0] {114} + ¦ ¦ ¦--expr: call [0/0] {114} ¦ ¦ ¦ °--SYMBOL: call [0/0] {113} ¦ ¦ ¦--',': , [0/4] {115} - ¦ ¦ ¦--expr: [1/0] {117} + ¦ ¦ ¦--expr: 44 [1/0] {117} ¦ ¦ ¦ °--NUM_CONST: 44 [0/0] {116} ¦ ¦ ¦--',': , [0/4] {118} - ¦ ¦ ¦--expr: [1/0] {120} + ¦ ¦ ¦--expr: 55 [1/0] {120} ¦ ¦ ¦ °--NUM_CONST: 55 [0/0] {119} ¦ ¦ °--')': ) [1/0] {121} ¦ °--')': ) [0/0] {122} ¦--COMMENT: # [2/0] {123} - ¦--expr: [2/0] {124} - ¦ ¦--expr: [0/0] {126} + ¦--expr: call( [2/0] {124} + ¦ ¦--expr: call [0/0] {126} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {125} ¦ ¦--'(': ( [0/0] {127} - ¦ ¦--expr: [0/0] {129} + ¦ ¦--expr: call [0/0] {129} ¦ ¦ °--SYMBOL: call [0/0] {128} ¦ ¦--',': , [0/0] {130} - ¦ ¦--expr: [0/0] {131} - ¦ ¦ ¦--expr: [0/0] {133} + ¦ ¦--expr: call( [0/0] {131} + ¦ ¦ ¦--expr: call [0/0] {133} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {132} ¦ ¦ ¦--'(': ( [0/0] {134} ¦ ¦ °--')': ) [0/0] {135} ¦ ¦--',': , [0/5] {136} - ¦ ¦--expr: [1/0] {138} + ¦ ¦--expr: 3 [1/0] {138} ¦ ¦ °--NUM_CONST: 3 [0/0] {137} ¦ ¦--',': , [0/5] {139} - ¦ ¦--expr: [1/0] {141} + ¦ ¦--expr: 4 [1/0] {141} ¦ ¦ °--NUM_CONST: 4 [0/0] {140} ¦ °--')': ) [1/0] {142} - ¦--expr: [2/0] {143} - ¦ ¦--expr: [0/0] {145} + ¦--expr: call( [2/0] {143} + ¦ ¦--expr: call [0/0] {145} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {144} ¦ ¦--'(': ( [0/0] {146} - ¦ ¦--expr: [0/0] {147} - ¦ ¦ ¦--expr: [0/0] {149} + ¦ ¦--expr: call( [0/0] {147} + ¦ ¦ ¦--expr: call [0/0] {149} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {148} ¦ ¦ ¦--'(': ( [0/3] {150} - ¦ ¦ ¦--expr: [1/3] {152} + ¦ ¦ ¦--expr: 3 [1/3] {152} ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {151} ¦ ¦ ¦--',': , [0/1] {153} - ¦ ¦ ¦--expr: [0/0] {155} + ¦ ¦ ¦--expr: 4 [0/0] {155} ¦ ¦ ¦ °--NUM_CONST: 4 [0/0] {154} ¦ ¦ °--')': ) [1/0] {156} ¦ °--')': ) [0/0] {157} - ¦--expr: [2/0] {158} - ¦ ¦--expr: [0/0] {160} + ¦--expr: call( [2/0] {158} + ¦ ¦--expr: call [0/0] {160} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {159} ¦ ¦--'(': ( [0/0] 
{161} - ¦ ¦--expr: [0/0] {162} - ¦ ¦ ¦--expr: [0/0] {164} + ¦ ¦--expr: call( [0/0] {162} + ¦ ¦ ¦--expr: call [0/0] {164} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {163} ¦ ¦ ¦--'(': ( [0/0] {165} - ¦ ¦ ¦--expr: [0/0] {167} + ¦ ¦ ¦--expr: 1 [0/0] {167} ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {166} ¦ ¦ ¦--',': , [0/6] {168} - ¦ ¦ ¦--expr: [1/0] {170} + ¦ ¦ ¦--expr: 3 [1/0] {170} ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {169} ¦ ¦ °--')': ) [1/0] {171} ¦ °--')': ) [0/0] {172} ¦--COMMENT: # if [2/2] {173} - ¦--expr: [3/0] {174} - ¦ ¦--expr: [0/0] {176} + ¦--expr: call( [3/0] {174} + ¦ ¦--expr: call [0/0] {176} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {175} ¦ ¦--'(': ( [0/0] {177} - ¦ ¦--expr: [1/2] {179} + ¦ ¦--expr: 2 [1/2] {179} ¦ ¦ °--NUM_CONST: 2 [0/0] {178} ¦ °--')': ) [1/0] {180} - ¦--expr: [1/0] {181} - ¦ ¦--expr: [0/0] {183} + ¦--expr: cjald [1/0] {181} + ¦ ¦--expr: cjald [0/0] {183} ¦ ¦ °--SYMBOL_FUNCTION_CALL: cjald [0/0] {182} ¦ ¦--'(': ( [0/0] {184} - ¦ ¦--expr: [0/0] {186} + ¦ ¦--expr: 1 [0/0] {186} ¦ ¦ °--NUM_CONST: 1 [0/0] {185} ¦ ¦--',': , [0/11] {187} - ¦ ¦--expr: [1/0] {189} + ¦ ¦--expr: 3 [1/0] {189} ¦ ¦ °--NUM_CONST: 3 [0/0] {188} ¦ °--')': ) [0/0] {190} - ¦--expr: [2/2] {191} - ¦ ¦--expr: [0/0] {193} + ¦--expr: jclak [2/2] {191} + ¦ ¦--expr: jclak [0/0] {193} ¦ ¦ °--SYMBOL_FUNCTION_CALL: jclak [0/0] {192} ¦ ¦--'(': ( [0/2] {194} - ¦ ¦--expr: [0/0] {195} - ¦ ¦ ¦--expr: [0/0] {197} + ¦ ¦--expr: call( [0/0] {195} + ¦ ¦ ¦--expr: call [0/0] {197} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {196} ¦ ¦ ¦--'(': ( [0/0] {198} - ¦ ¦ ¦--expr: [0/0] {199} - ¦ ¦ ¦ ¦--expr: [0/0] {201} + ¦ ¦ ¦--expr: call( [0/0] {199} + ¦ ¦ ¦ ¦--expr: call [0/0] {201} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {200} ¦ ¦ ¦ ¦--'(': ( [0/0] {202} - ¦ ¦ ¦ ¦--expr: [0/2] {204} + ¦ ¦ ¦ ¦--expr: 2 [0/2] {204} ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {203} ¦ ¦ ¦ ¦--',': , [0/15] {205} - ¦ ¦ ¦ ¦--expr: [1/0] {207} + ¦ ¦ ¦ ¦--expr: 4 [1/0] {207} ¦ ¦ ¦ ¦ °--NUM_CONST: 4 [0/0] {206} ¦ ¦ ¦ °--')': ) [0/0] {208} ¦ ¦ °--')': ) [0/0] {209} ¦ °--')': ) [0/0] {210} - ¦--expr: [1/0] {211} - ¦ ¦--expr: [0/0] {213} + ¦--expr: fjadl [1/0] {211} + ¦ ¦--expr: fjadl [0/0] {213} ¦ ¦ °--SYMBOL_FUNCTION_CALL: fjadl [0/0] {212} ¦ ¦--'(': ( [0/0] {214} - ¦ ¦--expr: [0/0] {215} - ¦ ¦ ¦--expr: [0/0] {217} + ¦ ¦--expr: casl( [0/0] {215} + ¦ ¦ ¦--expr: casl [0/0] {217} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: casl [0/0] {216} ¦ ¦ ¦--'(': ( [0/0] {218} ¦ ¦ °--')': ) [0/0] {219} ¦ ¦--',': , [0/6] {220} - ¦ ¦--expr: [1/0] {222} + ¦ ¦--expr: 1 [1/0] {222} ¦ ¦ °--NUM_CONST: 1 [0/0] {221} ¦ °--')': ) [0/0] {223} - °--expr: [3/0] {224} - ¦--expr: [0/0] {226} + °--expr: test_ [3/0] {224} + ¦--expr: test_ [0/0] {226} ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {225} ¦--'(': ( [0/0] {227} - ¦--expr: [0/0] {229} + ¦--expr: "hi" [0/0] {229} ¦ °--STR_CONST: "hi" [0/0] {228} ¦--',': , [0/1] {230} - ¦--expr: [0/0] {231} + ¦--expr: { +"th [0/0] {231} ¦ ¦--'{': { [0/0] {232} - ¦ ¦--expr: [1/2] {234} + ¦ ¦--expr: "ther [1/2] {234} ¦ ¦ °--STR_CONST: "ther [0/0] {233} ¦ °--'}': } [1/0] {235} °--')': ) [0/0] {236} diff --git a/tests/testthat/line_breaks_fun_call/unindent-in.R b/tests/testthat/line_breaks_fun_call/unindent-in.R new file mode 100644 index 000000000..f1174961f --- /dev/null +++ b/tests/testthat/line_breaks_fun_call/unindent-in.R @@ -0,0 +1,16 @@ +test_that(key( + s),x = 1) + +test_that(key( + s), + x = 1 +) + + +test_that(key( + s),x = 1 +) + + +test_that( + key(s),x = 1) diff --git a/tests/testthat/line_breaks_fun_call/unindent-in_tree b/tests/testthat/line_breaks_fun_call/unindent-in_tree new 
file mode 100644 index 000000000..70fd2a441 --- /dev/null +++ b/tests/testthat/line_breaks_fun_call/unindent-in_tree @@ -0,0 +1,72 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: test_ [0/0] {1} + ¦ ¦--expr: test_ [0/0] {3} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {2} + ¦ ¦--'(': ( [0/0] {4} + ¦ ¦--expr: key( + [0/0] {5} + ¦ ¦ ¦--expr: key [0/0] {7} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: key [0/0] {6} + ¦ ¦ ¦--'(': ( [0/2] {8} + ¦ ¦ ¦--expr: s [1/0] {10} + ¦ ¦ ¦ °--SYMBOL: s [0/0] {9} + ¦ ¦ °--')': ) [0/0] {11} + ¦ ¦--',': , [0/0] {12} + ¦ ¦--SYMBOL_SUB: x [0/1] {13} + ¦ ¦--EQ_SUB: = [0/1] {14} + ¦ ¦--expr: 1 [0/0] {16} + ¦ ¦ °--NUM_CONST: 1 [0/0] {15} + ¦ °--')': ) [0/0] {17} + ¦--expr: test_ [2/0] {18} + ¦ ¦--expr: test_ [0/0] {20} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {19} + ¦ ¦--'(': ( [0/0] {21} + ¦ ¦--expr: key( + [0/0] {22} + ¦ ¦ ¦--expr: key [0/0] {24} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: key [0/0] {23} + ¦ ¦ ¦--'(': ( [0/2] {25} + ¦ ¦ ¦--expr: s [1/0] {27} + ¦ ¦ ¦ °--SYMBOL: s [0/0] {26} + ¦ ¦ °--')': ) [0/0] {28} + ¦ ¦--',': , [0/2] {29} + ¦ ¦--SYMBOL_SUB: x [1/1] {30} + ¦ ¦--EQ_SUB: = [0/1] {31} + ¦ ¦--expr: 1 [0/0] {33} + ¦ ¦ °--NUM_CONST: 1 [0/0] {32} + ¦ °--')': ) [1/0] {34} + ¦--expr: test_ [3/0] {35} + ¦ ¦--expr: test_ [0/0] {37} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {36} + ¦ ¦--'(': ( [0/0] {38} + ¦ ¦--expr: key( + [0/0] {39} + ¦ ¦ ¦--expr: key [0/0] {41} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: key [0/0] {40} + ¦ ¦ ¦--'(': ( [0/2] {42} + ¦ ¦ ¦--expr: s [1/0] {44} + ¦ ¦ ¦ °--SYMBOL: s [0/0] {43} + ¦ ¦ °--')': ) [0/0] {45} + ¦ ¦--',': , [0/0] {46} + ¦ ¦--SYMBOL_SUB: x [0/1] {47} + ¦ ¦--EQ_SUB: = [0/1] {48} + ¦ ¦--expr: 1 [0/0] {50} + ¦ ¦ °--NUM_CONST: 1 [0/0] {49} + ¦ °--')': ) [1/0] {51} + °--expr: test_ [3/0] {52} + ¦--expr: test_ [0/0] {54} + ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {53} + ¦--'(': ( [0/2] {55} + ¦--expr: key(s [1/0] {56} + ¦ ¦--expr: key [0/0] {58} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: key [0/0] {57} + ¦ ¦--'(': ( [0/0] {59} + ¦ ¦--expr: s [0/0] {61} + ¦ ¦ °--SYMBOL: s [0/0] {60} + ¦ °--')': ) [0/0] {62} + ¦--',': , [0/0] {63} + ¦--SYMBOL_SUB: x [0/1] {64} + ¦--EQ_SUB: = [0/1] {65} + ¦--expr: 1 [0/0] {67} + ¦ °--NUM_CONST: 1 [0/0] {66} + °--')': ) [0/0] {68} diff --git a/tests/testthat/line_breaks_fun_call/unindent-out.R b/tests/testthat/line_breaks_fun_call/unindent-out.R new file mode 100644 index 000000000..6452a6c74 --- /dev/null +++ b/tests/testthat/line_breaks_fun_call/unindent-out.R @@ -0,0 +1,21 @@ +test_that(key( + s +), x = 1) + +test_that( + key( + s + ), + x = 1 +) + + +test_that(key( + s +), x = 1) + + +test_that( + key(s), + x = 1 +) diff --git a/tests/testthat/math_token_spacing/non_strict_math_spacing_all-in_tree b/tests/testthat/math_token_spacing/non_strict_math_spacing_all-in_tree index 9060685f0..306fef807 100644 --- a/tests/testthat/math_token_spacing/non_strict_math_spacing_all-in_tree +++ b/tests/testthat/math_token_spacing/non_strict_math_spacing_all-in_tree @@ -1,23 +1,23 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/0] {4} + °--expr: 1++1 [0/0] {1} + ¦--expr: 1 [0/0] {4} ¦ °--NUM_CONST: 1 [0/0] {3} ¦--'+': + [0/0] {5} - ¦--expr: [0/3] {6} + ¦--expr: +1 [0/3] {6} ¦ ¦--'+': + [0/0] {7} - ¦ °--expr: [0/0] {9} + ¦ °--expr: 1 [0/0] {9} ¦ °--NUM_CONST: 1 [0/0] {8} ¦--'-': - [0/7] {10} - °--expr: [0/0] {11} - ¦--expr: [0/1] {14} + °--expr: 3 / 2 [0/0] {11} + ¦--expr: 3 [0/1] {14} ¦ °--NUM_CONST: 3 [0/0] {13} ¦--'/': / [0/1] {15} - ¦--expr: [0/1] {17} + ¦--expr: 23 [0/1] {17} ¦ 
°--NUM_CONST: 23 [0/0] {16} ¦--'*': * [0/1] {18} - °--expr: [0/0] {19} - ¦--expr: [0/1] {21} + °--expr: 3 ^4 [0/0] {19} + ¦--expr: 3 [0/1] {21} ¦ °--NUM_CONST: 3 [0/0] {20} ¦--'^': ^ [0/0] {22} - °--expr: [0/0] {24} + °--expr: 4 [0/0] {24} °--NUM_CONST: 4 [0/0] {23} diff --git a/tests/testthat/math_token_spacing/non_strict_math_spacing_all-out.R b/tests/testthat/math_token_spacing/non_strict_math_spacing_all-out.R index a29b8f889..1c1a997f0 100644 --- a/tests/testthat/math_token_spacing/non_strict_math_spacing_all-out.R +++ b/tests/testthat/math_token_spacing/non_strict_math_spacing_all-out.R @@ -1 +1 @@ -1 + +1 - 3 / 23 * 3 ^ 4 +1 + +1 - 3 / 23 * 3^4 diff --git a/tests/testthat/math_token_spacing/strict_math_spacing_all-in_tree b/tests/testthat/math_token_spacing/strict_math_spacing_all-in_tree index 5ad9d6a49..1925476a7 100644 --- a/tests/testthat/math_token_spacing/strict_math_spacing_all-in_tree +++ b/tests/testthat/math_token_spacing/strict_math_spacing_all-in_tree @@ -1,23 +1,23 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/1] {4} + °--expr: 1 ++ [0/0] {1} + ¦--expr: 1 [0/1] {4} ¦ °--NUM_CONST: 1 [0/0] {3} ¦--'+': + [0/0] {5} - ¦--expr: [0/1] {6} + ¦--expr: + 1 [0/1] {6} ¦ ¦--'+': + [0/1] {7} - ¦ °--expr: [0/0] {9} + ¦ °--expr: 1 [0/0] {9} ¦ °--NUM_CONST: 1 [0/0] {8} ¦--'-': - [0/1] {10} - °--expr: [0/0] {11} - ¦--expr: [0/1] {14} + °--expr: 3 / [0/0] {11} + ¦--expr: 3 [0/1] {14} ¦ °--NUM_CONST: 3 [0/0] {13} ¦--'/': / [0/7] {15} - ¦--expr: [0/0] {17} + ¦--expr: 23 [0/0] {17} ¦ °--NUM_CONST: 23 [0/0] {16} ¦--'*': * [0/1] {18} - °--expr: [0/0] {19} - ¦--expr: [0/0] {21} + °--expr: 3^ 4 [0/0] {19} + ¦--expr: 3 [0/0] {21} ¦ °--NUM_CONST: 3 [0/0] {20} ¦--'^': ^ [0/1] {22} - °--expr: [0/0] {24} + °--expr: 4 [0/0] {24} °--NUM_CONST: 4 [0/0] {23} diff --git a/tests/testthat/math_token_spacing/strict_math_spacing_all-out.R b/tests/testthat/math_token_spacing/strict_math_spacing_all-out.R index 7dfbe796e..d259e95fc 100644 --- a/tests/testthat/math_token_spacing/strict_math_spacing_all-out.R +++ b/tests/testthat/math_token_spacing/strict_math_spacing_all-out.R @@ -1 +1 @@ -1 + +1 - 3 / 23 * 3 ^ 4 +1 + +1 - 3 / 23 * 3^4 diff --git a/tests/testthat/math_token_spacing/strict_math_spacing_zero_all_but_power-in_tree b/tests/testthat/math_token_spacing/strict_math_spacing_zero_all_but_power-in_tree index f2629cf50..d82f157ca 100644 --- a/tests/testthat/math_token_spacing/strict_math_spacing_zero_all_but_power-in_tree +++ b/tests/testthat/math_token_spacing/strict_math_spacing_zero_all_but_power-in_tree @@ -1,23 +1,23 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/0] {4} + °--expr: 1++1- [0/0] {1} + ¦--expr: 1 [0/0] {4} ¦ °--NUM_CONST: 1 [0/0] {3} ¦--'+': + [0/0] {5} - ¦--expr: [0/0] {6} + ¦--expr: +1 [0/0] {6} ¦ ¦--'+': + [0/0] {7} - ¦ °--expr: [0/0] {9} + ¦ °--expr: 1 [0/0] {9} ¦ °--NUM_CONST: 1 [0/0] {8} ¦--'-': - [0/0] {10} - °--expr: [0/0] {11} - ¦--expr: [0/0] {14} + °--expr: 3/23* [0/0] {11} + ¦--expr: 3 [0/0] {14} ¦ °--NUM_CONST: 3 [0/0] {13} ¦--'/': / [0/0] {15} - ¦--expr: [0/0] {17} + ¦--expr: 23 [0/0] {17} ¦ °--NUM_CONST: 23 [0/0] {16} ¦--'*': * [0/0] {18} - °--expr: [0/0] {19} - ¦--expr: [0/0] {21} + °--expr: 3^4 [0/0] {19} + ¦--expr: 3 [0/0] {21} ¦ °--NUM_CONST: 3 [0/0] {20} ¦--'^': ^ [0/0] {22} - °--expr: [0/0] {24} + °--expr: 4 [0/0] {24} °--NUM_CONST: 4 [0/0] {23} diff --git a/tests/testthat/math_token_spacing/strict_math_spacing_zero_plus-in_tree 
b/tests/testthat/math_token_spacing/strict_math_spacing_zero_plus-in_tree index 539ec7fe6..c9e38aa79 100644 --- a/tests/testthat/math_token_spacing/strict_math_spacing_zero_plus-in_tree +++ b/tests/testthat/math_token_spacing/strict_math_spacing_zero_plus-in_tree @@ -1,23 +1,23 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/0] {4} + °--expr: 1+ + [0/0] {1} + ¦--expr: 1 [0/0] {4} ¦ °--NUM_CONST: 1 [0/0] {3} ¦--'+': + [0/2] {5} - ¦--expr: [0/0] {6} + ¦--expr: +1 [0/0] {6} ¦ ¦--'+': + [0/0] {7} - ¦ °--expr: [0/0] {9} + ¦ °--expr: 1 [0/0] {9} ¦ °--NUM_CONST: 1 [0/0] {8} ¦--'-': - [0/1] {10} - °--expr: [0/0] {11} - ¦--expr: [0/3] {14} + °--expr: 3 / [0/0] {11} + ¦--expr: 3 [0/3] {14} ¦ °--NUM_CONST: 3 [0/0] {13} ¦--'/': / [0/0] {15} - ¦--expr: [0/0] {17} + ¦--expr: 23 [0/0] {17} ¦ °--NUM_CONST: 23 [0/0] {16} ¦--'*': * [0/1] {18} - °--expr: [0/0] {19} - ¦--expr: [0/0] {21} + °--expr: 3^ 4 [0/0] {19} + ¦--expr: 3 [0/0] {21} ¦ °--NUM_CONST: 3 [0/0] {20} ¦--'^': ^ [0/1] {22} - °--expr: [0/0] {24} + °--expr: 4 [0/0] {24} °--NUM_CONST: 4 [0/0] {23} diff --git a/tests/testthat/multiple_expressions/three_complex_expr-in_tree b/tests/testthat/multiple_expressions/three_complex_expr-in_tree index b332e24c4..4f556c36b 100644 --- a/tests/testthat/multiple_expressions/three_complex_expr-in_tree +++ b/tests/testthat/multiple_expressions/three_complex_expr-in_tree @@ -1,22 +1,23 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {2} + ¦--expr: x [0/0] {2} ¦ °--SYMBOL: x [0/0] {1} - ¦--expr: [1/0] {3} - ¦ ¦--expr: [0/0] {5} + ¦--expr: 1+1 [1/0] {3} + ¦ ¦--expr: 1 [0/0] {5} ¦ ¦ °--NUM_CONST: 1 [0/0] {4} ¦ ¦--'+': + [0/0] {6} - ¦ °--expr: [0/0] {8} + ¦ °--expr: 1 [0/0] {8} ¦ °--NUM_CONST: 1 [0/0] {7} - °--expr: [1/0] {9} - ¦--expr: [0/1] {11} + °--expr: y + ( [1/0] {9} + ¦--expr: y [0/1] {11} ¦ °--SYMBOL: y [0/0] {10} ¦--'+': + [0/1] {12} - °--expr: [0/0] {13} + °--expr: ( +2* [0/0] {13} ¦--'(': ( [0/0] {14} - ¦--expr: [1/0] {15} - ¦ ¦--expr: [0/0] {17} + ¦--expr: 2* z [1/0] {15} + ¦ ¦--expr: 2 [0/0] {17} ¦ ¦ °--NUM_CONST: 2 [0/0] {16} ¦ ¦--'*': * [0/1] {18} - ¦ °--expr: [0/0] {20} + ¦ °--expr: z [0/0] {20} ¦ °--SYMBOL: z [0/0] {19} °--')': ) [1/0] {21} diff --git a/tests/testthat/multiple_expressions/two_simple_expr-in_tree b/tests/testthat/multiple_expressions/two_simple_expr-in_tree index 8ed5bfe91..57159da53 100644 --- a/tests/testthat/multiple_expressions/two_simple_expr-in_tree +++ b/tests/testthat/multiple_expressions/two_simple_expr-in_tree @@ -1,5 +1,5 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {2} + ¦--expr: a [0/0] {2} ¦ °--SYMBOL: a [0/0] {1} - °--expr: [1/0] {4} + °--expr: b [1/0] {4} °--SYMBOL: b [0/0] {3} diff --git a/tests/testthat/parse_comments/eol_eof_spaces-in_tree b/tests/testthat/parse_comments/eol_eof_spaces-in_tree index dcc4e4720..4e6dc6302 100644 --- a/tests/testthat/parse_comments/eol_eof_spaces-in_tree +++ b/tests/testthat/parse_comments/eol_eof_spaces-in_tree @@ -1,5 +1,5 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) ¦--COMMENT: # com [0/0] {1} ¦--COMMENT: #' sp [1/0] {2} - °--expr: [1/0] {4} + °--expr: a [1/0] {4} °--SYMBOL: a [0/0] {3} diff --git a/tests/testthat/parse_comments/mixed-in_tree b/tests/testthat/parse_comments/mixed-in_tree index 43462d134..5168b8f66 100644 --- a/tests/testthat/parse_comments/mixed-in_tree +++ b/tests/testthat/parse_comments/mixed-in_tree @@ -1,10 +1,10 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) ¦--COMMENT: # A f [0/0] 
{1} - °--expr: [1/0] {2} - ¦--expr: [0/1] {4} + °--expr: a <- [1/0] {2} + ¦--expr: a [0/1] {4} ¦ °--SYMBOL: a [0/0] {3} ¦--LEFT_ASSIGN: <- [0/1] {5} - °--expr: [0/0] {6} + °--expr: funct [0/0] {6} ¦--FUNCTION: funct [0/0] {7} ¦--'(': ( [0/0] {8} ¦--SYMBOL_FORMALS: x [0/0] {9} @@ -13,19 +13,21 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) ¦--',': , [0/1] {12} ¦--SYMBOL_FORMALS: z [0/0] {13} ¦--')': ) [0/1] {14} - °--expr: [0/0] {15} + °--expr: { +if [0/0] {15} ¦--'{': { [0/0] {16} - ¦--expr: [1/0] {17} + ¦--expr: if (1 [1/0] {17} ¦ ¦--IF: if [0/1] {18} ¦ ¦--'(': ( [0/0] {19} - ¦ ¦--expr: [0/0] {20} - ¦ ¦ ¦--expr: [0/0] {22} + ¦ ¦--expr: 1>10 [0/0] {20} + ¦ ¦ ¦--expr: 1 [0/0] {22} ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {21} ¦ ¦ ¦--GT: > [0/0] {23} - ¦ ¦ °--expr: [0/0] {25} + ¦ ¦ °--expr: 10 [0/0] {25} ¦ ¦ °--NUM_CONST: 10 [0/0] {24} ¦ ¦--')': ) [0/1] {26} - ¦ °--expr: [0/0] {27} + ¦ °--expr: { +# t [0/0] {27} ¦ ¦--'{': { [0/0] {28} ¦ ¦--COMMENT: # thi [1/0] {29} ¦ °--'}': } [1/0] {30} diff --git a/tests/testthat/parse_comments/output-prefix-in.R b/tests/testthat/parse_comments/output-prefix-in.R new file mode 100644 index 000000000..62c97ec65 --- /dev/null +++ b/tests/testthat/parse_comments/output-prefix-in.R @@ -0,0 +1,8 @@ +this() +#> is output + + +this() #> is not + +this() +# > not sure diff --git a/tests/testthat/parse_comments/output-prefix-in_tree b/tests/testthat/parse_comments/output-prefix-in_tree new file mode 100644 index 000000000..a6c124e96 --- /dev/null +++ b/tests/testthat/parse_comments/output-prefix-in_tree @@ -0,0 +1,19 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: this( [0/0] {1} + ¦ ¦--expr: this [0/0] {3} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: this [0/0] {2} + ¦ ¦--'(': ( [0/0] {4} + ¦ °--')': ) [0/0] {5} + ¦--COMMENT: #> is [1/0] {6} + ¦--expr: this( [3/1] {7} + ¦ ¦--expr: this [0/0] {9} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: this [0/0] {8} + ¦ ¦--'(': ( [0/0] {10} + ¦ °--')': ) [0/0] {11} + ¦--COMMENT: #> is [0/0] {12} + ¦--expr: this( [2/0] {13} + ¦ ¦--expr: this [0/0] {15} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: this [0/0] {14} + ¦ ¦--'(': ( [0/0] {16} + ¦ °--')': ) [0/0] {17} + °--COMMENT: # > n [1/0] {18} diff --git a/tests/testthat/parse_comments/output-prefix-out.R b/tests/testthat/parse_comments/output-prefix-out.R new file mode 100644 index 000000000..62c97ec65 --- /dev/null +++ b/tests/testthat/parse_comments/output-prefix-out.R @@ -0,0 +1,8 @@ +this() +#> is output + + +this() #> is not + +this() +# > not sure diff --git a/tests/testthat/parse_comments/rplumber-in.R b/tests/testthat/parse_comments/rplumber-in.R new file mode 100644 index 000000000..186413cb6 --- /dev/null +++ b/tests/testthat/parse_comments/rplumber-in.R @@ -0,0 +1,12 @@ +# myfile.R + +#* @get /mean +normalMean <- function(samples=10) { + data <- rnorm(samples) + mean(data) +} + +#* @post /sum +addTwo <- function(a, b) { + as.numeric(a) + as.numeric(b) +} diff --git a/tests/testthat/parse_comments/rplumber-in_tree b/tests/testthat/parse_comments/rplumber-in_tree new file mode 100644 index 000000000..8faf0b7a9 --- /dev/null +++ b/tests/testthat/parse_comments/rplumber-in_tree @@ -0,0 +1,69 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: # myf [0/0] {1} + ¦--COMMENT: #* @g [2/0] {2} + ¦--expr: norma [1/0] {3} + ¦ ¦--expr: norma [0/1] {5} + ¦ ¦ °--SYMBOL: norma [0/0] {4} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {6} + ¦ °--expr: funct [0/0] {7} + ¦ ¦--FUNCTION: funct [0/0] {8} + ¦ ¦--'(': ( [0/0] {9} + ¦ ¦--SYMBOL_FORMALS: sampl [0/0] {10} + ¦ ¦--EQ_FORMALS: = [0/0] {11} + ¦ 
¦--expr: 10 [0/0] {13} + ¦ ¦ °--NUM_CONST: 10 [0/0] {12} + ¦ ¦--')': ) [0/1] {14} + ¦ °--expr: { + d [0/0] {15} + ¦ ¦--'{': { [0/2] {16} + ¦ ¦--expr: data [1/2] {17} + ¦ ¦ ¦--expr: data [0/1] {19} + ¦ ¦ ¦ °--SYMBOL: data [0/0] {18} + ¦ ¦ ¦--LEFT_ASSIGN: <- [0/1] {20} + ¦ ¦ °--expr: rnorm [0/0] {21} + ¦ ¦ ¦--expr: rnorm [0/0] {23} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: rnorm [0/0] {22} + ¦ ¦ ¦--'(': ( [0/0] {24} + ¦ ¦ ¦--expr: sampl [0/0] {26} + ¦ ¦ ¦ °--SYMBOL: sampl [0/0] {25} + ¦ ¦ °--')': ) [0/0] {27} + ¦ ¦--expr: mean( [1/0] {28} + ¦ ¦ ¦--expr: mean [0/0] {30} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: mean [0/0] {29} + ¦ ¦ ¦--'(': ( [0/0] {31} + ¦ ¦ ¦--expr: data [0/0] {33} + ¦ ¦ ¦ °--SYMBOL: data [0/0] {32} + ¦ ¦ °--')': ) [0/0] {34} + ¦ °--'}': } [1/0] {35} + ¦--COMMENT: #* @p [2/0] {36} + °--expr: addTw [1/0] {37} + ¦--expr: addTw [0/1] {39} + ¦ °--SYMBOL: addTw [0/0] {38} + ¦--LEFT_ASSIGN: <- [0/1] {40} + °--expr: funct [0/0] {41} + ¦--FUNCTION: funct [0/0] {42} + ¦--'(': ( [0/0] {43} + ¦--SYMBOL_FORMALS: a [0/0] {44} + ¦--',': , [0/1] {45} + ¦--SYMBOL_FORMALS: b [0/0] {46} + ¦--')': ) [0/1] {47} + °--expr: { + a [0/0] {48} + ¦--'{': { [0/2] {49} + ¦--expr: as.nu [1/0] {50} + ¦ ¦--expr: as.nu [0/1] {51} + ¦ ¦ ¦--expr: as.nu [0/0] {53} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: as.nu [0/0] {52} + ¦ ¦ ¦--'(': ( [0/0] {54} + ¦ ¦ ¦--expr: a [0/0] {56} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {55} + ¦ ¦ °--')': ) [0/0] {57} + ¦ ¦--'+': + [0/1] {58} + ¦ °--expr: as.nu [0/0] {59} + ¦ ¦--expr: as.nu [0/0] {61} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: as.nu [0/0] {60} + ¦ ¦--'(': ( [0/0] {62} + ¦ ¦--expr: b [0/0] {64} + ¦ ¦ °--SYMBOL: b [0/0] {63} + ¦ °--')': ) [0/0] {65} + °--'}': } [1/0] {66} diff --git a/tests/testthat/parse_comments/rplumber-out.R b/tests/testthat/parse_comments/rplumber-out.R new file mode 100644 index 000000000..71a98b86b --- /dev/null +++ b/tests/testthat/parse_comments/rplumber-out.R @@ -0,0 +1,12 @@ +# myfile.R + +#* @get /mean +normalMean <- function(samples = 10) { + data <- rnorm(samples) + mean(data) +} + +#* @post /sum +addTwo <- function(a, b) { + as.numeric(a) + as.numeric(b) +} diff --git a/tests/testthat/parse_comments/shebang_1-in.R b/tests/testthat/parse_comments/shebang_1-in.R new file mode 100644 index 000000000..210862b44 --- /dev/null +++ b/tests/testthat/parse_comments/shebang_1-in.R @@ -0,0 +1,10 @@ +#A comment +#!/usr/bin/env Rscript +#!/usr/bin/env Rscript +a <- 3 + +#!/usr/bin /env Rscript -m --set "W" +dd <- 33 +#!/usr/bin\ /env Rscript -m --set "W" +c() +#!NEED TO REMOVE THIS diff --git a/tests/testthat/parse_comments/shebang_1-in_tree b/tests/testthat/parse_comments/shebang_1-in_tree new file mode 100644 index 000000000..39bb6c0ca --- /dev/null +++ b/tests/testthat/parse_comments/shebang_1-in_tree @@ -0,0 +1,24 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #A co [0/0] {1} + ¦--COMMENT: #!/us [1/0] {2} + ¦--COMMENT: #!/us [1/0] {3} + ¦--expr: a <- [1/0] {4} + ¦ ¦--expr: a [0/1] {6} + ¦ ¦ °--SYMBOL: a [0/0] {5} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {7} + ¦ °--expr: 3 [0/0] {9} + ¦ °--NUM_CONST: 3 [0/0] {8} + ¦--COMMENT: #!/us [2/0] {10} + ¦--expr: dd <- [1/0] {11} + ¦ ¦--expr: dd [0/1] {13} + ¦ ¦ °--SYMBOL: dd [0/0] {12} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {14} + ¦ °--expr: 33 [0/0] {16} + ¦ °--NUM_CONST: 33 [0/0] {15} + ¦--COMMENT: #!/us [1/0] {17} + ¦--expr: c() [1/0] {18} + ¦ ¦--expr: c [0/0] {20} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {19} + ¦ ¦--'(': ( [0/0] {21} + ¦ °--')': ) [0/0] {22} + °--COMMENT: #!NEE [1/0] {23} diff --git a/tests/testthat/parse_comments/shebang_1-out.R 
b/tests/testthat/parse_comments/shebang_1-out.R new file mode 100644 index 000000000..a145d2c47 --- /dev/null +++ b/tests/testthat/parse_comments/shebang_1-out.R @@ -0,0 +1,10 @@ +# A comment +# !/usr/bin/env Rscript +# !/usr/bin/env Rscript +a <- 3 + +# !/usr/bin /env Rscript -m --set "W" +dd <- 33 +# !/usr/bin\ /env Rscript -m --set "W" +c() +# !NEED TO REMOVE THIS diff --git a/tests/testthat/parse_comments/shebang_2-in.R b/tests/testthat/parse_comments/shebang_2-in.R new file mode 100644 index 000000000..c47bec7a3 --- /dev/null +++ b/tests/testthat/parse_comments/shebang_2-in.R @@ -0,0 +1,9 @@ +#!/usr/bin/env Rscript +#!/usr/bin/env Rscript +a <- 3 + +#!/usr/bin /env Rscript -m --set "W" +dd <- 33 +#!/usr/bin\ /env Rscript -m --set "W" +c() +#!NEED TO REMOVE THIS diff --git a/tests/testthat/parse_comments/shebang_2-in_tree b/tests/testthat/parse_comments/shebang_2-in_tree new file mode 100644 index 000000000..dafbcf6ac --- /dev/null +++ b/tests/testthat/parse_comments/shebang_2-in_tree @@ -0,0 +1,23 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #!/us [0/0] {1} + ¦--COMMENT: #!/us [1/0] {2} + ¦--expr: a <- [1/0] {3} + ¦ ¦--expr: a [0/1] {5} + ¦ ¦ °--SYMBOL: a [0/0] {4} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {6} + ¦ °--expr: 3 [0/0] {8} + ¦ °--NUM_CONST: 3 [0/0] {7} + ¦--COMMENT: #!/us [2/0] {9} + ¦--expr: dd <- [1/0] {10} + ¦ ¦--expr: dd [0/1] {12} + ¦ ¦ °--SYMBOL: dd [0/0] {11} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {13} + ¦ °--expr: 33 [0/0] {15} + ¦ °--NUM_CONST: 33 [0/0] {14} + ¦--COMMENT: #!/us [1/0] {16} + ¦--expr: c() [1/0] {17} + ¦ ¦--expr: c [0/0] {19} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {18} + ¦ ¦--'(': ( [0/0] {20} + ¦ °--')': ) [0/0] {21} + °--COMMENT: #!NEE [1/0] {22} diff --git a/tests/testthat/parse_comments/shebang_2-out.R b/tests/testthat/parse_comments/shebang_2-out.R new file mode 100644 index 000000000..8e8f5ac04 --- /dev/null +++ b/tests/testthat/parse_comments/shebang_2-out.R @@ -0,0 +1,9 @@ +#!/usr/bin/env Rscript +# !/usr/bin/env Rscript +a <- 3 + +# !/usr/bin /env Rscript -m --set "W" +dd <- 33 +# !/usr/bin\ /env Rscript -m --set "W" +c() +# !NEED TO REMOVE THIS diff --git a/tests/testthat/parse_comments/spinning_code_chunk_headers-in.R b/tests/testthat/parse_comments/spinning_code_chunk_headers-in.R new file mode 100644 index 000000000..198d8b6ab --- /dev/null +++ b/tests/testthat/parse_comments/spinning_code_chunk_headers-in.R @@ -0,0 +1,11 @@ +#A comment +a <- function() { + +} + +#+ chunk-label, opt1=value1 +"chunk-content" + +#- chunk-label, opt1=value1 +call(2, 3) +#21 diff --git a/tests/testthat/parse_comments/spinning_code_chunk_headers-in_tree b/tests/testthat/parse_comments/spinning_code_chunk_headers-in_tree new file mode 100644 index 000000000..82e47c6a4 --- /dev/null +++ b/tests/testthat/parse_comments/spinning_code_chunk_headers-in_tree @@ -0,0 +1,30 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #A co [0/0] {1} + ¦--expr: a <- [1/0] {2} + ¦ ¦--expr: a [0/1] {4} + ¦ ¦ °--SYMBOL: a [0/0] {3} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {5} + ¦ °--expr: funct [0/0] {6} + ¦ ¦--FUNCTION: funct [0/0] {7} + ¦ ¦--'(': ( [0/0] {8} + ¦ ¦--')': ) [0/1] {9} + ¦ °--expr: { + +} [0/0] {10} + ¦ ¦--'{': { [0/0] {11} + ¦ °--'}': } [2/0] {12} + ¦--COMMENT: #+ ch [2/0] {13} + ¦--expr: "chun [1/0] {15} + ¦ °--STR_CONST: "chun [0/0] {14} + ¦--COMMENT: #- ch [2/0] {16} + ¦--expr: call( [1/0] {17} + ¦ ¦--expr: call [0/0] {19} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {18} + ¦ ¦--'(': ( [0/0] {20} + ¦ ¦--expr: 2 [0/0] {22} + ¦ ¦ °--NUM_CONST: 2 [0/0] 
{21} + ¦ ¦--',': , [0/1] {23} + ¦ ¦--expr: 3 [0/0] {25} + ¦ ¦ °--NUM_CONST: 3 [0/0] {24} + ¦ °--')': ) [0/0] {26} + °--COMMENT: #21 [1/0] {27} diff --git a/tests/testthat/parse_comments/spinning_code_chunk_headers-out.R b/tests/testthat/parse_comments/spinning_code_chunk_headers-out.R new file mode 100644 index 000000000..88ab28999 --- /dev/null +++ b/tests/testthat/parse_comments/spinning_code_chunk_headers-out.R @@ -0,0 +1,11 @@ +# A comment +a <- function() { + +} + +#+ chunk-label, opt1=value1 +"chunk-content" + +#- chunk-label, opt1=value1 +call(2, 3) +# 21 diff --git a/tests/testthat/parse_comments/with_indention-in_tree b/tests/testthat/parse_comments/with_indention-in_tree index fd2c47775..ee1218f53 100644 --- a/tests/testthat/parse_comments/with_indention-in_tree +++ b/tests/testthat/parse_comments/with_indention-in_tree @@ -1,69 +1,69 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) ¦--COMMENT: # a c [0/0] {1} - ¦--expr: [1/0] {2} - ¦ ¦--expr: [0/0] {4} + ¦--expr: call( [1/0] {2} + ¦ ¦--expr: call [0/0] {4} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {3} ¦ ¦--'(': ( [0/0] {5} - ¦ ¦--expr: [1/0] {7} + ¦ ¦--expr: 1 [1/0] {7} ¦ ¦ °--NUM_CONST: 1 [0/0] {6} ¦ ¦--',': , [0/0] {8} - ¦ ¦--expr: [1/0] {9} - ¦ ¦ ¦--expr: [0/0] {11} + ¦ ¦--expr: call2 [1/0] {9} + ¦ ¦ ¦--expr: call2 [0/0] {11} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call2 [0/0] {10} ¦ ¦ ¦--'(': ( [0/0] {12} - ¦ ¦ ¦--expr: [1/0] {14} + ¦ ¦ ¦--expr: 2 [1/0] {14} ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {13} ¦ ¦ ¦--',': , [0/1] {15} - ¦ ¦ ¦--expr: [0/0] {17} + ¦ ¦ ¦--expr: 3 [0/0] {17} ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {16} ¦ ¦ ¦--',': , [0/0] {18} - ¦ ¦ ¦--expr: [1/0] {19} - ¦ ¦ ¦ ¦--expr: [0/0] {21} + ¦ ¦ ¦--expr: call3 [1/0] {19} + ¦ ¦ ¦ ¦--expr: call3 [0/0] {21} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call3 [0/0] {20} ¦ ¦ ¦ ¦--'(': ( [0/0] {22} ¦ ¦ ¦ ¦--COMMENT: # zer [0/0] {23} ¦ ¦ ¦ ¦--COMMENT: # one [1/19] {24} ¦ ¦ ¦ ¦--COMMENT: # two [1/6] {25} - ¦ ¦ ¦ ¦--expr: [1/0] {27} + ¦ ¦ ¦ ¦--expr: 1 [1/0] {27} ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {26} ¦ ¦ ¦ ¦--',': , [0/1] {28} - ¦ ¦ ¦ ¦--expr: [0/1] {30} + ¦ ¦ ¦ ¦--expr: 2 [0/1] {30} ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {29} ¦ ¦ ¦ ¦--COMMENT: # two [0/6] {31} ¦ ¦ ¦ ¦--',': , [1/1] {32} - ¦ ¦ ¦ ¦--expr: [0/1] {34} + ¦ ¦ ¦ ¦--expr: 22 [0/1] {34} ¦ ¦ ¦ ¦ °--NUM_CONST: 22 [0/0] {33} ¦ ¦ ¦ ¦--COMMENT: # com [0/6] {35} ¦ ¦ ¦ °--')': ) [1/0] {36} ¦ ¦ ¦--',': , [0/4] {37} - ¦ ¦ ¦--expr: [1/2] {39} + ¦ ¦ ¦--expr: 5 [1/2] {39} ¦ ¦ ¦ °--NUM_CONST: 5 [0/0] {38} ¦ ¦ °--')': ) [1/0] {40} ¦ ¦--',': , [0/1] {41} ¦ ¦--COMMENT: #' A [0/17] {42} - ¦ ¦--expr: [1/18] {44} + ¦ ¦--expr: 144 [1/18] {44} ¦ ¦ °--NUM_CONST: 144 [0/0] {43} ¦ ¦--COMMENT: # ano [1/0] {45} ¦ °--')': ) [1/0] {46} ¦--COMMENT: # new [2/0] {47} - ¦--expr: [5/0] {48} - ¦ ¦--expr: [0/0] {50} + ¦--expr: a() [5/0] {48} + ¦ ¦--expr: a [0/0] {50} ¦ ¦ °--SYMBOL_FUNCTION_CALL: a [0/0] {49} ¦ ¦--'(': ( [0/0] {51} ¦ °--')': ) [0/0] {52} ¦--COMMENT: # I t [1/0] {53} ¦--COMMENT: # new [1/0] {54} - ¦--expr: [1/2] {55} - ¦ ¦--expr: [0/0] {57} + ¦--expr: b(x, [1/2] {55} + ¦ ¦--expr: b [0/0] {57} ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {56} ¦ ¦--'(': ( [0/0] {58} - ¦ ¦--expr: [0/0] {60} + ¦ ¦--expr: x [0/0] {60} ¦ ¦ °--SYMBOL: x [0/0] {59} ¦ ¦--',': , [0/1] {61} - ¦ ¦--expr: [0/0] {63} + ¦ ¦--expr: y [0/0] {63} ¦ ¦ °--SYMBOL: y [0/0] {62} ¦ ¦--',': , [0/1] {64} - ¦ ¦--expr: [0/0] {66} + ¦ ¦--expr: 7 [0/0] {66} ¦ ¦ °--NUM_CONST: 7 [0/0] {65} ¦ °--')': ) [0/0] {67} ¦--COMMENT: # hid [0/0] {68} diff --git a/tests/testthat/parse_comments/xaringan-in.R 
b/tests/testthat/parse_comments/xaringan-in.R new file mode 100644 index 000000000..abd3dbcb0 --- /dev/null +++ b/tests/testthat/parse_comments/xaringan-in.R @@ -0,0 +1,12 @@ +foo( + data = mtcars, + x = cyl, + y = wt #<< +) + + +library(ggplot2) + +ggplot(aes(x, y), data) + + geom_point() + #<< + scale_x_continuous() #<< diff --git a/tests/testthat/parse_comments/xaringan-in_tree b/tests/testthat/parse_comments/xaringan-in_tree new file mode 100644 index 000000000..65e144b39 --- /dev/null +++ b/tests/testthat/parse_comments/xaringan-in_tree @@ -0,0 +1,62 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: foo( + [0/0] {1} + ¦ ¦--expr: foo [0/0] {3} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: foo [0/0] {2} + ¦ ¦--'(': ( [0/2] {4} + ¦ ¦--SYMBOL_SUB: data [1/1] {5} + ¦ ¦--EQ_SUB: = [0/1] {6} + ¦ ¦--expr: mtcar [0/0] {8} + ¦ ¦ °--SYMBOL: mtcar [0/0] {7} + ¦ ¦--',': , [0/2] {9} + ¦ ¦--SYMBOL_SUB: x [1/1] {10} + ¦ ¦--EQ_SUB: = [0/1] {11} + ¦ ¦--expr: cyl [0/0] {13} + ¦ ¦ °--SYMBOL: cyl [0/0] {12} + ¦ ¦--',': , [0/2] {14} + ¦ ¦--SYMBOL_SUB: y [1/1] {15} + ¦ ¦--EQ_SUB: = [0/1] {16} + ¦ ¦--expr: wt [0/1] {18} + ¦ ¦ °--SYMBOL: wt [0/0] {17} + ¦ ¦--COMMENT: #<< [0/0] {19} + ¦ °--')': ) [1/0] {20} + ¦--expr: libra [3/0] {21} + ¦ ¦--expr: libra [0/0] {23} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: libra [0/0] {22} + ¦ ¦--'(': ( [0/0] {24} + ¦ ¦--expr: ggplo [0/0] {26} + ¦ ¦ °--SYMBOL: ggplo [0/0] {25} + ¦ °--')': ) [0/0] {27} + ¦--expr: ggplo [2/1] {28} + ¦ ¦--expr: ggplo [0/1] {30} + ¦ ¦ ¦--expr: ggplo [0/0] {32} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: ggplo [0/0] {31} + ¦ ¦ ¦--'(': ( [0/0] {33} + ¦ ¦ ¦--expr: aes(x [0/0] {34} + ¦ ¦ ¦ ¦--expr: aes [0/0] {36} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: aes [0/0] {35} + ¦ ¦ ¦ ¦--'(': ( [0/0] {37} + ¦ ¦ ¦ ¦--expr: x [0/0] {39} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {38} + ¦ ¦ ¦ ¦--',': , [0/1] {40} + ¦ ¦ ¦ ¦--expr: y [0/0] {42} + ¦ ¦ ¦ ¦ °--SYMBOL: y [0/0] {41} + ¦ ¦ ¦ °--')': ) [0/0] {43} + ¦ ¦ ¦--',': , [0/1] {44} + ¦ ¦ ¦--expr: data [0/0] {46} + ¦ ¦ ¦ °--SYMBOL: data [0/0] {45} + ¦ ¦ °--')': ) [0/0] {47} + ¦ ¦--'+': + [0/2] {48} + ¦ ¦--expr: geom_ [1/1] {49} + ¦ ¦ ¦--expr: geom_ [0/0] {51} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: geom_ [0/0] {50} + ¦ ¦ ¦--'(': ( [0/0] {52} + ¦ ¦ °--')': ) [0/0] {53} + ¦ ¦--'+': + [0/1] {54} + ¦ ¦--COMMENT: #<< [0/2] {55} + ¦ °--expr: scale [1/0] {56} + ¦ ¦--expr: scale [0/0] {58} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: scale [0/0] {57} + ¦ ¦--'(': ( [0/0] {59} + ¦ °--')': ) [0/0] {60} + °--COMMENT: #<< [0/0] {61} diff --git a/tests/testthat/parse_comments/xaringan-out.R b/tests/testthat/parse_comments/xaringan-out.R new file mode 100644 index 000000000..abd3dbcb0 --- /dev/null +++ b/tests/testthat/parse_comments/xaringan-out.R @@ -0,0 +1,12 @@ +foo( + data = mtcars, + x = cyl, + y = wt #<< +) + + +library(ggplot2) + +ggplot(aes(x, y), data) + + geom_point() + #<< + scale_x_continuous() #<< diff --git a/tests/testthat/parsing/long_strings-in.R b/tests/testthat/parsing/long_strings-in.R index 0d6f6444c..4268e6dab 100644 --- a/tests/testthat/parsing/long_strings-in.R +++ b/tests/testthat/parsing/long_strings-in.R @@ -1,5 +1,6 @@ b <- 3 +g <- "v x ijyuldlf ixi tt ucw nk xejkf omch ujm ymgsgkwickxn tg zknjxmk aqtgqrn bhv se g ec avo xs nyz fhadktjlwuocti au y gxv y xbr x kxn om dkaderkl xqok pp ud lcw pnft ggzz lu v sgs ysv uyyxp gmcvt o rumej rfed j qy ozo @@ -32,3 +33,5 @@ b <- 'test' 'test"ji"' # comment 1 + +call("a_is_long" = 2) diff --git a/tests/testthat/parsing/long_strings-in_tree b/tests/testthat/parsing/long_strings-in_tree index f7e1d7962..a8a743040 100644 
--- a/tests/testthat/parsing/long_strings-in_tree +++ b/tests/testthat/parsing/long_strings-in_tree @@ -1,24 +1,39 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/1] {3} + ¦--expr: b <- + [0/0] {1} + ¦ ¦--expr: b [0/1] {3} ¦ ¦ °--SYMBOL: b [0/0] {2} ¦ ¦--LEFT_ASSIGN: <- [0/1] {4} - ¦ °--expr: [1/0] {6} + ¦ °--expr: 3 [1/0] {6} ¦ °--NUM_CONST: 3 [0/0] {5} - ¦--expr: [1/0] {8} - ¦ °--STR_CONST: "v x [0/0] {7} - ¦--expr: [2/0] {10} - ¦ °--STR_CONST: "'tes [0/0] {9} - ¦--expr: [1/0] {11} - ¦ ¦--expr: [0/1] {13} - ¦ ¦ °--NUM_CONST: 99 [0/0] {12} - ¦ ¦--'+': + [0/1] {14} - ¦ °--expr: [0/0] {16} - ¦ °--NUM_CONST: 1 [0/0] {15} - ¦--expr: [1/0] {18} - ¦ °--STR_CONST: 'test [0/0] {17} - ¦--expr: [1/1] {20} - ¦ °--STR_CONST: 'test [0/0] {19} - ¦--COMMENT: # com [0/0] {21} - °--expr: [1/0] {23} - °--NUM_CONST: 1 [0/0] {22} + ¦--expr: g <- + [1/0] {7} + ¦ ¦--expr: g [0/1] {9} + ¦ ¦ °--SYMBOL: g [0/0] {8} + ¦ ¦--LEFT_ASSIGN: <- [0/0] {10} + ¦ °--expr: "v x [1/0] {12} + ¦ °--STR_CONST: "v x [0/0] {11} + ¦--expr: "'tes [2/0] {14} + ¦ °--STR_CONST: "'tes [0/0] {13} + ¦--expr: 99 + [1/0] {15} + ¦ ¦--expr: 99 [0/1] {17} + ¦ ¦ °--NUM_CONST: 99 [0/0] {16} + ¦ ¦--'+': + [0/1] {18} + ¦ °--expr: 1 [0/0] {20} + ¦ °--NUM_CONST: 1 [0/0] {19} + ¦--expr: 'test [1/0] {22} + ¦ °--STR_CONST: 'test [0/0] {21} + ¦--expr: 'test [1/1] {24} + ¦ °--STR_CONST: 'test [0/0] {23} + ¦--COMMENT: # com [0/0] {25} + ¦--expr: 1 [1/0] {27} + ¦ °--NUM_CONST: 1 [0/0] {26} + °--expr: call( [2/0] {28} + ¦--expr: call [0/0] {30} + ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {29} + ¦--'(': ( [0/0] {31} + ¦--STR_CONST: "a_is [0/1] {32} + ¦--EQ_SUB: = [0/1] {33} + ¦--expr: 2 [0/0] {35} + ¦ °--NUM_CONST: 2 [0/0] {34} + °--')': ) [0/0] {36} diff --git a/tests/testthat/parsing/long_strings-out.R b/tests/testthat/parsing/long_strings-out.R index 59d7b5054..9af67f0ee 100644 --- a/tests/testthat/parsing/long_strings-out.R +++ b/tests/testthat/parsing/long_strings-out.R @@ -1,6 +1,7 @@ b <- 3 -"v x ijyuldlf ixi tt ucw nk xejkf omch ujm ymgsgkwickxn tg zknjxmk aqtgqrn bhv +g <- + "v x ijyuldlf ixi tt ucw nk xejkf omch ujm ymgsgkwickxn tg zknjxmk aqtgqrn bhv se g ec avo xs nyz fhadktjlwuocti au y gxv y xbr x kxn om dkaderkl xqok pp ud lcw pnft ggzz lu v sgs ysv uyyxp gmcvt o rumej rfed j qy ozo oq wz na oii m rg imfktlkwisc wvc y ab ms pjugxh ieco xjdfiysqsnoizgzz @@ -32,3 +33,5 @@ b <- "test" 'test"ji"' # comment 1 + +call("a_is_long" = 2) diff --git a/tests/testthat/parsing/repeated_parsing-in_tree b/tests/testthat/parsing/repeated_parsing-in_tree index d753ee067..c43c3b88e 100644 --- a/tests/testthat/parsing/repeated_parsing-in_tree +++ b/tests/testthat/parsing/repeated_parsing-in_tree @@ -2,11 +2,11 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) ¦--COMMENT: # [0/0] {1} ¦--COMMENT: # [1/0] {2} ¦--COMMENT: # [1/0] {3} - ¦--expr: [1/0] {4} - ¦ ¦--expr: [0/1] {6} + ¦--expr: r <- [1/0] {4} + ¦ ¦--expr: r [0/1] {6} ¦ ¦ °--SYMBOL: r [0/0] {5} ¦ ¦--LEFT_ASSIGN: <- [0/1] {7} - ¦ °--expr: [0/0] {8} + ¦ °--expr: funct [0/0] {8} ¦ ¦--FUNCTION: funct [0/0] {9} ¦ ¦--'(': ( [0/0] {10} ¦ ¦--SYMBOL_FORMALS: y [0/0] {11} @@ -15,132 +15,136 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) ¦ ¦--',': , [0/1] {14} ¦ ¦--SYMBOL_FORMALS: g [0/1] {15} ¦ ¦--EQ_FORMALS: = [0/1] {16} - ¦ ¦--expr: [0/0] {18} + ¦ ¦--expr: 10 [0/0] {18} ¦ ¦ °--NUM_CONST: 10 [0/0] {17} ¦ ¦--')': ) [0/1] {19} - ¦ °--expr: [0/0] {20} + ¦ °--expr: { + b [0/0] {20} ¦ ¦--'{': { [0/2] {21} - ¦ ¦--expr: [1/2] {22} - ¦ ¦ ¦--expr: [0/0] {24} + 
¦ ¦--expr: b("", [1/2] {22} + ¦ ¦ ¦--expr: b [0/0] {24} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {23} ¦ ¦ ¦--'(': ( [0/0] {25} - ¦ ¦ ¦--expr: [0/0] {27} + ¦ ¦ ¦--expr: "" [0/0] {27} ¦ ¦ ¦ °--STR_CONST: "" [0/0] {26} ¦ ¦ ¦--',': , [0/1] {28} - ¦ ¦ ¦--expr: [0/0] {30} + ¦ ¦ ¦--expr: "" [0/0] {30} ¦ ¦ ¦ °--STR_CONST: "" [0/0] {29} ¦ ¦ °--')': ) [0/0] {31} ¦ ¦--COMMENT: # [2/2] {32} - ¦ ¦--expr: [1/2] {33} - ¦ ¦ ¦--expr: [0/1] {35} + ¦ ¦--expr: q <- [1/2] {33} + ¦ ¦ ¦--expr: q [0/1] {35} ¦ ¦ ¦ °--SYMBOL: q [0/0] {34} ¦ ¦ ¦--LEFT_ASSIGN: <- [0/1] {36} - ¦ ¦ °--expr: [0/0] {37} - ¦ ¦ ¦--expr: [0/0] {39} + ¦ ¦ °--expr: g(d(i [0/0] {37} + ¦ ¦ ¦--expr: g [0/0] {39} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: g [0/0] {38} ¦ ¦ ¦--'(': ( [0/0] {40} - ¦ ¦ ¦--expr: [0/0] {41} - ¦ ¦ ¦ ¦--expr: [0/0] {43} + ¦ ¦ ¦--expr: d(i) [0/0] {41} + ¦ ¦ ¦ ¦--expr: d [0/0] {43} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: d [0/0] {42} ¦ ¦ ¦ ¦--'(': ( [0/0] {44} - ¦ ¦ ¦ ¦--expr: [0/0] {46} + ¦ ¦ ¦ ¦--expr: i [0/0] {46} ¦ ¦ ¦ ¦ °--SYMBOL: i [0/0] {45} ¦ ¦ ¦ °--')': ) [0/0] {47} ¦ ¦ ¦--',': , [0/1] {48} - ¦ ¦ ¦--expr: [0/0] {49} + ¦ ¦ ¦--expr: funct [0/0] {49} ¦ ¦ ¦ ¦--FUNCTION: funct [0/0] {50} ¦ ¦ ¦ ¦--'(': ( [0/0] {51} ¦ ¦ ¦ ¦--SYMBOL_FORMALS: i [0/0] {52} ¦ ¦ ¦ ¦--')': ) [0/1] {53} - ¦ ¦ ¦ °--expr: [0/0] {54} + ¦ ¦ ¦ °--expr: { + [0/0] {54} ¦ ¦ ¦ ¦--'{': { [0/4] {55} - ¦ ¦ ¦ ¦--expr: [1/2] {56} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {58} + ¦ ¦ ¦ ¦--expr: d(op( [1/2] {56} + ¦ ¦ ¦ ¦ ¦--expr: d [0/0] {58} ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: d [0/0] {57} ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {59} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {60} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {62} + ¦ ¦ ¦ ¦ ¦--expr: op(t[ [0/0] {60} + ¦ ¦ ¦ ¦ ¦ ¦--expr: op [0/0] {62} ¦ ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: op [0/0] {61} ¦ ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {63} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {64} - ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {66} + ¦ ¦ ¦ ¦ ¦ ¦--expr: t[[p] [0/0] {64} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: t [0/0] {66} ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--SYMBOL: t [0/0] {65} ¦ ¦ ¦ ¦ ¦ ¦ ¦--LBB: [[ [0/0] {67} - ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {69} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: p [0/0] {69} ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--SYMBOL: p [0/0] {68} ¦ ¦ ¦ ¦ ¦ ¦ ¦--']': ] [0/0] {70} ¦ ¦ ¦ ¦ ¦ ¦ °--']': ] [0/0] {71} ¦ ¦ ¦ ¦ ¦ °--')': ) [0/0] {72} ¦ ¦ ¦ ¦ ¦--',': , [0/1] {73} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {74} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {76} + ¦ ¦ ¦ ¦ ¦--expr: n(i = [0/0] {74} + ¦ ¦ ¦ ¦ ¦ ¦--expr: n [0/0] {76} ¦ ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: n [0/0] {75} ¦ ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {77} ¦ ¦ ¦ ¦ ¦ ¦--SYMBOL_SUB: i [0/1] {78} ¦ ¦ ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {79} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {81} + ¦ ¦ ¦ ¦ ¦ ¦--expr: i [0/0] {81} ¦ ¦ ¦ ¦ ¦ ¦ °--SYMBOL: i [0/0] {80} ¦ ¦ ¦ ¦ ¦ °--')': ) [0/0] {82} ¦ ¦ ¦ ¦ °--')': ) [0/0] {83} ¦ ¦ ¦ °--'}': } [1/0] {84} ¦ ¦ °--')': ) [0/0] {85} - ¦ ¦--expr: [1/2] {86} - ¦ ¦ ¦--expr: [0/1] {87} - ¦ ¦ ¦ ¦--expr: [0/0] {89} + ¦ ¦--expr: f(cal [1/2] {86} + ¦ ¦ ¦--expr: f(cal [0/1] {87} + ¦ ¦ ¦ ¦--expr: f [0/0] {89} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: f [0/0] {88} ¦ ¦ ¦ ¦--'(': ( [0/0] {90} - ¦ ¦ ¦ ¦--expr: [0/0] {92} + ¦ ¦ ¦ ¦--expr: calls [0/0] {92} ¦ ¦ ¦ ¦ °--SYMBOL: calls [0/0] {91} ¦ ¦ ¦ °--')': ) [0/0] {93} ¦ ¦ ¦--LEFT_ASSIGN: <- [0/1] {94} - ¦ ¦ °--expr: [0/0] {95} - ¦ ¦ ¦--expr: [0/0] {97} + ¦ ¦ °--expr: f(g) [0/0] {95} + ¦ ¦ ¦--expr: f [0/0] {97} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: f [0/0] {96} ¦ ¦ ¦--'(': ( [0/0] {98} - ¦ ¦ ¦--expr: [0/0] {100} + ¦ ¦ ¦--expr: g [0/0] {100} ¦ ¦ ¦ °--SYMBOL: g [0/0] {99} ¦ ¦ °--')': ) [0/0] {101} - ¦ ¦--expr: [2/2] {102} - ¦ ¦ ¦--expr: [0/1] {104} + ¦ ¦--expr: mb <- [2/2] {102} + ¦ ¦ ¦--expr: mb [0/1] {104} ¦ ¦ ¦ °--SYMBOL: mb [0/0] {103} ¦ ¦ ¦--LEFT_ASSIGN: 
<- [0/1] {105} - ¦ ¦ °--expr: [0/0] {106} - ¦ ¦ ¦--expr: [0/0] {108} + ¦ ¦ °--expr: j(c( + [0/0] {106} + ¦ ¦ ¦--expr: j [0/0] {108} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: j [0/0] {107} ¦ ¦ ¦--'(': ( [0/0] {109} - ¦ ¦ ¦--expr: [0/0] {110} - ¦ ¦ ¦ ¦--expr: [0/0] {112} + ¦ ¦ ¦--expr: c( + [0/0] {110} + ¦ ¦ ¦ ¦--expr: c [0/0] {112} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {111} ¦ ¦ ¦ ¦--'(': ( [0/4] {113} - ¦ ¦ ¦ ¦--expr: [1/0] {114} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {116} + ¦ ¦ ¦ ¦--expr: q(a:: [1/0] {114} + ¦ ¦ ¦ ¦ ¦--expr: q [0/0] {116} ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: q [0/0] {115} ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {117} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {118} + ¦ ¦ ¦ ¦ ¦--expr: a::b [0/0] {118} ¦ ¦ ¦ ¦ ¦ ¦--SYMBOL_PACKAGE: a [0/0] {119} ¦ ¦ ¦ ¦ ¦ ¦--NS_GET: :: [0/0] {120} ¦ ¦ ¦ ¦ ¦ °--SYMBOL: b [0/0] {121} ¦ ¦ ¦ ¦ °--')': ) [0/0] {122} ¦ ¦ ¦ ¦--',': , [0/1] {123} - ¦ ¦ ¦ ¦--expr: [0/0] {125} + ¦ ¦ ¦ ¦--expr: r [0/0] {125} ¦ ¦ ¦ ¦ °--SYMBOL: r [0/0] {124} ¦ ¦ ¦ ¦--',': , [0/4] {126} - ¦ ¦ ¦ ¦--expr: [1/2] {127} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {129} + ¦ ¦ ¦ ¦--expr: y(u = [1/2] {127} + ¦ ¦ ¦ ¦ ¦--expr: y [0/0] {129} ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: y [0/0] {128} ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {130} ¦ ¦ ¦ ¦ ¦--SYMBOL_SUB: u [0/1] {131} ¦ ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {132} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {134} + ¦ ¦ ¦ ¦ ¦--expr: 1 [0/0] {134} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {133} ¦ ¦ ¦ ¦ °--')': ) [0/0] {135} ¦ ¦ ¦ °--')': ) [1/0] {136} ¦ ¦ °--')': ) [0/0] {137} - ¦ ¦--expr: [1/0] {138} - ¦ ¦ ¦--expr: [0/0] {140} + ¦ ¦--expr: k(b) [1/0] {138} + ¦ ¦ ¦--expr: k [0/0] {140} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: k [0/0] {139} ¦ ¦ ¦--'(': ( [0/0] {141} - ¦ ¦ ¦--expr: [0/0] {143} + ¦ ¦ ¦--expr: b [0/0] {143} ¦ ¦ ¦ °--SYMBOL: b [0/0] {142} ¦ ¦ °--')': ) [0/0] {144} ¦ °--'}': } [1/0] {145} diff --git a/tests/testthat/public-api/dry/styled.R b/tests/testthat/public-api/dry/styled.R new file mode 100644 index 000000000..8d2f0971e --- /dev/null +++ b/tests/testthat/public-api/dry/styled.R @@ -0,0 +1 @@ +1 + 1 diff --git a/tests/testthat/public-api/dry/styled.Rmd b/tests/testthat/public-api/dry/styled.Rmd new file mode 100644 index 000000000..d03d5cc42 --- /dev/null +++ b/tests/testthat/public-api/dry/styled.Rmd @@ -0,0 +1,28 @@ +--- +title: "unstyled.Rmd" +output: html_document +--- + +```{r setup, include=FALSE} +knitr::opts_chunk$set(echo = TRUE) +``` + +## R Markdown + +This is an R Markdown document. Markdown is a simple formatting syntax for authoring HTML, PDF, and MS Word documents. For more details on using R Markdown see . + +When you click the **Knit** button a document will be generated that includes both content as well as the output of any embedded R code chunks within the document. You can embed an R code chunk like this: + +```{r cars} +summary(cars) +``` + +## Including Plots + +You can also embed plots, for example: + +```{r pressure, echo=FALSE} +plot(pressure) +``` + +Note that the `echo = FALSE` parameter was added to the code chunk to prevent printing of the R code that generated the plot. diff --git a/tests/testthat/public-api/dry/styled.Rnw b/tests/testthat/public-api/dry/styled.Rnw new file mode 100644 index 000000000..67fcf219a --- /dev/null +++ b/tests/testthat/public-api/dry/styled.Rnw @@ -0,0 +1,22 @@ +\documentclass{article} + +\begin{document} + +This is all you need to do if you want to use the xyzpackage package: + +<>= +library(xyzpackage) +@ + +The quick brown fox jumps over the lazy dog the quick brown fox jumps over the +lazy dog the quick brown fox jumps over the lazy dog. 
+ +<>= +1 + 1 +rnorm(30) +@ + +The quick brown fox jumps over the lazy dog the quick brown fox jumps over the +lazy dog the quick brown fox jumps over the lazy dog. + +\end{document} diff --git a/tests/testthat/public-api/dry/unstyled.R b/tests/testthat/public-api/dry/unstyled.R new file mode 100644 index 000000000..c040fa67d --- /dev/null +++ b/tests/testthat/public-api/dry/unstyled.R @@ -0,0 +1 @@ +1+1 diff --git a/tests/testthat/public-api/dry/unstyled.Rmd b/tests/testthat/public-api/dry/unstyled.Rmd new file mode 100644 index 000000000..5ff9f4c19 --- /dev/null +++ b/tests/testthat/public-api/dry/unstyled.Rmd @@ -0,0 +1,28 @@ +--- +title: "unstyled.Rmd" +output: html_document +--- + +```{r setup, include=FALSE} +knitr::opts_chunk$set(echo = TRUE) +``` + +## R Markdown + +This is an R Markdown document. Markdown is a simple formatting syntax for authoring HTML, PDF, and MS Word documents. For more details on using R Markdown see . + +When you click the **Knit** button a document will be generated that includes both content as well as the output of any embedded R code chunks within the document. You can embed an R code chunk like this: + +```{r cars} +summary(cars ) +``` + +## Including Plots + +You can also embed plots, for example: + +```{r pressure, echo=FALSE} +plot(pressure) +``` + +Note that the `echo = FALSE` parameter was added to the code chunk to prevent printing of the R code that generated the plot. diff --git a/tests/testthat/public-api/dry/unstyled.Rnw b/tests/testthat/public-api/dry/unstyled.Rnw new file mode 100644 index 000000000..240c7739a --- /dev/null +++ b/tests/testthat/public-api/dry/unstyled.Rnw @@ -0,0 +1,23 @@ +\documentclass{article} + +\begin{document} + +This is all you need to do if you want to use the xyzpackage package: + +<>= +library( + xyzpackage) +@ + +The quick brown fox jumps over the lazy dog the quick brown fox jumps over the +lazy dog the quick brown fox jumps over the lazy dog. + +<>= +1 +1 +rnorm( 30) +@ + +The quick brown fox jumps over the lazy dog the quick brown fox jumps over the +lazy dog the quick brown fox jumps over the lazy dog. + +\end{document} diff --git a/tests/testthat/public-api/renvpkg/DESCRIPTION b/tests/testthat/public-api/renvpkg/DESCRIPTION new file mode 100644 index 000000000..04bff12f8 --- /dev/null +++ b/tests/testthat/public-api/renvpkg/DESCRIPTION @@ -0,0 +1,10 @@ +Package: xyzpackage +Title: What the Package Does (one line, title case) +Version: 0.0.0.9000 +Authors@R: person("First", "Last", email = "first.last@example.com", role = c("aut", "cre")) +Description: What the package does (one paragraph). +Depends: R (>= 3.3.3) +License: What license is it under? +Encoding: UTF-8 +LazyData: true +Suggests: testthat diff --git a/tests/testthat/public-api/renvpkg/NAMESPACE b/tests/testthat/public-api/renvpkg/NAMESPACE new file mode 100644 index 000000000..884a6312a --- /dev/null +++ b/tests/testthat/public-api/renvpkg/NAMESPACE @@ -0,0 +1,2 @@ +# Generated by roxygen2: fake comment so roxygen2 overwrites silently. 
+exportPattern("^[^\\.]") diff --git a/tests/testthat/public-api/renvpkg/renv/hello-world.R b/tests/testthat/public-api/renvpkg/renv/hello-world.R new file mode 100644 index 000000000..d2cb60dff --- /dev/null +++ b/tests/testthat/public-api/renvpkg/renv/hello-world.R @@ -0,0 +1,3 @@ +hello_world <- function() { + print("hello, world") +} diff --git a/tests/testthat/public-api/renvpkg/tests/testthat.R b/tests/testthat/public-api/renvpkg/tests/testthat.R new file mode 100644 index 000000000..89b573e70 --- /dev/null +++ b/tests/testthat/public-api/renvpkg/tests/testthat.R @@ -0,0 +1,4 @@ +library(testthat) +library(xyzpackage) + +test_check("xyzpackage") diff --git a/tests/testthat/public-api/renvpkg/tests/testthat/test-package-xyz.R b/tests/testthat/public-api/renvpkg/tests/testthat/test-package-xyz.R new file mode 100644 index 000000000..92f42950b --- /dev/null +++ b/tests/testthat/public-api/renvpkg/tests/testthat/test-package-xyz.R @@ -0,0 +1,3 @@ +test_that("hi there", { + I(am(a(package(x)))) +}) diff --git a/tests/testthat/public-api/renvpkg/xyzpackage.Rproj b/tests/testthat/public-api/renvpkg/xyzpackage.Rproj new file mode 100644 index 000000000..d848a9ff5 --- /dev/null +++ b/tests/testthat/public-api/renvpkg/xyzpackage.Rproj @@ -0,0 +1,16 @@ +Version: 1.0 + +RestoreWorkspace: No +SaveWorkspace: No +AlwaysSaveHistory: Default + +EnableCodeIndexing: Yes +Encoding: UTF-8 + +AutoAppendNewline: Yes +StripTrailingWhitespace: Yes + +BuildType: Package +PackageUseDevtools: Yes +PackageInstallArgs: --no-multiarch --with-keep.source +PackageRoxygenize: rd,collate,namespace diff --git a/tests/testthat/public-api/xyz-r-and-rmd-dir/random-rmd-script.Rmarkdown b/tests/testthat/public-api/xyz-r-and-rmd-dir/random-rmd-script.Rmarkdown new file mode 100644 index 000000000..8a2767ca2 --- /dev/null +++ b/tests/testthat/public-api/xyz-r-and-rmd-dir/random-rmd-script.Rmarkdown @@ -0,0 +1,30 @@ +--- +title: "random-rmd-script" +author: "Lorenz Walthert" +date: "11/25/2017" +output: html_document +--- + +```{r setup, include=FALSE} +1 + 1 +``` + +## R Markdown + +This is an R Markdown document. Markdown is a simple formatting syntax for authoring HTML, PDF, and MS Word documents. For more details on using R Markdown see . + +When you click the **Knit** button a document will be generated that includes both content as well as the output of any embedded R code chunks within the document. You can embed an R code chunk like this: + +```{r cars} +summary(cars) +``` + +## Including Plots + +You can also embed plots, for example: + +```{r pressure, echo=FALSE} +plot(pressure) +``` + +Note that the `echo = FALSE` parameter was added to the code chunk to prevent printing of the R code that generated the plot. 
diff --git a/tests/testthat/public-api/xyzdir-dirty/clean-reference-with-scope-tokens b/tests/testthat/public-api/xyzdir-dirty/clean-reference-with-scope-tokens deleted file mode 100644 index b39f3ed2e..000000000 Binary files a/tests/testthat/public-api/xyzdir-dirty/clean-reference-with-scope-tokens and /dev/null differ diff --git a/tests/testthat/public-api/xyzdir-dirty/dirty-reference-with-scope-spaces b/tests/testthat/public-api/xyzdir-dirty/dirty-reference-with-scope-spaces deleted file mode 100644 index 1d7c449b2..000000000 Binary files a/tests/testthat/public-api/xyzdir-dirty/dirty-reference-with-scope-spaces and /dev/null differ diff --git a/tests/testthat/public-api/xyzdir-dirty/dirty-reference-with-scope-tokens b/tests/testthat/public-api/xyzdir-dirty/dirty-reference-with-scope-tokens deleted file mode 100644 index 70ba2bba6..000000000 Binary files a/tests/testthat/public-api/xyzdir-dirty/dirty-reference-with-scope-tokens and /dev/null differ diff --git a/tests/testthat/public-api/xyzfile-rnw/random.Rnw b/tests/testthat/public-api/xyzfile-rnw/random.Rnw new file mode 100644 index 000000000..e964baaf9 --- /dev/null +++ b/tests/testthat/public-api/xyzfile-rnw/random.Rnw @@ -0,0 +1,28 @@ +\documentclass{article} + +\begin{document} + +Some text +<<>>= +# Some R code +f <- function(x) { + x +} +@ + +More text + +<<>>= +# More R code +g <- function(y) { + y +} +@ + +Final text +<<>>= +1 + 2 +@ + + +\end{document} diff --git a/tests/testthat/public-api/xyzfile-rnw/random2.Rnw b/tests/testthat/public-api/xyzfile-rnw/random2.Rnw new file mode 100644 index 000000000..d458220d2 --- /dev/null +++ b/tests/testthat/public-api/xyzfile-rnw/random2.Rnw @@ -0,0 +1,25 @@ +\documentclass{article} + +\begin{document} + +<<>>= +# Start with chunk +@ + +Some text before empty chunk +<<>>= +@ + +Final text before longer code chunk +This text chunk has multiple lines +<<>>= +# random +this(is_a_call(x)) +if (x) { + r() + a <- 3 + bcds <- 5 +} +@ + +\end{document} diff --git a/tests/testthat/public-api/xyzfile-rnw/random3.Rnw b/tests/testthat/public-api/xyzfile-rnw/random3.Rnw new file mode 100644 index 000000000..34044703e --- /dev/null +++ b/tests/testthat/public-api/xyzfile-rnw/random3.Rnw @@ -0,0 +1,25 @@ +\documentclass{article} + +\begin{document} + +Some text +<<>>= +# Some R code +f <- function(x) { + x +} +@ +More text before malformed chunk +# More R code +g <- function(y) { + y +} +@ + +Final text +<<>>= +1 + 2 +@ + + +\end{document} diff --git a/tests/testthat/public-api/xyzfile-rnw/random4.Rnw b/tests/testthat/public-api/xyzfile-rnw/random4.Rnw new file mode 100644 index 000000000..691bda529 --- /dev/null +++ b/tests/testthat/public-api/xyzfile-rnw/random4.Rnw @@ -0,0 +1,26 @@ +\documentclass{article} + +\begin{document} + +Some text +<<>>= +# Some R code +f <- function(x) { + x +} +@ + +More text +<>= +# More R code which is invalid +g <- function(y) { + y +@ + +Final text +<<>>= +1 + 2 +@ + + +\end{document} diff --git a/tests/testthat/public-api/xyzfile/subfolder/random-script.R b/tests/testthat/public-api/xyzfile/subfolder/random-script.R new file mode 100644 index 000000000..62ba053b8 --- /dev/null +++ b/tests/testthat/public-api/xyzfile/subfolder/random-script.R @@ -0,0 +1,7 @@ +# random +this(is_a_call(x)) +if (x) { + r() + a <- 3 + bcds <- 5 +} diff --git a/tests/testthat/public-api/xyzfile_qmd/new.qmd b/tests/testthat/public-api/xyzfile_qmd/new.qmd new file mode 100644 index 000000000..eda390a3e --- /dev/null +++ b/tests/testthat/public-api/xyzfile_qmd/new.qmd @@ -0,0 +1,19 @@ +--- 
+output: + github_document: + html_preview: true +--- + + + +Some text +```{r} +# Some R code +f <- function(x) { + x +} +``` +Final text +```{r} +1 + 2 +``` diff --git a/tests/testthat/public-api/xyzfile_rmd/random4.Rmd b/tests/testthat/public-api/xyzfile_rmd/invalid4.Rmd similarity index 100% rename from tests/testthat/public-api/xyzfile_rmd/random4.Rmd rename to tests/testthat/public-api/xyzfile_rmd/invalid4.Rmd diff --git a/tests/testthat/public-api/xyzfile_rmd/invalid7.Rmd b/tests/testthat/public-api/xyzfile_rmd/invalid7.Rmd new file mode 100644 index 000000000..87c1d73bf --- /dev/null +++ b/tests/testthat/public-api/xyzfile_rmd/invalid7.Rmd @@ -0,0 +1,23 @@ +Some text +```{r} +# Some R code +f <- function(x) { + x +} +``` +More text before malformed chunk +# More R code +g <- function(y) { + y +} +``` +Final text +```{r} + +``` + +Final text +```{r} +```{r} +1 + 2 +``` diff --git a/tests/testthat/public-api/xyzfile_rmd/random.Rmarkdown b/tests/testthat/public-api/xyzfile_rmd/random.Rmarkdown new file mode 100644 index 000000000..c5528fc5e --- /dev/null +++ b/tests/testthat/public-api/xyzfile_rmd/random.Rmarkdown @@ -0,0 +1,18 @@ +Some text +```{r} +# Some R code +f <- function(x) { + x +} +``` +More text +```{r} +# More R code +g <- function(y) { + y +} +``` +Final text +```{r} +1 + 2 +``` diff --git a/tests/testthat/public-api/xyzfile_rmd/random2.Rmarkdown b/tests/testthat/public-api/xyzfile_rmd/random2.Rmarkdown new file mode 100644 index 000000000..d0a17fbdf --- /dev/null +++ b/tests/testthat/public-api/xyzfile_rmd/random2.Rmarkdown @@ -0,0 +1,17 @@ +```{r} +# Start with chunk +``` +Some text before empty chunk +```{r} +``` +Final text before longer code chunk +This text chunk has multiple lines +```{R} +# random +this(is_a_call(x)) +if (x) { + r() + a <- 3 + bcds <- 5 +} +``` diff --git a/tests/testthat/public-api/xyzfile_rmd/random2.Rmd b/tests/testthat/public-api/xyzfile_rmd/random2.Rmd index f372022ce..d0a17fbdf 100644 --- a/tests/testthat/public-api/xyzfile_rmd/random2.Rmd +++ b/tests/testthat/public-api/xyzfile_rmd/random2.Rmd @@ -3,11 +3,10 @@ ``` Some text before empty chunk ```{r} - ``` Final text before longer code chunk This text chunk has multiple lines -```{r} +```{R} # random this(is_a_call(x)) if (x) { diff --git a/tests/testthat/public-api/xyzpackage-qmd/DESCRIPTION b/tests/testthat/public-api/xyzpackage-qmd/DESCRIPTION new file mode 100644 index 000000000..69b7e9773 --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-qmd/DESCRIPTION @@ -0,0 +1,15 @@ +Package: xyzpackage +Title: What the Package Does (one line, title case) +Version: 0.0.0.9000 +Authors@R: person("First", "Last", email = "first.last@example.com", role = c("aut", "cre")) +Description: What the package does (one paragraph). +Depends: R (>= 3.3.2) +License: What license is it under? 
+Encoding: UTF-8 +LazyData: true +Suggests: testthat +LinkingTo: + Rcpp +Imports: + Rcpp +RoxygenNote: 6.0.1.9000 diff --git a/tests/testthat/public-api/xyzpackage-qmd/NAMESPACE b/tests/testthat/public-api/xyzpackage-qmd/NAMESPACE new file mode 100644 index 000000000..e651b9448 --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-qmd/NAMESPACE @@ -0,0 +1 @@ +# Generated by roxygen2: do not edit by hand diff --git a/tests/testthat/public-api/xyzpackage-qmd/R/RcppExports.R b/tests/testthat/public-api/xyzpackage-qmd/R/RcppExports.R new file mode 100644 index 000000000..7773bec0d --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-qmd/R/RcppExports.R @@ -0,0 +1,6 @@ +# Generated by using Rcpp::compileAttributes() -> do not edit by hand +# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 + +timesTwo <- function(x) { + .Call("_xyzpackage_timesTwo", PACKAGE = "xyzpackage", x) +} diff --git a/tests/testthat/public-api/xyzpackage-qmd/R/hello-world.R b/tests/testthat/public-api/xyzpackage-qmd/R/hello-world.R new file mode 100644 index 000000000..d2cb60dff --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-qmd/R/hello-world.R @@ -0,0 +1,3 @@ +hello_world <- function() { + print("hello, world") +} diff --git a/tests/testthat/public-api/xyzpackage-qmd/README.Rmd b/tests/testthat/public-api/xyzpackage-qmd/README.Rmd new file mode 100644 index 000000000..1183cea92 --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-qmd/README.Rmd @@ -0,0 +1,33 @@ +--- +output: github_document +--- + + + +```{r setup, include = FALSE} +knitr::opts_chunk$set( + collapse = TRUE, + comment = "#>", + fig.path = "man/figures/README-" +) +``` +# styler + +The goal of styler is to ... + +## Installation + +You can install styler from github with: + +```{r gh-installation, eval = FALSE} +# install.packages("devtools") +devtools::install_github("jonmcalder/styler") +``` + +## Example + +This is a basic example which shows you how to solve a common problem: + +```{r example} +## basic example code +``` diff --git a/tests/testthat/public-api/xyzpackage-qmd/new.qmd b/tests/testthat/public-api/xyzpackage-qmd/new.qmd new file mode 100644 index 000000000..eda390a3e --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-qmd/new.qmd @@ -0,0 +1,19 @@ +--- +output: + github_document: + html_preview: true +--- + + + +Some text +```{r} +# Some R code +f <- function(x) { + x +} +``` +Final text +```{r} +1 + 2 +``` diff --git a/tests/testthat/public-api/xyzpackage-qmd/src/.gitignore b/tests/testthat/public-api/xyzpackage-qmd/src/.gitignore new file mode 100644 index 000000000..2f843a5dc --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-qmd/src/.gitignore @@ -0,0 +1,3 @@ +*.dll +*.o +*.so diff --git a/tests/testthat/public-api/xyzpackage-qmd/src/RcppExports.cpp b/tests/testthat/public-api/xyzpackage-qmd/src/RcppExports.cpp new file mode 100644 index 000000000..fcecddd2a --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-qmd/src/RcppExports.cpp @@ -0,0 +1,28 @@ +// Generated by using Rcpp::compileAttributes() -> do not edit by hand +// Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 + +#include + +using namespace Rcpp; + +// timesTwo +NumericVector timesTwo(NumericVector x); +RcppExport SEXP _xyzpackage_timesTwo(SEXP xSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::RNGScope rcpp_rngScope_gen; + Rcpp::traits::input_parameter< NumericVector >::type x(xSEXP); + rcpp_result_gen = Rcpp::wrap(timesTwo(x)); + return rcpp_result_gen; +END_RCPP +} + +static const R_CallMethodDef CallEntries[] = { 
+ {"_xyzpackage_timesTwo", (DL_FUNC) &_xyzpackage_timesTwo, 1}, + {NULL, NULL, 0} +}; + +RcppExport void R_init_xyzpackage(DllInfo *dll) { + R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); + R_useDynamicSymbols(dll, FALSE); +} diff --git a/tests/testthat/public-api/xyzpackage-qmd/src/timesTwo.cpp b/tests/testthat/public-api/xyzpackage-qmd/src/timesTwo.cpp new file mode 100644 index 000000000..b650533e0 --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-qmd/src/timesTwo.cpp @@ -0,0 +1,27 @@ +#include +using namespace Rcpp; + +// This is a simple example of exporting a C++ function to R. You can +// source this function into an R session using the Rcpp::sourceCpp +// function (or via the Source button on the editor toolbar). Learn +// more about Rcpp at: +// +// http://www.rcpp.org/ +// http://adv-r.had.co.nz/Rcpp.html +// http://gallery.rcpp.org/ +// + +// [[Rcpp::export]] +NumericVector timesTwo(NumericVector x) { + return x * 2; +} + + +// You can include R code blocks in C++ files processed with sourceCpp +// (useful for testing and development). The R code will be automatically +// run after the compilation. +// + +/*** R +timesTwo(42) +*/ diff --git a/tests/testthat/public-api/xyzpackage-qmd/tests/testthat.R b/tests/testthat/public-api/xyzpackage-qmd/tests/testthat.R new file mode 100644 index 000000000..89b573e70 --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-qmd/tests/testthat.R @@ -0,0 +1,4 @@ +library(testthat) +library(xyzpackage) + +test_check("xyzpackage") diff --git a/tests/testthat/public-api/xyzpackage-qmd/tests/testthat/test-package-xyz.R b/tests/testthat/public-api/xyzpackage-qmd/tests/testthat/test-package-xyz.R new file mode 100644 index 000000000..92f42950b --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-qmd/tests/testthat/test-package-xyz.R @@ -0,0 +1,3 @@ +test_that("hi there", { + I(am(a(package(x)))) +}) diff --git a/tests/testthat/public-api/xyzpackage-qmd/vignettes/random.Rmarkdown b/tests/testthat/public-api/xyzpackage-qmd/vignettes/random.Rmarkdown new file mode 100644 index 000000000..aace6af45 --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-qmd/vignettes/random.Rmarkdown @@ -0,0 +1,58 @@ +--- +title: "Vignette Title" +author: "Vignette Author" +date: "`r Sys.Date()`" +output: rmarkdown::html_vignette +vignette: > + %\VignetteIndexEntry{Vignette Title} + %\VignetteEngine{knitr::rmarkdown} + %\VignetteEncoding{UTF-8} +--- + +Vignettes are long form documentation commonly included in packages. Because they are part of the distribution of the package, they need to be as compact as possible. The `html_vignette` output type provides a custom style sheet (and tweaks some options) to ensure that the resulting html is as small as possible. The `html_vignette` format: + +- Never uses retina figures +- Has a smaller default figure size +- Uses a custom CSS stylesheet instead of the default Twitter Bootstrap style + +## Vignette Info + +Note the various macros within the `vignette` section of the metadata block above. These are required in order to instruct R how to build the vignette. Note that you should change the `title` field and the `\VignetteIndexEntry` to match the title of your vignette. + +## Styles + +The `html_vignette` template includes a basic CSS theme. To override this theme you can specify your own CSS in the document metadata as follows: + + output: + rmarkdown::html_vignette: + css: mystyles.css + +## Figures + +The figure sizes have been customised so that you can easily put two images side-by-side. 
+ +```{r, fig.show='hold'} +plot(1:10) +plot(10:1) +``` + +You can enable figure captions by `fig_caption: yes` in YAML: + + output: + rmarkdown::html_vignette: + fig_caption: yes + +Then you can use the chunk option `fig.cap = "Your figure caption."` in **knitr**. + +## More Examples + +You can write math expressions, e.g. $Y = X\beta + \epsilon$, footnotes^[A footnote here.], and tables, e.g. using `knitr::kable()`. + +```{r, echo=FALSE, results='asis'} +knitr::kable(head(mtcars, 10)) +``` + +Also a quote using `>`: + +> "He who gives up [code] safety for [code] speed deserves neither." +([via](https://twitter.com/hadleywickham/status/504368538874703872)) diff --git a/tests/testthat/public-api/xyzpackage-qmd/vignettes/random.Rmd b/tests/testthat/public-api/xyzpackage-qmd/vignettes/random.Rmd new file mode 100644 index 000000000..aace6af45 --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-qmd/vignettes/random.Rmd @@ -0,0 +1,58 @@ +--- +title: "Vignette Title" +author: "Vignette Author" +date: "`r Sys.Date()`" +output: rmarkdown::html_vignette +vignette: > + %\VignetteIndexEntry{Vignette Title} + %\VignetteEngine{knitr::rmarkdown} + %\VignetteEncoding{UTF-8} +--- + +Vignettes are long form documentation commonly included in packages. Because they are part of the distribution of the package, they need to be as compact as possible. The `html_vignette` output type provides a custom style sheet (and tweaks some options) to ensure that the resulting html is as small as possible. The `html_vignette` format: + +- Never uses retina figures +- Has a smaller default figure size +- Uses a custom CSS stylesheet instead of the default Twitter Bootstrap style + +## Vignette Info + +Note the various macros within the `vignette` section of the metadata block above. These are required in order to instruct R how to build the vignette. Note that you should change the `title` field and the `\VignetteIndexEntry` to match the title of your vignette. + +## Styles + +The `html_vignette` template includes a basic CSS theme. To override this theme you can specify your own CSS in the document metadata as follows: + + output: + rmarkdown::html_vignette: + css: mystyles.css + +## Figures + +The figure sizes have been customised so that you can easily put two images side-by-side. + +```{r, fig.show='hold'} +plot(1:10) +plot(10:1) +``` + +You can enable figure captions by `fig_caption: yes` in YAML: + + output: + rmarkdown::html_vignette: + fig_caption: yes + +Then you can use the chunk option `fig.cap = "Your figure caption."` in **knitr**. + +## More Examples + +You can write math expressions, e.g. $Y = X\beta + \epsilon$, footnotes^[A footnote here.], and tables, e.g. using `knitr::kable()`. + +```{r, echo=FALSE, results='asis'} +knitr::kable(head(mtcars, 10)) +``` + +Also a quote using `>`: + +> "He who gives up [code] safety for [code] speed deserves neither." 
+([via](https://twitter.com/hadleywickham/status/504368538874703872)) diff --git a/tests/testthat/public-api/xyzpackage-qmd/xyzpackage.Rproj b/tests/testthat/public-api/xyzpackage-qmd/xyzpackage.Rproj new file mode 100644 index 000000000..d848a9ff5 --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-qmd/xyzpackage.Rproj @@ -0,0 +1,16 @@ +Version: 1.0 + +RestoreWorkspace: No +SaveWorkspace: No +AlwaysSaveHistory: Default + +EnableCodeIndexing: Yes +Encoding: UTF-8 + +AutoAppendNewline: Yes +StripTrailingWhitespace: Yes + +BuildType: Package +PackageUseDevtools: Yes +PackageInstallArgs: --no-multiarch --with-keep.source +PackageRoxygenize: rd,collate,namespace diff --git a/tests/testthat/public-api/xyzpackage-rmd/NAMESPACE b/tests/testthat/public-api/xyzpackage-rmd/NAMESPACE index 6ae926839..e651b9448 100644 --- a/tests/testthat/public-api/xyzpackage-rmd/NAMESPACE +++ b/tests/testthat/public-api/xyzpackage-rmd/NAMESPACE @@ -1,2 +1 @@ # Generated by roxygen2: do not edit by hand - diff --git a/tests/testthat/public-api/xyzpackage-rmd/src/.gitignore b/tests/testthat/public-api/xyzpackage-rmd/src/.gitignore index 22034c461..2f843a5dc 100644 --- a/tests/testthat/public-api/xyzpackage-rmd/src/.gitignore +++ b/tests/testthat/public-api/xyzpackage-rmd/src/.gitignore @@ -1,3 +1,3 @@ +*.dll *.o *.so -*.dll diff --git a/tests/testthat/public-api/xyzpackage-rmd/tests/testthat/test-package-xyz.R b/tests/testthat/public-api/xyzpackage-rmd/tests/testthat/test-package-xyz.R index 23dadd10a..92f42950b 100644 --- a/tests/testthat/public-api/xyzpackage-rmd/tests/testthat/test-package-xyz.R +++ b/tests/testthat/public-api/xyzpackage-rmd/tests/testthat/test-package-xyz.R @@ -1,5 +1,3 @@ -context("testing styler on package") - test_that("hi there", { I(am(a(package(x)))) }) diff --git a/tests/testthat/public-api/xyzpackage-rmd/vignettes/random.Rmarkdown b/tests/testthat/public-api/xyzpackage-rmd/vignettes/random.Rmarkdown new file mode 100644 index 000000000..aace6af45 --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-rmd/vignettes/random.Rmarkdown @@ -0,0 +1,58 @@ +--- +title: "Vignette Title" +author: "Vignette Author" +date: "`r Sys.Date()`" +output: rmarkdown::html_vignette +vignette: > + %\VignetteIndexEntry{Vignette Title} + %\VignetteEngine{knitr::rmarkdown} + %\VignetteEncoding{UTF-8} +--- + +Vignettes are long form documentation commonly included in packages. Because they are part of the distribution of the package, they need to be as compact as possible. The `html_vignette` output type provides a custom style sheet (and tweaks some options) to ensure that the resulting html is as small as possible. The `html_vignette` format: + +- Never uses retina figures +- Has a smaller default figure size +- Uses a custom CSS stylesheet instead of the default Twitter Bootstrap style + +## Vignette Info + +Note the various macros within the `vignette` section of the metadata block above. These are required in order to instruct R how to build the vignette. Note that you should change the `title` field and the `\VignetteIndexEntry` to match the title of your vignette. + +## Styles + +The `html_vignette` template includes a basic CSS theme. To override this theme you can specify your own CSS in the document metadata as follows: + + output: + rmarkdown::html_vignette: + css: mystyles.css + +## Figures + +The figure sizes have been customised so that you can easily put two images side-by-side. 
+ +```{r, fig.show='hold'} +plot(1:10) +plot(10:1) +``` + +You can enable figure captions by `fig_caption: yes` in YAML: + + output: + rmarkdown::html_vignette: + fig_caption: yes + +Then you can use the chunk option `fig.cap = "Your figure caption."` in **knitr**. + +## More Examples + +You can write math expressions, e.g. $Y = X\beta + \epsilon$, footnotes^[A footnote here.], and tables, e.g. using `knitr::kable()`. + +```{r, echo=FALSE, results='asis'} +knitr::kable(head(mtcars, 10)) +``` + +Also a quote using `>`: + +> "He who gives up [code] safety for [code] speed deserves neither." +([via](https://twitter.com/hadleywickham/status/504368538874703872)) diff --git a/tests/testthat/public-api/xyzpackage-rnw/DESCRIPTION b/tests/testthat/public-api/xyzpackage-rnw/DESCRIPTION new file mode 100644 index 000000000..69b7e9773 --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-rnw/DESCRIPTION @@ -0,0 +1,15 @@ +Package: xyzpackage +Title: What the Package Does (one line, title case) +Version: 0.0.0.9000 +Authors@R: person("First", "Last", email = "first.last@example.com", role = c("aut", "cre")) +Description: What the package does (one paragraph). +Depends: R (>= 3.3.2) +License: What license is it under? +Encoding: UTF-8 +LazyData: true +Suggests: testthat +LinkingTo: + Rcpp +Imports: + Rcpp +RoxygenNote: 6.0.1.9000 diff --git a/tests/testthat/public-api/xyzpackage-rnw/NAMESPACE b/tests/testthat/public-api/xyzpackage-rnw/NAMESPACE new file mode 100644 index 000000000..e651b9448 --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-rnw/NAMESPACE @@ -0,0 +1 @@ +# Generated by roxygen2: do not edit by hand diff --git a/tests/testthat/public-api/xyzpackage-rnw/R/RcppExports.R b/tests/testthat/public-api/xyzpackage-rnw/R/RcppExports.R new file mode 100644 index 000000000..7773bec0d --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-rnw/R/RcppExports.R @@ -0,0 +1,6 @@ +# Generated by using Rcpp::compileAttributes() -> do not edit by hand +# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 + +timesTwo <- function(x) { + .Call("_xyzpackage_timesTwo", PACKAGE = "xyzpackage", x) +} diff --git a/tests/testthat/public-api/xyzpackage-rnw/R/hello-world.R b/tests/testthat/public-api/xyzpackage-rnw/R/hello-world.R new file mode 100644 index 000000000..d2cb60dff --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-rnw/R/hello-world.R @@ -0,0 +1,3 @@ +hello_world <- function() { + print("hello, world") +} diff --git a/tests/testthat/public-api/xyzpackage-rnw/README.Rmd b/tests/testthat/public-api/xyzpackage-rnw/README.Rmd new file mode 100644 index 000000000..1183cea92 --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-rnw/README.Rmd @@ -0,0 +1,33 @@ +--- +output: github_document +--- + + + +```{r setup, include = FALSE} +knitr::opts_chunk$set( + collapse = TRUE, + comment = "#>", + fig.path = "man/figures/README-" +) +``` +# styler + +The goal of styler is to ... 
+ +## Installation + +You can install styler from github with: + +```{r gh-installation, eval = FALSE} +# install.packages("devtools") +devtools::install_github("jonmcalder/styler") +``` + +## Example + +This is a basic example which shows you how to solve a common problem: + +```{r example} +## basic example code +``` diff --git a/tests/testthat/public-api/xyzpackage-rnw/src/.gitignore b/tests/testthat/public-api/xyzpackage-rnw/src/.gitignore new file mode 100644 index 000000000..2f843a5dc --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-rnw/src/.gitignore @@ -0,0 +1,3 @@ +*.dll +*.o +*.so diff --git a/tests/testthat/public-api/xyzpackage-rnw/src/RcppExports.cpp b/tests/testthat/public-api/xyzpackage-rnw/src/RcppExports.cpp new file mode 100644 index 000000000..fcecddd2a --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-rnw/src/RcppExports.cpp @@ -0,0 +1,28 @@ +// Generated by using Rcpp::compileAttributes() -> do not edit by hand +// Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 + +#include + +using namespace Rcpp; + +// timesTwo +NumericVector timesTwo(NumericVector x); +RcppExport SEXP _xyzpackage_timesTwo(SEXP xSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::RNGScope rcpp_rngScope_gen; + Rcpp::traits::input_parameter< NumericVector >::type x(xSEXP); + rcpp_result_gen = Rcpp::wrap(timesTwo(x)); + return rcpp_result_gen; +END_RCPP +} + +static const R_CallMethodDef CallEntries[] = { + {"_xyzpackage_timesTwo", (DL_FUNC) &_xyzpackage_timesTwo, 1}, + {NULL, NULL, 0} +}; + +RcppExport void R_init_xyzpackage(DllInfo *dll) { + R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); + R_useDynamicSymbols(dll, FALSE); +} diff --git a/tests/testthat/public-api/xyzpackage-rnw/src/timesTwo.cpp b/tests/testthat/public-api/xyzpackage-rnw/src/timesTwo.cpp new file mode 100644 index 000000000..b650533e0 --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-rnw/src/timesTwo.cpp @@ -0,0 +1,27 @@ +#include +using namespace Rcpp; + +// This is a simple example of exporting a C++ function to R. You can +// source this function into an R session using the Rcpp::sourceCpp +// function (or via the Source button on the editor toolbar). Learn +// more about Rcpp at: +// +// http://www.rcpp.org/ +// http://adv-r.had.co.nz/Rcpp.html +// http://gallery.rcpp.org/ +// + +// [[Rcpp::export]] +NumericVector timesTwo(NumericVector x) { + return x * 2; +} + + +// You can include R code blocks in C++ files processed with sourceCpp +// (useful for testing and development). The R code will be automatically +// run after the compilation. 
+// + +/*** R +timesTwo(42) +*/ diff --git a/tests/testthat/public-api/xyzpackage-rnw/tests/testthat.R b/tests/testthat/public-api/xyzpackage-rnw/tests/testthat.R new file mode 100644 index 000000000..89b573e70 --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-rnw/tests/testthat.R @@ -0,0 +1,4 @@ +library(testthat) +library(xyzpackage) + +test_check("xyzpackage") diff --git a/tests/testthat/public-api/xyzpackage-rnw/tests/testthat/test-package-xyz.R b/tests/testthat/public-api/xyzpackage-rnw/tests/testthat/test-package-xyz.R new file mode 100644 index 000000000..92f42950b --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-rnw/tests/testthat/test-package-xyz.R @@ -0,0 +1,3 @@ +test_that("hi there", { + I(am(a(package(x)))) +}) diff --git a/tests/testthat/public-api/xyzpackage-rnw/vignettes/random.Rmd b/tests/testthat/public-api/xyzpackage-rnw/vignettes/random.Rmd new file mode 100644 index 000000000..aace6af45 --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-rnw/vignettes/random.Rmd @@ -0,0 +1,58 @@ +--- +title: "Vignette Title" +author: "Vignette Author" +date: "`r Sys.Date()`" +output: rmarkdown::html_vignette +vignette: > + %\VignetteIndexEntry{Vignette Title} + %\VignetteEngine{knitr::rmarkdown} + %\VignetteEncoding{UTF-8} +--- + +Vignettes are long form documentation commonly included in packages. Because they are part of the distribution of the package, they need to be as compact as possible. The `html_vignette` output type provides a custom style sheet (and tweaks some options) to ensure that the resulting html is as small as possible. The `html_vignette` format: + +- Never uses retina figures +- Has a smaller default figure size +- Uses a custom CSS stylesheet instead of the default Twitter Bootstrap style + +## Vignette Info + +Note the various macros within the `vignette` section of the metadata block above. These are required in order to instruct R how to build the vignette. Note that you should change the `title` field and the `\VignetteIndexEntry` to match the title of your vignette. + +## Styles + +The `html_vignette` template includes a basic CSS theme. To override this theme you can specify your own CSS in the document metadata as follows: + + output: + rmarkdown::html_vignette: + css: mystyles.css + +## Figures + +The figure sizes have been customised so that you can easily put two images side-by-side. + +```{r, fig.show='hold'} +plot(1:10) +plot(10:1) +``` + +You can enable figure captions by `fig_caption: yes` in YAML: + + output: + rmarkdown::html_vignette: + fig_caption: yes + +Then you can use the chunk option `fig.cap = "Your figure caption."` in **knitr**. + +## More Examples + +You can write math expressions, e.g. $Y = X\beta + \epsilon$, footnotes^[A footnote here.], and tables, e.g. using `knitr::kable()`. + +```{r, echo=FALSE, results='asis'} +knitr::kable(head(mtcars, 10)) +``` + +Also a quote using `>`: + +> "He who gives up [code] safety for [code] speed deserves neither." 
+([via](https://twitter.com/hadleywickham/status/504368538874703872)) diff --git a/tests/testthat/public-api/xyzpackage-rnw/vignettes/random.Rnw b/tests/testthat/public-api/xyzpackage-rnw/vignettes/random.Rnw new file mode 100644 index 000000000..67fcf219a --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-rnw/vignettes/random.Rnw @@ -0,0 +1,22 @@ +\documentclass{article} + +\begin{document} + +This is all you need to do if you want to use the xyzpackage package: + +<>= +library(xyzpackage) +@ + +The quick brown fox jumps over the lazy dog the quick brown fox jumps over the +lazy dog the quick brown fox jumps over the lazy dog. + +<>= +1 + 1 +rnorm(30) +@ + +The quick brown fox jumps over the lazy dog the quick brown fox jumps over the +lazy dog the quick brown fox jumps over the lazy dog. + +\end{document} diff --git a/tests/testthat/public-api/xyzpackage-rnw/xyzpackage.Rproj b/tests/testthat/public-api/xyzpackage-rnw/xyzpackage.Rproj new file mode 100644 index 000000000..d848a9ff5 --- /dev/null +++ b/tests/testthat/public-api/xyzpackage-rnw/xyzpackage.Rproj @@ -0,0 +1,16 @@ +Version: 1.0 + +RestoreWorkspace: No +SaveWorkspace: No +AlwaysSaveHistory: Default + +EnableCodeIndexing: Yes +Encoding: UTF-8 + +AutoAppendNewline: Yes +StripTrailingWhitespace: Yes + +BuildType: Package +PackageUseDevtools: Yes +PackageInstallArgs: --no-multiarch --with-keep.source +PackageRoxygenize: rd,collate,namespace diff --git a/tests/testthat/public-api/xyzpackage/tests/testthat/test-package-xyz.R b/tests/testthat/public-api/xyzpackage/tests/testthat/test-package-xyz.R index 23dadd10a..92f42950b 100644 --- a/tests/testthat/public-api/xyzpackage/tests/testthat/test-package-xyz.R +++ b/tests/testthat/public-api/xyzpackage/tests/testthat/test-package-xyz.R @@ -1,5 +1,3 @@ -context("testing styler on package") - test_that("hi there", { I(am(a(package(x)))) }) diff --git a/tests/testthat/reference-objects/cache-info-1 b/tests/testthat/reference-objects/cache-info-1 new file mode 100644 index 000000000..724c65e0a Binary files /dev/null and b/tests/testthat/reference-objects/cache-info-1 differ diff --git a/tests/testthat/reference-objects/cache-info-2 b/tests/testthat/reference-objects/cache-info-2 new file mode 100644 index 000000000..5bf3a665d Binary files /dev/null and b/tests/testthat/reference-objects/cache-info-2 differ diff --git a/tests/testthat/reference-objects/cache-info-3 b/tests/testthat/reference-objects/cache-info-3 new file mode 100644 index 000000000..edda7ba76 Binary files /dev/null and b/tests/testthat/reference-objects/cache-info-3 differ diff --git a/tests/testthat/reference-objects/caching.R b/tests/testthat/reference-objects/caching.R new file mode 100644 index 000000000..a121ebd67 --- /dev/null +++ b/tests/testthat/reference-objects/caching.R @@ -0,0 +1,33 @@ +#' CHan deng +#' +#' Performs various izil +#' @examples +#' zz + 1 +#' \dontrun{ +#' xfun::xxio(fun(77), file) +#' } +#' dplyr::filter(x == 3, zz = max(.data$`5`, na.rom = TRUE)) +#' \dontrun{ +#' unlink(file2) +#' } +#' \dontrun{ +#' { +#' x +#' } +#' unlink(file2) +#' } +xxtt <- function(bli, bla, blup = 3) { + changed <- withr::tzu( + zname(path), + condense_files(x_basename(path), c_transformers) + ) + visible(chan) +} + +g <- 33 + +z <- fun(g, z = xxtt) + +if (not(x) == 9) { + cache_this_file() +} diff --git a/tests/testthat/reference-objects/missing-blank-at-EOF.R b/tests/testthat/reference-objects/missing-blank-at-EOF.R new file mode 100644 index 000000000..c1b0730e0 --- /dev/null +++ 
b/tests/testthat/reference-objects/missing-blank-at-EOF.R @@ -0,0 +1 @@ +x \ No newline at end of file diff --git a/tests/testthat/reference-objects/non-missing-blank-at-EOF.R b/tests/testthat/reference-objects/non-missing-blank-at-EOF.R new file mode 100644 index 000000000..587be6b4c --- /dev/null +++ b/tests/testthat/reference-objects/non-missing-blank-at-EOF.R @@ -0,0 +1 @@ +x diff --git a/tests/testthat/reference-objects/return-read-utf8-missing-EOF b/tests/testthat/reference-objects/return-read-utf8-missing-EOF new file mode 100644 index 000000000..a88c059f9 Binary files /dev/null and b/tests/testthat/reference-objects/return-read-utf8-missing-EOF differ diff --git a/tests/testthat/reference-objects/return-read-utf8-non-missing-EOF b/tests/testthat/reference-objects/return-read-utf8-non-missing-EOF new file mode 100644 index 000000000..1b4ca0d5b Binary files /dev/null and b/tests/testthat/reference-objects/return-read-utf8-non-missing-EOF differ diff --git a/tests/testthat/relocate_eq_assign/eq_assign_ifelse_scope_line_breaks-in_tree b/tests/testthat/relocate_eq_assign/eq_assign_ifelse_scope_line_breaks-in_tree index 1dfb6b1fd..82ec3ce94 100644 --- a/tests/testthat/relocate_eq_assign/eq_assign_ifelse_scope_line_breaks-in_tree +++ b/tests/testthat/relocate_eq_assign/eq_assign_ifelse_scope_line_breaks-in_tree @@ -1,30 +1,30 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {0.9} - ¦ ¦--expr: [0/1] {2} - ¦ ¦ °--SYMBOL: x [0/0] {1} - ¦ ¦--EQ_ASSIGN: = [0/1] {3} - ¦ °--expr: [0/0] {5} - ¦ °--NUM_CONST: 5 [0/0] {4} - °--expr: [2/0] {6} - ¦--IF: if [0/0] {7} - ¦--'(': ( [0/0] {8} - ¦--expr: [0/0] {9} - ¦ ¦--expr: [0/1] {11} - ¦ ¦ °--SYMBOL: x [0/0] {10} - ¦ ¦--GE: >= [0/1] {12} - ¦ °--expr: [0/0] {14} - ¦ °--NUM_CONST: 5 [0/0] {13} - ¦--')': ) [0/2] {15} - ¦--expr: [1/1] {15.9} - ¦ ¦--expr: [0/1] {17} - ¦ ¦ °--SYMBOL: y [0/0] {16} - ¦ ¦--EQ_ASSIGN: = [0/1] {18} - ¦ °--expr: [0/0] {20} - ¦ °--NUM_CONST: TRUE [0/0] {19} - ¦--ELSE: else [0/4] {21} - °--expr: [1/0] {21.9} - ¦--expr: [0/1] {23} - ¦ °--SYMBOL: y [0/0] {22} - ¦--EQ_ASSIGN: = [0/1] {24} - °--expr: [0/0] {26} - °--NUM_CONST: FALSE [0/0] {25} + ¦--expr_or_assign_or_help: x = 5 [0/0] {1} + ¦ ¦--expr: x [0/1] {3} + ¦ ¦ °--SYMBOL: x [0/0] {2} + ¦ ¦--EQ_ASSIGN: = [0/1] {4} + ¦ °--expr: 5 [0/0] {6} + ¦ °--NUM_CONST: 5 [0/0] {5} + °--expr: if(x [2/0] {7} + ¦--IF: if [0/0] {8} + ¦--'(': ( [0/0] {9} + ¦--expr: x >= [0/0] {10} + ¦ ¦--expr: x [0/1] {12} + ¦ ¦ °--SYMBOL: x [0/0] {11} + ¦ ¦--GE: >= [0/1] {13} + ¦ °--expr: 5 [0/0] {15} + ¦ °--NUM_CONST: 5 [0/0] {14} + ¦--')': ) [0/2] {16} + ¦--expr_or_assign_or_help: y = T [1/1] {17} + ¦ ¦--expr: y [0/1] {19} + ¦ ¦ °--SYMBOL: y [0/0] {18} + ¦ ¦--EQ_ASSIGN: = [0/1] {20} + ¦ °--expr: TRUE [0/0] {22} + ¦ °--NUM_CONST: TRUE [0/0] {21} + ¦--ELSE: else [0/4] {23} + °--expr_or_assign_or_help: y = F [1/0] {24} + ¦--expr: y [0/1] {26} + ¦ °--SYMBOL: y [0/0] {25} + ¦--EQ_ASSIGN: = [0/1] {27} + °--expr: FALSE [0/0] {29} + °--NUM_CONST: FALSE [0/0] {28} diff --git a/tests/testthat/relocate_eq_assign/eq_assign_ifelse_scope_tokens-in_tree b/tests/testthat/relocate_eq_assign/eq_assign_ifelse_scope_tokens-in_tree index 1dfb6b1fd..82ec3ce94 100644 --- a/tests/testthat/relocate_eq_assign/eq_assign_ifelse_scope_tokens-in_tree +++ b/tests/testthat/relocate_eq_assign/eq_assign_ifelse_scope_tokens-in_tree @@ -1,30 +1,30 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {0.9} - ¦ ¦--expr: [0/1] {2} - ¦ ¦ °--SYMBOL: x [0/0] {1} - ¦ ¦--EQ_ASSIGN: = [0/1] {3} - ¦ °--expr: 
[0/0] {5} - ¦ °--NUM_CONST: 5 [0/0] {4} - °--expr: [2/0] {6} - ¦--IF: if [0/0] {7} - ¦--'(': ( [0/0] {8} - ¦--expr: [0/0] {9} - ¦ ¦--expr: [0/1] {11} - ¦ ¦ °--SYMBOL: x [0/0] {10} - ¦ ¦--GE: >= [0/1] {12} - ¦ °--expr: [0/0] {14} - ¦ °--NUM_CONST: 5 [0/0] {13} - ¦--')': ) [0/2] {15} - ¦--expr: [1/1] {15.9} - ¦ ¦--expr: [0/1] {17} - ¦ ¦ °--SYMBOL: y [0/0] {16} - ¦ ¦--EQ_ASSIGN: = [0/1] {18} - ¦ °--expr: [0/0] {20} - ¦ °--NUM_CONST: TRUE [0/0] {19} - ¦--ELSE: else [0/4] {21} - °--expr: [1/0] {21.9} - ¦--expr: [0/1] {23} - ¦ °--SYMBOL: y [0/0] {22} - ¦--EQ_ASSIGN: = [0/1] {24} - °--expr: [0/0] {26} - °--NUM_CONST: FALSE [0/0] {25} + ¦--expr_or_assign_or_help: x = 5 [0/0] {1} + ¦ ¦--expr: x [0/1] {3} + ¦ ¦ °--SYMBOL: x [0/0] {2} + ¦ ¦--EQ_ASSIGN: = [0/1] {4} + ¦ °--expr: 5 [0/0] {6} + ¦ °--NUM_CONST: 5 [0/0] {5} + °--expr: if(x [2/0] {7} + ¦--IF: if [0/0] {8} + ¦--'(': ( [0/0] {9} + ¦--expr: x >= [0/0] {10} + ¦ ¦--expr: x [0/1] {12} + ¦ ¦ °--SYMBOL: x [0/0] {11} + ¦ ¦--GE: >= [0/1] {13} + ¦ °--expr: 5 [0/0] {15} + ¦ °--NUM_CONST: 5 [0/0] {14} + ¦--')': ) [0/2] {16} + ¦--expr_or_assign_or_help: y = T [1/1] {17} + ¦ ¦--expr: y [0/1] {19} + ¦ ¦ °--SYMBOL: y [0/0] {18} + ¦ ¦--EQ_ASSIGN: = [0/1] {20} + ¦ °--expr: TRUE [0/0] {22} + ¦ °--NUM_CONST: TRUE [0/0] {21} + ¦--ELSE: else [0/4] {23} + °--expr_or_assign_or_help: y = F [1/0] {24} + ¦--expr: y [0/1] {26} + ¦ °--SYMBOL: y [0/0] {25} + ¦--EQ_ASSIGN: = [0/1] {27} + °--expr: FALSE [0/0] {29} + °--NUM_CONST: FALSE [0/0] {28} diff --git a/tests/testthat/relocate_eq_assign/eq_assign_multiple_tokens_eq_only-in.R b/tests/testthat/relocate_eq_assign/eq_assign_multiple_tokens_eq_only-in.R new file mode 100644 index 000000000..5cde17473 --- /dev/null +++ b/tests/testthat/relocate_eq_assign/eq_assign_multiple_tokens_eq_only-in.R @@ -0,0 +1,4 @@ +a = b = c = d = e = f = g = 4 +a <- 3; b = c = d = ey <- 4 +a <- 3; b = c = d <- ey = 4 +ff = 3; b = c = d = 3 ; g = 4; ge = 5 diff --git a/tests/testthat/relocate_eq_assign/eq_assign_multiple_tokens_eq_only-in_tree b/tests/testthat/relocate_eq_assign/eq_assign_multiple_tokens_eq_only-in_tree new file mode 100644 index 000000000..636622233 --- /dev/null +++ b/tests/testthat/relocate_eq_assign/eq_assign_multiple_tokens_eq_only-in_tree @@ -0,0 +1,103 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr_or_assign_or_help: a = b [0/0] {1} + ¦ ¦--expr: a [0/1] {3} + ¦ ¦ °--SYMBOL: a [0/0] {2} + ¦ ¦--EQ_ASSIGN: = [0/1] {4} + ¦ ¦--expr: b [0/1] {7} + ¦ ¦ °--SYMBOL: b [0/0] {6} + ¦ ¦--EQ_ASSIGN: = [0/1] {8} + ¦ ¦--expr: c [0/1] {11} + ¦ ¦ °--SYMBOL: c [0/0] {10} + ¦ ¦--EQ_ASSIGN: = [0/1] {12} + ¦ ¦--expr: d [0/1] {15} + ¦ ¦ °--SYMBOL: d [0/0] {14} + ¦ ¦--EQ_ASSIGN: = [0/1] {16} + ¦ ¦--expr: e [0/1] {19} + ¦ ¦ °--SYMBOL: e [0/0] {18} + ¦ ¦--EQ_ASSIGN: = [0/1] {20} + ¦ ¦--expr: f [0/7] {23} + ¦ ¦ °--SYMBOL: f [0/0] {22} + ¦ ¦--EQ_ASSIGN: = [0/1] {24} + ¦ ¦--expr: g [0/1] {27} + ¦ ¦ °--SYMBOL: g [0/0] {26} + ¦ ¦--EQ_ASSIGN: = [0/1] {28} + ¦ °--expr: 4 [0/0] {30} + ¦ °--NUM_CONST: 4 [0/0] {29} + ¦--expr: a <- [1/0] {31} + ¦ ¦--expr: a [0/1] {33} + ¦ ¦ °--SYMBOL: a [0/0] {32} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {34} + ¦ °--expr: 3 [0/0] {36} + ¦ °--NUM_CONST: 3 [0/0] {35} + ¦--';': ; [0/1] {37} + ¦--expr_or_assign_or_help: b = c [0/0] {38} + ¦ ¦--expr: b [0/1] {40} + ¦ ¦ °--SYMBOL: b [0/0] {39} + ¦ ¦--EQ_ASSIGN: = [0/1] {41} + ¦ ¦--expr: c [0/1] {44} + ¦ ¦ °--SYMBOL: c [0/0] {43} + ¦ ¦--EQ_ASSIGN: = [0/1] {45} + ¦ ¦--expr: d [0/1] {48} + ¦ ¦ °--SYMBOL: d [0/0] {47} + ¦ ¦--EQ_ASSIGN: = [0/1] {49} + ¦ ¦--expr: ey [0/1] 
{52} + ¦ ¦ °--SYMBOL: ey [0/0] {51} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {53} + ¦ °--expr: 4 [0/0] {55} + ¦ °--NUM_CONST: 4 [0/0] {54} + ¦--expr: a <- [1/0] {56} + ¦ ¦--expr: a [0/1] {58} + ¦ ¦ °--SYMBOL: a [0/0] {57} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {59} + ¦ °--expr: 3 [0/0] {61} + ¦ °--NUM_CONST: 3 [0/0] {60} + ¦--';': ; [0/1] {62} + ¦--expr_or_assign_or_help: b = c [0/0] {63} + ¦ ¦--expr: b [0/1] {65} + ¦ ¦ °--SYMBOL: b [0/0] {64} + ¦ ¦--EQ_ASSIGN: = [0/1] {66} + ¦ ¦--expr: c [0/1] {69} + ¦ ¦ °--SYMBOL: c [0/0] {68} + ¦ ¦--EQ_ASSIGN: = [0/1] {70} + ¦ ¦--expr: d <- [0/1] {72} + ¦ ¦ ¦--expr: d [0/1] {74} + ¦ ¦ ¦ °--SYMBOL: d [0/0] {73} + ¦ ¦ ¦--LEFT_ASSIGN: <- [0/1] {75} + ¦ ¦ °--expr: ey [0/0] {77} + ¦ ¦ °--SYMBOL: ey [0/0] {76} + ¦ ¦--EQ_ASSIGN: = [0/1] {78} + ¦ °--expr: 4 [0/0] {80} + ¦ °--NUM_CONST: 4 [0/0] {79} + ¦--expr_or_assign_or_help: ff = [1/0] {81} + ¦ ¦--expr: ff [0/1] {83} + ¦ ¦ °--SYMBOL: ff [0/0] {82} + ¦ ¦--EQ_ASSIGN: = [0/1] {84} + ¦ °--expr: 3 [0/0] {86} + ¦ °--NUM_CONST: 3 [0/0] {85} + ¦--';': ; [0/1] {87} + ¦--expr_or_assign_or_help: b = c [0/1] {88} + ¦ ¦--expr: b [0/1] {90} + ¦ ¦ °--SYMBOL: b [0/0] {89} + ¦ ¦--EQ_ASSIGN: = [0/1] {91} + ¦ ¦--expr: c [0/1] {94} + ¦ ¦ °--SYMBOL: c [0/0] {93} + ¦ ¦--EQ_ASSIGN: = [0/1] {95} + ¦ ¦--expr: d [0/1] {98} + ¦ ¦ °--SYMBOL: d [0/0] {97} + ¦ ¦--EQ_ASSIGN: = [0/1] {99} + ¦ °--expr: 3 [0/0] {101} + ¦ °--NUM_CONST: 3 [0/0] {100} + ¦--';': ; [0/1] {102} + ¦--expr_or_assign_or_help: g = 4 [0/0] {103} + ¦ ¦--expr: g [0/1] {105} + ¦ ¦ °--SYMBOL: g [0/0] {104} + ¦ ¦--EQ_ASSIGN: = [0/1] {106} + ¦ °--expr: 4 [0/0] {108} + ¦ °--NUM_CONST: 4 [0/0] {107} + ¦--';': ; [0/1] {109} + °--expr_or_assign_or_help: ge = [0/0] {110} + ¦--expr: ge [0/1] {112} + ¦ °--SYMBOL: ge [0/0] {111} + ¦--EQ_ASSIGN: = [0/1] {113} + °--expr: 5 [0/0] {115} + °--NUM_CONST: 5 [0/0] {114} diff --git a/tests/testthat/relocate_eq_assign/eq_assign_multiple_tokens_eq_only-out.R b/tests/testthat/relocate_eq_assign/eq_assign_multiple_tokens_eq_only-out.R new file mode 100644 index 000000000..ecf0b9bc0 --- /dev/null +++ b/tests/testthat/relocate_eq_assign/eq_assign_multiple_tokens_eq_only-out.R @@ -0,0 +1,9 @@ +a <- b <- c <- d <- e <- f <- g <- 4 +a <- 3 +b <- c <- d <- ey <- 4 +a <- 3 +b <- c <- d <- ey <- 4 +ff <- 3 +b <- c <- d <- 3 +g <- 4 +ge <- 5 diff --git a/tests/testthat/relocate_eq_assign/eq_assign_multiple_tokens_mixed-in.R b/tests/testthat/relocate_eq_assign/eq_assign_multiple_tokens_mixed-in.R new file mode 100644 index 000000000..5e3c65b16 --- /dev/null +++ b/tests/testthat/relocate_eq_assign/eq_assign_multiple_tokens_mixed-in.R @@ -0,0 +1 @@ +a = b = c = d = e = f = g <- 4 diff --git a/tests/testthat/relocate_eq_assign/eq_assign_multiple_tokens_mixed-in_tree b/tests/testthat/relocate_eq_assign/eq_assign_multiple_tokens_mixed-in_tree new file mode 100644 index 000000000..12616957c --- /dev/null +++ b/tests/testthat/relocate_eq_assign/eq_assign_multiple_tokens_mixed-in_tree @@ -0,0 +1,25 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + °--expr_or_assign_or_help: a = b [0/0] {1} + ¦--expr: a [0/1] {3} + ¦ °--SYMBOL: a [0/0] {2} + ¦--EQ_ASSIGN: = [0/1] {4} + ¦--expr: b [0/1] {7} + ¦ °--SYMBOL: b [0/0] {6} + ¦--EQ_ASSIGN: = [0/1] {8} + ¦--expr: c [0/1] {11} + ¦ °--SYMBOL: c [0/0] {10} + ¦--EQ_ASSIGN: = [0/1] {12} + ¦--expr: d [0/1] {15} + ¦ °--SYMBOL: d [0/0] {14} + ¦--EQ_ASSIGN: = [0/1] {16} + ¦--expr: e [0/1] {19} + ¦ °--SYMBOL: e [0/0] {18} + ¦--EQ_ASSIGN: = [0/1] {20} + ¦--expr: f [0/7] {23} + ¦ °--SYMBOL: f [0/0] {22} + ¦--EQ_ASSIGN: = [0/1] {24} + ¦--expr: g 
[0/1] {27} + ¦ °--SYMBOL: g [0/0] {26} + ¦--LEFT_ASSIGN: <- [0/1] {28} + °--expr: 4 [0/0] {30} + °--NUM_CONST: 4 [0/0] {29} diff --git a/tests/testthat/relocate_eq_assign/eq_assign_multiple_tokens_mixed-out.R b/tests/testthat/relocate_eq_assign/eq_assign_multiple_tokens_mixed-out.R new file mode 100644 index 000000000..43d200b0e --- /dev/null +++ b/tests/testthat/relocate_eq_assign/eq_assign_multiple_tokens_mixed-out.R @@ -0,0 +1 @@ +a <- b <- c <- d <- e <- f <- g <- 4 diff --git a/tests/testthat/rmd/invalid-in.Rmd b/tests/testthat/rmd/invalid-in.Rmd new file mode 100644 index 000000000..e5f7a4e05 --- /dev/null +++ b/tests/testthat/rmd/invalid-in.Rmd @@ -0,0 +1,64 @@ +--- + title: x + +--- + this + +```{r} +test + f(1) +``` + + +```{r, tidy = FALSE} +1, , ___ +``` + +```{r} +test + f(1) +``` + +```{r, A = ', tidy = FALSE,'} +knitr::opts_chunk$set(echo = TRUE ) +``` + +```{r, A = ', tidy = FALSE}'} +knitr::opts_chunk$set(echo = TRUE ) +``` + +```{r, A = ', tidy = FALSE'} +knitr::opts_chunk$set(echo = TRUE ) +``` + +```{r, A = 'tidy = FALSE, and more'} +knitr::opts_chunk$set(echo = TRUE ) +``` + + +```{r, tidy = TRUE} +knitr::opts_chunk$set(echo = TRUE ) +``` + +```{r, a = ',tidy = FALSE'} +knitr::opts_chunk$set(echo = TRUE ) +``` + +```{r, a = ',tidy = TRUE'} +knitr::opts_chunk$set(echo = TRUE ) +``` + + +```{r, tidy = FALSE, aniopts= 3} +knitr::opts_chunk$set(echo = TRUE ) +``` + +```{r, tidy= FALSE, aniopts= 3} +knitr::opts_chunk$set(echo = TRUE ) +``` + +```{r, tidy =FALSE, aniopts= 3} +knitr::opts_chunk$set(echo = TRUE ) +``` + +```{r, aniopts= 3, tidy = FALSE} +knitr::opts_chunk$set(echo = TRUE ) +``` diff --git a/tests/testthat/rmd/invalid-out.Rmd b/tests/testthat/rmd/invalid-out.Rmd new file mode 100644 index 000000000..58607b386 --- /dev/null +++ b/tests/testthat/rmd/invalid-out.Rmd @@ -0,0 +1,64 @@ +--- + title: x + +--- + this + +```{r} +test + f(1) +``` + + +```{r, tidy = FALSE} +1, , ___ +``` + +```{r} +test + f(1) +``` + +```{r, A = ', tidy = FALSE,'} +knitr::opts_chunk$set(echo = TRUE ) +``` + +```{r, A = ', tidy = FALSE}'} +knitr::opts_chunk$set(echo = TRUE) +``` + +```{r, A = ', tidy = FALSE'} +knitr::opts_chunk$set(echo = TRUE) +``` + +```{r, A = 'tidy = FALSE, and more'} +knitr::opts_chunk$set(echo = TRUE) +``` + + +```{r, tidy = TRUE} +knitr::opts_chunk$set(echo = TRUE) +``` + +```{r, a = ',tidy = FALSE'} +knitr::opts_chunk$set(echo = TRUE) +``` + +```{r, a = ',tidy = TRUE'} +knitr::opts_chunk$set(echo = TRUE) +``` + + +```{r, tidy = FALSE, aniopts= 3} +knitr::opts_chunk$set(echo = TRUE ) +``` + +```{r, tidy= FALSE, aniopts= 3} +knitr::opts_chunk$set(echo = TRUE ) +``` + +```{r, tidy =FALSE, aniopts= 3} +knitr::opts_chunk$set(echo = TRUE ) +``` + +```{r, aniopts= 3, tidy = FALSE} +knitr::opts_chunk$set(echo = TRUE ) +``` diff --git a/tests/testthat/rmd/nested-2-in.Rmd b/tests/testthat/rmd/nested-2-in.Rmd new file mode 100644 index 000000000..5380e439c --- /dev/null +++ b/tests/testthat/rmd/nested-2-in.Rmd @@ -0,0 +1,18 @@ +--- +title: x + +--- +this + +```{r} +1 +1 #co +``` + + +````md +`r ""` ```{r, highlight.output=c(1, 3)} +head(iris) +``` +```` + +More text diff --git a/tests/testthat/rmd/nested-2-out.Rmd b/tests/testthat/rmd/nested-2-out.Rmd new file mode 100644 index 000000000..7f99c8c57 --- /dev/null +++ b/tests/testthat/rmd/nested-2-out.Rmd @@ -0,0 +1,18 @@ +--- +title: x + +--- +this + +```{r} +1 + 1 # co +``` + + +````md +`r ""` ```{r, highlight.output=c(1, 3)} +head(iris) +``` +```` + +More text diff --git a/tests/testthat/rmd/nested-in.Rmd 
b/tests/testthat/rmd/nested-in.Rmd new file mode 100644 index 000000000..46aefc718 --- /dev/null +++ b/tests/testthat/rmd/nested-in.Rmd @@ -0,0 +1,7 @@ +this + +````md +`r Sys.Date() ````{r, highlight.output=c(1, 3)} +head(iris) +``` +```` diff --git a/tests/testthat/rmd/nested-out.Rmd b/tests/testthat/rmd/nested-out.Rmd new file mode 100644 index 000000000..46aefc718 --- /dev/null +++ b/tests/testthat/rmd/nested-out.Rmd @@ -0,0 +1,7 @@ +this + +````md +`r Sys.Date() ````{r, highlight.output=c(1, 3)} +head(iris) +``` +```` diff --git a/tests/testthat/rmd/r_and_non_r_code_chunks-in.Rmd b/tests/testthat/rmd/r_and_non_r_code_chunks-in.Rmd new file mode 100644 index 000000000..8360de933 --- /dev/null +++ b/tests/testthat/rmd/r_and_non_r_code_chunks-in.Rmd @@ -0,0 +1,35 @@ +# Header + +Some text. + +```{R} +1+1 +``` + +```{r} +1+1 +``` + +```{r, a = 3} +1+1 +``` + +```{ r } +1+1 +``` + +More text. + +```{nonR} +I like the jungle, I like the .2' +``` + +```{blabla } +I like the jungle, +``` + + +Code without engine +``` +1+1 +``` diff --git a/tests/testthat/rmd/r_and_non_r_code_chunks-out.Rmd b/tests/testthat/rmd/r_and_non_r_code_chunks-out.Rmd new file mode 100644 index 000000000..0e76de3f2 --- /dev/null +++ b/tests/testthat/rmd/r_and_non_r_code_chunks-out.Rmd @@ -0,0 +1,35 @@ +# Header + +Some text. + +```{R} +1 + 1 +``` + +```{r} +1 + 1 +``` + +```{r, a = 3} +1 + 1 +``` + +```{ r } +1+1 +``` + +More text. + +```{nonR} +I like the jungle, I like the .2' +``` + +```{blabla } +I like the jungle, +``` + + +Code without engine +``` +1+1 +``` diff --git a/tests/testthat/rmd/random3-in.Rmd b/tests/testthat/rmd/random3-in.Rmd new file mode 100644 index 000000000..5ea72f5f4 --- /dev/null +++ b/tests/testthat/rmd/random3-in.Rmd @@ -0,0 +1,17 @@ +Some text +```{r} +# Some R code +f <- function(x) { + x +} +``` +More text before malformed chunk +# More R code +g <- function(y) { + y +} +``` +Final text +```{r} +1 + 2 +``` diff --git a/tests/testthat/rmd/random3-out.Rmd b/tests/testthat/rmd/random3-out.Rmd new file mode 100644 index 000000000..5ea72f5f4 --- /dev/null +++ b/tests/testthat/rmd/random3-out.Rmd @@ -0,0 +1,17 @@ +Some text +```{r} +# Some R code +f <- function(x) { + x +} +``` +More text before malformed chunk +# More R code +g <- function(y) { + y +} +``` +Final text +```{r} +1 + 2 +``` diff --git a/tests/testthat/rmd/random5-in.Rmd b/tests/testthat/rmd/random5-in.Rmd new file mode 100644 index 000000000..d7ff4b206 --- /dev/null +++ b/tests/testthat/rmd/random5-in.Rmd @@ -0,0 +1,24 @@ +Some text +```{r} +# Some R code +f <-function(x) { + x +} +``` +More text before malformed chunk + +# More R code +g <- function(y) { + y +} +``` +Final text +```{r} +f +``` +Final text +```md +````{r} +1 + 2 +```` +``` diff --git a/tests/testthat/rmd/random5-out.Rmd b/tests/testthat/rmd/random5-out.Rmd new file mode 100644 index 000000000..4a90fd2fe --- /dev/null +++ b/tests/testthat/rmd/random5-out.Rmd @@ -0,0 +1,24 @@ +Some text +```{r} +# Some R code +f <- function(x) { + x +} +``` +More text before malformed chunk + +# More R code +g <- function(y) { + y +} +``` +Final text +```{r} +f +``` +Final text +```md +````{r} +1 + 2 +```` +``` diff --git a/tests/testthat/rmd/random6-in.Rmd b/tests/testthat/rmd/random6-in.Rmd new file mode 100644 index 000000000..783c25cdb --- /dev/null +++ b/tests/testthat/rmd/random6-in.Rmd @@ -0,0 +1,20 @@ +Some text +```{r} +# Some R code +f <- function(x) { + x +} +``` + + +Final text +```{r} + 33 +``` + + +Final text +```{SQL} +```{python} +1 + 2 +``` diff --git 
a/tests/testthat/rmd/random6-out.Rmd b/tests/testthat/rmd/random6-out.Rmd new file mode 100644 index 000000000..917c747c0 --- /dev/null +++ b/tests/testthat/rmd/random6-out.Rmd @@ -0,0 +1,20 @@ +Some text +```{r} +# Some R code +f <- function(x) { + x +} +``` + + +Final text +```{r} +33 +``` + + +Final text +```{SQL} +```{python} +1 + 2 +``` diff --git a/tests/testthat/rmd/random7-in.Rmd b/tests/testthat/rmd/random7-in.Rmd new file mode 100644 index 000000000..7c6732108 --- /dev/null +++ b/tests/testthat/rmd/random7-in.Rmd @@ -0,0 +1,14 @@ + +Empty +```{r} + +``` + +Empty 2 +```{r} + +``` + +Empty +```{r} +``` diff --git a/tests/testthat/rmd/random7-out.Rmd b/tests/testthat/rmd/random7-out.Rmd new file mode 100644 index 000000000..6d0bb1c0f --- /dev/null +++ b/tests/testthat/rmd/random7-out.Rmd @@ -0,0 +1,12 @@ + +Empty +```{r} +``` + +Empty 2 +```{r} +``` + +Empty +```{r} +``` diff --git a/tests/testthat/rnw/008-outdec-in.Rnw b/tests/testthat/rnw/008-outdec-in.Rnw new file mode 100644 index 000000000..3d947a584 --- /dev/null +++ b/tests/testthat/rnw/008-outdec-in.Rnw @@ -0,0 +1,27 @@ +\documentclass{article} +\usepackage{amsmath} +\begin{document} + +When the option OutDec is not \texttt{.}, put numbers in \texttt{\textbackslash{}text}. See \#348. + +<<>>= +options(OutDec=",") +@ + +This is the first test. abc \Sexpr{0.6} def + +another test $a = \Sexpr{0.6}$. + +and the last one $a = \Sexpr{'0.6'}$. + +<<>>= +options(OutDec=".") +@ + +This is the first test. abc \Sexpr{0.6} def + +another test $a = \Sexpr{0.6}$. + +and the last one $a = \Sexpr{'0.6'}$. + +\end{document} diff --git a/tests/testthat/rnw/008-outdec-out.Rnw b/tests/testthat/rnw/008-outdec-out.Rnw new file mode 100644 index 000000000..78544f2a1 --- /dev/null +++ b/tests/testthat/rnw/008-outdec-out.Rnw @@ -0,0 +1,27 @@ +\documentclass{article} +\usepackage{amsmath} +\begin{document} + +When the option OutDec is not \texttt{.}, put numbers in \texttt{\textbackslash{}text}. See \#348. + +<<>>= +options(OutDec = ",") +@ + +This is the first test. abc \Sexpr{0.6} def + +another test $a = \Sexpr{0.6}$. + +and the last one $a = \Sexpr{'0.6'}$. + +<<>>= +options(OutDec = ".") +@ + +This is the first test. abc \Sexpr{0.6} def + +another test $a = \Sexpr{0.6}$. + +and the last one $a = \Sexpr{'0.6'}$. 
+ +\end{document} diff --git a/tests/testthat/rnw/011-conditional-eval-in.Rnw b/tests/testthat/rnw/011-conditional-eval-in.Rnw new file mode 100644 index 000000000..e33cd957e --- /dev/null +++ b/tests/testthat/rnw/011-conditional-eval-in.Rnw @@ -0,0 +1,18 @@ +\documentclass{article} + +\begin{document} + +<>= +# this variable controls if a chunk should be evaluated +dothis <- TRUE +@ + +<>= +print( "say hello world" ) +@ + +<>= +print( "silence is gold" ) +@ + +\end{document} diff --git a/tests/testthat/rnw/011-conditional-eval-out.Rnw b/tests/testthat/rnw/011-conditional-eval-out.Rnw new file mode 100644 index 000000000..67c6d81a1 --- /dev/null +++ b/tests/testthat/rnw/011-conditional-eval-out.Rnw @@ -0,0 +1,18 @@ +\documentclass{article} + +\begin{document} + +<>= +# this variable controls if a chunk should be evaluated +dothis <- TRUE +@ + +<>= +print("say hello world") +@ + +<>= +print("silence is gold") +@ + +\end{document} diff --git a/tests/testthat/roxygen-examples-complete/01-one-function-example-last-proper-run-in.R b/tests/testthat/roxygen-examples-complete/01-one-function-example-last-proper-run-in.R new file mode 100644 index 000000000..5087af071 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/01-one-function-example-last-proper-run-in.R @@ -0,0 +1,7 @@ +#' Prettify R source code +#' +#' Performs various substitutions in all `.R` files in a package +#' (code and tests). +#' Carefully examine the results after running this function! +#'@examples style_pkg(style= tidyverse_style, strict = TRUE) +a <- 2 diff --git a/tests/testthat/roxygen-examples-complete/01-one-function-example-last-proper-run-in_tree b/tests/testthat/roxygen-examples-complete/01-one-function-example-last-proper-run-in_tree new file mode 100644 index 000000000..2cb3e2b3d --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/01-one-function-example-last-proper-run-in_tree @@ -0,0 +1,13 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' Pr [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' Pe [1/0] {3} + ¦--COMMENT: #' (c [1/0] {4} + ¦--COMMENT: #' Ca [1/0] {5} + ¦--COMMENT: #'@ex [1/0] {6} + °--expr: a <- [1/0] {7} + ¦--expr: a [0/1] {9} + ¦ °--SYMBOL: a [0/0] {8} + ¦--LEFT_ASSIGN: <- [0/1] {10} + °--expr: 2 [0/0] {12} + °--NUM_CONST: 2 [0/0] {11} diff --git a/tests/testthat/roxygen-examples-complete/01-one-function-example-last-proper-run-out.R b/tests/testthat/roxygen-examples-complete/01-one-function-example-last-proper-run-out.R new file mode 100644 index 000000000..f8e0f4624 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/01-one-function-example-last-proper-run-out.R @@ -0,0 +1,7 @@ +#' Prettify R source code +#' +#' Performs various substitutions in all `.R` files in a package +#' (code and tests). +#' Carefully examine the results after running this function! +#' @examples style_pkg(style = tidyverse_style, strict = TRUE) +a <- 2 diff --git a/tests/testthat/roxygen-examples-complete/02-one-function-examples-last-proper-run-in.R b/tests/testthat/roxygen-examples-complete/02-one-function-examples-last-proper-run-in.R new file mode 100644 index 000000000..7cf13d129 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/02-one-function-examples-last-proper-run-in.R @@ -0,0 +1,12 @@ +#' Prettify R source code +#' +#' Performs various substitutions in all `.R` files in a package +#' (code and tests). +#' Carefully examine the results after running this function! 
+#' @examples +#' style_pkg(style = tidyverse_style, strict = TRUE) +#' style_pkg( +#' scope ="line_breaks", +#' math_token_spacing = specify_math_token_spacing( zero = "'+'") +#' ) +a = call diff --git a/tests/testthat/roxygen-examples-complete/02-one-function-examples-last-proper-run-in_tree b/tests/testthat/roxygen-examples-complete/02-one-function-examples-last-proper-run-in_tree new file mode 100644 index 000000000..93f13fecb --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/02-one-function-examples-last-proper-run-in_tree @@ -0,0 +1,18 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' Pr [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' Pe [1/0] {3} + ¦--COMMENT: #' (c [1/0] {4} + ¦--COMMENT: #' Ca [1/0] {5} + ¦--COMMENT: #' @e [1/0] {6} + ¦--COMMENT: #' st [1/0] {7} + ¦--COMMENT: #' st [1/0] {8} + ¦--COMMENT: #' [1/0] {9} + ¦--COMMENT: #' [1/0] {10} + ¦--COMMENT: #' ) [1/0] {11} + °--expr_or_assign_or_help: a = c [1/0] {12} + ¦--expr: a [0/1] {14} + ¦ °--SYMBOL: a [0/0] {13} + ¦--EQ_ASSIGN: = [0/1] {15} + °--expr: call [0/0] {17} + °--SYMBOL: call [0/0] {16} diff --git a/tests/testthat/roxygen-examples-complete/02-one-function-examples-last-proper-run-out.R b/tests/testthat/roxygen-examples-complete/02-one-function-examples-last-proper-run-out.R new file mode 100644 index 000000000..5d3d4873e --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/02-one-function-examples-last-proper-run-out.R @@ -0,0 +1,12 @@ +#' Prettify R source code +#' +#' Performs various substitutions in all `.R` files in a package +#' (code and tests). +#' Carefully examine the results after running this function! +#' @examples +#' style_pkg(style = tidyverse_style, strict = TRUE) +#' style_pkg( +#' scope = "line_breaks", +#' math_token_spacing = specify_math_token_spacing(zero = "'+'") +#' ) +a <- call diff --git a/tests/testthat/roxygen-examples-complete/03-one-function-example-not-last-proper-run-in.R b/tests/testthat/roxygen-examples-complete/03-one-function-example-not-last-proper-run-in.R new file mode 100644 index 000000000..3dce9f800 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/03-one-function-example-not-last-proper-run-in.R @@ -0,0 +1,7 @@ +#' Prettify R source code +#' +#' Performs various substitutions in all `.R` files in a package... +#' Carefully examine the results after running this function! 
+#' @examples style_pkg(style = tidyverse_style , strict =TRUE) +#' @name k +a<- 2 diff --git a/tests/testthat/roxygen-examples-complete/03-one-function-example-not-last-proper-run-in_tree b/tests/testthat/roxygen-examples-complete/03-one-function-example-not-last-proper-run-in_tree new file mode 100644 index 000000000..c7da21279 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/03-one-function-example-not-last-proper-run-in_tree @@ -0,0 +1,13 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' Pr [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' Pe [1/0] {3} + ¦--COMMENT: #' Ca [1/0] {4} + ¦--COMMENT: #' @e [1/0] {5} + ¦--COMMENT: #' @n [1/0] {6} + °--expr: a<- 2 [1/0] {7} + ¦--expr: a [0/0] {9} + ¦ °--SYMBOL: a [0/0] {8} + ¦--LEFT_ASSIGN: <- [0/1] {10} + °--expr: 2 [0/0] {12} + °--NUM_CONST: 2 [0/0] {11} diff --git a/tests/testthat/roxygen-examples-complete/03-one-function-example-not-last-proper-run-out.R b/tests/testthat/roxygen-examples-complete/03-one-function-example-not-last-proper-run-out.R new file mode 100644 index 000000000..5c4814dbf --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/03-one-function-example-not-last-proper-run-out.R @@ -0,0 +1,7 @@ +#' Prettify R source code +#' +#' Performs various substitutions in all `.R` files in a package... +#' Carefully examine the results after running this function! +#' @examples style_pkg(style = tidyverse_style, strict = TRUE) +#' @name k +a <- 2 diff --git a/tests/testthat/roxygen-examples-complete/04-one-function-examples-not-last-proper-run-in.R b/tests/testthat/roxygen-examples-complete/04-one-function-examples-not-last-proper-run-in.R new file mode 100644 index 000000000..295aa90b4 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/04-one-function-examples-not-last-proper-run-in.R @@ -0,0 +1,12 @@ +#' The tidyverse style +#' +#' Style code according to the tidyverse style guide. 
+#' @family style_guides +#' @examples +#' style_text("call( 1)", style = tidyverse_style, scope = "spaces") +#' style_text("call( 1)", transformers = tidyverse_style(strict = TRUE)) +#' style_text(c("ab <- 3","a <-3"), strict = FALSE) # keeps alignment of "<-" +#' style_text(c("ab <- 3", "a <-3"), strict = TRUE) # drops alignment of "<-" +#' @importFrom purrr partial +#' @export +a<-call diff --git a/tests/testthat/roxygen-examples-complete/04-one-function-examples-not-last-proper-run-in_tree b/tests/testthat/roxygen-examples-complete/04-one-function-examples-not-last-proper-run-in_tree new file mode 100644 index 000000000..d33a983ff --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/04-one-function-examples-not-last-proper-run-in_tree @@ -0,0 +1,18 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' Th [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' St [1/0] {3} + ¦--COMMENT: #' @f [1/0] {4} + ¦--COMMENT: #' @e [1/0] {5} + ¦--COMMENT: #' st [1/0] {6} + ¦--COMMENT: #' st [1/0] {7} + ¦--COMMENT: #' st [1/0] {8} + ¦--COMMENT: #' st [1/0] {9} + ¦--COMMENT: #' @i [1/0] {10} + ¦--COMMENT: #' @e [1/0] {11} + °--expr: a<-ca [1/0] {12} + ¦--expr: a [0/0] {14} + ¦ °--SYMBOL: a [0/0] {13} + ¦--LEFT_ASSIGN: <- [0/0] {15} + °--expr: call [0/0] {17} + °--SYMBOL: call [0/0] {16} diff --git a/tests/testthat/roxygen-examples-complete/04-one-function-examples-not-last-proper-run-out.R b/tests/testthat/roxygen-examples-complete/04-one-function-examples-not-last-proper-run-out.R new file mode 100644 index 000000000..81a774367 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/04-one-function-examples-not-last-proper-run-out.R @@ -0,0 +1,12 @@ +#' The tidyverse style +#' +#' Style code according to the tidyverse style guide. +#' @family style_guides +#' @examples +#' style_text("call( 1)", style = tidyverse_style, scope = "spaces") +#' style_text("call( 1)", transformers = tidyverse_style(strict = TRUE)) +#' style_text(c("ab <- 3", "a <-3"), strict = FALSE) # keeps alignment of "<-" +#' style_text(c("ab <- 3", "a <-3"), strict = TRUE) # drops alignment of "<-" +#' @importFrom purrr partial +#' @export +a <- call diff --git a/tests/testthat/roxygen-examples-complete/05-multiple-function-examples-last-proper-run-in.R b/tests/testthat/roxygen-examples-complete/05-multiple-function-examples-last-proper-run-in.R new file mode 100644 index 000000000..0b8c3748d --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/05-multiple-function-examples-last-proper-run-in.R @@ -0,0 +1,18 @@ +#' The tidyverse style +#' +#' Style code according to the tidyverse style guide. +#' @family style_guides +#' @examples +#' style_text("call( 1)", style = tidyverse_style, scope = "spaces") +#' style_text("call( 1)", transformers = tidyverse_style(strict = TRUE)) +#' style_text(c("ab <- 3", "a <-3"), strict = FALSE)# keeps alignment of "<-" +#' style_text(c("ab <- 3", "a <-3"), strict = TRUE) # drops alignment of "<-" +a <- call + +#' Prettify R source code +#' +#' Performs various substitutions in all `.R` files in a package +#' (code and tests). +#' Carefully examine the results after running this function! 
+#' @examples style_pkg(style = tidyverse_style, strict = TRUE) +a <- 2 diff --git a/tests/testthat/roxygen-examples-complete/05-multiple-function-examples-last-proper-run-in_tree b/tests/testthat/roxygen-examples-complete/05-multiple-function-examples-last-proper-run-in_tree new file mode 100644 index 000000000..a7eacdfce --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/05-multiple-function-examples-last-proper-run-in_tree @@ -0,0 +1,28 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' Th [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' St [1/0] {3} + ¦--COMMENT: #' @f [1/0] {4} + ¦--COMMENT: #' @e [1/0] {5} + ¦--COMMENT: #' st [1/0] {6} + ¦--COMMENT: #' st [1/0] {7} + ¦--COMMENT: #' st [1/0] {8} + ¦--COMMENT: #' st [1/0] {9} + ¦--expr: a <- [1/0] {10} + ¦ ¦--expr: a [0/1] {12} + ¦ ¦ °--SYMBOL: a [0/0] {11} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {13} + ¦ °--expr: call [0/0] {15} + ¦ °--SYMBOL: call [0/0] {14} + ¦--COMMENT: #' Pr [2/0] {16} + ¦--COMMENT: #' [1/0] {17} + ¦--COMMENT: #' Pe [1/0] {18} + ¦--COMMENT: #' (c [1/0] {19} + ¦--COMMENT: #' Ca [1/0] {20} + ¦--COMMENT: #' @e [1/0] {21} + °--expr: a <- [1/0] {22} + ¦--expr: a [0/1] {24} + ¦ °--SYMBOL: a [0/0] {23} + ¦--LEFT_ASSIGN: <- [0/3] {25} + °--expr: 2 [0/0] {27} + °--NUM_CONST: 2 [0/0] {26} diff --git a/tests/testthat/roxygen-examples-complete/05-multiple-function-examples-last-proper-run-out.R b/tests/testthat/roxygen-examples-complete/05-multiple-function-examples-last-proper-run-out.R new file mode 100644 index 000000000..c35de4e6e --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/05-multiple-function-examples-last-proper-run-out.R @@ -0,0 +1,18 @@ +#' The tidyverse style +#' +#' Style code according to the tidyverse style guide. +#' @family style_guides +#' @examples +#' style_text("call( 1)", style = tidyverse_style, scope = "spaces") +#' style_text("call( 1)", transformers = tidyverse_style(strict = TRUE)) +#' style_text(c("ab <- 3", "a <-3"), strict = FALSE) # keeps alignment of "<-" +#' style_text(c("ab <- 3", "a <-3"), strict = TRUE) # drops alignment of "<-" +a <- call + +#' Prettify R source code +#' +#' Performs various substitutions in all `.R` files in a package +#' (code and tests). +#' Carefully examine the results after running this function! +#' @examples style_pkg(style = tidyverse_style, strict = TRUE) +a <- 2 diff --git a/tests/testthat/roxygen-examples-complete/06-multiple-function-examples-no-last-run-in.R b/tests/testthat/roxygen-examples-complete/06-multiple-function-examples-no-last-run-in.R new file mode 100644 index 000000000..9b8c45923 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/06-multiple-function-examples-no-last-run-in.R @@ -0,0 +1,22 @@ +#' Prettify R source code +#' +#' Performs various substitutions in all `.R` files in a package... +#' Carefully examine the results after running this function! +#' @examples style_pkg(style = +#' tidyverse_style, strict = TRUE) +#' @name k +a <- 2 + +#' The tidyverse style +#' +#' Style code according to the tidyverse style guide. 
+#' @family style_guides +#' @examples +#' style_text("call( 1)", style = tidyverse_style, scope = "spaces") +#' style_text("call( 1)", transformers = tidyverse_style(strict = TRUE)) +#' style_text( +#' c("ab <- 3", "a <-3"), strict = FALSE) # keeps alignment of "<-" +#' style_text(c("ab <- 3", "a <-3"), strict = TRUE) # drops alignment of "<-" +#' @importFrom purrr partial +#' @export +a <- call; diff --git a/tests/testthat/roxygen-examples-complete/06-multiple-function-examples-no-last-run-in_tree b/tests/testthat/roxygen-examples-complete/06-multiple-function-examples-no-last-run-in_tree new file mode 100644 index 000000000..a7dcea705 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/06-multiple-function-examples-no-last-run-in_tree @@ -0,0 +1,33 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' Pr [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' Pe [1/0] {3} + ¦--COMMENT: #' Ca [1/0] {4} + ¦--COMMENT: #' @e [1/0] {5} + ¦--COMMENT: #' ti [1/0] {6} + ¦--COMMENT: #' @n [1/0] {7} + ¦--expr: a <- [1/0] {8} + ¦ ¦--expr: a [0/1] {10} + ¦ ¦ °--SYMBOL: a [0/0] {9} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {11} + ¦ °--expr: 2 [0/0] {13} + ¦ °--NUM_CONST: 2 [0/0] {12} + ¦--COMMENT: #' Th [2/0] {14} + ¦--COMMENT: #' [1/0] {15} + ¦--COMMENT: #' St [1/0] {16} + ¦--COMMENT: #' @f [1/0] {17} + ¦--COMMENT: #' @e [1/0] {18} + ¦--COMMENT: #' st [1/0] {19} + ¦--COMMENT: #' st [1/0] {20} + ¦--COMMENT: #' st [1/0] {21} + ¦--COMMENT: #' c( [1/0] {22} + ¦--COMMENT: #' st [1/0] {23} + ¦--COMMENT: #' @i [1/0] {24} + ¦--COMMENT: #' @e [1/0] {25} + ¦--expr: a [1/0] {26} + ¦ ¦--expr: a [0/5] {28} + ¦ ¦ °--SYMBOL: a [0/0] {27} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {29} + ¦ °--expr: call [0/0] {31} + ¦ °--SYMBOL: call [0/0] {30} + °--';': ; [0/0] {32} diff --git a/tests/testthat/roxygen-examples-complete/06-multiple-function-examples-no-last-run-out.R b/tests/testthat/roxygen-examples-complete/06-multiple-function-examples-no-last-run-out.R new file mode 100644 index 000000000..edda23f97 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/06-multiple-function-examples-no-last-run-out.R @@ -0,0 +1,26 @@ +#' Prettify R source code +#' +#' Performs various substitutions in all `.R` files in a package... +#' Carefully examine the results after running this function! +#' @examples style_pkg( +#' style = +#' tidyverse_style, strict = TRUE +#' ) +#' @name k +a <- 2 + +#' The tidyverse style +#' +#' Style code according to the tidyverse style guide. +#' @family style_guides +#' @examples +#' style_text("call( 1)", style = tidyverse_style, scope = "spaces") +#' style_text("call( 1)", transformers = tidyverse_style(strict = TRUE)) +#' style_text( +#' c("ab <- 3", "a <-3"), +#' strict = FALSE +#' ) # keeps alignment of "<-" +#' style_text(c("ab <- 3", "a <-3"), strict = TRUE) # drops alignment of "<-" +#' @importFrom purrr partial +#' @export +a <- call diff --git a/tests/testthat/roxygen-examples-complete/07-roxygen-no-dontrun-in.R b/tests/testthat/roxygen-examples-complete/07-roxygen-no-dontrun-in.R new file mode 100644 index 000000000..e8b130652 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/07-roxygen-no-dontrun-in.R @@ -0,0 +1,33 @@ +#' Style `.R` and/or `.Rmd` files +#' +#' Performs various substitutions in the files specified. +#' Carefully examine the results after running this function! +#' @param path A character vector with paths to files to style. 
+#' @inheritParams style_pkg +#' @inheritSection transform_files Value +#' @inheritSection style_pkg Warning +#' @inheritSection style_pkg Roundtrip Validation +#' @examples +#' # the following is identical but the former is more convenient: +#' file<- tempfile("styler", +#' fileext = ".R") +#' xfun::write_utf8("1++1", file) +#' style_file( +#' file, style = tidyverse_style, strict = TRUE) +#' style_file(file, transformers = tidyverse_style(strict = TRUE)) +#' xfun::read_utf8(file) +#' unlink(file2) +#' @family stylers +#' @export +style_file <- function(path, + ... , + style = tidyverse_style, + transformers = style(...), + include_roxygen_examples = TRUE) { + changed<- withr::with_dir( + dirname(path + ), + transform_files(basename(path), transformers) + ) + invisible(changed) +} diff --git a/tests/testthat/roxygen-examples-complete/07-roxygen-no-dontrun-in_tree b/tests/testthat/roxygen-examples-complete/07-roxygen-no-dontrun-in_tree new file mode 100644 index 000000000..399feb009 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/07-roxygen-no-dontrun-in_tree @@ -0,0 +1,98 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' St [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' Pe [1/0] {3} + ¦--COMMENT: #' [1/0] {4} + ¦--COMMENT: #' @p [1/0] {5} + ¦--COMMENT: #' @i [1/0] {6} + ¦--COMMENT: #' @i [1/0] {7} + ¦--COMMENT: #' @i [1/0] {8} + ¦--COMMENT: #' @i [1/0] {9} + ¦--COMMENT: #' @e [1/0] {10} + ¦--COMMENT: #' # [1/0] {11} + ¦--COMMENT: #' fi [1/0] {12} + ¦--COMMENT: #' fi [1/0] {13} + ¦--COMMENT: #' xf [1/0] {14} + ¦--COMMENT: #' st [1/0] {15} + ¦--COMMENT: #' fi [1/0] {16} + ¦--COMMENT: #' st [1/0] {17} + ¦--COMMENT: #' xf [1/0] {18} + ¦--COMMENT: #' un [1/0] {19} + ¦--COMMENT: #' @f [1/0] {20} + ¦--COMMENT: #' @e [1/0] {21} + °--expr: style [1/0] {22} + ¦--expr: style [0/1] {24} + ¦ °--SYMBOL: style [0/0] {23} + ¦--LEFT_ASSIGN: <- [0/1] {25} + °--expr: funct [0/0] {26} + ¦--FUNCTION: funct [0/0] {27} + ¦--'(': ( [0/0] {28} + ¦--SYMBOL_FORMALS: path [0/0] {29} + ¦--',': , [0/23] {30} + ¦--SYMBOL_FORMALS: ... [1/1] {31} + ¦--',': , [0/23] {32} + ¦--SYMBOL_FORMALS: style [1/1] {33} + ¦--EQ_FORMALS: = [0/1] {34} + ¦--expr: tidyv [0/0] {36} + ¦ °--SYMBOL: tidyv [0/0] {35} + ¦--',': , [0/23] {37} + ¦--SYMBOL_FORMALS: trans [1/1] {38} + ¦--EQ_FORMALS: = [0/1] {39} + ¦--expr: style [0/0] {40} + ¦ ¦--expr: style [0/0] {42} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: style [0/0] {41} + ¦ ¦--'(': ( [0/0] {43} + ¦ ¦--expr: ... [0/0] {45} + ¦ ¦ °--SYMBOL: ... 
[0/0] {44} + ¦ °--')': ) [0/0] {46} + ¦--',': , [0/23] {47} + ¦--SYMBOL_FORMALS: inclu [1/1] {48} + ¦--EQ_FORMALS: = [0/1] {49} + ¦--expr: TRUE [0/0] {51} + ¦ °--NUM_CONST: TRUE [0/0] {50} + ¦--')': ) [0/1] {52} + °--expr: { + c [0/0] {53} + ¦--'{': { [0/2] {54} + ¦--expr: chang [1/2] {55} + ¦ ¦--expr: chang [0/0] {57} + ¦ ¦ °--SYMBOL: chang [0/0] {56} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {58} + ¦ °--expr: withr [0/0] {59} + ¦ ¦--expr: withr [0/0] {60} + ¦ ¦ ¦--SYMBOL_PACKAGE: withr [0/0] {61} + ¦ ¦ ¦--NS_GET: :: [0/0] {62} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: with_ [0/0] {63} + ¦ ¦--'(': ( [0/4] {64} + ¦ ¦--expr: dirna [1/0] {65} + ¦ ¦ ¦--expr: dirna [0/0] {67} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: dirna [0/0] {66} + ¦ ¦ ¦--'(': ( [0/0] {68} + ¦ ¦ ¦--expr: path [0/12] {70} + ¦ ¦ ¦ °--SYMBOL: path [0/0] {69} + ¦ ¦ °--')': ) [1/0] {71} + ¦ ¦--',': , [0/4] {72} + ¦ ¦--expr: trans [1/2] {73} + ¦ ¦ ¦--expr: trans [0/0] {75} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: trans [0/0] {74} + ¦ ¦ ¦--'(': ( [0/0] {76} + ¦ ¦ ¦--expr: basen [0/0] {77} + ¦ ¦ ¦ ¦--expr: basen [0/0] {79} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: basen [0/0] {78} + ¦ ¦ ¦ ¦--'(': ( [0/0] {80} + ¦ ¦ ¦ ¦--expr: path [0/0] {82} + ¦ ¦ ¦ ¦ °--SYMBOL: path [0/0] {81} + ¦ ¦ ¦ °--')': ) [0/0] {83} + ¦ ¦ ¦--',': , [0/1] {84} + ¦ ¦ ¦--expr: trans [0/0] {86} + ¦ ¦ ¦ °--SYMBOL: trans [0/0] {85} + ¦ ¦ °--')': ) [0/0] {87} + ¦ °--')': ) [1/0] {88} + ¦--expr: invis [1/0] {89} + ¦ ¦--expr: invis [0/0] {91} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: invis [0/0] {90} + ¦ ¦--'(': ( [0/0] {92} + ¦ ¦--expr: chang [0/0] {94} + ¦ ¦ °--SYMBOL: chang [0/0] {93} + ¦ °--')': ) [0/0] {95} + °--'}': } [1/0] {96} diff --git a/tests/testthat/roxygen-examples-complete/07-roxygen-no-dontrun-out.R b/tests/testthat/roxygen-examples-complete/07-roxygen-no-dontrun-out.R new file mode 100644 index 000000000..993d3eaa3 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/07-roxygen-no-dontrun-out.R @@ -0,0 +1,35 @@ +#' Style `.R` and/or `.Rmd` files +#' +#' Performs various substitutions in the files specified. +#' Carefully examine the results after running this function! +#' @param path A character vector with paths to files to style. +#' @inheritParams style_pkg +#' @inheritSection transform_files Value +#' @inheritSection style_pkg Warning +#' @inheritSection style_pkg Roundtrip Validation +#' @examples +#' # the following is identical but the former is more convenient: +#' file <- tempfile("styler", +#' fileext = ".R" +#' ) +#' xfun::write_utf8("1++1", file) +#' style_file( +#' file, +#' style = tidyverse_style, strict = TRUE +#' ) +#' style_file(file, transformers = tidyverse_style(strict = TRUE)) +#' xfun::read_utf8(file) +#' unlink(file2) +#' @family stylers +#' @export +style_file <- function(path, + ..., + style = tidyverse_style, + transformers = style(...), + include_roxygen_examples = TRUE) { + changed <- withr::with_dir( + dirname(path), + transform_files(basename(path), transformers) + ) + invisible(changed) +} diff --git a/tests/testthat/roxygen-examples-complete/08-roxygen-dontrun-in.R b/tests/testthat/roxygen-examples-complete/08-roxygen-dontrun-in.R new file mode 100644 index 000000000..e44d4ec34 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/08-roxygen-dontrun-in.R @@ -0,0 +1,38 @@ +#' Style `.R` and/or `.Rmd` files +#' +#' Performs various substitutions in the files specified. +#' Carefully examine the results after running this function! +#' @param path A character vector with paths to files to style. 
+#' @inheritParams style_pkg +#' @inheritSection transform_files Value +#' @inheritSection style_pkg Warning +#' @inheritSection style_pkg Roundtrip Validation +#' @examples +#' # the following is identical but the former is more convenient: +#' file<- tempfile("styler", +#' fileext = ".R") +#' \dontrun{xfun::write_utf8("1++1",file)} +#' style_file( +#' file, style = tidyverse_style, strict = TRUE) +#' style_file(file, transformers = tidyverse_style(strict = TRUE)) +#' xfun::read_utf8(file) +#' \dontrun{unlink(file2)} +#' \dontrun{ +#' { x +#' } +#' unlink(file2) +#' } +#' @family stylers +#' @export +style_file <- function(path, + ... , + style = tidyverse_style, + transformers = style(...), + include_roxygen_examples = TRUE) { + changed<- withr::with_dir( + dirname(path + ), + transform_files(basename(path), transformers) + ) + invisible(changed) +} diff --git a/tests/testthat/roxygen-examples-complete/08-roxygen-dontrun-in_tree b/tests/testthat/roxygen-examples-complete/08-roxygen-dontrun-in_tree new file mode 100644 index 000000000..710728774 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/08-roxygen-dontrun-in_tree @@ -0,0 +1,103 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' St [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' Pe [1/0] {3} + ¦--COMMENT: #' [1/0] {4} + ¦--COMMENT: #' @p [1/0] {5} + ¦--COMMENT: #' @i [1/0] {6} + ¦--COMMENT: #' @i [1/0] {7} + ¦--COMMENT: #' @i [1/0] {8} + ¦--COMMENT: #' @i [1/0] {9} + ¦--COMMENT: #' @e [1/0] {10} + ¦--COMMENT: #' # [1/0] {11} + ¦--COMMENT: #' fi [1/0] {12} + ¦--COMMENT: #' fi [1/0] {13} + ¦--COMMENT: #' \d [1/0] {14} + ¦--COMMENT: #' st [1/0] {15} + ¦--COMMENT: #' fi [1/0] {16} + ¦--COMMENT: #' st [1/0] {17} + ¦--COMMENT: #' xf [1/0] {18} + ¦--COMMENT: #' \d [1/0] {19} + ¦--COMMENT: #' \d [1/0] {20} + ¦--COMMENT: #' { [1/0] {21} + ¦--COMMENT: #' } [1/0] {22} + ¦--COMMENT: #' un [1/0] {23} + ¦--COMMENT: #' } [1/0] {24} + ¦--COMMENT: #' @f [1/0] {25} + ¦--COMMENT: #' @e [1/0] {26} + °--expr: style [1/0] {27} + ¦--expr: style [0/1] {29} + ¦ °--SYMBOL: style [0/0] {28} + ¦--LEFT_ASSIGN: <- [0/1] {30} + °--expr: funct [0/0] {31} + ¦--FUNCTION: funct [0/0] {32} + ¦--'(': ( [0/0] {33} + ¦--SYMBOL_FORMALS: path [0/0] {34} + ¦--',': , [0/23] {35} + ¦--SYMBOL_FORMALS: ... [1/1] {36} + ¦--',': , [0/23] {37} + ¦--SYMBOL_FORMALS: style [1/1] {38} + ¦--EQ_FORMALS: = [0/1] {39} + ¦--expr: tidyv [0/0] {41} + ¦ °--SYMBOL: tidyv [0/0] {40} + ¦--',': , [0/23] {42} + ¦--SYMBOL_FORMALS: trans [1/1] {43} + ¦--EQ_FORMALS: = [0/1] {44} + ¦--expr: style [0/0] {45} + ¦ ¦--expr: style [0/0] {47} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: style [0/0] {46} + ¦ ¦--'(': ( [0/0] {48} + ¦ ¦--expr: ... [0/0] {50} + ¦ ¦ °--SYMBOL: ... 
[0/0] {49} + ¦ °--')': ) [0/0] {51} + ¦--',': , [0/23] {52} + ¦--SYMBOL_FORMALS: inclu [1/1] {53} + ¦--EQ_FORMALS: = [0/1] {54} + ¦--expr: TRUE [0/0] {56} + ¦ °--NUM_CONST: TRUE [0/0] {55} + ¦--')': ) [0/1] {57} + °--expr: { + c [0/0] {58} + ¦--'{': { [0/2] {59} + ¦--expr: chang [1/2] {60} + ¦ ¦--expr: chang [0/0] {62} + ¦ ¦ °--SYMBOL: chang [0/0] {61} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {63} + ¦ °--expr: withr [0/0] {64} + ¦ ¦--expr: withr [0/0] {65} + ¦ ¦ ¦--SYMBOL_PACKAGE: withr [0/0] {66} + ¦ ¦ ¦--NS_GET: :: [0/0] {67} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: with_ [0/0] {68} + ¦ ¦--'(': ( [0/4] {69} + ¦ ¦--expr: dirna [1/0] {70} + ¦ ¦ ¦--expr: dirna [0/0] {72} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: dirna [0/0] {71} + ¦ ¦ ¦--'(': ( [0/0] {73} + ¦ ¦ ¦--expr: path [0/12] {75} + ¦ ¦ ¦ °--SYMBOL: path [0/0] {74} + ¦ ¦ °--')': ) [1/0] {76} + ¦ ¦--',': , [0/4] {77} + ¦ ¦--expr: trans [1/2] {78} + ¦ ¦ ¦--expr: trans [0/0] {80} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: trans [0/0] {79} + ¦ ¦ ¦--'(': ( [0/0] {81} + ¦ ¦ ¦--expr: basen [0/0] {82} + ¦ ¦ ¦ ¦--expr: basen [0/0] {84} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: basen [0/0] {83} + ¦ ¦ ¦ ¦--'(': ( [0/0] {85} + ¦ ¦ ¦ ¦--expr: path [0/0] {87} + ¦ ¦ ¦ ¦ °--SYMBOL: path [0/0] {86} + ¦ ¦ ¦ °--')': ) [0/0] {88} + ¦ ¦ ¦--',': , [0/1] {89} + ¦ ¦ ¦--expr: trans [0/0] {91} + ¦ ¦ ¦ °--SYMBOL: trans [0/0] {90} + ¦ ¦ °--')': ) [0/0] {92} + ¦ °--')': ) [1/0] {93} + ¦--expr: invis [1/0] {94} + ¦ ¦--expr: invis [0/0] {96} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: invis [0/0] {95} + ¦ ¦--'(': ( [0/0] {97} + ¦ ¦--expr: chang [0/0] {99} + ¦ ¦ °--SYMBOL: chang [0/0] {98} + ¦ °--')': ) [0/0] {100} + °--'}': } [1/0] {101} diff --git a/tests/testthat/roxygen-examples-complete/08-roxygen-dontrun-out.R b/tests/testthat/roxygen-examples-complete/08-roxygen-dontrun-out.R new file mode 100644 index 000000000..8e92f97cd --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/08-roxygen-dontrun-out.R @@ -0,0 +1,45 @@ +#' Style `.R` and/or `.Rmd` files +#' +#' Performs various substitutions in the files specified. +#' Carefully examine the results after running this function! +#' @param path A character vector with paths to files to style. +#' @inheritParams style_pkg +#' @inheritSection transform_files Value +#' @inheritSection style_pkg Warning +#' @inheritSection style_pkg Roundtrip Validation +#' @examples +#' # the following is identical but the former is more convenient: +#' file <- tempfile("styler", +#' fileext = ".R" +#' ) +#' \dontrun{ +#' xfun::write_utf8("1++1", file) +#' } +#' style_file( +#' file, +#' style = tidyverse_style, strict = TRUE +#' ) +#' style_file(file, transformers = tidyverse_style(strict = TRUE)) +#' xfun::read_utf8(file) +#' \dontrun{ +#' unlink(file2) +#' } +#' \dontrun{ +#' { +#' x +#' } +#' unlink(file2) +#' } +#' @family stylers +#' @export +style_file <- function(path, + ..., + style = tidyverse_style, + transformers = style(...), + include_roxygen_examples = TRUE) { + changed <- withr::with_dir( + dirname(path), + transform_files(basename(path), transformers) + ) + invisible(changed) +} diff --git a/tests/testthat/roxygen-examples-complete/09-styler-r-ui-style-string-multiple-in.R b/tests/testthat/roxygen-examples-complete/09-styler-r-ui-style-string-multiple-in.R new file mode 100644 index 000000000..3fe6ac77b --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/09-styler-r-ui-style-string-multiple-in.R @@ -0,0 +1,27 @@ +#' Style a string +#' +#' Styles a character vector. Each element of the character vector corresponds +#' to one line of code. 
+#' @param text A character vector with text to style. +#' @inheritParams style_pkg +#' @family stylers +#' @examples +#' style_text("call( 1)") +#' style_text("1 + 1", strict = FALSE) +#' style_text("a%>%b", scope = "spaces") +#' style_text("a%>%b; a", scope = "line_breaks") +#' style_text("a%>%b; a", scope = "tokens") +#' # the following is identical but the former is more convenient: +#' style_text("a<-3++1", style = tidyverse_style, strict = TRUE) +#' @examples +#' \dontrun{style_text("a<-3++1", transformers = tidyverse_style(strict = TRUE))} +#' @export +style_text <- function(text, + ..., + style = tidyverse_style, + transformers = style(...), + include_roxygen_examples = TRUE) { + transformer <- make_transformer(transformers, include_roxygen_examples) + styled_text <- transformer(text) + construct_vertical(styled_text) +} diff --git a/tests/testthat/roxygen-examples-complete/09-styler-r-ui-style-string-multiple-in_tree b/tests/testthat/roxygen-examples-complete/09-styler-r-ui-style-string-multiple-in_tree new file mode 100644 index 000000000..51af44dc6 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/09-styler-r-ui-style-string-multiple-in_tree @@ -0,0 +1,86 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' St [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' St [1/0] {3} + ¦--COMMENT: #' to [1/0] {4} + ¦--COMMENT: #' @p [1/0] {5} + ¦--COMMENT: #' @i [1/0] {6} + ¦--COMMENT: #' @f [1/0] {7} + ¦--COMMENT: #' @e [1/0] {8} + ¦--COMMENT: #' st [1/0] {9} + ¦--COMMENT: #' st [1/0] {10} + ¦--COMMENT: #' st [1/0] {11} + ¦--COMMENT: #' st [1/0] {12} + ¦--COMMENT: #' st [1/0] {13} + ¦--COMMENT: #' # [1/0] {14} + ¦--COMMENT: #' st [1/0] {15} + ¦--COMMENT: #' @e [1/0] {16} + ¦--COMMENT: #' \d [1/0] {17} + ¦--COMMENT: #' @e [1/0] {18} + °--expr: style [1/0] {19} + ¦--expr: style [0/1] {21} + ¦ °--SYMBOL: style [0/0] {20} + ¦--LEFT_ASSIGN: <- [0/1] {22} + °--expr: funct [0/0] {23} + ¦--FUNCTION: funct [0/0] {24} + ¦--'(': ( [0/0] {25} + ¦--SYMBOL_FORMALS: text [0/0] {26} + ¦--',': , [0/23] {27} + ¦--SYMBOL_FORMALS: ... [1/0] {28} + ¦--',': , [0/23] {29} + ¦--SYMBOL_FORMALS: style [1/1] {30} + ¦--EQ_FORMALS: = [0/1] {31} + ¦--expr: tidyv [0/0] {33} + ¦ °--SYMBOL: tidyv [0/0] {32} + ¦--',': , [0/23] {34} + ¦--SYMBOL_FORMALS: trans [1/1] {35} + ¦--EQ_FORMALS: = [0/1] {36} + ¦--expr: style [0/0] {37} + ¦ ¦--expr: style [0/0] {39} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: style [0/0] {38} + ¦ ¦--'(': ( [0/0] {40} + ¦ ¦--expr: ... [0/0] {42} + ¦ ¦ °--SYMBOL: ... 
[0/0] {41} + ¦ °--')': ) [0/0] {43} + ¦--',': , [0/23] {44} + ¦--SYMBOL_FORMALS: inclu [1/1] {45} + ¦--EQ_FORMALS: = [0/1] {46} + ¦--expr: TRUE [0/0] {48} + ¦ °--NUM_CONST: TRUE [0/0] {47} + ¦--')': ) [0/1] {49} + °--expr: { + t [0/0] {50} + ¦--'{': { [0/2] {51} + ¦--expr: trans [1/2] {52} + ¦ ¦--expr: trans [0/1] {54} + ¦ ¦ °--SYMBOL: trans [0/0] {53} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {55} + ¦ °--expr: make_ [0/0] {56} + ¦ ¦--expr: make_ [0/0] {58} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: make_ [0/0] {57} + ¦ ¦--'(': ( [0/0] {59} + ¦ ¦--expr: trans [0/0] {61} + ¦ ¦ °--SYMBOL: trans [0/0] {60} + ¦ ¦--',': , [0/1] {62} + ¦ ¦--expr: inclu [0/0] {64} + ¦ ¦ °--SYMBOL: inclu [0/0] {63} + ¦ °--')': ) [0/0] {65} + ¦--expr: style [1/2] {66} + ¦ ¦--expr: style [0/1] {68} + ¦ ¦ °--SYMBOL: style [0/0] {67} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {69} + ¦ °--expr: trans [0/0] {70} + ¦ ¦--expr: trans [0/0] {72} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: trans [0/0] {71} + ¦ ¦--'(': ( [0/0] {73} + ¦ ¦--expr: text [0/0] {75} + ¦ ¦ °--SYMBOL: text [0/0] {74} + ¦ °--')': ) [0/0] {76} + ¦--expr: const [1/0] {77} + ¦ ¦--expr: const [0/0] {79} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: const [0/0] {78} + ¦ ¦--'(': ( [0/0] {80} + ¦ ¦--expr: style [0/0] {82} + ¦ ¦ °--SYMBOL: style [0/0] {81} + ¦ °--')': ) [0/0] {83} + °--'}': } [1/0] {84} diff --git a/tests/testthat/roxygen-examples-complete/09-styler-r-ui-style-string-multiple-out.R b/tests/testthat/roxygen-examples-complete/09-styler-r-ui-style-string-multiple-out.R new file mode 100644 index 000000000..743afba45 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/09-styler-r-ui-style-string-multiple-out.R @@ -0,0 +1,29 @@ +#' Style a string +#' +#' Styles a character vector. Each element of the character vector corresponds +#' to one line of code. +#' @param text A character vector with text to style. +#' @inheritParams style_pkg +#' @family stylers +#' @examples +#' style_text("call( 1)") +#' style_text("1 + 1", strict = FALSE) +#' style_text("a%>%b", scope = "spaces") +#' style_text("a%>%b; a", scope = "line_breaks") +#' style_text("a%>%b; a", scope = "tokens") +#' # the following is identical but the former is more convenient: +#' style_text("a<-3++1", style = tidyverse_style, strict = TRUE) +#' @examples +#' \dontrun{ +#' style_text("a<-3++1", transformers = tidyverse_style(strict = TRUE)) +#' } +#' @export +style_text <- function(text, + ..., + style = tidyverse_style, + transformers = style(...), + include_roxygen_examples = TRUE) { + transformer <- make_transformer(transformers, include_roxygen_examples) + styled_text <- transformer(text) + construct_vertical(styled_text) +} diff --git a/R/ui.R b/tests/testthat/roxygen-examples-complete/10-styler-r-ui-in.R similarity index 62% rename from R/ui.R rename to tests/testthat/roxygen-examples-complete/10-styler-r-ui-in.R index 8e4546567..a7cbf3aca 100644 --- a/R/ui.R +++ b/tests/testthat/roxygen-examples-complete/10-styler-r-ui-in.R @@ -18,20 +18,46 @@ NULL #' @param transformers A set of transformer functions. This argument is most #' conveniently constructed via the `style` argument and `...`. See #' 'Examples'. -#' @param filetype Vector of file extensions indicating which filetypes should +#' @param filetype Vector of file extensions indicating which file types should #' be styled. Case is ignored, and the `.` is optional, e.g. `c(".R", ".Rmd")` #' or `c("r", "rmd")`. #' @param exclude_files Character vector with paths to files that should be #' excluded from styling. 
+#' @param include_roxygen_examples Whether or not to style code in roxygen +#' examples. #' @section Warning: #' This function overwrites files (if styling results in a change of the #' code to be formatted). It is strongly suggested to only style files #' that are under version control or to create a backup copy. +#' +#' We suggest to first style with `scope < "tokens"` and inspect and commit +#' changes, because these changes are guaranteed to leave the abstract syntax +#' tree (AST) unchanged. See section 'Roundtrip Validation' for details. +#' +#' Then, we suggest to style with `scope = "tokens"` (if desired) and carefully +#' inspect the changes to make sure the AST is not changed in an unexpected way +#' that invalidates code. +#' @section Roundtrip Validation: +#' The following section describes when and how styling is guaranteed to +#' yield correct code. +#' +#' If the style guide has `scope < "tokens"`, no tokens are changed and the +#' abstract syntax tree (AST) should not change. +#' Hence, it is possible to validate the styling by comparing whether the parsed +#' expression before and after styling have the same AST. +#' This comparison omits comments. styler compares +#' error if the AST has changed through styling. +#' +#' Note that with `scope = "tokens"` such a comparison is not conducted because +#' the AST might well change and such a change is intended. There is no way +#' styler can validate styling, that is why we inform the user to carefully +#' inspect the changes. +#' +#' See section 'Warning' for a good strategy to apply styling safely. #' @inheritSection transform_files Value #' @family stylers #' @examples #' \dontrun{ -#' #' style_pkg(style = tidyverse_style, strict = TRUE) #' style_pkg( #' scope = "line_breaks", @@ -44,15 +70,19 @@ style_pkg <- function(pkg = ".", style = tidyverse_style, transformers = style(...), filetype = "R", - exclude_files = "R/RcppExports.R") { + exclude_files = "R/RcppExports.R", + include_roxygen_examples = TRUE) { pkg_root <- rprojroot::find_package_root_file(path = pkg) - changed <- withr::with_dir(pkg_root, - prettify_pkg(transformers, filetype, exclude_files) - ) + changed <- withr::with_dir(pkg_root, prettify_pkg( + transformers, filetype, exclude_files, include_roxygen_examples + )) invisible(changed) } -prettify_pkg <- function(transformers, filetype, exclude_files) { +prettify_pkg <- function(transformers, + filetype, + exclude_files, + include_roxygen_examples) { filetype <- set_and_assert_arg_filetype(filetype) r_files <- vignette_files <- readme <- NULL @@ -72,7 +102,7 @@ prettify_pkg <- function(transformers, filetype, exclude_files) { } files <- setdiff(c(r_files, vignette_files, readme), exclude_files) - transform_files(files, transformers) + transform_files(files, transformers, include_roxygen_examples) } @@ -96,8 +126,9 @@ prettify_pkg <- function(transformers, filetype, exclude_files) { style_text <- function(text, ..., style = tidyverse_style, - transformers = style(...)) { - transformer <- make_transformer(transformers) + transformers = style(...), + include_roxygen_examples = TRUE) { + transformer <- make_transformer(transformers, include_roxygen_examples) styled_text <- transformer(text) construct_vertical(styled_text) } @@ -112,6 +143,7 @@ style_text <- function(text, #' @inheritParams style_pkg #' @inheritSection transform_files Value #' @inheritSection style_pkg Warning +#' @inheritSection style_pkg Roundtrip Validation #' @family stylers #' @examples #' \dontrun{ @@ -124,9 +156,12 @@ style_dir <- 
function(path = ".", transformers = style(...), filetype = "R", recursive = TRUE, - exclude_files = NULL) { + exclude_files = NULL, + include_roxygen_examples = TRUE) { changed <- withr::with_dir( - path, prettify_any(transformers, filetype, recursive, exclude_files) + path, prettify_any( + transformers, filetype, recursive, exclude_files, include_roxygen_examples + ) ) invisible(changed) } @@ -137,39 +172,51 @@ style_dir <- function(path = ".", #' @inheritParams style_pkg #' @param recursive A logical value indicating whether or not files in subdirectories #' should be styled as well. -prettify_any <- function(transformers, filetype, recursive, exclude_files) { +#' @keywords internal +prettify_any <- function(transformers, + filetype, + recursive, + exclude_files, + include_roxygen_examples) { files <- dir( path = ".", pattern = map_filetype_to_pattern(filetype), ignore.case = TRUE, recursive = recursive, full.names = TRUE ) - transform_files(setdiff(files, exclude_files), transformers) + transform_files( + setdiff(files, exclude_files), transformers, include_roxygen_examples + ) } #' Style `.R` and/or `.Rmd` files #' #' Performs various substitutions in the files specified. -#' Carefully examine the results after running this function! +#' Carefully examine the results after running this function! +#' @section Encoding: +#' UTF-8 encoding is assumed. Please convert your code to UTF-8 if necessary +#' before applying styler. #' @param path A character vector with paths to files to style. #' @inheritParams style_pkg #' @inheritSection transform_files Value #' @inheritSection style_pkg Warning +#' @inheritSection style_pkg Roundtrip Validation #' @examples #' # the following is identical but the former is more convenient: #' file <- tempfile("styler", fileext = ".R") -#' enc::write_lines_enc("1++1", file) +#' xfun::write_utf8("1++1", file) #' style_file(file, style = tidyverse_style, strict = TRUE) #' style_file(file, transformers = tidyverse_style(strict = TRUE)) -#' enc::read_lines_enc(file) +#' xfun::read_utf8(file) #' unlink(file) #' @family stylers #' @export style_file <- function(path, ..., style = tidyverse_style, - transformers = style(...)) { + transformers = style(...), + include_roxygen_examples = TRUE) { changed <- withr::with_dir( dirname(path), - transform_files(basename(path), transformers) + transform_files(basename(path), transformers, include_roxygen_examples) ) invisible(changed) } diff --git a/tests/testthat/roxygen-examples-complete/10-styler-r-ui-in_tree b/tests/testthat/roxygen-examples-complete/10-styler-r-ui-in_tree new file mode 100644 index 000000000..070451e6e --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/10-styler-r-ui-in_tree @@ -0,0 +1,756 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' @a [0/0] {1} + ¦--COMMENT: #' @i [1/0] {2} + ¦--COMMENT: #' @i [1/0] {3} + ¦--expr: NULL [1/0] {5} + ¦ °--NULL_CONST: NULL [0/0] {4} + ¦--COMMENT: #' Pr [2/0] {6} + ¦--COMMENT: #' [1/0] {7} + ¦--COMMENT: #' Pe [1/0] {8} + ¦--COMMENT: #' (c [1/0] {9} + ¦--COMMENT: #' Ca [1/0] {10} + ¦--COMMENT: #' [1/0] {11} + ¦--COMMENT: #' @p [1/0] {12} + ¦--COMMENT: #' @p [1/0] {13} + ¦--COMMENT: #' @p [1/0] {14} + ¦--COMMENT: #' [1/0] {15} + ¦--COMMENT: #' [1/0] {16} + ¦--COMMENT: #' [1/0] {17} + ¦--COMMENT: #' @p [1/0] {18} + ¦--COMMENT: #' [1/0] {19} + ¦--COMMENT: #' [1/0] {20} + ¦--COMMENT: #' @p [1/0] {21} + ¦--COMMENT: #' [1/0] {22} + ¦--COMMENT: #' [1/0] {23} + ¦--COMMENT: #' @p [1/0] {24} + ¦--COMMENT: #' [1/0] {25} + ¦--COMMENT: #' @p 
[1/0] {26} + ¦--COMMENT: #' [1/0] {27} + ¦--COMMENT: #' @s [1/0] {28} + ¦--COMMENT: #' Th [1/0] {29} + ¦--COMMENT: #' co [1/0] {30} + ¦--COMMENT: #' th [1/0] {31} + ¦--COMMENT: #' [1/0] {32} + ¦--COMMENT: #' We [1/0] {33} + ¦--COMMENT: #' ch [1/0] {34} + ¦--COMMENT: #' tr [1/0] {35} + ¦--COMMENT: #' [1/0] {36} + ¦--COMMENT: #' Th [1/0] {37} + ¦--COMMENT: #' in [1/0] {38} + ¦--COMMENT: #' th [1/0] {39} + ¦--COMMENT: #' @s [1/0] {40} + ¦--COMMENT: #' Th [1/0] {41} + ¦--COMMENT: #' yi [1/0] {42} + ¦--COMMENT: #' [1/0] {43} + ¦--COMMENT: #' If [1/0] {44} + ¦--COMMENT: #' ab [1/0] {45} + ¦--COMMENT: #' He [1/0] {46} + ¦--COMMENT: #' ex [1/0] {47} + ¦--COMMENT: #' Th [1/0] {48} + ¦--COMMENT: #' er [1/0] {49} + ¦--COMMENT: #' [1/0] {50} + ¦--COMMENT: #' No [1/0] {51} + ¦--COMMENT: #' th [1/0] {52} + ¦--COMMENT: #' st [1/0] {53} + ¦--COMMENT: #' in [1/0] {54} + ¦--COMMENT: #' [1/0] {55} + ¦--COMMENT: #' Se [1/0] {56} + ¦--COMMENT: #' @i [1/0] {57} + ¦--COMMENT: #' @f [1/0] {58} + ¦--COMMENT: #' @e [1/0] {59} + ¦--COMMENT: #' \d [1/0] {60} + ¦--COMMENT: #' st [1/0] {61} + ¦--COMMENT: #' st [1/0] {62} + ¦--COMMENT: #' [1/0] {63} + ¦--COMMENT: #' [1/0] {64} + ¦--COMMENT: #' ) [1/0] {65} + ¦--COMMENT: #' } [1/0] {66} + ¦--COMMENT: #' @e [1/0] {67} + ¦--expr: style [1/0] {68} + ¦ ¦--expr: style [0/1] {70} + ¦ ¦ °--SYMBOL: style [0/0] {69} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {71} + ¦ °--expr: funct [0/0] {72} + ¦ ¦--FUNCTION: funct [0/0] {73} + ¦ ¦--'(': ( [0/0] {74} + ¦ ¦--SYMBOL_FORMALS: pkg [0/1] {75} + ¦ ¦--EQ_FORMALS: = [0/1] {76} + ¦ ¦--expr: "." [0/0] {78} + ¦ ¦ °--STR_CONST: "." [0/0] {77} + ¦ ¦--',': , [0/22] {79} + ¦ ¦--SYMBOL_FORMALS: ... [1/0] {80} + ¦ ¦--',': , [0/22] {81} + ¦ ¦--SYMBOL_FORMALS: style [1/1] {82} + ¦ ¦--EQ_FORMALS: = [0/1] {83} + ¦ ¦--expr: tidyv [0/0] {85} + ¦ ¦ °--SYMBOL: tidyv [0/0] {84} + ¦ ¦--',': , [0/22] {86} + ¦ ¦--SYMBOL_FORMALS: trans [1/1] {87} + ¦ ¦--EQ_FORMALS: = [0/1] {88} + ¦ ¦--expr: style [0/0] {89} + ¦ ¦ ¦--expr: style [0/0] {91} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: style [0/0] {90} + ¦ ¦ ¦--'(': ( [0/0] {92} + ¦ ¦ ¦--expr: ... [0/0] {94} + ¦ ¦ ¦ °--SYMBOL: ... 
[0/0] {93} + ¦ ¦ °--')': ) [0/0] {95} + ¦ ¦--',': , [0/22] {96} + ¦ ¦--SYMBOL_FORMALS: filet [1/1] {97} + ¦ ¦--EQ_FORMALS: = [0/1] {98} + ¦ ¦--expr: "R" [0/0] {100} + ¦ ¦ °--STR_CONST: "R" [0/0] {99} + ¦ ¦--',': , [0/22] {101} + ¦ ¦--SYMBOL_FORMALS: exclu [1/1] {102} + ¦ ¦--EQ_FORMALS: = [0/1] {103} + ¦ ¦--expr: "R/Rc [0/0] {105} + ¦ ¦ °--STR_CONST: "R/Rc [0/0] {104} + ¦ ¦--',': , [0/22] {106} + ¦ ¦--SYMBOL_FORMALS: inclu [1/1] {107} + ¦ ¦--EQ_FORMALS: = [0/1] {108} + ¦ ¦--expr: TRUE [0/0] {110} + ¦ ¦ °--NUM_CONST: TRUE [0/0] {109} + ¦ ¦--')': ) [0/1] {111} + ¦ °--expr: { + p [0/0] {112} + ¦ ¦--'{': { [0/2] {113} + ¦ ¦--expr: pkg_r [1/2] {114} + ¦ ¦ ¦--expr: pkg_r [0/1] {116} + ¦ ¦ ¦ °--SYMBOL: pkg_r [0/0] {115} + ¦ ¦ ¦--LEFT_ASSIGN: <- [0/1] {117} + ¦ ¦ °--expr: rproj [0/0] {118} + ¦ ¦ ¦--expr: rproj [0/0] {119} + ¦ ¦ ¦ ¦--SYMBOL_PACKAGE: rproj [0/0] {120} + ¦ ¦ ¦ ¦--NS_GET: :: [0/0] {121} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: find_ [0/0] {122} + ¦ ¦ ¦--'(': ( [0/0] {123} + ¦ ¦ ¦--SYMBOL_SUB: path [0/1] {124} + ¦ ¦ ¦--EQ_SUB: = [0/1] {125} + ¦ ¦ ¦--expr: pkg [0/0] {127} + ¦ ¦ ¦ °--SYMBOL: pkg [0/0] {126} + ¦ ¦ °--')': ) [0/0] {128} + ¦ ¦--expr: chang [1/2] {129} + ¦ ¦ ¦--expr: chang [0/1] {131} + ¦ ¦ ¦ °--SYMBOL: chang [0/0] {130} + ¦ ¦ ¦--LEFT_ASSIGN: <- [0/1] {132} + ¦ ¦ °--expr: withr [0/0] {133} + ¦ ¦ ¦--expr: withr [0/0] {134} + ¦ ¦ ¦ ¦--SYMBOL_PACKAGE: withr [0/0] {135} + ¦ ¦ ¦ ¦--NS_GET: :: [0/0] {136} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: with_ [0/0] {137} + ¦ ¦ ¦--'(': ( [0/0] {138} + ¦ ¦ ¦--expr: pkg_r [0/0] {140} + ¦ ¦ ¦ °--SYMBOL: pkg_r [0/0] {139} + ¦ ¦ ¦--',': , [0/1] {141} + ¦ ¦ ¦--expr: prett [0/0] {142} + ¦ ¦ ¦ ¦--expr: prett [0/0] {144} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: prett [0/0] {143} + ¦ ¦ ¦ ¦--'(': ( [0/4] {145} + ¦ ¦ ¦ ¦--expr: trans [1/0] {147} + ¦ ¦ ¦ ¦ °--SYMBOL: trans [0/0] {146} + ¦ ¦ ¦ ¦--',': , [0/1] {148} + ¦ ¦ ¦ ¦--expr: filet [0/0] {150} + ¦ ¦ ¦ ¦ °--SYMBOL: filet [0/0] {149} + ¦ ¦ ¦ ¦--',': , [0/1] {151} + ¦ ¦ ¦ ¦--expr: exclu [0/0] {153} + ¦ ¦ ¦ ¦ °--SYMBOL: exclu [0/0] {152} + ¦ ¦ ¦ ¦--',': , [0/1] {154} + ¦ ¦ ¦ ¦--expr: inclu [0/2] {156} + ¦ ¦ ¦ ¦ °--SYMBOL: inclu [0/0] {155} + ¦ ¦ ¦ °--')': ) [1/0] {157} + ¦ ¦ °--')': ) [0/0] {158} + ¦ ¦--expr: invis [1/0] {159} + ¦ ¦ ¦--expr: invis [0/0] {161} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: invis [0/0] {160} + ¦ ¦ ¦--'(': ( [0/0] {162} + ¦ ¦ ¦--expr: chang [0/0] {164} + ¦ ¦ ¦ °--SYMBOL: chang [0/0] {163} + ¦ ¦ °--')': ) [0/0] {165} + ¦ °--'}': } [1/0] {166} + ¦--expr: prett [2/0] {167} + ¦ ¦--expr: prett [0/1] {169} + ¦ ¦ °--SYMBOL: prett [0/0] {168} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {170} + ¦ °--expr: funct [0/0] {171} + ¦ ¦--FUNCTION: funct [0/0] {172} + ¦ ¦--'(': ( [0/0] {173} + ¦ ¦--SYMBOL_FORMALS: trans [0/0] {174} + ¦ ¦--',': , [0/25] {175} + ¦ ¦--SYMBOL_FORMALS: filet [1/0] {176} + ¦ ¦--',': , [0/25] {177} + ¦ ¦--SYMBOL_FORMALS: exclu [1/0] {178} + ¦ ¦--',': , [0/25] {179} + ¦ ¦--SYMBOL_FORMALS: inclu [1/0] {180} + ¦ ¦--')': ) [0/1] {181} + ¦ °--expr: { + f [0/0] {182} + ¦ ¦--'{': { [0/2] {183} + ¦ ¦--expr: filet [1/2] {184} + ¦ ¦ ¦--expr: filet [0/1] {186} + ¦ ¦ ¦ °--SYMBOL: filet [0/0] {185} + ¦ ¦ ¦--LEFT_ASSIGN: <- [0/1] {187} + ¦ ¦ °--expr: set_a [0/0] {188} + ¦ ¦ ¦--expr: set_a [0/0] {190} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: set_a [0/0] {189} + ¦ ¦ ¦--'(': ( [0/0] {191} + ¦ ¦ ¦--expr: filet [0/0] {193} + ¦ ¦ ¦ °--SYMBOL: filet [0/0] {192} + ¦ ¦ °--')': ) [0/0] {194} + ¦ ¦--expr: r_fil [1/2] {195} + ¦ ¦ ¦--expr: r_fil [0/1] {197} + ¦ ¦ ¦ °--SYMBOL: r_fil [0/0] {196} + ¦ ¦ ¦--LEFT_ASSIGN: <- [0/1] {198} + ¦ ¦ ¦--expr: 
vigne [0/1] {201} + ¦ ¦ ¦ °--SYMBOL: vigne [0/0] {200} + ¦ ¦ ¦--LEFT_ASSIGN: <- [0/1] {202} + ¦ ¦ ¦--expr: readm [0/1] {205} + ¦ ¦ ¦ °--SYMBOL: readm [0/0] {204} + ¦ ¦ ¦--LEFT_ASSIGN: <- [0/1] {206} + ¦ ¦ °--expr: NULL [0/0] {208} + ¦ ¦ °--NULL_CONST: NULL [0/0] {207} + ¦ ¦--expr: if (" [2/2] {209} + ¦ ¦ ¦--IF: if [0/1] {210} + ¦ ¦ ¦--'(': ( [0/0] {211} + ¦ ¦ ¦--expr: "\\.r [0/0] {212} + ¦ ¦ ¦ ¦--expr: "\\.r [0/1] {214} + ¦ ¦ ¦ ¦ °--STR_CONST: "\\.r [0/0] {213} + ¦ ¦ ¦ ¦--SPECIAL-IN: %in% [0/1] {215} + ¦ ¦ ¦ °--expr: filet [0/0] {217} + ¦ ¦ ¦ °--SYMBOL: filet [0/0] {216} + ¦ ¦ ¦--')': ) [0/1] {218} + ¦ ¦ °--expr: { + [0/0] {219} + ¦ ¦ ¦--'{': { [0/4] {220} + ¦ ¦ ¦--expr: r_fil [1/2] {221} + ¦ ¦ ¦ ¦--expr: r_fil [0/1] {223} + ¦ ¦ ¦ ¦ °--SYMBOL: r_fil [0/0] {222} + ¦ ¦ ¦ ¦--LEFT_ASSIGN: <- [0/1] {224} + ¦ ¦ ¦ °--expr: dir( + [0/0] {225} + ¦ ¦ ¦ ¦--expr: dir [0/0] {227} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: dir [0/0] {226} + ¦ ¦ ¦ ¦--'(': ( [0/6] {228} + ¦ ¦ ¦ ¦--SYMBOL_SUB: path [1/1] {229} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {230} + ¦ ¦ ¦ ¦--expr: c("R" [0/0] {231} + ¦ ¦ ¦ ¦ ¦--expr: c [0/0] {233} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {232} + ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {234} + ¦ ¦ ¦ ¦ ¦--expr: "R" [0/0] {236} + ¦ ¦ ¦ ¦ ¦ °--STR_CONST: "R" [0/0] {235} + ¦ ¦ ¦ ¦ ¦--',': , [0/1] {237} + ¦ ¦ ¦ ¦ ¦--expr: "test [0/0] {239} + ¦ ¦ ¦ ¦ ¦ °--STR_CONST: "test [0/0] {238} + ¦ ¦ ¦ ¦ ¦--',': , [0/1] {240} + ¦ ¦ ¦ ¦ ¦--expr: "data [0/0] {242} + ¦ ¦ ¦ ¦ ¦ °--STR_CONST: "data [0/0] {241} + ¦ ¦ ¦ ¦ °--')': ) [0/0] {243} + ¦ ¦ ¦ ¦--',': , [0/1] {244} + ¦ ¦ ¦ ¦--SYMBOL_SUB: patte [0/1] {245} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {246} + ¦ ¦ ¦ ¦--expr: "\\.r [0/0] {248} + ¦ ¦ ¦ ¦ °--STR_CONST: "\\.r [0/0] {247} + ¦ ¦ ¦ ¦--',': , [0/6] {249} + ¦ ¦ ¦ ¦--SYMBOL_SUB: ignor [1/1] {250} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {251} + ¦ ¦ ¦ ¦--expr: TRUE [0/0] {253} + ¦ ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {252} + ¦ ¦ ¦ ¦--',': , [0/1] {254} + ¦ ¦ ¦ ¦--SYMBOL_SUB: recur [0/1] {255} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {256} + ¦ ¦ ¦ ¦--expr: TRUE [0/0] {258} + ¦ ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {257} + ¦ ¦ ¦ ¦--',': , [0/1] {259} + ¦ ¦ ¦ ¦--SYMBOL_SUB: full. 
[0/1] {260} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {261} + ¦ ¦ ¦ ¦--expr: TRUE [0/4] {263} + ¦ ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {262} + ¦ ¦ ¦ °--')': ) [1/0] {264} + ¦ ¦ °--'}': } [1/0] {265} + ¦ ¦--expr: if (" [2/2] {266} + ¦ ¦ ¦--IF: if [0/1] {267} + ¦ ¦ ¦--'(': ( [0/0] {268} + ¦ ¦ ¦--expr: "\\.r [0/0] {269} + ¦ ¦ ¦ ¦--expr: "\\.r [0/1] {271} + ¦ ¦ ¦ ¦ °--STR_CONST: "\\.r [0/0] {270} + ¦ ¦ ¦ ¦--SPECIAL-IN: %in% [0/1] {272} + ¦ ¦ ¦ °--expr: filet [0/0] {274} + ¦ ¦ ¦ °--SYMBOL: filet [0/0] {273} + ¦ ¦ ¦--')': ) [0/1] {275} + ¦ ¦ °--expr: { + [0/0] {276} + ¦ ¦ ¦--'{': { [0/4] {277} + ¦ ¦ ¦--expr: vigne [1/4] {278} + ¦ ¦ ¦ ¦--expr: vigne [0/1] {280} + ¦ ¦ ¦ ¦ °--SYMBOL: vigne [0/0] {279} + ¦ ¦ ¦ ¦--LEFT_ASSIGN: <- [0/1] {281} + ¦ ¦ ¦ °--expr: dir( + [0/0] {282} + ¦ ¦ ¦ ¦--expr: dir [0/0] {284} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: dir [0/0] {283} + ¦ ¦ ¦ ¦--'(': ( [0/6] {285} + ¦ ¦ ¦ ¦--SYMBOL_SUB: path [1/1] {286} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {287} + ¦ ¦ ¦ ¦--expr: "vign [0/0] {289} + ¦ ¦ ¦ ¦ °--STR_CONST: "vign [0/0] {288} + ¦ ¦ ¦ ¦--',': , [0/1] {290} + ¦ ¦ ¦ ¦--SYMBOL_SUB: patte [0/1] {291} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {292} + ¦ ¦ ¦ ¦--expr: "\\.r [0/0] {294} + ¦ ¦ ¦ ¦ °--STR_CONST: "\\.r [0/0] {293} + ¦ ¦ ¦ ¦--',': , [0/6] {295} + ¦ ¦ ¦ ¦--SYMBOL_SUB: ignor [1/1] {296} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {297} + ¦ ¦ ¦ ¦--expr: TRUE [0/0] {299} + ¦ ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {298} + ¦ ¦ ¦ ¦--',': , [0/1] {300} + ¦ ¦ ¦ ¦--SYMBOL_SUB: recur [0/1] {301} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {302} + ¦ ¦ ¦ ¦--expr: TRUE [0/0] {304} + ¦ ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {303} + ¦ ¦ ¦ ¦--',': , [0/1] {305} + ¦ ¦ ¦ ¦--SYMBOL_SUB: full. [0/1] {306} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {307} + ¦ ¦ ¦ ¦--expr: TRUE [0/4] {309} + ¦ ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {308} + ¦ ¦ ¦ °--')': ) [1/0] {310} + ¦ ¦ ¦--expr: readm [1/2] {311} + ¦ ¦ ¦ ¦--expr: readm [0/1] {313} + ¦ ¦ ¦ ¦ °--SYMBOL: readm [0/0] {312} + ¦ ¦ ¦ ¦--LEFT_ASSIGN: <- [0/1] {314} + ¦ ¦ ¦ °--expr: dir(p [0/0] {315} + ¦ ¦ ¦ ¦--expr: dir [0/0] {317} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: dir [0/0] {316} + ¦ ¦ ¦ ¦--'(': ( [0/0] {318} + ¦ ¦ ¦ ¦--SYMBOL_SUB: patte [0/1] {319} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {320} + ¦ ¦ ¦ ¦--expr: "^rea [0/0] {322} + ¦ ¦ ¦ ¦ °--STR_CONST: "^rea [0/0] {321} + ¦ ¦ ¦ ¦--',': , [0/1] {323} + ¦ ¦ ¦ ¦--SYMBOL_SUB: ignor [0/1] {324} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {325} + ¦ ¦ ¦ ¦--expr: TRUE [0/0] {327} + ¦ ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {326} + ¦ ¦ ¦ °--')': ) [0/0] {328} + ¦ ¦ °--'}': } [1/0] {329} + ¦ ¦--expr: files [2/2] {330} + ¦ ¦ ¦--expr: files [0/1] {332} + ¦ ¦ ¦ °--SYMBOL: files [0/0] {331} + ¦ ¦ ¦--LEFT_ASSIGN: <- [0/1] {333} + ¦ ¦ °--expr: setdi [0/0] {334} + ¦ ¦ ¦--expr: setdi [0/0] {336} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: setdi [0/0] {335} + ¦ ¦ ¦--'(': ( [0/0] {337} + ¦ ¦ ¦--expr: c(r_f [0/0] {338} + ¦ ¦ ¦ ¦--expr: c [0/0] {340} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {339} + ¦ ¦ ¦ ¦--'(': ( [0/0] {341} + ¦ ¦ ¦ ¦--expr: r_fil [0/0] {343} + ¦ ¦ ¦ ¦ °--SYMBOL: r_fil [0/0] {342} + ¦ ¦ ¦ ¦--',': , [0/1] {344} + ¦ ¦ ¦ ¦--expr: vigne [0/0] {346} + ¦ ¦ ¦ ¦ °--SYMBOL: vigne [0/0] {345} + ¦ ¦ ¦ ¦--',': , [0/1] {347} + ¦ ¦ ¦ ¦--expr: readm [0/0] {349} + ¦ ¦ ¦ ¦ °--SYMBOL: readm [0/0] {348} + ¦ ¦ ¦ °--')': ) [0/0] {350} + ¦ ¦ ¦--',': , [0/1] {351} + ¦ ¦ ¦--expr: exclu [0/0] {353} + ¦ ¦ ¦ °--SYMBOL: exclu [0/0] {352} + ¦ ¦ °--')': ) [0/0] {354} + ¦ ¦--expr: trans [1/0] {355} + ¦ ¦ ¦--expr: trans [0/0] {357} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: trans [0/0] {356} + ¦ ¦ ¦--'(': ( [0/0] {358} + ¦ ¦ ¦--expr: files [0/0] {360} + ¦ ¦ ¦ °--SYMBOL: files [0/0] {359} + ¦ ¦ ¦--',': , [0/1] 
{361} + ¦ ¦ ¦--expr: trans [0/0] {363} + ¦ ¦ ¦ °--SYMBOL: trans [0/0] {362} + ¦ ¦ ¦--',': , [0/1] {364} + ¦ ¦ ¦--expr: inclu [0/0] {366} + ¦ ¦ ¦ °--SYMBOL: inclu [0/0] {365} + ¦ ¦ °--')': ) [0/0] {367} + ¦ °--'}': } [1/0] {368} + ¦--COMMENT: #' St [3/0] {369} + ¦--COMMENT: #' [1/0] {370} + ¦--COMMENT: #' St [1/0] {371} + ¦--COMMENT: #' to [1/0] {372} + ¦--COMMENT: #' @p [1/0] {373} + ¦--COMMENT: #' @i [1/0] {374} + ¦--COMMENT: #' @f [1/0] {375} + ¦--COMMENT: #' @e [1/0] {376} + ¦--COMMENT: #' st [1/0] {377} + ¦--COMMENT: #' st [1/0] {378} + ¦--COMMENT: #' st [1/0] {379} + ¦--COMMENT: #' st [1/0] {380} + ¦--COMMENT: #' st [1/0] {381} + ¦--COMMENT: #' # [1/0] {382} + ¦--COMMENT: #' st [1/0] {383} + ¦--COMMENT: #' st [1/0] {384} + ¦--COMMENT: #' @e [1/0] {385} + ¦--expr: style [1/0] {386} + ¦ ¦--expr: style [0/1] {388} + ¦ ¦ °--SYMBOL: style [0/0] {387} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {389} + ¦ °--expr: funct [0/0] {390} + ¦ ¦--FUNCTION: funct [0/0] {391} + ¦ ¦--'(': ( [0/0] {392} + ¦ ¦--SYMBOL_FORMALS: text [0/0] {393} + ¦ ¦--',': , [0/23] {394} + ¦ ¦--SYMBOL_FORMALS: ... [1/0] {395} + ¦ ¦--',': , [0/23] {396} + ¦ ¦--SYMBOL_FORMALS: style [1/1] {397} + ¦ ¦--EQ_FORMALS: = [0/1] {398} + ¦ ¦--expr: tidyv [0/0] {400} + ¦ ¦ °--SYMBOL: tidyv [0/0] {399} + ¦ ¦--',': , [0/23] {401} + ¦ ¦--SYMBOL_FORMALS: trans [1/1] {402} + ¦ ¦--EQ_FORMALS: = [0/1] {403} + ¦ ¦--expr: style [0/0] {404} + ¦ ¦ ¦--expr: style [0/0] {406} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: style [0/0] {405} + ¦ ¦ ¦--'(': ( [0/0] {407} + ¦ ¦ ¦--expr: ... [0/0] {409} + ¦ ¦ ¦ °--SYMBOL: ... [0/0] {408} + ¦ ¦ °--')': ) [0/0] {410} + ¦ ¦--',': , [0/23] {411} + ¦ ¦--SYMBOL_FORMALS: inclu [1/1] {412} + ¦ ¦--EQ_FORMALS: = [0/1] {413} + ¦ ¦--expr: TRUE [0/0] {415} + ¦ ¦ °--NUM_CONST: TRUE [0/0] {414} + ¦ ¦--')': ) [0/1] {416} + ¦ °--expr: { + t [0/0] {417} + ¦ ¦--'{': { [0/2] {418} + ¦ ¦--expr: trans [1/2] {419} + ¦ ¦ ¦--expr: trans [0/1] {421} + ¦ ¦ ¦ °--SYMBOL: trans [0/0] {420} + ¦ ¦ ¦--LEFT_ASSIGN: <- [0/1] {422} + ¦ ¦ °--expr: make_ [0/0] {423} + ¦ ¦ ¦--expr: make_ [0/0] {425} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: make_ [0/0] {424} + ¦ ¦ ¦--'(': ( [0/0] {426} + ¦ ¦ ¦--expr: trans [0/0] {428} + ¦ ¦ ¦ °--SYMBOL: trans [0/0] {427} + ¦ ¦ ¦--',': , [0/1] {429} + ¦ ¦ ¦--expr: inclu [0/0] {431} + ¦ ¦ ¦ °--SYMBOL: inclu [0/0] {430} + ¦ ¦ °--')': ) [0/0] {432} + ¦ ¦--expr: style [1/2] {433} + ¦ ¦ ¦--expr: style [0/1] {435} + ¦ ¦ ¦ °--SYMBOL: style [0/0] {434} + ¦ ¦ ¦--LEFT_ASSIGN: <- [0/1] {436} + ¦ ¦ °--expr: trans [0/0] {437} + ¦ ¦ ¦--expr: trans [0/0] {439} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: trans [0/0] {438} + ¦ ¦ ¦--'(': ( [0/0] {440} + ¦ ¦ ¦--expr: text [0/0] {442} + ¦ ¦ ¦ °--SYMBOL: text [0/0] {441} + ¦ ¦ °--')': ) [0/0] {443} + ¦ ¦--expr: const [1/0] {444} + ¦ ¦ ¦--expr: const [0/0] {446} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: const [0/0] {445} + ¦ ¦ ¦--'(': ( [0/0] {447} + ¦ ¦ ¦--expr: style [0/0] {449} + ¦ ¦ ¦ °--SYMBOL: style [0/0] {448} + ¦ ¦ °--')': ) [0/0] {450} + ¦ °--'}': } [1/0] {451} + ¦--COMMENT: #' Pr [2/0] {452} + ¦--COMMENT: #' [1/0] {453} + ¦--COMMENT: #' Pe [1/0] {454} + ¦--COMMENT: #' Ca [1/0] {455} + ¦--COMMENT: #' @p [1/0] {456} + ¦--COMMENT: #' @p [1/0] {457} + ¦--COMMENT: #' [1/0] {458} + ¦--COMMENT: #' @i [1/0] {459} + ¦--COMMENT: #' @i [1/0] {460} + ¦--COMMENT: #' @i [1/0] {461} + ¦--COMMENT: #' @i [1/0] {462} + ¦--COMMENT: #' @f [1/0] {463} + ¦--COMMENT: #' @e [1/0] {464} + ¦--COMMENT: #' \d [1/0] {465} + ¦--COMMENT: #' st [1/0] {466} + ¦--COMMENT: #' } [1/0] {467} + ¦--COMMENT: #' @e [1/0] {468} + ¦--expr: style [1/0] {469} + ¦ 
¦--expr: style [0/1] {471} + ¦ ¦ °--SYMBOL: style [0/0] {470} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {472} + ¦ °--expr: funct [0/0] {473} + ¦ ¦--FUNCTION: funct [0/0] {474} + ¦ ¦--'(': ( [0/0] {475} + ¦ ¦--SYMBOL_FORMALS: path [0/1] {476} + ¦ ¦--EQ_FORMALS: = [0/1] {477} + ¦ ¦--expr: "." [0/0] {479} + ¦ ¦ °--STR_CONST: "." [0/0] {478} + ¦ ¦--',': , [0/22] {480} + ¦ ¦--SYMBOL_FORMALS: ... [1/0] {481} + ¦ ¦--',': , [0/22] {482} + ¦ ¦--SYMBOL_FORMALS: style [1/1] {483} + ¦ ¦--EQ_FORMALS: = [0/1] {484} + ¦ ¦--expr: tidyv [0/0] {486} + ¦ ¦ °--SYMBOL: tidyv [0/0] {485} + ¦ ¦--',': , [0/22] {487} + ¦ ¦--SYMBOL_FORMALS: trans [1/1] {488} + ¦ ¦--EQ_FORMALS: = [0/1] {489} + ¦ ¦--expr: style [0/0] {490} + ¦ ¦ ¦--expr: style [0/0] {492} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: style [0/0] {491} + ¦ ¦ ¦--'(': ( [0/0] {493} + ¦ ¦ ¦--expr: ... [0/0] {495} + ¦ ¦ ¦ °--SYMBOL: ... [0/0] {494} + ¦ ¦ °--')': ) [0/0] {496} + ¦ ¦--',': , [0/22] {497} + ¦ ¦--SYMBOL_FORMALS: filet [1/1] {498} + ¦ ¦--EQ_FORMALS: = [0/1] {499} + ¦ ¦--expr: "R" [0/0] {501} + ¦ ¦ °--STR_CONST: "R" [0/0] {500} + ¦ ¦--',': , [0/22] {502} + ¦ ¦--SYMBOL_FORMALS: recur [1/1] {503} + ¦ ¦--EQ_FORMALS: = [0/1] {504} + ¦ ¦--expr: TRUE [0/0] {506} + ¦ ¦ °--NUM_CONST: TRUE [0/0] {505} + ¦ ¦--',': , [0/22] {507} + ¦ ¦--SYMBOL_FORMALS: exclu [1/1] {508} + ¦ ¦--EQ_FORMALS: = [0/1] {509} + ¦ ¦--expr: NULL [0/0] {511} + ¦ ¦ °--NULL_CONST: NULL [0/0] {510} + ¦ ¦--',': , [0/22] {512} + ¦ ¦--SYMBOL_FORMALS: inclu [1/1] {513} + ¦ ¦--EQ_FORMALS: = [0/1] {514} + ¦ ¦--expr: TRUE [0/0] {516} + ¦ ¦ °--NUM_CONST: TRUE [0/0] {515} + ¦ ¦--')': ) [0/1] {517} + ¦ °--expr: { + c [0/0] {518} + ¦ ¦--'{': { [0/2] {519} + ¦ ¦--expr: chang [1/2] {520} + ¦ ¦ ¦--expr: chang [0/1] {522} + ¦ ¦ ¦ °--SYMBOL: chang [0/0] {521} + ¦ ¦ ¦--LEFT_ASSIGN: <- [0/1] {523} + ¦ ¦ °--expr: withr [0/0] {524} + ¦ ¦ ¦--expr: withr [0/0] {525} + ¦ ¦ ¦ ¦--SYMBOL_PACKAGE: withr [0/0] {526} + ¦ ¦ ¦ ¦--NS_GET: :: [0/0] {527} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: with_ [0/0] {528} + ¦ ¦ ¦--'(': ( [0/4] {529} + ¦ ¦ ¦--expr: path [1/0] {531} + ¦ ¦ ¦ °--SYMBOL: path [0/0] {530} + ¦ ¦ ¦--',': , [0/1] {532} + ¦ ¦ ¦--expr: prett [0/2] {533} + ¦ ¦ ¦ ¦--expr: prett [0/0] {535} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: prett [0/0] {534} + ¦ ¦ ¦ ¦--'(': ( [0/6] {536} + ¦ ¦ ¦ ¦--expr: trans [1/0] {538} + ¦ ¦ ¦ ¦ °--SYMBOL: trans [0/0] {537} + ¦ ¦ ¦ ¦--',': , [0/1] {539} + ¦ ¦ ¦ ¦--expr: filet [0/0] {541} + ¦ ¦ ¦ ¦ °--SYMBOL: filet [0/0] {540} + ¦ ¦ ¦ ¦--',': , [0/1] {542} + ¦ ¦ ¦ ¦--expr: recur [0/0] {544} + ¦ ¦ ¦ ¦ °--SYMBOL: recur [0/0] {543} + ¦ ¦ ¦ ¦--',': , [0/1] {545} + ¦ ¦ ¦ ¦--expr: exclu [0/0] {547} + ¦ ¦ ¦ ¦ °--SYMBOL: exclu [0/0] {546} + ¦ ¦ ¦ ¦--',': , [0/1] {548} + ¦ ¦ ¦ ¦--expr: inclu [0/4] {550} + ¦ ¦ ¦ ¦ °--SYMBOL: inclu [0/0] {549} + ¦ ¦ ¦ °--')': ) [1/0] {551} + ¦ ¦ °--')': ) [1/0] {552} + ¦ ¦--expr: invis [1/0] {553} + ¦ ¦ ¦--expr: invis [0/0] {555} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: invis [0/0] {554} + ¦ ¦ ¦--'(': ( [0/0] {556} + ¦ ¦ ¦--expr: chang [0/0] {558} + ¦ ¦ ¦ °--SYMBOL: chang [0/0] {557} + ¦ ¦ °--')': ) [0/0] {559} + ¦ °--'}': } [1/0] {560} + ¦--COMMENT: #' Pr [2/0] {561} + ¦--COMMENT: #' [1/0] {562} + ¦--COMMENT: #' Th [1/0] {563} + ¦--COMMENT: #' @i [1/0] {564} + ¦--COMMENT: #' @p [1/0] {565} + ¦--COMMENT: #' [1/0] {566} + ¦--COMMENT: #' @k [1/0] {567} + ¦--expr: prett [1/0] {568} + ¦ ¦--expr: prett [0/1] {570} + ¦ ¦ °--SYMBOL: prett [0/0] {569} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {571} + ¦ °--expr: funct [0/0] {572} + ¦ ¦--FUNCTION: funct [0/0] {573} + ¦ ¦--'(': ( [0/0] {574} + ¦ ¦--SYMBOL_FORMALS: trans [0/0] 
{575} + ¦ ¦--',': , [0/25] {576} + ¦ ¦--SYMBOL_FORMALS: filet [1/0] {577} + ¦ ¦--',': , [0/25] {578} + ¦ ¦--SYMBOL_FORMALS: recur [1/0] {579} + ¦ ¦--',': , [0/25] {580} + ¦ ¦--SYMBOL_FORMALS: exclu [1/0] {581} + ¦ ¦--',': , [0/25] {582} + ¦ ¦--SYMBOL_FORMALS: inclu [1/0] {583} + ¦ ¦--')': ) [0/1] {584} + ¦ °--expr: { + f [0/0] {585} + ¦ ¦--'{': { [0/2] {586} + ¦ ¦--expr: files [1/2] {587} + ¦ ¦ ¦--expr: files [0/1] {589} + ¦ ¦ ¦ °--SYMBOL: files [0/0] {588} + ¦ ¦ ¦--LEFT_ASSIGN: <- [0/1] {590} + ¦ ¦ °--expr: dir( + [0/0] {591} + ¦ ¦ ¦--expr: dir [0/0] {593} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: dir [0/0] {592} + ¦ ¦ ¦--'(': ( [0/4] {594} + ¦ ¦ ¦--SYMBOL_SUB: path [1/1] {595} + ¦ ¦ ¦--EQ_SUB: = [0/1] {596} + ¦ ¦ ¦--expr: "." [0/0] {598} + ¦ ¦ ¦ °--STR_CONST: "." [0/0] {597} + ¦ ¦ ¦--',': , [0/1] {599} + ¦ ¦ ¦--SYMBOL_SUB: patte [0/1] {600} + ¦ ¦ ¦--EQ_SUB: = [0/1] {601} + ¦ ¦ ¦--expr: map_f [0/0] {602} + ¦ ¦ ¦ ¦--expr: map_f [0/0] {604} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: map_f [0/0] {603} + ¦ ¦ ¦ ¦--'(': ( [0/0] {605} + ¦ ¦ ¦ ¦--expr: filet [0/0] {607} + ¦ ¦ ¦ ¦ °--SYMBOL: filet [0/0] {606} + ¦ ¦ ¦ °--')': ) [0/0] {608} + ¦ ¦ ¦--',': , [0/4] {609} + ¦ ¦ ¦--SYMBOL_SUB: ignor [1/1] {610} + ¦ ¦ ¦--EQ_SUB: = [0/1] {611} + ¦ ¦ ¦--expr: TRUE [0/0] {613} + ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {612} + ¦ ¦ ¦--',': , [0/1] {614} + ¦ ¦ ¦--SYMBOL_SUB: recur [0/1] {615} + ¦ ¦ ¦--EQ_SUB: = [0/1] {616} + ¦ ¦ ¦--expr: recur [0/0] {618} + ¦ ¦ ¦ °--SYMBOL: recur [0/0] {617} + ¦ ¦ ¦--',': , [0/1] {619} + ¦ ¦ ¦--SYMBOL_SUB: full. [0/1] {620} + ¦ ¦ ¦--EQ_SUB: = [0/1] {621} + ¦ ¦ ¦--expr: TRUE [0/2] {623} + ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {622} + ¦ ¦ °--')': ) [1/0] {624} + ¦ ¦--expr: trans [1/0] {625} + ¦ ¦ ¦--expr: trans [0/0] {627} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: trans [0/0] {626} + ¦ ¦ ¦--'(': ( [0/4] {628} + ¦ ¦ ¦--expr: setdi [1/0] {629} + ¦ ¦ ¦ ¦--expr: setdi [0/0] {631} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: setdi [0/0] {630} + ¦ ¦ ¦ ¦--'(': ( [0/0] {632} + ¦ ¦ ¦ ¦--expr: files [0/0] {634} + ¦ ¦ ¦ ¦ °--SYMBOL: files [0/0] {633} + ¦ ¦ ¦ ¦--',': , [0/1] {635} + ¦ ¦ ¦ ¦--expr: exclu [0/0] {637} + ¦ ¦ ¦ ¦ °--SYMBOL: exclu [0/0] {636} + ¦ ¦ ¦ °--')': ) [0/0] {638} + ¦ ¦ ¦--',': , [0/1] {639} + ¦ ¦ ¦--expr: trans [0/0] {641} + ¦ ¦ ¦ °--SYMBOL: trans [0/0] {640} + ¦ ¦ ¦--',': , [0/1] {642} + ¦ ¦ ¦--expr: inclu [0/2] {644} + ¦ ¦ ¦ °--SYMBOL: inclu [0/0] {643} + ¦ ¦ °--')': ) [1/0] {645} + ¦ °--'}': } [1/0] {646} + ¦--COMMENT: #' St [2/0] {647} + ¦--COMMENT: #' [1/0] {648} + ¦--COMMENT: #' Pe [1/0] {649} + ¦--COMMENT: #' Ca [1/0] {650} + ¦--COMMENT: #' @s [1/0] {651} + ¦--COMMENT: #' UT [1/0] {652} + ¦--COMMENT: #' be [1/0] {653} + ¦--COMMENT: #' @p [1/0] {654} + ¦--COMMENT: #' @i [1/0] {655} + ¦--COMMENT: #' @i [1/0] {656} + ¦--COMMENT: #' @i [1/0] {657} + ¦--COMMENT: #' @i [1/0] {658} + ¦--COMMENT: #' @e [1/0] {659} + ¦--COMMENT: #' # [1/0] {660} + ¦--COMMENT: #' fi [1/0] {661} + ¦--COMMENT: #' xf [1/0] {662} + ¦--COMMENT: #' st [1/0] {663} + ¦--COMMENT: #' st [1/0] {664} + ¦--COMMENT: #' xf [1/0] {665} + ¦--COMMENT: #' un [1/0] {666} + ¦--COMMENT: #' @f [1/0] {667} + ¦--COMMENT: #' @e [1/0] {668} + °--expr: style [1/0] {669} + ¦--expr: style [0/1] {671} + ¦ °--SYMBOL: style [0/0] {670} + ¦--LEFT_ASSIGN: <- [0/1] {672} + °--expr: funct [0/0] {673} + ¦--FUNCTION: funct [0/0] {674} + ¦--'(': ( [0/0] {675} + ¦--SYMBOL_FORMALS: path [0/0] {676} + ¦--',': , [0/23] {677} + ¦--SYMBOL_FORMALS: ... 
[1/0] {678} + ¦--',': , [0/23] {679} + ¦--SYMBOL_FORMALS: style [1/1] {680} + ¦--EQ_FORMALS: = [0/1] {681} + ¦--expr: tidyv [0/0] {683} + ¦ °--SYMBOL: tidyv [0/0] {682} + ¦--',': , [0/23] {684} + ¦--SYMBOL_FORMALS: trans [1/1] {685} + ¦--EQ_FORMALS: = [0/1] {686} + ¦--expr: style [0/0] {687} + ¦ ¦--expr: style [0/0] {689} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: style [0/0] {688} + ¦ ¦--'(': ( [0/0] {690} + ¦ ¦--expr: ... [0/0] {692} + ¦ ¦ °--SYMBOL: ... [0/0] {691} + ¦ °--')': ) [0/0] {693} + ¦--',': , [0/23] {694} + ¦--SYMBOL_FORMALS: inclu [1/1] {695} + ¦--EQ_FORMALS: = [0/1] {696} + ¦--expr: TRUE [0/0] {698} + ¦ °--NUM_CONST: TRUE [0/0] {697} + ¦--')': ) [0/1] {699} + °--expr: { + c [0/0] {700} + ¦--'{': { [0/2] {701} + ¦--expr: chang [1/2] {702} + ¦ ¦--expr: chang [0/1] {704} + ¦ ¦ °--SYMBOL: chang [0/0] {703} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {705} + ¦ °--expr: withr [0/0] {706} + ¦ ¦--expr: withr [0/0] {707} + ¦ ¦ ¦--SYMBOL_PACKAGE: withr [0/0] {708} + ¦ ¦ ¦--NS_GET: :: [0/0] {709} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: with_ [0/0] {710} + ¦ ¦--'(': ( [0/4] {711} + ¦ ¦--expr: dirna [1/0] {712} + ¦ ¦ ¦--expr: dirna [0/0] {714} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: dirna [0/0] {713} + ¦ ¦ ¦--'(': ( [0/0] {715} + ¦ ¦ ¦--expr: path [0/0] {717} + ¦ ¦ ¦ °--SYMBOL: path [0/0] {716} + ¦ ¦ °--')': ) [0/0] {718} + ¦ ¦--',': , [0/4] {719} + ¦ ¦--expr: trans [1/2] {720} + ¦ ¦ ¦--expr: trans [0/0] {722} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: trans [0/0] {721} + ¦ ¦ ¦--'(': ( [0/0] {723} + ¦ ¦ ¦--expr: basen [0/0] {724} + ¦ ¦ ¦ ¦--expr: basen [0/0] {726} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: basen [0/0] {725} + ¦ ¦ ¦ ¦--'(': ( [0/0] {727} + ¦ ¦ ¦ ¦--expr: path [0/0] {729} + ¦ ¦ ¦ ¦ °--SYMBOL: path [0/0] {728} + ¦ ¦ ¦ °--')': ) [0/0] {730} + ¦ ¦ ¦--',': , [0/1] {731} + ¦ ¦ ¦--expr: trans [0/0] {733} + ¦ ¦ ¦ °--SYMBOL: trans [0/0] {732} + ¦ ¦ ¦--',': , [0/1] {734} + ¦ ¦ ¦--expr: inclu [0/0] {736} + ¦ ¦ ¦ °--SYMBOL: inclu [0/0] {735} + ¦ ¦ °--')': ) [0/0] {737} + ¦ °--')': ) [1/0] {738} + ¦--expr: invis [1/0] {739} + ¦ ¦--expr: invis [0/0] {741} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: invis [0/0] {740} + ¦ ¦--'(': ( [0/0] {742} + ¦ ¦--expr: chang [0/0] {744} + ¦ ¦ °--SYMBOL: chang [0/0] {743} + ¦ °--')': ) [0/0] {745} + °--'}': } [1/0] {746} diff --git a/tests/testthat/roxygen-examples-complete/10-styler-r-ui-out.R b/tests/testthat/roxygen-examples-complete/10-styler-r-ui-out.R new file mode 100644 index 000000000..a7cbf3aca --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/10-styler-r-ui-out.R @@ -0,0 +1,222 @@ +#' @api +#' @import tibble +#' @importFrom magrittr %>% +NULL + +#' Prettify R source code +#' +#' Performs various substitutions in all `.R` files in a package +#' (code and tests). +#' Carefully examine the results after running this function! +#' +#' @param pkg Path to a (subdirectory of an) R package. +#' @param ... Arguments passed on to the `style` function. +#' @param style A function that creates a style guide to use, by default +#' [tidyverse_style()] (without the parentheses). Not used +#' further except to construct the argument `transformers`. See +#' [style_guides()] for details. +#' @param transformers A set of transformer functions. This argument is most +#' conveniently constructed via the `style` argument and `...`. See +#' 'Examples'. +#' @param filetype Vector of file extensions indicating which file types should +#' be styled. Case is ignored, and the `.` is optional, e.g. `c(".R", ".Rmd")` +#' or `c("r", "rmd")`. 
+#' @param exclude_files Character vector with paths to files that should be +#' excluded from styling. +#' @param include_roxygen_examples Whether or not to style code in roxygen +#' examples. +#' @section Warning: +#' This function overwrites files (if styling results in a change of the +#' code to be formatted). It is strongly suggested to only style files +#' that are under version control or to create a backup copy. +#' +#' We suggest to first style with `scope < "tokens"` and inspect and commit +#' changes, because these changes are guaranteed to leave the abstract syntax +#' tree (AST) unchanged. See section 'Roundtrip Validation' for details. +#' +#' Then, we suggest to style with `scope = "tokens"` (if desired) and carefully +#' inspect the changes to make sure the AST is not changed in an unexpected way +#' that invalidates code. +#' @section Roundtrip Validation: +#' The following section describes when and how styling is guaranteed to +#' yield correct code. +#' +#' If the style guide has `scope < "tokens"`, no tokens are changed and the +#' abstract syntax tree (AST) should not change. +#' Hence, it is possible to validate the styling by comparing whether the parsed +#' expression before and after styling have the same AST. +#' This comparison omits comments. styler compares +#' error if the AST has changed through styling. +#' +#' Note that with `scope = "tokens"` such a comparison is not conducted because +#' the AST might well change and such a change is intended. There is no way +#' styler can validate styling, that is why we inform the user to carefully +#' inspect the changes. +#' +#' See section 'Warning' for a good strategy to apply styling safely. +#' @inheritSection transform_files Value +#' @family stylers +#' @examples +#' \dontrun{ +#' style_pkg(style = tidyverse_style, strict = TRUE) +#' style_pkg( +#' scope = "line_breaks", +#' math_token_spacing = specify_math_token_spacing(zero = "'+'") +#' ) +#' } +#' @export +style_pkg <- function(pkg = ".", + ..., + style = tidyverse_style, + transformers = style(...), + filetype = "R", + exclude_files = "R/RcppExports.R", + include_roxygen_examples = TRUE) { + pkg_root <- rprojroot::find_package_root_file(path = pkg) + changed <- withr::with_dir(pkg_root, prettify_pkg( + transformers, filetype, exclude_files, include_roxygen_examples + )) + invisible(changed) +} + +prettify_pkg <- function(transformers, + filetype, + exclude_files, + include_roxygen_examples) { + filetype <- set_and_assert_arg_filetype(filetype) + r_files <- vignette_files <- readme <- NULL + + if ("\\.r" %in% filetype) { + r_files <- dir( + path = c("R", "tests", "data-raw"), pattern = "\\.r$", + ignore.case = TRUE, recursive = TRUE, full.names = TRUE + ) + } + + if ("\\.rmd" %in% filetype) { + vignette_files <- dir( + path = "vignettes", pattern = "\\.rmd$", + ignore.case = TRUE, recursive = TRUE, full.names = TRUE + ) + readme <- dir(pattern = "^readme\\.rmd$", ignore.case = TRUE) + } + + files <- setdiff(c(r_files, vignette_files, readme), exclude_files) + transform_files(files, transformers, include_roxygen_examples) +} + + +#' Style a string +#' +#' Styles a character vector. Each element of the character vector corresponds +#' to one line of code. +#' @param text A character vector with text to style. 
+#' @inheritParams style_pkg +#' @family stylers +#' @examples +#' style_text("call( 1)") +#' style_text("1 + 1", strict = FALSE) +#' style_text("a%>%b", scope = "spaces") +#' style_text("a%>%b; a", scope = "line_breaks") +#' style_text("a%>%b; a", scope = "tokens") +#' # the following is identical but the former is more convenient: +#' style_text("a<-3++1", style = tidyverse_style, strict = TRUE) +#' style_text("a<-3++1", transformers = tidyverse_style(strict = TRUE)) +#' @export +style_text <- function(text, + ..., + style = tidyverse_style, + transformers = style(...), + include_roxygen_examples = TRUE) { + transformer <- make_transformer(transformers, include_roxygen_examples) + styled_text <- transformer(text) + construct_vertical(styled_text) +} + +#' Prettify arbitrary R code +#' +#' Performs various substitutions in all `.R` files in a directory. +#' Carefully examine the results after running this function! +#' @param path Path to a directory with files to transform. +#' @param recursive A logical value indicating whether or not files in subdirectories +#' of `path` should be styled as well. +#' @inheritParams style_pkg +#' @inheritSection transform_files Value +#' @inheritSection style_pkg Warning +#' @inheritSection style_pkg Roundtrip Validation +#' @family stylers +#' @examples +#' \dontrun{ +#' style_dir(file_type = "r") +#' } +#' @export +style_dir <- function(path = ".", + ..., + style = tidyverse_style, + transformers = style(...), + filetype = "R", + recursive = TRUE, + exclude_files = NULL, + include_roxygen_examples = TRUE) { + changed <- withr::with_dir( + path, prettify_any( + transformers, filetype, recursive, exclude_files, include_roxygen_examples + ) + ) + invisible(changed) +} + +#' Prettify R code in current working directory +#' +#' This is a helper function for style_dir. +#' @inheritParams style_pkg +#' @param recursive A logical value indicating whether or not files in subdirectories +#' should be styled as well. +#' @keywords internal +prettify_any <- function(transformers, + filetype, + recursive, + exclude_files, + include_roxygen_examples) { + files <- dir( + path = ".", pattern = map_filetype_to_pattern(filetype), + ignore.case = TRUE, recursive = recursive, full.names = TRUE + ) + transform_files( + setdiff(files, exclude_files), transformers, include_roxygen_examples + ) +} + +#' Style `.R` and/or `.Rmd` files +#' +#' Performs various substitutions in the files specified. +#' Carefully examine the results after running this function! +#' @section Encoding: +#' UTF-8 encoding is assumed. Please convert your code to UTF-8 if necessary +#' before applying styler. +#' @param path A character vector with paths to files to style. 
+#' @inheritParams style_pkg +#' @inheritSection transform_files Value +#' @inheritSection style_pkg Warning +#' @inheritSection style_pkg Roundtrip Validation +#' @examples +#' # the following is identical but the former is more convenient: +#' file <- tempfile("styler", fileext = ".R") +#' xfun::write_utf8("1++1", file) +#' style_file(file, style = tidyverse_style, strict = TRUE) +#' style_file(file, transformers = tidyverse_style(strict = TRUE)) +#' xfun::read_utf8(file) +#' unlink(file) +#' @family stylers +#' @export +style_file <- function(path, + ..., + style = tidyverse_style, + transformers = style(...), + include_roxygen_examples = TRUE) { + changed <- withr::with_dir( + dirname(path), + transform_files(basename(path), transformers, include_roxygen_examples) + ) + invisible(changed) +} diff --git a/tests/testthat/roxygen-examples-complete/11-start-with-dontrun-in.R b/tests/testthat/roxygen-examples-complete/11-start-with-dontrun-in.R new file mode 100644 index 000000000..35a804639 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/11-start-with-dontrun-in.R @@ -0,0 +1,25 @@ +#' Hi +#' +#' x +#' @examples +#' \dontrun{ +#' style_pkg(style = tidyverse_style, strict = TRUE) +#' style_pkg( +#' scope = "line_breaks", +#' math_token_spacing = specfy_math_token_spacing(zero = "'+'") +#' ) +#' } +#' @export +style_pkg <- function(pkg = ".", + ..., + style = tidyverse_style, + transformers = style(...), + filetype = "R", + exclude_files = "R/RcppExports.R", + include_roxygen_examples = TRUE) { + pkg_root <- rprojroot::find_package_root_file(path = pkg) + changed <- withr::with_dir(pkg_root, prettify_pkg( + transformers, filetype, exclude_files, include_roxygen_examples + )) + invisible(changed) +} diff --git a/tests/testthat/roxygen-examples-complete/11-start-with-dontrun-in_tree b/tests/testthat/roxygen-examples-complete/11-start-with-dontrun-in_tree new file mode 100644 index 000000000..9da283113 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/11-start-with-dontrun-in_tree @@ -0,0 +1,113 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' Hi [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' x [1/0] {3} + ¦--COMMENT: #' @e [1/0] {4} + ¦--COMMENT: #' \d [1/0] {5} + ¦--COMMENT: #' st [1/0] {6} + ¦--COMMENT: #' st [1/0] {7} + ¦--COMMENT: #' [1/0] {8} + ¦--COMMENT: #' [1/0] {9} + ¦--COMMENT: #' ) [1/0] {10} + ¦--COMMENT: #' } [1/0] {11} + ¦--COMMENT: #' @e [1/0] {12} + °--expr: style [1/0] {13} + ¦--expr: style [0/1] {15} + ¦ °--SYMBOL: style [0/0] {14} + ¦--LEFT_ASSIGN: <- [0/1] {16} + °--expr: funct [0/0] {17} + ¦--FUNCTION: funct [0/0] {18} + ¦--'(': ( [0/0] {19} + ¦--SYMBOL_FORMALS: pkg [0/1] {20} + ¦--EQ_FORMALS: = [0/1] {21} + ¦--expr: "." [0/0] {23} + ¦ °--STR_CONST: "." [0/0] {22} + ¦--',': , [0/22] {24} + ¦--SYMBOL_FORMALS: ... [1/0] {25} + ¦--',': , [0/22] {26} + ¦--SYMBOL_FORMALS: style [1/1] {27} + ¦--EQ_FORMALS: = [0/1] {28} + ¦--expr: tidyv [0/0] {30} + ¦ °--SYMBOL: tidyv [0/0] {29} + ¦--',': , [0/22] {31} + ¦--SYMBOL_FORMALS: trans [1/1] {32} + ¦--EQ_FORMALS: = [0/1] {33} + ¦--expr: style [0/0] {34} + ¦ ¦--expr: style [0/0] {36} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: style [0/0] {35} + ¦ ¦--'(': ( [0/0] {37} + ¦ ¦--expr: ... [0/0] {39} + ¦ ¦ °--SYMBOL: ... 
[0/0] {38} + ¦ °--')': ) [0/0] {40} + ¦--',': , [0/22] {41} + ¦--SYMBOL_FORMALS: filet [1/1] {42} + ¦--EQ_FORMALS: = [0/1] {43} + ¦--expr: "R" [0/0] {45} + ¦ °--STR_CONST: "R" [0/0] {44} + ¦--',': , [0/22] {46} + ¦--SYMBOL_FORMALS: exclu [1/1] {47} + ¦--EQ_FORMALS: = [0/1] {48} + ¦--expr: "R/Rc [0/0] {50} + ¦ °--STR_CONST: "R/Rc [0/0] {49} + ¦--',': , [0/22] {51} + ¦--SYMBOL_FORMALS: inclu [1/1] {52} + ¦--EQ_FORMALS: = [0/1] {53} + ¦--expr: TRUE [0/0] {55} + ¦ °--NUM_CONST: TRUE [0/0] {54} + ¦--')': ) [0/1] {56} + °--expr: { + p [0/0] {57} + ¦--'{': { [0/2] {58} + ¦--expr: pkg_r [1/2] {59} + ¦ ¦--expr: pkg_r [0/1] {61} + ¦ ¦ °--SYMBOL: pkg_r [0/0] {60} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {62} + ¦ °--expr: rproj [0/0] {63} + ¦ ¦--expr: rproj [0/0] {64} + ¦ ¦ ¦--SYMBOL_PACKAGE: rproj [0/0] {65} + ¦ ¦ ¦--NS_GET: :: [0/0] {66} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: find_ [0/0] {67} + ¦ ¦--'(': ( [0/0] {68} + ¦ ¦--SYMBOL_SUB: path [0/1] {69} + ¦ ¦--EQ_SUB: = [0/1] {70} + ¦ ¦--expr: pkg [0/0] {72} + ¦ ¦ °--SYMBOL: pkg [0/0] {71} + ¦ °--')': ) [0/0] {73} + ¦--expr: chang [1/2] {74} + ¦ ¦--expr: chang [0/1] {76} + ¦ ¦ °--SYMBOL: chang [0/0] {75} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {77} + ¦ °--expr: withr [0/0] {78} + ¦ ¦--expr: withr [0/0] {79} + ¦ ¦ ¦--SYMBOL_PACKAGE: withr [0/0] {80} + ¦ ¦ ¦--NS_GET: :: [0/0] {81} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: with_ [0/0] {82} + ¦ ¦--'(': ( [0/0] {83} + ¦ ¦--expr: pkg_r [0/0] {85} + ¦ ¦ °--SYMBOL: pkg_r [0/0] {84} + ¦ ¦--',': , [0/1] {86} + ¦ ¦--expr: prett [0/0] {87} + ¦ ¦ ¦--expr: prett [0/0] {89} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: prett [0/0] {88} + ¦ ¦ ¦--'(': ( [0/4] {90} + ¦ ¦ ¦--expr: trans [1/0] {92} + ¦ ¦ ¦ °--SYMBOL: trans [0/0] {91} + ¦ ¦ ¦--',': , [0/1] {93} + ¦ ¦ ¦--expr: filet [0/0] {95} + ¦ ¦ ¦ °--SYMBOL: filet [0/0] {94} + ¦ ¦ ¦--',': , [0/1] {96} + ¦ ¦ ¦--expr: exclu [0/0] {98} + ¦ ¦ ¦ °--SYMBOL: exclu [0/0] {97} + ¦ ¦ ¦--',': , [0/1] {99} + ¦ ¦ ¦--expr: inclu [0/2] {101} + ¦ ¦ ¦ °--SYMBOL: inclu [0/0] {100} + ¦ ¦ °--')': ) [1/0] {102} + ¦ °--')': ) [0/0] {103} + ¦--expr: invis [1/0] {104} + ¦ ¦--expr: invis [0/0] {106} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: invis [0/0] {105} + ¦ ¦--'(': ( [0/0] {107} + ¦ ¦--expr: chang [0/0] {109} + ¦ ¦ °--SYMBOL: chang [0/0] {108} + ¦ °--')': ) [0/0] {110} + °--'}': } [1/0] {111} diff --git a/tests/testthat/roxygen-examples-complete/11-start-with-dontrun-out.R b/tests/testthat/roxygen-examples-complete/11-start-with-dontrun-out.R new file mode 100644 index 000000000..35a804639 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/11-start-with-dontrun-out.R @@ -0,0 +1,25 @@ +#' Hi +#' +#' x +#' @examples +#' \dontrun{ +#' style_pkg(style = tidyverse_style, strict = TRUE) +#' style_pkg( +#' scope = "line_breaks", +#' math_token_spacing = specfy_math_token_spacing(zero = "'+'") +#' ) +#' } +#' @export +style_pkg <- function(pkg = ".", + ..., + style = tidyverse_style, + transformers = style(...), + filetype = "R", + exclude_files = "R/RcppExports.R", + include_roxygen_examples = TRUE) { + pkg_root <- rprojroot::find_package_root_file(path = pkg) + changed <- withr::with_dir(pkg_root, prettify_pkg( + transformers, filetype, exclude_files, include_roxygen_examples + )) + invisible(changed) +} diff --git a/tests/testthat/roxygen-examples-complete/12-dontshow-dontrun-donttest-in.R b/tests/testthat/roxygen-examples-complete/12-dontshow-dontrun-donttest-in.R new file mode 100644 index 000000000..d52ce0b89 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/12-dontshow-dontrun-donttest-in.R @@ -0,0 +1,93 @@ + +#' Create a style guide 
+#' +#' @param reindention A list of parameters for regex re-indention, most +#' conveniently constructed using [specify_reindention()]. +#' @examples +#' set_line_break_before_crly_opening <- function(pd_flat) { +#' op <- pd_flat$token %in% "'{'" +#' pd_flat$lag_newlines[op] <- 1L +#' pd_flat +#' } +#' @examples +#' \dontshow{ +#' { +#' x +#' } +#' } +#' set_line_break_before_curly_opening_style <- function() { +#' create_style_guide(line_break = list(set_line_break_before_curly_opening)) +#' } +#' @examples +#' \dontrun{ +#' style_text("a <- function(x) { x } +#' ", style = set_line_break_before_curly_opening_style) +#' } +#' @importFrom purrr compact +#' @export +create_style_guide <- function(initialize = default_style_guide_attributes, + line_break = NULL, + space = NULL, + token = NULL, + indention = NULL, + use_raw_indention = FALSE, + reindention = tidyverse_reindention()) { + list( + # transformer functions + initialize = list(initialize), + line_break, + space, + token, + indention, + # transformer options + use_raw_indention, + reindention + ) %>% + map(compact) +} + + +#' Create a style guide +#' +#' @param reindention A list of parameters for regex re-indention, most +#' conveniently constructed using [specify_reindention()]. +#' @examples +#' set_line_break_before_crly_opening <- function(pd_flat) { +#' op <- pd_flat$token %in% "'{'" +#' pd_flat$lag_newlines[op] <- 1L +#' pd_flat +#' } +#' @examples +#' \dontshow{ +#' {x +#' } +#' } +#' set_line_break_before_curly_opening_style <- function() { +#' create_style_guide(line_break= list(set_line_break_before_curly_opening)) +#' } +#' @examples +#' \donttest{style_text("a <- function(x) { x } +#' ", style = set_line_break_before_curly_opening_style) +#' } +#' @importFrom purrr compact +#' @export +create_style_guide <- function(initialize = default_style_guide_attributes, + line_break = NULL, + space = NULL, + token = NULL, + indention = NULL, + use_raw_indention = FALSE, + reindention = tidyverse_reindention()) { + list( + #transformer functions + initialize = list(initialize), + line_break, + space, + token, + indention, + # transformer options + use_raw_indention, + reindention + )%>% + map(compact) +} diff --git a/tests/testthat/roxygen-examples-complete/12-dontshow-dontrun-donttest-in_tree b/tests/testthat/roxygen-examples-complete/12-dontshow-dontrun-donttest-in_tree new file mode 100644 index 000000000..2abe159d9 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/12-dontshow-dontrun-donttest-in_tree @@ -0,0 +1,235 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' Cr [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' @p [1/0] {3} + ¦--COMMENT: #' [1/0] {4} + ¦--COMMENT: #' @e [1/0] {5} + ¦--COMMENT: #' se [1/0] {6} + ¦--COMMENT: #' [1/0] {7} + ¦--COMMENT: #' [1/0] {8} + ¦--COMMENT: #' [1/0] {9} + ¦--COMMENT: #' } [1/0] {10} + ¦--COMMENT: #' @e [1/0] {11} + ¦--COMMENT: #' \d [1/0] {12} + ¦--COMMENT: #' { [1/0] {13} + ¦--COMMENT: #' [1/0] {14} + ¦--COMMENT: #' } [1/0] {15} + ¦--COMMENT: #' } [1/0] {16} + ¦--COMMENT: #' se [1/0] {17} + ¦--COMMENT: #' [1/0] {18} + ¦--COMMENT: #' } [1/0] {19} + ¦--COMMENT: #' @e [1/0] {20} + ¦--COMMENT: #' \d [1/0] {21} + ¦--COMMENT: #' st [1/0] {22} + ¦--COMMENT: #' ", [1/0] {23} + ¦--COMMENT: #' } [1/0] {24} + ¦--COMMENT: #' @i [1/0] {25} + ¦--COMMENT: #' @e [1/0] {26} + ¦--expr: creat [1/0] {27} + ¦ ¦--expr: creat [0/1] {29} + ¦ ¦ °--SYMBOL: creat [0/0] {28} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {30} + ¦ °--expr: funct [0/0] {31} + ¦ ¦--FUNCTION: funct [0/0] {32} 
+ ¦ ¦--'(': ( [0/0] {33} + ¦ ¦--SYMBOL_FORMALS: initi [0/1] {34} + ¦ ¦--EQ_FORMALS: = [0/1] {35} + ¦ ¦--expr: defau [0/0] {37} + ¦ ¦ °--SYMBOL: defau [0/0] {36} + ¦ ¦--',': , [0/31] {38} + ¦ ¦--SYMBOL_FORMALS: line_ [1/1] {39} + ¦ ¦--EQ_FORMALS: = [0/1] {40} + ¦ ¦--expr: NULL [0/0] {42} + ¦ ¦ °--NULL_CONST: NULL [0/0] {41} + ¦ ¦--',': , [0/31] {43} + ¦ ¦--SYMBOL_FORMALS: space [1/1] {44} + ¦ ¦--EQ_FORMALS: = [0/1] {45} + ¦ ¦--expr: NULL [0/0] {47} + ¦ ¦ °--NULL_CONST: NULL [0/0] {46} + ¦ ¦--',': , [0/31] {48} + ¦ ¦--SYMBOL_FORMALS: token [1/1] {49} + ¦ ¦--EQ_FORMALS: = [0/1] {50} + ¦ ¦--expr: NULL [0/0] {52} + ¦ ¦ °--NULL_CONST: NULL [0/0] {51} + ¦ ¦--',': , [0/31] {53} + ¦ ¦--SYMBOL_FORMALS: inden [1/1] {54} + ¦ ¦--EQ_FORMALS: = [0/1] {55} + ¦ ¦--expr: NULL [0/0] {57} + ¦ ¦ °--NULL_CONST: NULL [0/0] {56} + ¦ ¦--',': , [0/31] {58} + ¦ ¦--SYMBOL_FORMALS: use_r [1/1] {59} + ¦ ¦--EQ_FORMALS: = [0/1] {60} + ¦ ¦--expr: FALSE [0/0] {62} + ¦ ¦ °--NUM_CONST: FALSE [0/0] {61} + ¦ ¦--',': , [0/31] {63} + ¦ ¦--SYMBOL_FORMALS: reind [1/1] {64} + ¦ ¦--EQ_FORMALS: = [0/1] {65} + ¦ ¦--expr: tidyv [0/0] {66} + ¦ ¦ ¦--expr: tidyv [0/0] {68} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: tidyv [0/0] {67} + ¦ ¦ ¦--'(': ( [0/0] {69} + ¦ ¦ °--')': ) [0/0] {70} + ¦ ¦--')': ) [0/1] {71} + ¦ °--expr: { + l [0/0] {72} + ¦ ¦--'{': { [0/2] {73} + ¦ ¦--expr: list( [1/0] {74} + ¦ ¦ ¦--expr: list( [0/1] {75} + ¦ ¦ ¦ ¦--expr: list [0/0] {77} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: list [0/0] {76} + ¦ ¦ ¦ ¦--'(': ( [0/4] {78} + ¦ ¦ ¦ ¦--COMMENT: # tra [1/4] {79} + ¦ ¦ ¦ ¦--SYMBOL_SUB: initi [1/1] {80} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {81} + ¦ ¦ ¦ ¦--expr: list( [0/0] {82} + ¦ ¦ ¦ ¦ ¦--expr: list [0/0] {84} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: list [0/0] {83} + ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {85} + ¦ ¦ ¦ ¦ ¦--expr: initi [0/0] {87} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL: initi [0/0] {86} + ¦ ¦ ¦ ¦ °--')': ) [0/0] {88} + ¦ ¦ ¦ ¦--',': , [0/4] {89} + ¦ ¦ ¦ ¦--expr: line_ [1/0] {91} + ¦ ¦ ¦ ¦ °--SYMBOL: line_ [0/0] {90} + ¦ ¦ ¦ ¦--',': , [0/4] {92} + ¦ ¦ ¦ ¦--expr: space [1/0] {94} + ¦ ¦ ¦ ¦ °--SYMBOL: space [0/0] {93} + ¦ ¦ ¦ ¦--',': , [0/4] {95} + ¦ ¦ ¦ ¦--expr: token [1/0] {97} + ¦ ¦ ¦ ¦ °--SYMBOL: token [0/0] {96} + ¦ ¦ ¦ ¦--',': , [0/4] {98} + ¦ ¦ ¦ ¦--expr: inden [1/0] {100} + ¦ ¦ ¦ ¦ °--SYMBOL: inden [0/0] {99} + ¦ ¦ ¦ ¦--',': , [0/4] {101} + ¦ ¦ ¦ ¦--COMMENT: # tra [1/4] {102} + ¦ ¦ ¦ ¦--expr: use_r [1/0] {104} + ¦ ¦ ¦ ¦ °--SYMBOL: use_r [0/0] {103} + ¦ ¦ ¦ ¦--',': , [0/4] {105} + ¦ ¦ ¦ ¦--expr: reind [1/2] {107} + ¦ ¦ ¦ ¦ °--SYMBOL: reind [0/0] {106} + ¦ ¦ ¦ °--')': ) [1/0] {108} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {109} + ¦ ¦ °--expr: map(c [1/0] {110} + ¦ ¦ ¦--expr: map [0/0] {112} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: map [0/0] {111} + ¦ ¦ ¦--'(': ( [0/0] {113} + ¦ ¦ ¦--expr: compa [0/0] {115} + ¦ ¦ ¦ °--SYMBOL: compa [0/0] {114} + ¦ ¦ °--')': ) [0/0] {116} + ¦ °--'}': } [1/0] {117} + ¦--COMMENT: #' Cr [3/0] {118} + ¦--COMMENT: #' [1/0] {119} + ¦--COMMENT: #' @p [1/0] {120} + ¦--COMMENT: #' [1/0] {121} + ¦--COMMENT: #' @e [1/0] {122} + ¦--COMMENT: #' se [1/0] {123} + ¦--COMMENT: #' [1/0] {124} + ¦--COMMENT: #' [1/0] {125} + ¦--COMMENT: #' [1/0] {126} + ¦--COMMENT: #' } [1/0] {127} + ¦--COMMENT: #' @e [1/0] {128} + ¦--COMMENT: #' \d [1/0] {129} + ¦--COMMENT: #' {x [1/0] {130} + ¦--COMMENT: #' } [1/0] {131} + ¦--COMMENT: #' } [1/0] {132} + ¦--COMMENT: #' se [1/0] {133} + ¦--COMMENT: #' cr [1/0] {134} + ¦--COMMENT: #' } [1/0] {135} + ¦--COMMENT: #' @e [1/0] {136} + ¦--COMMENT: #' \d [1/0] {137} + ¦--COMMENT: #' ", [1/0] {138} + ¦--COMMENT: #' } [1/0] {139} + ¦--COMMENT: 
#' @i [1/0] {140} + ¦--COMMENT: #' @e [1/0] {141} + °--expr: creat [1/0] {142} + ¦--expr: creat [0/1] {144} + ¦ °--SYMBOL: creat [0/0] {143} + ¦--LEFT_ASSIGN: <- [0/1] {145} + °--expr: funct [0/0] {146} + ¦--FUNCTION: funct [0/0] {147} + ¦--'(': ( [0/0] {148} + ¦--SYMBOL_FORMALS: initi [0/1] {149} + ¦--EQ_FORMALS: = [0/1] {150} + ¦--expr: defau [0/0] {152} + ¦ °--SYMBOL: defau [0/0] {151} + ¦--',': , [0/31] {153} + ¦--SYMBOL_FORMALS: line_ [1/1] {154} + ¦--EQ_FORMALS: = [0/1] {155} + ¦--expr: NULL [0/0] {157} + ¦ °--NULL_CONST: NULL [0/0] {156} + ¦--',': , [0/31] {158} + ¦--SYMBOL_FORMALS: space [1/1] {159} + ¦--EQ_FORMALS: = [0/1] {160} + ¦--expr: NULL [0/0] {162} + ¦ °--NULL_CONST: NULL [0/0] {161} + ¦--',': , [0/31] {163} + ¦--SYMBOL_FORMALS: token [1/1] {164} + ¦--EQ_FORMALS: = [0/1] {165} + ¦--expr: NULL [0/0] {167} + ¦ °--NULL_CONST: NULL [0/0] {166} + ¦--',': , [0/31] {168} + ¦--SYMBOL_FORMALS: inden [1/1] {169} + ¦--EQ_FORMALS: = [0/1] {170} + ¦--expr: NULL [0/0] {172} + ¦ °--NULL_CONST: NULL [0/0] {171} + ¦--',': , [0/31] {173} + ¦--SYMBOL_FORMALS: use_r [1/1] {174} + ¦--EQ_FORMALS: = [0/1] {175} + ¦--expr: FALSE [0/0] {177} + ¦ °--NUM_CONST: FALSE [0/0] {176} + ¦--',': , [0/31] {178} + ¦--SYMBOL_FORMALS: reind [1/1] {179} + ¦--EQ_FORMALS: = [0/1] {180} + ¦--expr: tidyv [0/0] {181} + ¦ ¦--expr: tidyv [0/0] {183} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: tidyv [0/0] {182} + ¦ ¦--'(': ( [0/0] {184} + ¦ °--')': ) [0/0] {185} + ¦--')': ) [0/1] {186} + °--expr: { + l [0/0] {187} + ¦--'{': { [0/2] {188} + ¦--expr: list( [1/0] {189} + ¦ ¦--expr: list( [0/0] {190} + ¦ ¦ ¦--expr: list [0/0] {192} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: list [0/0] {191} + ¦ ¦ ¦--'(': ( [0/4] {193} + ¦ ¦ ¦--COMMENT: #tran [1/4] {194} + ¦ ¦ ¦--SYMBOL_SUB: initi [1/1] {195} + ¦ ¦ ¦--EQ_SUB: = [0/1] {196} + ¦ ¦ ¦--expr: list( [0/0] {197} + ¦ ¦ ¦ ¦--expr: list [0/0] {199} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: list [0/0] {198} + ¦ ¦ ¦ ¦--'(': ( [0/0] {200} + ¦ ¦ ¦ ¦--expr: initi [0/0] {202} + ¦ ¦ ¦ ¦ °--SYMBOL: initi [0/0] {201} + ¦ ¦ ¦ °--')': ) [0/0] {203} + ¦ ¦ ¦--',': , [0/4] {204} + ¦ ¦ ¦--expr: line_ [1/0] {206} + ¦ ¦ ¦ °--SYMBOL: line_ [0/0] {205} + ¦ ¦ ¦--',': , [0/4] {207} + ¦ ¦ ¦--expr: space [1/0] {209} + ¦ ¦ ¦ °--SYMBOL: space [0/0] {208} + ¦ ¦ ¦--',': , [0/4] {210} + ¦ ¦ ¦--expr: token [1/0] {212} + ¦ ¦ ¦ °--SYMBOL: token [0/0] {211} + ¦ ¦ ¦--',': , [0/4] {213} + ¦ ¦ ¦--expr: inden [1/0] {215} + ¦ ¦ ¦ °--SYMBOL: inden [0/0] {214} + ¦ ¦ ¦--',': , [0/4] {216} + ¦ ¦ ¦--COMMENT: # tra [1/4] {217} + ¦ ¦ ¦--expr: use_r [1/0] {219} + ¦ ¦ ¦ °--SYMBOL: use_r [0/0] {218} + ¦ ¦ ¦--',': , [0/4] {220} + ¦ ¦ ¦--expr: reind [1/2] {222} + ¦ ¦ ¦ °--SYMBOL: reind [0/0] {221} + ¦ ¦ °--')': ) [1/0] {223} + ¦ ¦--SPECIAL-PIPE: %>% [0/4] {224} + ¦ °--expr: map(c [1/0] {225} + ¦ ¦--expr: map [0/0] {227} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: map [0/0] {226} + ¦ ¦--'(': ( [0/0] {228} + ¦ ¦--expr: compa [0/0] {230} + ¦ ¦ °--SYMBOL: compa [0/0] {229} + ¦ °--')': ) [0/0] {231} + °--'}': } [1/0] {232} diff --git a/tests/testthat/roxygen-examples-complete/12-dontshow-dontrun-donttest-out.R b/tests/testthat/roxygen-examples-complete/12-dontshow-dontrun-donttest-out.R new file mode 100644 index 000000000..3a260d018 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/12-dontshow-dontrun-donttest-out.R @@ -0,0 +1,94 @@ +#' Create a style guide +#' +#' @param reindention A list of parameters for regex re-indention, most +#' conveniently constructed using [specify_reindention()]. 
+#' @examples +#' set_line_break_before_crly_opening <- function(pd_flat) { +#' op <- pd_flat$token %in% "'{'" +#' pd_flat$lag_newlines[op] <- 1L +#' pd_flat +#' } +#' @examples +#' \dontshow{ +#' { +#' x +#' } +#' } +#' set_line_break_before_curly_opening_style <- function() { +#' create_style_guide(line_break = list(set_line_break_before_curly_opening)) +#' } +#' @examples +#' \dontrun{ +#' style_text("a <- function(x) { x } +#' ", style = set_line_break_before_curly_opening_style) +#' } +#' @importFrom purrr compact +#' @export +create_style_guide <- function(initialize = default_style_guide_attributes, + line_break = NULL, + space = NULL, + token = NULL, + indention = NULL, + use_raw_indention = FALSE, + reindention = tidyverse_reindention()) { + list( + # transformer functions + initialize = list(initialize), + line_break, + space, + token, + indention, + # transformer options + use_raw_indention, + reindention + ) %>% + map(compact) +} + + +#' Create a style guide +#' +#' @param reindention A list of parameters for regex re-indention, most +#' conveniently constructed using [specify_reindention()]. +#' @examples +#' set_line_break_before_crly_opening <- function(pd_flat) { +#' op <- pd_flat$token %in% "'{'" +#' pd_flat$lag_newlines[op] <- 1L +#' pd_flat +#' } +#' @examples +#' \dontshow{ +#' { +#' x +#' } +#' } +#' set_line_break_before_curly_opening_style <- function() { +#' create_style_guide(line_break = list(set_line_break_before_curly_opening)) +#' } +#' @examples +#' \donttest{ +#' style_text("a <- function(x) { x } +#' ", style = set_line_break_before_curly_opening_style) +#' } +#' @importFrom purrr compact +#' @export +create_style_guide <- function(initialize = default_style_guide_attributes, + line_break = NULL, + space = NULL, + token = NULL, + indention = NULL, + use_raw_indention = FALSE, + reindention = tidyverse_reindention()) { + list( + # transformer functions + initialize = list(initialize), + line_break, + space, + token, + indention, + # transformer options + use_raw_indention, + reindention + ) %>% + map(compact) +} diff --git a/tests/testthat/roxygen-examples-complete/12-fun-decs-in-examples-in.R b/tests/testthat/roxygen-examples-complete/12-fun-decs-in-examples-in.R new file mode 100644 index 000000000..aa4c21362 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/12-fun-decs-in-examples-in.R @@ -0,0 +1,77 @@ + +#' Create a style guide +#' +#' @param reindention A list of parameters for regex re-indention, most +#' conveniently constructed using [specify_reindention()]. 
+#' @examples +#' set_line_break_before_crly_opening <- function(pd_flat) { +#' op <- pd_flat$token %in% "'{'" +#' pd_flat$lag_newlines[op] <- 1L +#' pd_flat +#' } +#' set_line_break_before_curly_opening_style <- function() { +#' create_style_guide(line_break = list(set_line_break_before_curly_opening)) +#' } +#' style_text("a <- function(x) { x } +#' ", style = set_line_break_before_curly_opening_style) +#' \donttest{ +#' set_line_break_before_crly_opening <- function(pd_flat) { +#' op <- pd_flat$token %in% "'{'" +#' pd_flat$lag_newlines[op] <- 1L +#' pd_flat +#' } +#' } +#' @importFrom purrr compact +#' @export +create_style_guide <- function(initialize = default_style_guide_attributes, + line_break = NULL, + space = NULL, + token = NULL, + indention = NULL, + use_raw_indention = FALSE, + reindention = tidyverse_reindention()) { + list( + # transformer functions + initialize = list(initialize), + line_break, + space, + token, + indention, + # transformer options + use_raw_indention, + reindention + ) %>% + map(compact) +} + +#' Another +#' @examples +#' \donttest{ +#' op= pd_flat$token %in% "'('" +#' } +#' \donttest{ +#' op <- pd_flat$token %in% "')'" +#' } +#' \donttest{ +#' op <- pd_flat$token %in% "(" +#' } +#' \donttest{ +#' op <- pd_flat$token %in% ")" +#' } +#' \donttest{ +#' op <- pd_flat$token %in% "{" +#' } +#' \donttest{ +#' op<- pd_flat$token %in% "}" +#' } +#' op <-pd_flat$token %in% "'['" +#' \donttest{ +#' op <- pd_flat$token %in% "']'" +#' } +#' \donttest{ +#' op <- pd_flat$token%in% "[" +#' } +#' \donttest{ +#' op <- pd_flat$token %in%"]" +#' } +NULL diff --git a/tests/testthat/roxygen-examples-complete/12-fun-decs-in-examples-in_tree b/tests/testthat/roxygen-examples-complete/12-fun-decs-in-examples-in_tree new file mode 100644 index 000000000..bd1f31905 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/12-fun-decs-in-examples-in_tree @@ -0,0 +1,149 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' Cr [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' @p [1/0] {3} + ¦--COMMENT: #' [1/0] {4} + ¦--COMMENT: #' @e [1/0] {5} + ¦--COMMENT: #' se [1/0] {6} + ¦--COMMENT: #' [1/0] {7} + ¦--COMMENT: #' [1/0] {8} + ¦--COMMENT: #' [1/0] {9} + ¦--COMMENT: #' } [1/0] {10} + ¦--COMMENT: #' se [1/0] {11} + ¦--COMMENT: #' [1/0] {12} + ¦--COMMENT: #' } [1/0] {13} + ¦--COMMENT: #' st [1/0] {14} + ¦--COMMENT: #' ", [1/0] {15} + ¦--COMMENT: #' \d [1/0] {16} + ¦--COMMENT: #' se [1/0] {17} + ¦--COMMENT: #' [1/0] {18} + ¦--COMMENT: #' [1/0] {19} + ¦--COMMENT: #' [1/0] {20} + ¦--COMMENT: #' } [1/0] {21} + ¦--COMMENT: #' } [1/0] {22} + ¦--COMMENT: #' @i [1/0] {23} + ¦--COMMENT: #' @e [1/0] {24} + ¦--expr: creat [1/0] {25} + ¦ ¦--expr: creat [0/1] {27} + ¦ ¦ °--SYMBOL: creat [0/0] {26} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {28} + ¦ °--expr: funct [0/0] {29} + ¦ ¦--FUNCTION: funct [0/0] {30} + ¦ ¦--'(': ( [0/0] {31} + ¦ ¦--SYMBOL_FORMALS: initi [0/1] {32} + ¦ ¦--EQ_FORMALS: = [0/1] {33} + ¦ ¦--expr: defau [0/0] {35} + ¦ ¦ °--SYMBOL: defau [0/0] {34} + ¦ ¦--',': , [0/31] {36} + ¦ ¦--SYMBOL_FORMALS: line_ [1/1] {37} + ¦ ¦--EQ_FORMALS: = [0/1] {38} + ¦ ¦--expr: NULL [0/0] {40} + ¦ ¦ °--NULL_CONST: NULL [0/0] {39} + ¦ ¦--',': , [0/31] {41} + ¦ ¦--SYMBOL_FORMALS: space [1/1] {42} + ¦ ¦--EQ_FORMALS: = [0/1] {43} + ¦ ¦--expr: NULL [0/0] {45} + ¦ ¦ °--NULL_CONST: NULL [0/0] {44} + ¦ ¦--',': , [0/31] {46} + ¦ ¦--SYMBOL_FORMALS: token [1/1] {47} + ¦ ¦--EQ_FORMALS: = [0/1] {48} + ¦ ¦--expr: NULL [0/0] {50} + ¦ ¦ °--NULL_CONST: NULL [0/0] {49} + ¦ ¦--',': , [0/31] {51} + ¦ 
¦--SYMBOL_FORMALS: inden [1/1] {52} + ¦ ¦--EQ_FORMALS: = [0/1] {53} + ¦ ¦--expr: NULL [0/0] {55} + ¦ ¦ °--NULL_CONST: NULL [0/0] {54} + ¦ ¦--',': , [0/31] {56} + ¦ ¦--SYMBOL_FORMALS: use_r [1/1] {57} + ¦ ¦--EQ_FORMALS: = [0/1] {58} + ¦ ¦--expr: FALSE [0/0] {60} + ¦ ¦ °--NUM_CONST: FALSE [0/0] {59} + ¦ ¦--',': , [0/31] {61} + ¦ ¦--SYMBOL_FORMALS: reind [1/1] {62} + ¦ ¦--EQ_FORMALS: = [0/1] {63} + ¦ ¦--expr: tidyv [0/0] {64} + ¦ ¦ ¦--expr: tidyv [0/0] {66} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: tidyv [0/0] {65} + ¦ ¦ ¦--'(': ( [0/0] {67} + ¦ ¦ °--')': ) [0/0] {68} + ¦ ¦--')': ) [0/1] {69} + ¦ °--expr: { + l [0/0] {70} + ¦ ¦--'{': { [0/2] {71} + ¦ ¦--expr: list( [1/0] {72} + ¦ ¦ ¦--expr: list( [0/1] {73} + ¦ ¦ ¦ ¦--expr: list [0/0] {75} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: list [0/0] {74} + ¦ ¦ ¦ ¦--'(': ( [0/4] {76} + ¦ ¦ ¦ ¦--COMMENT: # tra [1/4] {77} + ¦ ¦ ¦ ¦--SYMBOL_SUB: initi [1/1] {78} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {79} + ¦ ¦ ¦ ¦--expr: list( [0/0] {80} + ¦ ¦ ¦ ¦ ¦--expr: list [0/0] {82} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: list [0/0] {81} + ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {83} + ¦ ¦ ¦ ¦ ¦--expr: initi [0/0] {85} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL: initi [0/0] {84} + ¦ ¦ ¦ ¦ °--')': ) [0/0] {86} + ¦ ¦ ¦ ¦--',': , [0/4] {87} + ¦ ¦ ¦ ¦--expr: line_ [1/0] {89} + ¦ ¦ ¦ ¦ °--SYMBOL: line_ [0/0] {88} + ¦ ¦ ¦ ¦--',': , [0/4] {90} + ¦ ¦ ¦ ¦--expr: space [1/0] {92} + ¦ ¦ ¦ ¦ °--SYMBOL: space [0/0] {91} + ¦ ¦ ¦ ¦--',': , [0/4] {93} + ¦ ¦ ¦ ¦--expr: token [1/0] {95} + ¦ ¦ ¦ ¦ °--SYMBOL: token [0/0] {94} + ¦ ¦ ¦ ¦--',': , [0/4] {96} + ¦ ¦ ¦ ¦--expr: inden [1/0] {98} + ¦ ¦ ¦ ¦ °--SYMBOL: inden [0/0] {97} + ¦ ¦ ¦ ¦--',': , [0/4] {99} + ¦ ¦ ¦ ¦--COMMENT: # tra [1/4] {100} + ¦ ¦ ¦ ¦--expr: use_r [1/0] {102} + ¦ ¦ ¦ ¦ °--SYMBOL: use_r [0/0] {101} + ¦ ¦ ¦ ¦--',': , [0/4] {103} + ¦ ¦ ¦ ¦--expr: reind [1/2] {105} + ¦ ¦ ¦ ¦ °--SYMBOL: reind [0/0] {104} + ¦ ¦ ¦ °--')': ) [1/0] {106} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {107} + ¦ ¦ °--expr: map(c [1/0] {108} + ¦ ¦ ¦--expr: map [0/0] {110} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: map [0/0] {109} + ¦ ¦ ¦--'(': ( [0/0] {111} + ¦ ¦ ¦--expr: compa [0/0] {113} + ¦ ¦ ¦ °--SYMBOL: compa [0/0] {112} + ¦ ¦ °--')': ) [0/0] {114} + ¦ °--'}': } [1/0] {115} + ¦--COMMENT: #' An [2/0] {116} + ¦--COMMENT: #' @e [1/0] {117} + ¦--COMMENT: #' \d [1/0] {118} + ¦--COMMENT: #' op [1/0] {119} + ¦--COMMENT: #' } [1/0] {120} + ¦--COMMENT: #' \d [1/0] {121} + ¦--COMMENT: #' op [1/0] {122} + ¦--COMMENT: #' } [1/0] {123} + ¦--COMMENT: #' \d [1/0] {124} + ¦--COMMENT: #' op [1/0] {125} + ¦--COMMENT: #' } [1/0] {126} + ¦--COMMENT: #' \d [1/0] {127} + ¦--COMMENT: #' op [1/0] {128} + ¦--COMMENT: #' } [1/0] {129} + ¦--COMMENT: #' \d [1/0] {130} + ¦--COMMENT: #' op [1/0] {131} + ¦--COMMENT: #' } [1/0] {132} + ¦--COMMENT: #' \d [1/0] {133} + ¦--COMMENT: #' op [1/0] {134} + ¦--COMMENT: #' } [1/0] {135} + ¦--COMMENT: #' op [1/0] {136} + ¦--COMMENT: #' \d [1/0] {137} + ¦--COMMENT: #' op [1/0] {138} + ¦--COMMENT: #' } [1/0] {139} + ¦--COMMENT: #' \d [1/0] {140} + ¦--COMMENT: #' op [1/0] {141} + ¦--COMMENT: #' } [1/0] {142} + ¦--COMMENT: #' \d [1/0] {143} + ¦--COMMENT: #' op [1/0] {144} + ¦--COMMENT: #' } [1/0] {145} + °--expr: NULL [1/0] {147} + °--NULL_CONST: NULL [0/0] {146} diff --git a/tests/testthat/roxygen-examples-complete/12-fun-decs-in-examples-out.R b/tests/testthat/roxygen-examples-complete/12-fun-decs-in-examples-out.R new file mode 100644 index 000000000..0188f0cca --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/12-fun-decs-in-examples-out.R @@ -0,0 +1,76 @@ +#' Create a style guide +#' +#' @param reindention A list 
of parameters for regex re-indention, most +#' conveniently constructed using [specify_reindention()]. +#' @examples +#' set_line_break_before_crly_opening <- function(pd_flat) { +#' op <- pd_flat$token %in% "'{'" +#' pd_flat$lag_newlines[op] <- 1L +#' pd_flat +#' } +#' set_line_break_before_curly_opening_style <- function() { +#' create_style_guide(line_break = list(set_line_break_before_curly_opening)) +#' } +#' style_text("a <- function(x) { x } +#' ", style = set_line_break_before_curly_opening_style) +#' \donttest{ +#' set_line_break_before_crly_opening <- function(pd_flat) { +#' op <- pd_flat$token %in% "'{'" +#' pd_flat$lag_newlines[op] <- 1L +#' pd_flat +#' } +#' } +#' @importFrom purrr compact +#' @export +create_style_guide <- function(initialize = default_style_guide_attributes, + line_break = NULL, + space = NULL, + token = NULL, + indention = NULL, + use_raw_indention = FALSE, + reindention = tidyverse_reindention()) { + list( + # transformer functions + initialize = list(initialize), + line_break, + space, + token, + indention, + # transformer options + use_raw_indention, + reindention + ) %>% + map(compact) +} + +#' Another +#' @examples +#' \donttest{ +#' op <- pd_flat$token %in% "'('" +#' } +#' \donttest{ +#' op <- pd_flat$token %in% "')'" +#' } +#' \donttest{ +#' op <- pd_flat$token %in% "(" +#' } +#' \donttest{ +#' op <- pd_flat$token %in% ")" +#' } +#' \donttest{ +#' op <- pd_flat$token %in% "{" +#' } +#' \donttest{ +#' op <- pd_flat$token %in% "}" +#' } +#' op <- pd_flat$token %in% "'['" +#' \donttest{ +#' op <- pd_flat$token %in% "']'" +#' } +#' \donttest{ +#' op <- pd_flat$token %in% "[" +#' } +#' \donttest{ +#' op <- pd_flat$token %in% "]" +#' } +NULL diff --git a/tests/testthat/roxygen-examples-complete/13-empty-lines-in.R b/tests/testthat/roxygen-examples-complete/13-empty-lines-in.R new file mode 100644 index 000000000..e1ab00834 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/13-empty-lines-in.R @@ -0,0 +1,48 @@ + +#' Create a style guide +#' +#' @param reindention A list of parameters for regex re-indention, most +#' conveniently constructed using [specify_reindention()]. 
+#' @examples +#' # empty +#' +#' +#' # two +#' +#' +#' +#' +#' # more +#' a <- 3 +#' # a comment +#' \dontrun{ +#' x +#' +#' y # hi +#' +#' # more +#' +#' a <- 3 +#' } +#' @importFrom purrr compact +#' @export +create_style_guide <- function(initialize = default_style_guide_attributes, + line_break = NULL, + space = NULL, + token = NULL, + indention = NULL, + use_raw_indention = FALSE, + reindention = tidyverse_reindention()) { + list( + # transformer functions + initialize = list(initialize), + line_break, + space, + token, + indention, + # transformer options + use_raw_indention, + reindention + ) %>% + map(compact) +} diff --git a/tests/testthat/roxygen-examples-complete/13-empty-lines-in_tree b/tests/testthat/roxygen-examples-complete/13-empty-lines-in_tree new file mode 100644 index 000000000..b511569eb --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/13-empty-lines-in_tree @@ -0,0 +1,120 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' Cr [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' @p [1/0] {3} + ¦--COMMENT: #' [1/0] {4} + ¦--COMMENT: #' @e [1/0] {5} + ¦--COMMENT: #' # [1/0] {6} + ¦--COMMENT: #' [1/0] {7} + ¦--COMMENT: #' [1/0] {8} + ¦--COMMENT: #' # [1/0] {9} + ¦--COMMENT: #' [1/0] {10} + ¦--COMMENT: #' [1/0] {11} + ¦--COMMENT: #' [1/0] {12} + ¦--COMMENT: #' [1/0] {13} + ¦--COMMENT: #' # [1/0] {14} + ¦--COMMENT: #' a [1/0] {15} + ¦--COMMENT: #' # [1/0] {16} + ¦--COMMENT: #' \d [1/0] {17} + ¦--COMMENT: #' x [1/0] {18} + ¦--COMMENT: #' [1/0] {19} + ¦--COMMENT: #' y [1/0] {20} + ¦--COMMENT: #' [1/0] {21} + ¦--COMMENT: #' # [1/0] {22} + ¦--COMMENT: #' [1/0] {23} + ¦--COMMENT: #' a [1/0] {24} + ¦--COMMENT: #' } [1/0] {25} + ¦--COMMENT: #' @i [1/0] {26} + ¦--COMMENT: #' @e [1/0] {27} + °--expr: creat [1/0] {28} + ¦--expr: creat [0/1] {30} + ¦ °--SYMBOL: creat [0/0] {29} + ¦--LEFT_ASSIGN: <- [0/1] {31} + °--expr: funct [0/0] {32} + ¦--FUNCTION: funct [0/0] {33} + ¦--'(': ( [0/0] {34} + ¦--SYMBOL_FORMALS: initi [0/1] {35} + ¦--EQ_FORMALS: = [0/1] {36} + ¦--expr: defau [0/0] {38} + ¦ °--SYMBOL: defau [0/0] {37} + ¦--',': , [0/31] {39} + ¦--SYMBOL_FORMALS: line_ [1/1] {40} + ¦--EQ_FORMALS: = [0/1] {41} + ¦--expr: NULL [0/0] {43} + ¦ °--NULL_CONST: NULL [0/0] {42} + ¦--',': , [0/31] {44} + ¦--SYMBOL_FORMALS: space [1/1] {45} + ¦--EQ_FORMALS: = [0/1] {46} + ¦--expr: NULL [0/0] {48} + ¦ °--NULL_CONST: NULL [0/0] {47} + ¦--',': , [0/31] {49} + ¦--SYMBOL_FORMALS: token [1/1] {50} + ¦--EQ_FORMALS: = [0/1] {51} + ¦--expr: NULL [0/0] {53} + ¦ °--NULL_CONST: NULL [0/0] {52} + ¦--',': , [0/31] {54} + ¦--SYMBOL_FORMALS: inden [1/1] {55} + ¦--EQ_FORMALS: = [0/1] {56} + ¦--expr: NULL [0/0] {58} + ¦ °--NULL_CONST: NULL [0/0] {57} + ¦--',': , [0/31] {59} + ¦--SYMBOL_FORMALS: use_r [1/1] {60} + ¦--EQ_FORMALS: = [0/1] {61} + ¦--expr: FALSE [0/0] {63} + ¦ °--NUM_CONST: FALSE [0/0] {62} + ¦--',': , [0/31] {64} + ¦--SYMBOL_FORMALS: reind [1/1] {65} + ¦--EQ_FORMALS: = [0/1] {66} + ¦--expr: tidyv [0/0] {67} + ¦ ¦--expr: tidyv [0/0] {69} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: tidyv [0/0] {68} + ¦ ¦--'(': ( [0/0] {70} + ¦ °--')': ) [0/0] {71} + ¦--')': ) [0/1] {72} + °--expr: { + l [0/0] {73} + ¦--'{': { [0/2] {74} + ¦--expr: list( [1/0] {75} + ¦ ¦--expr: list( [0/1] {76} + ¦ ¦ ¦--expr: list [0/0] {78} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: list [0/0] {77} + ¦ ¦ ¦--'(': ( [0/4] {79} + ¦ ¦ ¦--COMMENT: # tra [1/4] {80} + ¦ ¦ ¦--SYMBOL_SUB: initi [1/1] {81} + ¦ ¦ ¦--EQ_SUB: = [0/1] {82} + ¦ ¦ ¦--expr: list( [0/0] {83} + ¦ ¦ ¦ ¦--expr: list [0/0] {85} + ¦ ¦ ¦ ¦ 
°--SYMBOL_FUNCTION_CALL: list [0/0] {84} + ¦ ¦ ¦ ¦--'(': ( [0/0] {86} + ¦ ¦ ¦ ¦--expr: initi [0/0] {88} + ¦ ¦ ¦ ¦ °--SYMBOL: initi [0/0] {87} + ¦ ¦ ¦ °--')': ) [0/0] {89} + ¦ ¦ ¦--',': , [0/4] {90} + ¦ ¦ ¦--expr: line_ [1/0] {92} + ¦ ¦ ¦ °--SYMBOL: line_ [0/0] {91} + ¦ ¦ ¦--',': , [0/4] {93} + ¦ ¦ ¦--expr: space [1/0] {95} + ¦ ¦ ¦ °--SYMBOL: space [0/0] {94} + ¦ ¦ ¦--',': , [0/4] {96} + ¦ ¦ ¦--expr: token [1/0] {98} + ¦ ¦ ¦ °--SYMBOL: token [0/0] {97} + ¦ ¦ ¦--',': , [0/4] {99} + ¦ ¦ ¦--expr: inden [1/0] {101} + ¦ ¦ ¦ °--SYMBOL: inden [0/0] {100} + ¦ ¦ ¦--',': , [0/4] {102} + ¦ ¦ ¦--COMMENT: # tra [1/4] {103} + ¦ ¦ ¦--expr: use_r [1/0] {105} + ¦ ¦ ¦ °--SYMBOL: use_r [0/0] {104} + ¦ ¦ ¦--',': , [0/4] {106} + ¦ ¦ ¦--expr: reind [1/2] {108} + ¦ ¦ ¦ °--SYMBOL: reind [0/0] {107} + ¦ ¦ °--')': ) [1/0] {109} + ¦ ¦--SPECIAL-PIPE: %>% [0/4] {110} + ¦ °--expr: map(c [1/0] {111} + ¦ ¦--expr: map [0/0] {113} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: map [0/0] {112} + ¦ ¦--'(': ( [0/0] {114} + ¦ ¦--expr: compa [0/0] {116} + ¦ ¦ °--SYMBOL: compa [0/0] {115} + ¦ °--')': ) [0/0] {117} + °--'}': } [1/0] {118} diff --git a/tests/testthat/roxygen-examples-complete/13-empty-lines-out.R b/tests/testthat/roxygen-examples-complete/13-empty-lines-out.R new file mode 100644 index 000000000..ae4f1b9db --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/13-empty-lines-out.R @@ -0,0 +1,47 @@ +#' Create a style guide +#' +#' @param reindention A list of parameters for regex re-indention, most +#' conveniently constructed using [specify_reindention()]. +#' @examples +#' # empty +#' +#' +#' # two +#' +#' +#' +#' +#' # more +#' a <- 3 +#' # a comment +#' \dontrun{ +#' x +#' +#' y # hi +#' +#' # more +#' +#' a <- 3 +#' } +#' @importFrom purrr compact +#' @export +create_style_guide <- function(initialize = default_style_guide_attributes, + line_break = NULL, + space = NULL, + token = NULL, + indention = NULL, + use_raw_indention = FALSE, + reindention = tidyverse_reindention()) { + list( + # transformer functions + initialize = list(initialize), + line_break, + space, + token, + indention, + # transformer options + use_raw_indention, + reindention + ) %>% + map(compact) +} diff --git a/tests/testthat/roxygen-examples-complete/14-pipe-dontrun-in.R b/tests/testthat/roxygen-examples-complete/14-pipe-dontrun-in.R new file mode 100644 index 000000000..ffbb6e1ec --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/14-pipe-dontrun-in.R @@ -0,0 +1,17 @@ +#' Hi +#' +#' x +#' @examples +#' a %>% +#' x(b =3) %>% +#' ff() +#' \dontrun{ +#' style_pkg( +#' scope = "line_breaks", +#' math_token_spacing = specfy_math_token_spacing(zero = "'+'") +#' )%>% +#' there() +#' } +#' call("\\n", x = 3) +#' @export +NULL diff --git a/tests/testthat/roxygen-examples-complete/14-pipe-dontrun-in_tree b/tests/testthat/roxygen-examples-complete/14-pipe-dontrun-in_tree new file mode 100644 index 000000000..6d03f57f6 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/14-pipe-dontrun-in_tree @@ -0,0 +1,19 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' Hi [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' x [1/0] {3} + ¦--COMMENT: #' @e [1/0] {4} + ¦--COMMENT: #' a [1/0] {5} + ¦--COMMENT: #' [1/0] {6} + ¦--COMMENT: #' [1/0] {7} + ¦--COMMENT: #' \d [1/0] {8} + ¦--COMMENT: #' st [1/0] {9} + ¦--COMMENT: #' [1/0] {10} + ¦--COMMENT: #' [1/0] {11} + ¦--COMMENT: #' )% [1/0] {12} + ¦--COMMENT: #' [1/0] {13} + ¦--COMMENT: #' } [1/0] {14} + ¦--COMMENT: #' ca [1/0] {15} + ¦--COMMENT: #' @e [1/0] {16} + °--expr: NULL 
[1/0] {18} + °--NULL_CONST: NULL [0/0] {17} diff --git a/tests/testthat/roxygen-examples-complete/14-pipe-dontrun-out.R b/tests/testthat/roxygen-examples-complete/14-pipe-dontrun-out.R new file mode 100644 index 000000000..6915ef964 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/14-pipe-dontrun-out.R @@ -0,0 +1,17 @@ +#' Hi +#' +#' x +#' @examples +#' a %>% +#' x(b = 3) %>% +#' ff() +#' \dontrun{ +#' style_pkg( +#' scope = "line_breaks", +#' math_token_spacing = specfy_math_token_spacing(zero = "'+'") +#' ) %>% +#' there() +#' } +#' call("\\n", x = 3) +#' @export +NULL diff --git a/tests/testthat/roxygen-examples-complete/15-roxygen-dontrun-indention-in.R b/tests/testthat/roxygen-examples-complete/15-roxygen-dontrun-indention-in.R new file mode 100644 index 000000000..d9540f770 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/15-roxygen-dontrun-indention-in.R @@ -0,0 +1,21 @@ +#' Bla +#' +#' @examples +#' \dontrun{xfun::write_utf8("1++1",file)} +#' style_file( +#' file, +#' style = tidyverse_style,strict =TRUE +#' ) +#' \dontrun{style_file( +#' file, +#' style = tidyverse_style,strict =TRUE +#' ) +#' } +#' if (TRUE) +#' return(X) +#' +#' if (TRUE ) +#' return(X ) +#'if (TRUE ) +#'return(X ) +x <- y diff --git a/tests/testthat/roxygen-examples-complete/15-roxygen-dontrun-indention-in_tree b/tests/testthat/roxygen-examples-complete/15-roxygen-dontrun-indention-in_tree new file mode 100644 index 000000000..974a1e333 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/15-roxygen-dontrun-indention-in_tree @@ -0,0 +1,27 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' Bl [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' @e [1/0] {3} + ¦--COMMENT: #' \d [1/0] {4} + ¦--COMMENT: #' st [1/0] {5} + ¦--COMMENT: #' [1/0] {6} + ¦--COMMENT: #' [1/0] {7} + ¦--COMMENT: #' [1/0] {8} + ¦--COMMENT: #' \d [1/0] {9} + ¦--COMMENT: #' [1/0] {10} + ¦--COMMENT: #' [1/0] {11} + ¦--COMMENT: #' [1/0] {12} + ¦--COMMENT: #' } [1/0] {13} + ¦--COMMENT: #' if [1/0] {14} + ¦--COMMENT: #' [1/0] {15} + ¦--COMMENT: #' [1/0] {16} + ¦--COMMENT: #' if [1/0] {17} + ¦--COMMENT: #' re [1/0] {18} + ¦--COMMENT: #'if [1/0] {19} + ¦--COMMENT: #'ret [1/0] {20} + °--expr: x <- [1/0] {21} + ¦--expr: x [0/1] {23} + ¦ °--SYMBOL: x [0/0] {22} + ¦--LEFT_ASSIGN: <- [0/1] {24} + °--expr: y [0/0] {26} + °--SYMBOL: y [0/0] {25} diff --git a/tests/testthat/roxygen-examples-complete/15-roxygen-dontrun-indention-out.R b/tests/testthat/roxygen-examples-complete/15-roxygen-dontrun-indention-out.R new file mode 100644 index 000000000..ae29dec1f --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/15-roxygen-dontrun-indention-out.R @@ -0,0 +1,24 @@ +#' Bla +#' +#' @examples +#' \dontrun{ +#' xfun::write_utf8("1++1", file) +#' } +#' style_file( +#' file, +#' style = tidyverse_style, strict = TRUE +#' ) +#' \dontrun{ +#' style_file( +#' file, +#' style = tidyverse_style, strict = TRUE +#' ) +#' } +#' if (TRUE) +#' return(X) +#' +#' if (TRUE) +#' return(X) +#' if (TRUE) +#' return(X) +x <- y diff --git a/tests/testthat/roxygen-examples-complete/16-dont-warn-empty-in.R b/tests/testthat/roxygen-examples-complete/16-dont-warn-empty-in.R new file mode 100644 index 000000000..64118f440 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/16-dont-warn-empty-in.R @@ -0,0 +1,11 @@ +#' Do stuff +#' +#' Some things we do +#' @examples +#' g() +#' \dontrun{ +#' f(x) +#' } +#' +#' @export +g() diff --git a/tests/testthat/roxygen-examples-complete/16-dont-warn-empty-in_tree 
b/tests/testthat/roxygen-examples-complete/16-dont-warn-empty-in_tree new file mode 100644 index 000000000..22b238b62 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/16-dont-warn-empty-in_tree @@ -0,0 +1,16 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' Do [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' So [1/0] {3} + ¦--COMMENT: #' @e [1/0] {4} + ¦--COMMENT: #' g( [1/0] {5} + ¦--COMMENT: #' \d [1/0] {6} + ¦--COMMENT: #' f( [1/0] {7} + ¦--COMMENT: #' } [1/0] {8} + ¦--COMMENT: #' [1/0] {9} + ¦--COMMENT: #' @e [1/0] {10} + °--expr: g() [1/0] {11} + ¦--expr: g [0/0] {13} + ¦ °--SYMBOL_FUNCTION_CALL: g [0/0] {12} + ¦--'(': ( [0/0] {14} + °--')': ) [0/0] {15} diff --git a/tests/testthat/roxygen-examples-complete/16-dont-warn-empty-out.R b/tests/testthat/roxygen-examples-complete/16-dont-warn-empty-out.R new file mode 100644 index 000000000..64118f440 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/16-dont-warn-empty-out.R @@ -0,0 +1,11 @@ +#' Do stuff +#' +#' Some things we do +#' @examples +#' g() +#' \dontrun{ +#' f(x) +#' } +#' +#' @export +g() diff --git a/tests/testthat/roxygen-examples-complete/17-two-no-non-comment-in.R b/tests/testthat/roxygen-examples-complete/17-two-no-non-comment-in.R new file mode 100644 index 000000000..333618a5c --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/17-two-no-non-comment-in.R @@ -0,0 +1,15 @@ +#' @examples +#' my_fun <- function() { +#' print("hello world!") +#' } +#' # before this comment is a left-over space +#' another_function <- function() NULL + + + +#' @examples +#' my_fun <- function() { +#' print("hello world!") +#' } +#' # before this comment is a left-over space +#' another_function <- function() NULL diff --git a/tests/testthat/roxygen-examples-complete/17-two-no-non-comment-in_tree b/tests/testthat/roxygen-examples-complete/17-two-no-non-comment-in_tree new file mode 100644 index 000000000..34ab1e4de --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/17-two-no-non-comment-in_tree @@ -0,0 +1,13 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' @e [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' [1/0] {3} + ¦--COMMENT: #' } [1/0] {4} + ¦--COMMENT: #' # [1/0] {5} + ¦--COMMENT: #' an [1/0] {6} + ¦--COMMENT: #' @e [4/0] {7} + ¦--COMMENT: #' [1/0] {8} + ¦--COMMENT: #' [1/0] {9} + ¦--COMMENT: #' } [1/0] {10} + ¦--COMMENT: #' # [1/0] {11} + °--COMMENT: #' an [1/0] {12} diff --git a/tests/testthat/roxygen-examples-complete/17-two-no-non-comment-out.R b/tests/testthat/roxygen-examples-complete/17-two-no-non-comment-out.R new file mode 100644 index 000000000..d68e7623d --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/17-two-no-non-comment-out.R @@ -0,0 +1,13 @@ +#' @examples +#' my_fun <- function() { +#' print("hello world!") +#' } +#' # before this comment is a left-over space +#' another_function <- function() NULL + +#' @examples +#' my_fun <- function() { +#' print("hello world!") +#' } +#' # before this comment is a left-over space +#' another_function <- function() NULL diff --git a/tests/testthat/roxygen-examples-complete/18-no-non-comment-in.R b/tests/testthat/roxygen-examples-complete/18-no-non-comment-in.R new file mode 100644 index 000000000..ee22aef1e --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/18-no-non-comment-in.R @@ -0,0 +1,6 @@ +#' @examples +#' my_fun <- function() { +#' print("hello world!") +#' } +#' # before this comment is a left-over space +#' another_function <- function() NULL diff 
--git a/tests/testthat/roxygen-examples-complete/18-no-non-comment-in_tree b/tests/testthat/roxygen-examples-complete/18-no-non-comment-in_tree new file mode 100644 index 000000000..86f0c3de2 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/18-no-non-comment-in_tree @@ -0,0 +1,7 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' @e [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' [1/0] {3} + ¦--COMMENT: #' } [1/0] {4} + ¦--COMMENT: #' # [1/0] {5} + °--COMMENT: #' an [1/0] {6} diff --git a/tests/testthat/roxygen-examples-complete/18-no-non-comment-out.R b/tests/testthat/roxygen-examples-complete/18-no-non-comment-out.R new file mode 100644 index 000000000..433d122ab --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/18-no-non-comment-out.R @@ -0,0 +1,6 @@ +#' @examples +#' my_fun <- function() { +#' print("hello world!") +#' } +#' # before this comment is a left-over space +#' another_function <- function() NULL diff --git a/tests/testthat/roxygen-examples-complete/19-escaped-slash-in.R b/tests/testthat/roxygen-examples-complete/19-escaped-slash-in.R new file mode 100644 index 000000000..ec3d8075a --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/19-escaped-slash-in.R @@ -0,0 +1,25 @@ +#' Data frame and Tables Pretty Formatting +#' +#' @examples +#' c("\nA \\nyellow \\\nline", "yellow") +#' c("\ A \\ nyellow \\\ nline", "yellow") +#' \dontrun{ +#' c("\nA \\nyellow \\\nline", "yellow") +#' c("\ A \\ nyellow \\\ nline", "yellow") +#' } +#' @export +NULL + + +one <- "\t" +two = "\\t" +three = "\\\t" +four = "\\\\t" +five = "\\\\\t" +six = "\\\\\\t" + +two = "\\" +four = "\\\\" +five = "\\\\." +six = "\\\\\\w" +six = "\\\\\\" diff --git a/tests/testthat/roxygen-examples-complete/19-escaped-slash-in_tree b/tests/testthat/roxygen-examples-complete/19-escaped-slash-in_tree new file mode 100644 index 000000000..fcf4e4a39 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/19-escaped-slash-in_tree @@ -0,0 +1,79 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' Da [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' @e [1/0] {3} + ¦--COMMENT: #' [1/0] {4} + ¦--COMMENT: #' [1/0] {5} + ¦--COMMENT: #' \d [1/0] {6} + ¦--COMMENT: #' [1/0] {7} + ¦--COMMENT: #' [1/0] {8} + ¦--COMMENT: #' } [1/0] {9} + ¦--COMMENT: #' @e [1/0] {10} + ¦--expr: NULL [1/0] {12} + ¦ °--NULL_CONST: NULL [0/0] {11} + ¦--expr: one [3/0] {13} + ¦ ¦--expr: one [0/2] {15} + ¦ ¦ °--SYMBOL: one [0/0] {14} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {16} + ¦ °--expr: "\t" [0/0] {18} + ¦ °--STR_CONST: "\t" [0/0] {17} + ¦--expr_or_assign_or_help: two [1/0] {19} + ¦ ¦--expr: two [0/3] {21} + ¦ ¦ °--SYMBOL: two [0/0] {20} + ¦ ¦--EQ_ASSIGN: = [0/1] {22} + ¦ °--expr: "\\t" [0/0] {24} + ¦ °--STR_CONST: "\\t" [0/0] {23} + ¦--expr_or_assign_or_help: three [1/0] {25} + ¦ ¦--expr: three [0/1] {27} + ¦ ¦ °--SYMBOL: three [0/0] {26} + ¦ ¦--EQ_ASSIGN: = [0/1] {28} + ¦ °--expr: "\\\t [0/0] {30} + ¦ °--STR_CONST: "\\\t [0/0] {29} + ¦--expr_or_assign_or_help: four [1/0] {31} + ¦ ¦--expr: four [0/2] {33} + ¦ ¦ °--SYMBOL: four [0/0] {32} + ¦ ¦--EQ_ASSIGN: = [0/1] {34} + ¦ °--expr: "\\\\ [0/0] {36} + ¦ °--STR_CONST: "\\\\ [0/0] {35} + ¦--expr_or_assign_or_help: five [1/0] {37} + ¦ ¦--expr: five [0/2] {39} + ¦ ¦ °--SYMBOL: five [0/0] {38} + ¦ ¦--EQ_ASSIGN: = [0/1] {40} + ¦ °--expr: "\\\\ [0/0] {42} + ¦ °--STR_CONST: "\\\\ [0/0] {41} + ¦--expr_or_assign_or_help: six [1/0] {43} + ¦ ¦--expr: six [0/3] {45} + ¦ ¦ °--SYMBOL: six [0/0] {44} + ¦ ¦--EQ_ASSIGN: = [0/1] {46} + ¦ 
°--expr: "\\\\ [0/0] {48} + ¦ °--STR_CONST: "\\\\ [0/0] {47} + ¦--expr_or_assign_or_help: two [2/0] {49} + ¦ ¦--expr: two [0/3] {51} + ¦ ¦ °--SYMBOL: two [0/0] {50} + ¦ ¦--EQ_ASSIGN: = [0/1] {52} + ¦ °--expr: "\\" [0/0] {54} + ¦ °--STR_CONST: "\\" [0/0] {53} + ¦--expr_or_assign_or_help: four [1/0] {55} + ¦ ¦--expr: four [0/2] {57} + ¦ ¦ °--SYMBOL: four [0/0] {56} + ¦ ¦--EQ_ASSIGN: = [0/1] {58} + ¦ °--expr: "\\\\ [0/0] {60} + ¦ °--STR_CONST: "\\\\ [0/0] {59} + ¦--expr_or_assign_or_help: five [1/0] {61} + ¦ ¦--expr: five [0/2] {63} + ¦ ¦ °--SYMBOL: five [0/0] {62} + ¦ ¦--EQ_ASSIGN: = [0/1] {64} + ¦ °--expr: "\\\\ [0/0] {66} + ¦ °--STR_CONST: "\\\\ [0/0] {65} + ¦--expr_or_assign_or_help: six [1/0] {67} + ¦ ¦--expr: six [0/3] {69} + ¦ ¦ °--SYMBOL: six [0/0] {68} + ¦ ¦--EQ_ASSIGN: = [0/1] {70} + ¦ °--expr: "\\\\ [0/0] {72} + ¦ °--STR_CONST: "\\\\ [0/0] {71} + °--expr_or_assign_or_help: six [1/0] {73} + ¦--expr: six [0/3] {75} + ¦ °--SYMBOL: six [0/0] {74} + ¦--EQ_ASSIGN: = [0/1] {76} + °--expr: "\\\\ [0/0] {78} + °--STR_CONST: "\\\\ [0/0] {77} diff --git a/tests/testthat/roxygen-examples-complete/19-escaped-slash-out.R b/tests/testthat/roxygen-examples-complete/19-escaped-slash-out.R new file mode 100644 index 000000000..a7502d319 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/19-escaped-slash-out.R @@ -0,0 +1,25 @@ +#' Data frame and Tables Pretty Formatting +#' +#' @examples +#' c("\nA \\nyellow \\\nline", "yellow") +#' c("\ A \\ nyellow \\\ nline", "yellow") +#' \dontrun{ +#' c("\nA \\nyellow \\\nline", "yellow") +#' c("\ A \\ nyellow \\\ nline", "yellow") +#' } +#' @export +NULL + + +one <- "\t" +two <- "\\t" +three <- "\\\t" +four <- "\\\\t" +five <- "\\\\\t" +six <- "\\\\\\t" + +two <- "\\" +four <- "\\\\" +five <- "\\\\." +six <- "\\\\\\w" +six <- "\\\\\\" diff --git a/tests/testthat/roxygen-examples-complete/20-exampleIf-simple-in.R b/tests/testthat/roxygen-examples-complete/20-exampleIf-simple-in.R new file mode 100644 index 000000000..1541e5ed3 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/20-exampleIf-simple-in.R @@ -0,0 +1,9 @@ +#' The tidyverse style +#' +#' Style code according to the tidyverse style guide. +#' @family style_guides +#' @examplesIf TRUE +#' c( ) +#' @importFrom purrr partial +#' @export +a <- call; diff --git a/tests/testthat/roxygen-examples-complete/20-exampleIf-simple-in_tree b/tests/testthat/roxygen-examples-complete/20-exampleIf-simple-in_tree new file mode 100644 index 000000000..cb8c82e20 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/20-exampleIf-simple-in_tree @@ -0,0 +1,16 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' Th [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' St [1/0] {3} + ¦--COMMENT: #' @f [1/0] {4} + ¦--COMMENT: #' @e [1/0] {5} + ¦--COMMENT: #' c( [1/0] {6} + ¦--COMMENT: #' @i [1/0] {7} + ¦--COMMENT: #' @e [1/0] {8} + ¦--expr: a [1/0] {9} + ¦ ¦--expr: a [0/5] {11} + ¦ ¦ °--SYMBOL: a [0/0] {10} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {12} + ¦ °--expr: call [0/0] {14} + ¦ °--SYMBOL: call [0/0] {13} + °--';': ; [0/0] {15} diff --git a/tests/testthat/roxygen-examples-complete/20-exampleIf-simple-out.R b/tests/testthat/roxygen-examples-complete/20-exampleIf-simple-out.R new file mode 100644 index 000000000..34427f9a0 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/20-exampleIf-simple-out.R @@ -0,0 +1,9 @@ +#' The tidyverse style +#' +#' Style code according to the tidyverse style guide. 
+#' @family style_guides +#' @examplesIf TRUE +#' c() +#' @importFrom purrr partial +#' @export +a <- call diff --git a/tests/testthat/roxygen-examples-complete/21-exampleIf-multiple-in.R b/tests/testthat/roxygen-examples-complete/21-exampleIf-multiple-in.R new file mode 100644 index 000000000..5af339a2b --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/21-exampleIf-multiple-in.R @@ -0,0 +1,11 @@ +#' The tidyverse style +#' +#' Style code according to the tidyverse style guide. +#' @family style_guides +#' @examplesIf TRUE +#' c( ) +#' @examplesIf TRUE +#' x=2 +#' @importFrom purrr partial +#' @export +a <- call; diff --git a/tests/testthat/roxygen-examples-complete/21-exampleIf-multiple-in_tree b/tests/testthat/roxygen-examples-complete/21-exampleIf-multiple-in_tree new file mode 100644 index 000000000..9138e38d9 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/21-exampleIf-multiple-in_tree @@ -0,0 +1,18 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' Th [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' St [1/0] {3} + ¦--COMMENT: #' @f [1/0] {4} + ¦--COMMENT: #' @e [1/0] {5} + ¦--COMMENT: #' c( [1/0] {6} + ¦--COMMENT: #' @e [1/0] {7} + ¦--COMMENT: #' x= [1/0] {8} + ¦--COMMENT: #' @i [1/0] {9} + ¦--COMMENT: #' @e [1/0] {10} + ¦--expr: a [1/0] {11} + ¦ ¦--expr: a [0/5] {13} + ¦ ¦ °--SYMBOL: a [0/0] {12} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {14} + ¦ °--expr: call [0/0] {16} + ¦ °--SYMBOL: call [0/0] {15} + °--';': ; [0/0] {17} diff --git a/tests/testthat/roxygen-examples-complete/21-exampleIf-multiple-out.R b/tests/testthat/roxygen-examples-complete/21-exampleIf-multiple-out.R new file mode 100644 index 000000000..b06074393 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/21-exampleIf-multiple-out.R @@ -0,0 +1,11 @@ +#' The tidyverse style +#' +#' Style code according to the tidyverse style guide. +#' @family style_guides +#' @examplesIf TRUE +#' c() +#' @examplesIf TRUE +#' x <- 2 +#' @importFrom purrr partial +#' @export +a <- call diff --git a/tests/testthat/roxygen-examples-complete/22-exampleIf-example-mixed-in.R b/tests/testthat/roxygen-examples-complete/22-exampleIf-example-mixed-in.R new file mode 100644 index 000000000..c0748dc7e --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/22-exampleIf-example-mixed-in.R @@ -0,0 +1,23 @@ +#' The bli blauuu2 +#' +#' Style code according to the bli blauuu2 guide. +#' @family some +#' @examplesIf TRUE +#' c( ) +#' @examples +#' x=2 +#' @importFrom purrr partial +#' @export +x <- 3 + +#' Some more docs +#' +#' Style code according to the bli blauuu2 guide. 
+#' @family not +#' @examples +#' x=2 +#' @examplesIf TRUE +#' c( ) +#' @export +function() + NULL diff --git a/tests/testthat/roxygen-examples-complete/22-exampleIf-example-mixed-in_tree b/tests/testthat/roxygen-examples-complete/22-exampleIf-example-mixed-in_tree new file mode 100644 index 000000000..d283302bd --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/22-exampleIf-example-mixed-in_tree @@ -0,0 +1,32 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' Th [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' St [1/0] {3} + ¦--COMMENT: #' @f [1/0] {4} + ¦--COMMENT: #' @e [1/0] {5} + ¦--COMMENT: #' c( [1/0] {6} + ¦--COMMENT: #' @e [1/0] {7} + ¦--COMMENT: #' x= [1/0] {8} + ¦--COMMENT: #' @i [1/0] {9} + ¦--COMMENT: #' @e [1/0] {10} + ¦--expr: x <- [1/0] {11} + ¦ ¦--expr: x [0/1] {13} + ¦ ¦ °--SYMBOL: x [0/0] {12} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {14} + ¦ °--expr: 3 [0/0] {16} + ¦ °--NUM_CONST: 3 [0/0] {15} + ¦--COMMENT: #' So [2/0] {17} + ¦--COMMENT: #' [1/0] {18} + ¦--COMMENT: #' St [1/0] {19} + ¦--COMMENT: #' @f [1/0] {20} + ¦--COMMENT: #' @e [1/0] {21} + ¦--COMMENT: #' x= [1/0] {22} + ¦--COMMENT: #' @e [1/0] {23} + ¦--COMMENT: #' c( [1/0] {24} + ¦--COMMENT: #' @e [1/0] {25} + °--expr: funct [1/0] {26} + ¦--FUNCTION: funct [0/0] {27} + ¦--'(': ( [0/0] {28} + ¦--')': ) [0/2] {29} + °--expr: NULL [1/0] {31} + °--NULL_CONST: NULL [0/0] {30} diff --git a/tests/testthat/roxygen-examples-complete/22-exampleIf-example-mixed-out.R b/tests/testthat/roxygen-examples-complete/22-exampleIf-example-mixed-out.R new file mode 100644 index 000000000..887d501fa --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/22-exampleIf-example-mixed-out.R @@ -0,0 +1,24 @@ +#' The bli blauuu2 +#' +#' Style code according to the bli blauuu2 guide. +#' @family some +#' @examplesIf TRUE +#' c() +#' @examples +#' x <- 2 +#' @importFrom purrr partial +#' @export +x <- 3 + +#' Some more docs +#' +#' Style code according to the bli blauuu2 guide. 
+#' @family not +#' @examples +#' x <- 2 +#' @examplesIf TRUE +#' c() +#' @export +function() { + NULL +} diff --git a/tests/testthat/roxygen-examples-complete/23-exampleIf-multiline-cond-in.R b/tests/testthat/roxygen-examples-complete/23-exampleIf-multiline-cond-in.R new file mode 100644 index 000000000..8d96ab40a --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/23-exampleIf-multiline-cond-in.R @@ -0,0 +1,21 @@ +#' dfalkj fdj +#' +#' fjd;kzj lorem impesum +#' @family xj2lkj +#' @examplesIf { +#' 'this-is-a-cond' +#' } +#' c( ) +#' @importFrom purrr partial +#' @export +x <- 3 + + +#' dfalkj fdj +#' +#' fjd;kzj lorem impesum +#' @examplesIf { +#' 'this-is-a-cond' %>% c() +#' } +#' c( ) +x <- 3 diff --git a/tests/testthat/roxygen-examples-complete/23-exampleIf-multiline-cond-in_tree b/tests/testthat/roxygen-examples-complete/23-exampleIf-multiline-cond-in_tree new file mode 100644 index 000000000..27ed71c9d --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/23-exampleIf-multiline-cond-in_tree @@ -0,0 +1,30 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' df [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' fj [1/0] {3} + ¦--COMMENT: #' @f [1/0] {4} + ¦--COMMENT: #' @e [1/0] {5} + ¦--COMMENT: #' 't [1/0] {6} + ¦--COMMENT: #' } [1/0] {7} + ¦--COMMENT: #' c( [1/0] {8} + ¦--COMMENT: #' @i [1/0] {9} + ¦--COMMENT: #' @e [1/0] {10} + ¦--expr: x <- [1/0] {11} + ¦ ¦--expr: x [0/1] {13} + ¦ ¦ °--SYMBOL: x [0/0] {12} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {14} + ¦ °--expr: 3 [0/0] {16} + ¦ °--NUM_CONST: 3 [0/0] {15} + ¦--COMMENT: #' df [3/0] {17} + ¦--COMMENT: #' [1/0] {18} + ¦--COMMENT: #' fj [1/0] {19} + ¦--COMMENT: #' @e [1/0] {20} + ¦--COMMENT: #' 't [1/0] {21} + ¦--COMMENT: #' } [1/0] {22} + ¦--COMMENT: #' c( [1/0] {23} + °--expr: x <- [1/0] {24} + ¦--expr: x [0/1] {26} + ¦ °--SYMBOL: x [0/0] {25} + ¦--LEFT_ASSIGN: <- [0/1] {27} + °--expr: 3 [0/0] {29} + °--NUM_CONST: 3 [0/0] {28} diff --git a/tests/testthat/roxygen-examples-complete/23-exampleIf-multiline-cond-out.R b/tests/testthat/roxygen-examples-complete/23-exampleIf-multiline-cond-out.R new file mode 100644 index 000000000..1c08dcd61 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/23-exampleIf-multiline-cond-out.R @@ -0,0 +1,21 @@ +#' dfalkj fdj +#' +#' fjd;kzj lorem impesum +#' @family xj2lkj +#' @examplesIf { +#' "this-is-a-cond" +#' } +#' c() +#' @importFrom purrr partial +#' @export +x <- 3 + + +#' dfalkj fdj +#' +#' fjd;kzj lorem impesum +#' @examplesIf { +#' "this-is-a-cond" %>% c() +#' } +#' c() +x <- 3 diff --git a/tests/testthat/roxygen-examples-complete/24-exampleIf-spacing-in.R b/tests/testthat/roxygen-examples-complete/24-exampleIf-spacing-in.R new file mode 100644 index 000000000..cbf0435b9 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/24-exampleIf-spacing-in.R @@ -0,0 +1,28 @@ +#' The bli blauuu2 +#' +#' Style code according to the bli blauuu2 guide. +#' @family some +#' @examplesIf TRUE # tab +#' c( ) +#' @examplesIf TRUE +#' c( ) +#' @examplesIf TRUE +#' c( ) +#' @importFrom purrr partial +#' @export +x <- 3 + + +#' Now with needs_rd_emulation +#' +#' Style code according to the bli blauuu2 guide. 
+#' @family some +#' @examplesIf TRUE # tab +#' a %>%b +#' @examplesIf TRUE +#' a %>%d +#' @examplesIf TRUE +#' a %>%c +#' @importFrom purrr partial +#' @export +x <- 33 diff --git a/tests/testthat/roxygen-examples-complete/24-exampleIf-spacing-in_tree b/tests/testthat/roxygen-examples-complete/24-exampleIf-spacing-in_tree new file mode 100644 index 000000000..3e2391645 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/24-exampleIf-spacing-in_tree @@ -0,0 +1,37 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' Th [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' St [1/0] {3} + ¦--COMMENT: #' @f [1/0] {4} + ¦--COMMENT: #' @e [1/0] {5} + ¦--COMMENT: #' c( [1/0] {6} + ¦--COMMENT: #' @e [1/0] {7} + ¦--COMMENT: #' c( [1/0] {8} + ¦--COMMENT: #' [1/0] {9} + ¦--COMMENT: #' c( [1/0] {10} + ¦--COMMENT: #' @i [1/0] {11} + ¦--COMMENT: #' @e [1/0] {12} + ¦--expr: x <- [1/0] {13} + ¦ ¦--expr: x [0/1] {15} + ¦ ¦ °--SYMBOL: x [0/0] {14} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {16} + ¦ °--expr: 3 [0/0] {18} + ¦ °--NUM_CONST: 3 [0/0] {17} + ¦--COMMENT: #' No [3/0] {19} + ¦--COMMENT: #' [1/0] {20} + ¦--COMMENT: #' St [1/0] {21} + ¦--COMMENT: #' @f [1/0] {22} + ¦--COMMENT: #' @e [1/0] {23} + ¦--COMMENT: #' a [1/0] {24} + ¦--COMMENT: #' @e [1/0] {25} + ¦--COMMENT: #' a [1/0] {26} + ¦--COMMENT: #' [1/0] {27} + ¦--COMMENT: #' a [1/0] {28} + ¦--COMMENT: #' @i [1/0] {29} + ¦--COMMENT: #' @e [1/0] {30} + °--expr: x <- [1/0] {31} + ¦--expr: x [0/1] {33} + ¦ °--SYMBOL: x [0/0] {32} + ¦--LEFT_ASSIGN: <- [0/1] {34} + °--expr: 33 [0/0] {36} + °--NUM_CONST: 33 [0/0] {35} diff --git a/tests/testthat/roxygen-examples-complete/24-exampleIf-spacing-out.R b/tests/testthat/roxygen-examples-complete/24-exampleIf-spacing-out.R new file mode 100644 index 000000000..50ea30701 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/24-exampleIf-spacing-out.R @@ -0,0 +1,28 @@ +#' The bli blauuu2 +#' +#' Style code according to the bli blauuu2 guide. +#' @family some +#' @examplesIf TRUE # tab +#' c() +#' @examplesIf TRUE +#' c() +#' @examplesIf TRUE +#' c() +#' @importFrom purrr partial +#' @export +x <- 3 + + +#' Now with needs_rd_emulation +#' +#' Style code according to the bli blauuu2 guide. 
+#' @family some +#' @examplesIf TRUE # tab +#' a %>% b() +#' @examplesIf TRUE +#' a %>% d() +#' @examplesIf TRUE +#' a %>% c() +#' @importFrom purrr partial +#' @export +x <- 33 diff --git a/tests/testthat/roxygen-examples-complete/25-ordinary-comment-in-example-in.R b/tests/testthat/roxygen-examples-complete/25-ordinary-comment-in-example-in.R new file mode 100644 index 000000000..5e58b51b4 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/25-ordinary-comment-in-example-in.R @@ -0,0 +1,133 @@ +#' Example +# Random comment +#' Roxygen +#' @examples +#' 1 + 1 +NULL + + +#' Example +# Random comment +#' Roxygen +#' @examplesIf +#' 1 + 1 +NULL + +#' Example +# Random comment +#' Roxygen +#' @examples +#' 1 + 1 +# comment +# more +NULL + +#' Example +# Random comment +#' Roxygen +#' @examples +# There +#' 1 + 1 +# comment +# more +NULL + +#' Example +#' Random comment +#' Roxygen +#' @examples +# There +#' 1 + 1 +# comment +# more +NULL + +#' Example +# Random comment +#' Roxygen +#' @examples +# There +#' \dontrun{ +#' 1 + 1 +#' } +# comment +# more +NULL + +#' Example +# Random comment +#' Roxygen +#' @examples +# There +#' \dontrun{ +#' 1 + 1 +#' } # comment +# more +NULL + +#' Example +# Random comment +#' Roxygen +#' @examples +# 'There +#' \dontrun{ +#' 1 + 1 +#' } +# comment +# more +NULL + +#' Example +# Random comment +#' Roxygen +#' @examples +# There +#' \dontrun{ +# comment +#' 1 + 1 +#' } +# more +NULL + +#' Example +# Random comment +#' Roxygen +#' @examples +# There +#' \dontrun{ +#' call( +# comment +#' 1 + 1 +#' ) +#' } +# more +NULL + +# nolint start +#' @examplesIf TRUE +# nolint end +#' df %>% func() +func <- function() NULL + + +#' Hi +# Comment +#' @examples +#' 1 + 1 +# this +# this +#this +# thi3 +#' c() +NULL + +#' Hi +# Comment +#' @examples +#' 1 + 1 +# this +# this +#this +# thi3 +#' c() +NULL diff --git a/tests/testthat/roxygen-examples-complete/25-ordinary-comment-in-example-in_tree b/tests/testthat/roxygen-examples-complete/25-ordinary-comment-in-example-in_tree new file mode 100644 index 000000000..edac0463e --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/25-ordinary-comment-in-example-in_tree @@ -0,0 +1,141 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' Ex [0/0] {1} + ¦--COMMENT: # Ran [1/0] {2} + ¦--COMMENT: #' Ro [1/0] {3} + ¦--COMMENT: #' @e [1/0] {4} + ¦--COMMENT: #' 1 [1/0] {5} + ¦--expr: NULL [1/0] {7} + ¦ °--NULL_CONST: NULL [0/0] {6} + ¦--COMMENT: #' Ex [3/0] {8} + ¦--COMMENT: # Ran [1/0] {9} + ¦--COMMENT: #' Ro [1/0] {10} + ¦--COMMENT: #' @e [1/0] {11} + ¦--COMMENT: #' 1 [1/0] {12} + ¦--expr: NULL [1/0] {14} + ¦ °--NULL_CONST: NULL [0/0] {13} + ¦--COMMENT: #' Ex [2/0] {15} + ¦--COMMENT: # Ran [1/0] {16} + ¦--COMMENT: #' Ro [1/0] {17} + ¦--COMMENT: #' @e [1/0] {18} + ¦--COMMENT: #' 1 [1/0] {19} + ¦--COMMENT: # com [1/0] {20} + ¦--COMMENT: # mor [1/0] {21} + ¦--expr: NULL [1/0] {23} + ¦ °--NULL_CONST: NULL [0/0] {22} + ¦--COMMENT: #' Ex [2/0] {24} + ¦--COMMENT: # Ran [1/0] {25} + ¦--COMMENT: #' Ro [1/0] {26} + ¦--COMMENT: #' @e [1/0] {27} + ¦--COMMENT: # The [1/0] {28} + ¦--COMMENT: #' 1 [1/0] {29} + ¦--COMMENT: # com [1/0] {30} + ¦--COMMENT: # mor [1/0] {31} + ¦--expr: NULL [1/0] {33} + ¦ °--NULL_CONST: NULL [0/0] {32} + ¦--COMMENT: #' Ex [2/0] {34} + ¦--COMMENT: #' Ra [1/0] {35} + ¦--COMMENT: #' Ro [1/0] {36} + ¦--COMMENT: #' @e [1/0] {37} + ¦--COMMENT: # The [1/0] {38} + ¦--COMMENT: #' 1 [1/0] {39} + ¦--COMMENT: # com [1/0] {40} + ¦--COMMENT: # mor [1/0] {41} + ¦--expr: NULL [1/0] {43} + ¦ 
°--NULL_CONST: NULL [0/0] {42} + ¦--COMMENT: #' Ex [2/0] {44} + ¦--COMMENT: # Ran [1/0] {45} + ¦--COMMENT: #' Ro [1/0] {46} + ¦--COMMENT: #' @e [1/0] {47} + ¦--COMMENT: # The [1/0] {48} + ¦--COMMENT: #' \d [1/0] {49} + ¦--COMMENT: #' 1 [1/0] {50} + ¦--COMMENT: #' } [1/0] {51} + ¦--COMMENT: # com [1/0] {52} + ¦--COMMENT: # mor [1/0] {53} + ¦--expr: NULL [1/0] {55} + ¦ °--NULL_CONST: NULL [0/0] {54} + ¦--COMMENT: #' Ex [2/0] {56} + ¦--COMMENT: # Ran [1/0] {57} + ¦--COMMENT: #' Ro [1/0] {58} + ¦--COMMENT: #' @e [1/0] {59} + ¦--COMMENT: # The [1/0] {60} + ¦--COMMENT: #' \d [1/0] {61} + ¦--COMMENT: #' 1 [1/0] {62} + ¦--COMMENT: #' } [1/0] {63} + ¦--COMMENT: # mor [1/0] {64} + ¦--expr: NULL [1/0] {66} + ¦ °--NULL_CONST: NULL [0/0] {65} + ¦--COMMENT: #' Ex [2/0] {67} + ¦--COMMENT: # Ran [1/0] {68} + ¦--COMMENT: #' Ro [1/0] {69} + ¦--COMMENT: #' @e [1/0] {70} + ¦--COMMENT: # 'Th [1/0] {71} + ¦--COMMENT: #' \d [1/0] {72} + ¦--COMMENT: #' 1 [1/0] {73} + ¦--COMMENT: #' } [1/0] {74} + ¦--COMMENT: # com [1/0] {75} + ¦--COMMENT: # mor [1/0] {76} + ¦--expr: NULL [1/0] {78} + ¦ °--NULL_CONST: NULL [0/0] {77} + ¦--COMMENT: #' Ex [2/0] {79} + ¦--COMMENT: # Ran [1/0] {80} + ¦--COMMENT: #' Ro [1/0] {81} + ¦--COMMENT: #' @e [1/0] {82} + ¦--COMMENT: # The [1/0] {83} + ¦--COMMENT: #' \d [1/0] {84} + ¦--COMMENT: # com [1/0] {85} + ¦--COMMENT: #' 1 [1/0] {86} + ¦--COMMENT: #' } [1/0] {87} + ¦--COMMENT: # mor [1/0] {88} + ¦--expr: NULL [1/0] {90} + ¦ °--NULL_CONST: NULL [0/0] {89} + ¦--COMMENT: #' Ex [2/0] {91} + ¦--COMMENT: # Ran [1/0] {92} + ¦--COMMENT: #' Ro [1/0] {93} + ¦--COMMENT: #' @e [1/0] {94} + ¦--COMMENT: # The [1/0] {95} + ¦--COMMENT: #' \d [1/0] {96} + ¦--COMMENT: #' ca [1/0] {97} + ¦--COMMENT: # com [1/0] {98} + ¦--COMMENT: #' 1 [1/0] {99} + ¦--COMMENT: #' ) [1/0] {100} + ¦--COMMENT: #' } [1/0] {101} + ¦--COMMENT: # mor [1/0] {102} + ¦--expr: NULL [1/0] {104} + ¦ °--NULL_CONST: NULL [0/0] {103} + ¦--COMMENT: # nol [2/0] {105} + ¦--COMMENT: #' @e [1/0] {106} + ¦--COMMENT: # nol [1/0] {107} + ¦--COMMENT: #' df [1/0] {108} + ¦--expr: func [1/0] {109} + ¦ ¦--expr: func [0/1] {111} + ¦ ¦ °--SYMBOL: func [0/0] {110} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {112} + ¦ °--expr: funct [0/0] {113} + ¦ ¦--FUNCTION: funct [0/0] {114} + ¦ ¦--'(': ( [0/0] {115} + ¦ ¦--')': ) [0/1] {116} + ¦ °--expr: NULL [0/0] {118} + ¦ °--NULL_CONST: NULL [0/0] {117} + ¦--COMMENT: #' Hi [3/0] {119} + ¦--COMMENT: # Com [1/0] {120} + ¦--COMMENT: #' @e [1/0] {121} + ¦--COMMENT: #' 1 [1/0] {122} + ¦--COMMENT: # thi [1/0] {123} + ¦--COMMENT: # thi [1/0] {124} + ¦--COMMENT: #this [1/0] {125} + ¦--COMMENT: # thi [1/0] {126} + ¦--COMMENT: #' c( [1/0] {127} + ¦--expr: NULL [1/0] {129} + ¦ °--NULL_CONST: NULL [0/0] {128} + ¦--COMMENT: #' Hi [2/0] {130} + ¦--COMMENT: # Com [1/0] {131} + ¦--COMMENT: #' @e [1/0] {132} + ¦--COMMENT: #' 1 [1/0] {133} + ¦--COMMENT: # thi [1/0] {134} + ¦--COMMENT: # thi [1/0] {135} + ¦--COMMENT: #this [1/0] {136} + ¦--COMMENT: # thi [1/0] {137} + ¦--COMMENT: #' c( [1/0] {138} + °--expr: NULL [1/0] {140} + °--NULL_CONST: NULL [0/0] {139} diff --git a/tests/testthat/roxygen-examples-complete/25-ordinary-comment-in-example-out.R b/tests/testthat/roxygen-examples-complete/25-ordinary-comment-in-example-out.R new file mode 100644 index 000000000..b3fa7b05a --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/25-ordinary-comment-in-example-out.R @@ -0,0 +1,134 @@ +#' Example +# Random comment +#' Roxygen +#' @examples +#' 1 + 1 +NULL + + +#' Example +# Random comment +#' Roxygen +#' @examplesIf +#' 1 + 1 +NULL + +#' Example 
+# Random comment +#' Roxygen +#' @examples +#' 1 + 1 +# comment +# more +NULL + +#' Example +# Random comment +#' Roxygen +#' @examples +# There +#' 1 + 1 +# comment +# more +NULL + +#' Example +#' Random comment +#' Roxygen +#' @examples +# There +#' 1 + 1 +# comment +# more +NULL + +#' Example +# Random comment +#' Roxygen +#' @examples +# There +#' \dontrun{ +#' 1 + 1 +#' } +# comment +# more +NULL + +#' Example +# Random comment +#' Roxygen +#' @examples +# There +#' \dontrun{ +#' 1 + 1 +#' # comment +#' } +# more +NULL + +#' Example +# Random comment +#' Roxygen +#' @examples +# 'There +#' \dontrun{ +#' 1 + 1 +#' } +# comment +# more +NULL + +#' Example +# Random comment +#' Roxygen +#' @examples +# There +#' \dontrun{ +# comment +#' 1 + 1 +#' } +# more +NULL + +#' Example +# Random comment +#' Roxygen +#' @examples +# There +#' \dontrun{ +#' call( +#' # comment +#' 1 + 1 +#' ) +#' } +# more +NULL + +# nolint start +#' @examplesIf TRUE +# nolint end +#' df %>% func() +func <- function() NULL + + +#' Hi +# Comment +#' @examples +#' 1 + 1 +# this +# this +# this +# thi3 +#' c() +NULL + +#' Hi +# Comment +#' @examples +#' 1 + 1 +# this +# this +# this +# thi3 +#' c() +NULL diff --git a/tests/testthat/roxygen-examples-complete/26-empty-trailing-lines-in.R b/tests/testthat/roxygen-examples-complete/26-empty-trailing-lines-in.R new file mode 100644 index 000000000..5808ced9c --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/26-empty-trailing-lines-in.R @@ -0,0 +1,40 @@ +#' this +#' +#' mey +#' @examples +#' 2 + 1 +#' +NULL + + +#' this +#' +#' mey +#' @examples +#' 2 + 1 +#' +#' +#' +#' +#' +#' +#' +NULL + + +#' this +#' +#' mey +#' @examples +#' 2 + 1 +NULL + + + +#' this +#' +#' empty line after example +#' @examples +#' 2 + 1 + +NULL diff --git a/tests/testthat/roxygen-examples-complete/26-empty-trailing-lines-in_tree b/tests/testthat/roxygen-examples-complete/26-empty-trailing-lines-in_tree new file mode 100644 index 000000000..8f9a397dc --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/26-empty-trailing-lines-in_tree @@ -0,0 +1,37 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' th [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' me [1/0] {3} + ¦--COMMENT: #' @e [1/0] {4} + ¦--COMMENT: #' 2 [1/0] {5} + ¦--COMMENT: #' [1/0] {6} + ¦--expr: NULL [1/0] {8} + ¦ °--NULL_CONST: NULL [0/0] {7} + ¦--COMMENT: #' th [3/0] {9} + ¦--COMMENT: #' [1/0] {10} + ¦--COMMENT: #' me [1/0] {11} + ¦--COMMENT: #' @e [1/0] {12} + ¦--COMMENT: #' 2 [1/0] {13} + ¦--COMMENT: #' [1/0] {14} + ¦--COMMENT: #' [1/0] {15} + ¦--COMMENT: #' [1/0] {16} + ¦--COMMENT: #' [1/0] {17} + ¦--COMMENT: #' [1/0] {18} + ¦--COMMENT: #' [1/0] {19} + ¦--COMMENT: #' [1/0] {20} + ¦--expr: NULL [1/0] {22} + ¦ °--NULL_CONST: NULL [0/0] {21} + ¦--COMMENT: #' th [3/0] {23} + ¦--COMMENT: #' [1/0] {24} + ¦--COMMENT: #' me [1/0] {25} + ¦--COMMENT: #' @e [1/0] {26} + ¦--COMMENT: #' 2 [1/0] {27} + ¦--expr: NULL [1/0] {29} + ¦ °--NULL_CONST: NULL [0/0] {28} + ¦--COMMENT: #' th [4/0] {30} + ¦--COMMENT: #' [1/0] {31} + ¦--COMMENT: #' em [1/0] {32} + ¦--COMMENT: #' @e [1/0] {33} + ¦--COMMENT: #' 2 [1/0] {34} + °--expr: NULL [2/0] {36} + °--NULL_CONST: NULL [0/0] {35} diff --git a/tests/testthat/roxygen-examples-complete/26-empty-trailing-lines-out.R b/tests/testthat/roxygen-examples-complete/26-empty-trailing-lines-out.R new file mode 100644 index 000000000..f85ff9e76 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/26-empty-trailing-lines-out.R @@ -0,0 +1,33 @@ +#' this +#' +#' mey 
+#' @examples +#' 2 + 1 +#' +NULL + + +#' this +#' +#' mey +#' @examples +#' 2 + 1 +#' +NULL + + +#' this +#' +#' mey +#' @examples +#' 2 + 1 +NULL + + + +#' this +#' +#' empty line after example +#' @examples +#' 2 + 1 +NULL diff --git a/tests/testthat/roxygen-examples-complete/27-no-code-block-after-example-in.R b/tests/testthat/roxygen-examples-complete/27-no-code-block-after-example-in.R new file mode 100644 index 000000000..449e08261 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/27-no-code-block-after-example-in.R @@ -0,0 +1,13 @@ +#' This +#' +#' +#' is stuff +#' +#' @examples +#' 1+1 + + +# nolint start +#' @examplesIf long_condition_line +#' 32 / 3 +# nolint end diff --git a/tests/testthat/roxygen-examples-complete/27-no-code-block-after-example-in_tree b/tests/testthat/roxygen-examples-complete/27-no-code-block-after-example-in_tree new file mode 100644 index 000000000..3eac5d60e --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/27-no-code-block-after-example-in_tree @@ -0,0 +1,12 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' Th [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' [1/0] {3} + ¦--COMMENT: #' is [1/0] {4} + ¦--COMMENT: #' [1/0] {5} + ¦--COMMENT: #' @e [1/0] {6} + ¦--COMMENT: #' 1+ [1/0] {7} + ¦--COMMENT: # nol [3/0] {8} + ¦--COMMENT: #' @e [1/0] {9} + ¦--COMMENT: #' 32 [1/0] {10} + °--COMMENT: # nol [1/0] {11} diff --git a/tests/testthat/roxygen-examples-complete/27-no-code-block-after-example-out.R b/tests/testthat/roxygen-examples-complete/27-no-code-block-after-example-out.R new file mode 100644 index 000000000..409108981 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/27-no-code-block-after-example-out.R @@ -0,0 +1,11 @@ +#' This +#' +#' +#' is stuff +#' +#' @examples +#' 1 + 1 +# nolint start +#' @examplesIf long_condition_line +#' 32 / 3 +# nolint end diff --git a/tests/testthat/roxygen-examples-complete/28-end-not-blank-in.R b/tests/testthat/roxygen-examples-complete/28-end-not-blank-in.R new file mode 100644 index 000000000..75f99fa88 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/28-end-not-blank-in.R @@ -0,0 +1,3 @@ +#' @export +#' @examples +#' x=1 diff --git a/tests/testthat/roxygen-examples-complete/28-end-not-blank-in_tree b/tests/testthat/roxygen-examples-complete/28-end-not-blank-in_tree new file mode 100644 index 000000000..9cd091e60 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/28-end-not-blank-in_tree @@ -0,0 +1,4 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' @e [0/0] {1} + ¦--COMMENT: #' @e [1/0] {2} + °--COMMENT: #' x= [1/0] {3} diff --git a/tests/testthat/roxygen-examples-complete/28-end-not-blank-out.R b/tests/testthat/roxygen-examples-complete/28-end-not-blank-out.R new file mode 100644 index 000000000..beb09105b --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/28-end-not-blank-out.R @@ -0,0 +1,3 @@ +#' @export +#' @examples +#' x <- 1 diff --git a/tests/testthat/roxygen-examples-complete/29-multiple-empty-lines-in-example-in.R b/tests/testthat/roxygen-examples-complete/29-multiple-empty-lines-in-example-in.R new file mode 100644 index 000000000..578ca3363 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/29-multiple-empty-lines-in-example-in.R @@ -0,0 +1,11 @@ +#' Empty line in examples +#' +#' @examples +1 + +#' Empty line in examples +#' +#' @examples +#' \dontrun{ +#' } +2 diff --git a/tests/testthat/roxygen-examples-complete/29-multiple-empty-lines-in-example-in_tree 
b/tests/testthat/roxygen-examples-complete/29-multiple-empty-lines-in-example-in_tree new file mode 100644 index 000000000..37a30f03f --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/29-multiple-empty-lines-in-example-in_tree @@ -0,0 +1,13 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #' Em [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' @e [1/0] {3} + ¦--expr: 1 [1/0] {5} + ¦ °--NUM_CONST: 1 [0/0] {4} + ¦--COMMENT: #' Em [2/0] {6} + ¦--COMMENT: #' [1/0] {7} + ¦--COMMENT: #' @e [1/0] {8} + ¦--COMMENT: #' \d [1/0] {9} + ¦--COMMENT: #' } [1/0] {10} + °--expr: 2 [1/0] {12} + °--NUM_CONST: 2 [0/0] {11} diff --git a/tests/testthat/roxygen-examples-complete/29-multiple-empty-lines-in-example-out.R b/tests/testthat/roxygen-examples-complete/29-multiple-empty-lines-in-example-out.R new file mode 100644 index 000000000..1061151d6 --- /dev/null +++ b/tests/testthat/roxygen-examples-complete/29-multiple-empty-lines-in-example-out.R @@ -0,0 +1,12 @@ +#' Empty line in examples +#' +#' @examples +1 + +#' Empty line in examples +#' +#' @examples +#' \dontrun{ +#' +#' } +2 diff --git a/tests/testthat/roxygen-examples-identify/1-one-function-example-last-proper-run.R b/tests/testthat/roxygen-examples-identify/1-one-function-example-last-proper-run.R new file mode 100644 index 000000000..c5119969a --- /dev/null +++ b/tests/testthat/roxygen-examples-identify/1-one-function-example-last-proper-run.R @@ -0,0 +1,7 @@ +#' Prettify R source code +#' +#' Performs various substitutions in all `.R` files in a package +#' (code and tests). +#' Carefully examine the results after running this function! +#'@examples style_pkg(style = tidyverse_style, strict = TRUE) +a <- 2 diff --git a/tests/testthat/roxygen-examples-identify/2-one-function-examples-last-proper-run.R b/tests/testthat/roxygen-examples-identify/2-one-function-examples-last-proper-run.R new file mode 100644 index 000000000..5d3d4873e --- /dev/null +++ b/tests/testthat/roxygen-examples-identify/2-one-function-examples-last-proper-run.R @@ -0,0 +1,12 @@ +#' Prettify R source code +#' +#' Performs various substitutions in all `.R` files in a package +#' (code and tests). +#' Carefully examine the results after running this function! +#' @examples +#' style_pkg(style = tidyverse_style, strict = TRUE) +#' style_pkg( +#' scope = "line_breaks", +#' math_token_spacing = specify_math_token_spacing(zero = "'+'") +#' ) +a <- call diff --git a/tests/testthat/roxygen-examples-identify/20-exampleIf-simple-in.R b/tests/testthat/roxygen-examples-identify/20-exampleIf-simple-in.R new file mode 100644 index 000000000..1541e5ed3 --- /dev/null +++ b/tests/testthat/roxygen-examples-identify/20-exampleIf-simple-in.R @@ -0,0 +1,9 @@ +#' The tidyverse style +#' +#' Style code according to the tidyverse style guide. +#' @family style_guides +#' @examplesIf TRUE +#' c( ) +#' @importFrom purrr partial +#' @export +a <- call; diff --git a/tests/testthat/roxygen-examples-identify/21-exampleIf-multiple-in.R b/tests/testthat/roxygen-examples-identify/21-exampleIf-multiple-in.R new file mode 100644 index 000000000..5af339a2b --- /dev/null +++ b/tests/testthat/roxygen-examples-identify/21-exampleIf-multiple-in.R @@ -0,0 +1,11 @@ +#' The tidyverse style +#' +#' Style code according to the tidyverse style guide. 
+#' @family style_guides +#' @examplesIf TRUE +#' c( ) +#' @examplesIf TRUE +#' x=2 +#' @importFrom purrr partial +#' @export +a <- call; diff --git a/tests/testthat/roxygen-examples-identify/3-one-function-example-not-last-proper-run.R b/tests/testthat/roxygen-examples-identify/3-one-function-example-not-last-proper-run.R new file mode 100644 index 000000000..5c4814dbf --- /dev/null +++ b/tests/testthat/roxygen-examples-identify/3-one-function-example-not-last-proper-run.R @@ -0,0 +1,7 @@ +#' Prettify R source code +#' +#' Performs various substitutions in all `.R` files in a package... +#' Carefully examine the results after running this function! +#' @examples style_pkg(style = tidyverse_style, strict = TRUE) +#' @name k +a <- 2 diff --git a/tests/testthat/roxygen-examples-identify/4-one-function-examples-not-last-proper-run.R b/tests/testthat/roxygen-examples-identify/4-one-function-examples-not-last-proper-run.R new file mode 100644 index 000000000..81a774367 --- /dev/null +++ b/tests/testthat/roxygen-examples-identify/4-one-function-examples-not-last-proper-run.R @@ -0,0 +1,12 @@ +#' The tidyverse style +#' +#' Style code according to the tidyverse style guide. +#' @family style_guides +#' @examples +#' style_text("call( 1)", style = tidyverse_style, scope = "spaces") +#' style_text("call( 1)", transformers = tidyverse_style(strict = TRUE)) +#' style_text(c("ab <- 3", "a <-3"), strict = FALSE) # keeps alignment of "<-" +#' style_text(c("ab <- 3", "a <-3"), strict = TRUE) # drops alignment of "<-" +#' @importFrom purrr partial +#' @export +a <- call diff --git a/tests/testthat/roxygen-examples-identify/5-multiple-function-examples-last-proper-run.R b/tests/testthat/roxygen-examples-identify/5-multiple-function-examples-last-proper-run.R new file mode 100644 index 000000000..c35de4e6e --- /dev/null +++ b/tests/testthat/roxygen-examples-identify/5-multiple-function-examples-last-proper-run.R @@ -0,0 +1,18 @@ +#' The tidyverse style +#' +#' Style code according to the tidyverse style guide. +#' @family style_guides +#' @examples +#' style_text("call( 1)", style = tidyverse_style, scope = "spaces") +#' style_text("call( 1)", transformers = tidyverse_style(strict = TRUE)) +#' style_text(c("ab <- 3", "a <-3"), strict = FALSE) # keeps alignment of "<-" +#' style_text(c("ab <- 3", "a <-3"), strict = TRUE) # drops alignment of "<-" +a <- call + +#' Prettify R source code +#' +#' Performs various substitutions in all `.R` files in a package +#' (code and tests). +#' Carefully examine the results after running this function! +#' @examples style_pkg(style = tidyverse_style, strict = TRUE) +a <- 2 diff --git a/tests/testthat/roxygen-examples-identify/6-multiple-function-examples-not-last-proper-run.R b/tests/testthat/roxygen-examples-identify/6-multiple-function-examples-not-last-proper-run.R new file mode 100644 index 000000000..1ecfe7bcf --- /dev/null +++ b/tests/testthat/roxygen-examples-identify/6-multiple-function-examples-not-last-proper-run.R @@ -0,0 +1,20 @@ +#' Prettify R source code +#' +#' Performs various substitutions in all `.R` files in a package... +#' Carefully examine the results after running this function! +#' @examples style_pkg(style = tidyverse_style, strict = TRUE) +#' @name k +a <- 2 + +#' The tidyverse style +#' +#' Style code according to the tidyverse style guide. 
+#' @family style_guides +#' @examples +#' style_text("call( 1)", style = tidyverse_style, scope = "spaces") +#' style_text("call( 1)", transformers = tidyverse_style(strict = TRUE)) +#' style_text(c("ab <- 3", "a <-3"), strict = FALSE) # keeps alignment of "<-" +#' style_text(c("ab <- 3", "a <-3"), strict = TRUE) # drops alignment of "<-" +#' @importFrom purrr partial +#' @export +a <- call diff --git a/tests/testthat/scope_argument/scope_indention-in.R b/tests/testthat/scope-AsIs/scope_indention-in.R similarity index 100% rename from tests/testthat/scope_argument/scope_indention-in.R rename to tests/testthat/scope-AsIs/scope_indention-in.R diff --git a/tests/testthat/scope-AsIs/scope_indention-in_tree b/tests/testthat/scope-AsIs/scope_indention-in_tree new file mode 100644 index 000000000..10fda6ca9 --- /dev/null +++ b/tests/testthat/scope-AsIs/scope_indention-in_tree @@ -0,0 +1,97 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: # not [0/0] {1} + ¦--expr: if (x [1/0] {2} + ¦ ¦--IF: if [0/1] {3} + ¦ ¦--'(': ( [0/0] {4} + ¦ ¦--expr: x [0/0] {6} + ¦ ¦ °--SYMBOL: x [0/0] {5} + ¦ ¦--')': ) [0/1] {7} + ¦ ¦--expr: {1+1+ [0/1] {8} + ¦ ¦ ¦--'{': { [0/0] {9} + ¦ ¦ ¦--expr: 1+1++ [0/0] {10} + ¦ ¦ ¦ ¦--expr: 1 [0/0] {13} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {12} + ¦ ¦ ¦ ¦--'+': + [0/0] {14} + ¦ ¦ ¦ ¦--expr: 1 [0/0] {16} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {15} + ¦ ¦ ¦ ¦--'+': + [0/0] {17} + ¦ ¦ ¦ °--expr: +1 [0/0] {18} + ¦ ¦ ¦ ¦--'+': + [0/0] {19} + ¦ ¦ ¦ °--expr: 1 [0/0] {21} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {20} + ¦ ¦ °--'}': } [0/0] {22} + ¦ ¦--ELSE: else [0/0] {23} + ¦ °--expr: {3} [0/0] {24} + ¦ ¦--'{': { [0/0] {25} + ¦ ¦--expr: 3 [0/0] {27} + ¦ ¦ °--NUM_CONST: 3 [0/0] {26} + ¦ °--'}': } [0/0] {28} + ¦--COMMENT: # not [2/0] {29} + ¦--COMMENT: # FIX [1/0] {30} + ¦--expr: test_ [1/0] {31} + ¦ ¦--expr: test_ [0/0] {33} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {32} + ¦ ¦--'(': ( [0/0] {34} + ¦ ¦--expr: "x" [0/0] {36} + ¦ ¦ °--STR_CONST: "x" [0/0] {35} + ¦ ¦--',': , [0/2] {37} + ¦ ¦--expr: { + [1/0] {38} + ¦ ¦ ¦--'{': { [0/12] {39} + ¦ ¦ ¦--expr: my_te [1/0] {40} + ¦ ¦ ¦ ¦--expr: my_te [0/0] {42} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: my_te [0/0] {41} + ¦ ¦ ¦ ¦--'(': ( [0/0] {43} + ¦ ¦ ¦ ¦--expr: call [0/0] {45} + ¦ ¦ ¦ ¦ °--SYMBOL: call [0/0] {44} + ¦ ¦ ¦ °--')': ) [0/0] {46} + ¦ ¦ °--'}': } [1/0] {47} + ¦ °--')': ) [0/0] {48} + ¦--COMMENT: # do [2/0] {49} + ¦--expr_or_assign_or_help: a = 3 [1/0] {50} + ¦ ¦--expr: a [0/1] {52} + ¦ ¦ °--SYMBOL: a [0/0] {51} + ¦ ¦--EQ_ASSIGN: = [0/1] {53} + ¦ °--expr: 3 [0/0] {55} + ¦ °--NUM_CONST: 3 [0/0] {54} + ¦--expr: data_ [1/0] {56} + ¦ ¦--expr: data_ [0/0] {58} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: data_ [0/0] {57} + ¦ ¦--'(': ( [0/0] {59} + ¦ ¦--SYMBOL_SUB: a [0/1] {60} + ¦ ¦--EQ_SUB: = [0/1] {61} + ¦ ¦--expr: 3 [0/0] {63} + ¦ ¦ °--NUM_CONST: 3 [0/0] {62} + ¦ °--')': ) [0/0] {64} + ¦--COMMENT: # do [2/0] {65} + ¦--expr: a <- [1/0] {66} + ¦ ¦--expr: a [0/1] {68} + ¦ ¦ °--SYMBOL: a [0/0] {67} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {69} + ¦ °--expr: funct [0/0] {70} + ¦ ¦--FUNCTION: funct [0/0] {71} + ¦ ¦--'(': ( [0/0] {72} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {73} + ¦ ¦--')': ) [0/1] {74} + ¦ °--expr: x + 1 [0/0] {75} + ¦ ¦--expr: x [0/1] {77} + ¦ ¦ °--SYMBOL: x [0/0] {76} + ¦ ¦--'+': + [0/1] {78} + ¦ °--expr: 1 [0/0] {80} + ¦ °--NUM_CONST: 1 [0/0] {79} + ¦--';': ; [0/0] {81} + ¦--expr: b [0/0] {83} + ¦ °--SYMBOL: b [0/0] {82} + ¦--';': ; [0/0] {84} + ¦--expr: c [0/0] {86} + ¦ °--SYMBOL: c [0/0] {85} + ¦--COMMENT: # don [2/0] {87} + °--expr: a %>% [1/0] {88} + ¦--expr: a 
[0/1] {91} + ¦ °--SYMBOL: a [0/0] {90} + ¦--SPECIAL-PIPE: %>% [0/2] {92} + ¦--expr: b [1/1] {94} + ¦ °--SYMBOL: b [0/0] {93} + ¦--SPECIAL-PIPE: %>% [0/2] {95} + °--expr: c [1/0] {97} + °--SYMBOL: c [0/0] {96} diff --git a/tests/testthat/scope-AsIs/scope_indention-out.R b/tests/testthat/scope-AsIs/scope_indention-out.R new file mode 100644 index 000000000..fe39c346a --- /dev/null +++ b/tests/testthat/scope-AsIs/scope_indention-out.R @@ -0,0 +1,21 @@ +# not adding line-break +if (x) {1+1++1} else{3} + +# not removing line-break +# FIXME If linebreaks are not touched: Do not indent token-dependent before '{' +test_that("x", + { + my_test(call) +}) + +# do not replace assignment +a = 3 +data_frame(a = 3) + +# do not resolve semicolon +a <- function(x) x + 1;b;c + +# don't add brackets in pipes +a %>% + b %>% + c diff --git a/tests/testthat/scope-AsIs/scope_indention_tokens-in.R b/tests/testthat/scope-AsIs/scope_indention_tokens-in.R new file mode 100644 index 000000000..6e20f072d --- /dev/null +++ b/tests/testthat/scope-AsIs/scope_indention_tokens-in.R @@ -0,0 +1,21 @@ +# adding line-break +if (x) {1 + 1 + +1} else {3} + +# removing line-break +test_that("x", + { + my_test(call) + }) + + +# do not replace assignment +a =3 +data_frame(a = 3) + +# do not resolve semicolon +a <- function(x) x + 1;b;c + +# don't add brackets in pipes +a %>% + b %>% + c diff --git a/tests/testthat/scope-AsIs/scope_indention_tokens-in_tree b/tests/testthat/scope-AsIs/scope_indention_tokens-in_tree new file mode 100644 index 000000000..c589fac39 --- /dev/null +++ b/tests/testthat/scope-AsIs/scope_indention_tokens-in_tree @@ -0,0 +1,96 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: # add [0/0] {1} + ¦--expr: if (x [1/0] {2} + ¦ ¦--IF: if [0/1] {3} + ¦ ¦--'(': ( [0/0] {4} + ¦ ¦--expr: x [0/0] {6} + ¦ ¦ °--SYMBOL: x [0/0] {5} + ¦ ¦--')': ) [0/1] {7} + ¦ ¦--expr: {1 + [0/1] {8} + ¦ ¦ ¦--'{': { [0/0] {9} + ¦ ¦ ¦--expr: 1 + 1 [0/0] {10} + ¦ ¦ ¦ ¦--expr: 1 [0/1] {13} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {12} + ¦ ¦ ¦ ¦--'+': + [0/1] {14} + ¦ ¦ ¦ ¦--expr: 1 [0/1] {16} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {15} + ¦ ¦ ¦ ¦--'+': + [0/1] {17} + ¦ ¦ ¦ °--expr: +1 [0/0] {18} + ¦ ¦ ¦ ¦--'+': + [0/0] {19} + ¦ ¦ ¦ °--expr: 1 [0/0] {21} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {20} + ¦ ¦ °--'}': } [0/0] {22} + ¦ ¦--ELSE: else [0/1] {23} + ¦ °--expr: {3} [0/0] {24} + ¦ ¦--'{': { [0/0] {25} + ¦ ¦--expr: 3 [0/0] {27} + ¦ ¦ °--NUM_CONST: 3 [0/0] {26} + ¦ °--'}': } [0/0] {28} + ¦--COMMENT: # rem [2/0] {29} + ¦--expr: test_ [1/0] {30} + ¦ ¦--expr: test_ [0/0] {32} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {31} + ¦ ¦--'(': ( [0/0] {33} + ¦ ¦--expr: "x" [0/0] {35} + ¦ ¦ °--STR_CONST: "x" [0/0] {34} + ¦ ¦--',': , [0/10] {36} + ¦ ¦--expr: { + [1/0] {37} + ¦ ¦ ¦--'{': { [0/12] {38} + ¦ ¦ ¦--expr: my_te [1/10] {39} + ¦ ¦ ¦ ¦--expr: my_te [0/0] {41} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: my_te [0/0] {40} + ¦ ¦ ¦ ¦--'(': ( [0/0] {42} + ¦ ¦ ¦ ¦--expr: call [0/0] {44} + ¦ ¦ ¦ ¦ °--SYMBOL: call [0/0] {43} + ¦ ¦ ¦ °--')': ) [0/0] {45} + ¦ ¦ °--'}': } [1/0] {46} + ¦ °--')': ) [0/0] {47} + ¦--COMMENT: # do [3/0] {48} + ¦--expr_or_assign_or_help: a =3 [1/0] {49} + ¦ ¦--expr: a [0/1] {51} + ¦ ¦ °--SYMBOL: a [0/0] {50} + ¦ ¦--EQ_ASSIGN: = [0/0] {52} + ¦ °--expr: 3 [0/0] {54} + ¦ °--NUM_CONST: 3 [0/0] {53} + ¦--expr: data_ [1/0] {55} + ¦ ¦--expr: data_ [0/0] {57} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: data_ [0/0] {56} + ¦ ¦--'(': ( [0/0] {58} + ¦ ¦--SYMBOL_SUB: a [0/1] {59} + ¦ ¦--EQ_SUB: = [0/1] {60} + ¦ ¦--expr: 3 [0/0] {62} + ¦ ¦ °--NUM_CONST: 3 [0/0] {61} + ¦ 
°--')': ) [0/0] {63} + ¦--COMMENT: # do [2/0] {64} + ¦--expr: a <- [1/0] {65} + ¦ ¦--expr: a [0/1] {67} + ¦ ¦ °--SYMBOL: a [0/0] {66} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {68} + ¦ °--expr: funct [0/0] {69} + ¦ ¦--FUNCTION: funct [0/0] {70} + ¦ ¦--'(': ( [0/0] {71} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {72} + ¦ ¦--')': ) [0/1] {73} + ¦ °--expr: x + 1 [0/0] {74} + ¦ ¦--expr: x [0/1] {76} + ¦ ¦ °--SYMBOL: x [0/0] {75} + ¦ ¦--'+': + [0/1] {77} + ¦ °--expr: 1 [0/0] {79} + ¦ °--NUM_CONST: 1 [0/0] {78} + ¦--';': ; [0/0] {80} + ¦--expr: b [0/0] {82} + ¦ °--SYMBOL: b [0/0] {81} + ¦--';': ; [0/0] {83} + ¦--expr: c [0/0] {85} + ¦ °--SYMBOL: c [0/0] {84} + ¦--COMMENT: # don [2/0] {86} + °--expr: a %>% [1/0] {87} + ¦--expr: a [0/1] {90} + ¦ °--SYMBOL: a [0/0] {89} + ¦--SPECIAL-PIPE: %>% [0/2] {91} + ¦--expr: b [1/1] {93} + ¦ °--SYMBOL: b [0/0] {92} + ¦--SPECIAL-PIPE: %>% [0/2] {94} + °--expr: c [1/0] {96} + °--SYMBOL: c [0/0] {95} diff --git a/tests/testthat/scope-AsIs/scope_indention_tokens-out.R b/tests/testthat/scope-AsIs/scope_indention_tokens-out.R new file mode 100644 index 000000000..47df4b7fc --- /dev/null +++ b/tests/testthat/scope-AsIs/scope_indention_tokens-out.R @@ -0,0 +1,23 @@ +# adding line-break +if (x) {1 + 1 + +1} else {3} + +# removing line-break +test_that("x", + { + my_test(call) +}) + + +# do not replace assignment +a <-3 +data_frame(a = 3) + +# do not resolve semicolon +a <- function(x) x + 1 +b +c + +# don't add brackets in pipes +a %>% + b() %>% + c() diff --git a/tests/testthat/scope_argument/scope_line_breaks-in.R b/tests/testthat/scope-AsIs/scope_line_breaks-in.R similarity index 100% rename from tests/testthat/scope_argument/scope_line_breaks-in.R rename to tests/testthat/scope-AsIs/scope_line_breaks-in.R diff --git a/tests/testthat/scope-AsIs/scope_line_breaks-in_tree b/tests/testthat/scope-AsIs/scope_line_breaks-in_tree new file mode 100644 index 000000000..2b1d97fb2 --- /dev/null +++ b/tests/testthat/scope-AsIs/scope_line_breaks-in_tree @@ -0,0 +1,96 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: # add [0/0] {1} + ¦--expr: if (x [1/0] {2} + ¦ ¦--IF: if [0/1] {3} + ¦ ¦--'(': ( [0/0] {4} + ¦ ¦--expr: x [0/0] {6} + ¦ ¦ °--SYMBOL: x [0/0] {5} + ¦ ¦--')': ) [0/1] {7} + ¦ ¦--expr: {1 + [0/1] {8} + ¦ ¦ ¦--'{': { [0/0] {9} + ¦ ¦ ¦--expr: 1 + 1 [0/0] {10} + ¦ ¦ ¦ ¦--expr: 1 [0/1] {13} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {12} + ¦ ¦ ¦ ¦--'+': + [0/1] {14} + ¦ ¦ ¦ ¦--expr: 1 [0/1] {16} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {15} + ¦ ¦ ¦ ¦--'+': + [0/1] {17} + ¦ ¦ ¦ °--expr: +1 [0/0] {18} + ¦ ¦ ¦ ¦--'+': + [0/0] {19} + ¦ ¦ ¦ °--expr: 1 [0/0] {21} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {20} + ¦ ¦ °--'}': } [0/0] {22} + ¦ ¦--ELSE: else [0/1] {23} + ¦ °--expr: {3} [0/0] {24} + ¦ ¦--'{': { [0/0] {25} + ¦ ¦--expr: 3 [0/0] {27} + ¦ ¦ °--NUM_CONST: 3 [0/0] {26} + ¦ °--'}': } [0/0] {28} + ¦--COMMENT: # rem [2/0] {29} + ¦--expr: test_ [1/0] {30} + ¦ ¦--expr: test_ [0/0] {32} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {31} + ¦ ¦--'(': ( [0/0] {33} + ¦ ¦--expr: "x" [0/0] {35} + ¦ ¦ °--STR_CONST: "x" [0/0] {34} + ¦ ¦--',': , [0/10] {36} + ¦ ¦--expr: { + [1/0] {37} + ¦ ¦ ¦--'{': { [0/12] {38} + ¦ ¦ ¦--expr: my_te [1/10] {39} + ¦ ¦ ¦ ¦--expr: my_te [0/0] {41} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: my_te [0/0] {40} + ¦ ¦ ¦ ¦--'(': ( [0/0] {42} + ¦ ¦ ¦ ¦--expr: call [0/0] {44} + ¦ ¦ ¦ ¦ °--SYMBOL: call [0/0] {43} + ¦ ¦ ¦ °--')': ) [0/0] {45} + ¦ ¦ °--'}': } [1/0] {46} + ¦ °--')': ) [0/0] {47} + ¦--COMMENT: # do [3/0] {48} + ¦--expr_or_assign_or_help: a = 3 [1/0] {49} + ¦ ¦--expr: a [0/1] {51} + ¦ ¦ °--SYMBOL: a 
[0/0] {50} + ¦ ¦--EQ_ASSIGN: = [0/1] {52} + ¦ °--expr: 3 [0/0] {54} + ¦ °--NUM_CONST: 3 [0/0] {53} + ¦--expr: data_ [1/0] {55} + ¦ ¦--expr: data_ [0/0] {57} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: data_ [0/0] {56} + ¦ ¦--'(': ( [0/0] {58} + ¦ ¦--SYMBOL_SUB: a [0/1] {59} + ¦ ¦--EQ_SUB: = [0/1] {60} + ¦ ¦--expr: 3 [0/0] {62} + ¦ ¦ °--NUM_CONST: 3 [0/0] {61} + ¦ °--')': ) [0/0] {63} + ¦--COMMENT: # do [2/0] {64} + ¦--expr: a <- [1/0] {65} + ¦ ¦--expr: a [0/1] {67} + ¦ ¦ °--SYMBOL: a [0/0] {66} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {68} + ¦ °--expr: funct [0/0] {69} + ¦ ¦--FUNCTION: funct [0/0] {70} + ¦ ¦--'(': ( [0/0] {71} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {72} + ¦ ¦--')': ) [0/1] {73} + ¦ °--expr: x + 1 [0/0] {74} + ¦ ¦--expr: x [0/1] {76} + ¦ ¦ °--SYMBOL: x [0/0] {75} + ¦ ¦--'+': + [0/1] {77} + ¦ °--expr: 1 [0/0] {79} + ¦ °--NUM_CONST: 1 [0/0] {78} + ¦--';': ; [0/0] {80} + ¦--expr: b [0/0] {82} + ¦ °--SYMBOL: b [0/0] {81} + ¦--';': ; [0/0] {83} + ¦--expr: c [0/0] {85} + ¦ °--SYMBOL: c [0/0] {84} + ¦--COMMENT: # don [2/0] {86} + °--expr: a %>% [1/0] {87} + ¦--expr: a [0/1] {90} + ¦ °--SYMBOL: a [0/0] {89} + ¦--SPECIAL-PIPE: %>% [0/2] {91} + ¦--expr: b [1/1] {93} + ¦ °--SYMBOL: b [0/0] {92} + ¦--SPECIAL-PIPE: %>% [0/2] {94} + °--expr: c [1/0] {96} + °--SYMBOL: c [0/0] {95} diff --git a/tests/testthat/scope-AsIs/scope_line_breaks-out.R b/tests/testthat/scope-AsIs/scope_line_breaks-out.R new file mode 100644 index 000000000..c5dd80a84 --- /dev/null +++ b/tests/testthat/scope-AsIs/scope_line_breaks-out.R @@ -0,0 +1,24 @@ +# adding line-break +if (x) { +1 + 1 + +1 +} else { +3 +} + +# removing line-break +test_that("x", { + my_test(call) + }) + + +# do not replace assignment +a = 3 +data_frame(a = 3) + +# do not resolve semicolon +a <- function(x) x + 1;b;c + +# don't add brackets in pipes +a %>% + b %>% + c diff --git a/tests/testthat/scope_argument/scope_none-in.R b/tests/testthat/scope-AsIs/scope_none-in.R similarity index 100% rename from tests/testthat/scope_argument/scope_none-in.R rename to tests/testthat/scope-AsIs/scope_none-in.R diff --git a/tests/testthat/scope_argument/scope_none-in_tree b/tests/testthat/scope-AsIs/scope_none-in_tree similarity index 71% rename from tests/testthat/scope_argument/scope_none-in_tree rename to tests/testthat/scope-AsIs/scope_none-in_tree index ceee8e4e9..3c65a9c14 100644 --- a/tests/testthat/scope_argument/scope_none-in_tree +++ b/tests/testthat/scope-AsIs/scope_none-in_tree @@ -3,78 +3,82 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) ¦--COMMENT: #' [1/0] {2} ¦--COMMENT: #' @p [1/0] {3} ¦--COMMENT: #' [1/0] {4} - ¦--expr: [1/0] {5} - ¦ ¦--expr: [0/0] {7} + ¦--expr: a<- f [1/0] {5} + ¦ ¦--expr: a [0/0] {7} ¦ ¦ °--SYMBOL: a [0/0] {6} ¦ ¦--LEFT_ASSIGN: <- [0/1] {8} - ¦ °--expr: [0/0] {9} + ¦ °--expr: funct [0/0] {9} ¦ ¦--FUNCTION: funct [0/0] {10} ¦ ¦--'(': ( [0/0] {11} ¦ ¦--SYMBOL_FORMALS: x [0/0] {12} ¦ ¦--')': ) [0/0] {13} - ¦ °--expr: [0/0] {14} + ¦ °--expr: { + t [0/0] {14} ¦ ¦--'{': { [0/2] {15} - ¦ ¦--expr: [1/2] {16} - ¦ ¦ ¦--expr: [0/0] {18} + ¦ ¦--expr: test_ [1/2] {16} + ¦ ¦ ¦--expr: test_ [0/0] {18} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {17} ¦ ¦ ¦--'(': ( [0/0] {19} - ¦ ¦ ¦--expr: [0/0] {21} + ¦ ¦ ¦--expr: "I wa [0/0] {21} ¦ ¦ ¦ °--STR_CONST: "I wa [0/0] {20} ¦ ¦ ¦--',': , [0/0] {22} - ¦ ¦ ¦--expr: [0/5] {23} + ¦ ¦ ¦--expr: { + [0/5] {23} ¦ ¦ ¦ ¦--'{': { [0/4] {24} - ¦ ¦ ¦ ¦--expr: [1/4] {25} - ¦ ¦ ¦ ¦ ¦--expr: [0/1] {27} + ¦ ¦ ¦ ¦--expr: out < [1/4] {25} + ¦ ¦ ¦ ¦ ¦--expr: out [0/1] {27} ¦ ¦ ¦ ¦ ¦ °--SYMBOL: out [0/0] {26} ¦ ¦ ¦ ¦ ¦--LEFT_ASSIGN: 
<- [0/1] {28} - ¦ ¦ ¦ ¦ °--expr: [0/0] {29} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {31} + ¦ ¦ ¦ ¦ °--expr: c(1,c [0/0] {29} + ¦ ¦ ¦ ¦ ¦--expr: c [0/0] {31} ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {30} ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {32} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {34} + ¦ ¦ ¦ ¦ ¦--expr: 1 [0/0] {34} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {33} ¦ ¦ ¦ ¦ ¦--',': , [0/0] {35} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {36} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {38} + ¦ ¦ ¦ ¦ ¦--expr: c( + [0/0] {36} + ¦ ¦ ¦ ¦ ¦ ¦--expr: c [0/0] {38} ¦ ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {37} ¦ ¦ ¦ ¦ ¦ ¦--'(': ( [0/6] {39} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [1/4] {40} - ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/1] {42} + ¦ ¦ ¦ ¦ ¦ ¦--expr: 22 +1 [1/4] {40} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: 22 [0/1] {42} ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 22 [0/0] {41} ¦ ¦ ¦ ¦ ¦ ¦ ¦--'+': + [0/0] {43} - ¦ ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {45} + ¦ ¦ ¦ ¦ ¦ ¦ °--expr: 1 [0/0] {45} ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {44} ¦ ¦ ¦ ¦ ¦ °--')': ) [1/0] {46} ¦ ¦ ¦ ¦ °--')': ) [0/0] {47} - ¦ ¦ ¦ ¦--expr: [1/2] {48} + ¦ ¦ ¦ ¦--expr: if (x [1/2] {48} ¦ ¦ ¦ ¦ ¦--IF: if [0/1] {49} ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {50} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {51} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/1] {53} + ¦ ¦ ¦ ¦ ¦--expr: x > 1 [0/0] {51} + ¦ ¦ ¦ ¦ ¦ ¦--expr: x [0/1] {53} ¦ ¦ ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {52} ¦ ¦ ¦ ¦ ¦ ¦--GT: > [0/1] {54} - ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {56} + ¦ ¦ ¦ ¦ ¦ °--expr: 10 [0/0] {56} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 10 [0/0] {55} ¦ ¦ ¦ ¦ ¦--')': ) [0/1] {57} - ¦ ¦ ¦ ¦ °--expr: [0/0] {58} + ¦ ¦ ¦ ¦ °--expr: { + [0/0] {58} ¦ ¦ ¦ ¦ ¦--'{': { [0/6] {59} - ¦ ¦ ¦ ¦ ¦--expr: [1/4] {60} + ¦ ¦ ¦ ¦ ¦--expr: for ( [1/4] {60} ¦ ¦ ¦ ¦ ¦ ¦--FOR: for [0/1] {61} - ¦ ¦ ¦ ¦ ¦ ¦--forcond: [0/1] {62} + ¦ ¦ ¦ ¦ ¦ ¦--forcond: (x in [0/1] {62} ¦ ¦ ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {63} ¦ ¦ ¦ ¦ ¦ ¦ ¦--SYMBOL: x [0/1] {64} ¦ ¦ ¦ ¦ ¦ ¦ ¦--IN: in [0/1] {65} - ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {67} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: 22 [0/0] {67} ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 22 [0/0] {66} ¦ ¦ ¦ ¦ ¦ ¦ °--')': ) [0/0] {68} - ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {69} + ¦ ¦ ¦ ¦ ¦ °--expr: { # F [0/0] {69} ¦ ¦ ¦ ¦ ¦ ¦--'{': { [0/1] {70} ¦ ¦ ¦ ¦ ¦ ¦--COMMENT: # FIX [0/8] {71} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [1/6] {72} - ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {74} + ¦ ¦ ¦ ¦ ¦ ¦--expr: prin( [1/6] {72} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: prin [0/0] {74} ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: prin [0/0] {73} ¦ ¦ ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {75} - ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {77} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: x [0/0] {77} ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {76} ¦ ¦ ¦ ¦ ¦ ¦ °--')': ) [0/0] {78} ¦ ¦ ¦ ¦ ¦ °--'}': } [1/0] {79} @@ -82,71 +86,72 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) ¦ ¦ ¦ °--'}': } [1/0] {81} ¦ ¦ °--')': ) [0/0] {82} ¦ ¦--COMMENT: #we l [1/2] {83} - ¦ ¦--expr: [1/2] {84} - ¦ ¦ ¦--expr: [0/0] {86} + ¦ ¦--expr: c(lis [1/2] {84} + ¦ ¦ ¦--expr: c [0/0] {86} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {85} ¦ ¦ ¦--'(': ( [0/0] {87} - ¦ ¦ ¦--expr: [0/0] {88} - ¦ ¦ ¦ ¦--expr: [0/0] {90} + ¦ ¦ ¦--expr: list( [0/0] {88} + ¦ ¦ ¦ ¦--expr: list [0/0] {90} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: list [0/0] {89} ¦ ¦ ¦ ¦--'(': ( [0/0] {91} - ¦ ¦ ¦ ¦--expr: [0/0] {92} - ¦ ¦ ¦ ¦ ¦--expr: [0/1] {94} + ¦ ¦ ¦ ¦--expr: x + 2 [0/0] {92} + ¦ ¦ ¦ ¦ ¦--expr: x [0/1] {94} ¦ ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {93} ¦ ¦ ¦ ¦ ¦--'+': + [0/1] {95} - ¦ ¦ ¦ ¦ °--expr: [0/0] {97} + ¦ ¦ ¦ ¦ °--expr: 2 [0/0] {97} ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {96} ¦ ¦ ¦ °--')': ) [0/0] {98} ¦ ¦ ¦--',': , [0/4] {99} - ¦ ¦ ¦--expr: [1/1] {100} - ¦ ¦ ¦ ¦--expr: [0/0] {102} + ¦ ¦ ¦--expr: c( [1/1] {100} + ¦ ¦ ¦ ¦--expr: c [0/0] {102} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {101} ¦ ¦ ¦ ¦--'(': ( [0/4] {103} - ¦ ¦ ¦ ¦--expr: [0/3] {104} - ¦ ¦ ¦ ¦ 
¦--expr: [0/0] {106} + ¦ ¦ ¦ ¦--expr: c( + [0/3] {104} + ¦ ¦ ¦ ¦ ¦--expr: c [0/0] {106} ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {105} ¦ ¦ ¦ ¦ ¦--'(': ( [0/6] {107} - ¦ ¦ ¦ ¦ ¦--expr: [1/0] {108} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/1] {110} + ¦ ¦ ¦ ¦ ¦--expr: 26 ^ [1/0] {108} + ¦ ¦ ¦ ¦ ¦ ¦--expr: 26 [0/1] {110} ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 26 [0/0] {109} ¦ ¦ ¦ ¦ ¦ ¦--'^': ^ [0/1] {111} - ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {113} + ¦ ¦ ¦ ¦ ¦ °--expr: 2 [0/0] {113} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {112} ¦ ¦ ¦ ¦ ¦--',': , [0/1] {114} ¦ ¦ ¦ ¦ ¦--COMMENT: # FIX [0/6] {115} - ¦ ¦ ¦ ¦ ¦--expr: [1/0] {117} + ¦ ¦ ¦ ¦ ¦--expr: 8 [1/0] {117} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 8 [0/0] {116} ¦ ¦ ¦ ¦ ¦--',': , [0/6] {118} - ¦ ¦ ¦ ¦ ¦--expr: [1/4] {120} + ¦ ¦ ¦ ¦ ¦--expr: 7 [1/4] {120} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 7 [0/0] {119} ¦ ¦ ¦ ¦ °--')': ) [1/0] {121} ¦ ¦ ¦ °--')': ) [0/0] {122} ¦ ¦ °--')': ) [0/0] {123} - ¦ ¦--expr: [2/0] {124} - ¦ ¦ ¦--expr: [0/0] {126} + ¦ ¦--expr: call( [2/0] {124} + ¦ ¦ ¦--expr: call [0/0] {126} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {125} ¦ ¦ ¦--'(': ( [0/4] {127} - ¦ ¦ ¦--expr: [1/0] {129} + ¦ ¦ ¦--expr: 1 [1/0] {129} ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {128} ¦ ¦ ¦--',': , [0/1] {130} - ¦ ¦ ¦--expr: [0/0] {132} + ¦ ¦ ¦--expr: 2 [0/0] {132} ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {131} ¦ ¦ ¦--',': , [0/4] {133} - ¦ ¦ ¦--expr: [1/0] {134} - ¦ ¦ ¦ ¦--expr: [0/0] {137} + ¦ ¦ ¦--expr: 23+In [1/0] {134} + ¦ ¦ ¦ ¦--expr: 23 [0/0] {137} ¦ ¦ ¦ ¦ °--NUM_CONST: 23 [0/0] {136} ¦ ¦ ¦ ¦--'+': + [0/0] {138} - ¦ ¦ ¦ ¦--expr: [0/1] {140} + ¦ ¦ ¦ ¦--expr: Inf [0/1] {140} ¦ ¦ ¦ ¦ °--NUM_CONST: Inf [0/0] {139} ¦ ¦ ¦ ¦--'-': - [0/1] {141} - ¦ ¦ ¦ °--expr: [0/0] {143} + ¦ ¦ ¦ °--expr: 99 [0/0] {143} ¦ ¦ ¦ °--NUM_CONST: 99 [0/0] {142} ¦ ¦ ¦--',': , [0/1] {144} - ¦ ¦ ¦--expr: [0/0] {145} - ¦ ¦ ¦ ¦--expr: [0/0] {147} + ¦ ¦ ¦--expr: call( [0/0] {145} + ¦ ¦ ¦ ¦--expr: call [0/0] {147} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {146} ¦ ¦ ¦ ¦--'(': ( [0/6] {148} - ¦ ¦ ¦ ¦--expr: [1/4] {150} + ¦ ¦ ¦ ¦--expr: 16 [1/4] {150} ¦ ¦ ¦ ¦ °--NUM_CONST: 16 [0/0] {149} ¦ ¦ ¦ °--')': ) [1/0] {151} ¦ ¦ °--')': ) [0/0] {152} diff --git a/tests/testthat/scope_argument/scope_none-out.R b/tests/testthat/scope-AsIs/scope_none-out.R similarity index 100% rename from tests/testthat/scope_argument/scope_none-out.R rename to tests/testthat/scope-AsIs/scope_none-out.R diff --git a/tests/testthat/scope_argument/scope_spaces-in.R b/tests/testthat/scope-AsIs/scope_spaces-in.R similarity index 100% rename from tests/testthat/scope_argument/scope_spaces-in.R rename to tests/testthat/scope-AsIs/scope_spaces-in.R diff --git a/tests/testthat/scope-AsIs/scope_spaces-in_tree b/tests/testthat/scope-AsIs/scope_spaces-in_tree new file mode 100644 index 000000000..81781740b --- /dev/null +++ b/tests/testthat/scope-AsIs/scope_spaces-in_tree @@ -0,0 +1,25 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + °--expr: a<-fu [0/0] {1} + ¦--expr: a [0/0] {3} + ¦ °--SYMBOL: a [0/0] {2} + ¦--LEFT_ASSIGN: <- [0/0] {4} + °--expr: funct [0/0] {5} + ¦--FUNCTION: funct [0/0] {6} + ¦--'(': ( [0/0] {7} + ¦--')': ) [0/0] {8} + °--expr: { + [0/0] {9} + ¦--'{': { [0/20] {10} + ¦--expr: 1+1 [1/0] {11} + ¦ ¦--expr: 1 [0/0] {13} + ¦ ¦ °--NUM_CONST: 1 [0/0] {12} + ¦ ¦--'+': + [0/0] {14} + ¦ °--expr: 1 [0/0] {16} + ¦ °--NUM_CONST: 1 [0/0] {15} + ¦--expr_or_assign_or_help: d=3 [1/4] {17} + ¦ ¦--expr: d [0/0] {19} + ¦ ¦ °--SYMBOL: d [0/0] {18} + ¦ ¦--EQ_ASSIGN: = [0/0] {20} + ¦ °--expr: 3 [0/0] {22} + ¦ °--NUM_CONST: 3 [0/0] {21} + °--'}': } [1/0] {23} diff --git 
a/tests/testthat/scope_argument/scope_spaces-out.R b/tests/testthat/scope-AsIs/scope_spaces-out.R similarity index 100% rename from tests/testthat/scope_argument/scope_spaces-out.R rename to tests/testthat/scope-AsIs/scope_spaces-out.R diff --git a/tests/testthat/scope-AsIs/scope_spaces_indention-in.R b/tests/testthat/scope-AsIs/scope_spaces_indention-in.R new file mode 100644 index 000000000..e92cfb080 --- /dev/null +++ b/tests/testthat/scope-AsIs/scope_spaces_indention-in.R @@ -0,0 +1,21 @@ +# not adding line-break +if (x) {1+1++1} else{3} + +# not removing line-break +# FIXME If linebreaks are not touched: Do not indent token-dependent before '{' +test_that("x", + { + my_test(call) +}) + +# do not replace assignment +a = 3 +data_frame(a = 3) + +# do not resolve semicolon +a <- function(x) x + 1;b;c + +# don't add brackets in pipes +a %>% + b %>% + c diff --git a/tests/testthat/scope-AsIs/scope_spaces_indention-in_tree b/tests/testthat/scope-AsIs/scope_spaces_indention-in_tree new file mode 100644 index 000000000..10fda6ca9 --- /dev/null +++ b/tests/testthat/scope-AsIs/scope_spaces_indention-in_tree @@ -0,0 +1,97 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: # not [0/0] {1} + ¦--expr: if (x [1/0] {2} + ¦ ¦--IF: if [0/1] {3} + ¦ ¦--'(': ( [0/0] {4} + ¦ ¦--expr: x [0/0] {6} + ¦ ¦ °--SYMBOL: x [0/0] {5} + ¦ ¦--')': ) [0/1] {7} + ¦ ¦--expr: {1+1+ [0/1] {8} + ¦ ¦ ¦--'{': { [0/0] {9} + ¦ ¦ ¦--expr: 1+1++ [0/0] {10} + ¦ ¦ ¦ ¦--expr: 1 [0/0] {13} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {12} + ¦ ¦ ¦ ¦--'+': + [0/0] {14} + ¦ ¦ ¦ ¦--expr: 1 [0/0] {16} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {15} + ¦ ¦ ¦ ¦--'+': + [0/0] {17} + ¦ ¦ ¦ °--expr: +1 [0/0] {18} + ¦ ¦ ¦ ¦--'+': + [0/0] {19} + ¦ ¦ ¦ °--expr: 1 [0/0] {21} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {20} + ¦ ¦ °--'}': } [0/0] {22} + ¦ ¦--ELSE: else [0/0] {23} + ¦ °--expr: {3} [0/0] {24} + ¦ ¦--'{': { [0/0] {25} + ¦ ¦--expr: 3 [0/0] {27} + ¦ ¦ °--NUM_CONST: 3 [0/0] {26} + ¦ °--'}': } [0/0] {28} + ¦--COMMENT: # not [2/0] {29} + ¦--COMMENT: # FIX [1/0] {30} + ¦--expr: test_ [1/0] {31} + ¦ ¦--expr: test_ [0/0] {33} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {32} + ¦ ¦--'(': ( [0/0] {34} + ¦ ¦--expr: "x" [0/0] {36} + ¦ ¦ °--STR_CONST: "x" [0/0] {35} + ¦ ¦--',': , [0/2] {37} + ¦ ¦--expr: { + [1/0] {38} + ¦ ¦ ¦--'{': { [0/12] {39} + ¦ ¦ ¦--expr: my_te [1/0] {40} + ¦ ¦ ¦ ¦--expr: my_te [0/0] {42} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: my_te [0/0] {41} + ¦ ¦ ¦ ¦--'(': ( [0/0] {43} + ¦ ¦ ¦ ¦--expr: call [0/0] {45} + ¦ ¦ ¦ ¦ °--SYMBOL: call [0/0] {44} + ¦ ¦ ¦ °--')': ) [0/0] {46} + ¦ ¦ °--'}': } [1/0] {47} + ¦ °--')': ) [0/0] {48} + ¦--COMMENT: # do [2/0] {49} + ¦--expr_or_assign_or_help: a = 3 [1/0] {50} + ¦ ¦--expr: a [0/1] {52} + ¦ ¦ °--SYMBOL: a [0/0] {51} + ¦ ¦--EQ_ASSIGN: = [0/1] {53} + ¦ °--expr: 3 [0/0] {55} + ¦ °--NUM_CONST: 3 [0/0] {54} + ¦--expr: data_ [1/0] {56} + ¦ ¦--expr: data_ [0/0] {58} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: data_ [0/0] {57} + ¦ ¦--'(': ( [0/0] {59} + ¦ ¦--SYMBOL_SUB: a [0/1] {60} + ¦ ¦--EQ_SUB: = [0/1] {61} + ¦ ¦--expr: 3 [0/0] {63} + ¦ ¦ °--NUM_CONST: 3 [0/0] {62} + ¦ °--')': ) [0/0] {64} + ¦--COMMENT: # do [2/0] {65} + ¦--expr: a <- [1/0] {66} + ¦ ¦--expr: a [0/1] {68} + ¦ ¦ °--SYMBOL: a [0/0] {67} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {69} + ¦ °--expr: funct [0/0] {70} + ¦ ¦--FUNCTION: funct [0/0] {71} + ¦ ¦--'(': ( [0/0] {72} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {73} + ¦ ¦--')': ) [0/1] {74} + ¦ °--expr: x + 1 [0/0] {75} + ¦ ¦--expr: x [0/1] {77} + ¦ ¦ °--SYMBOL: x [0/0] {76} + ¦ ¦--'+': + [0/1] {78} + ¦ °--expr: 1 [0/0] {80} + ¦ °--NUM_CONST: 
1 [0/0] {79} + ¦--';': ; [0/0] {81} + ¦--expr: b [0/0] {83} + ¦ °--SYMBOL: b [0/0] {82} + ¦--';': ; [0/0] {84} + ¦--expr: c [0/0] {86} + ¦ °--SYMBOL: c [0/0] {85} + ¦--COMMENT: # don [2/0] {87} + °--expr: a %>% [1/0] {88} + ¦--expr: a [0/1] {91} + ¦ °--SYMBOL: a [0/0] {90} + ¦--SPECIAL-PIPE: %>% [0/2] {92} + ¦--expr: b [1/1] {94} + ¦ °--SYMBOL: b [0/0] {93} + ¦--SPECIAL-PIPE: %>% [0/2] {95} + °--expr: c [1/0] {97} + °--SYMBOL: c [0/0] {96} diff --git a/tests/testthat/scope_argument/scope_indention-out.R b/tests/testthat/scope-AsIs/scope_spaces_indention-out.R similarity index 100% rename from tests/testthat/scope_argument/scope_indention-out.R rename to tests/testthat/scope-AsIs/scope_spaces_indention-out.R diff --git a/tests/testthat/scope-AsIs/scope_spaces_line_breaks-in.R b/tests/testthat/scope-AsIs/scope_spaces_line_breaks-in.R new file mode 100644 index 000000000..ac91f0e50 --- /dev/null +++ b/tests/testthat/scope-AsIs/scope_spaces_line_breaks-in.R @@ -0,0 +1,21 @@ +# adding line-break +if (x) {1 + 1 + +1} else {3} + +# removing line-break +test_that("x", + { + my_test( call) + }) + + +# do not replace assignment +a = 3 +data_frame(a = 3) + +# do not resolve semicolon +a <- function(x) x + 1;b;c + +# don't add brackets in pipes +a %>% + b %>% + c diff --git a/tests/testthat/scope-AsIs/scope_spaces_line_breaks-in_tree b/tests/testthat/scope-AsIs/scope_spaces_line_breaks-in_tree new file mode 100644 index 000000000..95f85f205 --- /dev/null +++ b/tests/testthat/scope-AsIs/scope_spaces_line_breaks-in_tree @@ -0,0 +1,96 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: # add [0/0] {1} + ¦--expr: if (x [1/0] {2} + ¦ ¦--IF: if [0/1] {3} + ¦ ¦--'(': ( [0/0] {4} + ¦ ¦--expr: x [0/0] {6} + ¦ ¦ °--SYMBOL: x [0/0] {5} + ¦ ¦--')': ) [0/1] {7} + ¦ ¦--expr: {1 + [0/1] {8} + ¦ ¦ ¦--'{': { [0/0] {9} + ¦ ¦ ¦--expr: 1 + 1 [0/0] {10} + ¦ ¦ ¦ ¦--expr: 1 [0/1] {13} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {12} + ¦ ¦ ¦ ¦--'+': + [0/1] {14} + ¦ ¦ ¦ ¦--expr: 1 [0/1] {16} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {15} + ¦ ¦ ¦ ¦--'+': + [0/1] {17} + ¦ ¦ ¦ °--expr: +1 [0/0] {18} + ¦ ¦ ¦ ¦--'+': + [0/0] {19} + ¦ ¦ ¦ °--expr: 1 [0/0] {21} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {20} + ¦ ¦ °--'}': } [0/0] {22} + ¦ ¦--ELSE: else [0/1] {23} + ¦ °--expr: {3} [0/0] {24} + ¦ ¦--'{': { [0/0] {25} + ¦ ¦--expr: 3 [0/0] {27} + ¦ ¦ °--NUM_CONST: 3 [0/0] {26} + ¦ °--'}': } [0/0] {28} + ¦--COMMENT: # rem [2/0] {29} + ¦--expr: test_ [1/0] {30} + ¦ ¦--expr: test_ [0/0] {32} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {31} + ¦ ¦--'(': ( [0/0] {33} + ¦ ¦--expr: "x" [0/0] {35} + ¦ ¦ °--STR_CONST: "x" [0/0] {34} + ¦ ¦--',': , [0/10] {36} + ¦ ¦--expr: { + [1/0] {37} + ¦ ¦ ¦--'{': { [0/12] {38} + ¦ ¦ ¦--expr: my_te [1/10] {39} + ¦ ¦ ¦ ¦--expr: my_te [0/0] {41} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: my_te [0/0] {40} + ¦ ¦ ¦ ¦--'(': ( [0/1] {42} + ¦ ¦ ¦ ¦--expr: call [0/0] {44} + ¦ ¦ ¦ ¦ °--SYMBOL: call [0/0] {43} + ¦ ¦ ¦ °--')': ) [0/0] {45} + ¦ ¦ °--'}': } [1/0] {46} + ¦ °--')': ) [0/0] {47} + ¦--COMMENT: # do [3/0] {48} + ¦--expr_or_assign_or_help: a = 3 [1/0] {49} + ¦ ¦--expr: a [0/1] {51} + ¦ ¦ °--SYMBOL: a [0/0] {50} + ¦ ¦--EQ_ASSIGN: = [0/1] {52} + ¦ °--expr: 3 [0/0] {54} + ¦ °--NUM_CONST: 3 [0/0] {53} + ¦--expr: data_ [1/0] {55} + ¦ ¦--expr: data_ [0/0] {57} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: data_ [0/0] {56} + ¦ ¦--'(': ( [0/0] {58} + ¦ ¦--SYMBOL_SUB: a [0/1] {59} + ¦ ¦--EQ_SUB: = [0/1] {60} + ¦ ¦--expr: 3 [0/0] {62} + ¦ ¦ °--NUM_CONST: 3 [0/0] {61} + ¦ °--')': ) [0/0] {63} + ¦--COMMENT: # do [2/0] {64} + ¦--expr: a <- [1/0] {65} + ¦ 
¦--expr: a [0/1] {67} + ¦ ¦ °--SYMBOL: a [0/0] {66} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {68} + ¦ °--expr: funct [0/0] {69} + ¦ ¦--FUNCTION: funct [0/0] {70} + ¦ ¦--'(': ( [0/0] {71} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {72} + ¦ ¦--')': ) [0/1] {73} + ¦ °--expr: x + 1 [0/0] {74} + ¦ ¦--expr: x [0/1] {76} + ¦ ¦ °--SYMBOL: x [0/0] {75} + ¦ ¦--'+': + [0/1] {77} + ¦ °--expr: 1 [0/0] {79} + ¦ °--NUM_CONST: 1 [0/0] {78} + ¦--';': ; [0/0] {80} + ¦--expr: b [0/0] {82} + ¦ °--SYMBOL: b [0/0] {81} + ¦--';': ; [0/0] {83} + ¦--expr: c [0/0] {85} + ¦ °--SYMBOL: c [0/0] {84} + ¦--COMMENT: # don [2/0] {86} + °--expr: a %>% [1/0] {87} + ¦--expr: a [0/1] {90} + ¦ °--SYMBOL: a [0/0] {89} + ¦--SPECIAL-PIPE: %>% [0/2] {91} + ¦--expr: b [1/1] {93} + ¦ °--SYMBOL: b [0/0] {92} + ¦--SPECIAL-PIPE: %>% [0/2] {94} + °--expr: c [1/0] {96} + °--SYMBOL: c [0/0] {95} diff --git a/tests/testthat/scope-AsIs/scope_spaces_line_breaks-out.R b/tests/testthat/scope-AsIs/scope_spaces_line_breaks-out.R new file mode 100644 index 000000000..721ffc0af --- /dev/null +++ b/tests/testthat/scope-AsIs/scope_spaces_line_breaks-out.R @@ -0,0 +1,24 @@ +# adding line-break +if (x) { +1 + 1 + +1 +} else { +3 +} + +# removing line-break +test_that("x", { + my_test(call) + }) + + +# do not replace assignment +a = 3 +data_frame(a = 3) + +# do not resolve semicolon +a <- function(x) x + 1;b;c + +# don't add brackets in pipes +a %>% + b %>% + c diff --git a/tests/testthat/scope-AsIs/scope_spaces_tokens-in.R b/tests/testthat/scope-AsIs/scope_spaces_tokens-in.R new file mode 100644 index 000000000..b39fbf37d --- /dev/null +++ b/tests/testthat/scope-AsIs/scope_spaces_tokens-in.R @@ -0,0 +1,21 @@ +# adding line-break +if (x) {1 +1 + +1} else {3} + +# removing line-break +test_that("x", + { + my_test(call) + }) + + +# do not replace assignment +a = 3 +data_frame(a = 3) + +# do not resolve semicolon +a <- function(x) x + 1;b;c + +# don't add brackets in pipes +a %>% + b %>% + c diff --git a/tests/testthat/scope-AsIs/scope_spaces_tokens-in_tree b/tests/testthat/scope-AsIs/scope_spaces_tokens-in_tree new file mode 100644 index 000000000..c738d5f48 --- /dev/null +++ b/tests/testthat/scope-AsIs/scope_spaces_tokens-in_tree @@ -0,0 +1,96 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: # add [0/0] {1} + ¦--expr: if (x [1/0] {2} + ¦ ¦--IF: if [0/1] {3} + ¦ ¦--'(': ( [0/0] {4} + ¦ ¦--expr: x [0/0] {6} + ¦ ¦ °--SYMBOL: x [0/0] {5} + ¦ ¦--')': ) [0/1] {7} + ¦ ¦--expr: {1 +1 [0/1] {8} + ¦ ¦ ¦--'{': { [0/0] {9} + ¦ ¦ ¦--expr: 1 +1 [0/0] {10} + ¦ ¦ ¦ ¦--expr: 1 [0/1] {13} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {12} + ¦ ¦ ¦ ¦--'+': + [0/0] {14} + ¦ ¦ ¦ ¦--expr: 1 [0/1] {16} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {15} + ¦ ¦ ¦ ¦--'+': + [0/1] {17} + ¦ ¦ ¦ °--expr: +1 [0/0] {18} + ¦ ¦ ¦ ¦--'+': + [0/0] {19} + ¦ ¦ ¦ °--expr: 1 [0/0] {21} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {20} + ¦ ¦ °--'}': } [0/0] {22} + ¦ ¦--ELSE: else [0/1] {23} + ¦ °--expr: {3} [0/0] {24} + ¦ ¦--'{': { [0/0] {25} + ¦ ¦--expr: 3 [0/0] {27} + ¦ ¦ °--NUM_CONST: 3 [0/0] {26} + ¦ °--'}': } [0/0] {28} + ¦--COMMENT: # rem [2/0] {29} + ¦--expr: test_ [1/0] {30} + ¦ ¦--expr: test_ [0/0] {32} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {31} + ¦ ¦--'(': ( [0/0] {33} + ¦ ¦--expr: "x" [0/0] {35} + ¦ ¦ °--STR_CONST: "x" [0/0] {34} + ¦ ¦--',': , [0/10] {36} + ¦ ¦--expr: { + [1/0] {37} + ¦ ¦ ¦--'{': { [0/12] {38} + ¦ ¦ ¦--expr: my_te [1/10] {39} + ¦ ¦ ¦ ¦--expr: my_te [0/0] {41} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: my_te [0/0] {40} + ¦ ¦ ¦ ¦--'(': ( [0/0] {42} + ¦ ¦ ¦ ¦--expr: call [0/0] {44} + ¦ ¦ ¦ ¦ °--SYMBOL: call [0/0] 
{43} + ¦ ¦ ¦ °--')': ) [0/0] {45} + ¦ ¦ °--'}': } [1/0] {46} + ¦ °--')': ) [0/0] {47} + ¦--COMMENT: # do [3/0] {48} + ¦--expr_or_assign_or_help: a = 3 [1/0] {49} + ¦ ¦--expr: a [0/1] {51} + ¦ ¦ °--SYMBOL: a [0/0] {50} + ¦ ¦--EQ_ASSIGN: = [0/1] {52} + ¦ °--expr: 3 [0/0] {54} + ¦ °--NUM_CONST: 3 [0/0] {53} + ¦--expr: data_ [1/0] {55} + ¦ ¦--expr: data_ [0/0] {57} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: data_ [0/0] {56} + ¦ ¦--'(': ( [0/0] {58} + ¦ ¦--SYMBOL_SUB: a [0/1] {59} + ¦ ¦--EQ_SUB: = [0/1] {60} + ¦ ¦--expr: 3 [0/0] {62} + ¦ ¦ °--NUM_CONST: 3 [0/0] {61} + ¦ °--')': ) [0/0] {63} + ¦--COMMENT: # do [2/0] {64} + ¦--expr: a <- [1/0] {65} + ¦ ¦--expr: a [0/1] {67} + ¦ ¦ °--SYMBOL: a [0/0] {66} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {68} + ¦ °--expr: funct [0/0] {69} + ¦ ¦--FUNCTION: funct [0/0] {70} + ¦ ¦--'(': ( [0/0] {71} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {72} + ¦ ¦--')': ) [0/1] {73} + ¦ °--expr: x + 1 [0/0] {74} + ¦ ¦--expr: x [0/1] {76} + ¦ ¦ °--SYMBOL: x [0/0] {75} + ¦ ¦--'+': + [0/1] {77} + ¦ °--expr: 1 [0/0] {79} + ¦ °--NUM_CONST: 1 [0/0] {78} + ¦--';': ; [0/0] {80} + ¦--expr: b [0/0] {82} + ¦ °--SYMBOL: b [0/0] {81} + ¦--';': ; [0/0] {83} + ¦--expr: c [0/0] {85} + ¦ °--SYMBOL: c [0/0] {84} + ¦--COMMENT: # don [2/0] {86} + °--expr: a %>% [1/0] {87} + ¦--expr: a [0/1] {90} + ¦ °--SYMBOL: a [0/0] {89} + ¦--SPECIAL-PIPE: %>% [0/2] {91} + ¦--expr: b [1/1] {93} + ¦ °--SYMBOL: b [0/0] {92} + ¦--SPECIAL-PIPE: %>% [0/2] {94} + °--expr: c [1/0] {96} + °--SYMBOL: c [0/0] {95} diff --git a/tests/testthat/scope-AsIs/scope_spaces_tokens-out.R b/tests/testthat/scope-AsIs/scope_spaces_tokens-out.R new file mode 100644 index 000000000..4e71757fd --- /dev/null +++ b/tests/testthat/scope-AsIs/scope_spaces_tokens-out.R @@ -0,0 +1,23 @@ +# adding line-break +if (x) {1 + 1 + +1} else {3} + +# removing line-break +test_that("x", + { + my_test(call) + }) + + +# do not replace assignment +a <- 3 +data_frame(a = 3) + +# do not resolve semicolon +a <- function(x) x + 1 +b +c + +# don't add brackets in pipes +a %>% + b() %>% + c() diff --git a/tests/testthat/scope_argument/scope_tokens-in.R b/tests/testthat/scope-AsIs/scope_tokens-in.R similarity index 100% rename from tests/testthat/scope_argument/scope_tokens-in.R rename to tests/testthat/scope-AsIs/scope_tokens-in.R diff --git a/tests/testthat/scope-AsIs/scope_tokens-in_tree b/tests/testthat/scope-AsIs/scope_tokens-in_tree new file mode 100644 index 000000000..2b1d97fb2 --- /dev/null +++ b/tests/testthat/scope-AsIs/scope_tokens-in_tree @@ -0,0 +1,96 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: # add [0/0] {1} + ¦--expr: if (x [1/0] {2} + ¦ ¦--IF: if [0/1] {3} + ¦ ¦--'(': ( [0/0] {4} + ¦ ¦--expr: x [0/0] {6} + ¦ ¦ °--SYMBOL: x [0/0] {5} + ¦ ¦--')': ) [0/1] {7} + ¦ ¦--expr: {1 + [0/1] {8} + ¦ ¦ ¦--'{': { [0/0] {9} + ¦ ¦ ¦--expr: 1 + 1 [0/0] {10} + ¦ ¦ ¦ ¦--expr: 1 [0/1] {13} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {12} + ¦ ¦ ¦ ¦--'+': + [0/1] {14} + ¦ ¦ ¦ ¦--expr: 1 [0/1] {16} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {15} + ¦ ¦ ¦ ¦--'+': + [0/1] {17} + ¦ ¦ ¦ °--expr: +1 [0/0] {18} + ¦ ¦ ¦ ¦--'+': + [0/0] {19} + ¦ ¦ ¦ °--expr: 1 [0/0] {21} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {20} + ¦ ¦ °--'}': } [0/0] {22} + ¦ ¦--ELSE: else [0/1] {23} + ¦ °--expr: {3} [0/0] {24} + ¦ ¦--'{': { [0/0] {25} + ¦ ¦--expr: 3 [0/0] {27} + ¦ ¦ °--NUM_CONST: 3 [0/0] {26} + ¦ °--'}': } [0/0] {28} + ¦--COMMENT: # rem [2/0] {29} + ¦--expr: test_ [1/0] {30} + ¦ ¦--expr: test_ [0/0] {32} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {31} + ¦ ¦--'(': ( [0/0] {33} + ¦ ¦--expr: "x" [0/0] {35} + ¦ ¦ 
°--STR_CONST: "x" [0/0] {34} + ¦ ¦--',': , [0/10] {36} + ¦ ¦--expr: { + [1/0] {37} + ¦ ¦ ¦--'{': { [0/12] {38} + ¦ ¦ ¦--expr: my_te [1/10] {39} + ¦ ¦ ¦ ¦--expr: my_te [0/0] {41} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: my_te [0/0] {40} + ¦ ¦ ¦ ¦--'(': ( [0/0] {42} + ¦ ¦ ¦ ¦--expr: call [0/0] {44} + ¦ ¦ ¦ ¦ °--SYMBOL: call [0/0] {43} + ¦ ¦ ¦ °--')': ) [0/0] {45} + ¦ ¦ °--'}': } [1/0] {46} + ¦ °--')': ) [0/0] {47} + ¦--COMMENT: # do [3/0] {48} + ¦--expr_or_assign_or_help: a = 3 [1/0] {49} + ¦ ¦--expr: a [0/1] {51} + ¦ ¦ °--SYMBOL: a [0/0] {50} + ¦ ¦--EQ_ASSIGN: = [0/1] {52} + ¦ °--expr: 3 [0/0] {54} + ¦ °--NUM_CONST: 3 [0/0] {53} + ¦--expr: data_ [1/0] {55} + ¦ ¦--expr: data_ [0/0] {57} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: data_ [0/0] {56} + ¦ ¦--'(': ( [0/0] {58} + ¦ ¦--SYMBOL_SUB: a [0/1] {59} + ¦ ¦--EQ_SUB: = [0/1] {60} + ¦ ¦--expr: 3 [0/0] {62} + ¦ ¦ °--NUM_CONST: 3 [0/0] {61} + ¦ °--')': ) [0/0] {63} + ¦--COMMENT: # do [2/0] {64} + ¦--expr: a <- [1/0] {65} + ¦ ¦--expr: a [0/1] {67} + ¦ ¦ °--SYMBOL: a [0/0] {66} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {68} + ¦ °--expr: funct [0/0] {69} + ¦ ¦--FUNCTION: funct [0/0] {70} + ¦ ¦--'(': ( [0/0] {71} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {72} + ¦ ¦--')': ) [0/1] {73} + ¦ °--expr: x + 1 [0/0] {74} + ¦ ¦--expr: x [0/1] {76} + ¦ ¦ °--SYMBOL: x [0/0] {75} + ¦ ¦--'+': + [0/1] {77} + ¦ °--expr: 1 [0/0] {79} + ¦ °--NUM_CONST: 1 [0/0] {78} + ¦--';': ; [0/0] {80} + ¦--expr: b [0/0] {82} + ¦ °--SYMBOL: b [0/0] {81} + ¦--';': ; [0/0] {83} + ¦--expr: c [0/0] {85} + ¦ °--SYMBOL: c [0/0] {84} + ¦--COMMENT: # don [2/0] {86} + °--expr: a %>% [1/0] {87} + ¦--expr: a [0/1] {90} + ¦ °--SYMBOL: a [0/0] {89} + ¦--SPECIAL-PIPE: %>% [0/2] {91} + ¦--expr: b [1/1] {93} + ¦ °--SYMBOL: b [0/0] {92} + ¦--SPECIAL-PIPE: %>% [0/2] {94} + °--expr: c [1/0] {96} + °--SYMBOL: c [0/0] {95} diff --git a/tests/testthat/scope-AsIs/scope_tokens-out.R b/tests/testthat/scope-AsIs/scope_tokens-out.R new file mode 100644 index 000000000..4e71757fd --- /dev/null +++ b/tests/testthat/scope-AsIs/scope_tokens-out.R @@ -0,0 +1,23 @@ +# adding line-break +if (x) {1 + 1 + +1} else {3} + +# removing line-break +test_that("x", + { + my_test(call) + }) + + +# do not replace assignment +a <- 3 +data_frame(a = 3) + +# do not resolve semicolon +a <- function(x) x + 1 +b +c + +# don't add brackets in pipes +a %>% + b() %>% + c() diff --git a/tests/testthat/scope-character/scope_indention-in.R b/tests/testthat/scope-character/scope_indention-in.R new file mode 100644 index 000000000..e92cfb080 --- /dev/null +++ b/tests/testthat/scope-character/scope_indention-in.R @@ -0,0 +1,21 @@ +# not adding line-break +if (x) {1+1++1} else{3} + +# not removing line-break +# FIXME If linebreaks are not touched: Do not indent token-dependent before '{' +test_that("x", + { + my_test(call) +}) + +# do not replace assignment +a = 3 +data_frame(a = 3) + +# do not resolve semicolon +a <- function(x) x + 1;b;c + +# don't add brackets in pipes +a %>% + b %>% + c diff --git a/tests/testthat/scope-character/scope_indention-in_tree b/tests/testthat/scope-character/scope_indention-in_tree new file mode 100644 index 000000000..10fda6ca9 --- /dev/null +++ b/tests/testthat/scope-character/scope_indention-in_tree @@ -0,0 +1,97 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: # not [0/0] {1} + ¦--expr: if (x [1/0] {2} + ¦ ¦--IF: if [0/1] {3} + ¦ ¦--'(': ( [0/0] {4} + ¦ ¦--expr: x [0/0] {6} + ¦ ¦ °--SYMBOL: x [0/0] {5} + ¦ ¦--')': ) [0/1] {7} + ¦ ¦--expr: {1+1+ [0/1] {8} + ¦ ¦ ¦--'{': { [0/0] {9} + ¦ ¦ ¦--expr: 1+1++ [0/0] {10} + ¦ ¦ ¦ 
¦--expr: 1 [0/0] {13} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {12} + ¦ ¦ ¦ ¦--'+': + [0/0] {14} + ¦ ¦ ¦ ¦--expr: 1 [0/0] {16} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {15} + ¦ ¦ ¦ ¦--'+': + [0/0] {17} + ¦ ¦ ¦ °--expr: +1 [0/0] {18} + ¦ ¦ ¦ ¦--'+': + [0/0] {19} + ¦ ¦ ¦ °--expr: 1 [0/0] {21} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {20} + ¦ ¦ °--'}': } [0/0] {22} + ¦ ¦--ELSE: else [0/0] {23} + ¦ °--expr: {3} [0/0] {24} + ¦ ¦--'{': { [0/0] {25} + ¦ ¦--expr: 3 [0/0] {27} + ¦ ¦ °--NUM_CONST: 3 [0/0] {26} + ¦ °--'}': } [0/0] {28} + ¦--COMMENT: # not [2/0] {29} + ¦--COMMENT: # FIX [1/0] {30} + ¦--expr: test_ [1/0] {31} + ¦ ¦--expr: test_ [0/0] {33} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {32} + ¦ ¦--'(': ( [0/0] {34} + ¦ ¦--expr: "x" [0/0] {36} + ¦ ¦ °--STR_CONST: "x" [0/0] {35} + ¦ ¦--',': , [0/2] {37} + ¦ ¦--expr: { + [1/0] {38} + ¦ ¦ ¦--'{': { [0/12] {39} + ¦ ¦ ¦--expr: my_te [1/0] {40} + ¦ ¦ ¦ ¦--expr: my_te [0/0] {42} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: my_te [0/0] {41} + ¦ ¦ ¦ ¦--'(': ( [0/0] {43} + ¦ ¦ ¦ ¦--expr: call [0/0] {45} + ¦ ¦ ¦ ¦ °--SYMBOL: call [0/0] {44} + ¦ ¦ ¦ °--')': ) [0/0] {46} + ¦ ¦ °--'}': } [1/0] {47} + ¦ °--')': ) [0/0] {48} + ¦--COMMENT: # do [2/0] {49} + ¦--expr_or_assign_or_help: a = 3 [1/0] {50} + ¦ ¦--expr: a [0/1] {52} + ¦ ¦ °--SYMBOL: a [0/0] {51} + ¦ ¦--EQ_ASSIGN: = [0/1] {53} + ¦ °--expr: 3 [0/0] {55} + ¦ °--NUM_CONST: 3 [0/0] {54} + ¦--expr: data_ [1/0] {56} + ¦ ¦--expr: data_ [0/0] {58} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: data_ [0/0] {57} + ¦ ¦--'(': ( [0/0] {59} + ¦ ¦--SYMBOL_SUB: a [0/1] {60} + ¦ ¦--EQ_SUB: = [0/1] {61} + ¦ ¦--expr: 3 [0/0] {63} + ¦ ¦ °--NUM_CONST: 3 [0/0] {62} + ¦ °--')': ) [0/0] {64} + ¦--COMMENT: # do [2/0] {65} + ¦--expr: a <- [1/0] {66} + ¦ ¦--expr: a [0/1] {68} + ¦ ¦ °--SYMBOL: a [0/0] {67} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {69} + ¦ °--expr: funct [0/0] {70} + ¦ ¦--FUNCTION: funct [0/0] {71} + ¦ ¦--'(': ( [0/0] {72} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {73} + ¦ ¦--')': ) [0/1] {74} + ¦ °--expr: x + 1 [0/0] {75} + ¦ ¦--expr: x [0/1] {77} + ¦ ¦ °--SYMBOL: x [0/0] {76} + ¦ ¦--'+': + [0/1] {78} + ¦ °--expr: 1 [0/0] {80} + ¦ °--NUM_CONST: 1 [0/0] {79} + ¦--';': ; [0/0] {81} + ¦--expr: b [0/0] {83} + ¦ °--SYMBOL: b [0/0] {82} + ¦--';': ; [0/0] {84} + ¦--expr: c [0/0] {86} + ¦ °--SYMBOL: c [0/0] {85} + ¦--COMMENT: # don [2/0] {87} + °--expr: a %>% [1/0] {88} + ¦--expr: a [0/1] {91} + ¦ °--SYMBOL: a [0/0] {90} + ¦--SPECIAL-PIPE: %>% [0/2] {92} + ¦--expr: b [1/1] {94} + ¦ °--SYMBOL: b [0/0] {93} + ¦--SPECIAL-PIPE: %>% [0/2] {95} + °--expr: c [1/0] {97} + °--SYMBOL: c [0/0] {96} diff --git a/tests/testthat/scope-character/scope_indention-out.R b/tests/testthat/scope-character/scope_indention-out.R new file mode 100644 index 000000000..cb3b1ebea --- /dev/null +++ b/tests/testthat/scope-character/scope_indention-out.R @@ -0,0 +1,21 @@ +# not adding line-break +if (x) {1 + 1 + +1} else {3} + +# not removing line-break +# FIXME If linebreaks are not touched: Do not indent token-dependent before '{' +test_that("x", + { + my_test(call) +}) + +# do not replace assignment +a = 3 +data_frame(a = 3) + +# do not resolve semicolon +a <- function(x) x + 1;b;c + +# don't add brackets in pipes +a %>% + b %>% + c diff --git a/tests/testthat/scope-character/scope_line_breaks-in.R b/tests/testthat/scope-character/scope_line_breaks-in.R new file mode 100644 index 000000000..8fd4735e5 --- /dev/null +++ b/tests/testthat/scope-character/scope_line_breaks-in.R @@ -0,0 +1,21 @@ +# adding line-break +if (x) {1 + 1 + +1} else {3} + +# removing line-break +test_that("x", + { + my_test(call) + }) + + +# do not 
replace assignment +a = 3 +data_frame(a = 3) + +# do not resolve semicolon +a <- function(x) x + 1;b;c + +# don't add brackets in pipes +a %>% + b %>% + c diff --git a/tests/testthat/scope-character/scope_line_breaks-in_tree b/tests/testthat/scope-character/scope_line_breaks-in_tree new file mode 100644 index 000000000..2b1d97fb2 --- /dev/null +++ b/tests/testthat/scope-character/scope_line_breaks-in_tree @@ -0,0 +1,96 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: # add [0/0] {1} + ¦--expr: if (x [1/0] {2} + ¦ ¦--IF: if [0/1] {3} + ¦ ¦--'(': ( [0/0] {4} + ¦ ¦--expr: x [0/0] {6} + ¦ ¦ °--SYMBOL: x [0/0] {5} + ¦ ¦--')': ) [0/1] {7} + ¦ ¦--expr: {1 + [0/1] {8} + ¦ ¦ ¦--'{': { [0/0] {9} + ¦ ¦ ¦--expr: 1 + 1 [0/0] {10} + ¦ ¦ ¦ ¦--expr: 1 [0/1] {13} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {12} + ¦ ¦ ¦ ¦--'+': + [0/1] {14} + ¦ ¦ ¦ ¦--expr: 1 [0/1] {16} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {15} + ¦ ¦ ¦ ¦--'+': + [0/1] {17} + ¦ ¦ ¦ °--expr: +1 [0/0] {18} + ¦ ¦ ¦ ¦--'+': + [0/0] {19} + ¦ ¦ ¦ °--expr: 1 [0/0] {21} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {20} + ¦ ¦ °--'}': } [0/0] {22} + ¦ ¦--ELSE: else [0/1] {23} + ¦ °--expr: {3} [0/0] {24} + ¦ ¦--'{': { [0/0] {25} + ¦ ¦--expr: 3 [0/0] {27} + ¦ ¦ °--NUM_CONST: 3 [0/0] {26} + ¦ °--'}': } [0/0] {28} + ¦--COMMENT: # rem [2/0] {29} + ¦--expr: test_ [1/0] {30} + ¦ ¦--expr: test_ [0/0] {32} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {31} + ¦ ¦--'(': ( [0/0] {33} + ¦ ¦--expr: "x" [0/0] {35} + ¦ ¦ °--STR_CONST: "x" [0/0] {34} + ¦ ¦--',': , [0/10] {36} + ¦ ¦--expr: { + [1/0] {37} + ¦ ¦ ¦--'{': { [0/12] {38} + ¦ ¦ ¦--expr: my_te [1/10] {39} + ¦ ¦ ¦ ¦--expr: my_te [0/0] {41} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: my_te [0/0] {40} + ¦ ¦ ¦ ¦--'(': ( [0/0] {42} + ¦ ¦ ¦ ¦--expr: call [0/0] {44} + ¦ ¦ ¦ ¦ °--SYMBOL: call [0/0] {43} + ¦ ¦ ¦ °--')': ) [0/0] {45} + ¦ ¦ °--'}': } [1/0] {46} + ¦ °--')': ) [0/0] {47} + ¦--COMMENT: # do [3/0] {48} + ¦--expr_or_assign_or_help: a = 3 [1/0] {49} + ¦ ¦--expr: a [0/1] {51} + ¦ ¦ °--SYMBOL: a [0/0] {50} + ¦ ¦--EQ_ASSIGN: = [0/1] {52} + ¦ °--expr: 3 [0/0] {54} + ¦ °--NUM_CONST: 3 [0/0] {53} + ¦--expr: data_ [1/0] {55} + ¦ ¦--expr: data_ [0/0] {57} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: data_ [0/0] {56} + ¦ ¦--'(': ( [0/0] {58} + ¦ ¦--SYMBOL_SUB: a [0/1] {59} + ¦ ¦--EQ_SUB: = [0/1] {60} + ¦ ¦--expr: 3 [0/0] {62} + ¦ ¦ °--NUM_CONST: 3 [0/0] {61} + ¦ °--')': ) [0/0] {63} + ¦--COMMENT: # do [2/0] {64} + ¦--expr: a <- [1/0] {65} + ¦ ¦--expr: a [0/1] {67} + ¦ ¦ °--SYMBOL: a [0/0] {66} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {68} + ¦ °--expr: funct [0/0] {69} + ¦ ¦--FUNCTION: funct [0/0] {70} + ¦ ¦--'(': ( [0/0] {71} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {72} + ¦ ¦--')': ) [0/1] {73} + ¦ °--expr: x + 1 [0/0] {74} + ¦ ¦--expr: x [0/1] {76} + ¦ ¦ °--SYMBOL: x [0/0] {75} + ¦ ¦--'+': + [0/1] {77} + ¦ °--expr: 1 [0/0] {79} + ¦ °--NUM_CONST: 1 [0/0] {78} + ¦--';': ; [0/0] {80} + ¦--expr: b [0/0] {82} + ¦ °--SYMBOL: b [0/0] {81} + ¦--';': ; [0/0] {83} + ¦--expr: c [0/0] {85} + ¦ °--SYMBOL: c [0/0] {84} + ¦--COMMENT: # don [2/0] {86} + °--expr: a %>% [1/0] {87} + ¦--expr: a [0/1] {90} + ¦ °--SYMBOL: a [0/0] {89} + ¦--SPECIAL-PIPE: %>% [0/2] {91} + ¦--expr: b [1/1] {93} + ¦ °--SYMBOL: b [0/0] {92} + ¦--SPECIAL-PIPE: %>% [0/2] {94} + °--expr: c [1/0] {96} + °--SYMBOL: c [0/0] {95} diff --git a/tests/testthat/scope_argument/scope_line_breaks-out.R b/tests/testthat/scope-character/scope_line_breaks-out.R similarity index 100% rename from tests/testthat/scope_argument/scope_line_breaks-out.R rename to tests/testthat/scope-character/scope_line_breaks-out.R diff --git 
a/tests/testthat/scope-character/scope_none-in.R b/tests/testthat/scope-character/scope_none-in.R new file mode 100644 index 000000000..2c770501f --- /dev/null +++ b/tests/testthat/scope-character/scope_none-in.R @@ -0,0 +1,30 @@ +#'this function does +#' +#' @param x a parameter. +#' indented comments +a<- function(x){ + test_that("I want to test",{ + out <- c(1,c( + 22 +1 + )) + if (x > 10) { + for (x in 22) { # FIXME in operator only to be surrounded by one space. What about %in%? + prin(x) + } + } + } ) + #we like comments too + c(list(x + 2), + c( c( + 26 ^ 2, # FIXME ^ operator has to be surrounded by one space (or none?!), never multiple + 8, + 7 + ) ) ) + + call( + 1, 2, + 23+Inf - 99, call( + 16 + )) +} +# comments everywhere diff --git a/tests/testthat/scope-character/scope_none-in_tree b/tests/testthat/scope-character/scope_none-in_tree new file mode 100644 index 000000000..3c65a9c14 --- /dev/null +++ b/tests/testthat/scope-character/scope_none-in_tree @@ -0,0 +1,159 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: #'thi [0/0] {1} + ¦--COMMENT: #' [1/0] {2} + ¦--COMMENT: #' @p [1/0] {3} + ¦--COMMENT: #' [1/0] {4} + ¦--expr: a<- f [1/0] {5} + ¦ ¦--expr: a [0/0] {7} + ¦ ¦ °--SYMBOL: a [0/0] {6} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {8} + ¦ °--expr: funct [0/0] {9} + ¦ ¦--FUNCTION: funct [0/0] {10} + ¦ ¦--'(': ( [0/0] {11} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {12} + ¦ ¦--')': ) [0/0] {13} + ¦ °--expr: { + t [0/0] {14} + ¦ ¦--'{': { [0/2] {15} + ¦ ¦--expr: test_ [1/2] {16} + ¦ ¦ ¦--expr: test_ [0/0] {18} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {17} + ¦ ¦ ¦--'(': ( [0/0] {19} + ¦ ¦ ¦--expr: "I wa [0/0] {21} + ¦ ¦ ¦ °--STR_CONST: "I wa [0/0] {20} + ¦ ¦ ¦--',': , [0/0] {22} + ¦ ¦ ¦--expr: { + [0/5] {23} + ¦ ¦ ¦ ¦--'{': { [0/4] {24} + ¦ ¦ ¦ ¦--expr: out < [1/4] {25} + ¦ ¦ ¦ ¦ ¦--expr: out [0/1] {27} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL: out [0/0] {26} + ¦ ¦ ¦ ¦ ¦--LEFT_ASSIGN: <- [0/1] {28} + ¦ ¦ ¦ ¦ °--expr: c(1,c [0/0] {29} + ¦ ¦ ¦ ¦ ¦--expr: c [0/0] {31} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {30} + ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {32} + ¦ ¦ ¦ ¦ ¦--expr: 1 [0/0] {34} + ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {33} + ¦ ¦ ¦ ¦ ¦--',': , [0/0] {35} + ¦ ¦ ¦ ¦ ¦--expr: c( + [0/0] {36} + ¦ ¦ ¦ ¦ ¦ ¦--expr: c [0/0] {38} + ¦ ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {37} + ¦ ¦ ¦ ¦ ¦ ¦--'(': ( [0/6] {39} + ¦ ¦ ¦ ¦ ¦ ¦--expr: 22 +1 [1/4] {40} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: 22 [0/1] {42} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 22 [0/0] {41} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--'+': + [0/0] {43} + ¦ ¦ ¦ ¦ ¦ ¦ °--expr: 1 [0/0] {45} + ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {44} + ¦ ¦ ¦ ¦ ¦ °--')': ) [1/0] {46} + ¦ ¦ ¦ ¦ °--')': ) [0/0] {47} + ¦ ¦ ¦ ¦--expr: if (x [1/2] {48} + ¦ ¦ ¦ ¦ ¦--IF: if [0/1] {49} + ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {50} + ¦ ¦ ¦ ¦ ¦--expr: x > 1 [0/0] {51} + ¦ ¦ ¦ ¦ ¦ ¦--expr: x [0/1] {53} + ¦ ¦ ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {52} + ¦ ¦ ¦ ¦ ¦ ¦--GT: > [0/1] {54} + ¦ ¦ ¦ ¦ ¦ °--expr: 10 [0/0] {56} + ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 10 [0/0] {55} + ¦ ¦ ¦ ¦ ¦--')': ) [0/1] {57} + ¦ ¦ ¦ ¦ °--expr: { + [0/0] {58} + ¦ ¦ ¦ ¦ ¦--'{': { [0/6] {59} + ¦ ¦ ¦ ¦ ¦--expr: for ( [1/4] {60} + ¦ ¦ ¦ ¦ ¦ ¦--FOR: for [0/1] {61} + ¦ ¦ ¦ ¦ ¦ ¦--forcond: (x in [0/1] {62} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {63} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--SYMBOL: x [0/1] {64} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--IN: in [0/1] {65} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: 22 [0/0] {67} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 22 [0/0] {66} + ¦ ¦ ¦ ¦ ¦ ¦ °--')': ) [0/0] {68} + ¦ ¦ ¦ ¦ ¦ °--expr: { # F [0/0] {69} + ¦ ¦ ¦ ¦ ¦ ¦--'{': { [0/1] {70} + ¦ ¦ ¦ ¦ ¦ ¦--COMMENT: # FIX [0/8] {71} + ¦ ¦ ¦ ¦ ¦ ¦--expr: prin( [1/6] {72} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: prin [0/0] 
{74} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: prin [0/0] {73} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {75} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: x [0/0] {77} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {76} + ¦ ¦ ¦ ¦ ¦ ¦ °--')': ) [0/0] {78} + ¦ ¦ ¦ ¦ ¦ °--'}': } [1/0] {79} + ¦ ¦ ¦ ¦ °--'}': } [1/0] {80} + ¦ ¦ ¦ °--'}': } [1/0] {81} + ¦ ¦ °--')': ) [0/0] {82} + ¦ ¦--COMMENT: #we l [1/2] {83} + ¦ ¦--expr: c(lis [1/2] {84} + ¦ ¦ ¦--expr: c [0/0] {86} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {85} + ¦ ¦ ¦--'(': ( [0/0] {87} + ¦ ¦ ¦--expr: list( [0/0] {88} + ¦ ¦ ¦ ¦--expr: list [0/0] {90} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: list [0/0] {89} + ¦ ¦ ¦ ¦--'(': ( [0/0] {91} + ¦ ¦ ¦ ¦--expr: x + 2 [0/0] {92} + ¦ ¦ ¦ ¦ ¦--expr: x [0/1] {94} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {93} + ¦ ¦ ¦ ¦ ¦--'+': + [0/1] {95} + ¦ ¦ ¦ ¦ °--expr: 2 [0/0] {97} + ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {96} + ¦ ¦ ¦ °--')': ) [0/0] {98} + ¦ ¦ ¦--',': , [0/4] {99} + ¦ ¦ ¦--expr: c( [1/1] {100} + ¦ ¦ ¦ ¦--expr: c [0/0] {102} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {101} + ¦ ¦ ¦ ¦--'(': ( [0/4] {103} + ¦ ¦ ¦ ¦--expr: c( + [0/3] {104} + ¦ ¦ ¦ ¦ ¦--expr: c [0/0] {106} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {105} + ¦ ¦ ¦ ¦ ¦--'(': ( [0/6] {107} + ¦ ¦ ¦ ¦ ¦--expr: 26 ^ [1/0] {108} + ¦ ¦ ¦ ¦ ¦ ¦--expr: 26 [0/1] {110} + ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 26 [0/0] {109} + ¦ ¦ ¦ ¦ ¦ ¦--'^': ^ [0/1] {111} + ¦ ¦ ¦ ¦ ¦ °--expr: 2 [0/0] {113} + ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {112} + ¦ ¦ ¦ ¦ ¦--',': , [0/1] {114} + ¦ ¦ ¦ ¦ ¦--COMMENT: # FIX [0/6] {115} + ¦ ¦ ¦ ¦ ¦--expr: 8 [1/0] {117} + ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 8 [0/0] {116} + ¦ ¦ ¦ ¦ ¦--',': , [0/6] {118} + ¦ ¦ ¦ ¦ ¦--expr: 7 [1/4] {120} + ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 7 [0/0] {119} + ¦ ¦ ¦ ¦ °--')': ) [1/0] {121} + ¦ ¦ ¦ °--')': ) [0/0] {122} + ¦ ¦ °--')': ) [0/0] {123} + ¦ ¦--expr: call( [2/0] {124} + ¦ ¦ ¦--expr: call [0/0] {126} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {125} + ¦ ¦ ¦--'(': ( [0/4] {127} + ¦ ¦ ¦--expr: 1 [1/0] {129} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {128} + ¦ ¦ ¦--',': , [0/1] {130} + ¦ ¦ ¦--expr: 2 [0/0] {132} + ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {131} + ¦ ¦ ¦--',': , [0/4] {133} + ¦ ¦ ¦--expr: 23+In [1/0] {134} + ¦ ¦ ¦ ¦--expr: 23 [0/0] {137} + ¦ ¦ ¦ ¦ °--NUM_CONST: 23 [0/0] {136} + ¦ ¦ ¦ ¦--'+': + [0/0] {138} + ¦ ¦ ¦ ¦--expr: Inf [0/1] {140} + ¦ ¦ ¦ ¦ °--NUM_CONST: Inf [0/0] {139} + ¦ ¦ ¦ ¦--'-': - [0/1] {141} + ¦ ¦ ¦ °--expr: 99 [0/0] {143} + ¦ ¦ ¦ °--NUM_CONST: 99 [0/0] {142} + ¦ ¦ ¦--',': , [0/1] {144} + ¦ ¦ ¦--expr: call( [0/0] {145} + ¦ ¦ ¦ ¦--expr: call [0/0] {147} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {146} + ¦ ¦ ¦ ¦--'(': ( [0/6] {148} + ¦ ¦ ¦ ¦--expr: 16 [1/4] {150} + ¦ ¦ ¦ ¦ °--NUM_CONST: 16 [0/0] {149} + ¦ ¦ ¦ °--')': ) [1/0] {151} + ¦ ¦ °--')': ) [0/0] {152} + ¦ °--'}': } [1/0] {153} + °--COMMENT: # com [1/0] {154} diff --git a/tests/testthat/scope-character/scope_none-out.R b/tests/testthat/scope-character/scope_none-out.R new file mode 100644 index 000000000..2c770501f --- /dev/null +++ b/tests/testthat/scope-character/scope_none-out.R @@ -0,0 +1,30 @@ +#'this function does +#' +#' @param x a parameter. +#' indented comments +a<- function(x){ + test_that("I want to test",{ + out <- c(1,c( + 22 +1 + )) + if (x > 10) { + for (x in 22) { # FIXME in operator only to be surrounded by one space. What about %in%? 
+ prin(x) + } + } + } ) + #we like comments too + c(list(x + 2), + c( c( + 26 ^ 2, # FIXME ^ operator has to be surrounded by one space (or none?!), never multiple + 8, + 7 + ) ) ) + + call( + 1, 2, + 23+Inf - 99, call( + 16 + )) +} +# comments everywhere diff --git a/tests/testthat/scope-character/scope_spaces-in.R b/tests/testthat/scope-character/scope_spaces-in.R new file mode 100644 index 000000000..f8a5c9005 --- /dev/null +++ b/tests/testthat/scope-character/scope_spaces-in.R @@ -0,0 +1,4 @@ +a<-function(){ + 1+1 +d=3 + } diff --git a/tests/testthat/scope-character/scope_spaces-in_tree b/tests/testthat/scope-character/scope_spaces-in_tree new file mode 100644 index 000000000..81781740b --- /dev/null +++ b/tests/testthat/scope-character/scope_spaces-in_tree @@ -0,0 +1,25 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + °--expr: a<-fu [0/0] {1} + ¦--expr: a [0/0] {3} + ¦ °--SYMBOL: a [0/0] {2} + ¦--LEFT_ASSIGN: <- [0/0] {4} + °--expr: funct [0/0] {5} + ¦--FUNCTION: funct [0/0] {6} + ¦--'(': ( [0/0] {7} + ¦--')': ) [0/0] {8} + °--expr: { + [0/0] {9} + ¦--'{': { [0/20] {10} + ¦--expr: 1+1 [1/0] {11} + ¦ ¦--expr: 1 [0/0] {13} + ¦ ¦ °--NUM_CONST: 1 [0/0] {12} + ¦ ¦--'+': + [0/0] {14} + ¦ °--expr: 1 [0/0] {16} + ¦ °--NUM_CONST: 1 [0/0] {15} + ¦--expr_or_assign_or_help: d=3 [1/4] {17} + ¦ ¦--expr: d [0/0] {19} + ¦ ¦ °--SYMBOL: d [0/0] {18} + ¦ ¦--EQ_ASSIGN: = [0/0] {20} + ¦ °--expr: 3 [0/0] {22} + ¦ °--NUM_CONST: 3 [0/0] {21} + °--'}': } [1/0] {23} diff --git a/tests/testthat/scope-character/scope_spaces-out.R b/tests/testthat/scope-character/scope_spaces-out.R new file mode 100644 index 000000000..4036923a9 --- /dev/null +++ b/tests/testthat/scope-character/scope_spaces-out.R @@ -0,0 +1,4 @@ +a <- function() { + 1 + 1 +d = 3 + } diff --git a/tests/testthat/scope-character/scope_tokens-in.R b/tests/testthat/scope-character/scope_tokens-in.R new file mode 100644 index 000000000..8fd4735e5 --- /dev/null +++ b/tests/testthat/scope-character/scope_tokens-in.R @@ -0,0 +1,21 @@ +# adding line-break +if (x) {1 + 1 + +1} else {3} + +# removing line-break +test_that("x", + { + my_test(call) + }) + + +# do not replace assignment +a = 3 +data_frame(a = 3) + +# do not resolve semicolon +a <- function(x) x + 1;b;c + +# don't add brackets in pipes +a %>% + b %>% + c diff --git a/tests/testthat/scope-character/scope_tokens-in_tree b/tests/testthat/scope-character/scope_tokens-in_tree new file mode 100644 index 000000000..2b1d97fb2 --- /dev/null +++ b/tests/testthat/scope-character/scope_tokens-in_tree @@ -0,0 +1,96 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: # add [0/0] {1} + ¦--expr: if (x [1/0] {2} + ¦ ¦--IF: if [0/1] {3} + ¦ ¦--'(': ( [0/0] {4} + ¦ ¦--expr: x [0/0] {6} + ¦ ¦ °--SYMBOL: x [0/0] {5} + ¦ ¦--')': ) [0/1] {7} + ¦ ¦--expr: {1 + [0/1] {8} + ¦ ¦ ¦--'{': { [0/0] {9} + ¦ ¦ ¦--expr: 1 + 1 [0/0] {10} + ¦ ¦ ¦ ¦--expr: 1 [0/1] {13} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {12} + ¦ ¦ ¦ ¦--'+': + [0/1] {14} + ¦ ¦ ¦ ¦--expr: 1 [0/1] {16} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {15} + ¦ ¦ ¦ ¦--'+': + [0/1] {17} + ¦ ¦ ¦ °--expr: +1 [0/0] {18} + ¦ ¦ ¦ ¦--'+': + [0/0] {19} + ¦ ¦ ¦ °--expr: 1 [0/0] {21} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {20} + ¦ ¦ °--'}': } [0/0] {22} + ¦ ¦--ELSE: else [0/1] {23} + ¦ °--expr: {3} [0/0] {24} + ¦ ¦--'{': { [0/0] {25} + ¦ ¦--expr: 3 [0/0] {27} + ¦ ¦ °--NUM_CONST: 3 [0/0] {26} + ¦ °--'}': } [0/0] {28} + ¦--COMMENT: # rem [2/0] {29} + ¦--expr: test_ [1/0] {30} + ¦ ¦--expr: test_ [0/0] {32} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {31} + ¦ ¦--'(': ( [0/0] 
{33} + ¦ ¦--expr: "x" [0/0] {35} + ¦ ¦ °--STR_CONST: "x" [0/0] {34} + ¦ ¦--',': , [0/10] {36} + ¦ ¦--expr: { + [1/0] {37} + ¦ ¦ ¦--'{': { [0/12] {38} + ¦ ¦ ¦--expr: my_te [1/10] {39} + ¦ ¦ ¦ ¦--expr: my_te [0/0] {41} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: my_te [0/0] {40} + ¦ ¦ ¦ ¦--'(': ( [0/0] {42} + ¦ ¦ ¦ ¦--expr: call [0/0] {44} + ¦ ¦ ¦ ¦ °--SYMBOL: call [0/0] {43} + ¦ ¦ ¦ °--')': ) [0/0] {45} + ¦ ¦ °--'}': } [1/0] {46} + ¦ °--')': ) [0/0] {47} + ¦--COMMENT: # do [3/0] {48} + ¦--expr_or_assign_or_help: a = 3 [1/0] {49} + ¦ ¦--expr: a [0/1] {51} + ¦ ¦ °--SYMBOL: a [0/0] {50} + ¦ ¦--EQ_ASSIGN: = [0/1] {52} + ¦ °--expr: 3 [0/0] {54} + ¦ °--NUM_CONST: 3 [0/0] {53} + ¦--expr: data_ [1/0] {55} + ¦ ¦--expr: data_ [0/0] {57} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: data_ [0/0] {56} + ¦ ¦--'(': ( [0/0] {58} + ¦ ¦--SYMBOL_SUB: a [0/1] {59} + ¦ ¦--EQ_SUB: = [0/1] {60} + ¦ ¦--expr: 3 [0/0] {62} + ¦ ¦ °--NUM_CONST: 3 [0/0] {61} + ¦ °--')': ) [0/0] {63} + ¦--COMMENT: # do [2/0] {64} + ¦--expr: a <- [1/0] {65} + ¦ ¦--expr: a [0/1] {67} + ¦ ¦ °--SYMBOL: a [0/0] {66} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {68} + ¦ °--expr: funct [0/0] {69} + ¦ ¦--FUNCTION: funct [0/0] {70} + ¦ ¦--'(': ( [0/0] {71} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {72} + ¦ ¦--')': ) [0/1] {73} + ¦ °--expr: x + 1 [0/0] {74} + ¦ ¦--expr: x [0/1] {76} + ¦ ¦ °--SYMBOL: x [0/0] {75} + ¦ ¦--'+': + [0/1] {77} + ¦ °--expr: 1 [0/0] {79} + ¦ °--NUM_CONST: 1 [0/0] {78} + ¦--';': ; [0/0] {80} + ¦--expr: b [0/0] {82} + ¦ °--SYMBOL: b [0/0] {81} + ¦--';': ; [0/0] {83} + ¦--expr: c [0/0] {85} + ¦ °--SYMBOL: c [0/0] {84} + ¦--COMMENT: # don [2/0] {86} + °--expr: a %>% [1/0] {87} + ¦--expr: a [0/1] {90} + ¦ °--SYMBOL: a [0/0] {89} + ¦--SPECIAL-PIPE: %>% [0/2] {91} + ¦--expr: b [1/1] {93} + ¦ °--SYMBOL: b [0/0] {92} + ¦--SPECIAL-PIPE: %>% [0/2] {94} + °--expr: c [1/0] {96} + °--SYMBOL: c [0/0] {95} diff --git a/tests/testthat/scope_argument/scope_tokens-out.R b/tests/testthat/scope-character/scope_tokens-out.R similarity index 100% rename from tests/testthat/scope_argument/scope_tokens-out.R rename to tests/testthat/scope-character/scope_tokens-out.R diff --git a/tests/testthat/scope_argument/scope_indention-in_tree b/tests/testthat/scope_argument/scope_indention-in_tree deleted file mode 100644 index ec4bddcb1..000000000 --- a/tests/testthat/scope_argument/scope_indention-in_tree +++ /dev/null @@ -1,96 +0,0 @@ -ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--COMMENT: # not [0/0] {1} - ¦--expr: [1/0] {2} - ¦ ¦--IF: if [0/1] {3} - ¦ ¦--'(': ( [0/0] {4} - ¦ ¦--expr: [0/0] {6} - ¦ ¦ °--SYMBOL: x [0/0] {5} - ¦ ¦--')': ) [0/1] {7} - ¦ ¦--expr: [0/1] {8} - ¦ ¦ ¦--'{': { [0/0] {9} - ¦ ¦ ¦--expr: [0/0] {10} - ¦ ¦ ¦ ¦--expr: [0/0] {13} - ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {12} - ¦ ¦ ¦ ¦--'+': + [0/0] {14} - ¦ ¦ ¦ ¦--expr: [0/0] {16} - ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {15} - ¦ ¦ ¦ ¦--'+': + [0/0] {17} - ¦ ¦ ¦ °--expr: [0/0] {18} - ¦ ¦ ¦ ¦--'+': + [0/0] {19} - ¦ ¦ ¦ °--expr: [0/0] {21} - ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {20} - ¦ ¦ °--'}': } [0/0] {22} - ¦ ¦--ELSE: else [0/0] {23} - ¦ °--expr: [0/0] {24} - ¦ ¦--'{': { [0/0] {25} - ¦ ¦--expr: [0/0] {27} - ¦ ¦ °--NUM_CONST: 3 [0/0] {26} - ¦ °--'}': } [0/0] {28} - ¦--COMMENT: # not [2/0] {29} - ¦--COMMENT: # FIX [1/0] {30} - ¦--expr: [1/0] {31} - ¦ ¦--expr: [0/0] {33} - ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {32} - ¦ ¦--'(': ( [0/0] {34} - ¦ ¦--expr: [0/0] {36} - ¦ ¦ °--STR_CONST: "x" [0/0] {35} - ¦ ¦--',': , [0/2] {37} - ¦ ¦--expr: [1/0] {38} - ¦ ¦ ¦--'{': { [0/12] {39} - ¦ ¦ ¦--expr: [1/0] {40} - ¦ ¦ ¦ ¦--expr: [0/0] {42} - ¦ ¦ ¦ ¦ 
°--SYMBOL_FUNCTION_CALL: my_te [0/0] {41} - ¦ ¦ ¦ ¦--'(': ( [0/0] {43} - ¦ ¦ ¦ ¦--expr: [0/0] {45} - ¦ ¦ ¦ ¦ °--SYMBOL: call [0/0] {44} - ¦ ¦ ¦ °--')': ) [0/0] {46} - ¦ ¦ °--'}': } [1/0] {47} - ¦ °--')': ) [0/0] {48} - ¦--COMMENT: # do [2/0] {49} - ¦--expr: [1/0] {49.9} - ¦ ¦--expr: [0/1] {51} - ¦ ¦ °--SYMBOL: a [0/0] {50} - ¦ ¦--EQ_ASSIGN: = [0/1] {52} - ¦ °--expr: [0/0] {54} - ¦ °--NUM_CONST: 3 [0/0] {53} - ¦--expr: [1/0] {55} - ¦ ¦--expr: [0/0] {57} - ¦ ¦ °--SYMBOL_FUNCTION_CALL: data_ [0/0] {56} - ¦ ¦--'(': ( [0/0] {58} - ¦ ¦--SYMBOL_SUB: a [0/1] {59} - ¦ ¦--EQ_SUB: = [0/1] {60} - ¦ ¦--expr: [0/0] {62} - ¦ ¦ °--NUM_CONST: 3 [0/0] {61} - ¦ °--')': ) [0/0] {63} - ¦--COMMENT: # do [2/0] {64} - ¦--expr: [1/0] {65} - ¦ ¦--expr: [0/1] {67} - ¦ ¦ °--SYMBOL: a [0/0] {66} - ¦ ¦--LEFT_ASSIGN: <- [0/1] {68} - ¦ °--expr: [0/0] {69} - ¦ ¦--FUNCTION: funct [0/0] {70} - ¦ ¦--'(': ( [0/0] {71} - ¦ ¦--SYMBOL_FORMALS: x [0/0] {72} - ¦ ¦--')': ) [0/1] {73} - ¦ °--expr: [0/0] {74} - ¦ ¦--expr: [0/1] {76} - ¦ ¦ °--SYMBOL: x [0/0] {75} - ¦ ¦--'+': + [0/1] {77} - ¦ °--expr: [0/0] {79} - ¦ °--NUM_CONST: 1 [0/0] {78} - ¦--';': ; [0/0] {80} - ¦--expr: [0/0] {82} - ¦ °--SYMBOL: b [0/0] {81} - ¦--';': ; [0/0] {83} - ¦--expr: [0/0] {85} - ¦ °--SYMBOL: c [0/0] {84} - ¦--COMMENT: # don [2/0] {86} - °--expr: [1/0] {87} - ¦--expr: [0/1] {90} - ¦ °--SYMBOL: a [0/0] {89} - ¦--SPECIAL-PIPE: %>% [0/2] {91} - ¦--expr: [1/1] {93} - ¦ °--SYMBOL: b [0/0] {92} - ¦--SPECIAL-PIPE: %>% [0/2] {94} - °--expr: [1/0] {96} - °--SYMBOL: c [0/0] {95} diff --git a/tests/testthat/scope_argument/scope_line_breaks-in_tree b/tests/testthat/scope_argument/scope_line_breaks-in_tree deleted file mode 100644 index ec702355a..000000000 --- a/tests/testthat/scope_argument/scope_line_breaks-in_tree +++ /dev/null @@ -1,95 +0,0 @@ -ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--COMMENT: # add [0/0] {1} - ¦--expr: [1/0] {2} - ¦ ¦--IF: if [0/1] {3} - ¦ ¦--'(': ( [0/0] {4} - ¦ ¦--expr: [0/0] {6} - ¦ ¦ °--SYMBOL: x [0/0] {5} - ¦ ¦--')': ) [0/1] {7} - ¦ ¦--expr: [0/1] {8} - ¦ ¦ ¦--'{': { [0/0] {9} - ¦ ¦ ¦--expr: [0/0] {10} - ¦ ¦ ¦ ¦--expr: [0/1] {13} - ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {12} - ¦ ¦ ¦ ¦--'+': + [0/1] {14} - ¦ ¦ ¦ ¦--expr: [0/1] {16} - ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {15} - ¦ ¦ ¦ ¦--'+': + [0/1] {17} - ¦ ¦ ¦ °--expr: [0/0] {18} - ¦ ¦ ¦ ¦--'+': + [0/0] {19} - ¦ ¦ ¦ °--expr: [0/0] {21} - ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {20} - ¦ ¦ °--'}': } [0/0] {22} - ¦ ¦--ELSE: else [0/1] {23} - ¦ °--expr: [0/0] {24} - ¦ ¦--'{': { [0/0] {25} - ¦ ¦--expr: [0/0] {27} - ¦ ¦ °--NUM_CONST: 3 [0/0] {26} - ¦ °--'}': } [0/0] {28} - ¦--COMMENT: # rem [2/0] {29} - ¦--expr: [1/0] {30} - ¦ ¦--expr: [0/0] {32} - ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {31} - ¦ ¦--'(': ( [0/0] {33} - ¦ ¦--expr: [0/0] {35} - ¦ ¦ °--STR_CONST: "x" [0/0] {34} - ¦ ¦--',': , [0/10] {36} - ¦ ¦--expr: [1/0] {37} - ¦ ¦ ¦--'{': { [0/12] {38} - ¦ ¦ ¦--expr: [1/10] {39} - ¦ ¦ ¦ ¦--expr: [0/0] {41} - ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: my_te [0/0] {40} - ¦ ¦ ¦ ¦--'(': ( [0/0] {42} - ¦ ¦ ¦ ¦--expr: [0/0] {44} - ¦ ¦ ¦ ¦ °--SYMBOL: call [0/0] {43} - ¦ ¦ ¦ °--')': ) [0/0] {45} - ¦ ¦ °--'}': } [1/0] {46} - ¦ °--')': ) [0/0] {47} - ¦--COMMENT: # do [3/0] {48} - ¦--expr: [1/0] {48.9} - ¦ ¦--expr: [0/1] {50} - ¦ ¦ °--SYMBOL: a [0/0] {49} - ¦ ¦--EQ_ASSIGN: = [0/1] {51} - ¦ °--expr: [0/0] {53} - ¦ °--NUM_CONST: 3 [0/0] {52} - ¦--expr: [1/0] {54} - ¦ ¦--expr: [0/0] {56} - ¦ ¦ °--SYMBOL_FUNCTION_CALL: data_ [0/0] {55} - ¦ ¦--'(': ( [0/0] {57} - ¦ ¦--SYMBOL_SUB: a [0/1] {58} - ¦ ¦--EQ_SUB: = [0/1] {59} - ¦ 
¦--expr: [0/0] {61} - ¦ ¦ °--NUM_CONST: 3 [0/0] {60} - ¦ °--')': ) [0/0] {62} - ¦--COMMENT: # do [2/0] {63} - ¦--expr: [1/0] {64} - ¦ ¦--expr: [0/1] {66} - ¦ ¦ °--SYMBOL: a [0/0] {65} - ¦ ¦--LEFT_ASSIGN: <- [0/1] {67} - ¦ °--expr: [0/0] {68} - ¦ ¦--FUNCTION: funct [0/0] {69} - ¦ ¦--'(': ( [0/0] {70} - ¦ ¦--SYMBOL_FORMALS: x [0/0] {71} - ¦ ¦--')': ) [0/1] {72} - ¦ °--expr: [0/0] {73} - ¦ ¦--expr: [0/1] {75} - ¦ ¦ °--SYMBOL: x [0/0] {74} - ¦ ¦--'+': + [0/1] {76} - ¦ °--expr: [0/0] {78} - ¦ °--NUM_CONST: 1 [0/0] {77} - ¦--';': ; [0/0] {79} - ¦--expr: [0/0] {81} - ¦ °--SYMBOL: b [0/0] {80} - ¦--';': ; [0/0] {82} - ¦--expr: [0/0] {84} - ¦ °--SYMBOL: c [0/0] {83} - ¦--COMMENT: # don [2/0] {85} - °--expr: [1/0] {86} - ¦--expr: [0/1] {89} - ¦ °--SYMBOL: a [0/0] {88} - ¦--SPECIAL-PIPE: %>% [0/2] {90} - ¦--expr: [1/1] {92} - ¦ °--SYMBOL: b [0/0] {91} - ¦--SPECIAL-PIPE: %>% [0/2] {93} - °--expr: [1/0] {95} - °--SYMBOL: c [0/0] {94} diff --git a/tests/testthat/scope_argument/scope_spaces-in_tree b/tests/testthat/scope_argument/scope_spaces-in_tree deleted file mode 100644 index 346d236fa..000000000 --- a/tests/testthat/scope_argument/scope_spaces-in_tree +++ /dev/null @@ -1,24 +0,0 @@ -ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/0] {3} - ¦ °--SYMBOL: a [0/0] {2} - ¦--LEFT_ASSIGN: <- [0/0] {4} - °--expr: [0/0] {5} - ¦--FUNCTION: funct [0/0] {6} - ¦--'(': ( [0/0] {7} - ¦--')': ) [0/0] {8} - °--expr: [0/0] {9} - ¦--'{': { [0/20] {10} - ¦--expr: [1/0] {11} - ¦ ¦--expr: [0/0] {13} - ¦ ¦ °--NUM_CONST: 1 [0/0] {12} - ¦ ¦--'+': + [0/0] {14} - ¦ °--expr: [0/0] {16} - ¦ °--NUM_CONST: 1 [0/0] {15} - ¦--expr: [1/4] {16.9} - ¦ ¦--expr: [0/0] {18} - ¦ ¦ °--SYMBOL: d [0/0] {17} - ¦ ¦--EQ_ASSIGN: = [0/0] {19} - ¦ °--expr: [0/0] {21} - ¦ °--NUM_CONST: 3 [0/0] {20} - °--'}': } [1/0] {22} diff --git a/tests/testthat/scope_argument/scope_tokens-in_tree b/tests/testthat/scope_argument/scope_tokens-in_tree deleted file mode 100644 index ec702355a..000000000 --- a/tests/testthat/scope_argument/scope_tokens-in_tree +++ /dev/null @@ -1,95 +0,0 @@ -ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--COMMENT: # add [0/0] {1} - ¦--expr: [1/0] {2} - ¦ ¦--IF: if [0/1] {3} - ¦ ¦--'(': ( [0/0] {4} - ¦ ¦--expr: [0/0] {6} - ¦ ¦ °--SYMBOL: x [0/0] {5} - ¦ ¦--')': ) [0/1] {7} - ¦ ¦--expr: [0/1] {8} - ¦ ¦ ¦--'{': { [0/0] {9} - ¦ ¦ ¦--expr: [0/0] {10} - ¦ ¦ ¦ ¦--expr: [0/1] {13} - ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {12} - ¦ ¦ ¦ ¦--'+': + [0/1] {14} - ¦ ¦ ¦ ¦--expr: [0/1] {16} - ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {15} - ¦ ¦ ¦ ¦--'+': + [0/1] {17} - ¦ ¦ ¦ °--expr: [0/0] {18} - ¦ ¦ ¦ ¦--'+': + [0/0] {19} - ¦ ¦ ¦ °--expr: [0/0] {21} - ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {20} - ¦ ¦ °--'}': } [0/0] {22} - ¦ ¦--ELSE: else [0/1] {23} - ¦ °--expr: [0/0] {24} - ¦ ¦--'{': { [0/0] {25} - ¦ ¦--expr: [0/0] {27} - ¦ ¦ °--NUM_CONST: 3 [0/0] {26} - ¦ °--'}': } [0/0] {28} - ¦--COMMENT: # rem [2/0] {29} - ¦--expr: [1/0] {30} - ¦ ¦--expr: [0/0] {32} - ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {31} - ¦ ¦--'(': ( [0/0] {33} - ¦ ¦--expr: [0/0] {35} - ¦ ¦ °--STR_CONST: "x" [0/0] {34} - ¦ ¦--',': , [0/10] {36} - ¦ ¦--expr: [1/0] {37} - ¦ ¦ ¦--'{': { [0/12] {38} - ¦ ¦ ¦--expr: [1/10] {39} - ¦ ¦ ¦ ¦--expr: [0/0] {41} - ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: my_te [0/0] {40} - ¦ ¦ ¦ ¦--'(': ( [0/0] {42} - ¦ ¦ ¦ ¦--expr: [0/0] {44} - ¦ ¦ ¦ ¦ °--SYMBOL: call [0/0] {43} - ¦ ¦ ¦ °--')': ) [0/0] {45} - ¦ ¦ °--'}': } [1/0] {46} - ¦ °--')': ) [0/0] {47} - ¦--COMMENT: # do [3/0] {48} - ¦--expr: [1/0] {48.9} - ¦ ¦--expr: [0/1] {50} - ¦ ¦ 
°--SYMBOL: a [0/0] {49} - ¦ ¦--EQ_ASSIGN: = [0/1] {51} - ¦ °--expr: [0/0] {53} - ¦ °--NUM_CONST: 3 [0/0] {52} - ¦--expr: [1/0] {54} - ¦ ¦--expr: [0/0] {56} - ¦ ¦ °--SYMBOL_FUNCTION_CALL: data_ [0/0] {55} - ¦ ¦--'(': ( [0/0] {57} - ¦ ¦--SYMBOL_SUB: a [0/1] {58} - ¦ ¦--EQ_SUB: = [0/1] {59} - ¦ ¦--expr: [0/0] {61} - ¦ ¦ °--NUM_CONST: 3 [0/0] {60} - ¦ °--')': ) [0/0] {62} - ¦--COMMENT: # do [2/0] {63} - ¦--expr: [1/0] {64} - ¦ ¦--expr: [0/1] {66} - ¦ ¦ °--SYMBOL: a [0/0] {65} - ¦ ¦--LEFT_ASSIGN: <- [0/1] {67} - ¦ °--expr: [0/0] {68} - ¦ ¦--FUNCTION: funct [0/0] {69} - ¦ ¦--'(': ( [0/0] {70} - ¦ ¦--SYMBOL_FORMALS: x [0/0] {71} - ¦ ¦--')': ) [0/1] {72} - ¦ °--expr: [0/0] {73} - ¦ ¦--expr: [0/1] {75} - ¦ ¦ °--SYMBOL: x [0/0] {74} - ¦ ¦--'+': + [0/1] {76} - ¦ °--expr: [0/0] {78} - ¦ °--NUM_CONST: 1 [0/0] {77} - ¦--';': ; [0/0] {79} - ¦--expr: [0/0] {81} - ¦ °--SYMBOL: b [0/0] {80} - ¦--';': ; [0/0] {82} - ¦--expr: [0/0] {84} - ¦ °--SYMBOL: c [0/0] {83} - ¦--COMMENT: # don [2/0] {85} - °--expr: [1/0] {86} - ¦--expr: [0/1] {89} - ¦ °--SYMBOL: a [0/0] {88} - ¦--SPECIAL-PIPE: %>% [0/2] {90} - ¦--expr: [1/1] {92} - ¦ °--SYMBOL: b [0/0] {91} - ¦--SPECIAL-PIPE: %>% [0/2] {93} - °--expr: [1/0] {95} - °--SYMBOL: c [0/0] {94} diff --git a/tests/testthat/serialize_tests/correct-in.R b/tests/testthat/serialize_tests/correct-in.R index d70f6df36..01e79c32a 100644 --- a/tests/testthat/serialize_tests/correct-in.R +++ b/tests/testthat/serialize_tests/correct-in.R @@ -1,4 +1,3 @@ 1 2 3 - diff --git a/tests/testthat/serialize_tests/correct-in_tree b/tests/testthat/serialize_tests/correct-in_tree index aeaab11cf..be3886955 100644 --- a/tests/testthat/serialize_tests/correct-in_tree +++ b/tests/testthat/serialize_tests/correct-in_tree @@ -1,7 +1,7 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {2} + ¦--expr: 1 [0/0] {2} ¦ °--NUM_CONST: 1 [0/0] {1} - ¦--expr: [1/0] {4} + ¦--expr: 2 [1/0] {4} ¦ °--NUM_CONST: 2 [0/0] {3} - °--expr: [1/0] {6} + °--expr: 3 [1/0] {6} °--NUM_CONST: 3 [0/0] {5} diff --git a/tests/testthat/serialize_tests/correct-out.R b/tests/testthat/serialize_tests/correct-out.R index d70f6df36..01e79c32a 100644 --- a/tests/testthat/serialize_tests/correct-out.R +++ b/tests/testthat/serialize_tests/correct-out.R @@ -1,4 +1,3 @@ 1 2 3 - diff --git a/tests/testthat/serialize_tests/k2-another-in_file-out.R b/tests/testthat/serialize_tests/k2-another-in_file-out.R new file mode 100644 index 000000000..9f77f05f3 --- /dev/null +++ b/tests/testthat/serialize_tests/k2-another-in_file-out.R @@ -0,0 +1,3 @@ +call(1, + call2(call(3, 1, 2), + 4)) diff --git a/tests/testthat/serialize_tests/k3-in_tree b/tests/testthat/serialize_tests/k3-in_tree index cbb8dee2d..5f6cd8cf6 100644 --- a/tests/testthat/serialize_tests/k3-in_tree +++ b/tests/testthat/serialize_tests/k3-in_tree @@ -1,6 +1,6 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/0] {3} + °--expr: call( [0/0] {1} + ¦--expr: call [0/0] {3} ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {2} ¦--'(': ( [0/0] {4} °--')': ) [0/0] {5} diff --git a/tests/testthat/spacing/bang_bang_spacing-in_tree b/tests/testthat/spacing/bang_bang_spacing-in_tree index 9d5e9d334..d7f85e501 100644 --- a/tests/testthat/spacing/bang_bang_spacing-in_tree +++ b/tests/testthat/spacing/bang_bang_spacing-in_tree @@ -1,60 +1,60 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/0] {3} + ¦--expr: a(! ! 
[0/0] {1} + ¦ ¦--expr: a [0/0] {3} ¦ ¦ °--SYMBOL_FUNCTION_CALL: a [0/0] {2} ¦ ¦--'(': ( [0/0] {4} - ¦ ¦--expr: [0/0] {5} + ¦ ¦--expr: ! !!x [0/0] {5} ¦ ¦ ¦--'!': ! [0/1] {6} - ¦ ¦ °--expr: [0/0] {7} + ¦ ¦ °--expr: !!x [0/0] {7} ¦ ¦ ¦--'!': ! [0/0] {8} - ¦ ¦ °--expr: [0/0] {9} + ¦ ¦ °--expr: !x [0/0] {9} ¦ ¦ ¦--'!': ! [0/0] {10} - ¦ ¦ °--expr: [0/0] {12} + ¦ ¦ °--expr: x [0/0] {12} ¦ ¦ °--SYMBOL: x [0/0] {11} ¦ °--')': ) [0/0] {13} - ¦--expr: [1/0] {14} - ¦ ¦--expr: [0/0] {16} + ¦--expr: k(!!g [1/0] {14} + ¦ ¦--expr: k [0/0] {16} ¦ ¦ °--SYMBOL_FUNCTION_CALL: k [0/0] {15} ¦ ¦--'(': ( [0/0] {17} - ¦ ¦--expr: [0/0] {18} + ¦ ¦--expr: !!g [0/0] {18} ¦ ¦ ¦--'!': ! [0/0] {19} - ¦ ¦ °--expr: [0/0] {20} + ¦ ¦ °--expr: !g [0/0] {20} ¦ ¦ ¦--'!': ! [0/0] {21} - ¦ ¦ °--expr: [0/0] {23} + ¦ ¦ °--expr: g [0/0] {23} ¦ ¦ °--SYMBOL: g [0/0] {22} ¦ °--')': ) [0/0] {24} - ¦--expr: [1/0] {25} - ¦ ¦--expr: [0/0] {27} + ¦--expr: a(!!! [1/0] {25} + ¦ ¦--expr: a [0/0] {27} ¦ ¦ °--SYMBOL_FUNCTION_CALL: a [0/0] {26} ¦ ¦--'(': ( [0/0] {28} - ¦ ¦--expr: [0/0] {29} + ¦ ¦--expr: !!!x [0/0] {29} ¦ ¦ ¦--'!': ! [0/0] {30} - ¦ ¦ °--expr: [0/0] {31} + ¦ ¦ °--expr: !!x [0/0] {31} ¦ ¦ ¦--'!': ! [0/0] {32} - ¦ ¦ °--expr: [0/0] {33} + ¦ ¦ °--expr: !x [0/0] {33} ¦ ¦ ¦--'!': ! [0/0] {34} - ¦ ¦ °--expr: [0/0] {36} + ¦ ¦ °--expr: x [0/0] {36} ¦ ¦ °--SYMBOL: x [0/0] {35} ¦ °--')': ) [0/0] {37} - ¦--expr: [1/0] {38} - ¦ ¦--expr: [0/0] {40} + ¦--expr: a(!! [1/0] {38} + ¦ ¦--expr: a [0/0] {40} ¦ ¦ °--SYMBOL_FUNCTION_CALL: a [0/0] {39} ¦ ¦--'(': ( [0/0] {41} - ¦ ¦--expr: [0/0] {42} + ¦ ¦--expr: !! !x [0/0] {42} ¦ ¦ ¦--'!': ! [0/0] {43} - ¦ ¦ °--expr: [0/0] {44} + ¦ ¦ °--expr: ! !x [0/0] {44} ¦ ¦ ¦--'!': ! [0/1] {45} - ¦ ¦ °--expr: [0/0] {46} + ¦ ¦ °--expr: !x [0/0] {46} ¦ ¦ ¦--'!': ! [0/0] {47} - ¦ ¦ °--expr: [0/0] {49} + ¦ ¦ °--expr: x [0/0] {49} ¦ ¦ °--SYMBOL: x [0/0] {48} ¦ °--')': ) [0/0] {50} - °--expr: [1/0] {51} - ¦--expr: [0/0] {53} + °--expr: a(!b) [1/0] {51} + ¦--expr: a [0/0] {53} ¦ °--SYMBOL_FUNCTION_CALL: a [0/0] {52} ¦--'(': ( [0/0] {54} - ¦--expr: [0/0] {55} + ¦--expr: !b [0/0] {55} ¦ ¦--'!': ! [0/0] {56} - ¦ °--expr: [0/0] {58} + ¦ °--expr: b [0/0] {58} ¦ °--SYMBOL: b [0/0] {57} °--')': ) [0/0] {59} diff --git a/tests/testthat/spacing/bang_bang_spacing-out.R b/tests/testthat/spacing/bang_bang_spacing-out.R index 93afb2e4e..164ecb48a 100644 --- a/tests/testthat/spacing/bang_bang_spacing-out.R +++ b/tests/testthat/spacing/bang_bang_spacing-out.R @@ -1,5 +1,5 @@ -a(! !! x) -k(!! g) -a(!!! x) -a(!! ! x) +a(! !!x) +k(!!g) +a(!!!x) +a(!! 
!x) a(!b) diff --git a/tests/testthat/spacing/colons-in_tree b/tests/testthat/spacing/colons-in_tree index e4fda2f6f..74758f9da 100644 --- a/tests/testthat/spacing/colons-in_tree +++ b/tests/testthat/spacing/colons-in_tree @@ -1,45 +1,45 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/1] {3} + ¦--expr: 1 : [0/0] {1} + ¦ ¦--expr: 1 [0/1] {3} ¦ ¦ °--NUM_CONST: 1 [0/0] {2} ¦ ¦--':': : [0/2] {4} - ¦ °--expr: [0/0] {6} + ¦ °--expr: 4 [0/0] {6} ¦ °--NUM_CONST: 4 [0/0] {5} - ¦--expr: [2/0] {7} - ¦ ¦--expr: [0/0] {9} + ¦--expr: 1:4 [2/0] {7} + ¦ ¦--expr: 1 [0/0] {9} ¦ ¦ °--NUM_CONST: 1 [0/0] {8} ¦ ¦--':': : [0/0] {10} - ¦ °--expr: [0/0] {12} + ¦ °--expr: 4 [0/0] {12} ¦ °--NUM_CONST: 4 [0/0] {11} - ¦--expr: [2/0] {13} - ¦ ¦--expr: [0/0] {14} + ¦--expr: base [2/0] {13} + ¦ ¦--expr: base [0/0] {14} ¦ ¦ ¦--SYMBOL_PACKAGE: base [0/1] {15} ¦ ¦ ¦--NS_GET: :: [0/1] {16} ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {17} ¦ ¦--'(': ( [0/0] {18} ¦ °--')': ) [0/0] {19} - ¦--expr: [2/0] {20} - ¦ ¦--expr: [0/0] {21} + ¦--expr: base: [2/0] {20} + ¦ ¦--expr: base: [0/0] {21} ¦ ¦ ¦--SYMBOL_PACKAGE: base [0/0] {22} ¦ ¦ ¦--NS_GET: :: [0/0] {23} ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {24} ¦ ¦--'(': ( [0/0] {25} ¦ °--')': ) [0/0] {26} - ¦--expr: [2/0] {27} - ¦ ¦--expr: [0/0] {28} + ¦--expr: xyz:: [2/0] {27} + ¦ ¦--expr: xyz:: [0/0] {28} ¦ ¦ ¦--SYMBOL_PACKAGE: xyz [0/0] {29} ¦ ¦ ¦--NS_GET_INT: ::: [0/1] {30} ¦ ¦ °--SYMBOL_FUNCTION_CALL: xy [0/0] {31} ¦ ¦--'(': ( [0/0] {32} - ¦ ¦--expr: [0/0] {34} + ¦ ¦--expr: 3 [0/0] {34} ¦ ¦ °--NUM_CONST: 3 [0/0] {33} ¦ °--')': ) [0/0] {35} - °--expr: [2/0] {36} - ¦--expr: [0/0] {37} + °--expr: xyz:: [2/0] {36} + ¦--expr: xyz:: [0/0] {37} ¦ ¦--SYMBOL_PACKAGE: xyz [0/0] {38} ¦ ¦--NS_GET_INT: ::: [0/0] {39} ¦ °--SYMBOL_FUNCTION_CALL: xy [0/0] {40} ¦--'(': ( [0/0] {41} - ¦--expr: [0/0] {43} + ¦--expr: 3 [0/0] {43} ¦ °--NUM_CONST: 3 [0/0] {42} °--')': ) [0/0] {44} diff --git a/tests/testthat/spacing/comments-in_tree b/tests/testthat/spacing/comments-in_tree index 8aa8efeea..78c967bae 100644 --- a/tests/testthat/spacing/comments-in_tree +++ b/tests/testthat/spacing/comments-in_tree @@ -1,13 +1,13 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/1] {2} + ¦--expr: a [0/1] {2} ¦ °--SYMBOL: a [0/0] {1} ¦--COMMENT: # com [0/0] {3} - ¦--expr: [1/1] {5} + ¦--expr: b [1/1] {5} ¦ °--SYMBOL: b [0/0] {4} ¦--COMMENT: #comm [0/0] {6} - ¦--expr: [1/4] {8} + ¦--expr: c [1/4] {8} ¦ °--SYMBOL: c [0/0] {7} ¦--COMMENT: # com [0/0] {9} - ¦--expr: [1/1] {11} + ¦--expr: dejk [1/1] {11} ¦ °--SYMBOL: dejk [0/0] {10} °--COMMENT: #comm [0/0] {12} diff --git a/tests/testthat/spacing/round_curly-in_tree b/tests/testthat/spacing/round_curly-in_tree index 91be49e5b..a54f8736a 100644 --- a/tests/testthat/spacing/round_curly-in_tree +++ b/tests/testthat/spacing/round_curly-in_tree @@ -1,65 +1,71 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/1] {3} + ¦--expr: a <- [0/0] {1} + ¦ ¦--expr: a [0/1] {3} ¦ ¦ °--SYMBOL: a [0/0] {2} ¦ ¦--LEFT_ASSIGN: <- [0/1] {4} - ¦ °--expr: [0/0] {5} + ¦ °--expr: funct [0/0] {5} ¦ ¦--FUNCTION: funct [0/0] {6} ¦ ¦--'(': ( [0/0] {7} ¦ ¦--SYMBOL_FORMALS: x [0/0] {8} ¦ ¦--')': ) [0/0] {9} - ¦ °--expr: [0/0] {10} + ¦ °--expr: { +} [0/0] {10} ¦ ¦--'{': { [0/0] {11} ¦ °--'}': } [1/0] {12} - ¦--expr: [2/0] {13} + ¦--expr: if(a) [2/0] {13} ¦ ¦--IF: if [0/0] {14} ¦ ¦--'(': ( [0/0] {15} - ¦ ¦--expr: [0/0] {17} + ¦ ¦--expr: a [0/0] {17} ¦ ¦ °--SYMBOL: a [0/0] {16} ¦ ¦--')': ) [0/0] {18} - ¦ 
°--expr: [0/0] {19} + ¦ °--expr: { + 3 [0/0] {19} ¦ ¦--'{': { [0/2] {20} - ¦ ¦--expr: [1/0] {22} + ¦ ¦--expr: 3 [1/0] {22} ¦ ¦ °--NUM_CONST: 3 [0/0] {21} ¦ °--'}': } [1/0] {23} - ¦--expr: [2/0] {24} + ¦--expr: for(i [2/0] {24} ¦ ¦--FOR: for [0/0] {25} - ¦ ¦--forcond: [0/0] {26} + ¦ ¦--forcond: (i in [0/0] {26} ¦ ¦ ¦--'(': ( [0/0] {27} ¦ ¦ ¦--SYMBOL: i [0/1] {28} ¦ ¦ ¦--IN: in [0/1] {29} - ¦ ¦ ¦--expr: [0/0] {31} + ¦ ¦ ¦--expr: 10 [0/0] {31} ¦ ¦ ¦ °--NUM_CONST: 10 [0/0] {30} ¦ ¦ °--')': ) [0/0] {32} - ¦ °--expr: [0/0] {33} + ¦ °--expr: { + i [0/0] {33} ¦ ¦--'{': { [0/2] {34} - ¦ ¦--expr: [1/0] {36} + ¦ ¦--expr: i [1/0] {36} ¦ ¦ °--SYMBOL: i [0/0] {35} ¦ °--'}': } [1/0] {37} - °--expr: [2/0] {38} + °--expr: if(x) [2/0] {38} ¦--IF: if [0/0] {39} ¦--'(': ( [0/0] {40} - ¦--expr: [0/0] {42} + ¦--expr: x [0/0] {42} ¦ °--SYMBOL: x [0/0] {41} ¦--')': ) [0/0] {43} - ¦--expr: [0/0] {44} + ¦--expr: { + y [0/0] {44} ¦ ¦--'{': { [0/2] {45} - ¦ ¦--expr: [1/0] {47} + ¦ ¦--expr: y [1/0] {47} ¦ ¦ °--SYMBOL: y [0/0] {46} ¦ °--'}': } [1/0] {48} ¦--ELSE: else [0/1] {49} - °--expr: [0/0] {50} + °--expr: if(x) [0/0] {50} ¦--IF: if [0/0] {51} ¦--'(': ( [0/0] {52} - ¦--expr: [0/0] {54} + ¦--expr: x [0/0] {54} ¦ °--SYMBOL: x [0/0] {53} ¦--')': ) [0/0] {55} - ¦--expr: [0/1] {56} + ¦--expr: { + x [0/1] {56} ¦ ¦--'{': { [0/2] {57} - ¦ ¦--expr: [1/0] {59} + ¦ ¦--expr: x [1/0] {59} ¦ ¦ °--SYMBOL: x [0/0] {58} ¦ °--'}': } [1/0] {60} ¦--ELSE: else [0/0] {61} - °--expr: [0/0] {62} + °--expr: { +} [0/0] {62} ¦--'{': { [0/0] {63} °--'}': } [1/0] {64} diff --git a/tests/testthat/spacing/spacing-square-in.R b/tests/testthat/spacing/spacing-square-in.R new file mode 100644 index 000000000..9e0679d40 --- /dev/null +++ b/tests/testthat/spacing/spacing-square-in.R @@ -0,0 +1,18 @@ +a[[2]] +a[[2 ]] + +a[[ 2]] + +a[[ 2 ]] + + +a[2] +a[2 ] + +a[ 2] + +a[ 2 ] + + +a [[2]] +a [1] diff --git a/tests/testthat/spacing/spacing-square-in_tree b/tests/testthat/spacing/spacing-square-in_tree new file mode 100644 index 000000000..c712ee813 --- /dev/null +++ b/tests/testthat/spacing/spacing-square-in_tree @@ -0,0 +1,76 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: a[[2] [0/0] {1} + ¦ ¦--expr: a [0/0] {3} + ¦ ¦ °--SYMBOL: a [0/0] {2} + ¦ ¦--LBB: [[ [0/0] {4} + ¦ ¦--expr: 2 [0/0] {6} + ¦ ¦ °--NUM_CONST: 2 [0/0] {5} + ¦ ¦--']': ] [0/0] {7} + ¦ °--']': ] [0/0] {8} + ¦--expr: a[[2 [1/0] {9} + ¦ ¦--expr: a [0/0] {11} + ¦ ¦ °--SYMBOL: a [0/0] {10} + ¦ ¦--LBB: [[ [0/0] {12} + ¦ ¦--expr: 2 [0/1] {14} + ¦ ¦ °--NUM_CONST: 2 [0/0] {13} + ¦ ¦--']': ] [0/0] {15} + ¦ °--']': ] [0/0] {16} + ¦--expr: a[[ 2 [2/0] {17} + ¦ ¦--expr: a [0/0] {19} + ¦ ¦ °--SYMBOL: a [0/0] {18} + ¦ ¦--LBB: [[ [0/1] {20} + ¦ ¦--expr: 2 [0/0] {22} + ¦ ¦ °--NUM_CONST: 2 [0/0] {21} + ¦ ¦--']': ] [0/0] {23} + ¦ °--']': ] [0/0] {24} + ¦--expr: a[[ 2 [2/0] {25} + ¦ ¦--expr: a [0/0] {27} + ¦ ¦ °--SYMBOL: a [0/0] {26} + ¦ ¦--LBB: [[ [0/1] {28} + ¦ ¦--expr: 2 [0/1] {30} + ¦ ¦ °--NUM_CONST: 2 [0/0] {29} + ¦ ¦--']': ] [0/0] {31} + ¦ °--']': ] [0/0] {32} + ¦--expr: a[2] [3/0] {33} + ¦ ¦--expr: a [0/0] {35} + ¦ ¦ °--SYMBOL: a [0/0] {34} + ¦ ¦--'[': [ [0/0] {36} + ¦ ¦--expr: 2 [0/0] {38} + ¦ ¦ °--NUM_CONST: 2 [0/0] {37} + ¦ °--']': ] [0/0] {39} + ¦--expr: a[2 ] [1/0] {40} + ¦ ¦--expr: a [0/0] {42} + ¦ ¦ °--SYMBOL: a [0/0] {41} + ¦ ¦--'[': [ [0/0] {43} + ¦ ¦--expr: 2 [0/1] {45} + ¦ ¦ °--NUM_CONST: 2 [0/0] {44} + ¦ °--']': ] [0/0] {46} + ¦--expr: a[ 2] [2/0] {47} + ¦ ¦--expr: a [0/0] {49} + ¦ ¦ °--SYMBOL: a [0/0] {48} + ¦ ¦--'[': [ [0/1] {50} + ¦ ¦--expr: 2 [0/0] {52} + ¦ ¦ 
°--NUM_CONST: 2 [0/0] {51} + ¦ °--']': ] [0/0] {53} + ¦--expr: a[ 2 [2/0] {54} + ¦ ¦--expr: a [0/0] {56} + ¦ ¦ °--SYMBOL: a [0/0] {55} + ¦ ¦--'[': [ [0/1] {57} + ¦ ¦--expr: 2 [0/1] {59} + ¦ ¦ °--NUM_CONST: 2 [0/0] {58} + ¦ °--']': ] [0/0] {60} + ¦--expr: a [[2 [3/0] {61} + ¦ ¦--expr: a [0/1] {63} + ¦ ¦ °--SYMBOL: a [0/0] {62} + ¦ ¦--LBB: [[ [0/0] {64} + ¦ ¦--expr: 2 [0/0] {66} + ¦ ¦ °--NUM_CONST: 2 [0/0] {65} + ¦ ¦--']': ] [0/0] {67} + ¦ °--']': ] [0/0] {68} + °--expr: a [1] [1/0] {69} + ¦--expr: a [0/1] {71} + ¦ °--SYMBOL: a [0/0] {70} + ¦--'[': [ [0/0] {72} + ¦--expr: 1 [0/0] {74} + ¦ °--NUM_CONST: 1 [0/0] {73} + °--']': ] [0/0] {75} diff --git a/tests/testthat/spacing/spacing-square-out.R b/tests/testthat/spacing/spacing-square-out.R new file mode 100644 index 000000000..6fc7288f6 --- /dev/null +++ b/tests/testthat/spacing/spacing-square-out.R @@ -0,0 +1,18 @@ +a[[2]] +a[[2]] + +a[[2]] + +a[[2]] + + +a[2] +a[2] + +a[2] + +a[2] + + +a[[2]] +a[1] diff --git a/tests/testthat/spacing/spacing-tilde-in.R b/tests/testthat/spacing/spacing-tilde-in.R new file mode 100644 index 000000000..699899ad7 --- /dev/null +++ b/tests/testthat/spacing/spacing-tilde-in.R @@ -0,0 +1,9 @@ +a~b +~b +~b+ c +a + b ~c + +a ~b + ~b +~ b+c +a + b~ c diff --git a/tests/testthat/spacing/spacing-tilde-in_tree b/tests/testthat/spacing/spacing-tilde-in_tree new file mode 100644 index 000000000..b8ac310af --- /dev/null +++ b/tests/testthat/spacing/spacing-tilde-in_tree @@ -0,0 +1,57 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: a~b [0/0] {1} + ¦ ¦--expr: a [0/0] {3} + ¦ ¦ °--SYMBOL: a [0/0] {2} + ¦ ¦--'~': ~ [0/0] {4} + ¦ °--expr: b [0/0] {6} + ¦ °--SYMBOL: b [0/0] {5} + ¦--expr: ~b [1/0] {7} + ¦ ¦--'~': ~ [0/0] {8} + ¦ °--expr: b [0/0] {10} + ¦ °--SYMBOL: b [0/0] {9} + ¦--expr: ~b+ c [1/0] {11} + ¦ ¦--'~': ~ [0/0] {12} + ¦ °--expr: b+ c [0/0] {13} + ¦ ¦--expr: b [0/0] {15} + ¦ ¦ °--SYMBOL: b [0/0] {14} + ¦ ¦--'+': + [0/1] {16} + ¦ °--expr: c [0/0] {18} + ¦ °--SYMBOL: c [0/0] {17} + ¦--expr: a + b [1/0] {19} + ¦ ¦--expr: a + b [0/1] {20} + ¦ ¦ ¦--expr: a [0/1] {22} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {21} + ¦ ¦ ¦--'+': + [0/1] {23} + ¦ ¦ °--expr: b [0/0] {25} + ¦ ¦ °--SYMBOL: b [0/0] {24} + ¦ ¦--'~': ~ [0/0] {26} + ¦ °--expr: c [0/0] {28} + ¦ °--SYMBOL: c [0/0] {27} + ¦--expr: a ~b [2/1] {29} + ¦ ¦--expr: a [0/2] {31} + ¦ ¦ °--SYMBOL: a [0/0] {30} + ¦ ¦--'~': ~ [0/0] {32} + ¦ °--expr: b [0/0] {34} + ¦ °--SYMBOL: b [0/0] {33} + ¦--expr: ~b [1/0] {35} + ¦ ¦--'~': ~ [0/0] {36} + ¦ °--expr: b [0/0] {38} + ¦ °--SYMBOL: b [0/0] {37} + ¦--expr: ~ b+ [1/0] {39} + ¦ ¦--'~': ~ [0/2] {40} + ¦ °--expr: b+c [0/0] {41} + ¦ ¦--expr: b [0/0] {43} + ¦ ¦ °--SYMBOL: b [0/0] {42} + ¦ ¦--'+': + [0/0] {44} + ¦ °--expr: c [0/0] {46} + ¦ °--SYMBOL: c [0/0] {45} + °--expr: a + b [1/0] {47} + ¦--expr: a + b [0/0] {48} + ¦ ¦--expr: a [0/1] {50} + ¦ ¦ °--SYMBOL: a [0/0] {49} + ¦ ¦--'+': + [0/1] {51} + ¦ °--expr: b [0/0] {53} + ¦ °--SYMBOL: b [0/0] {52} + ¦--'~': ~ [0/2] {54} + °--expr: c [0/0] {56} + °--SYMBOL: c [0/0] {55} diff --git a/tests/testthat/spacing/spacing-tilde-out.R b/tests/testthat/spacing/spacing-tilde-out.R new file mode 100644 index 000000000..5f7d6e484 --- /dev/null +++ b/tests/testthat/spacing/spacing-tilde-out.R @@ -0,0 +1,9 @@ +a ~ b +~b +~ b + c +a + b ~ c + +a ~ b +~b +~ b + c +a + b ~ c diff --git a/tests/testthat/spacing/spacing_comma-in_tree b/tests/testthat/spacing/spacing_comma-in_tree index 2fcee1a47..1276d7301 100644 --- a/tests/testthat/spacing/spacing_comma-in_tree +++ 
b/tests/testthat/spacing/spacing_comma-in_tree @@ -1,20 +1,20 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/0] {3} + °--expr: c( [0/0] {1} + ¦--expr: c [0/0] {3} ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {2} ¦--'(': ( [0/4] {4} - ¦--expr: [0/0] {6} + ¦--expr: 1 [0/0] {6} ¦ °--NUM_CONST: 1 [0/0] {5} ¦--',': , [0/7] {7} - ¦--expr: [0/4] {9} + ¦--expr: 16 [0/4] {9} ¦ °--NUM_CONST: 16 [0/0] {8} ¦--',': , [0/1] {10} - ¦--expr: [0/1] {12} + ¦--expr: 333 [0/1] {12} ¦ °--NUM_CONST: 333 [0/0] {11} ¦--',': , [0/1] {13} - ¦--expr: [0/1] {15} + ¦--expr: 33 [0/1] {15} ¦ °--NUM_CONST: 33 [0/0] {14} ¦--',': , [0/2] {16} - ¦--expr: [0/0] {18} + ¦--expr: 1 [0/0] {18} ¦ °--NUM_CONST: 1 [0/0] {17} °--')': ) [0/0] {19} diff --git a/tests/testthat/spacing/spacing_comma2-in_tree b/tests/testthat/spacing/spacing_comma2-in_tree index f06d8a13d..1b7af56ef 100644 --- a/tests/testthat/spacing/spacing_comma2-in_tree +++ b/tests/testthat/spacing/spacing_comma2-in_tree @@ -1,23 +1,23 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/0] {3} + ¦--expr: call( [0/0] {1} + ¦ ¦--expr: call [0/0] {3} ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {2} ¦ ¦--'(': ( [0/0] {4} - ¦ ¦--expr: [0/1] {6} + ¦ ¦--expr: arg [0/1] {6} ¦ ¦ °--SYMBOL: arg [0/0] {5} ¦ ¦--',': , [0/0] {7} ¦ ¦--',': , [0/0] {8} - ¦ ¦--expr: [0/0] {10} + ¦ ¦--expr: more_ [0/0] {10} ¦ ¦ °--SYMBOL: more_ [0/0] {9} ¦ °--')': ) [0/0] {11} - °--expr: [1/0] {12} - ¦--expr: [0/0] {14} + °--expr: a[ , [1/0] {12} + ¦--expr: a [0/0] {14} ¦ °--SYMBOL: a [0/0] {13} ¦--'[': [ [0/1] {15} ¦--',': , [0/1] {16} ¦--',': , [0/1] {17} ¦--SYMBOL_SUB: drop [0/1] {18} ¦--EQ_SUB: = [0/1] {19} - ¦--expr: [0/0] {21} + ¦--expr: FALSE [0/0] {21} ¦ °--NUM_CONST: FALSE [0/0] {20} °--']': ] [0/0] {22} diff --git a/tests/testthat/spacing/spacing_function-in_tree b/tests/testthat/spacing/spacing_function-in_tree index 451713d0b..bebe6be90 100644 --- a/tests/testthat/spacing/spacing_function-in_tree +++ b/tests/testthat/spacing/spacing_function-in_tree @@ -1,5 +1,5 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} + °--expr: funct [0/0] {1} ¦--FUNCTION: funct [0/2] {2} ¦--'(': ( [0/0] {3} ¦--SYMBOL_FORMALS: x [0/0] {4} @@ -8,12 +8,13 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) ¦--',': , [0/1] {7} ¦--SYMBOL_FORMALS: z [0/0] {8} ¦--')': ) [0/1] {9} - °--expr: [0/0] {10} + °--expr: { + 3 [0/0] {10} ¦--'{': { [0/2] {11} - ¦--expr: [1/0] {12} - ¦ ¦--expr: [0/1] {14} + ¦--expr: 3 + 1 [1/0] {12} + ¦ ¦--expr: 3 [0/1] {14} ¦ ¦ °--NUM_CONST: 3 [0/0] {13} ¦ ¦--'+': + [0/1] {15} - ¦ °--expr: [0/0] {17} + ¦ °--expr: 1 [0/0] {17} ¦ °--NUM_CONST: 1 [0/0] {16} °--'}': } [1/0] {18} diff --git a/tests/testthat/spacing/spacing_if-in_tree b/tests/testthat/spacing/spacing_if-in_tree index db33c3464..4e4325d2e 100644 --- a/tests/testthat/spacing/spacing_if-in_tree +++ b/tests/testthat/spacing/spacing_if-in_tree @@ -1,12 +1,12 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} + °--expr: if(TR [0/0] {1} ¦--IF: if [0/0] {2} ¦--'(': ( [0/0] {3} - ¦--expr: [0/0] {5} + ¦--expr: TRUE [0/0] {5} ¦ °--NUM_CONST: TRUE [0/0] {4} ¦--')': ) [0/1] {6} - ¦--expr: [0/1] {8} + ¦--expr: x [0/1] {8} ¦ °--SYMBOL: x [0/0] {7} ¦--ELSE: else [0/1] {9} - °--expr: [0/0] {11} + °--expr: y [0/0] {11} °--SYMBOL: y [0/0] {10} diff --git a/tests/testthat/spacing/spacing_in-in_tree b/tests/testthat/spacing/spacing_in-in_tree index a3d8f1cfc..380ead73e 100644 --- 
a/tests/testthat/spacing/spacing_in-in_tree +++ b/tests/testthat/spacing/spacing_in-in_tree @@ -1,12 +1,12 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} + °--expr: for ( [0/0] {1} ¦--FOR: for [0/1] {2} - ¦--forcond: [0/1] {3} + ¦--forcond: (i [0/1] {3} ¦ ¦--'(': ( [0/0] {4} ¦ ¦--SYMBOL: i [0/5] {5} ¦ ¦--IN: in [0/5] {6} - ¦ ¦--expr: [0/0] {8} + ¦ ¦--expr: 3 [0/0] {8} ¦ ¦ °--NUM_CONST: 3 [0/0] {7} ¦ °--')': ) [0/0] {9} - °--expr: [0/0] {11} + °--expr: 3 [0/0] {11} °--NUM_CONST: 3 [0/0] {10} diff --git a/tests/testthat/start_line/comment-in_tree b/tests/testthat/start_line/comment-in_tree index ba6b6b8f4..32d1dfd9d 100644 --- a/tests/testthat/start_line/comment-in_tree +++ b/tests/testthat/start_line/comment-in_tree @@ -1,16 +1,17 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) ¦--COMMENT: # a c [0/0] {1} - °--expr: [1/0] {2} - ¦--expr: [0/1] {4} + °--expr: a <- [1/0] {2} + ¦--expr: a [0/1] {4} ¦ °--SYMBOL: a [0/0] {3} ¦--LEFT_ASSIGN: <- [0/1] {5} - °--expr: [0/0] {6} + °--expr: funct [0/0] {6} ¦--FUNCTION: funct [0/0] {7} ¦--'(': ( [0/0] {8} ¦--SYMBOL_FORMALS: x [0/0] {9} ¦--')': ) [0/1] {10} - °--expr: [0/0] {11} + °--expr: { + x [0/0] {11} ¦--'{': { [0/2] {12} - ¦--expr: [1/0] {14} + ¦--expr: x [1/0] {14} ¦ °--SYMBOL: x [0/0] {13} °--'}': } [1/0] {15} diff --git a/tests/testthat/start_line/comment-out.R b/tests/testthat/start_line/comment-out.R index 93ffe9388..abcdeb35e 100644 --- a/tests/testthat/start_line/comment-out.R +++ b/tests/testthat/start_line/comment-out.R @@ -1,8 +1,3 @@ - - - - - # a comment a <- function(x) { x diff --git a/tests/testthat/start_line/no_comment-in_tree b/tests/testthat/start_line/no_comment-in_tree index 05348a46a..d10990f55 100644 --- a/tests/testthat/start_line/no_comment-in_tree +++ b/tests/testthat/start_line/no_comment-in_tree @@ -1,15 +1,16 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/1] {3} + °--expr: a <- [0/0] {1} + ¦--expr: a [0/1] {3} ¦ °--SYMBOL: a [0/0] {2} ¦--LEFT_ASSIGN: <- [0/1] {4} - °--expr: [0/0] {5} + °--expr: funct [0/0] {5} ¦--FUNCTION: funct [0/0] {6} ¦--'(': ( [0/0] {7} ¦--SYMBOL_FORMALS: x [0/0] {8} ¦--')': ) [0/1] {9} - °--expr: [0/0] {10} + °--expr: { + x [0/0] {10} ¦--'{': { [0/2] {11} - ¦--expr: [1/0] {13} + ¦--expr: x [1/0] {13} ¦ °--SYMBOL: x [0/0] {12} °--'}': } [1/0] {14} diff --git a/tests/testthat/start_line/no_comment-out.R b/tests/testthat/start_line/no_comment-out.R index 940ca4cf2..b32a8442e 100644 --- a/tests/testthat/start_line/no_comment-out.R +++ b/tests/testthat/start_line/no_comment-out.R @@ -1,8 +1,3 @@ - - - - - a <- function(x) { x } diff --git a/tests/testthat/strict/eof-in_tree b/tests/testthat/strict/eof-in_tree index 6a99e26d0..a95a7929c 100644 --- a/tests/testthat/strict/eof-in_tree +++ b/tests/testthat/strict/eof-in_tree @@ -1,3 +1,3 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {2} + °--expr: blabl [0/0] {2} °--SYMBOL: blabl [0/0] {1} diff --git a/tests/testthat/strict/eol-in_tree b/tests/testthat/strict/eol-in_tree index 949dea22a..fd1fd103d 100644 --- a/tests/testthat/strict/eol-in_tree +++ b/tests/testthat/strict/eol-in_tree @@ -1,9 +1,9 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/0] {3} + ¦--expr: a() [0/0] {1} + ¦ ¦--expr: a [0/0] {3} ¦ ¦ °--SYMBOL_FUNCTION_CALL: a [0/0] {2} ¦ ¦--'(': ( [0/0] {4} ¦ °--')': ) [0/0] {5} - ¦--expr: [1/1] {7} + ¦--expr: b [1/1] {7} ¦ °--SYMBOL: b [0/0] {6} °--COMMENT: # com [0/0] {8} diff --git 
a/tests/testthat/strict/non_strict-in.R b/tests/testthat/strict/non_strict-in.R index 339774f86..bd4895f69 100644 --- a/tests/testthat/strict/non_strict-in.R +++ b/tests/testthat/strict/non_strict-in.R @@ -4,6 +4,23 @@ test <- function() { 'even if the string contains an escaped \' single quote' 'but not if it contains a "double quote' + "multi-line quotes + remain multi-line + " + + 'That also holds true + if + single quotes are used + .' + + 'strings with embedded\nline breaks are handled correctly' + + '\\' + '\\\'' + '\\\\' + '\\\\\'' + '\'\\\\\'' + # Comments are always preserved function_calls(get_spaces=around_equal) @@ -140,3 +157,9 @@ lm(a~b+c,data=NA) lm(a~.-1,data=NA) a~b:c a~b :c +a ~ b : c + +~ a +~gg +b~ k +call(1,~ qq) diff --git a/tests/testthat/strict/non_strict-in_tree b/tests/testthat/strict/non_strict-in_tree index 074d54208..2caa97471 100644 --- a/tests/testthat/strict/non_strict-in_tree +++ b/tests/testthat/strict/non_strict-in_tree @@ -1,700 +1,763 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/1] {3} + ¦--expr: test [0/0] {1} + ¦ ¦--expr: test [0/1] {3} ¦ ¦ °--SYMBOL: test [0/0] {2} ¦ ¦--LEFT_ASSIGN: <- [0/1] {4} - ¦ °--expr: [0/0] {5} + ¦ °--expr: funct [0/0] {5} ¦ ¦--FUNCTION: funct [0/0] {6} ¦ ¦--'(': ( [0/0] {7} ¦ ¦--')': ) [0/1] {8} - ¦ °--expr: [0/0] {9} + ¦ °--expr: { + " [0/0] {9} ¦ ¦--'{': { [0/2] {10} - ¦ ¦--expr: [1/2] {12} + ¦ ¦--expr: "Doub [1/2] {12} ¦ ¦ °--STR_CONST: "Doub [0/0] {11} - ¦ ¦--expr: [1/2] {14} + ¦ ¦--expr: 'Sing [1/2] {14} ¦ ¦ °--STR_CONST: 'Sing [0/0] {13} - ¦ ¦--expr: [1/2] {16} + ¦ ¦--expr: 'even [1/2] {16} ¦ ¦ °--STR_CONST: 'even [0/0] {15} - ¦ ¦--expr: [1/2] {18} + ¦ ¦--expr: 'but [1/2] {18} ¦ ¦ °--STR_CONST: 'but [0/0] {17} - ¦ ¦--COMMENT: # Com [2/2] {19} - ¦ ¦--expr: [2/2] {20} - ¦ ¦ ¦--expr: [0/0] {22} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: funct [0/0] {21} - ¦ ¦ ¦--'(': ( [0/0] {23} - ¦ ¦ ¦--SYMBOL_SUB: get_s [0/0] {24} - ¦ ¦ ¦--EQ_SUB: = [0/0] {25} - ¦ ¦ ¦--expr: [0/0] {27} - ¦ ¦ ¦ °--SYMBOL: aroun [0/0] {26} - ¦ ¦ °--')': ) [0/0] {28} - ¦ ¦--expr: [2/2] {29} - ¦ ¦ ¦--expr: [0/0] {31} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: no_sp [0/0] {30} - ¦ ¦ ¦--'(': ( [0/1] {32} - ¦ ¦ ¦--expr: [0/0] {33} - ¦ ¦ ¦ ¦--expr: [0/0] {35} - ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: after [0/0] {34} - ¦ ¦ ¦ ¦--'(': ( [0/1] {36} - ¦ ¦ ¦ °--')': ) [0/0] {37} - ¦ ¦ ¦--',': , [0/1] {38} - ¦ ¦ ¦--expr: [0/0] {39} - ¦ ¦ ¦ ¦--expr: [0/0] {41} - ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: paren [0/0] {40} - ¦ ¦ ¦ ¦--'(': ( [0/1] {42} - ¦ ¦ ¦ ¦--expr: [0/0] {43} - ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {44} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {45} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/1] {47} - ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {46} - ¦ ¦ ¦ ¦ ¦ ¦--'+': + [0/1] {48} - ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {50} - ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {49} - ¦ ¦ ¦ ¦ °--')': ) [0/0] {51} - ¦ ¦ ¦ °--')': ) [0/0] {52} - ¦ ¦ °--')': ) [0/0] {53} - ¦ ¦--expr: [1/2] {54} - ¦ ¦ ¦--expr: [0/1] {56} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: no_sp [0/0] {55} - ¦ ¦ ¦--'(': ( [0/0] {57} - ¦ ¦ ¦--expr: [0/0] {58} - ¦ ¦ ¦ ¦--expr: [0/1] {60} - ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: befor [0/0] {59} - ¦ ¦ ¦ ¦--'(': ( [0/0] {61} - ¦ ¦ ¦ °--')': ) [0/0] {62} - ¦ ¦ ¦--',': , [0/1] {63} - ¦ ¦ ¦--expr: [0/0] {64} - ¦ ¦ ¦ ¦--expr: [0/1] {66} - ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: paren [0/0] {65} - ¦ ¦ ¦ ¦--'(': ( [0/1] {67} - ¦ ¦ ¦ ¦--expr: [0/0] {68} - ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {69} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {70} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/1] {72} - ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {71} - ¦ ¦ ¦ ¦ ¦ ¦--'+': + [0/1] {73} - ¦ ¦ ¦ ¦ ¦ °--expr: 
[0/0] {75} - ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {74} - ¦ ¦ ¦ ¦ °--')': ) [0/0] {76} - ¦ ¦ ¦ °--')': ) [0/0] {77} - ¦ ¦ °--')': ) [0/0] {78} - ¦ ¦--expr: [1/2] {79} - ¦ ¦ ¦--expr: [0/0] {81} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: no_sp [0/0] {80} - ¦ ¦ ¦--'(': ( [0/0] {82} - ¦ ¦ ¦--expr: [0/0] {83} - ¦ ¦ ¦ ¦--expr: [0/0] {85} - ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: befor [0/0] {84} - ¦ ¦ ¦ ¦--'(': ( [0/0] {86} - ¦ ¦ ¦ ¦--expr: [0/1] {88} - ¦ ¦ ¦ ¦ °--SYMBOL: closi [0/0] {87} - ¦ ¦ ¦ °--')': ) [0/0] {89} - ¦ ¦ ¦--',': , [0/1] {90} - ¦ ¦ ¦--expr: [0/1] {91} - ¦ ¦ ¦ ¦--expr: [0/0] {93} - ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: paren [0/0] {92} - ¦ ¦ ¦ ¦--'(': ( [0/0] {94} - ¦ ¦ ¦ ¦--expr: [0/1] {95} - ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {96} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {97} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/1] {99} - ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {98} - ¦ ¦ ¦ ¦ ¦ ¦--'+': + [0/1] {100} - ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {102} - ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {101} - ¦ ¦ ¦ ¦ °--')': ) [0/0] {103} - ¦ ¦ ¦ °--')': ) [0/0] {104} - ¦ ¦ °--')': ) [0/0] {105} - ¦ ¦--expr: [1/2] {106} - ¦ ¦ ¦--expr: [0/0] {108} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: multi [0/0] {107} - ¦ ¦ ¦--'(': ( [0/4] {109} - ¦ ¦ ¦--expr: [1/0] {111} - ¦ ¦ ¦ °--SYMBOL: line [0/0] {110} - ¦ ¦ ¦--',': , [0/4] {112} - ¦ ¦ ¦--expr: [1/2] {114} - ¦ ¦ ¦ °--SYMBOL: call [0/0] {113} - ¦ ¦ °--')': ) [1/0] {115} - ¦ ¦--expr: [1/2] {116} - ¦ ¦ ¦--expr: [0/0] {118} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: multi [0/0] {117} - ¦ ¦ ¦--'(': ( [0/2] {119} - ¦ ¦ °--')': ) [1/0] {120} - ¦ ¦--expr: [2/2] {121} - ¦ ¦ ¦--expr: [0/0] {123} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: one_s [0/0] {122} - ¦ ¦ ¦--'(': ( [0/0] {124} - ¦ ¦ ¦--expr: [0/0] {126} - ¦ ¦ ¦ °--SYMBOL: after [0/0] {125} - ¦ ¦ ¦--',': , [0/0] {127} - ¦ ¦ ¦--expr: [0/0] {128} - ¦ ¦ ¦ ¦--expr: [0/0] {130} - ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: comma [0/0] {129} - ¦ ¦ ¦ ¦--'(': ( [0/0] {131} - ¦ ¦ ¦ ¦--expr: [0/0] {133} - ¦ ¦ ¦ ¦ °--STR_CONST: "in" [0/0] {132} - ¦ ¦ ¦ ¦--',': , [0/0] {134} - ¦ ¦ ¦ ¦--expr: [0/0] {136} - ¦ ¦ ¦ ¦ °--STR_CONST: "func [0/0] {135} - ¦ ¦ ¦ ¦--',': , [0/2] {137} - ¦ ¦ ¦ ¦--expr: [0/0] {139} - ¦ ¦ ¦ ¦ °--SYMBOL: args [0/0] {138} - ¦ ¦ ¦ °--')': ) [0/0] {140} - ¦ ¦ °--')': ) [0/0] {141} - ¦ ¦--expr: [2/2] {142} - ¦ ¦ ¦--'{': { [0/4] {143} - ¦ ¦ ¦--expr: [1/4] {145} - ¦ ¦ ¦ °--SYMBOL: brace [0/0] {144} - ¦ ¦ ¦--expr: [1/2] {147} - ¦ ¦ ¦ °--SYMBOL: expre [0/0] {146} - ¦ ¦ °--'}': } [1/0] {148} - ¦ ¦--expr: [2/2] {149} - ¦ ¦ ¦--expr: [0/0] {151} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: brace [0/0] {150} - ¦ ¦ ¦--'(': ( [0/0] {152} - ¦ ¦ ¦--expr: [0/0] {154} - ¦ ¦ ¦ °--STR_CONST: "unna [0/0] {153} - ¦ ¦ ¦--',': , [0/1] {155} - ¦ ¦ ¦--expr: [0/0] {156} - ¦ ¦ ¦ ¦--'{': { [0/4] {157} - ¦ ¦ ¦ ¦--expr: [1/4] {159} - ¦ ¦ ¦ ¦ °--STR_CONST: "func [0/0] {158} - ¦ ¦ ¦ ¦--expr: [1/2] {161} - ¦ ¦ ¦ ¦ °--SYMBOL: call [0/0] {160} - ¦ ¦ ¦ °--'}': } [1/0] {162} - ¦ ¦ °--')': ) [0/0] {163} - ¦ ¦--expr: [2/2] {164} - ¦ ¦ ¦--expr: [0/0] {166} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: brace [0/0] {165} - ¦ ¦ ¦--'(': ( [0/0] {167} - ¦ ¦ ¦--SYMBOL_SUB: named [0/1] {168} - ¦ ¦ ¦--EQ_SUB: = [0/1] {169} - ¦ ¦ ¦--expr: [0/0] {170} - ¦ ¦ ¦ ¦--'{': { [0/4] {171} - ¦ ¦ ¦ ¦--expr: [1/4] {173} - ¦ ¦ ¦ ¦ °--STR_CONST: "func [0/0] {172} - ¦ ¦ ¦ ¦--expr: [1/2] {175} - ¦ ¦ ¦ ¦ °--SYMBOL: call [0/0] {174} - ¦ ¦ ¦ °--'}': } [1/0] {176} - ¦ ¦ °--')': ) [0/0] {177} - ¦ ¦--expr: [2/2] {178} - ¦ ¦ ¦--expr: [0/0] {180} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: brace [0/0] {179} - ¦ ¦ ¦--'(': ( [0/0] {181} - ¦ ¦ ¦--expr: [0/0] {183} - ¦ ¦ ¦ °--STR_CONST: "unna [0/0] {182} - ¦ ¦ ¦--',': , [0/4] {184} - ¦ ¦ 
¦--expr: [0/0] {185} - ¦ ¦ ¦ ¦--'{': { [0/2] {186} - ¦ ¦ ¦ °--'}': } [1/0] {187} - ¦ ¦ °--')': ) [0/0] {188} - ¦ ¦--expr: [2/2] {189} - ¦ ¦ ¦--expr: [0/0] {191} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: brace [0/0] {190} - ¦ ¦ ¦--'(': ( [0/0] {192} - ¦ ¦ ¦--expr: [0/0] {194} - ¦ ¦ ¦ °--STR_CONST: "unna [0/0] {193} - ¦ ¦ ¦--',': , [0/0] {195} - ¦ ¦ ¦--expr: [0/0] {196} - ¦ ¦ ¦ ¦--'{': { [0/2] {197} - ¦ ¦ ¦ °--'}': } [1/0] {198} - ¦ ¦ °--')': ) [0/0] {199} - ¦ ¦--expr: [2/2] {200} - ¦ ¦ ¦--expr: [0/0] {202} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: brace [0/0] {201} - ¦ ¦ ¦--'(': ( [0/0] {203} - ¦ ¦ ¦--SYMBOL_SUB: named [0/1] {204} - ¦ ¦ ¦--EQ_SUB: = [0/4] {205} - ¦ ¦ ¦--expr: [0/0] {206} - ¦ ¦ ¦ ¦--'{': { [0/2] {207} - ¦ ¦ ¦ °--'}': } [1/0] {208} - ¦ ¦ °--')': ) [0/0] {209} - ¦ ¦--expr: [2/2] {210} - ¦ ¦ ¦--expr: [0/0] {212} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: brace [0/0] {211} - ¦ ¦ ¦--'(': ( [0/0] {213} - ¦ ¦ ¦--SYMBOL_SUB: named [0/1] {214} - ¦ ¦ ¦--EQ_SUB: = [0/4] {215} - ¦ ¦ ¦--expr: [0/0] {216} - ¦ ¦ ¦ ¦--'{': { [0/2] {217} - ¦ ¦ ¦ °--'}': } [1/0] {218} - ¦ ¦ °--')': ) [0/0] {219} - ¦ ¦--expr: [2/2] {220} - ¦ ¦ ¦--expr: [0/0] {222} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: brace [0/0] {221} - ¦ ¦ ¦--'(': ( [0/2] {223} - ¦ ¦ ¦--expr: [0/0] {224} - ¦ ¦ ¦ ¦--'{': { [0/4] {225} - ¦ ¦ ¦ ¦--expr: [1/2] {227} - ¦ ¦ ¦ ¦ °--SYMBOL: empty [0/0] {226} - ¦ ¦ ¦ °--'}': } [1/0] {228} - ¦ ¦ °--')': ) [0/0] {229} - ¦ ¦--expr: [2/2] {230} - ¦ ¦ ¦--expr: [0/0] {232} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {231} - ¦ ¦ ¦--SPECIAL-OTHER: %/% [0/0] {233} - ¦ ¦ °--expr: [0/0] {235} - ¦ ¦ °--SYMBOL: b [0/0] {234} - ¦ ¦--expr: [1/2] {236} - ¦ ¦ ¦--expr: [0/0] {238} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {237} - ¦ ¦ ¦--SPECIAL-OTHER: %% [0/0] {239} - ¦ ¦ °--expr: [0/0] {241} - ¦ ¦ °--SYMBOL: b [0/0] {240} - ¦ ¦--expr: [1/2] {242} - ¦ ¦ ¦--expr: [0/0] {244} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {243} - ¦ ¦ ¦--AND2: && [0/0] {245} - ¦ ¦ °--expr: [0/0] {247} - ¦ ¦ °--SYMBOL: b [0/0] {246} - ¦ ¦--expr: [1/2] {248} - ¦ ¦ ¦--expr: [0/0] {250} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {249} - ¦ ¦ ¦--OR2: || [0/0] {251} - ¦ ¦ °--expr: [0/0] {253} - ¦ ¦ °--SYMBOL: b [0/0] {252} - ¦ ¦--expr: [1/2] {254} - ¦ ¦ ¦--expr: [0/0] {256} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {255} - ¦ ¦ ¦--EQ: == [0/0] {257} - ¦ ¦ °--expr: [0/0] {259} - ¦ ¦ °--SYMBOL: b [0/0] {258} - ¦ ¦--expr: [1/2] {260} - ¦ ¦ ¦--expr: [0/0] {262} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {261} - ¦ ¦ ¦--NE: != [0/0] {263} - ¦ ¦ °--expr: [0/0] {265} - ¦ ¦ °--SYMBOL: b [0/0] {264} - ¦ ¦--expr: [1/2] {266} - ¦ ¦ ¦--expr: [0/0] {268} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {267} - ¦ ¦ ¦--LE: <= [0/0] {269} - ¦ ¦ °--expr: [0/0] {271} - ¦ ¦ °--SYMBOL: b [0/0] {270} - ¦ ¦--expr: [1/2] {272} - ¦ ¦ ¦--expr: [0/0] {274} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {273} - ¦ ¦ ¦--GE: >= [0/0] {275} - ¦ ¦ °--expr: [0/0] {277} - ¦ ¦ °--SYMBOL: b [0/0] {276} - ¦ ¦--expr: [1/2] {278} - ¦ ¦ ¦--expr: [0/0] {280} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {279} - ¦ ¦ ¦--LEFT_ASSIGN: <- [0/0] {281} - ¦ ¦ °--expr: [0/0] {283} - ¦ ¦ °--SYMBOL: b [0/0] {282} - ¦ ¦--expr: [1/2] {284} - ¦ ¦ ¦--expr: [0/0] {286} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {285} - ¦ ¦ ¦--RIGHT_ASSIGN: -> [0/0] {287} - ¦ ¦ °--expr: [0/0] {289} - ¦ ¦ °--SYMBOL: b [0/0] {288} - ¦ ¦--expr: [1/2] {289.9} - ¦ ¦ ¦--expr: [0/0] {291} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {290} - ¦ ¦ ¦--EQ_ASSIGN: = [0/0] {292} - ¦ ¦ °--expr: [0/0] {294} - ¦ ¦ °--SYMBOL: b [0/0] {293} - ¦ ¦--expr: [1/2] {295} - ¦ ¦ ¦--expr: [0/0] {297} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {296} - ¦ ¦ ¦--LT: < [0/0] {298} - ¦ ¦ °--expr: [0/0] {300} - ¦ ¦ °--SYMBOL: b [0/0] {299} - ¦ ¦--expr: [1/2] {301} - ¦ ¦ ¦--expr: [0/0] {303} - 
¦ ¦ ¦ °--SYMBOL: a [0/0] {302} - ¦ ¦ ¦--GT: > [0/0] {304} - ¦ ¦ °--expr: [0/0] {306} - ¦ ¦ °--SYMBOL: b [0/0] {305} - ¦ ¦--expr: [1/2] {307} - ¦ ¦ ¦--expr: [0/0] {309} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {308} - ¦ ¦ ¦--'*': * [0/0] {310} - ¦ ¦ °--expr: [0/0] {312} - ¦ ¦ °--SYMBOL: b [0/0] {311} - ¦ ¦--expr: [1/2] {313} - ¦ ¦ ¦--expr: [0/0] {315} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {314} - ¦ ¦ ¦--'/': / [0/0] {316} - ¦ ¦ °--expr: [0/0] {318} - ¦ ¦ °--SYMBOL: b [0/0] {317} - ¦ ¦--expr: [1/2] {319} - ¦ ¦ ¦--expr: [0/0] {321} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {320} - ¦ ¦ ¦--'^': ^ [0/0] {322} - ¦ ¦ °--expr: [0/0] {324} - ¦ ¦ °--SYMBOL: b [0/0] {323} - ¦ ¦--expr: [1/2] {325} - ¦ ¦ ¦--expr: [0/0] {327} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {326} - ¦ ¦ ¦--AND: & [0/0] {328} - ¦ ¦ °--expr: [0/0] {330} - ¦ ¦ °--SYMBOL: b [0/0] {329} - ¦ ¦--expr: [1/2] {331} - ¦ ¦ ¦--expr: [0/0] {333} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {332} - ¦ ¦ ¦--OR: | [0/0] {334} - ¦ ¦ °--expr: [0/0] {336} - ¦ ¦ °--SYMBOL: b [0/0] {335} - ¦ ¦--expr: [1/2] {337} - ¦ ¦ ¦--expr: [0/0] {339} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {338} - ¦ ¦ ¦--LEFT_ASSIGN: := [0/0] {340} - ¦ ¦ °--expr: [0/0] {342} - ¦ ¦ °--SYMBOL: b [0/0] {341} - ¦ ¦--expr: [2/2] {343} - ¦ ¦ ¦--expr: [0/0] {345} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {344} - ¦ ¦ ¦--'+': + [0/0] {346} - ¦ ¦ °--expr: [0/0] {348} - ¦ ¦ °--SYMBOL: b [0/0] {347} - ¦ ¦--expr: [1/2] {349} - ¦ ¦ ¦--expr: [0/0] {351} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {350} - ¦ ¦ ¦--'-': - [0/0] {352} - ¦ ¦ °--expr: [0/0] {354} - ¦ ¦ °--SYMBOL: b [0/0] {353} - ¦ ¦--expr: [1/2] {355} - ¦ ¦ ¦--expr: [0/0] {357} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {356} - ¦ ¦ ¦--'+': + [0/0] {358} - ¦ ¦ °--expr: [0/0] {359} - ¦ ¦ ¦--'+': + [0/0] {360} - ¦ ¦ °--expr: [0/0] {362} - ¦ ¦ °--SYMBOL: b [0/0] {361} - ¦ ¦--expr: [1/2] {363} - ¦ ¦ ¦--expr: [0/0] {365} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {364} - ¦ ¦ ¦--'+': + [0/0] {366} - ¦ ¦ °--expr: [0/0] {367} - ¦ ¦ ¦--'-': - [0/0] {368} - ¦ ¦ °--expr: [0/0] {370} - ¦ ¦ °--SYMBOL: b [0/0] {369} - ¦ ¦--expr: [1/2] {371} - ¦ ¦ ¦--expr: [0/0] {373} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {372} - ¦ ¦ ¦--'+': + [0/0] {374} - ¦ ¦ °--expr: [0/0] {375} - ¦ ¦ ¦--'+': + [0/0] {376} - ¦ ¦ °--expr: [0/0] {378} - ¦ ¦ °--SYMBOL: b [0/0] {377} - ¦ ¦--expr: [1/2] {379} - ¦ ¦ ¦--expr: [0/0] {381} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {380} - ¦ ¦ ¦--'-': - [0/0] {382} - ¦ ¦ °--expr: [0/0] {383} - ¦ ¦ ¦--'+': + [0/0] {384} - ¦ ¦ °--expr: [0/0] {386} - ¦ ¦ °--SYMBOL: b [0/0] {385} - ¦ ¦--expr: [1/2] {387} - ¦ ¦ ¦--expr: [0/0] {389} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {388} - ¦ ¦ ¦--'-': - [0/0] {390} - ¦ ¦ °--expr: [0/0] {391} - ¦ ¦ ¦--'-': - [0/0] {392} - ¦ ¦ °--expr: [0/0] {394} - ¦ ¦ °--SYMBOL: b [0/0] {393} - ¦ ¦--expr: [1/2] {395} - ¦ ¦ ¦--expr: [0/0] {397} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {396} - ¦ ¦ ¦--'+': + [0/0] {398} - ¦ ¦ °--expr: [0/0] {399} - ¦ ¦ ¦--'-': - [0/0] {400} - ¦ ¦ °--expr: [0/0] {401} - ¦ ¦ ¦--'-': - [0/0] {402} - ¦ ¦ °--expr: [0/0] {404} - ¦ ¦ °--SYMBOL: b [0/0] {403} - ¦ ¦--expr: [1/2] {405} - ¦ ¦ ¦--expr: [0/0] {407} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {406} - ¦ ¦ ¦--'-': - [0/0] {408} - ¦ ¦ °--expr: [0/0] {409} - ¦ ¦ ¦--'-': - [0/0] {410} - ¦ ¦ °--expr: [0/0] {411} - ¦ ¦ ¦--'+': + [0/0] {412} - ¦ ¦ °--expr: [0/0] {414} - ¦ ¦ °--SYMBOL: b [0/0] {413} - ¦ ¦--expr: [1/2] {415} - ¦ ¦ ¦--expr: [0/0] {417} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {416} - ¦ ¦ ¦--'(': ( [0/1] {418} - ¦ ¦ ¦--expr: [0/0] {419} - ¦ ¦ ¦ ¦--'+': + [0/1] {420} - ¦ ¦ ¦ °--expr: [0/0] {422} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {421} - ¦ ¦ °--')': ) [0/0] {423} - ¦ ¦--expr: [1/2] {424} - ¦ ¦ ¦--expr: [0/0] {426} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call 
[0/0] {425} - ¦ ¦ ¦--'(': ( [0/1] {427} - ¦ ¦ ¦--expr: [0/0] {428} - ¦ ¦ ¦ ¦--'-': - [0/1] {429} - ¦ ¦ ¦ °--expr: [0/0] {431} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {430} - ¦ ¦ °--')': ) [0/0] {432} - ¦ ¦--expr: [1/2] {433} - ¦ ¦ ¦--expr: [0/0] {435} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {434} - ¦ ¦ ¦--'(': ( [0/0] {436} - ¦ ¦ ¦--expr: [0/0] {438} - ¦ ¦ ¦ °--NUM_CONST: 5 [0/0] {437} - ¦ ¦ ¦--',': , [0/1] {439} - ¦ ¦ ¦--expr: [0/0] {440} - ¦ ¦ ¦ ¦--'+': + [0/1] {441} - ¦ ¦ ¦ °--expr: [0/0] {443} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {442} - ¦ ¦ °--')': ) [0/0] {444} - ¦ ¦--expr: [1/2] {445} - ¦ ¦ ¦--expr: [0/0] {447} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {446} - ¦ ¦ ¦--'(': ( [0/0] {448} - ¦ ¦ ¦--expr: [0/0] {450} - ¦ ¦ ¦ °--NUM_CONST: 5 [0/0] {449} - ¦ ¦ ¦--',': , [0/1] {451} - ¦ ¦ ¦--expr: [0/0] {452} - ¦ ¦ ¦ ¦--'-': - [0/1] {453} - ¦ ¦ ¦ °--expr: [0/0] {455} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {454} - ¦ ¦ °--')': ) [0/0] {456} - ¦ ¦--COMMENT: # Onl [2/2] {457} - ¦ ¦--expr: [1/2] {458} - ¦ ¦ ¦--expr: [0/0] {460} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {459} - ¦ ¦ ¦--'(': ( [0/4] {461} - ¦ ¦ ¦--expr: [1/0] {463} - ¦ ¦ ¦ °--SYMBOL: prese [0/0] {462} - ¦ ¦ ¦--',': , [0/1] {464} - ¦ ¦ ¦--expr: [0/0] {466} - ¦ ¦ ¦ °--SYMBOL: dista [0/0] {465} - ¦ ¦ ¦--',': , [0/4] {467} - ¦ ¦ ¦--expr: [1/0] {469} - ¦ ¦ ¦ °--SYMBOL: after [0/0] {468} - ¦ ¦ ¦--',': , [0/5] {470} - ¦ ¦ ¦--expr: [0/0] {472} - ¦ ¦ ¦ °--SYMBOL: comma [0/0] {471} - ¦ ¦ ¦--',': , [0/4] {473} - ¦ ¦ ¦--expr: [1/0] {475} - ¦ ¦ ¦ °--SYMBOL: given [0/0] {474} - ¦ ¦ ¦--',': , [0/0] {476} - ¦ ¦ ¦--expr: [0/2] {478} - ¦ ¦ ¦ °--SYMBOL: one [0/0] {477} - ¦ ¦ °--')': ) [1/0] {479} - ¦ ¦--expr: [2/2] {480} - ¦ ¦ ¦--IF: if [0/0] {481} - ¦ ¦ ¦--'(': ( [0/0] {482} - ¦ ¦ ¦--expr: [0/0] {484} - ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {483} - ¦ ¦ ¦--')': ) [0/0] {485} - ¦ ¦ °--expr: [0/0] {486} - ¦ ¦ ¦--'{': { [0/4] {487} - ¦ ¦ ¦--expr: [1/2] {489} - ¦ ¦ ¦ °--NUM_CONST: FALSE [0/0] {488} - ¦ ¦ °--'}': } [1/0] {490} - ¦ ¦--expr: [2/2] {491} - ¦ ¦ ¦--IF: if [0/0] {492} - ¦ ¦ ¦--'(': ( [0/0] {493} - ¦ ¦ ¦--expr: [0/0] {495} - ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {494} - ¦ ¦ ¦--')': ) [0/0] {496} - ¦ ¦ ¦--expr: [0/0] {497} - ¦ ¦ ¦ ¦--'{': { [0/4] {498} - ¦ ¦ ¦ ¦--expr: [1/2] {500} - ¦ ¦ ¦ ¦ °--NUM_CONST: FALSE [0/0] {499} - ¦ ¦ ¦ °--'}': } [1/0] {501} - ¦ ¦ ¦--ELSE: else [0/0] {502} - ¦ ¦ °--expr: [0/0] {503} + ¦ ¦--expr: "mult [2/2] {20} + ¦ ¦ °--STR_CONST: "mult [0/0] {19} + ¦ ¦--expr: 'That [2/2] {22} + ¦ ¦ °--STR_CONST: 'That [0/0] {21} + ¦ ¦--expr: 'stri [2/2] {24} + ¦ ¦ °--STR_CONST: 'stri [0/0] {23} + ¦ ¦--expr: '\\' [2/2] {26} + ¦ ¦ °--STR_CONST: '\\' [0/0] {25} + ¦ ¦--expr: '\\\' [1/2] {28} + ¦ ¦ °--STR_CONST: '\\\' [0/0] {27} + ¦ ¦--expr: '\\\\ [1/2] {30} + ¦ ¦ °--STR_CONST: '\\\\ [0/0] {29} + ¦ ¦--expr: '\\\\ [1/2] {32} + ¦ ¦ °--STR_CONST: '\\\\ [0/0] {31} + ¦ ¦--expr: '\'\\ [1/2] {34} + ¦ ¦ °--STR_CONST: '\'\\ [0/0] {33} + ¦ ¦--COMMENT: # Com [2/2] {35} + ¦ ¦--expr: funct [2/2] {36} + ¦ ¦ ¦--expr: funct [0/0] {38} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: funct [0/0] {37} + ¦ ¦ ¦--'(': ( [0/0] {39} + ¦ ¦ ¦--SYMBOL_SUB: get_s [0/0] {40} + ¦ ¦ ¦--EQ_SUB: = [0/0] {41} + ¦ ¦ ¦--expr: aroun [0/0] {43} + ¦ ¦ ¦ °--SYMBOL: aroun [0/0] {42} + ¦ ¦ °--')': ) [0/0] {44} + ¦ ¦--expr: no_sp [2/2] {45} + ¦ ¦ ¦--expr: no_sp [0/0] {47} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: no_sp [0/0] {46} + ¦ ¦ ¦--'(': ( [0/1] {48} + ¦ ¦ ¦--expr: after [0/0] {49} + ¦ ¦ ¦ ¦--expr: after [0/0] {51} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: after [0/0] {50} + ¦ ¦ ¦ ¦--'(': ( [0/1] {52} + ¦ ¦ ¦ °--')': ) [0/0] {53} + ¦ ¦ ¦--',': , 
[0/1] {54} + ¦ ¦ ¦--expr: paren [0/0] {55} + ¦ ¦ ¦ ¦--expr: paren [0/0] {57} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: paren [0/0] {56} + ¦ ¦ ¦ ¦--'(': ( [0/1] {58} + ¦ ¦ ¦ ¦--expr: (1 + [0/0] {59} + ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {60} + ¦ ¦ ¦ ¦ ¦--expr: 1 + 2 [0/0] {61} + ¦ ¦ ¦ ¦ ¦ ¦--expr: 1 [0/1] {63} + ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {62} + ¦ ¦ ¦ ¦ ¦ ¦--'+': + [0/1] {64} + ¦ ¦ ¦ ¦ ¦ °--expr: 2 [0/0] {66} + ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {65} + ¦ ¦ ¦ ¦ °--')': ) [0/0] {67} + ¦ ¦ ¦ °--')': ) [0/0] {68} + ¦ ¦ °--')': ) [0/0] {69} + ¦ ¦--expr: no_sp [1/2] {70} + ¦ ¦ ¦--expr: no_sp [0/1] {72} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: no_sp [0/0] {71} + ¦ ¦ ¦--'(': ( [0/0] {73} + ¦ ¦ ¦--expr: befor [0/0] {74} + ¦ ¦ ¦ ¦--expr: befor [0/1] {76} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: befor [0/0] {75} + ¦ ¦ ¦ ¦--'(': ( [0/0] {77} + ¦ ¦ ¦ °--')': ) [0/0] {78} + ¦ ¦ ¦--',': , [0/1] {79} + ¦ ¦ ¦--expr: paren [0/0] {80} + ¦ ¦ ¦ ¦--expr: paren [0/1] {82} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: paren [0/0] {81} + ¦ ¦ ¦ ¦--'(': ( [0/1] {83} + ¦ ¦ ¦ ¦--expr: (1 + [0/0] {84} + ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {85} + ¦ ¦ ¦ ¦ ¦--expr: 1 + 2 [0/0] {86} + ¦ ¦ ¦ ¦ ¦ ¦--expr: 1 [0/1] {88} + ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {87} + ¦ ¦ ¦ ¦ ¦ ¦--'+': + [0/1] {89} + ¦ ¦ ¦ ¦ ¦ °--expr: 2 [0/0] {91} + ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {90} + ¦ ¦ ¦ ¦ °--')': ) [0/0] {92} + ¦ ¦ ¦ °--')': ) [0/0] {93} + ¦ ¦ °--')': ) [0/0] {94} + ¦ ¦--expr: no_sp [1/2] {95} + ¦ ¦ ¦--expr: no_sp [0/0] {97} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: no_sp [0/0] {96} + ¦ ¦ ¦--'(': ( [0/0] {98} + ¦ ¦ ¦--expr: befor [0/0] {99} + ¦ ¦ ¦ ¦--expr: befor [0/0] {101} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: befor [0/0] {100} + ¦ ¦ ¦ ¦--'(': ( [0/0] {102} + ¦ ¦ ¦ ¦--expr: closi [0/1] {104} + ¦ ¦ ¦ ¦ °--SYMBOL: closi [0/0] {103} + ¦ ¦ ¦ °--')': ) [0/0] {105} + ¦ ¦ ¦--',': , [0/1] {106} + ¦ ¦ ¦--expr: paren [0/1] {107} + ¦ ¦ ¦ ¦--expr: paren [0/0] {109} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: paren [0/0] {108} + ¦ ¦ ¦ ¦--'(': ( [0/0] {110} + ¦ ¦ ¦ ¦--expr: (1 + [0/1] {111} + ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {112} + ¦ ¦ ¦ ¦ ¦--expr: 1 + 2 [0/0] {113} + ¦ ¦ ¦ ¦ ¦ ¦--expr: 1 [0/1] {115} + ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {114} + ¦ ¦ ¦ ¦ ¦ ¦--'+': + [0/1] {116} + ¦ ¦ ¦ ¦ ¦ °--expr: 2 [0/0] {118} + ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {117} + ¦ ¦ ¦ ¦ °--')': ) [0/0] {119} + ¦ ¦ ¦ °--')': ) [0/0] {120} + ¦ ¦ °--')': ) [0/0] {121} + ¦ ¦--expr: multi [1/2] {122} + ¦ ¦ ¦--expr: multi [0/0] {124} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: multi [0/0] {123} + ¦ ¦ ¦--'(': ( [0/4] {125} + ¦ ¦ ¦--expr: line [1/0] {127} + ¦ ¦ ¦ °--SYMBOL: line [0/0] {126} + ¦ ¦ ¦--',': , [0/4] {128} + ¦ ¦ ¦--expr: call [1/2] {130} + ¦ ¦ ¦ °--SYMBOL: call [0/0] {129} + ¦ ¦ °--')': ) [1/0] {131} + ¦ ¦--expr: multi [1/2] {132} + ¦ ¦ ¦--expr: multi [0/0] {134} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: multi [0/0] {133} + ¦ ¦ ¦--'(': ( [0/2] {135} + ¦ ¦ °--')': ) [1/0] {136} + ¦ ¦--expr: one_s [2/2] {137} + ¦ ¦ ¦--expr: one_s [0/0] {139} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: one_s [0/0] {138} + ¦ ¦ ¦--'(': ( [0/0] {140} + ¦ ¦ ¦--expr: after [0/0] {142} + ¦ ¦ ¦ °--SYMBOL: after [0/0] {141} + ¦ ¦ ¦--',': , [0/0] {143} + ¦ ¦ ¦--expr: comma [0/0] {144} + ¦ ¦ ¦ ¦--expr: comma [0/0] {146} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: comma [0/0] {145} + ¦ ¦ ¦ ¦--'(': ( [0/0] {147} + ¦ ¦ ¦ ¦--expr: "in" [0/0] {149} + ¦ ¦ ¦ ¦ °--STR_CONST: "in" [0/0] {148} + ¦ ¦ ¦ ¦--',': , [0/0] {150} + ¦ ¦ ¦ ¦--expr: "func [0/0] {152} + ¦ ¦ ¦ ¦ °--STR_CONST: "func [0/0] {151} + ¦ ¦ ¦ ¦--',': , [0/2] {153} + ¦ ¦ ¦ ¦--expr: args [0/0] {155} + ¦ ¦ ¦ ¦ °--SYMBOL: args [0/0] {154} + ¦ ¦ ¦ °--')': ) [0/0] {156} + ¦ ¦ 
°--')': ) [0/0] {157} + ¦ ¦--expr: { + [2/2] {158} + ¦ ¦ ¦--'{': { [0/4] {159} + ¦ ¦ ¦--expr: brace [1/4] {161} + ¦ ¦ ¦ °--SYMBOL: brace [0/0] {160} + ¦ ¦ ¦--expr: expre [1/2] {163} + ¦ ¦ ¦ °--SYMBOL: expre [0/0] {162} + ¦ ¦ °--'}': } [1/0] {164} + ¦ ¦--expr: brace [2/2] {165} + ¦ ¦ ¦--expr: brace [0/0] {167} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: brace [0/0] {166} + ¦ ¦ ¦--'(': ( [0/0] {168} + ¦ ¦ ¦--expr: "unna [0/0] {170} + ¦ ¦ ¦ °--STR_CONST: "unna [0/0] {169} + ¦ ¦ ¦--',': , [0/1] {171} + ¦ ¦ ¦--expr: { + [0/0] {172} + ¦ ¦ ¦ ¦--'{': { [0/4] {173} + ¦ ¦ ¦ ¦--expr: "func [1/4] {175} + ¦ ¦ ¦ ¦ °--STR_CONST: "func [0/0] {174} + ¦ ¦ ¦ ¦--expr: call [1/2] {177} + ¦ ¦ ¦ ¦ °--SYMBOL: call [0/0] {176} + ¦ ¦ ¦ °--'}': } [1/0] {178} + ¦ ¦ °--')': ) [0/0] {179} + ¦ ¦--expr: brace [2/2] {180} + ¦ ¦ ¦--expr: brace [0/0] {182} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: brace [0/0] {181} + ¦ ¦ ¦--'(': ( [0/0] {183} + ¦ ¦ ¦--SYMBOL_SUB: named [0/1] {184} + ¦ ¦ ¦--EQ_SUB: = [0/1] {185} + ¦ ¦ ¦--expr: { + [0/0] {186} + ¦ ¦ ¦ ¦--'{': { [0/4] {187} + ¦ ¦ ¦ ¦--expr: "func [1/4] {189} + ¦ ¦ ¦ ¦ °--STR_CONST: "func [0/0] {188} + ¦ ¦ ¦ ¦--expr: call [1/2] {191} + ¦ ¦ ¦ ¦ °--SYMBOL: call [0/0] {190} + ¦ ¦ ¦ °--'}': } [1/0] {192} + ¦ ¦ °--')': ) [0/0] {193} + ¦ ¦--expr: brace [2/2] {194} + ¦ ¦ ¦--expr: brace [0/0] {196} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: brace [0/0] {195} + ¦ ¦ ¦--'(': ( [0/0] {197} + ¦ ¦ ¦--expr: "unna [0/0] {199} + ¦ ¦ ¦ °--STR_CONST: "unna [0/0] {198} + ¦ ¦ ¦--',': , [0/4] {200} + ¦ ¦ ¦--expr: { + } [0/0] {201} + ¦ ¦ ¦ ¦--'{': { [0/2] {202} + ¦ ¦ ¦ °--'}': } [1/0] {203} + ¦ ¦ °--')': ) [0/0] {204} + ¦ ¦--expr: brace [2/2] {205} + ¦ ¦ ¦--expr: brace [0/0] {207} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: brace [0/0] {206} + ¦ ¦ ¦--'(': ( [0/0] {208} + ¦ ¦ ¦--expr: "unna [0/0] {210} + ¦ ¦ ¦ °--STR_CONST: "unna [0/0] {209} + ¦ ¦ ¦--',': , [0/0] {211} + ¦ ¦ ¦--expr: { + } [0/0] {212} + ¦ ¦ ¦ ¦--'{': { [0/2] {213} + ¦ ¦ ¦ °--'}': } [1/0] {214} + ¦ ¦ °--')': ) [0/0] {215} + ¦ ¦--expr: brace [2/2] {216} + ¦ ¦ ¦--expr: brace [0/0] {218} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: brace [0/0] {217} + ¦ ¦ ¦--'(': ( [0/0] {219} + ¦ ¦ ¦--SYMBOL_SUB: named [0/1] {220} + ¦ ¦ ¦--EQ_SUB: = [0/4] {221} + ¦ ¦ ¦--expr: { + } [0/0] {222} + ¦ ¦ ¦ ¦--'{': { [0/2] {223} + ¦ ¦ ¦ °--'}': } [1/0] {224} + ¦ ¦ °--')': ) [0/0] {225} + ¦ ¦--expr: brace [2/2] {226} + ¦ ¦ ¦--expr: brace [0/0] {228} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: brace [0/0] {227} + ¦ ¦ ¦--'(': ( [0/0] {229} + ¦ ¦ ¦--SYMBOL_SUB: named [0/1] {230} + ¦ ¦ ¦--EQ_SUB: = [0/4] {231} + ¦ ¦ ¦--expr: { + } [0/0] {232} + ¦ ¦ ¦ ¦--'{': { [0/2] {233} + ¦ ¦ ¦ °--'}': } [1/0] {234} + ¦ ¦ °--')': ) [0/0] {235} + ¦ ¦--expr: brace [2/2] {236} + ¦ ¦ ¦--expr: brace [0/0] {238} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: brace [0/0] {237} + ¦ ¦ ¦--'(': ( [0/2] {239} + ¦ ¦ ¦--expr: { + [0/0] {240} + ¦ ¦ ¦ ¦--'{': { [0/4] {241} + ¦ ¦ ¦ ¦--expr: empty [1/2] {243} + ¦ ¦ ¦ ¦ °--SYMBOL: empty [0/0] {242} + ¦ ¦ ¦ °--'}': } [1/0] {244} + ¦ ¦ °--')': ) [0/0] {245} + ¦ ¦--expr: a%/%b [2/2] {246} + ¦ ¦ ¦--expr: a [0/0] {248} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {247} + ¦ ¦ ¦--SPECIAL-OTHER: %/% [0/0] {249} + ¦ ¦ °--expr: b [0/0] {251} + ¦ ¦ °--SYMBOL: b [0/0] {250} + ¦ ¦--expr: a%%b [1/2] {252} + ¦ ¦ ¦--expr: a [0/0] {254} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {253} + ¦ ¦ ¦--SPECIAL-OTHER: %% [0/0] {255} + ¦ ¦ °--expr: b [0/0] {257} + ¦ ¦ °--SYMBOL: b [0/0] {256} + ¦ ¦--expr: a&&b [1/2] {258} + ¦ ¦ ¦--expr: a [0/0] {260} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {259} + ¦ ¦ ¦--AND2: && [0/0] {261} + ¦ ¦ °--expr: b [0/0] {263} + ¦ ¦ °--SYMBOL: b [0/0] {262} + ¦ ¦--expr: 
a||b [1/2] {264} + ¦ ¦ ¦--expr: a [0/0] {266} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {265} + ¦ ¦ ¦--OR2: || [0/0] {267} + ¦ ¦ °--expr: b [0/0] {269} + ¦ ¦ °--SYMBOL: b [0/0] {268} + ¦ ¦--expr: a==b [1/2] {270} + ¦ ¦ ¦--expr: a [0/0] {272} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {271} + ¦ ¦ ¦--EQ: == [0/0] {273} + ¦ ¦ °--expr: b [0/0] {275} + ¦ ¦ °--SYMBOL: b [0/0] {274} + ¦ ¦--expr: a!=b [1/2] {276} + ¦ ¦ ¦--expr: a [0/0] {278} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {277} + ¦ ¦ ¦--NE: != [0/0] {279} + ¦ ¦ °--expr: b [0/0] {281} + ¦ ¦ °--SYMBOL: b [0/0] {280} + ¦ ¦--expr: a<=b [1/2] {282} + ¦ ¦ ¦--expr: a [0/0] {284} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {283} + ¦ ¦ ¦--LE: <= [0/0] {285} + ¦ ¦ °--expr: b [0/0] {287} + ¦ ¦ °--SYMBOL: b [0/0] {286} + ¦ ¦--expr: a>=b [1/2] {288} + ¦ ¦ ¦--expr: a [0/0] {290} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {289} + ¦ ¦ ¦--GE: >= [0/0] {291} + ¦ ¦ °--expr: b [0/0] {293} + ¦ ¦ °--SYMBOL: b [0/0] {292} + ¦ ¦--expr: a<-b [1/2] {294} + ¦ ¦ ¦--expr: a [0/0] {296} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {295} + ¦ ¦ ¦--LEFT_ASSIGN: <- [0/0] {297} + ¦ ¦ °--expr: b [0/0] {299} + ¦ ¦ °--SYMBOL: b [0/0] {298} + ¦ ¦--expr: a->b [1/2] {300} + ¦ ¦ ¦--expr: a [0/0] {302} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {301} + ¦ ¦ ¦--RIGHT_ASSIGN: -> [0/0] {303} + ¦ ¦ °--expr: b [0/0] {305} + ¦ ¦ °--SYMBOL: b [0/0] {304} + ¦ ¦--expr_or_assign_or_help: a=b [1/2] {306} + ¦ ¦ ¦--expr: a [0/0] {308} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {307} + ¦ ¦ ¦--EQ_ASSIGN: = [0/0] {309} + ¦ ¦ °--expr: b [0/0] {311} + ¦ ¦ °--SYMBOL: b [0/0] {310} + ¦ ¦--expr: a<b [1/2] {312} + ¦ ¦ ¦--expr: a [0/0] {314} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {313} + ¦ ¦ ¦--LT: < [0/0] {315} + ¦ ¦ °--expr: b [0/0] {317} + ¦ ¦ °--SYMBOL: b [0/0] {316} + ¦ ¦--expr: a>b [1/2] {318} + ¦ ¦ ¦--expr: a [0/0] {320} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {319} + ¦ ¦ ¦--GT: > [0/0] {321} + ¦ ¦ °--expr: b [0/0] {323} + ¦ ¦ °--SYMBOL: b [0/0] {322} + ¦ ¦--expr: a*b [1/2] {324} + ¦ ¦ ¦--expr: a [0/0] {326} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {325} + ¦ ¦ ¦--'*': * [0/0] {327} + ¦ ¦ °--expr: b [0/0] {329} + ¦ ¦ °--SYMBOL: b [0/0] {328} + ¦ ¦--expr: a/b [1/2] {330} + ¦ ¦ ¦--expr: a [0/0] {332} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {331} + ¦ ¦ ¦--'/': / [0/0] {333} + ¦ ¦ °--expr: b [0/0] {335} + ¦ ¦ °--SYMBOL: b [0/0] {334} + ¦ ¦--expr: a^b [1/2] {336} + ¦ ¦ ¦--expr: a [0/0] {338} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {337} + ¦ ¦ ¦--'^': ^ [0/0] {339} + ¦ ¦ °--expr: b [0/0] {341} + ¦ ¦ °--SYMBOL: b [0/0] {340} + ¦ ¦--expr: a&b [1/2] {342} + ¦ ¦ ¦--expr: a [0/0] {344} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {343} + ¦ ¦ ¦--AND: & [0/0] {345} + ¦ ¦ °--expr: b [0/0] {347} + ¦ ¦ °--SYMBOL: b [0/0] {346} + ¦ ¦--expr: a|b [1/2] {348} + ¦ ¦ ¦--expr: a [0/0] {350} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {349} + ¦ ¦ ¦--OR: | [0/0] {351} + ¦ ¦ °--expr: b [0/0] {353} + ¦ ¦ °--SYMBOL: b [0/0] {352} + ¦ ¦--expr: a:=b [1/2] {354} + ¦ ¦ ¦--expr: a [0/0] {356} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {355} + ¦ ¦ ¦--LEFT_ASSIGN: := [0/0] {357} + ¦ ¦ °--expr: b [0/0] {359} + ¦ ¦ °--SYMBOL: b [0/0] {358} + ¦ ¦--expr: a+b [2/2] {360} + ¦ ¦ ¦--expr: a [0/0] {362} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {361} + ¦ ¦ ¦--'+': + [0/0] {363} + ¦ ¦ °--expr: b [0/0] {365} + ¦ ¦ °--SYMBOL: b [0/0] {364} + ¦ ¦--expr: a-b [1/2] {366} + ¦ ¦ ¦--expr: a [0/0] {368} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {367} + ¦ ¦ ¦--'-': - [0/0] {369} + ¦ ¦ °--expr: b [0/0] {371} + ¦ ¦ °--SYMBOL: b [0/0] {370} + ¦ ¦--expr: a++b [1/2] {372} + ¦ ¦ ¦--expr: a [0/0] {374} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {373} + ¦ ¦ ¦--'+': + [0/0] {375} + ¦ ¦ °--expr: +b [0/0] {376} + ¦ ¦ ¦--'+': + [0/0] {377} + ¦ ¦ °--expr: b [0/0] {379} + ¦ ¦ °--SYMBOL: b [0/0] {378} + ¦ ¦--expr: a+-b [1/2] {380} + ¦ ¦ ¦--expr: a [0/0] {382} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {381} + ¦ ¦ ¦--'+': + [0/0] {383} + ¦ ¦ °--expr: -b [0/0] {384} + ¦ ¦ ¦--'-': - [0/0] {385} + ¦ ¦ °--expr: b [0/0] {387} + ¦ ¦ °--SYMBOL: b
[0/0] {386} + ¦ ¦--expr: a++b [1/2] {388} + ¦ ¦ ¦--expr: a [0/0] {390} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {389} + ¦ ¦ ¦--'+': + [0/0] {391} + ¦ ¦ °--expr: +b [0/0] {392} + ¦ ¦ ¦--'+': + [0/0] {393} + ¦ ¦ °--expr: b [0/0] {395} + ¦ ¦ °--SYMBOL: b [0/0] {394} + ¦ ¦--expr: a-+b [1/2] {396} + ¦ ¦ ¦--expr: a [0/0] {398} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {397} + ¦ ¦ ¦--'-': - [0/0] {399} + ¦ ¦ °--expr: +b [0/0] {400} + ¦ ¦ ¦--'+': + [0/0] {401} + ¦ ¦ °--expr: b [0/0] {403} + ¦ ¦ °--SYMBOL: b [0/0] {402} + ¦ ¦--expr: a--b [1/2] {404} + ¦ ¦ ¦--expr: a [0/0] {406} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {405} + ¦ ¦ ¦--'-': - [0/0] {407} + ¦ ¦ °--expr: -b [0/0] {408} + ¦ ¦ ¦--'-': - [0/0] {409} + ¦ ¦ °--expr: b [0/0] {411} + ¦ ¦ °--SYMBOL: b [0/0] {410} + ¦ ¦--expr: a+--b [1/2] {412} + ¦ ¦ ¦--expr: a [0/0] {414} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {413} + ¦ ¦ ¦--'+': + [0/0] {415} + ¦ ¦ °--expr: --b [0/0] {416} + ¦ ¦ ¦--'-': - [0/0] {417} + ¦ ¦ °--expr: -b [0/0] {418} + ¦ ¦ ¦--'-': - [0/0] {419} + ¦ ¦ °--expr: b [0/0] {421} + ¦ ¦ °--SYMBOL: b [0/0] {420} + ¦ ¦--expr: a--+b [1/2] {422} + ¦ ¦ ¦--expr: a [0/0] {424} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {423} + ¦ ¦ ¦--'-': - [0/0] {425} + ¦ ¦ °--expr: -+b [0/0] {426} + ¦ ¦ ¦--'-': - [0/0] {427} + ¦ ¦ °--expr: +b [0/0] {428} + ¦ ¦ ¦--'+': + [0/0] {429} + ¦ ¦ °--expr: b [0/0] {431} + ¦ ¦ °--SYMBOL: b [0/0] {430} + ¦ ¦--expr: call( [1/2] {432} + ¦ ¦ ¦--expr: call [0/0] {434} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {433} + ¦ ¦ ¦--'(': ( [0/1] {435} + ¦ ¦ ¦--expr: + a [0/0] {436} + ¦ ¦ ¦ ¦--'+': + [0/1] {437} + ¦ ¦ ¦ °--expr: a [0/0] {439} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {438} + ¦ ¦ °--')': ) [0/0] {440} + ¦ ¦--expr: call( [1/2] {441} + ¦ ¦ ¦--expr: call [0/0] {443} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {442} + ¦ ¦ ¦--'(': ( [0/1] {444} + ¦ ¦ ¦--expr: - a [0/0] {445} + ¦ ¦ ¦ ¦--'-': - [0/1] {446} + ¦ ¦ ¦ °--expr: a [0/0] {448} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {447} + ¦ ¦ °--')': ) [0/0] {449} + ¦ ¦--expr: call( [1/2] {450} + ¦ ¦ ¦--expr: call [0/0] {452} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {451} + ¦ ¦ ¦--'(': ( [0/0] {453} + ¦ ¦ ¦--expr: 5 [0/0] {455} + ¦ ¦ ¦ °--NUM_CONST: 5 [0/0] {454} + ¦ ¦ ¦--',': , [0/1] {456} + ¦ ¦ ¦--expr: + a [0/0] {457} + ¦ ¦ ¦ ¦--'+': + [0/1] {458} + ¦ ¦ ¦ °--expr: a [0/0] {460} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {459} + ¦ ¦ °--')': ) [0/0] {461} + ¦ ¦--expr: call( [1/2] {462} + ¦ ¦ ¦--expr: call [0/0] {464} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {463} + ¦ ¦ ¦--'(': ( [0/0] {465} + ¦ ¦ ¦--expr: 5 [0/0] {467} + ¦ ¦ ¦ °--NUM_CONST: 5 [0/0] {466} + ¦ ¦ ¦--',': , [0/1] {468} + ¦ ¦ ¦--expr: - a [0/0] {469} + ¦ ¦ ¦ ¦--'-': - [0/1] {470} + ¦ ¦ ¦ °--expr: a [0/0] {472} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {471} + ¦ ¦ °--')': ) [0/0] {473} + ¦ ¦--COMMENT: # Onl [2/2] {474} + ¦ ¦--expr: call( [1/2] {475} + ¦ ¦ ¦--expr: call [0/0] {477} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {476} + ¦ ¦ ¦--'(': ( [0/4] {478} + ¦ ¦ ¦--expr: prese [1/0] {480} + ¦ ¦ ¦ °--SYMBOL: prese [0/0] {479} + ¦ ¦ ¦--',': , [0/1] {481} + ¦ ¦ ¦--expr: dista [0/0] {483} + ¦ ¦ ¦ °--SYMBOL: dista [0/0] {482} + ¦ ¦ ¦--',': , [0/4] {484} + ¦ ¦ ¦--expr: after [1/0] {486} + ¦ ¦ ¦ °--SYMBOL: after [0/0] {485} + ¦ ¦ ¦--',': , [0/5] {487} + ¦ ¦ ¦--expr: comma [0/0] {489} + ¦ ¦ ¦ °--SYMBOL: comma [0/0] {488} + ¦ ¦ ¦--',': , [0/4] {490} + ¦ ¦ ¦--expr: given [1/0] {492} + ¦ ¦ ¦ °--SYMBOL: given [0/0] {491} + ¦ ¦ ¦--',': , [0/0] {493} + ¦ ¦ ¦--expr: one [0/2] {495} + ¦ ¦ ¦ °--SYMBOL: one [0/0] {494} + ¦ ¦ °--')': ) [1/0] {496} + ¦ ¦--expr: if(TR [2/2] {497} + ¦ ¦ ¦--IF: if [0/0] {498} + ¦ ¦ ¦--'(': ( [0/0] {499} + ¦ ¦ ¦--expr: TRUE 
[0/0] {501} + ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {500} + ¦ ¦ ¦--')': ) [0/0] {502} + ¦ ¦ °--expr: { + [0/0] {503} ¦ ¦ ¦--'{': { [0/4] {504} - ¦ ¦ ¦--expr: [1/2] {506} - ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {505} + ¦ ¦ ¦--expr: FALSE [1/2] {506} + ¦ ¦ ¦ °--NUM_CONST: FALSE [0/0] {505} ¦ ¦ °--'}': } [1/0] {507} - ¦ ¦--expr: [2/2] {508} - ¦ ¦ ¦--WHILE: while [0/0] {509} + ¦ ¦--expr: if(TR [2/2] {508} + ¦ ¦ ¦--IF: if [0/0] {509} ¦ ¦ ¦--'(': ( [0/0] {510} - ¦ ¦ ¦--expr: [0/0] {512} + ¦ ¦ ¦--expr: TRUE [0/0] {512} ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {511} ¦ ¦ ¦--')': ) [0/0] {513} - ¦ ¦ °--expr: [0/0] {514} - ¦ ¦ ¦--'{': { [0/4] {515} - ¦ ¦ ¦--expr: [1/2] {517} - ¦ ¦ ¦ °--NUM_CONST: FALSE [0/0] {516} - ¦ ¦ °--'}': } [1/0] {518} - ¦ ¦--expr: [2/2] {519} - ¦ ¦ ¦--expr: [0/1] {521} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: singl [0/0] {520} - ¦ ¦ ¦--'(': ( [0/1] {522} - ¦ ¦ ¦--expr: [0/1] {524} - ¦ ¦ ¦ °--STR_CONST: "func [0/0] {523} - ¦ ¦ ¦--',': , [0/0] {525} - ¦ ¦ ¦--expr: [0/1] {527} - ¦ ¦ ¦ °--SYMBOL: call [0/0] {526} - ¦ ¦ °--')': ) [0/0] {528} - ¦ ¦--expr: [2/2] {529} - ¦ ¦ ¦--expr: [0/1] {531} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: multi [0/0] {530} - ¦ ¦ ¦--'(': ( [0/2] {532} - ¦ ¦ ¦--expr: [1/0] {534} - ¦ ¦ ¦ °--STR_CONST: "func [0/0] {533} - ¦ ¦ ¦--',': , [0/1] {535} - ¦ ¦ ¦--expr: [0/1] {537} - ¦ ¦ ¦ °--SYMBOL: call [0/0] {536} - ¦ ¦ °--')': ) [0/0] {538} - ¦ ¦--expr: [2/2] {539} - ¦ ¦ ¦--expr: [0/1] {541} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: neste [0/0] {540} - ¦ ¦ ¦--'(': ( [0/1] {542} - ¦ ¦ ¦--expr: [0/1] {543} - ¦ ¦ ¦ ¦--expr: [0/1] {545} - ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: funct [0/0] {544} - ¦ ¦ ¦ ¦--'(': ( [0/1] {546} - ¦ ¦ ¦ ¦--expr: [0/1] {548} - ¦ ¦ ¦ ¦ °--STR_CONST: "in" [0/0] {547} - ¦ ¦ ¦ ¦--',': , [0/0] {549} - ¦ ¦ ¦ ¦--expr: [0/1] {551} - ¦ ¦ ¦ ¦ °--SYMBOL: one [0/0] {550} - ¦ ¦ ¦ ¦--',': , [0/0] {552} - ¦ ¦ ¦ ¦--expr: [0/1] {554} - ¦ ¦ ¦ ¦ °--SYMBOL: line [0/0] {553} - ¦ ¦ ¦ °--')': ) [0/0] {555} - ¦ ¦ °--')': ) [0/0] {556} - ¦ ¦--expr: [2/2] {557} - ¦ ¦ ¦--expr: [0/1] {559} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: neste [0/0] {558} - ¦ ¦ ¦--'(': ( [0/1] {560} - ¦ ¦ ¦--expr: [0/1] {561} - ¦ ¦ ¦ ¦--expr: [0/1] {563} - ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: funct [0/0] {562} - ¦ ¦ ¦ ¦--'(': ( [0/2] {564} - ¦ ¦ ¦ ¦--expr: [1/0] {566} - ¦ ¦ ¦ ¦ °--STR_CONST: "in" [0/0] {565} - ¦ ¦ ¦ ¦--',': , [0/6] {567} - ¦ ¦ ¦ ¦--expr: [1/0] {569} - ¦ ¦ ¦ ¦ °--SYMBOL: multi [0/0] {568} - ¦ ¦ ¦ ¦--',': , [0/0] {570} - ¦ ¦ ¦ ¦--expr: [0/1] {572} - ¦ ¦ ¦ ¦ °--SYMBOL: lines [0/0] {571} - ¦ ¦ ¦ °--')': ) [0/0] {573} - ¦ ¦ °--')': ) [0/0] {574} - ¦ ¦--expr: [2/2] {575} - ¦ ¦ ¦--expr: [0/0] {577} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: neste [0/0] {576} - ¦ ¦ ¦--'(': ( [0/2] {578} - ¦ ¦ ¦--expr: [1/0] {579} - ¦ ¦ ¦ ¦--expr: [0/1] {581} - ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: funct [0/0] {580} - ¦ ¦ ¦ ¦--'(': ( [0/1] {582} - ¦ ¦ ¦ ¦--expr: [0/1] {584} - ¦ ¦ ¦ ¦ °--SYMBOL: with [0/0] {583} - ¦ ¦ ¦ °--')': ) [0/0] {585} - ¦ ¦ ¦--',': , [0/6] {586} - ¦ ¦ ¦--expr: [1/2] {588} - ¦ ¦ ¦ °--SYMBOL: many [0/0] {587} - ¦ ¦ ¦--',': , [1/5] {589} - ¦ ¦ ¦--expr: [0/2] {591} - ¦ ¦ ¦ °--SYMBOL: first [0/0] {590} - ¦ ¦ °--')': ) [0/0] {592} - ¦ ¦--expr: [2/2] {593} - ¦ ¦ ¦--expr: [0/0] {595} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: neste [0/0] {594} - ¦ ¦ ¦--'(': ( [0/4] {596} - ¦ ¦ ¦--expr: [1/0] {597} - ¦ ¦ ¦ ¦--expr: [0/1] {599} - ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: funct [0/0] {598} - ¦ ¦ ¦ ¦--'(': ( [0/1] {600} - ¦ ¦ ¦ ¦--expr: [0/1] {602} - ¦ ¦ ¦ ¦ °--SYMBOL: with [0/0] {601} - ¦ ¦ ¦ °--')': ) [0/0] {603} - ¦ ¦ ¦--',': , [0/2] {604} - ¦ ¦ ¦--COMMENT: # a c [0/4] {605} - ¦ ¦ 
¦--expr: [1/1] {607} - ¦ ¦ ¦ °--SYMBOL: many [0/0] {606} - ¦ ¦ ¦--COMMENT: #more [0/4] {608} - ¦ ¦ ¦--',': , [1/5] {609} - ¦ ¦ ¦--expr: [0/2] {611} - ¦ ¦ ¦ °--SYMBOL: first [0/0] {610} - ¦ ¦ °--')': ) [0/0] {612} - ¦ ¦--expr: [2/0] {613} - ¦ ¦ ¦--expr: [0/0] {615} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: diffi [0/0] {614} - ¦ ¦ ¦--'(': ( [0/0] {616} - ¦ ¦ ¦--expr: [0/0] {617} - ¦ ¦ ¦ ¦--expr: [0/0] {619} - ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: neste [0/0] {618} - ¦ ¦ ¦ ¦--'(': ( [0/4] {620} - ¦ ¦ ¦ ¦--expr: [1/0] {622} - ¦ ¦ ¦ ¦ °--STR_CONST: "func [0/0] {621} - ¦ ¦ ¦ ¦--',': , [0/1] {623} - ¦ ¦ ¦ ¦--expr: [0/2] {625} - ¦ ¦ ¦ ¦ °--SYMBOL: call [0/0] {624} - ¦ ¦ ¦ °--')': ) [1/0] {626} - ¦ ¦ ¦--',': , [0/4] {627} - ¦ ¦ ¦--expr: [1/0] {629} - ¦ ¦ ¦ °--SYMBOL: with [0/0] {628} - ¦ ¦ ¦--',': , [0/1] {630} - ¦ ¦ ¦--expr: [0/0] {632} - ¦ ¦ ¦ °--SYMBOL: more [0/0] {631} - ¦ ¦ ¦--',': , [0/1] {633} - ¦ ¦ ¦--expr: [0/2] {635} - ¦ ¦ ¦ °--SYMBOL: args [0/0] {634} - ¦ ¦ °--')': ) [1/0] {636} - ¦ °--'}': } [1/0] {637} - ¦--COMMENT: # for [3/0] {638} - ¦--expr: [1/0] {639} - ¦ ¦--expr: [0/0] {641} - ¦ ¦ °--SYMBOL_FUNCTION_CALL: lm [0/0] {640} - ¦ ¦--'(': ( [0/0] {642} - ¦ ¦--expr: [0/0] {643} - ¦ ¦ ¦--expr: [0/0] {645} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {644} - ¦ ¦ ¦--'~': ~ [0/0] {646} - ¦ ¦ °--expr: [0/0] {647} - ¦ ¦ ¦--expr: [0/0] {649} - ¦ ¦ ¦ °--SYMBOL: b [0/0] {648} - ¦ ¦ ¦--'+': + [0/0] {650} - ¦ ¦ °--expr: [0/0] {652} - ¦ ¦ °--SYMBOL: c [0/0] {651} - ¦ ¦--',': , [0/0] {653} - ¦ ¦--SYMBOL_SUB: data [0/0] {654} - ¦ ¦--EQ_SUB: = [0/0] {655} - ¦ ¦--expr: [0/0] {657} - ¦ ¦ °--NUM_CONST: NA [0/0] {656} - ¦ °--')': ) [0/0] {658} - ¦--expr: [1/0] {659} - ¦ ¦--expr: [0/0] {661} - ¦ ¦ °--SYMBOL_FUNCTION_CALL: lm [0/0] {660} - ¦ ¦--'(': ( [0/0] {662} - ¦ ¦--expr: [0/0] {663} - ¦ ¦ ¦--expr: [0/0] {665} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {664} - ¦ ¦ ¦--'~': ~ [0/0] {666} - ¦ ¦ °--expr: [0/0] {667} - ¦ ¦ ¦--expr: [0/0] {669} - ¦ ¦ ¦ °--SYMBOL: . 
[0/0] {668} - ¦ ¦ ¦--'-': - [0/0] {670} - ¦ ¦ °--expr: [0/0] {672} - ¦ ¦ °--NUM_CONST: 1 [0/0] {671} - ¦ ¦--',': , [0/0] {673} - ¦ ¦--SYMBOL_SUB: data [0/0] {674} - ¦ ¦--EQ_SUB: = [0/0] {675} - ¦ ¦--expr: [0/0] {677} - ¦ ¦ °--NUM_CONST: NA [0/0] {676} - ¦ °--')': ) [0/0] {678} - ¦--expr: [1/0] {679} - ¦ ¦--expr: [0/0] {681} - ¦ ¦ °--SYMBOL: a [0/0] {680} - ¦ ¦--'~': ~ [0/0] {682} - ¦ °--expr: [0/0] {683} - ¦ ¦--expr: [0/0] {685} - ¦ ¦ °--SYMBOL: b [0/0] {684} - ¦ ¦--':': : [0/0] {686} - ¦ °--expr: [0/0] {688} - ¦ °--SYMBOL: c [0/0] {687} - °--expr: [1/0] {689} - ¦--expr: [0/0] {691} - ¦ °--SYMBOL: a [0/0] {690} - ¦--'~': ~ [0/0] {692} - °--expr: [0/0] {693} - ¦--expr: [0/1] {695} - ¦ °--SYMBOL: b [0/0] {694} - ¦--':': : [0/0] {696} - °--expr: [0/0] {698} - °--SYMBOL: c [0/0] {697} + ¦ ¦ ¦--expr: { + [0/0] {514} + ¦ ¦ ¦ ¦--'{': { [0/4] {515} + ¦ ¦ ¦ ¦--expr: FALSE [1/2] {517} + ¦ ¦ ¦ ¦ °--NUM_CONST: FALSE [0/0] {516} + ¦ ¦ ¦ °--'}': } [1/0] {518} + ¦ ¦ ¦--ELSE: else [0/0] {519} + ¦ ¦ °--expr: { + [0/0] {520} + ¦ ¦ ¦--'{': { [0/4] {521} + ¦ ¦ ¦--expr: TRUE [1/2] {523} + ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {522} + ¦ ¦ °--'}': } [1/0] {524} + ¦ ¦--expr: while [2/2] {525} + ¦ ¦ ¦--WHILE: while [0/0] {526} + ¦ ¦ ¦--'(': ( [0/0] {527} + ¦ ¦ ¦--expr: TRUE [0/0] {529} + ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {528} + ¦ ¦ ¦--')': ) [0/0] {530} + ¦ ¦ °--expr: { + [0/0] {531} + ¦ ¦ ¦--'{': { [0/4] {532} + ¦ ¦ ¦--expr: FALSE [1/2] {534} + ¦ ¦ ¦ °--NUM_CONST: FALSE [0/0] {533} + ¦ ¦ °--'}': } [1/0] {535} + ¦ ¦--expr: singl [2/2] {536} + ¦ ¦ ¦--expr: singl [0/1] {538} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: singl [0/0] {537} + ¦ ¦ ¦--'(': ( [0/1] {539} + ¦ ¦ ¦--expr: "func [0/1] {541} + ¦ ¦ ¦ °--STR_CONST: "func [0/0] {540} + ¦ ¦ ¦--',': , [0/0] {542} + ¦ ¦ ¦--expr: call [0/1] {544} + ¦ ¦ ¦ °--SYMBOL: call [0/0] {543} + ¦ ¦ °--')': ) [0/0] {545} + ¦ ¦--expr: multi [2/2] {546} + ¦ ¦ ¦--expr: multi [0/1] {548} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: multi [0/0] {547} + ¦ ¦ ¦--'(': ( [0/2] {549} + ¦ ¦ ¦--expr: "func [1/0] {551} + ¦ ¦ ¦ °--STR_CONST: "func [0/0] {550} + ¦ ¦ ¦--',': , [0/1] {552} + ¦ ¦ ¦--expr: call [0/1] {554} + ¦ ¦ ¦ °--SYMBOL: call [0/0] {553} + ¦ ¦ °--')': ) [0/0] {555} + ¦ ¦--expr: neste [2/2] {556} + ¦ ¦ ¦--expr: neste [0/1] {558} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: neste [0/0] {557} + ¦ ¦ ¦--'(': ( [0/1] {559} + ¦ ¦ ¦--expr: funct [0/1] {560} + ¦ ¦ ¦ ¦--expr: funct [0/1] {562} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: funct [0/0] {561} + ¦ ¦ ¦ ¦--'(': ( [0/1] {563} + ¦ ¦ ¦ ¦--expr: "in" [0/1] {565} + ¦ ¦ ¦ ¦ °--STR_CONST: "in" [0/0] {564} + ¦ ¦ ¦ ¦--',': , [0/0] {566} + ¦ ¦ ¦ ¦--expr: one [0/1] {568} + ¦ ¦ ¦ ¦ °--SYMBOL: one [0/0] {567} + ¦ ¦ ¦ ¦--',': , [0/0] {569} + ¦ ¦ ¦ ¦--expr: line [0/1] {571} + ¦ ¦ ¦ ¦ °--SYMBOL: line [0/0] {570} + ¦ ¦ ¦ °--')': ) [0/0] {572} + ¦ ¦ °--')': ) [0/0] {573} + ¦ ¦--expr: neste [2/2] {574} + ¦ ¦ ¦--expr: neste [0/1] {576} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: neste [0/0] {575} + ¦ ¦ ¦--'(': ( [0/1] {577} + ¦ ¦ ¦--expr: funct [0/1] {578} + ¦ ¦ ¦ ¦--expr: funct [0/1] {580} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: funct [0/0] {579} + ¦ ¦ ¦ ¦--'(': ( [0/2] {581} + ¦ ¦ ¦ ¦--expr: "in" [1/0] {583} + ¦ ¦ ¦ ¦ °--STR_CONST: "in" [0/0] {582} + ¦ ¦ ¦ ¦--',': , [0/6] {584} + ¦ ¦ ¦ ¦--expr: multi [1/0] {586} + ¦ ¦ ¦ ¦ °--SYMBOL: multi [0/0] {585} + ¦ ¦ ¦ ¦--',': , [0/0] {587} + ¦ ¦ ¦ ¦--expr: lines [0/1] {589} + ¦ ¦ ¦ ¦ °--SYMBOL: lines [0/0] {588} + ¦ ¦ ¦ °--')': ) [0/0] {590} + ¦ ¦ °--')': ) [0/0] {591} + ¦ ¦--expr: neste [2/2] {592} + ¦ ¦ ¦--expr: neste [0/0] {594} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: neste [0/0] 
{593} + ¦ ¦ ¦--'(': ( [0/2] {595} + ¦ ¦ ¦--expr: funct [1/0] {596} + ¦ ¦ ¦ ¦--expr: funct [0/1] {598} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: funct [0/0] {597} + ¦ ¦ ¦ ¦--'(': ( [0/1] {599} + ¦ ¦ ¦ ¦--expr: with [0/1] {601} + ¦ ¦ ¦ ¦ °--SYMBOL: with [0/0] {600} + ¦ ¦ ¦ °--')': ) [0/0] {602} + ¦ ¦ ¦--',': , [0/6] {603} + ¦ ¦ ¦--expr: many [1/2] {605} + ¦ ¦ ¦ °--SYMBOL: many [0/0] {604} + ¦ ¦ ¦--',': , [1/5] {606} + ¦ ¦ ¦--expr: first [0/2] {608} + ¦ ¦ ¦ °--SYMBOL: first [0/0] {607} + ¦ ¦ °--')': ) [0/0] {609} + ¦ ¦--expr: neste [2/2] {610} + ¦ ¦ ¦--expr: neste [0/0] {612} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: neste [0/0] {611} + ¦ ¦ ¦--'(': ( [0/4] {613} + ¦ ¦ ¦--expr: funct [1/0] {614} + ¦ ¦ ¦ ¦--expr: funct [0/1] {616} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: funct [0/0] {615} + ¦ ¦ ¦ ¦--'(': ( [0/1] {617} + ¦ ¦ ¦ ¦--expr: with [0/1] {619} + ¦ ¦ ¦ ¦ °--SYMBOL: with [0/0] {618} + ¦ ¦ ¦ °--')': ) [0/0] {620} + ¦ ¦ ¦--',': , [0/2] {621} + ¦ ¦ ¦--COMMENT: # a c [0/4] {622} + ¦ ¦ ¦--expr: many [1/1] {624} + ¦ ¦ ¦ °--SYMBOL: many [0/0] {623} + ¦ ¦ ¦--COMMENT: #more [0/4] {625} + ¦ ¦ ¦--',': , [1/5] {626} + ¦ ¦ ¦--expr: first [0/2] {628} + ¦ ¦ ¦ °--SYMBOL: first [0/0] {627} + ¦ ¦ °--')': ) [0/0] {629} + ¦ ¦--expr: diffi [2/0] {630} + ¦ ¦ ¦--expr: diffi [0/0] {632} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: diffi [0/0] {631} + ¦ ¦ ¦--'(': ( [0/0] {633} + ¦ ¦ ¦--expr: neste [0/0] {634} + ¦ ¦ ¦ ¦--expr: neste [0/0] {636} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: neste [0/0] {635} + ¦ ¦ ¦ ¦--'(': ( [0/4] {637} + ¦ ¦ ¦ ¦--expr: "func [1/0] {639} + ¦ ¦ ¦ ¦ °--STR_CONST: "func [0/0] {638} + ¦ ¦ ¦ ¦--',': , [0/1] {640} + ¦ ¦ ¦ ¦--expr: call [0/2] {642} + ¦ ¦ ¦ ¦ °--SYMBOL: call [0/0] {641} + ¦ ¦ ¦ °--')': ) [1/0] {643} + ¦ ¦ ¦--',': , [0/4] {644} + ¦ ¦ ¦--expr: with [1/0] {646} + ¦ ¦ ¦ °--SYMBOL: with [0/0] {645} + ¦ ¦ ¦--',': , [0/1] {647} + ¦ ¦ ¦--expr: more [0/0] {649} + ¦ ¦ ¦ °--SYMBOL: more [0/0] {648} + ¦ ¦ ¦--',': , [0/1] {650} + ¦ ¦ ¦--expr: args [0/2] {652} + ¦ ¦ ¦ °--SYMBOL: args [0/0] {651} + ¦ ¦ °--')': ) [1/0] {653} + ¦ °--'}': } [1/0] {654} + ¦--COMMENT: # for [3/0] {655} + ¦--expr: lm(a~ [1/0] {656} + ¦ ¦--expr: lm [0/0] {658} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: lm [0/0] {657} + ¦ ¦--'(': ( [0/0] {659} + ¦ ¦--expr: a~b+c [0/0] {660} + ¦ ¦ ¦--expr: a [0/0] {662} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {661} + ¦ ¦ ¦--'~': ~ [0/0] {663} + ¦ ¦ ¦--expr: b [0/0] {666} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {665} + ¦ ¦ ¦--'+': + [0/0] {667} + ¦ ¦ °--expr: c [0/0] {669} + ¦ ¦ °--SYMBOL: c [0/0] {668} + ¦ ¦--',': , [0/0] {670} + ¦ ¦--SYMBOL_SUB: data [0/0] {671} + ¦ ¦--EQ_SUB: = [0/0] {672} + ¦ ¦--expr: NA [0/0] {674} + ¦ ¦ °--NUM_CONST: NA [0/0] {673} + ¦ °--')': ) [0/0] {675} + ¦--expr: lm(a~ [1/0] {676} + ¦ ¦--expr: lm [0/0] {678} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: lm [0/0] {677} + ¦ ¦--'(': ( [0/0] {679} + ¦ ¦--expr: a~.-1 [0/0] {680} + ¦ ¦ ¦--expr: a [0/0] {682} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {681} + ¦ ¦ ¦--'~': ~ [0/0] {683} + ¦ ¦ ¦--expr: . [0/0] {686} + ¦ ¦ ¦ °--SYMBOL: . 
[0/0] {685} + ¦ ¦ ¦--'-': - [0/0] {687} + ¦ ¦ °--expr: 1 [0/0] {689} + ¦ ¦ °--NUM_CONST: 1 [0/0] {688} + ¦ ¦--',': , [0/0] {690} + ¦ ¦--SYMBOL_SUB: data [0/0] {691} + ¦ ¦--EQ_SUB: = [0/0] {692} + ¦ ¦--expr: NA [0/0] {694} + ¦ ¦ °--NUM_CONST: NA [0/0] {693} + ¦ °--')': ) [0/0] {695} + ¦--expr: a~b:c [1/0] {696} + ¦ ¦--expr: a [0/0] {698} + ¦ ¦ °--SYMBOL: a [0/0] {697} + ¦ ¦--'~': ~ [0/0] {699} + ¦ °--expr: b:c [0/0] {700} + ¦ ¦--expr: b [0/0] {702} + ¦ ¦ °--SYMBOL: b [0/0] {701} + ¦ ¦--':': : [0/0] {703} + ¦ °--expr: c [0/0] {705} + ¦ °--SYMBOL: c [0/0] {704} + ¦--expr: a~b : [1/0] {706} + ¦ ¦--expr: a [0/0] {708} + ¦ ¦ °--SYMBOL: a [0/0] {707} + ¦ ¦--'~': ~ [0/0] {709} + ¦ °--expr: b :c [0/0] {710} + ¦ ¦--expr: b [0/1] {712} + ¦ ¦ °--SYMBOL: b [0/0] {711} + ¦ ¦--':': : [0/0] {713} + ¦ °--expr: c [0/0] {715} + ¦ °--SYMBOL: c [0/0] {714} + ¦--expr: a ~ [1/0] {716} + ¦ ¦--expr: a [0/3] {718} + ¦ ¦ °--SYMBOL: a [0/0] {717} + ¦ ¦--'~': ~ [0/3] {719} + ¦ °--expr: b : [0/0] {720} + ¦ ¦--expr: b [0/2] {722} + ¦ ¦ °--SYMBOL: b [0/0] {721} + ¦ ¦--':': : [0/1] {723} + ¦ °--expr: c [0/0] {725} + ¦ °--SYMBOL: c [0/0] {724} + ¦--expr: ~ a [2/0] {726} + ¦ ¦--'~': ~ [0/3] {727} + ¦ °--expr: a [0/0] {729} + ¦ °--SYMBOL: a [0/0] {728} + ¦--expr: ~gg [1/0] {730} + ¦ ¦--'~': ~ [0/0] {731} + ¦ °--expr: gg [0/0] {733} + ¦ °--SYMBOL: gg [0/0] {732} + ¦--expr: b~ [1/0] {734} + ¦ ¦--expr: b [0/0] {736} + ¦ ¦ °--SYMBOL: b [0/0] {735} + ¦ ¦--'~': ~ [0/3] {737} + ¦ °--expr: k [0/0] {739} + ¦ °--SYMBOL: k [0/0] {738} + °--expr: call( [1/0] {740} + ¦--expr: call [0/0] {742} + ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {741} + ¦--'(': ( [0/0] {743} + ¦--expr: 1 [0/0] {745} + ¦ °--NUM_CONST: 1 [0/0] {744} + ¦--',': , [0/0] {746} + ¦--expr: ~ qq [0/0] {747} + ¦ ¦--'~': ~ [0/1] {748} + ¦ °--expr: qq [0/0] {750} + ¦ °--SYMBOL: qq [0/0] {749} + °--')': ) [0/0] {751} diff --git a/tests/testthat/strict/non_strict-out.R b/tests/testthat/strict/non_strict-out.R index 7e19f4756..08f773a73 100644 --- a/tests/testthat/strict/non_strict-out.R +++ b/tests/testthat/strict/non_strict-out.R @@ -4,6 +4,23 @@ test <- function() { "even if the string contains an escaped ' single quote" 'but not if it contains a "double quote' + "multi-line quotes + remain multi-line + " + + "That also holds true + if + single quotes are used + ." + + "strings with embedded\nline breaks are handled correctly" + + "\\" + "\\'" + "\\\\" + "\\\\'" + "'\\\\'" + # Comments are always preserved function_calls(get_spaces = around_equal) @@ -65,7 +82,7 @@ test <- function() { a > b a * b a / b - a ^ b + a^b a & b a | b a := b @@ -118,8 +135,8 @@ test <- function() { nested( function_call (with), - many - , first_level_args) + many, + first_level_args) nested( function_call (with), # a comment and @@ -135,7 +152,13 @@ test <- function() { # formula -lm(a~b + c, data = NA) -lm(a~. - 1, data = NA) -a~b:c -a~b:c +lm(a ~ b + c, data = NA) +lm(a ~ . 
- 1, data = NA) +a ~ b:c +a ~ b:c +a ~ b:c + +~a +~gg +b ~ k +call(1, ~qq) diff --git a/tests/testthat/strict/strict-in.R b/tests/testthat/strict/strict-in.R index 339774f86..ae85a96f5 100644 --- a/tests/testthat/strict/strict-in.R +++ b/tests/testthat/strict/strict-in.R @@ -139,4 +139,9 @@ test <- function() { lm(a~b+c,data=NA) lm(a~.-1,data=NA) a~b:c +a ~ b : c a~b :c +~ a +~gg +b~k +call(1, ~ qq) diff --git a/tests/testthat/strict/strict-in_tree b/tests/testthat/strict/strict-in_tree index 074d54208..94428955b 100644 --- a/tests/testthat/strict/strict-in_tree +++ b/tests/testthat/strict/strict-in_tree @@ -1,700 +1,747 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/1] {3} + ¦--expr: test [0/0] {1} + ¦ ¦--expr: test [0/1] {3} ¦ ¦ °--SYMBOL: test [0/0] {2} ¦ ¦--LEFT_ASSIGN: <- [0/1] {4} - ¦ °--expr: [0/0] {5} + ¦ °--expr: funct [0/0] {5} ¦ ¦--FUNCTION: funct [0/0] {6} ¦ ¦--'(': ( [0/0] {7} ¦ ¦--')': ) [0/1] {8} - ¦ °--expr: [0/0] {9} + ¦ °--expr: { + " [0/0] {9} ¦ ¦--'{': { [0/2] {10} - ¦ ¦--expr: [1/2] {12} + ¦ ¦--expr: "Doub [1/2] {12} ¦ ¦ °--STR_CONST: "Doub [0/0] {11} - ¦ ¦--expr: [1/2] {14} + ¦ ¦--expr: 'Sing [1/2] {14} ¦ ¦ °--STR_CONST: 'Sing [0/0] {13} - ¦ ¦--expr: [1/2] {16} + ¦ ¦--expr: 'even [1/2] {16} ¦ ¦ °--STR_CONST: 'even [0/0] {15} - ¦ ¦--expr: [1/2] {18} + ¦ ¦--expr: 'but [1/2] {18} ¦ ¦ °--STR_CONST: 'but [0/0] {17} ¦ ¦--COMMENT: # Com [2/2] {19} - ¦ ¦--expr: [2/2] {20} - ¦ ¦ ¦--expr: [0/0] {22} + ¦ ¦--expr: funct [2/2] {20} + ¦ ¦ ¦--expr: funct [0/0] {22} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: funct [0/0] {21} ¦ ¦ ¦--'(': ( [0/0] {23} ¦ ¦ ¦--SYMBOL_SUB: get_s [0/0] {24} ¦ ¦ ¦--EQ_SUB: = [0/0] {25} - ¦ ¦ ¦--expr: [0/0] {27} + ¦ ¦ ¦--expr: aroun [0/0] {27} ¦ ¦ ¦ °--SYMBOL: aroun [0/0] {26} ¦ ¦ °--')': ) [0/0] {28} - ¦ ¦--expr: [2/2] {29} - ¦ ¦ ¦--expr: [0/0] {31} + ¦ ¦--expr: no_sp [2/2] {29} + ¦ ¦ ¦--expr: no_sp [0/0] {31} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: no_sp [0/0] {30} ¦ ¦ ¦--'(': ( [0/1] {32} - ¦ ¦ ¦--expr: [0/0] {33} - ¦ ¦ ¦ ¦--expr: [0/0] {35} + ¦ ¦ ¦--expr: after [0/0] {33} + ¦ ¦ ¦ ¦--expr: after [0/0] {35} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: after [0/0] {34} ¦ ¦ ¦ ¦--'(': ( [0/1] {36} ¦ ¦ ¦ °--')': ) [0/0] {37} ¦ ¦ ¦--',': , [0/1] {38} - ¦ ¦ ¦--expr: [0/0] {39} - ¦ ¦ ¦ ¦--expr: [0/0] {41} + ¦ ¦ ¦--expr: paren [0/0] {39} + ¦ ¦ ¦ ¦--expr: paren [0/0] {41} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: paren [0/0] {40} ¦ ¦ ¦ ¦--'(': ( [0/1] {42} - ¦ ¦ ¦ ¦--expr: [0/0] {43} + ¦ ¦ ¦ ¦--expr: (1 + [0/0] {43} ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {44} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {45} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/1] {47} + ¦ ¦ ¦ ¦ ¦--expr: 1 + 2 [0/0] {45} + ¦ ¦ ¦ ¦ ¦ ¦--expr: 1 [0/1] {47} ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {46} ¦ ¦ ¦ ¦ ¦ ¦--'+': + [0/1] {48} - ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {50} + ¦ ¦ ¦ ¦ ¦ °--expr: 2 [0/0] {50} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {49} ¦ ¦ ¦ ¦ °--')': ) [0/0] {51} ¦ ¦ ¦ °--')': ) [0/0] {52} ¦ ¦ °--')': ) [0/0] {53} - ¦ ¦--expr: [1/2] {54} - ¦ ¦ ¦--expr: [0/1] {56} + ¦ ¦--expr: no_sp [1/2] {54} + ¦ ¦ ¦--expr: no_sp [0/1] {56} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: no_sp [0/0] {55} ¦ ¦ ¦--'(': ( [0/0] {57} - ¦ ¦ ¦--expr: [0/0] {58} - ¦ ¦ ¦ ¦--expr: [0/1] {60} + ¦ ¦ ¦--expr: befor [0/0] {58} + ¦ ¦ ¦ ¦--expr: befor [0/1] {60} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: befor [0/0] {59} ¦ ¦ ¦ ¦--'(': ( [0/0] {61} ¦ ¦ ¦ °--')': ) [0/0] {62} ¦ ¦ ¦--',': , [0/1] {63} - ¦ ¦ ¦--expr: [0/0] {64} - ¦ ¦ ¦ ¦--expr: [0/1] {66} + ¦ ¦ ¦--expr: paren [0/0] {64} + ¦ ¦ ¦ ¦--expr: paren [0/1] {66} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: paren [0/0] {65} ¦ ¦ ¦ ¦--'(': ( [0/1] {67} - ¦ ¦ ¦ ¦--expr: 
[0/0] {68} + ¦ ¦ ¦ ¦--expr: (1 + [0/0] {68} ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {69} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {70} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/1] {72} + ¦ ¦ ¦ ¦ ¦--expr: 1 + 2 [0/0] {70} + ¦ ¦ ¦ ¦ ¦ ¦--expr: 1 [0/1] {72} ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {71} ¦ ¦ ¦ ¦ ¦ ¦--'+': + [0/1] {73} - ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {75} + ¦ ¦ ¦ ¦ ¦ °--expr: 2 [0/0] {75} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {74} ¦ ¦ ¦ ¦ °--')': ) [0/0] {76} ¦ ¦ ¦ °--')': ) [0/0] {77} ¦ ¦ °--')': ) [0/0] {78} - ¦ ¦--expr: [1/2] {79} - ¦ ¦ ¦--expr: [0/0] {81} + ¦ ¦--expr: no_sp [1/2] {79} + ¦ ¦ ¦--expr: no_sp [0/0] {81} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: no_sp [0/0] {80} ¦ ¦ ¦--'(': ( [0/0] {82} - ¦ ¦ ¦--expr: [0/0] {83} - ¦ ¦ ¦ ¦--expr: [0/0] {85} + ¦ ¦ ¦--expr: befor [0/0] {83} + ¦ ¦ ¦ ¦--expr: befor [0/0] {85} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: befor [0/0] {84} ¦ ¦ ¦ ¦--'(': ( [0/0] {86} - ¦ ¦ ¦ ¦--expr: [0/1] {88} + ¦ ¦ ¦ ¦--expr: closi [0/1] {88} ¦ ¦ ¦ ¦ °--SYMBOL: closi [0/0] {87} ¦ ¦ ¦ °--')': ) [0/0] {89} ¦ ¦ ¦--',': , [0/1] {90} - ¦ ¦ ¦--expr: [0/1] {91} - ¦ ¦ ¦ ¦--expr: [0/0] {93} + ¦ ¦ ¦--expr: paren [0/1] {91} + ¦ ¦ ¦ ¦--expr: paren [0/0] {93} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: paren [0/0] {92} ¦ ¦ ¦ ¦--'(': ( [0/0] {94} - ¦ ¦ ¦ ¦--expr: [0/1] {95} + ¦ ¦ ¦ ¦--expr: (1 + [0/1] {95} ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {96} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {97} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/1] {99} + ¦ ¦ ¦ ¦ ¦--expr: 1 + 2 [0/0] {97} + ¦ ¦ ¦ ¦ ¦ ¦--expr: 1 [0/1] {99} ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {98} ¦ ¦ ¦ ¦ ¦ ¦--'+': + [0/1] {100} - ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {102} + ¦ ¦ ¦ ¦ ¦ °--expr: 2 [0/0] {102} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {101} ¦ ¦ ¦ ¦ °--')': ) [0/0] {103} ¦ ¦ ¦ °--')': ) [0/0] {104} ¦ ¦ °--')': ) [0/0] {105} - ¦ ¦--expr: [1/2] {106} - ¦ ¦ ¦--expr: [0/0] {108} + ¦ ¦--expr: multi [1/2] {106} + ¦ ¦ ¦--expr: multi [0/0] {108} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: multi [0/0] {107} ¦ ¦ ¦--'(': ( [0/4] {109} - ¦ ¦ ¦--expr: [1/0] {111} + ¦ ¦ ¦--expr: line [1/0] {111} ¦ ¦ ¦ °--SYMBOL: line [0/0] {110} ¦ ¦ ¦--',': , [0/4] {112} - ¦ ¦ ¦--expr: [1/2] {114} + ¦ ¦ ¦--expr: call [1/2] {114} ¦ ¦ ¦ °--SYMBOL: call [0/0] {113} ¦ ¦ °--')': ) [1/0] {115} - ¦ ¦--expr: [1/2] {116} - ¦ ¦ ¦--expr: [0/0] {118} + ¦ ¦--expr: multi [1/2] {116} + ¦ ¦ ¦--expr: multi [0/0] {118} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: multi [0/0] {117} ¦ ¦ ¦--'(': ( [0/2] {119} ¦ ¦ °--')': ) [1/0] {120} - ¦ ¦--expr: [2/2] {121} - ¦ ¦ ¦--expr: [0/0] {123} + ¦ ¦--expr: one_s [2/2] {121} + ¦ ¦ ¦--expr: one_s [0/0] {123} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: one_s [0/0] {122} ¦ ¦ ¦--'(': ( [0/0] {124} - ¦ ¦ ¦--expr: [0/0] {126} + ¦ ¦ ¦--expr: after [0/0] {126} ¦ ¦ ¦ °--SYMBOL: after [0/0] {125} ¦ ¦ ¦--',': , [0/0] {127} - ¦ ¦ ¦--expr: [0/0] {128} - ¦ ¦ ¦ ¦--expr: [0/0] {130} + ¦ ¦ ¦--expr: comma [0/0] {128} + ¦ ¦ ¦ ¦--expr: comma [0/0] {130} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: comma [0/0] {129} ¦ ¦ ¦ ¦--'(': ( [0/0] {131} - ¦ ¦ ¦ ¦--expr: [0/0] {133} + ¦ ¦ ¦ ¦--expr: "in" [0/0] {133} ¦ ¦ ¦ ¦ °--STR_CONST: "in" [0/0] {132} ¦ ¦ ¦ ¦--',': , [0/0] {134} - ¦ ¦ ¦ ¦--expr: [0/0] {136} + ¦ ¦ ¦ ¦--expr: "func [0/0] {136} ¦ ¦ ¦ ¦ °--STR_CONST: "func [0/0] {135} ¦ ¦ ¦ ¦--',': , [0/2] {137} - ¦ ¦ ¦ ¦--expr: [0/0] {139} + ¦ ¦ ¦ ¦--expr: args [0/0] {139} ¦ ¦ ¦ ¦ °--SYMBOL: args [0/0] {138} ¦ ¦ ¦ °--')': ) [0/0] {140} ¦ ¦ °--')': ) [0/0] {141} - ¦ ¦--expr: [2/2] {142} + ¦ ¦--expr: { + [2/2] {142} ¦ ¦ ¦--'{': { [0/4] {143} - ¦ ¦ ¦--expr: [1/4] {145} + ¦ ¦ ¦--expr: brace [1/4] {145} ¦ ¦ ¦ °--SYMBOL: brace [0/0] {144} - ¦ ¦ ¦--expr: [1/2] {147} + ¦ ¦ ¦--expr: expre [1/2] {147} ¦ ¦ ¦ °--SYMBOL: expre [0/0] {146} ¦ ¦ °--'}': } [1/0] {148} - ¦ 
¦--expr: [2/2] {149} - ¦ ¦ ¦--expr: [0/0] {151} + ¦ ¦--expr: brace [2/2] {149} + ¦ ¦ ¦--expr: brace [0/0] {151} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: brace [0/0] {150} ¦ ¦ ¦--'(': ( [0/0] {152} - ¦ ¦ ¦--expr: [0/0] {154} + ¦ ¦ ¦--expr: "unna [0/0] {154} ¦ ¦ ¦ °--STR_CONST: "unna [0/0] {153} ¦ ¦ ¦--',': , [0/1] {155} - ¦ ¦ ¦--expr: [0/0] {156} + ¦ ¦ ¦--expr: { + [0/0] {156} ¦ ¦ ¦ ¦--'{': { [0/4] {157} - ¦ ¦ ¦ ¦--expr: [1/4] {159} + ¦ ¦ ¦ ¦--expr: "func [1/4] {159} ¦ ¦ ¦ ¦ °--STR_CONST: "func [0/0] {158} - ¦ ¦ ¦ ¦--expr: [1/2] {161} + ¦ ¦ ¦ ¦--expr: call [1/2] {161} ¦ ¦ ¦ ¦ °--SYMBOL: call [0/0] {160} ¦ ¦ ¦ °--'}': } [1/0] {162} ¦ ¦ °--')': ) [0/0] {163} - ¦ ¦--expr: [2/2] {164} - ¦ ¦ ¦--expr: [0/0] {166} + ¦ ¦--expr: brace [2/2] {164} + ¦ ¦ ¦--expr: brace [0/0] {166} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: brace [0/0] {165} ¦ ¦ ¦--'(': ( [0/0] {167} ¦ ¦ ¦--SYMBOL_SUB: named [0/1] {168} ¦ ¦ ¦--EQ_SUB: = [0/1] {169} - ¦ ¦ ¦--expr: [0/0] {170} + ¦ ¦ ¦--expr: { + [0/0] {170} ¦ ¦ ¦ ¦--'{': { [0/4] {171} - ¦ ¦ ¦ ¦--expr: [1/4] {173} + ¦ ¦ ¦ ¦--expr: "func [1/4] {173} ¦ ¦ ¦ ¦ °--STR_CONST: "func [0/0] {172} - ¦ ¦ ¦ ¦--expr: [1/2] {175} + ¦ ¦ ¦ ¦--expr: call [1/2] {175} ¦ ¦ ¦ ¦ °--SYMBOL: call [0/0] {174} ¦ ¦ ¦ °--'}': } [1/0] {176} ¦ ¦ °--')': ) [0/0] {177} - ¦ ¦--expr: [2/2] {178} - ¦ ¦ ¦--expr: [0/0] {180} + ¦ ¦--expr: brace [2/2] {178} + ¦ ¦ ¦--expr: brace [0/0] {180} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: brace [0/0] {179} ¦ ¦ ¦--'(': ( [0/0] {181} - ¦ ¦ ¦--expr: [0/0] {183} + ¦ ¦ ¦--expr: "unna [0/0] {183} ¦ ¦ ¦ °--STR_CONST: "unna [0/0] {182} ¦ ¦ ¦--',': , [0/4] {184} - ¦ ¦ ¦--expr: [0/0] {185} + ¦ ¦ ¦--expr: { + } [0/0] {185} ¦ ¦ ¦ ¦--'{': { [0/2] {186} ¦ ¦ ¦ °--'}': } [1/0] {187} ¦ ¦ °--')': ) [0/0] {188} - ¦ ¦--expr: [2/2] {189} - ¦ ¦ ¦--expr: [0/0] {191} + ¦ ¦--expr: brace [2/2] {189} + ¦ ¦ ¦--expr: brace [0/0] {191} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: brace [0/0] {190} ¦ ¦ ¦--'(': ( [0/0] {192} - ¦ ¦ ¦--expr: [0/0] {194} + ¦ ¦ ¦--expr: "unna [0/0] {194} ¦ ¦ ¦ °--STR_CONST: "unna [0/0] {193} ¦ ¦ ¦--',': , [0/0] {195} - ¦ ¦ ¦--expr: [0/0] {196} + ¦ ¦ ¦--expr: { + } [0/0] {196} ¦ ¦ ¦ ¦--'{': { [0/2] {197} ¦ ¦ ¦ °--'}': } [1/0] {198} ¦ ¦ °--')': ) [0/0] {199} - ¦ ¦--expr: [2/2] {200} - ¦ ¦ ¦--expr: [0/0] {202} + ¦ ¦--expr: brace [2/2] {200} + ¦ ¦ ¦--expr: brace [0/0] {202} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: brace [0/0] {201} ¦ ¦ ¦--'(': ( [0/0] {203} ¦ ¦ ¦--SYMBOL_SUB: named [0/1] {204} ¦ ¦ ¦--EQ_SUB: = [0/4] {205} - ¦ ¦ ¦--expr: [0/0] {206} + ¦ ¦ ¦--expr: { + } [0/0] {206} ¦ ¦ ¦ ¦--'{': { [0/2] {207} ¦ ¦ ¦ °--'}': } [1/0] {208} ¦ ¦ °--')': ) [0/0] {209} - ¦ ¦--expr: [2/2] {210} - ¦ ¦ ¦--expr: [0/0] {212} + ¦ ¦--expr: brace [2/2] {210} + ¦ ¦ ¦--expr: brace [0/0] {212} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: brace [0/0] {211} ¦ ¦ ¦--'(': ( [0/0] {213} ¦ ¦ ¦--SYMBOL_SUB: named [0/1] {214} ¦ ¦ ¦--EQ_SUB: = [0/4] {215} - ¦ ¦ ¦--expr: [0/0] {216} + ¦ ¦ ¦--expr: { + } [0/0] {216} ¦ ¦ ¦ ¦--'{': { [0/2] {217} ¦ ¦ ¦ °--'}': } [1/0] {218} ¦ ¦ °--')': ) [0/0] {219} - ¦ ¦--expr: [2/2] {220} - ¦ ¦ ¦--expr: [0/0] {222} + ¦ ¦--expr: brace [2/2] {220} + ¦ ¦ ¦--expr: brace [0/0] {222} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: brace [0/0] {221} ¦ ¦ ¦--'(': ( [0/2] {223} - ¦ ¦ ¦--expr: [0/0] {224} + ¦ ¦ ¦--expr: { + [0/0] {224} ¦ ¦ ¦ ¦--'{': { [0/4] {225} - ¦ ¦ ¦ ¦--expr: [1/2] {227} + ¦ ¦ ¦ ¦--expr: empty [1/2] {227} ¦ ¦ ¦ ¦ °--SYMBOL: empty [0/0] {226} ¦ ¦ ¦ °--'}': } [1/0] {228} ¦ ¦ °--')': ) [0/0] {229} - ¦ ¦--expr: [2/2] {230} - ¦ ¦ ¦--expr: [0/0] {232} + ¦ ¦--expr: a%/%b [2/2] {230} + ¦ ¦ ¦--expr: a [0/0] {232} ¦ ¦ ¦ °--SYMBOL: a [0/0] {231} ¦ ¦ 
¦--SPECIAL-OTHER: %/% [0/0] {233} - ¦ ¦ °--expr: [0/0] {235} + ¦ ¦ °--expr: b [0/0] {235} ¦ ¦ °--SYMBOL: b [0/0] {234} - ¦ ¦--expr: [1/2] {236} - ¦ ¦ ¦--expr: [0/0] {238} + ¦ ¦--expr: a%%b [1/2] {236} + ¦ ¦ ¦--expr: a [0/0] {238} ¦ ¦ ¦ °--SYMBOL: a [0/0] {237} ¦ ¦ ¦--SPECIAL-OTHER: %% [0/0] {239} - ¦ ¦ °--expr: [0/0] {241} + ¦ ¦ °--expr: b [0/0] {241} ¦ ¦ °--SYMBOL: b [0/0] {240} - ¦ ¦--expr: [1/2] {242} - ¦ ¦ ¦--expr: [0/0] {244} + ¦ ¦--expr: a&&b [1/2] {242} + ¦ ¦ ¦--expr: a [0/0] {244} ¦ ¦ ¦ °--SYMBOL: a [0/0] {243} ¦ ¦ ¦--AND2: && [0/0] {245} - ¦ ¦ °--expr: [0/0] {247} + ¦ ¦ °--expr: b [0/0] {247} ¦ ¦ °--SYMBOL: b [0/0] {246} - ¦ ¦--expr: [1/2] {248} - ¦ ¦ ¦--expr: [0/0] {250} + ¦ ¦--expr: a||b [1/2] {248} + ¦ ¦ ¦--expr: a [0/0] {250} ¦ ¦ ¦ °--SYMBOL: a [0/0] {249} ¦ ¦ ¦--OR2: || [0/0] {251} - ¦ ¦ °--expr: [0/0] {253} + ¦ ¦ °--expr: b [0/0] {253} ¦ ¦ °--SYMBOL: b [0/0] {252} - ¦ ¦--expr: [1/2] {254} - ¦ ¦ ¦--expr: [0/0] {256} + ¦ ¦--expr: a==b [1/2] {254} + ¦ ¦ ¦--expr: a [0/0] {256} ¦ ¦ ¦ °--SYMBOL: a [0/0] {255} ¦ ¦ ¦--EQ: == [0/0] {257} - ¦ ¦ °--expr: [0/0] {259} + ¦ ¦ °--expr: b [0/0] {259} ¦ ¦ °--SYMBOL: b [0/0] {258} - ¦ ¦--expr: [1/2] {260} - ¦ ¦ ¦--expr: [0/0] {262} + ¦ ¦--expr: a!=b [1/2] {260} + ¦ ¦ ¦--expr: a [0/0] {262} ¦ ¦ ¦ °--SYMBOL: a [0/0] {261} ¦ ¦ ¦--NE: != [0/0] {263} - ¦ ¦ °--expr: [0/0] {265} + ¦ ¦ °--expr: b [0/0] {265} ¦ ¦ °--SYMBOL: b [0/0] {264} - ¦ ¦--expr: [1/2] {266} - ¦ ¦ ¦--expr: [0/0] {268} + ¦ ¦--expr: a<=b [1/2] {266} + ¦ ¦ ¦--expr: a [0/0] {268} ¦ ¦ ¦ °--SYMBOL: a [0/0] {267} ¦ ¦ ¦--LE: <= [0/0] {269} - ¦ ¦ °--expr: [0/0] {271} + ¦ ¦ °--expr: b [0/0] {271} ¦ ¦ °--SYMBOL: b [0/0] {270} - ¦ ¦--expr: [1/2] {272} - ¦ ¦ ¦--expr: [0/0] {274} + ¦ ¦--expr: a>=b [1/2] {272} + ¦ ¦ ¦--expr: a [0/0] {274} ¦ ¦ ¦ °--SYMBOL: a [0/0] {273} ¦ ¦ ¦--GE: >= [0/0] {275} - ¦ ¦ °--expr: [0/0] {277} + ¦ ¦ °--expr: b [0/0] {277} ¦ ¦ °--SYMBOL: b [0/0] {276} - ¦ ¦--expr: [1/2] {278} - ¦ ¦ ¦--expr: [0/0] {280} + ¦ ¦--expr: a<-b [1/2] {278} + ¦ ¦ ¦--expr: a [0/0] {280} ¦ ¦ ¦ °--SYMBOL: a [0/0] {279} ¦ ¦ ¦--LEFT_ASSIGN: <- [0/0] {281} - ¦ ¦ °--expr: [0/0] {283} + ¦ ¦ °--expr: b [0/0] {283} ¦ ¦ °--SYMBOL: b [0/0] {282} - ¦ ¦--expr: [1/2] {284} - ¦ ¦ ¦--expr: [0/0] {286} + ¦ ¦--expr: a->b [1/2] {284} + ¦ ¦ ¦--expr: a [0/0] {286} ¦ ¦ ¦ °--SYMBOL: a [0/0] {285} ¦ ¦ ¦--RIGHT_ASSIGN: -> [0/0] {287} - ¦ ¦ °--expr: [0/0] {289} + ¦ ¦ °--expr: b [0/0] {289} ¦ ¦ °--SYMBOL: b [0/0] {288} - ¦ ¦--expr: [1/2] {289.9} - ¦ ¦ ¦--expr: [0/0] {291} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {290} - ¦ ¦ ¦--EQ_ASSIGN: = [0/0] {292} - ¦ ¦ °--expr: [0/0] {294} - ¦ ¦ °--SYMBOL: b [0/0] {293} - ¦ ¦--expr: [1/2] {295} - ¦ ¦ ¦--expr: [0/0] {297} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {296} - ¦ ¦ ¦--LT: < [0/0] {298} - ¦ ¦ °--expr: [0/0] {300} - ¦ ¦ °--SYMBOL: b [0/0] {299} - ¦ ¦--expr: [1/2] {301} - ¦ ¦ ¦--expr: [0/0] {303} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {302} - ¦ ¦ ¦--GT: > [0/0] {304} - ¦ ¦ °--expr: [0/0] {306} - ¦ ¦ °--SYMBOL: b [0/0] {305} - ¦ ¦--expr: [1/2] {307} - ¦ ¦ ¦--expr: [0/0] {309} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {308} - ¦ ¦ ¦--'*': * [0/0] {310} - ¦ ¦ °--expr: [0/0] {312} - ¦ ¦ °--SYMBOL: b [0/0] {311} - ¦ ¦--expr: [1/2] {313} - ¦ ¦ ¦--expr: [0/0] {315} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {314} - ¦ ¦ ¦--'/': / [0/0] {316} - ¦ ¦ °--expr: [0/0] {318} - ¦ ¦ °--SYMBOL: b [0/0] {317} - ¦ ¦--expr: [1/2] {319} - ¦ ¦ ¦--expr: [0/0] {321} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {320} - ¦ ¦ ¦--'^': ^ [0/0] {322} - ¦ ¦ °--expr: [0/0] {324} - ¦ ¦ °--SYMBOL: b [0/0] {323} - ¦ ¦--expr: [1/2] {325} - ¦ ¦ ¦--expr: [0/0] {327} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {326} - ¦ ¦ 
¦--AND: & [0/0] {328} - ¦ ¦ °--expr: [0/0] {330} - ¦ ¦ °--SYMBOL: b [0/0] {329} - ¦ ¦--expr: [1/2] {331} - ¦ ¦ ¦--expr: [0/0] {333} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {332} - ¦ ¦ ¦--OR: | [0/0] {334} - ¦ ¦ °--expr: [0/0] {336} - ¦ ¦ °--SYMBOL: b [0/0] {335} - ¦ ¦--expr: [1/2] {337} - ¦ ¦ ¦--expr: [0/0] {339} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {338} - ¦ ¦ ¦--LEFT_ASSIGN: := [0/0] {340} - ¦ ¦ °--expr: [0/0] {342} - ¦ ¦ °--SYMBOL: b [0/0] {341} - ¦ ¦--expr: [2/2] {343} - ¦ ¦ ¦--expr: [0/0] {345} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {344} - ¦ ¦ ¦--'+': + [0/0] {346} - ¦ ¦ °--expr: [0/0] {348} - ¦ ¦ °--SYMBOL: b [0/0] {347} - ¦ ¦--expr: [1/2] {349} - ¦ ¦ ¦--expr: [0/0] {351} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {350} - ¦ ¦ ¦--'-': - [0/0] {352} - ¦ ¦ °--expr: [0/0] {354} - ¦ ¦ °--SYMBOL: b [0/0] {353} - ¦ ¦--expr: [1/2] {355} - ¦ ¦ ¦--expr: [0/0] {357} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {356} - ¦ ¦ ¦--'+': + [0/0] {358} - ¦ ¦ °--expr: [0/0] {359} - ¦ ¦ ¦--'+': + [0/0] {360} - ¦ ¦ °--expr: [0/0] {362} - ¦ ¦ °--SYMBOL: b [0/0] {361} - ¦ ¦--expr: [1/2] {363} - ¦ ¦ ¦--expr: [0/0] {365} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {364} - ¦ ¦ ¦--'+': + [0/0] {366} - ¦ ¦ °--expr: [0/0] {367} - ¦ ¦ ¦--'-': - [0/0] {368} - ¦ ¦ °--expr: [0/0] {370} - ¦ ¦ °--SYMBOL: b [0/0] {369} - ¦ ¦--expr: [1/2] {371} - ¦ ¦ ¦--expr: [0/0] {373} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {372} - ¦ ¦ ¦--'+': + [0/0] {374} - ¦ ¦ °--expr: [0/0] {375} - ¦ ¦ ¦--'+': + [0/0] {376} - ¦ ¦ °--expr: [0/0] {378} - ¦ ¦ °--SYMBOL: b [0/0] {377} - ¦ ¦--expr: [1/2] {379} - ¦ ¦ ¦--expr: [0/0] {381} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {380} - ¦ ¦ ¦--'-': - [0/0] {382} - ¦ ¦ °--expr: [0/0] {383} - ¦ ¦ ¦--'+': + [0/0] {384} - ¦ ¦ °--expr: [0/0] {386} - ¦ ¦ °--SYMBOL: b [0/0] {385} - ¦ ¦--expr: [1/2] {387} - ¦ ¦ ¦--expr: [0/0] {389} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {388} - ¦ ¦ ¦--'-': - [0/0] {390} - ¦ ¦ °--expr: [0/0] {391} - ¦ ¦ ¦--'-': - [0/0] {392} - ¦ ¦ °--expr: [0/0] {394} - ¦ ¦ °--SYMBOL: b [0/0] {393} - ¦ ¦--expr: [1/2] {395} - ¦ ¦ ¦--expr: [0/0] {397} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {396} - ¦ ¦ ¦--'+': + [0/0] {398} - ¦ ¦ °--expr: [0/0] {399} - ¦ ¦ ¦--'-': - [0/0] {400} - ¦ ¦ °--expr: [0/0] {401} - ¦ ¦ ¦--'-': - [0/0] {402} - ¦ ¦ °--expr: [0/0] {404} - ¦ ¦ °--SYMBOL: b [0/0] {403} - ¦ ¦--expr: [1/2] {405} - ¦ ¦ ¦--expr: [0/0] {407} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {406} - ¦ ¦ ¦--'-': - [0/0] {408} - ¦ ¦ °--expr: [0/0] {409} - ¦ ¦ ¦--'-': - [0/0] {410} - ¦ ¦ °--expr: [0/0] {411} - ¦ ¦ ¦--'+': + [0/0] {412} - ¦ ¦ °--expr: [0/0] {414} - ¦ ¦ °--SYMBOL: b [0/0] {413} - ¦ ¦--expr: [1/2] {415} - ¦ ¦ ¦--expr: [0/0] {417} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {416} - ¦ ¦ ¦--'(': ( [0/1] {418} - ¦ ¦ ¦--expr: [0/0] {419} - ¦ ¦ ¦ ¦--'+': + [0/1] {420} - ¦ ¦ ¦ °--expr: [0/0] {422} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {421} - ¦ ¦ °--')': ) [0/0] {423} - ¦ ¦--expr: [1/2] {424} - ¦ ¦ ¦--expr: [0/0] {426} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {425} - ¦ ¦ ¦--'(': ( [0/1] {427} - ¦ ¦ ¦--expr: [0/0] {428} - ¦ ¦ ¦ ¦--'-': - [0/1] {429} - ¦ ¦ ¦ °--expr: [0/0] {431} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {430} - ¦ ¦ °--')': ) [0/0] {432} - ¦ ¦--expr: [1/2] {433} - ¦ ¦ ¦--expr: [0/0] {435} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {434} - ¦ ¦ ¦--'(': ( [0/0] {436} - ¦ ¦ ¦--expr: [0/0] {438} - ¦ ¦ ¦ °--NUM_CONST: 5 [0/0] {437} - ¦ ¦ ¦--',': , [0/1] {439} - ¦ ¦ ¦--expr: [0/0] {440} - ¦ ¦ ¦ ¦--'+': + [0/1] {441} - ¦ ¦ ¦ °--expr: [0/0] {443} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {442} - ¦ ¦ °--')': ) [0/0] {444} - ¦ ¦--expr: [1/2] {445} - ¦ ¦ ¦--expr: [0/0] {447} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {446} - ¦ ¦ ¦--'(': ( [0/0] {448} - ¦ ¦ ¦--expr: [0/0] {450} - ¦ ¦ ¦ 
°--NUM_CONST: 5 [0/0] {449} - ¦ ¦ ¦--',': , [0/1] {451} - ¦ ¦ ¦--expr: [0/0] {452} - ¦ ¦ ¦ ¦--'-': - [0/1] {453} - ¦ ¦ ¦ °--expr: [0/0] {455} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {454} - ¦ ¦ °--')': ) [0/0] {456} - ¦ ¦--COMMENT: # Onl [2/2] {457} - ¦ ¦--expr: [1/2] {458} - ¦ ¦ ¦--expr: [0/0] {460} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {459} - ¦ ¦ ¦--'(': ( [0/4] {461} - ¦ ¦ ¦--expr: [1/0] {463} - ¦ ¦ ¦ °--SYMBOL: prese [0/0] {462} - ¦ ¦ ¦--',': , [0/1] {464} - ¦ ¦ ¦--expr: [0/0] {466} - ¦ ¦ ¦ °--SYMBOL: dista [0/0] {465} - ¦ ¦ ¦--',': , [0/4] {467} - ¦ ¦ ¦--expr: [1/0] {469} - ¦ ¦ ¦ °--SYMBOL: after [0/0] {468} - ¦ ¦ ¦--',': , [0/5] {470} - ¦ ¦ ¦--expr: [0/0] {472} - ¦ ¦ ¦ °--SYMBOL: comma [0/0] {471} - ¦ ¦ ¦--',': , [0/4] {473} - ¦ ¦ ¦--expr: [1/0] {475} - ¦ ¦ ¦ °--SYMBOL: given [0/0] {474} - ¦ ¦ ¦--',': , [0/0] {476} - ¦ ¦ ¦--expr: [0/2] {478} - ¦ ¦ ¦ °--SYMBOL: one [0/0] {477} - ¦ ¦ °--')': ) [1/0] {479} - ¦ ¦--expr: [2/2] {480} - ¦ ¦ ¦--IF: if [0/0] {481} - ¦ ¦ ¦--'(': ( [0/0] {482} - ¦ ¦ ¦--expr: [0/0] {484} - ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {483} - ¦ ¦ ¦--')': ) [0/0] {485} - ¦ ¦ °--expr: [0/0] {486} - ¦ ¦ ¦--'{': { [0/4] {487} - ¦ ¦ ¦--expr: [1/2] {489} - ¦ ¦ ¦ °--NUM_CONST: FALSE [0/0] {488} - ¦ ¦ °--'}': } [1/0] {490} - ¦ ¦--expr: [2/2] {491} - ¦ ¦ ¦--IF: if [0/0] {492} - ¦ ¦ ¦--'(': ( [0/0] {493} - ¦ ¦ ¦--expr: [0/0] {495} - ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {494} - ¦ ¦ ¦--')': ) [0/0] {496} - ¦ ¦ ¦--expr: [0/0] {497} - ¦ ¦ ¦ ¦--'{': { [0/4] {498} - ¦ ¦ ¦ ¦--expr: [1/2] {500} - ¦ ¦ ¦ ¦ °--NUM_CONST: FALSE [0/0] {499} - ¦ ¦ ¦ °--'}': } [1/0] {501} - ¦ ¦ ¦--ELSE: else [0/0] {502} - ¦ ¦ °--expr: [0/0] {503} - ¦ ¦ ¦--'{': { [0/4] {504} - ¦ ¦ ¦--expr: [1/2] {506} - ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {505} - ¦ ¦ °--'}': } [1/0] {507} - ¦ ¦--expr: [2/2] {508} - ¦ ¦ ¦--WHILE: while [0/0] {509} - ¦ ¦ ¦--'(': ( [0/0] {510} - ¦ ¦ ¦--expr: [0/0] {512} - ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {511} - ¦ ¦ ¦--')': ) [0/0] {513} - ¦ ¦ °--expr: [0/0] {514} - ¦ ¦ ¦--'{': { [0/4] {515} - ¦ ¦ ¦--expr: [1/2] {517} - ¦ ¦ ¦ °--NUM_CONST: FALSE [0/0] {516} - ¦ ¦ °--'}': } [1/0] {518} - ¦ ¦--expr: [2/2] {519} - ¦ ¦ ¦--expr: [0/1] {521} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: singl [0/0] {520} - ¦ ¦ ¦--'(': ( [0/1] {522} - ¦ ¦ ¦--expr: [0/1] {524} - ¦ ¦ ¦ °--STR_CONST: "func [0/0] {523} - ¦ ¦ ¦--',': , [0/0] {525} - ¦ ¦ ¦--expr: [0/1] {527} - ¦ ¦ ¦ °--SYMBOL: call [0/0] {526} - ¦ ¦ °--')': ) [0/0] {528} - ¦ ¦--expr: [2/2] {529} - ¦ ¦ ¦--expr: [0/1] {531} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: multi [0/0] {530} - ¦ ¦ ¦--'(': ( [0/2] {532} - ¦ ¦ ¦--expr: [1/0] {534} - ¦ ¦ ¦ °--STR_CONST: "func [0/0] {533} - ¦ ¦ ¦--',': , [0/1] {535} - ¦ ¦ ¦--expr: [0/1] {537} - ¦ ¦ ¦ °--SYMBOL: call [0/0] {536} - ¦ ¦ °--')': ) [0/0] {538} - ¦ ¦--expr: [2/2] {539} - ¦ ¦ ¦--expr: [0/1] {541} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: neste [0/0] {540} - ¦ ¦ ¦--'(': ( [0/1] {542} - ¦ ¦ ¦--expr: [0/1] {543} - ¦ ¦ ¦ ¦--expr: [0/1] {545} - ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: funct [0/0] {544} - ¦ ¦ ¦ ¦--'(': ( [0/1] {546} - ¦ ¦ ¦ ¦--expr: [0/1] {548} - ¦ ¦ ¦ ¦ °--STR_CONST: "in" [0/0] {547} - ¦ ¦ ¦ ¦--',': , [0/0] {549} - ¦ ¦ ¦ ¦--expr: [0/1] {551} - ¦ ¦ ¦ ¦ °--SYMBOL: one [0/0] {550} - ¦ ¦ ¦ ¦--',': , [0/0] {552} - ¦ ¦ ¦ ¦--expr: [0/1] {554} - ¦ ¦ ¦ ¦ °--SYMBOL: line [0/0] {553} - ¦ ¦ ¦ °--')': ) [0/0] {555} - ¦ ¦ °--')': ) [0/0] {556} - ¦ ¦--expr: [2/2] {557} - ¦ ¦ ¦--expr: [0/1] {559} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: neste [0/0] {558} - ¦ ¦ ¦--'(': ( [0/1] {560} - ¦ ¦ ¦--expr: [0/1] {561} - ¦ ¦ ¦ ¦--expr: [0/1] {563} - ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: funct [0/0] {562} - 
¦ ¦ ¦ ¦--'(': ( [0/2] {564} - ¦ ¦ ¦ ¦--expr: [1/0] {566} - ¦ ¦ ¦ ¦ °--STR_CONST: "in" [0/0] {565} - ¦ ¦ ¦ ¦--',': , [0/6] {567} - ¦ ¦ ¦ ¦--expr: [1/0] {569} - ¦ ¦ ¦ ¦ °--SYMBOL: multi [0/0] {568} - ¦ ¦ ¦ ¦--',': , [0/0] {570} - ¦ ¦ ¦ ¦--expr: [0/1] {572} - ¦ ¦ ¦ ¦ °--SYMBOL: lines [0/0] {571} - ¦ ¦ ¦ °--')': ) [0/0] {573} - ¦ ¦ °--')': ) [0/0] {574} - ¦ ¦--expr: [2/2] {575} - ¦ ¦ ¦--expr: [0/0] {577} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: neste [0/0] {576} - ¦ ¦ ¦--'(': ( [0/2] {578} - ¦ ¦ ¦--expr: [1/0] {579} - ¦ ¦ ¦ ¦--expr: [0/1] {581} - ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: funct [0/0] {580} - ¦ ¦ ¦ ¦--'(': ( [0/1] {582} - ¦ ¦ ¦ ¦--expr: [0/1] {584} - ¦ ¦ ¦ ¦ °--SYMBOL: with [0/0] {583} - ¦ ¦ ¦ °--')': ) [0/0] {585} - ¦ ¦ ¦--',': , [0/6] {586} - ¦ ¦ ¦--expr: [1/2] {588} - ¦ ¦ ¦ °--SYMBOL: many [0/0] {587} - ¦ ¦ ¦--',': , [1/5] {589} - ¦ ¦ ¦--expr: [0/2] {591} - ¦ ¦ ¦ °--SYMBOL: first [0/0] {590} - ¦ ¦ °--')': ) [0/0] {592} - ¦ ¦--expr: [2/2] {593} - ¦ ¦ ¦--expr: [0/0] {595} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: neste [0/0] {594} - ¦ ¦ ¦--'(': ( [0/4] {596} - ¦ ¦ ¦--expr: [1/0] {597} - ¦ ¦ ¦ ¦--expr: [0/1] {599} - ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: funct [0/0] {598} - ¦ ¦ ¦ ¦--'(': ( [0/1] {600} - ¦ ¦ ¦ ¦--expr: [0/1] {602} - ¦ ¦ ¦ ¦ °--SYMBOL: with [0/0] {601} - ¦ ¦ ¦ °--')': ) [0/0] {603} - ¦ ¦ ¦--',': , [0/2] {604} - ¦ ¦ ¦--COMMENT: # a c [0/4] {605} - ¦ ¦ ¦--expr: [1/1] {607} - ¦ ¦ ¦ °--SYMBOL: many [0/0] {606} - ¦ ¦ ¦--COMMENT: #more [0/4] {608} - ¦ ¦ ¦--',': , [1/5] {609} - ¦ ¦ ¦--expr: [0/2] {611} - ¦ ¦ ¦ °--SYMBOL: first [0/0] {610} - ¦ ¦ °--')': ) [0/0] {612} - ¦ ¦--expr: [2/0] {613} - ¦ ¦ ¦--expr: [0/0] {615} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: diffi [0/0] {614} - ¦ ¦ ¦--'(': ( [0/0] {616} - ¦ ¦ ¦--expr: [0/0] {617} - ¦ ¦ ¦ ¦--expr: [0/0] {619} - ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: neste [0/0] {618} - ¦ ¦ ¦ ¦--'(': ( [0/4] {620} - ¦ ¦ ¦ ¦--expr: [1/0] {622} - ¦ ¦ ¦ ¦ °--STR_CONST: "func [0/0] {621} - ¦ ¦ ¦ ¦--',': , [0/1] {623} - ¦ ¦ ¦ ¦--expr: [0/2] {625} - ¦ ¦ ¦ ¦ °--SYMBOL: call [0/0] {624} - ¦ ¦ ¦ °--')': ) [1/0] {626} - ¦ ¦ ¦--',': , [0/4] {627} - ¦ ¦ ¦--expr: [1/0] {629} - ¦ ¦ ¦ °--SYMBOL: with [0/0] {628} - ¦ ¦ ¦--',': , [0/1] {630} - ¦ ¦ ¦--expr: [0/0] {632} - ¦ ¦ ¦ °--SYMBOL: more [0/0] {631} - ¦ ¦ ¦--',': , [0/1] {633} - ¦ ¦ ¦--expr: [0/2] {635} - ¦ ¦ ¦ °--SYMBOL: args [0/0] {634} - ¦ ¦ °--')': ) [1/0] {636} - ¦ °--'}': } [1/0] {637} - ¦--COMMENT: # for [3/0] {638} - ¦--expr: [1/0] {639} - ¦ ¦--expr: [0/0] {641} - ¦ ¦ °--SYMBOL_FUNCTION_CALL: lm [0/0] {640} - ¦ ¦--'(': ( [0/0] {642} - ¦ ¦--expr: [0/0] {643} - ¦ ¦ ¦--expr: [0/0] {645} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {644} - ¦ ¦ ¦--'~': ~ [0/0] {646} - ¦ ¦ °--expr: [0/0] {647} - ¦ ¦ ¦--expr: [0/0] {649} - ¦ ¦ ¦ °--SYMBOL: b [0/0] {648} - ¦ ¦ ¦--'+': + [0/0] {650} - ¦ ¦ °--expr: [0/0] {652} - ¦ ¦ °--SYMBOL: c [0/0] {651} - ¦ ¦--',': , [0/0] {653} - ¦ ¦--SYMBOL_SUB: data [0/0] {654} - ¦ ¦--EQ_SUB: = [0/0] {655} - ¦ ¦--expr: [0/0] {657} - ¦ ¦ °--NUM_CONST: NA [0/0] {656} - ¦ °--')': ) [0/0] {658} - ¦--expr: [1/0] {659} - ¦ ¦--expr: [0/0] {661} - ¦ ¦ °--SYMBOL_FUNCTION_CALL: lm [0/0] {660} - ¦ ¦--'(': ( [0/0] {662} - ¦ ¦--expr: [0/0] {663} - ¦ ¦ ¦--expr: [0/0] {665} - ¦ ¦ ¦ °--SYMBOL: a [0/0] {664} - ¦ ¦ ¦--'~': ~ [0/0] {666} - ¦ ¦ °--expr: [0/0] {667} - ¦ ¦ ¦--expr: [0/0] {669} - ¦ ¦ ¦ °--SYMBOL: . 
[0/0] {668} - ¦ ¦ ¦--'-': - [0/0] {670} - ¦ ¦ °--expr: [0/0] {672} - ¦ ¦ °--NUM_CONST: 1 [0/0] {671} - ¦ ¦--',': , [0/0] {673} - ¦ ¦--SYMBOL_SUB: data [0/0] {674} - ¦ ¦--EQ_SUB: = [0/0] {675} - ¦ ¦--expr: [0/0] {677} - ¦ ¦ °--NUM_CONST: NA [0/0] {676} - ¦ °--')': ) [0/0] {678} - ¦--expr: [1/0] {679} - ¦ ¦--expr: [0/0] {681} - ¦ ¦ °--SYMBOL: a [0/0] {680} - ¦ ¦--'~': ~ [0/0] {682} - ¦ °--expr: [0/0] {683} - ¦ ¦--expr: [0/0] {685} - ¦ ¦ °--SYMBOL: b [0/0] {684} - ¦ ¦--':': : [0/0] {686} - ¦ °--expr: [0/0] {688} - ¦ °--SYMBOL: c [0/0] {687} - °--expr: [1/0] {689} - ¦--expr: [0/0] {691} - ¦ °--SYMBOL: a [0/0] {690} - ¦--'~': ~ [0/0] {692} - °--expr: [0/0] {693} - ¦--expr: [0/1] {695} - ¦ °--SYMBOL: b [0/0] {694} - ¦--':': : [0/0] {696} - °--expr: [0/0] {698} - °--SYMBOL: c [0/0] {697} + ¦ ¦--expr_or_assign_or_help: a=b [1/2] {290} + ¦ ¦ ¦--expr: a [0/0] {292} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {291} + ¦ ¦ ¦--EQ_ASSIGN: = [0/0] {293} + ¦ ¦ °--expr: b [0/0] {295} + ¦ ¦ °--SYMBOL: b [0/0] {294} + ¦ ¦--expr: ab [1/2] {302} + ¦ ¦ ¦--expr: a [0/0] {304} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {303} + ¦ ¦ ¦--GT: > [0/0] {305} + ¦ ¦ °--expr: b [0/0] {307} + ¦ ¦ °--SYMBOL: b [0/0] {306} + ¦ ¦--expr: a*b [1/2] {308} + ¦ ¦ ¦--expr: a [0/0] {310} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {309} + ¦ ¦ ¦--'*': * [0/0] {311} + ¦ ¦ °--expr: b [0/0] {313} + ¦ ¦ °--SYMBOL: b [0/0] {312} + ¦ ¦--expr: a/b [1/2] {314} + ¦ ¦ ¦--expr: a [0/0] {316} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {315} + ¦ ¦ ¦--'/': / [0/0] {317} + ¦ ¦ °--expr: b [0/0] {319} + ¦ ¦ °--SYMBOL: b [0/0] {318} + ¦ ¦--expr: a^b [1/2] {320} + ¦ ¦ ¦--expr: a [0/0] {322} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {321} + ¦ ¦ ¦--'^': ^ [0/0] {323} + ¦ ¦ °--expr: b [0/0] {325} + ¦ ¦ °--SYMBOL: b [0/0] {324} + ¦ ¦--expr: a&b [1/2] {326} + ¦ ¦ ¦--expr: a [0/0] {328} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {327} + ¦ ¦ ¦--AND: & [0/0] {329} + ¦ ¦ °--expr: b [0/0] {331} + ¦ ¦ °--SYMBOL: b [0/0] {330} + ¦ ¦--expr: a|b [1/2] {332} + ¦ ¦ ¦--expr: a [0/0] {334} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {333} + ¦ ¦ ¦--OR: | [0/0] {335} + ¦ ¦ °--expr: b [0/0] {337} + ¦ ¦ °--SYMBOL: b [0/0] {336} + ¦ ¦--expr: a:=b [1/2] {338} + ¦ ¦ ¦--expr: a [0/0] {340} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {339} + ¦ ¦ ¦--LEFT_ASSIGN: := [0/0] {341} + ¦ ¦ °--expr: b [0/0] {343} + ¦ ¦ °--SYMBOL: b [0/0] {342} + ¦ ¦--expr: a+b [2/2] {344} + ¦ ¦ ¦--expr: a [0/0] {346} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {345} + ¦ ¦ ¦--'+': + [0/0] {347} + ¦ ¦ °--expr: b [0/0] {349} + ¦ ¦ °--SYMBOL: b [0/0] {348} + ¦ ¦--expr: a-b [1/2] {350} + ¦ ¦ ¦--expr: a [0/0] {352} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {351} + ¦ ¦ ¦--'-': - [0/0] {353} + ¦ ¦ °--expr: b [0/0] {355} + ¦ ¦ °--SYMBOL: b [0/0] {354} + ¦ ¦--expr: a++b [1/2] {356} + ¦ ¦ ¦--expr: a [0/0] {358} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {357} + ¦ ¦ ¦--'+': + [0/0] {359} + ¦ ¦ °--expr: +b [0/0] {360} + ¦ ¦ ¦--'+': + [0/0] {361} + ¦ ¦ °--expr: b [0/0] {363} + ¦ ¦ °--SYMBOL: b [0/0] {362} + ¦ ¦--expr: a+-b [1/2] {364} + ¦ ¦ ¦--expr: a [0/0] {366} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {365} + ¦ ¦ ¦--'+': + [0/0] {367} + ¦ ¦ °--expr: -b [0/0] {368} + ¦ ¦ ¦--'-': - [0/0] {369} + ¦ ¦ °--expr: b [0/0] {371} + ¦ ¦ °--SYMBOL: b [0/0] {370} + ¦ ¦--expr: a++b [1/2] {372} + ¦ ¦ ¦--expr: a [0/0] {374} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {373} + ¦ ¦ ¦--'+': + [0/0] {375} + ¦ ¦ °--expr: +b [0/0] {376} + ¦ ¦ ¦--'+': + [0/0] {377} + ¦ ¦ °--expr: b [0/0] {379} + ¦ ¦ °--SYMBOL: b [0/0] {378} + ¦ ¦--expr: a-+b [1/2] {380} + ¦ ¦ ¦--expr: a [0/0] {382} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {381} + ¦ ¦ ¦--'-': - [0/0] {383} + ¦ ¦ °--expr: +b [0/0] {384} + ¦ ¦ ¦--'+': + [0/0] {385} + ¦ ¦ °--expr: b [0/0] {387} + ¦ ¦ °--SYMBOL: b 
[0/0] {386} + ¦ ¦--expr: a--b [1/2] {388} + ¦ ¦ ¦--expr: a [0/0] {390} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {389} + ¦ ¦ ¦--'-': - [0/0] {391} + ¦ ¦ °--expr: -b [0/0] {392} + ¦ ¦ ¦--'-': - [0/0] {393} + ¦ ¦ °--expr: b [0/0] {395} + ¦ ¦ °--SYMBOL: b [0/0] {394} + ¦ ¦--expr: a+--b [1/2] {396} + ¦ ¦ ¦--expr: a [0/0] {398} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {397} + ¦ ¦ ¦--'+': + [0/0] {399} + ¦ ¦ °--expr: --b [0/0] {400} + ¦ ¦ ¦--'-': - [0/0] {401} + ¦ ¦ °--expr: -b [0/0] {402} + ¦ ¦ ¦--'-': - [0/0] {403} + ¦ ¦ °--expr: b [0/0] {405} + ¦ ¦ °--SYMBOL: b [0/0] {404} + ¦ ¦--expr: a--+b [1/2] {406} + ¦ ¦ ¦--expr: a [0/0] {408} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {407} + ¦ ¦ ¦--'-': - [0/0] {409} + ¦ ¦ °--expr: -+b [0/0] {410} + ¦ ¦ ¦--'-': - [0/0] {411} + ¦ ¦ °--expr: +b [0/0] {412} + ¦ ¦ ¦--'+': + [0/0] {413} + ¦ ¦ °--expr: b [0/0] {415} + ¦ ¦ °--SYMBOL: b [0/0] {414} + ¦ ¦--expr: call( [1/2] {416} + ¦ ¦ ¦--expr: call [0/0] {418} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {417} + ¦ ¦ ¦--'(': ( [0/1] {419} + ¦ ¦ ¦--expr: + a [0/0] {420} + ¦ ¦ ¦ ¦--'+': + [0/1] {421} + ¦ ¦ ¦ °--expr: a [0/0] {423} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {422} + ¦ ¦ °--')': ) [0/0] {424} + ¦ ¦--expr: call( [1/2] {425} + ¦ ¦ ¦--expr: call [0/0] {427} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {426} + ¦ ¦ ¦--'(': ( [0/1] {428} + ¦ ¦ ¦--expr: - a [0/0] {429} + ¦ ¦ ¦ ¦--'-': - [0/1] {430} + ¦ ¦ ¦ °--expr: a [0/0] {432} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {431} + ¦ ¦ °--')': ) [0/0] {433} + ¦ ¦--expr: call( [1/2] {434} + ¦ ¦ ¦--expr: call [0/0] {436} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {435} + ¦ ¦ ¦--'(': ( [0/0] {437} + ¦ ¦ ¦--expr: 5 [0/0] {439} + ¦ ¦ ¦ °--NUM_CONST: 5 [0/0] {438} + ¦ ¦ ¦--',': , [0/1] {440} + ¦ ¦ ¦--expr: + a [0/0] {441} + ¦ ¦ ¦ ¦--'+': + [0/1] {442} + ¦ ¦ ¦ °--expr: a [0/0] {444} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {443} + ¦ ¦ °--')': ) [0/0] {445} + ¦ ¦--expr: call( [1/2] {446} + ¦ ¦ ¦--expr: call [0/0] {448} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {447} + ¦ ¦ ¦--'(': ( [0/0] {449} + ¦ ¦ ¦--expr: 5 [0/0] {451} + ¦ ¦ ¦ °--NUM_CONST: 5 [0/0] {450} + ¦ ¦ ¦--',': , [0/1] {452} + ¦ ¦ ¦--expr: - a [0/0] {453} + ¦ ¦ ¦ ¦--'-': - [0/1] {454} + ¦ ¦ ¦ °--expr: a [0/0] {456} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {455} + ¦ ¦ °--')': ) [0/0] {457} + ¦ ¦--COMMENT: # Onl [2/2] {458} + ¦ ¦--expr: call( [1/2] {459} + ¦ ¦ ¦--expr: call [0/0] {461} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {460} + ¦ ¦ ¦--'(': ( [0/4] {462} + ¦ ¦ ¦--expr: prese [1/0] {464} + ¦ ¦ ¦ °--SYMBOL: prese [0/0] {463} + ¦ ¦ ¦--',': , [0/1] {465} + ¦ ¦ ¦--expr: dista [0/0] {467} + ¦ ¦ ¦ °--SYMBOL: dista [0/0] {466} + ¦ ¦ ¦--',': , [0/4] {468} + ¦ ¦ ¦--expr: after [1/0] {470} + ¦ ¦ ¦ °--SYMBOL: after [0/0] {469} + ¦ ¦ ¦--',': , [0/5] {471} + ¦ ¦ ¦--expr: comma [0/0] {473} + ¦ ¦ ¦ °--SYMBOL: comma [0/0] {472} + ¦ ¦ ¦--',': , [0/4] {474} + ¦ ¦ ¦--expr: given [1/0] {476} + ¦ ¦ ¦ °--SYMBOL: given [0/0] {475} + ¦ ¦ ¦--',': , [0/0] {477} + ¦ ¦ ¦--expr: one [0/2] {479} + ¦ ¦ ¦ °--SYMBOL: one [0/0] {478} + ¦ ¦ °--')': ) [1/0] {480} + ¦ ¦--expr: if(TR [2/2] {481} + ¦ ¦ ¦--IF: if [0/0] {482} + ¦ ¦ ¦--'(': ( [0/0] {483} + ¦ ¦ ¦--expr: TRUE [0/0] {485} + ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {484} + ¦ ¦ ¦--')': ) [0/0] {486} + ¦ ¦ °--expr: { + [0/0] {487} + ¦ ¦ ¦--'{': { [0/4] {488} + ¦ ¦ ¦--expr: FALSE [1/2] {490} + ¦ ¦ ¦ °--NUM_CONST: FALSE [0/0] {489} + ¦ ¦ °--'}': } [1/0] {491} + ¦ ¦--expr: if(TR [2/2] {492} + ¦ ¦ ¦--IF: if [0/0] {493} + ¦ ¦ ¦--'(': ( [0/0] {494} + ¦ ¦ ¦--expr: TRUE [0/0] {496} + ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {495} + ¦ ¦ ¦--')': ) [0/0] {497} + ¦ ¦ ¦--expr: { + [0/0] {498} + ¦ ¦ ¦ ¦--'{': { [0/4] 
{499} + ¦ ¦ ¦ ¦--expr: FALSE [1/2] {501} + ¦ ¦ ¦ ¦ °--NUM_CONST: FALSE [0/0] {500} + ¦ ¦ ¦ °--'}': } [1/0] {502} + ¦ ¦ ¦--ELSE: else [0/0] {503} + ¦ ¦ °--expr: { + [0/0] {504} + ¦ ¦ ¦--'{': { [0/4] {505} + ¦ ¦ ¦--expr: TRUE [1/2] {507} + ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {506} + ¦ ¦ °--'}': } [1/0] {508} + ¦ ¦--expr: while [2/2] {509} + ¦ ¦ ¦--WHILE: while [0/0] {510} + ¦ ¦ ¦--'(': ( [0/0] {511} + ¦ ¦ ¦--expr: TRUE [0/0] {513} + ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {512} + ¦ ¦ ¦--')': ) [0/0] {514} + ¦ ¦ °--expr: { + [0/0] {515} + ¦ ¦ ¦--'{': { [0/4] {516} + ¦ ¦ ¦--expr: FALSE [1/2] {518} + ¦ ¦ ¦ °--NUM_CONST: FALSE [0/0] {517} + ¦ ¦ °--'}': } [1/0] {519} + ¦ ¦--expr: singl [2/2] {520} + ¦ ¦ ¦--expr: singl [0/1] {522} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: singl [0/0] {521} + ¦ ¦ ¦--'(': ( [0/1] {523} + ¦ ¦ ¦--expr: "func [0/1] {525} + ¦ ¦ ¦ °--STR_CONST: "func [0/0] {524} + ¦ ¦ ¦--',': , [0/0] {526} + ¦ ¦ ¦--expr: call [0/1] {528} + ¦ ¦ ¦ °--SYMBOL: call [0/0] {527} + ¦ ¦ °--')': ) [0/0] {529} + ¦ ¦--expr: multi [2/2] {530} + ¦ ¦ ¦--expr: multi [0/1] {532} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: multi [0/0] {531} + ¦ ¦ ¦--'(': ( [0/2] {533} + ¦ ¦ ¦--expr: "func [1/0] {535} + ¦ ¦ ¦ °--STR_CONST: "func [0/0] {534} + ¦ ¦ ¦--',': , [0/1] {536} + ¦ ¦ ¦--expr: call [0/1] {538} + ¦ ¦ ¦ °--SYMBOL: call [0/0] {537} + ¦ ¦ °--')': ) [0/0] {539} + ¦ ¦--expr: neste [2/2] {540} + ¦ ¦ ¦--expr: neste [0/1] {542} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: neste [0/0] {541} + ¦ ¦ ¦--'(': ( [0/1] {543} + ¦ ¦ ¦--expr: funct [0/1] {544} + ¦ ¦ ¦ ¦--expr: funct [0/1] {546} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: funct [0/0] {545} + ¦ ¦ ¦ ¦--'(': ( [0/1] {547} + ¦ ¦ ¦ ¦--expr: "in" [0/1] {549} + ¦ ¦ ¦ ¦ °--STR_CONST: "in" [0/0] {548} + ¦ ¦ ¦ ¦--',': , [0/0] {550} + ¦ ¦ ¦ ¦--expr: one [0/1] {552} + ¦ ¦ ¦ ¦ °--SYMBOL: one [0/0] {551} + ¦ ¦ ¦ ¦--',': , [0/0] {553} + ¦ ¦ ¦ ¦--expr: line [0/1] {555} + ¦ ¦ ¦ ¦ °--SYMBOL: line [0/0] {554} + ¦ ¦ ¦ °--')': ) [0/0] {556} + ¦ ¦ °--')': ) [0/0] {557} + ¦ ¦--expr: neste [2/2] {558} + ¦ ¦ ¦--expr: neste [0/1] {560} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: neste [0/0] {559} + ¦ ¦ ¦--'(': ( [0/1] {561} + ¦ ¦ ¦--expr: funct [0/1] {562} + ¦ ¦ ¦ ¦--expr: funct [0/1] {564} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: funct [0/0] {563} + ¦ ¦ ¦ ¦--'(': ( [0/2] {565} + ¦ ¦ ¦ ¦--expr: "in" [1/0] {567} + ¦ ¦ ¦ ¦ °--STR_CONST: "in" [0/0] {566} + ¦ ¦ ¦ ¦--',': , [0/6] {568} + ¦ ¦ ¦ ¦--expr: multi [1/0] {570} + ¦ ¦ ¦ ¦ °--SYMBOL: multi [0/0] {569} + ¦ ¦ ¦ ¦--',': , [0/0] {571} + ¦ ¦ ¦ ¦--expr: lines [0/1] {573} + ¦ ¦ ¦ ¦ °--SYMBOL: lines [0/0] {572} + ¦ ¦ ¦ °--')': ) [0/0] {574} + ¦ ¦ °--')': ) [0/0] {575} + ¦ ¦--expr: neste [2/2] {576} + ¦ ¦ ¦--expr: neste [0/0] {578} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: neste [0/0] {577} + ¦ ¦ ¦--'(': ( [0/2] {579} + ¦ ¦ ¦--expr: funct [1/0] {580} + ¦ ¦ ¦ ¦--expr: funct [0/1] {582} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: funct [0/0] {581} + ¦ ¦ ¦ ¦--'(': ( [0/1] {583} + ¦ ¦ ¦ ¦--expr: with [0/1] {585} + ¦ ¦ ¦ ¦ °--SYMBOL: with [0/0] {584} + ¦ ¦ ¦ °--')': ) [0/0] {586} + ¦ ¦ ¦--',': , [0/6] {587} + ¦ ¦ ¦--expr: many [1/2] {589} + ¦ ¦ ¦ °--SYMBOL: many [0/0] {588} + ¦ ¦ ¦--',': , [1/5] {590} + ¦ ¦ ¦--expr: first [0/2] {592} + ¦ ¦ ¦ °--SYMBOL: first [0/0] {591} + ¦ ¦ °--')': ) [0/0] {593} + ¦ ¦--expr: neste [2/2] {594} + ¦ ¦ ¦--expr: neste [0/0] {596} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: neste [0/0] {595} + ¦ ¦ ¦--'(': ( [0/4] {597} + ¦ ¦ ¦--expr: funct [1/0] {598} + ¦ ¦ ¦ ¦--expr: funct [0/1] {600} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: funct [0/0] {599} + ¦ ¦ ¦ ¦--'(': ( [0/1] {601} + ¦ ¦ ¦ ¦--expr: with [0/1] {603} + ¦ ¦ ¦ ¦ 
°--SYMBOL: with [0/0] {602} + ¦ ¦ ¦ °--')': ) [0/0] {604} + ¦ ¦ ¦--',': , [0/2] {605} + ¦ ¦ ¦--COMMENT: # a c [0/4] {606} + ¦ ¦ ¦--expr: many [1/1] {608} + ¦ ¦ ¦ °--SYMBOL: many [0/0] {607} + ¦ ¦ ¦--COMMENT: #more [0/4] {609} + ¦ ¦ ¦--',': , [1/5] {610} + ¦ ¦ ¦--expr: first [0/2] {612} + ¦ ¦ ¦ °--SYMBOL: first [0/0] {611} + ¦ ¦ °--')': ) [0/0] {613} + ¦ ¦--expr: diffi [2/0] {614} + ¦ ¦ ¦--expr: diffi [0/0] {616} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: diffi [0/0] {615} + ¦ ¦ ¦--'(': ( [0/0] {617} + ¦ ¦ ¦--expr: neste [0/0] {618} + ¦ ¦ ¦ ¦--expr: neste [0/0] {620} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: neste [0/0] {619} + ¦ ¦ ¦ ¦--'(': ( [0/4] {621} + ¦ ¦ ¦ ¦--expr: "func [1/0] {623} + ¦ ¦ ¦ ¦ °--STR_CONST: "func [0/0] {622} + ¦ ¦ ¦ ¦--',': , [0/1] {624} + ¦ ¦ ¦ ¦--expr: call [0/2] {626} + ¦ ¦ ¦ ¦ °--SYMBOL: call [0/0] {625} + ¦ ¦ ¦ °--')': ) [1/0] {627} + ¦ ¦ ¦--',': , [0/4] {628} + ¦ ¦ ¦--expr: with [1/0] {630} + ¦ ¦ ¦ °--SYMBOL: with [0/0] {629} + ¦ ¦ ¦--',': , [0/1] {631} + ¦ ¦ ¦--expr: more [0/0] {633} + ¦ ¦ ¦ °--SYMBOL: more [0/0] {632} + ¦ ¦ ¦--',': , [0/1] {634} + ¦ ¦ ¦--expr: args [0/2] {636} + ¦ ¦ ¦ °--SYMBOL: args [0/0] {635} + ¦ ¦ °--')': ) [1/0] {637} + ¦ °--'}': } [1/0] {638} + ¦--COMMENT: # for [3/0] {639} + ¦--expr: lm(a~ [1/0] {640} + ¦ ¦--expr: lm [0/0] {642} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: lm [0/0] {641} + ¦ ¦--'(': ( [0/0] {643} + ¦ ¦--expr: a~b+c [0/0] {644} + ¦ ¦ ¦--expr: a [0/0] {646} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {645} + ¦ ¦ ¦--'~': ~ [0/0] {647} + ¦ ¦ ¦--expr: b [0/0] {650} + ¦ ¦ ¦ °--SYMBOL: b [0/0] {649} + ¦ ¦ ¦--'+': + [0/0] {651} + ¦ ¦ °--expr: c [0/0] {653} + ¦ ¦ °--SYMBOL: c [0/0] {652} + ¦ ¦--',': , [0/0] {654} + ¦ ¦--SYMBOL_SUB: data [0/0] {655} + ¦ ¦--EQ_SUB: = [0/0] {656} + ¦ ¦--expr: NA [0/0] {658} + ¦ ¦ °--NUM_CONST: NA [0/0] {657} + ¦ °--')': ) [0/0] {659} + ¦--expr: lm(a~ [1/0] {660} + ¦ ¦--expr: lm [0/0] {662} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: lm [0/0] {661} + ¦ ¦--'(': ( [0/0] {663} + ¦ ¦--expr: a~.-1 [0/0] {664} + ¦ ¦ ¦--expr: a [0/0] {666} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {665} + ¦ ¦ ¦--'~': ~ [0/0] {667} + ¦ ¦ ¦--expr: . [0/0] {670} + ¦ ¦ ¦ °--SYMBOL: . 
[0/0] {669} + ¦ ¦ ¦--'-': - [0/0] {671} + ¦ ¦ °--expr: 1 [0/0] {673} + ¦ ¦ °--NUM_CONST: 1 [0/0] {672} + ¦ ¦--',': , [0/0] {674} + ¦ ¦--SYMBOL_SUB: data [0/0] {675} + ¦ ¦--EQ_SUB: = [0/0] {676} + ¦ ¦--expr: NA [0/0] {678} + ¦ ¦ °--NUM_CONST: NA [0/0] {677} + ¦ °--')': ) [0/0] {679} + ¦--expr: a~b:c [1/0] {680} + ¦ ¦--expr: a [0/0] {682} + ¦ ¦ °--SYMBOL: a [0/0] {681} + ¦ ¦--'~': ~ [0/0] {683} + ¦ °--expr: b:c [0/0] {684} + ¦ ¦--expr: b [0/0] {686} + ¦ ¦ °--SYMBOL: b [0/0] {685} + ¦ ¦--':': : [0/0] {687} + ¦ °--expr: c [0/0] {689} + ¦ °--SYMBOL: c [0/0] {688} + ¦--expr: a ~ [1/0] {690} + ¦ ¦--expr: a [0/3] {692} + ¦ ¦ °--SYMBOL: a [0/0] {691} + ¦ ¦--'~': ~ [0/3] {693} + ¦ °--expr: b : [0/0] {694} + ¦ ¦--expr: b [0/2] {696} + ¦ ¦ °--SYMBOL: b [0/0] {695} + ¦ ¦--':': : [0/1] {697} + ¦ °--expr: c [0/0] {699} + ¦ °--SYMBOL: c [0/0] {698} + ¦--expr: a~b : [1/0] {700} + ¦ ¦--expr: a [0/0] {702} + ¦ ¦ °--SYMBOL: a [0/0] {701} + ¦ ¦--'~': ~ [0/0] {703} + ¦ °--expr: b :c [0/0] {704} + ¦ ¦--expr: b [0/1] {706} + ¦ ¦ °--SYMBOL: b [0/0] {705} + ¦ ¦--':': : [0/0] {707} + ¦ °--expr: c [0/0] {709} + ¦ °--SYMBOL: c [0/0] {708} + ¦--expr: ~ [1/0] {710} + ¦ ¦--'~': ~ [0/4] {711} + ¦ °--expr: a [0/0] {713} + ¦ °--SYMBOL: a [0/0] {712} + ¦--expr: ~gg [1/0] {714} + ¦ ¦--'~': ~ [0/0] {715} + ¦ °--expr: gg [0/0] {717} + ¦ °--SYMBOL: gg [0/0] {716} + ¦--expr: b~k [1/0] {718} + ¦ ¦--expr: b [0/0] {720} + ¦ ¦ °--SYMBOL: b [0/0] {719} + ¦ ¦--'~': ~ [0/0] {721} + ¦ °--expr: k [0/0] {723} + ¦ °--SYMBOL: k [0/0] {722} + °--expr: call( [1/0] {724} + ¦--expr: call [0/0] {726} + ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {725} + ¦--'(': ( [0/0] {727} + ¦--expr: 1 [0/0] {729} + ¦ °--NUM_CONST: 1 [0/0] {728} + ¦--',': , [0/1] {730} + ¦--expr: ~ qq [0/0] {731} + ¦ ¦--'~': ~ [0/1] {732} + ¦ °--expr: qq [0/0] {734} + ¦ °--SYMBOL: qq [0/0] {733} + °--')': ) [0/0] {735} diff --git a/tests/testthat/strict/strict-out.R b/tests/testthat/strict/strict-out.R index 32b2c331b..3422f4e5b 100644 --- a/tests/testthat/strict/strict-out.R +++ b/tests/testthat/strict/strict-out.R @@ -65,7 +65,7 @@ test <- function() { a > b a * b a / b - a ^ b + a^b a & b a | b a := b @@ -87,7 +87,7 @@ test <- function() { # Only with conservative settings: call( preserves, distance, - after, commas, + after, commas, given_has, one ) @@ -120,8 +120,8 @@ test <- function() { nested( function_call(with), - many - , first_level_args + many, + first_level_args ) nested( @@ -140,7 +140,12 @@ test <- function() { # formula -lm(a~b + c, data = NA) -lm(a~. - 1, data = NA) -a~b:c -a~b:c +lm(a ~ b + c, data = NA) +lm(a ~ . 
- 1, data = NA) +a ~ b:c +a ~ b:c +a ~ b:c +~a +~gg +b ~ k +call(1, ~qq) diff --git a/tests/testthat/stylerignore/adding-removing-in.R b/tests/testthat/stylerignore/adding-removing-in.R new file mode 100644 index 000000000..4c3516caf --- /dev/null +++ b/tests/testthat/stylerignore/adding-removing-in.R @@ -0,0 +1,60 @@ +# styler: off +1 +1;3 +# styler: on +# a comment +c(z ) + + +# styler: off +if (FALSE) + 3 +x = 3 + +y = 2 # comment +# styler: on + +if (FALSE) { + 3 +} + + +# styler: off +function() + NULL +# styler: on + + +# styler: off +if (f(x)) { + 3 +} else + 4 +# styler: on + + +# styler: off +while (x < 4) n() +# styler: on + + +# styler: off +for(i in 1:3) { + i +} +# styler: on + +# styler: off +for (i in 1:3) + g(i) - 2 + +# styler: on +1+ 547809 + + +1 +1 # styler: off + +1;1 # styler: off + +# styler: off +1 +1;3 # commnet +# styler: on diff --git a/tests/testthat/stylerignore/adding-removing-in_tree b/tests/testthat/stylerignore/adding-removing-in_tree new file mode 100644 index 000000000..df503a1ec --- /dev/null +++ b/tests/testthat/stylerignore/adding-removing-in_tree @@ -0,0 +1,180 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: # sty [0/0] {1} + ¦--expr: 1 +1 [1/0] {2} + ¦ ¦--expr: 1 [0/1] {4} + ¦ ¦ °--NUM_CONST: 1 [0/0] {3} + ¦ ¦--'+': + [0/0] {5} + ¦ °--expr: 1 [0/0] {7} + ¦ °--NUM_CONST: 1 [0/0] {6} + ¦--';': ; [0/0] {8} + ¦--expr: 3 [0/0] {10} + ¦ °--NUM_CONST: 3 [0/0] {9} + ¦--COMMENT: # sty [1/0] {11} + ¦--COMMENT: # a c [1/0] {12} + ¦--expr: c(z ) [1/0] {13} + ¦ ¦--expr: c [0/0] {15} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {14} + ¦ ¦--'(': ( [0/0] {16} + ¦ ¦--expr: z [0/1] {18} + ¦ ¦ °--SYMBOL: z [0/0] {17} + ¦ °--')': ) [0/0] {19} + ¦--COMMENT: # sty [3/0] {20} + ¦--expr: if (F [1/0] {21} + ¦ ¦--IF: if [0/1] {22} + ¦ ¦--'(': ( [0/0] {23} + ¦ ¦--expr: FALSE [0/0] {25} + ¦ ¦ °--NUM_CONST: FALSE [0/0] {24} + ¦ ¦--')': ) [0/2] {26} + ¦ °--expr: 3 [1/0] {28} + ¦ °--NUM_CONST: 3 [0/0] {27} + ¦--expr_or_assign_or_help: x = 3 [1/0] {29} + ¦ ¦--expr: x [0/1] {31} + ¦ ¦ °--SYMBOL: x [0/0] {30} + ¦ ¦--EQ_ASSIGN: = [0/1] {32} + ¦ °--expr: 3 [0/0] {34} + ¦ °--NUM_CONST: 3 [0/0] {33} + ¦--expr_or_assign_or_help: y = 2 [2/1] {35} + ¦ ¦--expr: y [0/1] {37} + ¦ ¦ °--SYMBOL: y [0/0] {36} + ¦ ¦--EQ_ASSIGN: = [0/1] {38} + ¦ °--expr: 2 [0/0] {40} + ¦ °--NUM_CONST: 2 [0/0] {39} + ¦--COMMENT: # com [0/0] {41} + ¦--COMMENT: # sty [1/0] {42} + ¦--expr: if (F [2/0] {43} + ¦ ¦--IF: if [0/1] {44} + ¦ ¦--'(': ( [0/0] {45} + ¦ ¦--expr: FALSE [0/0] {47} + ¦ ¦ °--NUM_CONST: FALSE [0/0] {46} + ¦ ¦--')': ) [0/1] {48} + ¦ °--expr: { + 3 [0/0] {49} + ¦ ¦--'{': { [0/2] {50} + ¦ ¦--expr: 3 [1/0] {52} + ¦ ¦ °--NUM_CONST: 3 [0/0] {51} + ¦ °--'}': } [1/0] {53} + ¦--COMMENT: # sty [3/0] {54} + ¦--expr: funct [1/0] {55} + ¦ ¦--FUNCTION: funct [0/0] {56} + ¦ ¦--'(': ( [0/0] {57} + ¦ ¦--')': ) [0/2] {58} + ¦ °--expr: NULL [1/0] {60} + ¦ °--NULL_CONST: NULL [0/0] {59} + ¦--COMMENT: # sty [1/0] {61} + ¦--COMMENT: # sty [3/0] {62} + ¦--expr: if (f [1/0] {63} + ¦ ¦--IF: if [0/1] {64} + ¦ ¦--'(': ( [0/0] {65} + ¦ ¦--expr: f(x) [0/0] {66} + ¦ ¦ ¦--expr: f [0/0] {68} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: f [0/0] {67} + ¦ ¦ ¦--'(': ( [0/0] {69} + ¦ ¦ ¦--expr: x [0/0] {71} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {70} + ¦ ¦ °--')': ) [0/0] {72} + ¦ ¦--')': ) [0/1] {73} + ¦ ¦--expr: { + 3 [0/1] {74} + ¦ ¦ ¦--'{': { [0/2] {75} + ¦ ¦ ¦--expr: 3 [1/0] {77} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {76} + ¦ ¦ °--'}': } [1/0] {78} + ¦ ¦--ELSE: else [0/2] {79} + ¦ °--expr: 4 [1/0] {81} + ¦ °--NUM_CONST: 4 [0/0] {80} + 
¦--COMMENT: # sty [1/0] {82} + ¦--COMMENT: # sty [3/0] {83} + ¦--expr: while [1/0] {84} + ¦ ¦--WHILE: while [0/1] {85} + ¦ ¦--'(': ( [0/0] {86} + ¦ ¦--expr: x < 4 [0/0] {87} + ¦ ¦ ¦--expr: x [0/1] {89} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {88} + ¦ ¦ ¦--LT: < [0/1] {90} + ¦ ¦ °--expr: 4 [0/0] {92} + ¦ ¦ °--NUM_CONST: 4 [0/0] {91} + ¦ ¦--')': ) [0/1] {93} + ¦ °--expr: n() [0/0] {94} + ¦ ¦--expr: n [0/0] {96} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: n [0/0] {95} + ¦ ¦--'(': ( [0/0] {97} + ¦ °--')': ) [0/0] {98} + ¦--COMMENT: # sty [1/0] {99} + ¦--COMMENT: # sty [3/0] {100} + ¦--expr: for(i [1/0] {101} + ¦ ¦--FOR: for [0/0] {102} + ¦ ¦--forcond: (i in [0/1] {103} + ¦ ¦ ¦--'(': ( [0/0] {104} + ¦ ¦ ¦--SYMBOL: i [0/1] {105} + ¦ ¦ ¦--IN: in [0/1] {106} + ¦ ¦ ¦--expr: 1:3 [0/0] {107} + ¦ ¦ ¦ ¦--expr: 1 [0/0] {109} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {108} + ¦ ¦ ¦ ¦--':': : [0/0] {110} + ¦ ¦ ¦ °--expr: 3 [0/0] {112} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {111} + ¦ ¦ °--')': ) [0/0] {113} + ¦ °--expr: { + i [0/0] {114} + ¦ ¦--'{': { [0/2] {115} + ¦ ¦--expr: i [1/0] {117} + ¦ ¦ °--SYMBOL: i [0/0] {116} + ¦ °--'}': } [1/0] {118} + ¦--COMMENT: # sty [1/0] {119} + ¦--COMMENT: # sty [2/0] {120} + ¦--expr: for ( [1/0] {121} + ¦ ¦--FOR: for [0/1] {122} + ¦ ¦--forcond: (i in [0/2] {123} + ¦ ¦ ¦--'(': ( [0/0] {124} + ¦ ¦ ¦--SYMBOL: i [0/1] {125} + ¦ ¦ ¦--IN: in [0/1] {126} + ¦ ¦ ¦--expr: 1:3 [0/0] {127} + ¦ ¦ ¦ ¦--expr: 1 [0/0] {129} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {128} + ¦ ¦ ¦ ¦--':': : [0/0] {130} + ¦ ¦ ¦ °--expr: 3 [0/0] {132} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {131} + ¦ ¦ °--')': ) [0/0] {133} + ¦ °--expr: g(i) [1/0] {134} + ¦ ¦--expr: g(i) [0/1] {135} + ¦ ¦ ¦--expr: g [0/0] {137} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: g [0/0] {136} + ¦ ¦ ¦--'(': ( [0/0] {138} + ¦ ¦ ¦--expr: i [0/0] {140} + ¦ ¦ ¦ °--SYMBOL: i [0/0] {139} + ¦ ¦ °--')': ) [0/0] {141} + ¦ ¦--'-': - [0/1] {142} + ¦ °--expr: 2 [0/0] {144} + ¦ °--NUM_CONST: 2 [0/0] {143} + ¦--COMMENT: # sty [2/0] {145} + ¦--expr: 1+ 54 [1/0] {146} + ¦ ¦--expr: 1 [0/0] {148} + ¦ ¦ °--NUM_CONST: 1 [0/0] {147} + ¦ ¦--'+': + [0/1] {149} + ¦ °--expr: 54780 [0/0] {151} + ¦ °--NUM_CONST: 54780 [0/0] {150} + ¦--expr: 1 +1 [3/1] {152} + ¦ ¦--expr: 1 [0/1] {154} + ¦ ¦ °--NUM_CONST: 1 [0/0] {153} + ¦ ¦--'+': + [0/0] {155} + ¦ °--expr: 1 [0/0] {157} + ¦ °--NUM_CONST: 1 [0/0] {156} + ¦--COMMENT: # sty [0/0] {158} + ¦--expr: 1 [2/0] {160} + ¦ °--NUM_CONST: 1 [0/0] {159} + ¦--';': ; [0/0] {161} + ¦--expr: 1 [0/1] {163} + ¦ °--NUM_CONST: 1 [0/0] {162} + ¦--COMMENT: # sty [0/0] {164} + ¦--COMMENT: # sty [2/0] {165} + ¦--expr: 1 +1 [1/0] {166} + ¦ ¦--expr: 1 [0/1] {168} + ¦ ¦ °--NUM_CONST: 1 [0/0] {167} + ¦ ¦--'+': + [0/0] {169} + ¦ °--expr: 1 [0/0] {171} + ¦ °--NUM_CONST: 1 [0/0] {170} + ¦--';': ; [0/0] {172} + ¦--expr: 3 [0/1] {174} + ¦ °--NUM_CONST: 3 [0/0] {173} + ¦--COMMENT: # com [0/0] {175} + °--COMMENT: # sty [1/0] {176} diff --git a/tests/testthat/stylerignore/adding-removing-out.R b/tests/testthat/stylerignore/adding-removing-out.R new file mode 100644 index 000000000..ce516fe16 --- /dev/null +++ b/tests/testthat/stylerignore/adding-removing-out.R @@ -0,0 +1,60 @@ +# styler: off +1 +1;3 +# styler: on +# a comment +c(z) + + +# styler: off +if (FALSE) + 3 +x = 3 + +y = 2 # comment +# styler: on + +if (FALSE) { + 3 +} + + +# styler: off +function() + NULL +# styler: on + + +# styler: off +if (f(x)) { + 3 +} else + 4 +# styler: on + + +# styler: off +while (x < 4) n() +# styler: on + + +# styler: off +for(i in 1:3) { + i +} +# styler: on + +# styler: off +for (i in 1:3) + g(i) - 2 + +# styler: on +1 + 547809 + + +1 
+1 # styler: off + +1;1 # styler: off + +# styler: off +1 +1;3 # commnet +# styler: on diff --git a/tests/testthat/stylerignore/alignment-in.R b/tests/testthat/stylerignore/alignment-in.R new file mode 100644 index 000000000..b128cbac5 --- /dev/null +++ b/tests/testthat/stylerignore/alignment-in.R @@ -0,0 +1,5 @@ +ps( + interaction_constraints = p_uty(tgs = "train"), + monotone_constraints = p_uty(dfault = 0, tags = c("train", "control"), custom_check = function(x) { checkmate::check_integerish(x, lower = -1, upper = 1, any.missing = FALSE) }), # styler: off + normalize_type = p_fct(c("tee", "forest"), default = "tree", tags = "train"), +) diff --git a/tests/testthat/stylerignore/alignment-in_tree b/tests/testthat/stylerignore/alignment-in_tree new file mode 100644 index 000000000..18ae34374 --- /dev/null +++ b/tests/testthat/stylerignore/alignment-in_tree @@ -0,0 +1,110 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + °--expr: ps( + [0/0] {1} + ¦--expr: ps [0/0] {3} + ¦ °--SYMBOL_FUNCTION_CALL: ps [0/0] {2} + ¦--'(': ( [0/2] {4} + ¦--SYMBOL_SUB: inter [1/1] {5} + ¦--EQ_SUB: = [0/1] {6} + ¦--expr: p_uty [0/0] {7} + ¦ ¦--expr: p_uty [0/0] {9} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: p_uty [0/0] {8} + ¦ ¦--'(': ( [0/0] {10} + ¦ ¦--SYMBOL_SUB: tgs [0/1] {11} + ¦ ¦--EQ_SUB: = [0/1] {12} + ¦ ¦--expr: "trai [0/0] {14} + ¦ ¦ °--STR_CONST: "trai [0/0] {13} + ¦ °--')': ) [0/0] {15} + ¦--',': , [0/2] {16} + ¦--SYMBOL_SUB: monot [1/4] {17} + ¦--EQ_SUB: = [0/1] {18} + ¦--expr: p_uty [0/0] {19} + ¦ ¦--expr: p_uty [0/0] {21} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: p_uty [0/0] {20} + ¦ ¦--'(': ( [0/0] {22} + ¦ ¦--SYMBOL_SUB: dfaul [0/1] {23} + ¦ ¦--EQ_SUB: = [0/1] {24} + ¦ ¦--expr: 0 [0/0] {26} + ¦ ¦ °--NUM_CONST: 0 [0/0] {25} + ¦ ¦--',': , [0/1] {27} + ¦ ¦--SYMBOL_SUB: tags [0/1] {28} + ¦ ¦--EQ_SUB: = [0/1] {29} + ¦ ¦--expr: c("tr [0/0] {30} + ¦ ¦ ¦--expr: c [0/0] {32} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {31} + ¦ ¦ ¦--'(': ( [0/0] {33} + ¦ ¦ ¦--expr: "trai [0/0] {35} + ¦ ¦ ¦ °--STR_CONST: "trai [0/0] {34} + ¦ ¦ ¦--',': , [0/1] {36} + ¦ ¦ ¦--expr: "cont [0/0] {38} + ¦ ¦ ¦ °--STR_CONST: "cont [0/0] {37} + ¦ ¦ °--')': ) [0/0] {39} + ¦ ¦--',': , [0/1] {40} + ¦ ¦--SYMBOL_SUB: custo [0/1] {41} + ¦ ¦--EQ_SUB: = [0/1] {42} + ¦ ¦--expr: funct [0/0] {43} + ¦ ¦ ¦--FUNCTION: funct [0/0] {44} + ¦ ¦ ¦--'(': ( [0/0] {45} + ¦ ¦ ¦--SYMBOL_FORMALS: x [0/0] {46} + ¦ ¦ ¦--')': ) [0/1] {47} + ¦ ¦ °--expr: { ch [0/0] {48} + ¦ ¦ ¦--'{': { [0/2] {49} + ¦ ¦ ¦--expr: check [0/1] {50} + ¦ ¦ ¦ ¦--expr: check [0/0] {51} + ¦ ¦ ¦ ¦ ¦--SYMBOL_PACKAGE: check [0/0] {52} + ¦ ¦ ¦ ¦ ¦--NS_GET: :: [0/0] {53} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: check [0/0] {54} + ¦ ¦ ¦ ¦--'(': ( [0/0] {55} + ¦ ¦ ¦ ¦--expr: x [0/0] {57} + ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {56} + ¦ ¦ ¦ ¦--',': , [0/1] {58} + ¦ ¦ ¦ ¦--SYMBOL_SUB: lower [0/1] {59} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {60} + ¦ ¦ ¦ ¦--expr: -1 [0/0] {61} + ¦ ¦ ¦ ¦ ¦--'-': - [0/0] {62} + ¦ ¦ ¦ ¦ °--expr: 1 [0/0] {64} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {63} + ¦ ¦ ¦ ¦--',': , [0/1] {65} + ¦ ¦ ¦ ¦--SYMBOL_SUB: upper [0/1] {66} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {67} + ¦ ¦ ¦ ¦--expr: 1 [0/0] {69} + ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {68} + ¦ ¦ ¦ ¦--',': , [0/1] {70} + ¦ ¦ ¦ ¦--SYMBOL_SUB: any.m [0/1] {71} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {72} + ¦ ¦ ¦ ¦--expr: FALSE [0/0] {74} + ¦ ¦ ¦ ¦ °--NUM_CONST: FALSE [0/0] {73} + ¦ ¦ ¦ °--')': ) [0/0] {75} + ¦ ¦ °--'}': } [0/0] {76} + ¦ °--')': ) [0/0] {77} + ¦--',': , [0/1] {78} + ¦--COMMENT: # sty [0/2] {79} + ¦--SYMBOL_SUB: norma [1/10] {80} + ¦--EQ_SUB: = [0/1] {81} + ¦--expr: p_fct [0/0] {82} + ¦ 
¦--expr: p_fct [0/0] {84} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: p_fct [0/0] {83} + ¦ ¦--'(': ( [0/0] {85} + ¦ ¦--expr: c("te [0/0] {86} + ¦ ¦ ¦--expr: c [0/0] {88} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {87} + ¦ ¦ ¦--'(': ( [0/0] {89} + ¦ ¦ ¦--expr: "tee" [0/0] {91} + ¦ ¦ ¦ °--STR_CONST: "tee" [0/0] {90} + ¦ ¦ ¦--',': , [0/1] {92} + ¦ ¦ ¦--expr: "fore [0/0] {94} + ¦ ¦ ¦ °--STR_CONST: "fore [0/0] {93} + ¦ ¦ °--')': ) [0/0] {95} + ¦ ¦--',': , [0/1] {96} + ¦ ¦--SYMBOL_SUB: defau [0/1] {97} + ¦ ¦--EQ_SUB: = [0/1] {98} + ¦ ¦--expr: "tree [0/0] {100} + ¦ ¦ °--STR_CONST: "tree [0/0] {99} + ¦ ¦--',': , [0/1] {101} + ¦ ¦--SYMBOL_SUB: tags [0/1] {102} + ¦ ¦--EQ_SUB: = [0/1] {103} + ¦ ¦--expr: "trai [0/0] {105} + ¦ ¦ °--STR_CONST: "trai [0/0] {104} + ¦ °--')': ) [0/0] {106} + ¦--',': , [0/0] {107} + °--')': ) [1/0] {108} diff --git a/tests/testthat/stylerignore/alignment-out.R b/tests/testthat/stylerignore/alignment-out.R new file mode 100644 index 000000000..b128cbac5 --- /dev/null +++ b/tests/testthat/stylerignore/alignment-out.R @@ -0,0 +1,5 @@ +ps( + interaction_constraints = p_uty(tgs = "train"), + monotone_constraints = p_uty(dfault = 0, tags = c("train", "control"), custom_check = function(x) { checkmate::check_integerish(x, lower = -1, upper = 1, any.missing = FALSE) }), # styler: off + normalize_type = p_fct(c("tee", "forest"), default = "tree", tags = "train"), +) diff --git a/tests/testthat/stylerignore/braces-in.R b/tests/testthat/stylerignore/braces-in.R new file mode 100644 index 000000000..6da0a3b0d --- /dev/null +++ b/tests/testthat/stylerignore/braces-in.R @@ -0,0 +1,25 @@ +x<- function() +3 # styler: off + +x<- function() # styler: off + 3 + + +if (x) # styler: off + 3 else + 4 + +if (x) { + 3 +} else # styler: off + 4 + +if (x) + 3 else 4 # styler: off + +while (x) # styler: off + "x" + + +while (x) + "x"# styler: off diff --git a/tests/testthat/stylerignore/braces-in_tree b/tests/testthat/stylerignore/braces-in_tree new file mode 100644 index 000000000..a24768ff6 --- /dev/null +++ b/tests/testthat/stylerignore/braces-in_tree @@ -0,0 +1,81 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: x<- f [0/1] {1} + ¦ ¦--expr: x [0/0] {3} + ¦ ¦ °--SYMBOL: x [0/0] {2} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {4} + ¦ °--expr: funct [0/0] {5} + ¦ ¦--FUNCTION: funct [0/0] {6} + ¦ ¦--'(': ( [0/0] {7} + ¦ ¦--')': ) [0/0] {8} + ¦ °--expr: 3 [1/0] {10} + ¦ °--NUM_CONST: 3 [0/0] {9} + ¦--COMMENT: # sty [0/0] {11} + ¦--expr: x<- f [2/0] {12} + ¦ ¦--expr: x [0/0] {14} + ¦ ¦ °--SYMBOL: x [0/0] {13} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {15} + ¦ °--expr: funct [0/0] {16} + ¦ ¦--FUNCTION: funct [0/0] {17} + ¦ ¦--'(': ( [0/0] {18} + ¦ ¦--')': ) [0/1] {19} + ¦ ¦--COMMENT: # sty [0/2] {20} + ¦ °--expr: 3 [1/0] {22} + ¦ °--NUM_CONST: 3 [0/0] {21} + ¦--expr: if (x [3/0] {23} + ¦ ¦--IF: if [0/1] {24} + ¦ ¦--'(': ( [0/0] {25} + ¦ ¦--expr: x [0/0] {27} + ¦ ¦ °--SYMBOL: x [0/0] {26} + ¦ ¦--')': ) [0/1] {28} + ¦ ¦--COMMENT: # sty [0/2] {29} + ¦ ¦--expr: 3 [1/1] {31} + ¦ ¦ °--NUM_CONST: 3 [0/0] {30} + ¦ ¦--ELSE: else [0/2] {32} + ¦ °--expr: 4 [1/0] {34} + ¦ °--NUM_CONST: 4 [0/0] {33} + ¦--expr: if (x [2/0] {35} + ¦ ¦--IF: if [0/1] {36} + ¦ ¦--'(': ( [0/0] {37} + ¦ ¦--expr: x [0/0] {39} + ¦ ¦ °--SYMBOL: x [0/0] {38} + ¦ ¦--')': ) [0/1] {40} + ¦ ¦--expr: { + 3 [0/1] {41} + ¦ ¦ ¦--'{': { [0/2] {42} + ¦ ¦ ¦--expr: 3 [1/0] {44} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {43} + ¦ ¦ °--'}': } [1/0] {45} + ¦ ¦--ELSE: else [0/2] {46} + ¦ ¦--COMMENT: # sty [0/2] {47} + ¦ °--expr: 4 [1/0] {49} + ¦ °--NUM_CONST: 4 [0/0] {48} + ¦--expr: if (x 
[2/1] {50} + ¦ ¦--IF: if [0/1] {51} + ¦ ¦--'(': ( [0/0] {52} + ¦ ¦--expr: x [0/0] {54} + ¦ ¦ °--SYMBOL: x [0/0] {53} + ¦ ¦--')': ) [0/2] {55} + ¦ ¦--expr: 3 [1/1] {57} + ¦ ¦ °--NUM_CONST: 3 [0/0] {56} + ¦ ¦--ELSE: else [0/1] {58} + ¦ °--expr: 4 [0/0] {60} + ¦ °--NUM_CONST: 4 [0/0] {59} + ¦--COMMENT: # sty [0/0] {61} + ¦--expr: while [2/0] {62} + ¦ ¦--WHILE: while [0/1] {63} + ¦ ¦--'(': ( [0/0] {64} + ¦ ¦--expr: x [0/0] {66} + ¦ ¦ °--SYMBOL: x [0/0] {65} + ¦ ¦--')': ) [0/1] {67} + ¦ ¦--COMMENT: # sty [0/2] {68} + ¦ °--expr: "x" [1/0] {70} + ¦ °--STR_CONST: "x" [0/0] {69} + ¦--expr: while [3/0] {71} + ¦ ¦--WHILE: while [0/1] {72} + ¦ ¦--'(': ( [0/0] {73} + ¦ ¦--expr: x [0/0] {75} + ¦ ¦ °--SYMBOL: x [0/0] {74} + ¦ ¦--')': ) [0/2] {76} + ¦ °--expr: "x" [1/0] {78} + ¦ °--STR_CONST: "x" [0/0] {77} + °--COMMENT: # sty [0/0] {79} diff --git a/tests/testthat/stylerignore/braces-out.R b/tests/testthat/stylerignore/braces-out.R new file mode 100644 index 000000000..72d284060 --- /dev/null +++ b/tests/testthat/stylerignore/braces-out.R @@ -0,0 +1,25 @@ +x <- function() +3 # styler: off + +x<- function() # styler: off + 3 + + +if (x) # styler: off + 3 else + 4 + +if (x) { + 3 +} else # styler: off + 4 + +if (x) + 3 else 4 # styler: off + +while (x) # styler: off + "x" + + +while (x) + "x"# styler: off diff --git a/tests/testthat/stylerignore/crossing-with-expressions-in.R b/tests/testthat/stylerignore/crossing-with-expressions-in.R new file mode 100644 index 000000000..6c7bb85cc --- /dev/null +++ b/tests/testthat/stylerignore/crossing-with-expressions-in.R @@ -0,0 +1,20 @@ +call( + # styler: off + 1+ 1, test_xkj("hier", na.rm = 3 ,py = 43 + ) +) + +# also if there are more comments +test_xkj("hier", na.rm = 3, py = 43 + ) + + +x="new" # styler: off +y=1 # none + +more_calls( + # styler: on + with( + arguments)) +1 + 1 +a(!b) diff --git a/tests/testthat/stylerignore/crossing-with-expressions-in_tree b/tests/testthat/stylerignore/crossing-with-expressions-in_tree new file mode 100644 index 000000000..d1e859747 --- /dev/null +++ b/tests/testthat/stylerignore/crossing-with-expressions-in_tree @@ -0,0 +1,91 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: call( [0/0] {1} + ¦ ¦--expr: call [0/0] {3} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {2} + ¦ ¦--'(': ( [0/2] {4} + ¦ ¦--COMMENT: # sty [1/2] {5} + ¦ ¦--expr: 1+ 1 [1/0] {6} + ¦ ¦ ¦--expr: 1 [0/0] {8} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {7} + ¦ ¦ ¦--'+': + [0/1] {9} + ¦ ¦ °--expr: 1 [0/0] {11} + ¦ ¦ °--NUM_CONST: 1 [0/0] {10} + ¦ ¦--',': , [0/1] {12} + ¦ ¦--expr: test_ [0/0] {13} + ¦ ¦ ¦--expr: test_ [0/0] {15} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {14} + ¦ ¦ ¦--'(': ( [0/0] {16} + ¦ ¦ ¦--expr: "hier [0/0] {18} + ¦ ¦ ¦ °--STR_CONST: "hier [0/0] {17} + ¦ ¦ ¦--',': , [0/1] {19} + ¦ ¦ ¦--SYMBOL_SUB: na.rm [0/1] {20} + ¦ ¦ ¦--EQ_SUB: = [0/1] {21} + ¦ ¦ ¦--expr: 3 [0/1] {23} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {22} + ¦ ¦ ¦--',': , [0/0] {24} + ¦ ¦ ¦--SYMBOL_SUB: py [0/1] {25} + ¦ ¦ ¦--EQ_SUB: = [0/1] {26} + ¦ ¦ ¦--expr: 43 [0/2] {28} + ¦ ¦ ¦ °--NUM_CONST: 43 [0/0] {27} + ¦ ¦ °--')': ) [1/0] {29} + ¦ °--')': ) [1/0] {30} + ¦--COMMENT: # als [2/0] {31} + ¦--expr: test_ [1/0] {32} + ¦ ¦--expr: test_ [0/0] {34} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {33} + ¦ ¦--'(': ( [0/0] {35} + ¦ ¦--expr: "hier [0/0] {37} + ¦ ¦ °--STR_CONST: "hier [0/0] {36} + ¦ ¦--',': , [0/1] {38} + ¦ ¦--SYMBOL_SUB: na.rm [0/1] {39} + ¦ ¦--EQ_SUB: = [0/1] {40} + ¦ ¦--expr: 3 [0/0] {42} + ¦ ¦ °--NUM_CONST: 3 [0/0] {41} + ¦ ¦--',': , [0/1] {43} + ¦ ¦--SYMBOL_SUB: py [0/1] 
{44} + ¦ ¦--EQ_SUB: = [0/1] {45} + ¦ ¦--expr: 43 [0/9] {47} + ¦ ¦ °--NUM_CONST: 43 [0/0] {46} + ¦ °--')': ) [1/0] {48} + ¦--expr_or_assign_or_help: x="ne [3/1] {49} + ¦ ¦--expr: x [0/0] {51} + ¦ ¦ °--SYMBOL: x [0/0] {50} + ¦ ¦--EQ_ASSIGN: = [0/0] {52} + ¦ °--expr: "new" [0/0] {54} + ¦ °--STR_CONST: "new" [0/0] {53} + ¦--COMMENT: # sty [0/0] {55} + ¦--expr_or_assign_or_help: y=1 [1/1] {56} + ¦ ¦--expr: y [0/0] {58} + ¦ ¦ °--SYMBOL: y [0/0] {57} + ¦ ¦--EQ_ASSIGN: = [0/0] {59} + ¦ °--expr: 1 [0/0] {61} + ¦ °--NUM_CONST: 1 [0/0] {60} + ¦--COMMENT: # non [0/0] {62} + ¦--expr: more_ [2/0] {63} + ¦ ¦--expr: more_ [0/0] {65} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: more_ [0/0] {64} + ¦ ¦--'(': ( [0/2] {66} + ¦ ¦--COMMENT: # sty [1/2] {67} + ¦ ¦--expr: with( [1/0] {68} + ¦ ¦ ¦--expr: with [0/0] {70} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: with [0/0] {69} + ¦ ¦ ¦--'(': ( [0/4] {71} + ¦ ¦ ¦--expr: argum [1/0] {73} + ¦ ¦ ¦ °--SYMBOL: argum [0/0] {72} + ¦ ¦ °--')': ) [0/0] {74} + ¦ °--')': ) [0/0] {75} + ¦--expr: 1 + 1 [1/0] {76} + ¦ ¦--expr: 1 [0/1] {78} + ¦ ¦ °--NUM_CONST: 1 [0/0] {77} + ¦ ¦--'+': + [0/1] {79} + ¦ °--expr: 1 [0/0] {81} + ¦ °--NUM_CONST: 1 [0/0] {80} + °--expr: a(!b) [1/0] {82} + ¦--expr: a [0/0] {84} + ¦ °--SYMBOL_FUNCTION_CALL: a [0/0] {83} + ¦--'(': ( [0/0] {85} + ¦--expr: !b [0/0] {86} + ¦ ¦--'!': ! [0/0] {87} + ¦ °--expr: b [0/0] {89} + ¦ °--SYMBOL: b [0/0] {88} + °--')': ) [0/0] {90} diff --git a/tests/testthat/stylerignore/crossing-with-expressions-out.R b/tests/testthat/stylerignore/crossing-with-expressions-out.R new file mode 100644 index 000000000..52cfa0e9f --- /dev/null +++ b/tests/testthat/stylerignore/crossing-with-expressions-out.R @@ -0,0 +1,22 @@ +call( + # styler: off + 1+ 1, test_xkj("hier", na.rm = 3 ,py = 43 + ) +) + +# also if there are more comments +test_xkj("hier", na.rm = 3, py = 43 + ) + + +x="new" # styler: off +y=1 # none + +more_calls( + # styler: on + with( + arguments + ) +) +1 + 1 +a(!b) diff --git a/tests/testthat/stylerignore/simple-in.R b/tests/testthat/stylerignore/simple-in.R new file mode 100644 index 000000000..f507d8cea --- /dev/null +++ b/tests/testthat/stylerignore/simple-in.R @@ -0,0 +1,25 @@ + +call(1 ) # styler: off +# styler: off +# also if there are more comments +test_xkj("hier", na.rm = 3, py = 43 + ) + + +x="new" # styler: off +y=1 # none + +more_calls(with(arguments)) +# styler: on +1 + 1 +a(!b) + + +# -------- +x="new" # styler: off +y=1 # none + +more_calls(with(arguments)) +# styler: off +1 + 1 +a(!b) diff --git a/tests/testthat/stylerignore/simple-in_tree b/tests/testthat/stylerignore/simple-in_tree new file mode 100644 index 000000000..be7eeea9e --- /dev/null +++ b/tests/testthat/stylerignore/simple-in_tree @@ -0,0 +1,113 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: call( [0/1] {1} + ¦ ¦--expr: call [0/0] {3} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {2} + ¦ ¦--'(': ( [0/0] {4} + ¦ ¦--expr: 1 [0/1] {6} + ¦ ¦ °--NUM_CONST: 1 [0/0] {5} + ¦ °--')': ) [0/0] {7} + ¦--COMMENT: # sty [0/0] {8} + ¦--COMMENT: # sty [1/0] {9} + ¦--COMMENT: # als [1/0] {10} + ¦--expr: test_ [1/0] {11} + ¦ ¦--expr: test_ [0/0] {13} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: test_ [0/0] {12} + ¦ ¦--'(': ( [0/0] {14} + ¦ ¦--expr: "hier [0/0] {16} + ¦ ¦ °--STR_CONST: "hier [0/0] {15} + ¦ ¦--',': , [0/1] {17} + ¦ ¦--SYMBOL_SUB: na.rm [0/1] {18} + ¦ ¦--EQ_SUB: = [0/1] {19} + ¦ ¦--expr: 3 [0/0] {21} + ¦ ¦ °--NUM_CONST: 3 [0/0] {20} + ¦ ¦--',': , [0/1] {22} + ¦ ¦--SYMBOL_SUB: py [0/1] {23} + ¦ ¦--EQ_SUB: = [0/1] {24} + ¦ ¦--expr: 43 [0/9] {26} + ¦ ¦ °--NUM_CONST: 43 
[0/0] {25} + ¦ °--')': ) [1/0] {27} + ¦--expr_or_assign_or_help: x="ne [3/1] {28} + ¦ ¦--expr: x [0/0] {30} + ¦ ¦ °--SYMBOL: x [0/0] {29} + ¦ ¦--EQ_ASSIGN: = [0/0] {31} + ¦ °--expr: "new" [0/0] {33} + ¦ °--STR_CONST: "new" [0/0] {32} + ¦--COMMENT: # sty [0/0] {34} + ¦--expr_or_assign_or_help: y=1 [1/1] {35} + ¦ ¦--expr: y [0/0] {37} + ¦ ¦ °--SYMBOL: y [0/0] {36} + ¦ ¦--EQ_ASSIGN: = [0/0] {38} + ¦ °--expr: 1 [0/0] {40} + ¦ °--NUM_CONST: 1 [0/0] {39} + ¦--COMMENT: # non [0/0] {41} + ¦--expr: more_ [2/0] {42} + ¦ ¦--expr: more_ [0/0] {44} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: more_ [0/0] {43} + ¦ ¦--'(': ( [0/0] {45} + ¦ ¦--expr: with( [0/0] {46} + ¦ ¦ ¦--expr: with [0/0] {48} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: with [0/0] {47} + ¦ ¦ ¦--'(': ( [0/0] {49} + ¦ ¦ ¦--expr: argum [0/0] {51} + ¦ ¦ ¦ °--SYMBOL: argum [0/0] {50} + ¦ ¦ °--')': ) [0/0] {52} + ¦ °--')': ) [0/0] {53} + ¦--COMMENT: # sty [1/0] {54} + ¦--expr: 1 + 1 [1/0] {55} + ¦ ¦--expr: 1 [0/1] {57} + ¦ ¦ °--NUM_CONST: 1 [0/0] {56} + ¦ ¦--'+': + [0/1] {58} + ¦ °--expr: 1 [0/0] {60} + ¦ °--NUM_CONST: 1 [0/0] {59} + ¦--expr: a(!b) [1/0] {61} + ¦ ¦--expr: a [0/0] {63} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: a [0/0] {62} + ¦ ¦--'(': ( [0/0] {64} + ¦ ¦--expr: !b [0/0] {65} + ¦ ¦ ¦--'!': ! [0/0] {66} + ¦ ¦ °--expr: b [0/0] {68} + ¦ ¦ °--SYMBOL: b [0/0] {67} + ¦ °--')': ) [0/0] {69} + ¦--COMMENT: # --- [3/0] {70} + ¦--expr_or_assign_or_help: x="ne [1/1] {71} + ¦ ¦--expr: x [0/0] {73} + ¦ ¦ °--SYMBOL: x [0/0] {72} + ¦ ¦--EQ_ASSIGN: = [0/0] {74} + ¦ °--expr: "new" [0/0] {76} + ¦ °--STR_CONST: "new" [0/0] {75} + ¦--COMMENT: # sty [0/0] {77} + ¦--expr_or_assign_or_help: y=1 [1/1] {78} + ¦ ¦--expr: y [0/0] {80} + ¦ ¦ °--SYMBOL: y [0/0] {79} + ¦ ¦--EQ_ASSIGN: = [0/0] {81} + ¦ °--expr: 1 [0/0] {83} + ¦ °--NUM_CONST: 1 [0/0] {82} + ¦--COMMENT: # non [0/0] {84} + ¦--expr: more_ [2/0] {85} + ¦ ¦--expr: more_ [0/0] {87} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: more_ [0/0] {86} + ¦ ¦--'(': ( [0/0] {88} + ¦ ¦--expr: with( [0/0] {89} + ¦ ¦ ¦--expr: with [0/0] {91} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: with [0/0] {90} + ¦ ¦ ¦--'(': ( [0/0] {92} + ¦ ¦ ¦--expr: argum [0/0] {94} + ¦ ¦ ¦ °--SYMBOL: argum [0/0] {93} + ¦ ¦ °--')': ) [0/0] {95} + ¦ °--')': ) [0/0] {96} + ¦--COMMENT: # sty [1/0] {97} + ¦--expr: 1 + 1 [1/0] {98} + ¦ ¦--expr: 1 [0/1] {100} + ¦ ¦ °--NUM_CONST: 1 [0/0] {99} + ¦ ¦--'+': + [0/1] {101} + ¦ °--expr: 1 [0/0] {103} + ¦ °--NUM_CONST: 1 [0/0] {102} + °--expr: a(!b) [1/0] {104} + ¦--expr: a [0/0] {106} + ¦ °--SYMBOL_FUNCTION_CALL: a [0/0] {105} + ¦--'(': ( [0/0] {107} + ¦--expr: !b [0/0] {108} + ¦ ¦--'!': ! 
[0/0] {109} + ¦ °--expr: b [0/0] {111} + ¦ °--SYMBOL: b [0/0] {110} + °--')': ) [0/0] {112} diff --git a/tests/testthat/stylerignore/simple-out.R b/tests/testthat/stylerignore/simple-out.R new file mode 100644 index 000000000..a1986de7a --- /dev/null +++ b/tests/testthat/stylerignore/simple-out.R @@ -0,0 +1,24 @@ +call(1 ) # styler: off +# styler: off +# also if there are more comments +test_xkj("hier", na.rm = 3, py = 43 + ) + + +x="new" # styler: off +y=1 # none + +more_calls(with(arguments)) +# styler: on +1 + 1 +a(!b) + + +# -------- +x="new" # styler: off +y <- 1 # none + +more_calls(with(arguments)) +# styler: off +1 + 1 +a(!b) diff --git a/tests/testthat/test-cache-clean-up.R b/tests/testthat/test-cache-clean-up.R new file mode 100644 index 000000000..66adce823 --- /dev/null +++ b/tests/testthat/test-cache-clean-up.R @@ -0,0 +1,23 @@ +test_that("styler tests did not use R.cache in user root", { + skip_on_cran() + skip_on_covr() + skip_during_parallel() + expect_true( + length(list.files(R.cache::getCachePath("styler"), recursive = TRUE)) == 0L + ) +}) + +test_that("clear Cache", { + # if R CMD CHECK is detected, R.cache root is set to a temp + # directory by default. + # https://github.com/HenrikBengtsson/R.cache/commit/c7ac171f15f035674346d5504049c38cf07c268f + # Hence, this clean up won't clean up the user directory. + skip_during_parallel() + cache_path <- R.cache::getCachePath("styler") + R.cache::clearCache(cache_path, recursive = TRUE, prompt = FALSE) + skip_on_cran() + skip_on_covr() + expect_true( + length(list.dirs(R.cache::getCachePath("styler"))) == 1L + ) +}) diff --git a/tests/testthat/test-cache-high-level-api.R b/tests/testthat/test-cache-high-level-api.R new file mode 100644 index 000000000..e645f8bc8 --- /dev/null +++ b/tests/testthat/test-cache-high-level-api.R @@ -0,0 +1,221 @@ +test_that("activated cache brings speedup on style_file() API", { + local_test_setup() + skip_on_cran() + skip_on_covr() + skip_during_parallel() + n <- n_times_faster_with_cache( + test_path("reference-objects/caching.R"), + test_path("reference-objects/caching.R"), + fun = style_file + ) + expect_gt(n, 30) +}) + +text <- c( + "#' Roxygen", + "#' Comment", + "#' @examples 1 + 1", + "k <- function() {", + " 1 + 3", + " if (x) {", + " k()", + " }", + "}", + "" +) %>% + rep(10) + +test_that("activated cache brings speedup on style_text() API on character vector", { + skip_on_cran() + skip_on_covr() + skip_during_parallel() + n <- n_times_faster_with_cache( + text, text, + fun = style_text + ) + expect_gt(n, 40) +}) + +test_that("activated cache brings speedup on style_text() API on character scalar", { + skip_on_cran() + skip_on_covr() + skip_during_parallel() + text2 <- paste0(text, collapse = "\n") + + n <- n_times_faster_with_cache( + text2, text2, + fun = style_text + ) + expect_gt(n, 55) +}) + + +test_that("trailing line breaks are ignored for caching", { + local_test_setup(cache = TRUE) + text1 <- paste0(text, collapse = "\n") + text2 <- c(paste0(text, collapse = "\n"), "\n", "\n", "\n", "\n") + style_text(text1) + style_text(text2) + expect_equal(cache_info(format = "tabular")$n, 3) + skip_on_cran() + skip_on_covr() + skip_during_parallel() + n <- n_times_faster_with_cache(text1, text2) + expect_gt(n, 55) +}) + +test_that("trailing line breaks are ignored for caching in one scalar", { + local_test_setup(cache = TRUE) + text1 <- paste0(text, collapse = "\n") + text2 <- c(paste0(text, collapse = "\n"), "\n", "\n", "\n", "\n") + style_text(text1) + style_text(text2) + 
expect_equal(cache_info(format = "tabular")$n, 3) + skip_on_cran() + skip_on_covr() + skip_during_parallel() + n <- n_times_faster_with_cache(text1, text2) + expect_gt(n, 55) +}) + +test_that("trailing line breaks are ignored for caching in one scalar", { + local_test_setup(cache = TRUE) + text1 <- paste0(text, collapse = "\n") + text2 <- paste0( + paste0(text, collapse = "\n"), "\n", "\n", "\n", "\n", + collapse = "" + ) + style_text(text1) + style_text(text2) + expect_equal(cache_info(format = "tabular")$n, 3) + skip_on_cran() + skip_on_covr() + skip_during_parallel() + n <- n_times_faster_with_cache(text1, text2) + expect_gt(n, 55) +}) + +test_that("speedup higher when cached roxygen example code is multiple expressions", { + skip_on_cran() + skip_on_covr() + skip_during_parallel() + text_long <- c( + "#' Roxygen", + "#' Comment", + "#' @examples", + "#' call(1 + 1, 33)", + "#' if (x > 4)", + "#' bb = 3", + "#' call(x,y=2)", + "k <- function() {", + " 1 + 1", + " if (x) {", + " k()", + " }", + "}", + "" + ) + text_long_styled <- style_text(text_long) + text_long_styled_changed <- text_long_styled + text_long_styled_changed[14] <- " }" + speedup_multiple_roygen_example <- n_times_faster_with_cache( + text_long_styled, text_long_styled_changed + ) + text_short_styled <- text_long_styled[-c(5:8)] + text_short_styled_changed <- text_short_styled + text_short_styled_changed[10] <- " }" + speedup_many_roygen_examples <- n_times_faster_with_cache( + text_short_styled, text_short_styled_changed + ) + # the speed gain for longer expression is 1.1x higher + expect_true( + speedup_multiple_roygen_example / speedup_many_roygen_examples > 1.02 + ) +}) + + + +test_that("no speedup when tranformer changes", { + skip_on_cran() + skip_on_covr() + skip_during_parallel() + local_test_setup() + t1 <- tidyverse_style() + first <- system.time(style_text(text, transformers = t1)) + t1 <- tidyverse_style(indent_by = 4) + second <- system.time(style_text(text, transformers = t1)) + expect_true(first["elapsed"] < 1.3 * second["elapsed"]) +}) + + +test_that("unactivated cache does not bring speedup", { + skip_on_cran() + skip_on_covr() + local_test_setup() + skip_during_parallel() + first <- system.time(style_file(test_path("reference-objects/caching.R"))) + second <- system.time(style_file(test_path("reference-objects/caching.R"))) + expect_false(first["elapsed"] / 4 > second["elapsed"]) +}) + +test_that("avoid deleting comments #584 (see commit messages)", { + local_test_setup() + text <- c( + "1 + 1", + "# Comment", + "# another", + "NULL" + ) + style_text(text) + text2 <- c( + "1 + 1", + "# x", + "# another", + "NULL" + ) + expect_equal(as.character(style_text(text2)), text2) +}) + +test_that("avoid removing roxygen mask (see commit messages in #584)", { + local_test_setup() + text <- c( + "c(", + " 1, 2,", + " x - 2", + ")" + ) + style_text(text) + text2 <- c( + "#' Stuff", + "#'", + "#' @examples", + "#' c(", + "#' 1, 2,", + "#' x - 2", + "#' )", + "#' x", + "NULL" + ) + expect_equal(as.character(style_text(text2)), text2) +}) + +test_that("partial caching of multiple expressions on one line works", { + local_test_setup() + text <- "1" + style_text(text) + text2 <- "1 # comment" + styled <- style_text(text2) + expect_equal( + as.character(styled), + text2 + ) + + style_text("mtcars") + style_text(c("mtcars %>%", "f()")) + final_text <- c("mtcars %>%", " f() #") + expect_equal(as.character(style_text(final_text)), final_text) +}) + +test_that("cache is deactivated at end of caching related testthat file", { 
+ expect_false(cache_is_activated()) +}) diff --git a/tests/testthat/test-cache-interaction-base-indention.R b/tests/testthat/test-cache-interaction-base-indention.R new file mode 100644 index 000000000..d5dc6b6e3 --- /dev/null +++ b/tests/testthat/test-cache-interaction-base-indention.R @@ -0,0 +1,155 @@ +test_that("base_indention is respected in caching", { + text <- c("1 + 1") + local_test_setup(cache = TRUE) + without_indention <- style_text(text) + local_test_setup(cache = TRUE) + style_text(text, base_indention = 5) + expect_equal( + style_text(text), + without_indention + ) +}) + +test_that("include_roxygen_exmples is respected in caching", { + text <- c("#' Roxygen", "#'", "#' @examplesIf", "#' 1+1", "1 + 1") + local_test_setup(cache = TRUE) + with_examples <- style_text(text) + local_test_setup(cache = TRUE) + style_text(text, include_roxygen_examples = FALSE) + expect_equal( + style_text(text, include_roxygen_examples = TRUE), + with_examples + ) +}) + + +test_that("expression caching when first expression does not comply", { + local_test_setup(cache = TRUE) + more <- 'x<- 1 + "multi +line string" + c(a = 3) + another( + "x", y = 4 + ) +' + expect_out <- c( + " x <- 1", + ' "multi', + 'line string"', + " c(a = 3)", + " another(", + ' "x",', + " y = 4", + " )" + ) + out <- style_text(more, base_indention = 3) %>% + as.character() + expect_equal( + out, + expect_out + ) + out <- style_text(more, base_indention = 3) %>% + as.character() + expect_equal( + out, + expect_out + ) + out <- style_text(more, base_indention = 4) %>% + as.character() + expect_equal( + out, + c( + " x <- 1", + ' "multi', + 'line string"', + " c(a = 3)", + " another(", + ' "x",', + " y = 4", + " )" + ) + ) + sg <- tidyverse_style() + # TODO caching with base indention 3 + expect_true( + is_cached("x <- 1", sg, more_specs = cache_more_specs(TRUE, 4)) + ) + + sg <- tidyverse_style() + expect_true( + is_cached("x <- 1", sg, more_specs = cache_more_specs(TRUE, 3)) + ) +}) + +test_that("expression caching when last expression does not comply", { + local_test_setup(cache = TRUE) + more <- ' x <- 1 + "multi +line string" + c(a = 3) + another( + "x", y = 4) +' + expect_out <- c( + " x <- 1", + ' "multi', + 'line string"', + " c(a = 3)", + " another(", + ' "x",', + " y = 4", + " )" + ) + out <- style_text(more, base_indention = 3) %>% + as.character() + expect_equal( + out, + expect_out + ) + out <- style_text(more, base_indention = 3) %>% + as.character() + expect_equal( + out, + expect_out + ) +}) + +test_that("expression caching when middle expression does not comply", { + local_test_setup(cache = TRUE) + more <- ' x <- 1 + "multi +line string" + c(a= 3) + another( + "x", y = 4 + ) +' + expect_out <- c( + " x <- 1", + ' "multi', + 'line string"', + " c(a = 3)", + " another(", + ' "x",', + " y = 4", + " )" + ) + out <- style_text(more, base_indention = 3) %>% + as.character() + expect_equal( + out, + expect_out + ) + out <- style_text(more, base_indention = 3) %>% + as.character() + expect_equal( + out, + expect_out + ) +}) + + +test_that("cache is deactivated at end of caching related testthat file", { + expect_false(cache_is_activated()) +}) diff --git a/tests/testthat/test-cache-interaction-more-specs.R b/tests/testthat/test-cache-interaction-more-specs.R new file mode 100644 index 000000000..9d3dd1af7 --- /dev/null +++ b/tests/testthat/test-cache-interaction-more-specs.R @@ -0,0 +1,29 @@ +test_that("base_indention is respected in caching", { + local_test_setup(cache = TRUE) + text <- c("1 + 1") + 
without_indention <- style_text(text) + local_test_setup(cache = TRUE) + style_text(text, base_indention = 5) + expect_equal( + style_text(text), + without_indention + ) +}) + +test_that("ignore_alignment is respected in caching", { + local_test_setup(cache = TRUE) + text <- c("call(", " arxone = 1,", " tw3 = 2", ")") + text_without_alignment <- c("call(", " arxone = 1,", " tw3 = 2", ")") + with_detection <- style_text(text) + withr::local_options(styler.ignore_alignment = TRUE) + without_detection <- style_text(text) + expect_equal( + as.character(without_detection), + as.character(text_without_alignment) + ) + expect_equal(cache_info(format = "tabular")$n, 2) +}) + +test_that("cache is deactivated at end of caching related testthat file", { + expect_false(cache_is_activated()) +}) diff --git a/tests/testthat/test-cache-interaction-roxygen-code-examples.R b/tests/testthat/test-cache-interaction-roxygen-code-examples.R new file mode 100644 index 000000000..7aa3df2b7 --- /dev/null +++ b/tests/testthat/test-cache-interaction-roxygen-code-examples.R @@ -0,0 +1,49 @@ +test_that("roxyen code examples are written to cache as both individual expressions and as whole text", { + local_test_setup(cache = TRUE) + more_specs <- cache_more_specs_default() + text <- c( + "#' Comment", + "#'", + "#' Stuff", + "#' @examples", + "#' 1 + 1", + "#' f(x )", + "NULL", + "103" + ) + styled <- style_text(text) + expect_equal(cache_info(format = "tabular")$n, 6) + # 1 whole (with comments) + # 1 code whole + # 1 code by expr + # 1 roxygen whole + # 2 roxygen individual + # total: 6 + expect_true( + is_cached(as.character(styled), tidyverse_style(), more_specs = more_specs) + ) + expect_true( + is_cached(c("", "1 + 1", "f(x)"), tidyverse_style(), more_specs = more_specs) + ) + expect_true( + is_cached(c("1 + 1"), tidyverse_style(), more_specs = more_specs) + ) + expect_true( + is_cached(c("f(x)"), tidyverse_style(), more_specs = more_specs) + ) + expect_true( + is_cached(c("NULL"), tidyverse_style(), more_specs = more_specs) + ) + expect_true( + is_cached(c("103"), tidyverse_style(), more_specs = more_specs) + ) + expect_false( + is_cached(c("f(x )"), tidyverse_style(), more_specs = more_specs) + ) +}) + +test_that("cache is deactivated at end of caching related testthat file", { + expect_false(cache_is_activated()) +}) + +# consider dropping transformer text from cache key to speed up. diff --git a/tests/testthat/test-cache-low-level-api.R b/tests/testthat/test-cache-low-level-api.R new file mode 100644 index 000000000..0c9ed6acd --- /dev/null +++ b/tests/testthat/test-cache-low-level-api.R @@ -0,0 +1,133 @@ +test_that("caching utils make right blocks with semi-colon", { + blocks_simple_uncached <- compute_parse_data_nested(c("1 + 1", "2; 1+1")) %>% + dplyr::mutate(is_cached = FALSE) %>% + cache_find_block() + expect_equal(blocks_simple_uncached, c(1, 1, 1, 1)) + + blocks_simple_cached <- compute_parse_data_nested(c("1 + 1", "2; 1+1")) %>% + dplyr::mutate(is_cached = TRUE) %>% + cache_find_block() + expect_equal(blocks_simple_cached, c(1, 1, 1, 1)) + + blocks_edge <- compute_parse_data_nested(c("1 + 1", "2; 1+1")) %>% + dplyr::mutate(is_cached = c(TRUE, TRUE, FALSE, FALSE)) %>% + cache_find_block() + expect_equal(blocks_edge, c(1, 2, 2, 2)) +}) + +test_that("caching utils make right blocks with comments", { + text <- ' + ### comment + x = 1 ### comment + y = 2 # comment + x<-1 ###comment + y <- 2 # comment + "a string here" + + # something something + tau1 = 1 # here? 
+ ' + + + blocks_simple_uncached <- compute_parse_data_nested(text) %>% + dplyr::mutate(is_cached = c( + FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, + TRUE, FALSE, FALSE, FALSE + )) %>% + cache_find_block() + expect_equal(blocks_simple_uncached, c(1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4, 4, 4)) + + text <- " + ### comment + x = 1 + y = 2 # comment + x<-1 + y <- 2 # comment + + # something something + tau1 = 1 # here? + " + blocks_simple_cached <- compute_parse_data_nested(text) %>% + dplyr::mutate(is_cached = c( + FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE + )) %>% + cache_find_block() + expect_equal(blocks_simple_cached, c(1, 1, 1, 1, 1, 2, 2, 2, 2, 2)) +}) + + +test_that("blank lines are correctly identified", { + local_test_setup(cache = TRUE) + + text <- c( + "1 + 1", + "", + "", + "f(x)", + "", + "", + "", + "x < 3", + "function() NULL" + ) + # when not cached, all code in same block + more_specs <- cache_more_specs_default() + pd_nested <- compute_parse_data_nested(text, + transformers = tidyverse_style(), + more_specs = more_specs + ) + cache_by_expression(text, tidyverse_style(), more_specs = more_specs) + expect_equal( + pd_nested$block, rep(1, 4) + ) + + expect_equal( + find_blank_lines_to_next_block(pd_nested), + 1 + ) + + # when partly cached, not all code in same block + text[4] <- "f (x)" + pd_nested <- compute_parse_data_nested(text, tidyverse_style(), more_specs = more_specs) + expect_equal( + pd_nested$block, c(1, 2, 3, 3) + ) + + expect_equal( + find_blank_lines_to_next_block(pd_nested), + c(1, 3, 4) + ) +}) + +test_that("caching utils make right blocks with comments", { + blocks_simple_uncached <- compute_parse_data_nested(c("1 + 1", "2 # comment")) %>% + dplyr::mutate(is_cached = FALSE) %>% + cache_find_block() + expect_equal(blocks_simple_uncached, c(1, 1, 1)) + + blocks_simple_cached <- compute_parse_data_nested(c("1 + 1", "2 # comment2")) %>% + dplyr::mutate(is_cached = TRUE) %>% + cache_find_block() + expect_equal(blocks_simple_cached, c(1, 1, 1)) + + blocks_edge <- compute_parse_data_nested(c("1 + 1", "2 # 1+1")) %>% + dplyr::mutate(is_cached = c(TRUE, TRUE, FALSE)) %>% + cache_find_block() + expect_equal(blocks_edge, c(1, 2, 2)) +}) + + +################################################################################ + +test_that("Individual comment expressions are not cached", { + local_test_setup(cache = TRUE) + style_text(c("# g", "1")) + cache_info <- cache_info(format = "tabular") + # because output text is cached as a whole, there should be 2 cached + # expressions now + expect_equal(cache_info$n, 2) +}) + +test_that("cache is deactivated at end of caching related testthat file", { + expect_false(cache_is_activated()) +}) diff --git a/tests/testthat/test-cache-with-r-cache.R b/tests/testthat/test-cache-with-r-cache.R new file mode 100644 index 000000000..318121d50 --- /dev/null +++ b/tests/testthat/test-cache-with-r-cache.R @@ -0,0 +1,75 @@ +test_that("Cache management works", { + # clearing a cache inactivates the caching functionality. 
+ expect_false(cache_info(format = "tabular")$activated) + local_test_setup(cache = TRUE) + # at fresh startup + expect_s3_class(cache_info(format = "tabular"), "data.frame") + expect_error(capture.output(cache_info()), NA) + expect_equal(basename(cache_activate()), styler_version) + expect_equal(basename(cache_activate("xyz")), "xyz") + expect_equal(getOption("styler.cache_name"), "xyz") + # when cache xyz is activated, cache_info() shows deactivated for other caches + expect_false(cache_info(styler_version, format = "tabular")$activated) + expect_error(capture.output(cache_info(format = "lucid")), NA) + # cache_info() defaults to the currently active cache + expect_equal(basename(cache_info(format = "tabular")$location), "xyz") + + cache_deactivate() + # cache_info() defaults to the cache of the version of styler if + # no cache is active + expect_equal( + basename(cache_info(format = "tabular")$location), styler_version + ) + expect_false(cache_info(format = "tabular")$activated) + expect_equal(getOption("styler.cache_location"), NULL) + expect_error(cache_clear("testthat", ask = FALSE), NA) +}) + +test_that("cached expressions are displayed properly", { + skip_if(getRversion() < "4.2") + + cache_info <- cache_info("testthat", format = "tabular") + expect_snapshot({ + cache_info[, c("n", "size", "last_modified", "activated")] + }) + + local_test_setup(cache = TRUE) + style_text("1+1") + cache_info <- cache_info(format = "tabular") + cache_info$size <- round(cache_info$size, -2) + expect_snapshot({ + cache_info[, c("n", "size", "activated")] + }) + style_text("a <-function() NULL") + cache_info <- cache_info(format = "tabular") + cache_info$size <- round(cache_info$size, -2) + expect_snapshot({ + cache_info[, c("n", "size", "activated")] + }) +}) + + +test_that("When expressions are cached, the number of newlines between them is preserved", { + local_test_setup(cache = TRUE) + text <- c( + "1 + 1", + "", + "", + "f(x)", + "", + "", + "", + "x < 3", + "function() NULL" + ) + # add to cache + expect_equal(text[1:4], as.character(style_text(text[1:4]))) + # applied cache + expect_equal(text[1:4], as.character(style_text(text[1:4]))) + + expect_equal(text, as.character(style_text(text))) +}) + +test_that("cache is deactivated at end of caching related testthat file", { + expect_false(cache_is_activated()) +}) diff --git a/tests/testthat/test-create_token.R b/tests/testthat/test-create_token.R index 59dfe3b6d..c00892808 100644 --- a/tests/testthat/test-create_token.R +++ b/tests/testthat/test-create_token.R @@ -1,49 +1,61 @@ -context("token insertion") + test_that("can create a token that has relevant columns", { pd_names <- c( "token", "text", "short", "lag_newlines", "newlines", "pos_id", "token_before", "token_after", "terminal", "internal", - "spaces", "multi_line", "indention_ref_pos_id", "indent", "child" + "spaces", "multi_line", "indention_ref_pos_id", "indent", "child", + "stylerignore", "block", "is_cached" ) expect_equal( - names(create_tokens("'{'", "{", pos_ids = 3)), + names(create_tokens("'{'", "{", pos_ids = 3, stylerignore = FALSE, indents = 0)), pd_names ) }) test_that("pos_id can be created", { - pd <- create_tokens("XZY_TEST", "test", pos_ids = 3) + pd <- create_tokens("XZY_TEST", "test", pos_ids = 3, stylerignore = FALSE, indents = 0) new_id <- create_pos_ids(pd, 1L, by = 0.4) expect_error( - bind_rows( - create_tokens("XZY_TEST", "test", pos_ids = new_id), + vec_rbind( + create_tokens("XZY_TEST", "test", + pos_ids = new_id, + stylerignore = FALSE, indents = 0 + ), pd ), -
NA + NA ) }) test_that("unambiguous pos_id won't be created (down)", { - pd <- create_tokens("XZY_TEST", "test", pos_ids = 3) + pd <- create_tokens("XZY_TEST", "test", + pos_ids = 3, + stylerignore = FALSE, indents = 0 + ) new_id <- create_pos_ids(pd, 1L, by = 0.4) - pd <- bind_rows( - create_tokens("XZY_TEST", "test", pos_ids = new_id), + pd <- vec_rbind( + create_tokens("XZY_TEST", "test", + pos_ids = new_id, + stylerignore = FALSE, indents = 0 + ), pd ) expect_error(create_pos_id(pd, 1L, by = 0.4)) }) test_that("unambiguous pos_id won't be created (up)", { - pd <- create_tokens("XZY_TEST", "test", pos_ids = 3) + pd <- create_tokens("XZY_TEST", "test", + pos_ids = 3, + stylerignore = FALSE, indents = 0 + ) new_id <- create_pos_ids(pd, 1L, by = 0.4, after = TRUE) - pd <- bind_rows( - create_tokens("XZY_TEST", "test", pos_ids = new_id), + pd <- vec_rbind( + create_tokens("XZY_TEST", "test", pos_ids = new_id, stylerignore = FALSE, indents = 0), pd ) expect_error(create_pos_id(pd, 1L, by = 0.4, after = TRUE)) }) - diff --git a/tests/testthat/test-create_tree.R b/tests/testthat/test-create_tree.R index 8fcd8ba63..c8bcfb319 100644 --- a/tests/testthat/test-create_tree.R +++ b/tests/testthat/test-create_tree.R @@ -1,7 +1,8 @@ -context("test tree creation") + test_that("create_trees outputs identical structure if trees have same structure", { skip_if_not_installed("DiagrammeR") + skip_if_not_installed("data.tree") eq <- "a <- fun(a = b)" arrow <- "a <- data.frame(x = qq)" expect_equal( @@ -12,10 +13,11 @@ test_that("create_trees outputs identical structure if trees have same structure test_that("create_trees outputs are not identical structure if trees have different structure", { skip_if_not_installed("DiagrammeR") + skip_if_not_installed("data.tree") eq <- "a <- fun(a = 1:3)" arrow <- "a <- data.frame(x = qq)" expect_true( - nrow(create_tree(eq, structure_only = TRUE)) != - nrow(create_tree(arrow, structure_only = TRUE)) + nrow(create_tree(eq, structure_only = TRUE)) != + nrow(create_tree(arrow, structure_only = TRUE)) ) }) diff --git a/tests/testthat/test-curly-curly.R b/tests/testthat/test-curly-curly.R new file mode 100644 index 000000000..d61299e75 --- /dev/null +++ b/tests/testthat/test-curly-curly.R @@ -0,0 +1,8 @@ + + +test_that("curly-culry", { + expect_warning(test_collection("curly-curly", + "mixed", + transformer = style_text + ), NA) +}) diff --git a/tests/testthat/test-detect-alignment.R b/tests/testthat/test-detect-alignment.R new file mode 100644 index 000000000..e0db9a93f --- /dev/null +++ b/tests/testthat/test-detect-alignment.R @@ -0,0 +1,8 @@ +test_that("does apply spacing rules only if not aligned", { + expect_warning(test_collection("alignment", + transformer = style_text + ), NA) + + text <- "tribble(\n ~x, ~y,\n 11, list(a = 1),\n 2, list(bjj = 2)\n)" + expect_warning(style_text(text), NA) +}) diff --git a/tests/testthat/test-escaping.R b/tests/testthat/test-escaping.R new file mode 100644 index 000000000..f9a5c7864 --- /dev/null +++ b/tests/testthat/test-escaping.R @@ -0,0 +1,21 @@ +test_that("escaping of characters works", { + expect_warning(test_collection("escaping", "basic", + transformer = style_text + ), NA) + + expect_error(test_collection("escaping", "fail-parsing-1", + transformer = style_text + ), ":2:7: unexpected ") + + expect_error(test_collection("escaping", "fail-parsing-2", + transformer = style_text + ), "x <-") + + expect_error(test_collection("escaping", "fail-parsing-3", + transformer = style_text + )) + + expect_error(test_collection("escaping", 
"fail-parsing-4", + transformer = style_text + ), ":6:0:") +}) diff --git a/tests/testthat/test-exception_handling.R b/tests/testthat/test-exception_handling.R index f2acb6f7d..74afef5e8 100644 --- a/tests/testthat/test-exception_handling.R +++ b/tests/testthat/test-exception_handling.R @@ -1,4 +1,4 @@ -context("Exception handling") + test_that("style_text returns custom error", { expect_error(style_text("a <- 3 4"), "unexpected numeric constant") @@ -18,7 +18,20 @@ test_that("style_text with no tokens returns empty string and warning", { test_that("style_file with no tokens returns empty string and warning", { capture_output(expect_warning( - style_file(testthat_file("exception_handling", "empty_file.R")), + style_file(testthat_file("exception_handling", "empty_file.R"), dry = "on"), "not contain any tokens." )) }) + +test_that("warning is given when transformers does not contain a version", { + sg <- create_style_guide(style_guide_version = NULL) + if (packageVersion("styler") < "2.0") { + expect_fun <- expect_warning + } else { + expect_fun <- expect_error + } + expect_fun( + assert_transformers(sg), + "name and a version field are deprecated and will be removed in a future version of styler" + ) +}) diff --git a/tests/testthat/test-fun_dec.R b/tests/testthat/test-fun_dec.R new file mode 100644 index 000000000..be01c4af6 --- /dev/null +++ b/tests/testthat/test-fun_dec.R @@ -0,0 +1,9 @@ +test_that("reindent function declaration", { + expect_warning(test_collection("fun_dec", "fun_dec_scope_spaces", + transformer = style_text, scope = "spaces" + ), NA) + + expect_warning(test_collection("fun_dec", "line_break_fun_dec", + transformer = style_text + ), NA) +}) diff --git a/tests/testthat/test-helpers.R b/tests/testthat/test-helpers.R index e2012b46d..c5f3a5619 100644 --- a/tests/testthat/test-helpers.R +++ b/tests/testthat/test-helpers.R @@ -1,11 +1,21 @@ -context("various helpers") -test_that("can construct vertical", { - expect_error(construct_vertical(c("1 + 1", "nw")), NA) + +test_that("can construct and print vertical", { + skip_if_not_installed("prettycode") + expect_snapshot({ + construct_vertical(c("1 + 1", "nw")) + }) +}) + + +test_that("file types can be asserted", { + expect_error(assert_filetype(".Rnw"), "case is ignored") }) test_that("can lookup tokens", { - expect_error(lookup_new_special(), NA) + expect_snapshot({ + lookup_new_special() + }) }) test_that("can extend non-comment", { diff --git a/tests/testthat/test-identify-roxygen-examples.R b/tests/testthat/test-identify-roxygen-examples.R new file mode 100644 index 000000000..8eee143e2 --- /dev/null +++ b/tests/testthat/test-identify-roxygen-examples.R @@ -0,0 +1,75 @@ + + +#' Things to consider: +#' * one function declaration or many +#' * example(s) is last tag or not? 
+#' * malformatted examples +#' * \dontrun examples + +test_that("one function, last tag, properly formatted, no dontrun", { + expect_equal( + identify_start_to_stop_of_roxygen_examples(testthat_file( + "roxygen-examples-identify/1-one-function-example-last-proper-run.R" + )), + list(c(6)) + ) + + expect_equal( + identify_start_to_stop_of_roxygen_examples(testthat_file( + "roxygen-examples-identify/2-one-function-examples-last-proper-run.R" + )), + list(seq(6, 11)) + ) +}) + +test_that("one function, not last, tag, properly formatted, no dontrun", { + expect_equal( + identify_start_to_stop_of_roxygen_examples(testthat_file( + "roxygen-examples-identify/3-one-function-example-not-last-proper-run.R" + )), + list(seq(5, 5)) + ) + + expect_equal( + identify_start_to_stop_of_roxygen_examples(testthat_file( + "roxygen-examples-identify/4-one-function-examples-not-last-proper-run.R" + )), + list(seq(5, 9)) + ) +}) + +test_that("multiple functions, last, tag, properly formatted, no dontrun", { + expect_equal( + identify_start_to_stop_of_roxygen_examples(testthat_file( + "roxygen-examples-identify/5-multiple-function-examples-last-proper-run.R" + )), + list(seq(5, 9), seq(17, 17)) + ) +}) + +test_that("multiple functions, not last, tag, properly formatted, no dontrun", { + expect_equal( + identify_start_to_stop_of_roxygen_examples(testthat_file( + "roxygen-examples-identify/6-multiple-function-examples-not-last-proper-run.R" + )), + list(seq(5, 5), seq(13, 17)) + ) +}) + +test_that("basic exampleIf", { + expect_equal( + identify_start_to_stop_of_roxygen_examples(testthat_file( + "roxygen-examples-identify/20-exampleIf-simple-in.R" + )), + list(seq(5, 6)) + ) +}) + +test_that("multiple exampleIf", { + expect_equal( + identify_start_to_stop_of_roxygen_examples(testthat_file( + "roxygen-examples-identify/21-exampleIf-multiple-in.R" + )), + list(seq(5, 6), seq(7, 8)) + ) +}) diff --git a/tests/testthat/test-indent-character.R b/tests/testthat/test-indent-character.R new file mode 100644 index 000000000..855cb468c --- /dev/null +++ b/tests/testthat/test-indent-character.R @@ -0,0 +1,15 @@ +test_that("indention character can be arbitrary", { + sg <- function(indent_by = 1) { + create_style_guide( + indention = list(purrr::partial(indent_braces, indent_by = indent_by)), + indent_character = "\t", + style_guide_name = "test", + style_guide_version = 1 + ) + } + expect_equal( + style_text("{\n1\n}", style = sg) %>% + as.character(), + c("{", "\t1", "}") + ) +}) diff --git a/tests/testthat/test-indention_curly.R b/tests/testthat/test-indention_curly.R index 11c33045a..5064dcb4d 100644 --- a/tests/testthat/test-indention_curly.R +++ b/tests/testthat/test-indention_curly.R @@ -1,37 +1,47 @@ -context("indent curly brackets") + test_that("indention on one-liner curley only is not changed", { expect_warning(test_collection("indention_curly_brackets", - "one_line_curly", - transformer = style_text), NA) - + "one_line_curly", + transformer = style_text + ), NA) }) test_that("indention with multi-line curley only is correct", { expect_warning(test_collection("indention_curly_brackets", - "multi_line_curly_only", - transformer = style_text), NA) - + "multi_line_curly_only", + transformer = style_text_without_curly_curly + ), NA) }) test_that("indention with multi-line curley and round is correct", { expect_warning(test_collection("indention_curly_brackets", - "multi_line_curly_round_only", - transformer = style_text), NA) + "multi_line_curly_round_only", + transformer = style_text + ), NA) +}) +test_that("custom 
indention for curly braces is corretct ", { + expect_warning(test_collection("indention_curly_brackets", + "custom", + transformer = style_text, indent_by = 4 + ), NA) }) -test_that(paste("complete styling via top level api is correct", - "(round, curly, spacing)"), { +test_that(paste( + "complete styling via top-level api is correct", + "(round, curly, spacing)" +), { expect_warning(test_collection("indention_curly_brackets", - "multi_line_curly_round_spacing", - transformer = style_text), NA) + "multi_line_curly_round_spacing", + transformer = style_text + ), NA) expect_warning(test_collection("indention_curly_brackets", - "multi_line_curly_while_for_if_fun", - transformer = style_text), NA) - + "multi_line_curly_while_for_if_fun", + transformer = style_text + ), NA) }) diff --git a/tests/testthat/test-indention_fun_calls.R b/tests/testthat/test-indention_fun_calls.R new file mode 100644 index 000000000..f44c037b8 --- /dev/null +++ b/tests/testthat/test-indention_fun_calls.R @@ -0,0 +1,7 @@ + + +test_that("edge cases work", { + expect_warning(test_collection("indention_fun_calls", + transformer = style_text, strict = FALSE + ), NA) +}) diff --git a/tests/testthat/test-indention_multiple.R b/tests/testthat/test-indention_multiple.R index 3c171e3b7..a99634f3d 100644 --- a/tests/testthat/test-indention_multiple.R +++ b/tests/testthat/test-indention_multiple.R @@ -1,58 +1,59 @@ -context("test indent multiple") + test_that("multiple round brackets don't cause extraindention", { expect_warning(test_collection("indention_multiple", - "round_only", - transformer = style_text), NA) + "round_only", + transformer = style_text + ), NA) expect_warning(test_collection("indention_multiple", - "round_closing_on_same_line", - transformer = style_text), NA) + "round_closing_on_same_line", + transformer = style_text + ), NA) }) test_that("multiple curly brackets don't cause extraindention", { expect_warning(test_collection("indention_multiple", - "curly_only", - transformer = style_text), NA) - + "curly_only", + transformer = style_text_without_curly_curly + ), NA) }) test_that("multiple curly and round brackets don't cause extraindention", { expect_warning(test_collection("indention_multiple", - "curly_and_round", - transformer = style_text), NA) - + "curly_and_round", + transformer = style_text_without_curly_curly + ), NA) }) test_that("multiple curly and round brackets overall test", { expect_warning(test_collection("indention_multiple", - "overall", - transformer = style_text, - write_back = TRUE), NA) - + "overall", + transformer = style_text + ), NA) }) test_that("if and ifelse interacting with curly braces works", { expect_warning(test_collection("indention_multiple", - "if_else_curly", - transformer = style_text, - write_back = TRUE, strict = FALSE), NA) + "if_else_curly", + transformer = style_text, strict = FALSE + ), NA) }) test_that("edge cases work", { expect_warning(test_collection("indention_multiple", - "edge_", - transformer = style_text), NA) - + "edge_strict", + transformer = style_text_without_curly_curly + ), NA) }) test_that("token / braces interaction works", { expect_warning(test_collection("indention_multiple", - "fun_for_new_line", - transformer = style_text), NA) + "fun_for_new_line", + transformer = style_text_without_curly_curly + ), NA) }) - diff --git a/tests/testthat/test-indention_operators.R b/tests/testthat/test-indention_operators.R index 75ae1561f..3e62f85f8 100644 --- a/tests/testthat/test-indention_operators.R +++ b/tests/testthat/test-indention_operators.R @@ -1,58 
+1,157 @@ -context("indention operators") + test_that("pipe is indended correctly", { expect_warning(test_collection("indention_operators", - "pipe", - transformer = style_text, - write_back = TRUE), NA) + "pipe", + transformer = style_text + ), NA) +}) + +test_that("base pipe is indended correctly", { + skip_if(getRversion() < "4.1") + expect_warning(test_collection("indention_operators", + "base_pipe", + transformer = style_text + ), NA) }) test_that("mathematical operators are indended correctly", { expect_warning(test_collection("indention_operators", - "plus_minus", - transformer = style_op), NA) + "plus_minus", + transformer = style_op + ), NA) + + expect_warning(test_collection("indention_operators", + "multiply_divide", + transformer = style_op + ), NA) +}) + + +test_that("while / for / if without curly brackets", { + expect_warning(test_collection("indention_operators", + "while_for_if_without_curly_non_strict", + transformer = style_text, strict = FALSE + ), NA) + expect_warning(test_collection("indention_operators", + "while_for_without_curly_same_line_non_strict", + transformer = style_text, strict = FALSE + ), NA) expect_warning(test_collection("indention_operators", - "multiply_divide", - transformer = style_op), NA) + "if-else-no-braces-not-strict", + transformer = style_text, strict = FALSE + ), NA) }) +test_that("function multiline without curly brackets", { + expect_warning(test_collection("indention_operators", + "function-multiline-no-braces-strict", + transformer = style_text, strict = TRUE + ), NA) + expect_warning(test_collection("indention_operators", + "function-multiline-no-braces-non-strict", + transformer = style_text, strict = FALSE + ), NA) +}) test_that("while / for / if without curly brackets", { expect_warning(test_collection("indention_operators", - "while_for_if_without_curly", - transformer = style_text, strict = FALSE), NA) + "while_for_if_without_curly_strict", + transformer = style_text, strict = TRUE + ), NA) +}) + + +test_that("nested for and indention", { + expect_warning( + test_collection("indention_operators", + "nested-for-spacing-scope-indention", + transformer = style_text, scope = "indention" + ), + NA + ) + + expect_warning( + test_collection("indention_operators", + "nested-for-spacing-scope-spaces", + transformer = style_text, scope = "spaces" + ), + NA + ) }) test_that("logical, special EQ_SUB and EQ_ASSIGN tokens are indented correctly", { expect_warning(test_collection("indention_operators", - "logical_special", - transformer = style_text, scope = "line_breaks"), NA) + "logical_special", + transformer = style_text, scope = "line_breaks" + ), NA) expect_warning(test_collection("indention_operators", - "eq", - transformer = style_text), NA) + "eq_assign", + transformer = style_text + ), NA) + expect_warning(test_collection("indention_operators", + "eq_formal_simple", + transformer = style_text + ), NA) }) test_that("dollar is indented and spaced correctly", { expect_warning(test_collection("indention_operators", - "dollar", - transformer = style_text), NA) + "dollar", + transformer = style_text + ), NA) }) test_that( - "code is indented correctly if not first pontial trigger causes indention", { + "code is indented correctly if not first pontial trigger causes indention", + { expect_warning( test_collection( "indention_operators", "not_first_trigger", transformer = style_text ), - NA) + NA + ) + } +) + +test_that("indents eq_sub correctly with various levels of scope", { + expect_warning(test_collection("indention_operators", + 
"eq_sub_complex_indention", + transformer = style_text, scope = "indention" + ), NA) + + expect_warning(test_collection("indention_operators", + "eq_sub_complex_tokens", + transformer = style_text, scope = "tokens" + ), NA) +}) + +test_that("indents eq_formals correctly with various levels of scope", { + expect_warning(test_collection("indention_operators", + "eq_formals_complex_indention", + transformer = style_text, scope = "indention" + ), NA) + + expect_warning(test_collection("indention_operators", + "eq_formals_complex_tokens", + transformer = style_text, scope = "tokens" + ), NA) +}) + +test_that("tilde causes indention and is flattened out", { + expect_warning(test_collection("indention_operators", + "tilde", + transformer = style_text + ), NA) }) test_that("overall", { expect_warning(test_collection("indention_operators", - "overall", - transformer = style_text), NA) + "overall", + transformer = style_text + ), NA) }) diff --git a/tests/testthat/test-indention_round_brackets.R b/tests/testthat/test-indention_round_brackets.R index 6a15cbcd9..c0c6a9409 100644 --- a/tests/testthat/test-indention_round_brackets.R +++ b/tests/testthat/test-indention_round_brackets.R @@ -1,35 +1,31 @@ -context("Function calls with round brackets") + test_that("one-line function call yields correct indention", { expect_warning(test_collection("indention_round_brackets", - "one_line", - transformer = style_text), NA) - - + "one_line", + transformer = style_text + ), NA) }) ## ............................................................................ test_that(paste("multi-line function call yields correct indention"), { - expect_warning(test_collection("indention_round_brackets", - "multi_line", - transformer = style_text), NA) - + "multi_line", + transformer = style_text + ), NA) }) ## ............................................................................ -context("grouping arithmetic expressions with round brackets. ") + # Does NOT cover indention by operators such as +" test_that("arithmetic grouping with braces yields correctly indention", { - expect_warning(test_collection("indention_round_brackets", - "arithmetic", - transformer = style_text), NA) - - + "arithmetic", + transformer = style_text + ), NA) }) diff --git a/tests/testthat/test-insertion_comment_interaction.R b/tests/testthat/test-insertion_comment_interaction.R index e9a7fa978..698b58d03 100644 --- a/tests/testthat/test-insertion_comment_interaction.R +++ b/tests/testthat/test-insertion_comment_interaction.R @@ -1,24 +1,27 @@ -context("test comment token insertion interaction") + ## ............................................................................ 
## strict = TRUE #### test_that("token are added correctly to conditional statements", { expect_warning(test_collection( - "insertion_comment_interaction", "just_if_strict", - transformer = style_text), NA) + "insertion_comment_interaction", "just_if_strict", + transformer = style_text + ), NA) }) test_that("token are added correctly to conditional statements", { expect_warning(test_collection( "insertion_comment_interaction", "if_else_strict", - transformer = style_text), NA) + transformer = style_text + ), NA) }) test_that("token are added correctly to conditional statements", { expect_warning(test_collection( "insertion_comment_interaction", "if_else_if_else_strict", - transformer = style_text), NA) + transformer = style_text + ), NA) }) @@ -28,17 +31,20 @@ test_that("token are added correctly to conditional statements", { test_that("token are added correctly to conditional statements", { expect_warning(test_collection( "insertion_comment_interaction", "just_if_non_strict", - transformer = style_text, strict = FALSE), NA) + transformer = style_text, strict = FALSE + ), NA) }) test_that("token are added correctly to conditional statements", { expect_warning(test_collection( "insertion_comment_interaction", "if_else_non_strict", - transformer = style_text, strict = FALSE), NA) + transformer = style_text, strict = FALSE + ), NA) }) test_that("token are added correctly to conditional statements", { expect_warning(test_collection( "insertion_comment_interaction", "if_else_if_else_non_strict", - transformer = style_text, strict = FALSE), NA) + transformer = style_text, strict = FALSE + ), NA) }) diff --git a/tests/testthat/test-interaction-caching-comments.R b/tests/testthat/test-interaction-caching-comments.R new file mode 100644 index 000000000..8924157db --- /dev/null +++ b/tests/testthat/test-interaction-caching-comments.R @@ -0,0 +1,37 @@ +test_that("Correclty removes comments that are not top-level when making pd shallow (low-level)", { + local_test_setup(cache = TRUE) + text7 <- c( + "call(", + " # inline-comment", + " 1 + 1,", + " f(),", + " x(5)", + ")", + "# styler" + ) + style_text(text7) # only making pd shallow when call is cached. 
+ pd_flat <- text_to_flat_pd(text7, tidyverse_style(), more_specs = cache_more_specs_default()) + expect_false(any(pd_flat$text == "# inline-comment")) +}) + +test_that("Correctly removes comments that are not top-level when making pd shallow (high-level)", { + local_test_setup(cache = TRUE) + text7 <- c( + "call(", + "# styler: off", + "1 +1,", + "f(),", + " x(5))", + "# styler" + ) + style_text(text7) + text7[length(text7)] <- "# comment" + expect_equal( + style_text(text7) %>% as.character(), + text7 + ) +}) + +test_that("cache is deactivated at end of caching related testthat file", { + expect_false(cache_is_activated()) +}) diff --git a/tests/testthat/test-interaction-caching-stylerignore.R b/tests/testthat/test-interaction-caching-stylerignore.R new file mode 100644 index 000000000..255cb67a4 --- /dev/null +++ b/tests/testthat/test-interaction-caching-stylerignore.R @@ -0,0 +1,235 @@ +test_that("caching works with stylerignore for multi-token lines when partly cached before", { + local_test_setup(cache = TRUE) + text1 <- "1 + 1" + expect_equal( + as.character(style_text(text1)), + text1 + ) + + text2 <- c( + "# styler: off", + "1 + 1", + "# styler: on", + "# a comment" + ) + expect_equal( + as.character(style_text(text2)), + text2 + ) +}) + +test_that("caching works with stylerignore for multi-token lines", { + local_test_setup(cache = TRUE) + text3 <- c( + "# styler: off", + "1 + 1 #comment2", + "# styler: on", + "#a comment" + ) + text3_correct <- c( + "# styler: off", + "1 + 1 #comment2", + "# styler: on", + "# a comment" + ) + + expect_equal( + as.character(style_text(text3)), + text3_correct + ) + + expect_equal( + as.character(style_text(text3_correct)), + text3_correct + ) + + text4 <- c( + "# styler: off", + "1 +1", + "x(x)", + "# styler: on", + "# a comment" + ) + + expect_equal( + as.character(style_text(text4)), + text4 + ) +}) + +test_that("caching works", { + local_test_setup(cache = TRUE) + text1 <- "1 + 1" + expect_equal( + as.character(style_text(text1)), + text1 + ) + + text2 <- c( + "# styler: off", + "1 + 1", + "# styler: on", + "# a comment" + ) + expect_equal( + as.character(style_text(text2)), + text2 + ) +}) + +# when a top-level expression is cached, it means it already complies with +# the style. +# Since top-level comments are not cached, the expression in the stylerignore +# sequence will be in a different block if cached and not be sent through +# apply_stylerignore. + +# if the stylerignore tag is top-level +test_that("caching works for top-level expressions", { + local_test_setup(cache = TRUE) + text1 <- "1 + 1" + expect_equal( + as.character(style_text(text1)), + text1 + ) + + text2 <- c( + "# styler: off", + "1 + 1", + "# styler: on", + "# a comment" + ) + expect_equal( + as.character(style_text(text2)), + text2 + ) +}) + +# if the stylerignore tag is not top-level +# since we only cache top-level expressions, the whole expression is either +# cached or not, depending on whether it complies with the style guide.
+test_that("caching works for non-top-level expressions", { + local_test_setup(cache = TRUE) + text1 <- "1 + 1" + expect_equal( + as.character(style_text(text1)), + text1 + ) + + text2 <- c( + "cal8(", + " # styler: off", + " 1 + 1,", + " # styler: on", + ")", + "# comment" + ) + expect_equal( + as.character(style_text(text2)), + text2 + ) +}) + +test_that("does not cache stylerignore sequences", { + local_test_setup(cache = TRUE) + text <- c( + "1+1# styler: off" + ) + style_text(text) + expect_false( + is_cached("1+1", tidyverse_style(), more_specs = cache_more_specs_default()) + ) + local_test_setup(cache = TRUE) + text <- c( + "# styler: off", + "1+1" + ) + style_text(text) + expect_false( + is_cached( + "1+1", + tidyverse_style(), + more_specs = cache_more_specs_default() + ) + ) +}) + +test_that("indention preserved in stylerignore when caching activated", { + local_test_setup(cache = TRUE) + text6 <- c( + "# styler: off", + "1 + 1", + " x(5)", + "# styler: on", + "# a comment" + ) + expect_equal( + as.character(style_text(text6)), + text6 + ) +}) + +test_that("changing ignore markers invalidates cache", { + opts <- list( + list(styler.ignore_stop = "noqua: stop", n = 1), + list(styler.ignore_start = "noqua: start", n = 3) + ) + purrr::walk(opts, function(opt) { + local_test_setup(cache = TRUE) + text7 <- c( + "# styler: off", + "1 + 1", + "# styler: on" + ) + style_text(text7) + rlang::exec(withr::local_options, !!!opt[-length(opt)]) + style_text(text7) + expect_equal(cache_info(format = "tabular")$n, opt[["n"]]) + }) +}) + + + +test_that("all expressions within a stylerignore sequence (whether cached or not) are put in the same block (low-level)", { + transformers <- tidyverse_style() + specs <- transformers$more_specs_style_guide + full <- c( + "# styler: off", + "a", + "flush(", + "1", + ")", + "# styler: on" + ) + without_ignore <- full[c(-1L, -length(full))] + local_test_setup(cache = TRUE) + + expect_true(all(compute_parse_data_nested(without_ignore, transformers, specs)$block == 1L)) + + cache_by_expression("a", transformers, more_specs = NULL) + is_cached("a", transformers, more_specs = NULL) + cache_by_expression("flush(\n 1\n)", transformers, more_specs = NULL) + cache_by_expression(c("a", "flush(", " 1", ")"), transformers, more_specs = NULL) + + expect_true(all(compute_parse_data_nested(full)$block == 1L)) +}) + + +test_that("all expressions within a stylerignore sequence (whether cached or not) are put in the same block (high-level)", { + full <- c( + "# styler: off", + "a", + "flush(", + "1", + ")", + "# styler: on" + ) + without_ignore <- full[c(-1L, -length(full))] + local_test_setup(cache = TRUE) + + expect_identical(as.character(style_text(without_ignore)), c("a", "flush(", " 1", ")")) + expect_identical(as.character(style_text(full)), full) +}) + +test_that("cache is deactivated at end of caching related testthat file", { + expect_false(cache_is_activated()) +}) diff --git a/tests/testthat/test-io.R b/tests/testthat/test-io.R new file mode 100644 index 000000000..6195fd52d --- /dev/null +++ b/tests/testthat/test-io.R @@ -0,0 +1,36 @@ +test_that("non-ASCII characters are handled properly for text styling", { + expect_equal( + style_text("glück <-3") %>% unclass(), "glück <- 3" + ) +}) + + +test_that("non-ASCII characters are handled properly for file styling", { + skip_if(.Platform$OS.type != "windows") + + withr::with_locale( + c(LC_CTYPE = "English_United States.1252"), + { + tmp <- tempfile(fileext = ".R") + con <- file(tmp, encoding = "UTF-8") + 
on.exit(close(con), add = TRUE) + + # c.f. dplyr's tests/testthat/helper-encoding.R + writeLines("Gl\u00fcck+1", con) + + style_file(tmp) + result <- readLines(con) + expect_equal(result, "Gl\u00fcck + 1") + } + ) +}) + +test_that("empty files are converted to zero-byte files", { + local_test_setup() + for (file_content in list(character(), "", c("", ""))) { + tmp <- tempfile(fileext = ".R") + write_utf8(file_content, tmp) + suppressWarnings(style_file(tmp)) + expect_true(file.size(tmp) == 0) + } +}) diff --git a/tests/testthat/test-line_breaks_and_other.R b/tests/testthat/test-line_breaks_and_other.R index ca34c8d43..7568ac71f 100644 --- a/tests/testthat/test-line_breaks_and_other.R +++ b/tests/testthat/test-line_breaks_and_other.R @@ -1,27 +1,76 @@ -context("linebreaking added / removed correctly") + test_that("line breaks involing curly brackets", { expect_warning(test_collection("line_breaks_and_other", "curly", - transformer = style_text), NA) + transformer = style_text + ), NA) }) +test_that("line breaks involving curly brackets", { + expect_warning(test_collection("line_breaks_and_other", "braces-fun-calls", + transformer = style_text + ), NA) +}) + + test_that("line breaks involing curly brackets", { expect_warning(test_collection("line_breaks_and_other", "edge_comment_and_curly", - transformer = style_text), NA) + transformer = style_text + ), NA) }) test_that("adding and removing line breaks", { expect_warning(test_collection("line_breaks_and_other", "if", - transformer = style_text), NA) + transformer = style_text + ), NA) }) test_that("no line break after %>% if next token is comment", { expect_warning(test_collection("line_breaks_and_other", "pipe_and", - transformer = style_text), NA) + transformer = style_text + ), NA) }) -test_that("line break after closing brace in function calls if possible", { - expect_warning(test_collection("line_breaks_and_other", "line_break_fun_dec", - transformer = style_text), NA) +test_that("line break before comma is removed and placed after comma ", { + expect_warning(test_collection("line_breaks_and_other", "comma", + transformer = style_text + ), NA) +}) + +test_that("line break before comma is removed and placed after comma ", { + expect_warning(test_collection("line_breaks_and_other", "pipe-line", + transformer = style_text + ), NA) +}) + +test_that("Can handle base R pipe", { + skip_if(getRversion() < "4.1") + expect_warning(test_collection("line_breaks_and_other", "base-pipe-line", + transformer = style_text + ), NA) +}) + +test_that("line break added for ggplot2 call", { + expect_warning(test_collection("line_breaks_and_other", "ggplot2", + transformer = style_text + ), NA) +}) + +test_that("drop redundant line breaks in assignments", { + expect_warning(test_collection("line_breaks_and_other", "assignment", + transformer = style_text, scope = I(c("line_breaks", "tokens")) + ), NA) +}) + +test_that("line is correctly broken around = ", { + expect_warning(test_collection("line_breaks_and_other", "around-eq-sub", + transformer = style_text + ), NA) +}) + +test_that("comments are not moved down after {", { + expect_warning(test_collection("line_breaks_and_other", "comment-around-curly", + transformer = style_text + ), NA) +}) diff --git a/tests/testthat/test-line_breaks_fun_call.R b/tests/testthat/test-line_breaks_fun_call.R index 86c53b3c1..939a95d1b 100644 --- a/tests/testthat/test-line_breaks_fun_call.R +++ b/tests/testthat/test-line_breaks_fun_call.R @@ -1,31 +1,64 @@ -context("line breaks for function calls") + test_that("line breaks work 
in general", { expect_warning(test_collection("line_breaks_fun_call", - "token_dependent_mixed", - transformer = style_text), NA) + "token_dependent_mixed", + transformer = style_text + ), NA) + + expect_warning(test_collection("line_breaks_fun_call", + "token_dependent_complex_strict", + transformer = style_text + ), NA) +}) + +test_that("blank lines in function calls are removed for strict = TRUE", { + expect_warning(test_collection("line_breaks_fun_call", + "blank-strict", + transformer = style_text + ), NA) expect_warning(test_collection("line_breaks_fun_call", - "token_dependent_complex_strict", - transformer = style_text), NA) + "blank-non-strict", + transformer = style_text, strict = FALSE + ), NA) }) + test_that("line breaks are not applied with non-strict", { expect_warning(test_collection("line_breaks_fun_call", - "token_dependent_complex_non_strict", - transformer = style_text, strict = FALSE), NA) + "token_dependent_complex_non_strict", + transformer = style_text, strict = FALSE + ), NA) }) test_that("line breaks work with comments", { expect_warning(test_collection("line_breaks_fun_call", - "token_dependent_comments", - transformer = style_text), NA) + "token_dependent_comments", + transformer = style_text + ), NA) + expect_warning(test_collection("line_breaks_fun_call", + "line_breaks_and_comments", + transformer = style_text + ), NA) +}) + +test_that("line breaks work with exceptions", { + expect_warning(test_collection("line_breaks_fun_call", + "switch_ifelse", + transformer = style_text + ), NA) +}) + +test_that("line breaks work with exceptions", { expect_warning(test_collection("line_breaks_fun_call", - "line_breaks_and_comments", - transformer = style_text), NA) + "named_arguments", + transformer = style_text + ), NA) }) test_that("line breaks work with exceptions", { expect_warning(test_collection("line_breaks_fun_call", - "switch_ifelse", - transformer = style_text), NA) + "unindent", + transformer = style_text + ), NA) }) diff --git a/tests/testthat/test-math_token_spacing.R b/tests/testthat/test-math_token_spacing.R index 57f11b07a..9b1aa9009 100644 --- a/tests/testthat/test-math_token_spacing.R +++ b/tests/testthat/test-math_token_spacing.R @@ -1,4 +1,4 @@ -context("math token spacing") + test_that("invalid tokens return error", { expect_error(test_collection( @@ -7,7 +7,8 @@ test_that("invalid tokens return error", { style = tidyverse_style, scope = "spaces", math_token_spacing = specify_math_token_spacing("hdk"), - strict = FALSE), "lookup") + strict = FALSE + ), "lookup") }) test_that("non-strict default: spacing around all", { @@ -17,7 +18,8 @@ test_that("non-strict default: spacing around all", { style = tidyverse_style, scope = "spaces", math_token_spacing = specify_math_token_spacing(), - strict = FALSE), NA) + strict = FALSE + ), NA) }) test_that("strict default: spacing around all", { @@ -27,7 +29,8 @@ test_that("strict default: spacing around all", { style = tidyverse_style, scope = "spaces", math_token_spacing = tidyverse_math_token_spacing(), - strict = TRUE), NA) + strict = TRUE + ), NA) }) test_that("strict no space around +", { @@ -37,7 +40,7 @@ test_that("strict no space around +", { style = tidyverse_style, scope = "spaces", math_token_spacing = specify_math_token_spacing(zero = "'+'") - ), NA) + ), NA) }) test_that("strict no space around all but ^", { diff --git a/tests/testthat/test-multiple_expressions.R b/tests/testthat/test-multiple_expressions.R index 825b985b1..783f012dd 100644 --- a/tests/testthat/test-multiple_expressions.R +++ 
b/tests/testthat/test-multiple_expressions.R @@ -1,14 +1,16 @@ library("testthat") -context("multiple expressions") + test_that("simple multiple expressions are styled correctly", { expect_warning(test_collection("multiple_expressions", - "two_simple", - transformer = style_text), NA) + "two_simple", + transformer = style_text + ), NA) }) test_that("complex multiple expressions are styled correctly", { expect_warning(test_collection("multiple_expressions", - "three_complex", - transformer = style_text), NA) + "three_complex", + transformer = style_text + ), NA) }) diff --git a/tests/testthat/test-parse_comments.R b/tests/testthat/test-parse_comments.R index dff80a242..53ec3e9b3 100644 --- a/tests/testthat/test-parse_comments.R +++ b/tests/testthat/test-parse_comments.R @@ -1,38 +1,74 @@ -library("testthat") -context("correctly treats comments") - test_that("spacing within comments is done correctly", { expect_warning(test_collection("parse_comments", - "within_spacing_with_force", - transformer = style_text, - style = tidyverse_style, - start_comments_with_one_space = TRUE), NA) + "within_spacing_with_force", + transformer = style_text, + style = tidyverse_style, + start_comments_with_one_space = TRUE + ), NA) expect_warning(test_collection("parse_comments", - "within_spacing_without_force", - transformer = style_text, - style = tidyverse_style, - start_comments_with_one_space = FALSE), NA) + "within_spacing_without_force", + transformer = style_text, + style = tidyverse_style, + start_comments_with_one_space = FALSE + ), NA) expect_warning(test_collection("parse_comments", - "eol_eof_spaces", - transformer = style_text), NA) + "eol_eof_spaces", + transformer = style_text + ), NA) }) test_that("comments are treated corectly", { expect_warning(test_collection("parse_comments", - "mixed", - transformer = style_empty), NA) + "mixed", + transformer = style_empty + ), NA) + + expect_warning(test_collection("parse_comments", + "just_comments", + transformer = style_empty + ), NA) + + + expect_warning(test_collection("parse_comments", + "with_indention", + transformer = style_text + ), NA) +}) + +test_that("rplumber tags / syntax is handled properly", { + expect_warning(test_collection("parse_comments", + "rplumber", + transformer = style_text + ), NA) +}) + +test_that("hashbangs are respected", { expect_warning(test_collection("parse_comments", - "just_comments", - transformer = style_empty), NA) + "shebang", + transformer = style_text + ), NA) +}) +test_that("xaringan markers are respected", { + expect_warning(test_collection("parse_comments", + "xaringan", + transformer = style_text + ), NA) +}) +test_that("output prefix markers are respected", { expect_warning(test_collection("parse_comments", - "with_indention", - transformer = style_text, - write_back = TRUE), NA) + "output-prefix", + transformer = style_text + ), NA) +}) - # top-level test with indention +test_that("code chunk headers for spinning are respected", { + expect_warning(test_collection("parse_comments", + "spinning_code_chunk_headers", + transformer = style_text + ), NA) }) diff --git a/tests/testthat/test-parsing.R b/tests/testthat/test-parsing.R index 05cc637d6..c5cb2463a 100644 --- a/tests/testthat/test-parsing.R +++ b/tests/testthat/test-parsing.R @@ -1,11 +1,14 @@ -context("circumvent parsing bugs") + test_that("repreated parsing solves wrong parent assignment", { - expect_warning(test_collection( - "parsing", "repeated_parsing", - transformer = style_text, - strict = FALSE), - NA) + expect_warning( + test_collection( + 
"parsing", "repeated_parsing", + transformer = style_text, + strict = FALSE + ), + NA + ) # move to temp dir dir <- tempfile("styler") @@ -15,15 +18,56 @@ test_that("repreated parsing solves wrong parent assignment", { file.copy(path_perm, dir) sys_call <- paste0( - "R -q -e \"styler::style_file(\\\"", path_temp, "\\\")\"" + "R -q -e \"styler::cache_deactivate(); styler::style_file(\\\"", path_temp, "\\\")\"" ) calls_sys(sys_call, intern = FALSE, ignore.stdout = TRUE, ignore.stderr = TRUE) - ref <- enc::read_lines_enc(testthat_file("parsing", "repeated_parsing-out.R")) - result <- enc::read_lines_enc(path_temp) + ref <- read_utf8_bare(testthat_file("parsing", "repeated_parsing-out.R")) + result <- read_utf8_bare(path_temp) expect_equal(ref, result) unlink(dir) }) test_that("long strings are parsed correctly", { - test_collection("parsing", "long_strings", transformer = style_text) + expect_warning( + test_collection("parsing", "long_strings", transformer = style_text), + NA + ) +}) + +test_that("0x number representation is preserved with(out) L", { + text <- "0x00000002L" + expect_true(all(tokenize(text)$text == text)) + text <- "0x00000002" + expect_true(all(tokenize(text)$text == text)) + text <- "a <- 0x2L" + pd <- get_parse_data(text) + expect_equal(gsub("0x2L?", "0x2L", pd$text), tokenize(text)$text) +}) + + +test_that("CRLF EOLs fail with informative error", { + expect_error( + style_text("glück <- 3\r\n glück + 1"), + "Please change the EOL character in your editor to Unix style and try again." + ) + expect_error( + style_text(c("glück <- 3", "glück + 1\r\n 3")), + "Please change the EOL character in your editor to Unix style and try again." + ) +}) + + +test_that("mixed CRLF / LF EOLs fail", { + expect_error( + style_text("a + 3 -4 -> x\nx + 2\r\n glück + 1"), + "unexpected input" + ) +}) + +test_that("unicode can't be propprely handled on Windows for R < 4.2", { + msg <- ifelse(getRversion() < "4.2" && is_windows(), + "Can't parse input due to unicode restriction in base R\\.", + NA + ) + expect_error(style_text('suit <- "♠"'), msg) }) diff --git a/tests/testthat/test-public_api-0.R b/tests/testthat/test-public_api-0.R new file mode 100644 index 000000000..e0d46f105 --- /dev/null +++ b/tests/testthat/test-public_api-0.R @@ -0,0 +1,96 @@ +test_that("styler can style package", { + capture_output(expect_false({ + styled <- style_pkg(testthat_file("public-api", "xyzpackage")) + any(styled$changed) + })) +}) + +test_that("styler can style package and exclude some directories", { + capture_output( + styled <- style_pkg(testthat_file("public-api", "xyzpackage"), + exclude_dirs = "tests" + ) + ) + expect_true(nrow(styled) == 1) + expect_false(any(grepl("tests/testthat/test-package-xyz.R", styled$file))) +}) + +test_that("styler can style package and exclude some sub-directories", { + capture_output( + styled <- style_pkg(testthat_file("public-api", "xyzpackage"), + exclude_dirs = "tests/testthat" + ) + ) + expect_true(nrow(styled) == 2) + expect_true(any(grepl("tests/testthat.R", styled$file))) + expect_false(any(grepl("tests/testthat/test-package-xyz.R", styled$file))) +}) + + + +test_that("styler can style package and exclude some directories and files", { + capture_output(expect_true({ + styled <- style_pkg(testthat_file("public-api", "xyzpackage"), + exclude_dirs = "tests", + exclude_files = ".Rprofile" + ) + nrow(styled) == 1 + })) + + capture_output(expect_true({ + styled <- style_pkg(testthat_file("public-api", "xyzpackage"), + exclude_dirs = "tests", + exclude_files = 
"./.Rprofile" + ) + nrow(styled) == 1 + })) +}) + + +test_that("styler can style directory", { + capture_output(expect_false({ + styled <- style_dir(testthat_file("public-api", "xyzdir")) + any(styled$changed) + })) +}) + +test_that("styler can style directories and exclude", { + capture_output(expect_true({ + styled <- style_dir( + testthat_file("public-api", "renvpkg"), + exclude_dirs = "renv" + ) + nrow(styled) == 2 + })) + capture_output(expect_true({ + styled <- style_dir( + testthat_file("public-api", "renvpkg"), + exclude_dirs = c("renv", "tests/testthat") + ) + nrow(styled) == 1 + })) + + capture_output(expect_true({ + styled <- style_dir( + testthat_file("public-api", "renvpkg"), + exclude_dirs = "./renv" + ) + nrow(styled) == 2 + })) + + capture_output(expect_true({ + styled <- style_dir( + testthat_file("public-api", "renvpkg"), + exclude_dirs = "./renv", recursive = FALSE + ) + nrow(styled) == 0 + })) + + capture_output(expect_true({ + styled <- style_dir( + testthat_file("public-api", "renvpkg"), + recursive = FALSE + ) + nrow(styled) == 0 + })) +}) diff --git a/tests/testthat/test-public_api-1.R b/tests/testthat/test-public_api-1.R new file mode 100644 index 000000000..e7297d2dd --- /dev/null +++ b/tests/testthat/test-public_api-1.R @@ -0,0 +1,144 @@ +test_that("styler can style files", { + # just one + capture_output(expect_equal( + { + out <- style_file(c( + testthat_file("public-api", "xyzfile", "random-script.R") + ), strict = FALSE) + out$changed + }, + rep(FALSE, 1), + ignore_attr = TRUE + )) + # multiple not in the same working directory + capture_output(expect_equal( + { + out <- style_file(c( + testthat_file("public-api", "xyzfile", "random-script.R"), + testthat_file("public-api", "xyzfile", "subfolder", "random-script.R") + ), strict = FALSE) + out$changed + }, + rep(FALSE, 2), + ignore_attr = TRUE + )) +}) + + +test_that("styler does not return error when there is no file to style", { + capture_output(expect_error(style_dir( + testthat_file("public-api", "xyzemptydir"), + strict = FALSE + ), NA)) +}) + + + +test_that("styler can style Rmd file", { + expect_false({ + out <- style_file( + testthat_file("public-api", "xyzfile_rmd", "random.Rmd"), + strict = FALSE + ) + out$changed + }) + + styled <- style_file( + testthat_file("public-api", "xyzfile_rmd", "random2.Rmd"), + strict = FALSE + ) + expect_false(styled$changed) +}) + +test_that("styler can style Rmarkdown file", { + expect_false({ + out <- style_file( + testthat_file("public-api", "xyzfile_rmd", "random.Rmarkdown"), + strict = FALSE + ) + out$changed + }) + + + styled <- style_file( + testthat_file("public-api", "xyzfile_rmd", "random2.Rmarkdown"), + strict = FALSE + ) + expect_false(styled$changed) +}) + + +test_that("styler can style qmd file", { + expect_false({ + out <- style_file( + testthat_file("public-api", "xyzfile_qmd", "new.qmd"), + strict = FALSE + ) + out$changed + }) + + styled <- style_file( + testthat_file("public-api", "xyzfile_rmd", "random2.Rmarkdown"), + strict = FALSE + ) + expect_false(styled$changed) +}) + +test_that("styler handles malformed Rmd file and invalid R code in chunk", { + capture_output(expect_warning( + style_file(testthat_file("public-api", "xyzfile_rmd", "invalid4.Rmd"), strict = FALSE), + "3: " + )) + + capture_output(expect_warning( + style_file(testthat_file("public-api", "xyzfile_rmd", "invalid7.Rmd"), strict = FALSE), + "Malformed file" + )) +}) + + + + +test_that("messages (via cat()) of style_file are correct", { + for (encoding in ls_testable_encodings()) { + 
withr::with_options( + list(cli.unicode = encoding == "utf8"), + { + # Message if scope > line_breaks and code changes + expect_snapshot({ + cat(catch_style_file_output(file.path( + "public-api", + "xyzdir-dirty", + "dirty-sample-with-scope-tokens.R" + )), sep = "\n") + }) + + # No message if scope > line_breaks and code does not change + expect_snapshot({ + cat(catch_style_file_output(file.path( + "public-api", "xyzdir-dirty", "clean-sample-with-scope-tokens.R" + )), sep = "\n") + }) + + # No message if scope <= line_breaks even if code is changed. + expect_snapshot({ + cat(catch_style_file_output(file.path( + "public-api", "xyzdir-dirty", "dirty-sample-with-scope-spaces.R" + )), sep = "\n") + }) + } + ) + } +}) + +test_that("Messages can be suppressed", { + withr::with_options( + list(styler.quiet = TRUE), + { + output <- catch_style_file_output(file.path( + "public-api", "xyzdir-dirty", "dirty-sample-with-scope-spaces.R" + )) + expect_equal(output, character(0)) + } + ) +}) diff --git a/tests/testthat/test-public_api-2.R b/tests/testthat/test-public_api-2.R new file mode 100644 index 000000000..8606a4afd --- /dev/null +++ b/tests/testthat/test-public_api-2.R @@ -0,0 +1,109 @@ +test_that("styler can style R, Rmd and Rmarkdown files via style_dir()", { + msg <- capture_output( + style_dir(testthat_file("public-api", "xyz-r-and-rmd-dir"), + filetype = c("R", "Rmd", "Rmarkdown") + ) + ) + expect_true(any(grepl("random-script-in-sub-dir.R", msg, fixed = TRUE))) + expect_true(any(grepl("random-rmd-script.Rmd", msg, fixed = TRUE))) + expect_true(any(grepl("random-rmd-script.Rmarkdown", msg, fixed = TRUE))) +}) + +test_that("styler can style Rmd files only via style_dir()", { + msg <- capture_output( + style_dir(testthat_file("public-api", "xyz-r-and-rmd-dir"), + filetype = "Rmd" + ) + ) + expect_true(any(grepl("random-rmd-script.Rmd", msg, fixed = TRUE))) + expect_false(any(grepl("random-script-in-sub-dir.R", msg, fixed = TRUE))) + expect_false(any(grepl("random-rmd-script.Rmarkdown", msg, fixed = TRUE))) +}) + +test_that("styler can style .r and .rmd files only via style_dir()", { + msg <- capture_output( + style_dir(testthat_file("public-api", "xyz-r-and-rmd-dir"), + filetype = c(".r", ".rmd") + ) + ) + expect_true(any(grepl("random-script-in-sub-dir.R", msg, fixed = TRUE))) + expect_true(any(grepl("random-rmd-script.Rmd", msg, fixed = TRUE))) + expect_false(any(grepl("random-rmd-script.Rmarkdown", msg, fixed = TRUE))) +}) + + + +test_that("styler can style R and Rmd files via style_pkg()", { + msg <- capture_output( + style_pkg(testthat_file("public-api", "xyzpackage-rmd"), + filetype = c("R", "Rmd", "Rmarkdown") + ) + ) + expect_true(any(grepl("hello-world.R", msg, fixed = TRUE))) + expect_true(any(grepl("test-package-xyz.R", msg, fixed = TRUE))) + expect_true(any(grepl("random.Rmd", msg, fixed = TRUE))) + expect_true(any(grepl("random.Rmarkdown", msg, fixed = TRUE))) + expect_true(any(grepl("README.Rmd", msg, fixed = TRUE))) + expect_false(any(grepl("RcppExports.R", msg, fixed = TRUE))) +}) + +test_that("style_pkg() styles qmd files by default", { + msg <- capture_output( + style_pkg(testthat_file("public-api", "xyzpackage-qmd")) + ) + expect_true(any(grepl("hello-world.R", msg, fixed = TRUE))) + expect_true(any(grepl("test-package-xyz.R", msg, fixed = TRUE))) + expect_true(any(grepl("random.Rmd", msg, fixed = TRUE))) + expect_true(any(grepl("random.Rmarkdown", msg, fixed = TRUE))) + expect_true(any(grepl("README.Rmd", msg, fixed = TRUE))) + expect_false(any(grepl("RcppExports.R", msg, 
fixed = TRUE))) + expect_true(any(grepl("new.qmd", msg, fixed = TRUE))) +}) + +test_that("style_pkg() can find qmd anywhere", { + msg <- capture_output( + style_pkg(testthat_file("public-api", "xyzpackage-qmd"), + filetype = ".Qmd" + ) + ) + expect_no_match(msg, "hello-world.R", fixed = TRUE) + expect_no_match(msg, "test-package-xyz.R", fixed = TRUE) + expect_no_match(msg, "random.Rmd", fixed = TRUE) + expect_no_match(msg, "random.Rmarkdown", fixed = TRUE) + expect_no_match(msg, "README.Rmd", fixed = TRUE) + expect_no_match(msg, "RcppExports.R", fixed = TRUE) + expect_match(msg, "new.qmd", fixed = TRUE) +}) + + +test_that("styler can style Rmd files only via style_pkg()", { + msg <- capture_output( + style_pkg(testthat_file("public-api", "xyzpackage-rmd"), + filetype = "Rmd" + ) + ) + expect_false(any(grepl("hello-world.R", msg, fixed = TRUE))) + expect_false(any(grepl("test-package-xyz.R", msg, fixed = TRUE))) + expect_true(any(grepl("random.Rmd", msg, fixed = TRUE))) + expect_false(any(grepl("random.Rmarkdown", msg, fixed = TRUE))) + expect_true(any(grepl("README.Rmd", msg, fixed = TRUE))) + expect_false(any(grepl("RcppExports.R", msg, fixed = TRUE))) +}) + +test_that("styler can style Rmarkdown files only via style_pkg()", { + msg <- capture_output( + style_pkg(testthat_file("public-api", "xyzpackage-rmd"), + filetype = "Rmarkdown" + ) + ) + expect_false(any(grepl("hello-world.R", msg, fixed = TRUE))) + expect_false(any(grepl("test-package-xyz.R", msg, fixed = TRUE))) + expect_false(any(grepl("random.Rmd", msg, fixed = TRUE))) + expect_true(any(grepl("random.Rmarkdown", msg, fixed = TRUE))) + expect_false(any(grepl("README.Rmd", msg, fixed = TRUE))) + expect_false(any(grepl("RcppExports.R", msg, fixed = TRUE))) +}) + +test_that("insufficient R version returns error", { + expect_error(stop_insufficient_r_version()) +}) diff --git a/tests/testthat/test-public_api-3.R b/tests/testthat/test-public_api-3.R new file mode 100644 index 000000000..8b1e926fb --- /dev/null +++ b/tests/testthat/test-public_api-3.R @@ -0,0 +1,197 @@ +test_that("styler can style Rnw file", { + expect_false({ + out <- style_file( + testthat_file("public-api", "xyzfile-rnw", "random.Rnw"), + strict = FALSE + ) + out$changed + }) + styled <- style_file( + testthat_file("public-api", "xyzfile-rnw", "random2.Rnw"), + strict = FALSE + ) + expect_false(styled$changed) +}) + +test_that("styler handles malformed Rnw file and invalid R code in chunk", { + capture_output(expect_warning( + style_file(testthat_file("public-api", "xyzfile-rnw", "random3.Rnw"), strict = FALSE) + )) + + capture_output(expect_warning( + style_file(testthat_file("public-api", "xyzfile-rnw", "random4.Rnw"), strict = FALSE) + )) +}) + + + +test_that("styler can style R, Rmd and Rnw files via style_pkg()", { + msg <- capture_output( + style_pkg(testthat_file("public-api", "xyzpackage-rnw"), + filetype = c("R", "Rmd", "Rnw") + ) + ) + expect_true(any(grepl("hello-world.R", msg, fixed = TRUE))) + expect_true(any(grepl("test-package-xyz.R", msg, fixed = TRUE))) + expect_true(any(grepl("random.Rmd", msg, fixed = TRUE))) + expect_true(any(grepl("random.Rnw", msg, fixed = TRUE))) + expect_false(any(grepl("RcppExports.R", msg, fixed = TRUE))) +}) + +test_that("styler can style Rnw files only via style_pkg()", { + msg <- capture_output( + style_pkg(testthat_file("public-api", "xyzpackage-rnw"), + filetype = "Rnw" + ) + ) + expect_false(any(grepl("hello-world.R", msg, fixed = TRUE))) + expect_false(any(grepl("test-package-xyz.R", msg, fixed = TRUE))) + 
expect_false(any(grepl("random.Rmd", msg, fixed = TRUE))) + expect_true(any(grepl("random.Rnw", msg, fixed = TRUE))) + expect_false(any(grepl("RcppExports.R", msg, fixed = TRUE))) +}) + +test_that("dry run options work:", { + local_test_setup() + path <- test_path("public-api/dry/unstyled.R") + # test the testing function + expect_error(test_dry(path, style_file, styled = TRUE)) + + # real tests + ## R + test_dry(path, style_file) + path <- test_path("public-api/dry/styled.R") + test_dry(path, style_file, styled = TRUE) + + ## Rmd + test_dry(test_path("public-api/dry/unstyled.Rmd"), style_file, styled = FALSE) + test_dry(test_path("public-api/dry/styled.Rmd"), style_file, styled = TRUE) + + ## Rnw + test_dry(test_path("public-api/dry/unstyled.Rnw"), style_file, styled = FALSE) + test_dry(test_path("public-api/dry/styled.Rnw"), style_file, styled = TRUE) +}) + +test_that("base indention works", { + # for single-line strings + n_spaces <- 5 + text_in <- "x<- function() NULL" + expect_equal( + style_text(text_in, base_indention = n_spaces), + construct_vertical(paste0(add_spaces(n_spaces), style_text(text_in))) + ) + # for multi-line strings + text_in <- c( + "x<- function()", + '"here\nis"', + "NULL", + "1+ 1" + ) + text_out <- c( + " x <- function() {", + ' "here', + 'is"', + " }", + " NULL", + " 1 + 1" + ) + expect_equal( + as.character(style_text(text_in, base_indention = n_spaces)), + text_out + ) +}) + +test_that("scope can be specified as is", { + capture_output(expect_false({ + styled <- style_pkg(testthat_file("public-api", "xyzpackage"), scope = I("spaces")) + any(styled$changed) + })) + + file <- testthat_file("public-api", "xyzpackage", "R", "hello-world.R") + capture_output(expect_false({ + styled <- style_file(file, scope = I("line_breaks")) + any(styled$changed) + })) + expect_equal( + style_text(c("1+14;x=2"), scope = I(c("line_breaks", "tokens"))), + construct_vertical(c("1+14", "x<-2")) + ) +}) + +test_that("Can properly determine style_after_saving", { + withr::with_envvar(list(save_after_styling = TRUE), { + expect_warning(op <- save_after_styling_is_active(), "is depreciated") + expect_equal(op, TRUE) + }) + + withr::with_envvar(list(save_after_styling = FALSE), { + expect_warning(op <- save_after_styling_is_active(), "is depreciated") + expect_equal(op, FALSE) + }) + + + withr::with_options(list(styler.save_after_styling = TRUE), { + expect_silent(op <- save_after_styling_is_active()) + expect_equal(op, TRUE) + }) + + withr::with_options(list(styler.save_after_styling = TRUE), { + withr::with_envvar(list(save_after_styling = FALSE), { + expect_warning(op <- save_after_styling_is_active(), "is depreciated") + expect_equal(op, TRUE) + }) + }) + + withr::with_options(list(styler.save_after_styling = FALSE), { + expect_silent(op <- save_after_styling_is_active()) + expect_equal(op, FALSE) + }) +}) + +test_that("Can display warning on unset styler cache", { + withr::local_options(styler.cache_root = NULL) + withr::local_seed(7) + expect_message( + ask_to_switch_to_non_default_cache_root(ask = TRUE), + regexp = "styler::caching", + fixed = TRUE + ) +}) + +test_that("No sensitive to decimal option", { + skip_if_not_installed("prettycode") + withr::local_options(OutDec = ",") + expect_snapshot({ + style_text("1") + }) +}) + +test_that("Can display warning on unset styler cache", { + withr::local_options(styler.cache_root = "styler-perm") + withr::local_seed(7) + expect_silent(ask_to_switch_to_non_default_cache_root(ask = TRUE)) +}) + + +test_that("alignment detection can be 
turned off.", { + withr::local_options( + "styler.ignore_alignment" = TRUE, + "styler.colored_print.vertical" = FALSE + ) + text_in <- paste0( + "call(\n", + " xb = 13,\n", + " t = 'a'\n", + ")" + ) + text_out <- c( + "call(", + " xb = 13,", + " t = \"a\"", + ")" + ) + + expect_true(all( + style_text(text_in) == text_out + )) +}) diff --git a/tests/testthat/test-public_api.R b/tests/testthat/test-public_api.R deleted file mode 100644 index 9c6c80805..000000000 --- a/tests/testthat/test-public_api.R +++ /dev/null @@ -1,153 +0,0 @@ -context("public API") - - - -test_that("styler can style package", { - capture_output(expect_false({ - styled <- style_pkg(testthat_file("public-api", "xyzpackage")) - any(styled$changed) - })) -}) - -test_that("styler can style directory", { - capture_output(expect_false({ - styled <- style_dir(testthat_file("public-api", "xyzdir")) - any(styled$changed) - })) -}) - -test_that("styler can style files", { - capture_output(expect_false({ - out <- style_file(testthat_file("public-api", "xyzfile", "random-script.R"), strict = FALSE) - out$changed - })) - - capture_output(expect_false(any({ - out <- style_file( - rep(testthat_file("public-api", "xyzfile", "random-script.R"), 2), - strict = FALSE - ) - out$changed - }))) -}) - - -test_that("styler does not return error when there is no file to style", { - capture_output(expect_error(style_dir( - testthat_file("public-api", "xyzemptydir"), strict = FALSE), NA - )) -}) - -context("public API - Rmd in style_file()") - -test_that("styler can style Rmd file", { - capture_output(expect_false({ - out <- style_file( - testthat_file("public-api", "xyzfile_rmd", "random.Rmd"), strict = FALSE - ) - out$changed - })) - - capture_output(expect_warning( - styled <- style_file(testthat_file("public-api", "xyzfile_rmd", "random2.Rmd"), strict = FALSE) - )) - expect_false(styled$changed) -}) - -test_that("styler handles malformed Rmd file and invalid R code in chunk", { - capture_output(expect_warning( - style_file(testthat_file("public-api", "xyzfile_rmd", "random3.Rmd"), strict = FALSE) - )) - - capture_output(expect_warning( - style_file(testthat_file("public-api", "xyzfile_rmd", "random4.Rmd"), strict = FALSE) - )) -}) - -context("messages are correct") - -test_that("messages (via cat()) of style_file are correct", { - skip_on_os("windows") - # Message if scope > line_breaks and code changes - temp_path <- copy_to_tempdir(testthat_file( - "public-api", "xyzdir-dirty", "dirty-sample-with-scope-tokens.R" - )) - expect_equal_to_reference(enc::to_utf8(capture.output( - style_file(temp_path, scope = "tokens"))), - testthat_file("public-api/xyzdir-dirty/dirty-reference-with-scope-tokens") - ) - unlink(dirname(temp_path)) - - # No message if scope > line_breaks and code does not change - temp_path <- copy_to_tempdir(testthat_file("public-api", "xyzdir-dirty", "clean-sample-with-scope-tokens.R")) - expect_equal_to_reference( - enc::to_utf8(capture.output(style_file(temp_path, scope = "tokens"))), - testthat_file("public-api/xyzdir-dirty/clean-reference-with-scope-tokens") - ) - unlink(dirname(temp_path)) - - # No message if scope <= line_breaks even if code is changed. 
- temp_path <- copy_to_tempdir(testthat_file("public-api", "xyzdir-dirty", "dirty-sample-with-scope-spaces.R")) - expect_equal_to_reference( - enc::to_utf8(capture.output(style_file(temp_path, scope = "spaces"))), - testthat_file("public-api/xyzdir-dirty/dirty-reference-with-scope-spaces") - ) - unlink(dirname(temp_path)) -}) - -context("public API - Rmd in style_dir()") - -test_that("styler can style R and Rmd files via style_dir()", { - msg <- capture_output( - style_dir(testthat_file("public-api", "xyz-r-and-rmd-dir"), - filetype = c("R", "Rmd")) - ) - expect_true(any(grepl("random-script-in-sub-dir.R", msg, fixed = TRUE))) - expect_true(any(grepl("random-rmd-script.Rmd", msg, fixed = TRUE))) -}) - -test_that("styler can style Rmd files only via style_dir()", { - msg <- capture_output( - style_dir(testthat_file("public-api", "xyz-r-and-rmd-dir"), - filetype = "Rmd") - ) - expect_true(any(grepl("random-rmd-script.Rmd", msg, fixed = TRUE))) - expect_false(any(grepl("random-script-in-sub-dir.R", msg, fixed = TRUE))) -}) - -test_that("styler can style .r and .rmd files via style_dir()", { - msg <- capture_output( - style_dir(testthat_file("public-api", "xyz-r-and-rmd-dir"), - filetype = c(".r", ".rmd")) - ) - expect_true(any(grepl("random-script-in-sub-dir.R", msg, fixed = TRUE))) - expect_true(any(grepl("random-rmd-script.Rmd", msg, fixed = TRUE))) -}) - -context("public API - Rmd in style_pkg()") - -test_that("styler can style R and Rmd files via style_pkg()", { - msg <- capture_output( - style_pkg(testthat_file("public-api", "xyzpackage-rmd"), - filetype = c("R", "Rmd")) - ) - expect_true(any(grepl("hello-world.R", msg, fixed = TRUE))) - expect_true(any(grepl("test-package-xyz.R", msg, fixed = TRUE))) - expect_true(any(grepl("random.Rmd", msg, fixed = TRUE))) - expect_true(any(grepl("README.Rmd", msg, fixed = TRUE))) - expect_false(any(grepl("RcppExports.R", msg, fixed = TRUE))) - -}) - -test_that("styler can style Rmd files only via style_pkg()", { - msg <- capture_output( - style_pkg(testthat_file("public-api", "xyzpackage-rmd"), - filetype = "Rmd") - ) - expect_false(any(grepl("hello-world.R", msg, fixed = TRUE))) - expect_false(any(grepl("test-package-xyz.R", msg, fixed = TRUE))) - expect_true(any(grepl("random.Rmd", msg, fixed = TRUE))) - expect_true(any(grepl("README.Rmd", msg, fixed = TRUE))) - expect_false(any(grepl("RcppExports.R", msg, fixed = TRUE))) -}) - diff --git a/tests/testthat/test-relocate_eq_assign.R b/tests/testthat/test-relocate_eq_assign.R index 51efc9c07..fe7b13bc1 100644 --- a/tests/testthat/test-relocate_eq_assign.R +++ b/tests/testthat/test-relocate_eq_assign.R @@ -1,7 +1,8 @@ -context("EQ_ASSIGN relocation") + # Tests code in R/relevel.R test_that("tree hierarchy is the same no matter whether = or <- is used", { skip_if_not_installed("DiagrammeR") + skip_if_not_installed("data.tree") assign_left <- create_tree( "x <- 5 @@ -21,12 +22,25 @@ test_that("tree hierarchy is the same no matter whether = or <- is used", { expect_equal(assign_eq, assign_left) assign_left <- create_tree( - "x = b = 5", structure_only = TRUE + "x = b = 5", + structure_only = TRUE ) - assign_eq <- create_tree( - "x <- b <- 5", structure_only = TRUE + assign_eq <- create_tree( + "x <- b <- 5", + structure_only = TRUE ) expect_equal(assign_eq, assign_left) + + + assign_left_many <- create_tree( + "x = b = c = d = r= 5", + structure_only = TRUE + ) + assign_eq_many <- create_tree( + "x <- b <- c <- d <- r <- 5", + structure_only = TRUE + ) + expect_equal(assign_eq_many, assign_left_many) }) 
test_that("braces are added in the right place in ifelse if eq_assign is in expr", { @@ -37,6 +51,23 @@ test_that("braces are added in the right place in ifelse if eq_assign is in expr ), NA) }) +test_that("complicated reassignment works", { + expect_warning(test_collection( + "relocate_eq_assign", "eq_assign_multiple_tokens_eq_only", + transformer = style_text, + scope = "tokens", + style = tidyverse_style + ), NA) + + expect_warning(test_collection( + "relocate_eq_assign", "eq_assign_multiple_tokens_mixed", + transformer = style_text, + scope = "tokens", + style = tidyverse_style + ), NA) +}) + + test_that("eq_assign is not replaced", { expect_warning(test_collection( "relocate_eq_assign", "eq_assign_ifelse_scope_line_breaks", diff --git a/tests/testthat/test-rmd.R b/tests/testthat/test-rmd.R index aad4ca2d3..6ff803d80 100644 --- a/tests/testthat/test-rmd.R +++ b/tests/testthat/test-rmd.R @@ -1,10 +1,70 @@ -context("rmd") + test_that("can style .Rmd files", { - test_collection( - "rmd", - transformer = transform_rmd, + expect_warning(test_collection("rmd", "simple", + transformer = transform_mixed, + transformer_fun = style_text, + filetype = "Rmd", + write_tree = FALSE + ), NA) + + expect_warning(test_collection("rmd", "r_and_non_r_code_chunks", + transformer = transform_mixed, + transformer_fun = style_text, + filetype = "Rmd", + write_tree = FALSE + ), NA) + + expect_warning(test_collection("rmd", "nested", + transformer = transform_mixed, + transformer_fun = style_text, + filetype = "Rmd", + write_tree = FALSE + ), NA) + + expect_warning(test_collection("rmd", "invalid", + transformer = transform_mixed, transformer_fun = style_text, + filetype = "Rmd", write_tree = FALSE - ) + ), NA) + + ## new 3-5 + expect_warning(test_collection("rmd", "random3", + transformer = transform_mixed, + transformer_fun = style_text, + filetype = "Rmd", + write_tree = FALSE + ), NA) + expect_warning(test_collection("rmd", "random5", + transformer = transform_mixed, + transformer_fun = style_text, + filetype = "Rmd", + write_tree = FALSE + ), NA) + expect_warning(test_collection("rmd", "random6", + transformer = transform_mixed, + transformer_fun = style_text, + filetype = "Rmd", + write_tree = FALSE + ), NA) + expect_warning(test_collection("rmd", "random7", + transformer = transform_mixed, + transformer_fun = style_text, + filetype = "Rmd", + write_tree = FALSE + ), NA) +}) + + +test_that("code chunks without code are returned as zero lines", { + local_test_setup() + t <- make_transformer(transformers = tidyverse_style()) + lines <- "" + + expect_equal(transform_mixed_non_empty("", t), character(0)) + expect_equal(transform_mixed_non_empty("\n", t), character(0)) + expect_equal(transform_mixed_non_empty(c("", ""), t), character(0)) + expect_equal(transform_mixed_non_empty(c("", " "), t), character(0)) + expect_equal(transform_mixed_non_empty(c("\t", ""), t), character(0)) }) diff --git a/tests/testthat/test-rnw.R b/tests/testthat/test-rnw.R new file mode 100644 index 000000000..1a9158d3f --- /dev/null +++ b/tests/testthat/test-rnw.R @@ -0,0 +1,18 @@ + + +test_that("can style .Rnw files", { + expect_warning(test_collection( + "rnw", "008-outdec", + transformer = transform_mixed, + transformer_fun = style_text, + filetype = "Rnw", + write_tree = FALSE + ), NA) + expect_warning(test_collection( + "rnw", "011-conditional-eval", + transformer = transform_mixed, + transformer_fun = style_text, + filetype = "Rnw", + write_tree = FALSE + ), NA) +}) diff --git a/tests/testthat/test-roundtrip.R 
b/tests/testthat/test-roundtrip.R index 406b111b7..d50242bc2 100644 --- a/tests/testthat/test-roundtrip.R +++ b/tests/testthat/test-roundtrip.R @@ -1,17 +1,35 @@ -context("roundtrip works") -test_that("can_verify_roundtrip works", { - expect_true(can_verify_roundtrip(tidyverse_style(scope = "line_breaks"))) - expect_true(can_verify_roundtrip(tidyverse_style(scope = "spaces"))) - expect_true(can_verify_roundtrip(tidyverse_style(scope = "indention"))) - expect_false(can_verify_roundtrip(tidyverse_style(scope = "tokens"))) + +test_that("parse_tree_must_be_identical works", { + expect_true( + parse_tree_must_be_identical(tidyverse_style(scope = "line_breaks")) + ) + expect_true(parse_tree_must_be_identical(tidyverse_style(scope = "spaces"))) + expect_true( + parse_tree_must_be_identical(tidyverse_style(scope = "indention")) + ) + expect_false(parse_tree_must_be_identical(tidyverse_style(scope = "tokens"))) }) test_that("correct styling does not give an error", { - expect_error(verify_roundtrip("1+1", "1 + 1"), NA) + expect_snapshot({ + verify_roundtrip("1+1", "1 + 1") + }) }) test_that("corrupt styling does give an error", { - expect_error(verify_roundtrip("1-1", "1 + 1"), "bug") + expect_snapshot_error(verify_roundtrip("1-1", "1 + 1")) +}) + + +test_that("the output is asserted to be parsable", { + expect_error( + verify_roundtrip("1+1", "1 +) 1", parsable_only = TRUE), + "Styling resulted in code that isn't parsable." + ) + + expect_silent( + verify_roundtrip("1+1", "1 + 1", parsable_only = TRUE) + ) }) diff --git a/tests/testthat/test-roxygen-examples-complete.R b/tests/testthat/test-roxygen-examples-complete.R new file mode 100644 index 000000000..0d78cc4f0 --- /dev/null +++ b/tests/testthat/test-roxygen-examples-complete.R @@ -0,0 +1,152 @@ +test_that("analogous to test-roxygen-examples-complete", { + expect_warning(test_collection( + "roxygen-examples-complete", "^01", + transformer = style_text + ), NA) + + expect_warning(test_collection( + "roxygen-examples-complete", "^11", + transformer = style_text + ), NA) + + expect_warning(test_collection( + "roxygen-examples-complete", "^12-fun", + transformer = style_text + ), NA) + + expect_warning(test_collection( + "roxygen-examples-complete", "^12-dont", + transformer = style_text + ), NA) + + + expect_warning(test_collection( + "roxygen-examples-complete", "^13", + transformer = style_text + ), NA) + + expect_warning(test_collection( + "roxygen-examples-complete", "^14", + transformer = style_text + ), NA) + + expect_warning(test_collection( + "roxygen-examples-complete", "^02", + transformer = style_text + ), NA) + + expect_warning(test_collection( + "roxygen-examples-complete", "^03", + transformer = style_text + ), NA) + + expect_warning(test_collection( + "roxygen-examples-complete", "^04", + transformer = style_text + ), NA) + + expect_warning(test_collection( + "roxygen-examples-complete", "^05", + transformer = style_text + ), NA) + + expect_warning(test_collection( + "roxygen-examples-complete", "^06", + transformer = style_text + ), NA) + + expect_warning(test_collection( + "roxygen-examples-complete", "^07", + transformer = style_text + ), NA) + + expect_warning(test_collection( + "roxygen-examples-complete", "^08", + transformer = style_text + ), NA) + + expect_warning(test_collection( + "roxygen-examples-complete", "^09", + transformer = style_text + ), NA) + + expect_warning(test_collection( + "roxygen-examples-complete", "^10", + transformer = style_text + ), NA) + + expect_warning(test_collection( + 
"roxygen-examples-complete", "^15", + transformer = style_text, scope = "spaces" + ), NA) + + # Don't warn about empty strings in roxygen comments + expect_warning(test_collection( + "roxygen-examples-complete", "^16", + transformer = style_text + ), NA) + + expect_warning(test_collection( + "roxygen-examples-complete", "^17", + transformer = style_text + ), NA) + + expect_warning(test_collection( + "roxygen-examples-complete", "^18", + transformer = style_text + ), NA) + expect_warning(test_collection( + "roxygen-examples-complete", "^19", + transformer = style_text + ), NA) + + expect_warning(test_collection( + "roxygen-examples-complete", "^20", + transformer = style_text + ), NA) + + expect_warning(test_collection( + "roxygen-examples-complete", "^21", + transformer = style_text + ), NA) + + expect_warning(test_collection( + "roxygen-examples-complete", "^22", + transformer = style_text + ), NA) + + expect_error(test_collection( + "roxygen-examples-complete", "^23", + transformer = style_text + ), "issues/1242") + + expect_warning(test_collection( + "roxygen-examples-complete", "^24", + transformer = style_text + ), NA) + + expect_warning(test_collection( + "roxygen-examples-complete", "^25", + transformer = style_text + ), NA) + + expect_warning(test_collection( + "roxygen-examples-complete", "^26", + transformer = style_text + ), NA) + + expect_warning(test_collection( + "roxygen-examples-complete", "^27", + transformer = style_text + ), NA) + + expect_warning(test_collection( + "roxygen-examples-complete", "^28", + transformer = style_text + ), NA) + + expect_warning(test_collection( + "roxygen-examples-complete", "^29", + transformer = style_text + ), NA) +}) diff --git a/tests/testthat/test-roxygen-examples-parse.R b/tests/testthat/test-roxygen-examples-parse.R new file mode 100644 index 000000000..d733b1585 --- /dev/null +++ b/tests/testthat/test-roxygen-examples-parse.R @@ -0,0 +1,171 @@ + + +test_that("simple examples can be parsed", { + expected_out <- c("\n", "x <- 1\n") + expect_equal(parse_roxygen(c("#' @examples", "#' x <- 1"))$text, expected_out) + expect_equal(parse_roxygen(c("#'\t@examples", "#' x <- 1"))$text, expected_out) + expect_equal(parse_roxygen(c("#'@examples ", "#' x <- 1"))$text, expected_out) + expect_equal(parse_roxygen(c("#'@examples \t", "#' x <- 1"))$text, expected_out) + expect_equal(parse_roxygen(c("#'\t@examples \t", "#' x <- 1"))$text, expected_out) + expect_equal(parse_roxygen(c("#' \t@examples \t", "#' x <- 1"))$text, expected_out) + + # with code on same line + expected_out <- c("2\n", "x <- 1\n") + expect_equal(parse_roxygen(c("#' @examples 2", "#' x <- 1"))$text, expected_out) + expect_equal(parse_roxygen(c("#'\t@examples 2", "#' x <- 1"))$text, expected_out) + expect_equal(parse_roxygen(c("#'@examples 2", "#' x <- 1"))$text, expected_out) + expect_equal(parse_roxygen(c("#'@examples \t 2", "#' x <- 1"))$text, expected_out) + expect_equal(parse_roxygen(c("#'\t@examples \t 2", "#' x <- 1"))$text, expected_out) + expect_equal(parse_roxygen(c("#' \t@examples \t2", "#' x <- 1"))$text, expected_out) +}) + +test_that("donts can be parsed", { + expect_equal( + parse_roxygen(c("#' @examples", "#' \\dontrun{1}"))$text, + c("\n", "\\dontrun", "{", "1", "}", "\n") + ) + expect_equal( + parse_roxygen( + c( + "#' @examplesIf (TRUE)", + "#' \\donttest{", + "#' fu(x = 3)", "#' }" + ) + )$text, + c( + "(TRUE)\n", + "\\donttest", + "{", "\n", + "fu(x = 3)\n", + "}", + "\n" + ) + ) +}) + +test_that("braces examples can be parsed", { + expect_equal( + 
parse_roxygen( + c( + "#' @examples x <- '{'", + "#' \\donttest{", + "#' fu(x = 3)", "#' }" + ) + )$text, + c( + "x <- '", "", + "{", "'\n", + "\\donttest", "{", "\n", + "fu(x = 3)\n", + "}", + "\n" + ) + ) + + expect_equal( + parse_roxygen( + c( + "#' @examplesIf c(c(c(TRUE)))", + "#' x <- '{'", + "#' \\dontrun{", + "#' fu(x = 3)", + "#' }" + ) + )$text, + c( + "c(c(c(TRUE)))\n", + "x <- '", "", "{", "'\n", + "\\dontrun", "{", "\n", + "fu(x = 3)\n", + "}", "\n" + ) + ) + + expect_equal( + parse_roxygen( + c( + "#' @examples x <- '{'", + "#' \\dontrun{", + "#' c('{', \"'{{{\" ,\"[\")", + "#'", + "#'", + "#' }", + "#'", + "#'" + ) + )$text, + c( + "x <- '", "", "{", "'\n", + "\\dontrun", "{", "\n", + "c('{', \"'{{{\" ,\"[\")\n", + "\n", "\n", + "}\n", "\n", "\n" + ) + ) + expect_equal( + parse_roxygen( + c( + "#' @examples", + "#' x <- '{'", + "#' \\dontrun{", + "#' x<-'{'", + "#' }" + ) + )$text, + c( + "\n", + "x <- '", "", "{", "'\n", + "\\dontrun", "{", "\n", + "x<-'{'\n", + "}\n" + ) + ) + + expect_equal( + parse_roxygen( + c( + "#' @examples", + "#' x <- '{'", + "#' {", + "#' 1 + 1", + "#' }", + "#' \\dontrun{", + "#' {", + "#' 1 + 1", + "#' }", + "#' }" + ) + )$text, + c( + "\n", + "x <- '", "", "{", "'\n", + "", "{", "\n", + "1 + 1\n", + "}", "\n", + "\\dontrun", "{", "\n", + "{\n", + "1 + 1\n", + "}\n", + "}", "\n" + ) + ) + + expect_equal( + parse_roxygen(c( + "#' @examples parse_roxygen(", + "#' c(", + "#' \"#' @examples\",", + "#' \"#' c(\\\"'{{{\\\")\"", + "#' )", + "#' )" + ))$text, + c( + "parse_roxygen(\n", + " c(\n", + " \"#' @examples\",\n", + " \"#' c(\\\"'", "", "{", "", "{", "", "{", "\\\")\"\n", + " )\n", + ")\n" + ) + ) +}) diff --git a/tests/testthat/test-scope-AsIs.R b/tests/testthat/test-scope-AsIs.R new file mode 100644 index 000000000..97eec4ebf --- /dev/null +++ b/tests/testthat/test-scope-AsIs.R @@ -0,0 +1,72 @@ + + +test_that("no indention manipulation but spaces manipulation", { + expect_warning(test_collection( + "scope-AsIs", "scope_spaces-", + transformer = style_text, style = tidyverse_style, scope = I("spaces") + ), NA) +}) + +test_that("just indention", { + expect_warning(test_collection( + "scope-AsIs", "scope_indention-", + transformer = style_text, + style = tidyverse_style, scope = I("indention") + ), NA) +}) + +test_that("indention and spaces", { + expect_warning(test_collection( + "scope-AsIs", "scope_spaces_indention-", + transformer = style_text, + style = tidyverse_style, scope = I(c("indention", "spaces")) + ), NA) +}) + + +test_that("line-break manipulation", { + expect_warning(test_collection( + "scope-AsIs", "scope_line_breaks-", + transformer = style_text, + style = tidyverse_style, + scope = I("line_breaks") + ), NA) +}) + + +test_that("line-break manipulation", { + expect_warning(test_collection( + "scope-AsIs", "scope_spaces_line_breaks-", + transformer = style_text, + style = tidyverse_style, + scope = I(c("line_breaks", "spaces")) + ), NA) +}) + + +test_that("tokens and indention", { + expect_warning(test_collection( + "scope-AsIs", "scope_indention_tokens-", + transformer = style_text, + style = tidyverse_style, + scope = I(c("tokens", "indention")) + ), NA) +}) + +test_that("tokens and indention", { + expect_warning(test_collection( + "scope-AsIs", "scope_spaces_tokens-", + transformer = style_text, + style = tidyverse_style, + scope = I(c("spaces", "tokens")) + ), NA) +}) + +test_that("no manipulation at all", { + expect_warning(test_collection( + "scope-AsIs", "scope_none-", + transformer = style_text, + style = tidyverse_style, + 
scope = I("none") + ), NA) +}) diff --git a/tests/testthat/test-scope_argument.R b/tests/testthat/test-scope-character.R similarity index 64% rename from tests/testthat/test-scope_argument.R rename to tests/testthat/test-scope-character.R index dc32bc67e..256f7d670 100644 --- a/tests/testthat/test-scope_argument.R +++ b/tests/testthat/test-scope-character.R @@ -1,42 +1,45 @@ -context("scope argument") + test_that("no indention manipulation but spaces manipulation", { expect_warning(test_collection( - "scope_argument", "scope_spaces", - transformer = style_text, style = tidyverse_style, scope = "spaces"), NA) + "scope-character", "scope_spaces", + transformer = style_text, style = tidyverse_style, scope = "spaces" + ), NA) }) test_that("no line-break manipulation", { expect_warning(test_collection( - "scope_argument", "scope_indention", + "scope-character", "scope_indention", transformer = style_text, - style = tidyverse_style, scope = "indention"), NA) + style = tidyverse_style, scope = "indention" + ), NA) }) test_that("no token manipulation", { expect_warning(test_collection( - "scope_argument", "scope_line_breaks", + "scope-character", "scope_line_breaks", transformer = style_text, style = tidyverse_style, - scope = "line_breaks"), NA) + scope = "line_breaks" + ), NA) }) test_that("no space manipulation", { expect_warning(test_collection( - "scope_argument", "scope_tokens", + "scope-character", "scope_tokens", transformer = style_text, style = tidyverse_style, - scope = "tokens"), NA) + scope = "tokens" + ), NA) }) test_that("no manipulation at all", { expect_warning(test_collection( - "scope_argument", "scope_none", + "scope-character", "scope_none", transformer = style_text, style = tidyverse_style, - scope= "none"), NA) + scope = "none" + ), NA) }) - - diff --git a/tests/testthat/test-serialize_tests.R b/tests/testthat/test-serialize_tests.R index 6c65a58df..2b858a77a 100644 --- a/tests/testthat/test-serialize_tests.R +++ b/tests/testthat/test-serialize_tests.R @@ -1,30 +1,51 @@ -context("test testing functions") + test_that("No files to compare returns error", { expect_error(test_collection("serialize_tests", "xyz", - transformer = as_is),"no items") -}) - -test_that("Can handle multiple in for one out file", { - expect_warning(test_collection("serialize_tests", "k2", - transformer = identity), - c("k2\\-another\\-in_file.*k2\\-out")) - - expect_warning(test_collection("serialize_tests", "k2", - transformer = identity), - c("k2\\-in.*k2\\-out")) + transformer = as_is + ), "no items") }) - test_that("properly detects non-match", { - expect_warning(test_collection("serialize_tests", "k3", - transformer = identity, - write_back = FALSE), - "different") + path_out <- test_path("serialize_tests", "k3-out.R") + before <- readLines(path_out) + withr::defer(writeLines(before, path_out)) + expect_warning( + test_collection("serialize_tests", "k3", + transformer = identity + ), + "different" + ) }) test_that("properly detects match", { - expect_message(test_collection("serialize_tests", "correct", - transformer = identity), - "identical") + expect_message( + test_collection("serialize_tests", "correct", + transformer = identity + ), + "identical" + ) +}) + +test_that("detects non-matching style guides", { + sg <- create_style_guide( + space = list( + a1 = function(...) NULL, + b1 = function(...) 1 + ), + transformers_drop = specify_transformers_drop( + spaces = c(a1 = "'+'") + ) + ) + expect_silent(test_transformers_drop(sg)) + + sg <- create_style_guide( + space = list( + a1 = function(...) 
NULL + ), + transformers_drop = specify_transformers_drop( + spaces = c(a2 = "'+'") + ) + ) + expect_error(test_transformers_drop(sg)) }) diff --git a/tests/testthat/test-spacing.R b/tests/testthat/test-spacing.R index 763e931a8..9caadf9e4 100644 --- a/tests/testthat/test-spacing.R +++ b/tests/testthat/test-spacing.R @@ -1,60 +1,80 @@ -context("spacing") + test_that("curly braces", { expect_warning(test_collection( "spacing", "round", - transformer = style_text), NA) + transformer = style_text + ), NA) }) test_that(":, ::, and :::", { expect_warning(test_collection( "spacing", "colon", - transformer = style_text), NA) + transformer = style_text + ), NA) }) test_that("comments and strict = FALSE", { expect_warning(test_collection( "spacing", "comments", - transformer = style_text, stric = FALSE), NA) + transformer = style_text, stric = FALSE + ), NA) }) test_that("Space placed after 'if' and before '('", { expect_warning(test_collection( "spacing", "spacing_if", - transformer = style_text), NA) + transformer = style_text + ), NA) }) test_that("space before comma is removed", { expect_warning(test_collection( "spacing", "spacing_comma", - transformer = style_text), NA) + transformer = style_text + ), NA) }) test_that("two commas are separated by a space", { expect_warning(test_collection( "spacing", "spacing_comma2", - transformer = style_text), NA) + transformer = style_text + ), NA) }) test_that("spacing between ! and bang is perserved", { expect_warning(test_collection( "spacing", "bang_bang_spacing", - transformer = style_text), NA) + transformer = style_text + ), NA) }) test_that("spacing around in works", { expect_warning(test_collection( "spacing", "spacing_in", - transformer = style_text), NA) + transformer = style_text + ), NA) }) test_that("no spaces after token FUNCTION", { expect_warning(test_collection( "spacing", "spacing_function", - transformer = style_text, strict = FALSE), NA) + transformer = style_text, strict = FALSE + ), NA) }) +test_that("spacing around tilde", { + expect_warning(test_collection( + "spacing", "spacing-tilde", + transformer = style_text, strict = TRUE + ), NA) +}) - +test_that("spacing around square brackets / braces", { + expect_warning(test_collection( + "spacing", "spacing-square", + transformer = style_text, strict = TRUE + ), NA) +}) diff --git a/tests/testthat/test-square_brackets.R b/tests/testthat/test-square_brackets.R index 3b317af7a..a400c1f30 100644 --- a/tests/testthat/test-square_brackets.R +++ b/tests/testthat/test-square_brackets.R @@ -1,7 +1,8 @@ -context("indention square brackets") + test_that("square brackets cause indention", { expect_warning(test_collection( "indention_square_brackets", - "square_brackets_line_break", transformer = style_text), NA) + transformer = style_text + ), NA) }) diff --git a/tests/testthat/test-start_line.R b/tests/testthat/test-start_line.R index fd3ffd87e..f154dc555 100644 --- a/tests/testthat/test-start_line.R +++ b/tests/testthat/test-start_line.R @@ -1,6 +1,7 @@ -context("start token") -test_that("leading spaces are preseved at start of text", { + +test_that("leading spaces are preserved at start of text", { expect_warning(test_collection("start_line", - transformer = style_empty, write_back = TRUE), NA) + transformer = style_empty + ), NA) }) diff --git a/tests/testthat/test-strict.R b/tests/testthat/test-strict.R index 331a9cc47..774096980 100644 --- a/tests/testthat/test-strict.R +++ b/tests/testthat/test-strict.R @@ -1,29 +1,33 @@ -context("test strict") + test_that("can style example source file 
with strict = TRUE", { expect_warning(test_collection( "strict", "strict", transformer = style_text, - strict = TRUE), NA) + strict = TRUE + ), NA) }) test_that("can style example source file with strict = FALSE", { expect_warning(test_collection( "strict", "non_strict", transformer = style_text, - strict = FALSE), NA) + strict = FALSE + ), NA) }) test_that("removes space at EOL", { expect_warning(test_collection( "strict", "eol", transformer = style_text, - strict = FALSE), NA) + strict = FALSE + ), NA) }) test_that("removes blank lines at EOF", { expect_warning(test_collection( "strict", "eof", transformer = style_text, - strict = FALSE), NA) + strict = FALSE + ), NA) }) diff --git a/tests/testthat/test-style-guides.R b/tests/testthat/test-style-guides.R new file mode 100644 index 000000000..9a2c5b38c --- /dev/null +++ b/tests/testthat/test-style-guides.R @@ -0,0 +1,50 @@ +test_that("inconsistent scope intput raises an error", { + # inexistant scope + expect_error(scope_normalize("animal"), "must be one of ") + expect_error(scope_normalize(I("animal")), "must be one of ") + expect_error(scope_normalize(I(c("animal", "spaces"))), "must be one of ") + + # other than one with character + expect_error(scope_normalize(c("none", "tokens")), "either of class `AsIs` or length") +}) + +test_that("consistent input yields right output", { + levels <- c("none", "spaces", "indention", "line_breaks", "tokens") + expect_equal( + scope_normalize(I("tokens")), + factor("tokens", levels = levels, ordered = TRUE) + ) + expect_equal( + scope_normalize(I("none")), + factor("none", levels = levels, ordered = TRUE) + ) + + expect_equal( + scope_normalize(I("indention")), + factor("indention", levels = levels, ordered = TRUE) + ) + + expect_equal( + scope_normalize(I(c("indention", "tokens"))), + factor(c("indention", "tokens"), levels = levels, ordered = TRUE) + ) + + expect_equal( + scope_normalize("spaces"), + factor(c("none", "spaces"), levels = levels, ordered = TRUE) + ) + + expect_equal( + scope_normalize("indention"), + factor(c("none", "spaces", "indention"), levels = levels, ordered = TRUE) + ) + + expect_equal( + scope_normalize("line_breaks"), + factor(c("none", "spaces", "indention", "line_breaks"), levels = levels, ordered = TRUE) + ) + expect_equal( + scope_normalize("tokens"), + factor(levels, levels = levels, ordered = TRUE) + ) +}) diff --git a/tests/testthat/test-stylerignore.R b/tests/testthat/test-stylerignore.R new file mode 100644 index 000000000..e17fb9adf --- /dev/null +++ b/tests/testthat/test-stylerignore.R @@ -0,0 +1,157 @@ +test_that("gives warning markers are not correct", { + expect_warning(style_text(c( + "1+1", + "# styler: on", + "# styler: off" + ))) +}) + +test_that("trailing spaces are stripped when checking marker and written back", { + expect_equal( + style_text(c( + "# styler: off ", + "1+1", + "# styler: on " + )) %>% + as.character(), + c("# styler: off", "1+1", "# styler: on") + ) +}) + +test_that("last stopping marker can be omitted", { + expect_equal( + style_text(c( + "# styler: off", + "1+1" + )) %>% + as.character(), + c("# styler: off", "1+1") + ) +}) + +test_that("last stopping marker can be omitted", { + expect_equal( + style_text(c( + "# styler: off", + "call( 1)", + " # styler: on", + "call(2 +0)", + "# styler: off", + "x=2" + )) %>% + as.character(), + c( + "# styler: off", "call( 1)", "# styler: on", "call(2 + 0)", + "# styler: off", "x=2" + ) + ) +}) + +test_that("works for one line", { + expect_equal( + style_text(c( + "1+1", + "1+1# styler: off", + "1+1" 
+ )) %>% + as.character(), + c("1 + 1", "1+1# styler: off", "1 + 1") + ) +}) + + +test_that("works with other markers", { + expect_equal( + withr::with_options( + list(styler.ignore_start = "# startignore", styler.ignore_stop = "# xxx"), + { + style_text(c( + "1+1", + "1+1# startignore", + "1+1" + )) %>% + as.character() + } + ), + c("1 + 1", "1+1# startignore", "1 + 1") + ) +}) + + +test_that("works for multiple markers inline", { + withr::local_options(styler.ignore_start = "# noeq", ) + expect_equal( + style_text(c( + "1+1", + "1+1# noeq", + "1+1" + )) %>% + as.character(), + c("1 + 1", "1+1# noeq", "1 + 1") + ) +}) + + +test_that("works for multiple markers inline on one line", { + withr::local_options(styler.ignore_start = "nolint start|styler: off") + expect_equal( + style_text(c( + "1+1", + "1+1# nolint start styler: off", + "1+1" + )) %>% + as.character(), + c("1 + 1", "1+1# nolint start styler: off", "1 + 1") + ) +}) + + +test_that("works with other markers", { + expect_warning( + withr::with_options( + list(styler.ignore_start = "# startignore", styler.ignore_stop = "# xxx"), + { + style_text(c( + "1+1", + "# xxx", + "1+1", + "1+1", + "# startignore" + )) %>% + as.character() + } + ), + "Invalid stylerignore sequence" + ) +}) + + +test_that("Simple example works", { + expect_warning(test_collection("stylerignore", "simple", + transformer = style_text + ), NA) +}) + +test_that("stylerignore does not need coincidence with top-level expressions", { + expect_warning(test_collection("stylerignore", "crossing", + transformer = style_text + ), NA) +}) + +test_that("token adding or removing works in stylerignore", { + expect_warning(test_collection("stylerignore", "adding-removing", + transformer = style_text + ), NA) +}) + +test_that("no token added or removed in complex case", { + expect_warning(test_collection("stylerignore", "braces", + transformer = style_text + ), NA) +}) + +test_that("stylerignore sequences are respected in alignment detection", { + expect_warning(test_collection("stylerignore", "alignment", + transformer = style_text + ), NA) +}) diff --git a/tests/testthat/test-testing.R b/tests/testthat/test-testing.R new file mode 100644 index 000000000..2cc791f7f --- /dev/null +++ b/tests/testthat/test-testing.R @@ -0,0 +1,48 @@ +test_that("local_test_setup changes back to old cache location", { + split_path <- function(x) { + if (Sys.info()[1] == "Windows") { + # not on other platforms as normalizePath messes up /private/var and /var + x <- normalizePath(x, mustWork = FALSE) + } + unlist(strsplit(x, .Platform$file.sep, fixed = TRUE)) + } + withr::defer(cache_deactivate(verbose = FALSE)) + old <- cache_info(format = "tabular") + cache_activate(verbose = FALSE) + + test <- function() { + local_test_setup() + base <- split_path(tempfile())[2] + expect_equal( + split_path(cache_info(format = "tabular")$location)[2], + base + ) + } + test() + expect_equal(cache_info(format = "tabular")$location, old$location) + expect_true(cache_info(format = "tabular")$activated) +}) + +test_that("local_test_setup changes back to old cache location", { + old <- cache_info(format = "tabular") + # don't activate + split_path <- function(x) { + if (Sys.info()[1] == "Windows") { + # not on other platforms as normalizePath messes up /private/var and /var + x <- normalizePath(x, mustWork = FALSE) + } + unlist(strsplit(x, .Platform$file.sep, fixed = TRUE)) + } + + test <- function() { + local_test_setup() + base <- split_path(tempfile())[2] + expect_equal( + split_path(cache_info(format = 
"tabular")$location)[2], + base + ) + } + test() + expect_equal(cache_info(format = "tabular")$location, old$location) + expect_false(cache_info(format = "tabular")$activated) +}) diff --git a/tests/testthat/test-tidyeval.R b/tests/testthat/test-tidyeval.R index 7362f8fb6..76b7a2457 100644 --- a/tests/testthat/test-tidyeval.R +++ b/tests/testthat/test-tidyeval.R @@ -1,16 +1,19 @@ -context("tidyeval") + test_that("no spaces within bang-bang operator !!!", { expect_warning(test_collection("tidyeval", "bang_bang", - transformer = style_text), NA) + transformer = style_text + ), NA) }) test_that(":= has correct spacing", { expect_warning(test_collection("tidyeval", "setting_var", - transformer = style_text), NA) + transformer = style_text + ), NA) }) test_that("Space before comma if preceding token is EQ_SUB", { expect_warning(test_collection("tidyeval", "eq_sub", - transformer = style_text), NA) + transformer = style_text + ), NA) }) diff --git a/tests/testthat/test-token_adding_removing.R b/tests/testthat/test-token_adding_removing.R index 4b2b7990b..cd0026a0f 100644 --- a/tests/testthat/test-token_adding_removing.R +++ b/tests/testthat/test-token_adding_removing.R @@ -1,24 +1,65 @@ -context("adding / removing token") + test_that("other manipulations are correct (add braces, semi-colon etc.)", { expect_warning(test_collection("token_adding_removing", "mixed_token", - transformer = style_text), NA) + transformer = style_text + ), NA) }) test_that("braces in if-else clause are added correctly", { expect_warning(test_collection("token_adding_removing", "if_else_strict", - transformer = style_text), NA) + transformer = style_text + ), NA) expect_warning(test_collection("token_adding_removing", "if_else_non_strict", - transformer = style_text, strict = FALSE), NA) + transformer = style_text, strict = FALSE + ), NA) + expect_warning(test_collection("token_adding_removing", "if-else-comma", + transformer = style_text, strict = TRUE + ), NA) }) test_that("double braces are treated correctly", { expect_warning(test_collection("token_adding_removing", "double_braces", - transformer = style_text), NA) + transformer = style_text + ), NA) }) test_that("double braces are treated correctly", { expect_warning(test_collection("token_adding_removing", "token_creation_find_pos", - transformer = style_text), NA) + transformer = style_text + ), NA) +}) + +test_that("braces only added to pipe if RHS is a symbol", { + expect_warning(test_collection("token_adding_removing", "add_brackets_in_pipe", + transformer = style_text + ), NA) +}) + + + +test_that("No braces are added if conditional statement is within pipe", { + expect_warning(test_collection("token_adding_removing", "else-pipe", + transformer = style_text + ), NA) +}) + +test_that("No brace is added within `substitute()`", { + expect_warning(test_collection("token_adding_removing", "substitute", + transformer = style_text + ), NA) +}) + + +test_that("stylreignore interacts correctly with wrap_expr_in_curly", { + expect_warning(test_collection("token_adding_removing", "if_else_stylerignore", + transformer = style_text + ), NA) +}) + +test_that("stylreignore interacts correctly with wrap_expr_in_curly", { + expect_warning(test_collection("token_adding_removing", "for_while_stylerignore", + transformer = style_text + ), NA) }) diff --git a/tests/testthat/test-transformers-drop.R b/tests/testthat/test-transformers-drop.R new file mode 100644 index 000000000..f3d5731fe --- /dev/null +++ b/tests/testthat/test-transformers-drop.R @@ -0,0 +1,154 @@ +# c/cp from 
remove_space_after_excl but for self-containement repeated +remove_space_after_excl_ <- function(pd_flat) { + excl <- (pd_flat$token == "'!'") & + (pd_flat$token_after != "'!'") & + (pd_flat$newlines == 0L) + pd_flat$spaces[excl] <- 0L + pd_flat +} + +t <- create_style_guide( + space = list(remove_space_after_excl_), + transformers_drop = specify_transformers_drop( + spaces = list(remove_space_after_excl_ = c("'!'")) + ), + style_guide_name = "styler::t@https://github.com/r-lib", + style_guide_version = as.character(packageVersion("styler")) +) + +t_no_drop <- create_style_guide( + space = list(remove_space_after_excl_), + transformers_drop = NULL, +) + +t_empty_drop1 <- create_style_guide( + space = list(remove_space_after_excl_), + transformers_drop = list(space = list()), +) + +t_empty_drop2 <- create_style_guide( + space = list(remove_space_after_excl_), + transformers_drop = list(), +) + +test_that("empty string as input can be handled", { + t_new <- transformers_drop( + "", t + ) + + t_new2 <- transformers_drop( + character(), t + ) + expect_equal(t_new, t_new2) +}) + +test_that("transformers are not removed if they are used", { + t_new <- transformers_drop( + "!x", t + ) + expect_equal(t_new, t) +}) + +test_that("transformers are removed if they are unused", { + t_fun <- transformers_drop( + "x", t + ) + t_manual <- t + t_manual$space$remove_space_after_excl_ <- NULL + expect_equal(t_fun, t_manual) +}) + +test_that("tidyverse transformers are correctly named", { + # test that all dropping rules match an actual rule in the style guide + expect_silent( + test_transformers_drop(tidyverse_style()) + ) +}) + +test_that("tidyverse transformers are correctly dropped", { + # TODO maybe there is a more minimal test than this. + t_style <- tidyverse_style() + t_fun <- transformers_drop("x", t_style) + + names_line_break <- c( + "set_line_break_around_comma_and_or", + "set_line_break_after_assignment", + "set_line_break_after_opening_if_call_is_multi_line", + "set_line_break_before_closing_call", + "remove_line_break_in_fun_call", + "set_line_break_after_ggplot2_plus" + ) + expect_setequal(names(t_fun$line_break), names_line_break) + + names_spaces <- c( + "remove_space_before_closing_paren", + "remove_space_before_opening_paren", + "remove_space_before_comma", + "spacing_around_op", + "remove_space_after_opening_paren", + "set_space_between_levels" + ) + + expect_setequal(names(t_fun$space), names_spaces) + + names_indention <- c("indent_braces", "indent_op", "indent_without_paren") + expect_setequal(names(t_fun$indention), names_indention) + + names_tokens <- "fix_quotes" + expect_setequal(names(t_fun$token), names_tokens) +}) + + +test_that("if no transformers_drop is specified, no transformer is removed and no error issued", { + t_fun <- transformers_drop( + "x", t_no_drop + ) + expect_equal(t_fun, t_no_drop) + + t_fun <- transformers_drop( + "x", t_empty_drop1 + ) + expect_equal(t_fun, t_empty_drop1) + + t_fun <- transformers_drop( + "x", t_empty_drop2 + ) + expect_equal(t_fun, t_empty_drop2) +}) + +test_that("semi-colon is parsed without error", { + expect_equal( + transformers_drop(c("!a", ";", "b"), t), + t + ) +}) + + +test_that("can handle old style guide without transformer object", { + t_new <- t + t_new$transformers_drop <- NULL + expect_error( + transformers_drop(c("!a", ";", "b"), t_new), + NA + ) + expect_error( + style_text("1;3", transformers = t_new), + NA + ) +}) + +test_that("can handle default", { + t_no_drop <- create_style_guide( + space = 
list(remove_space_after_excl_), + style_guide_name = "styler::t@https://github.com/r-lib", + style_guide_version = as.character(packageVersion("styler")) + ) + expect_error( + transformers_drop(c("!a", ";", "b"), t_no_drop), + NA + ) + expect_error( + style_text("a =2 ", transformers = t_no_drop), + NA + ) +}) diff --git a/tests/testthat/test-unary.R b/tests/testthat/test-unary.R index 0972447ac..8fe52eb0c 100644 --- a/tests/testthat/test-unary.R +++ b/tests/testthat/test-unary.R @@ -1,15 +1,18 @@ -context("no spaces before unary operator") + test_that("no spaces before unary operator", { expect_warning(test_collection("unary_spacing", - "unary_simple", - transformer = style_text), NA) + "unary_simple", + transformer = style_text + ), NA) expect_warning(test_collection("unary_spacing", - "unary_complex", - transformer = style_text), NA) + "unary_complex", + transformer = style_text + ), NA) expect_warning(test_collection("unary_spacing", - "unary_indention", - transformer = style_text), NA) + "unary_indention", + transformer = style_text + ), NA) }) diff --git a/tests/testthat/test-unindention.R b/tests/testthat/test-unindention.R index 988bc5469..cd06de29c 100644 --- a/tests/testthat/test-unindention.R +++ b/tests/testthat/test-unindention.R @@ -1,25 +1,26 @@ -context("unindention") + test_that("round brackets are unindented correctly", { expect_warning(test_collection("unindention", - "mixed", - transformer = style_text, - write_back = TRUE), NA) + "mixed", + transformer = style_text_without_curly_curly + ), NA) }) test_that("tokens are not dropped in named vector", { expect_warning(test_collection("unindention", - "vec", - transformer = style_text, - write_back = TRUE), NA) + "vec", + transformer = style_text + ), NA) }) -test_that(paste("if last token is multi-line and no line break precede,", - "unindention is correct"), { +test_that(paste( + "if last token is multi-line and no line break precede,", + "unindention is correct" +), { expect_warning(test_collection("unindention", - "vec", - transformer = style_text, - write_back = TRUE), NA) + "vec", + transformer = style_text + ), NA) }) - diff --git a/tests/testthat/test-unindention_regex.R b/tests/testthat/test-unindention_regex.R index ed212e4e8..7e364fa46 100644 --- a/tests/testthat/test-unindention_regex.R +++ b/tests/testthat/test-unindention_regex.R @@ -1,4 +1,4 @@ -context("unindention regex") + test_that("forced regex token-dependent indention", { expect_warning(test_collection( "unindention_regex", "regex_force_with", @@ -6,24 +6,29 @@ test_that("forced regex token-dependent indention", { "^# ", "^## ", "^### " - ))), NA) + )) + ), NA) }) test_that("do not force regex token-dependent indention without pattern", { expect_warning(test_collection( "unindention_regex", "regex_force_no", transformer = style_text, - reindention = specify_reindention(NULL)), NA) + reindention = specify_reindention(NULL) + ), NA) }) test_that("forced regex token-dependent indention without pattern", { - expect_warning(test_collection( - "unindention_regex", "random_non_comment_indention", - transformer = style_text, reindention = specify_reindention( - regex_pattern = "bbx", - indention = 5, - comments_only = FALSE - )), - NA) + expect_warning( + test_collection( + "unindention_regex", "random_non_comment_indention", + transformer = style_text, reindention = specify_reindention( + regex_pattern = "bbx", + indention = 5, + comments_only = FALSE + ) + ), + NA + ) }) diff --git a/tests/testthat/test-utils.R b/tests/testthat/test-utils.R new file mode 
100644 index 000000000..ed43923ff --- /dev/null +++ b/tests/testthat/test-utils.R @@ -0,0 +1,18 @@ + + +test_that("non-comment-helpers", { + pd <- compute_parse_data_nested("a <- # hi \n x %>% b()") + child <- pd$child[[1]] + expect_equal(previous_non_comment(child, 4), 2) + expect_equal(next_non_comment(child, 2), 4) +}) + +test_that("files with and without blank EOF line are read correctly", { + expect_snapshot({ + read_utf8(test_path("reference-objects/missing-blank-at-EOF.R")) + }) + + expect_snapshot({ + read_utf8(test_path("reference-objects/non-missing-blank-at-EOF.R")) + }) +}) diff --git a/tests/testthat/test-varia.R b/tests/testthat/test-varia.R new file mode 100644 index 000000000..422363a0d --- /dev/null +++ b/tests/testthat/test-varia.R @@ -0,0 +1,24 @@ + + +test_that("ensure_last_n_empty", { + expect_equal( + ensure_last_n_empty("x"), + c("x", "") + ) + expect_equal( + ensure_last_n_empty(c("x", "")), + c("x", "") + ) + expect_equal( + ensure_last_n_empty(c("1", "2")), + c("1", "2", "") + ) +}) + +test_that("unsaved file is recognized from path", { + expect_true(is_unsaved_file("")) +}) + +test_that("nonexistent levels in factor creation lead to error", { + expect_error(character_to_ordered(c("x", "Y"), levels = "x")) +}) diff --git a/tests/testthat/test-zzz.R b/tests/testthat/test-zzz.R new file mode 100644 index 000000000..dbbee5082 --- /dev/null +++ b/tests/testthat/test-zzz.R @@ -0,0 +1,13 @@ +test_that("can delete empty cache directory", { + skip_if(getRversion() < package_version("4.0.0")) + skip_on_cran() + tmpdir <- withr::local_tempdir() + withr::local_dir(tmpdir) + dir.create("xxx") + expect_true(delete_if_cache_directory(file.path(getwd(), "xxx"))) + dir.create("xxx") + file.create("xxx/yyy") + list.files("xxx") + expect_false(delete_if_cache_directory(file.path(getwd(), "xxx"))) + expect_true(file.exists(tmpdir)) +}) diff --git a/tests/testthat/tests-cache-require-serial.R b/tests/testthat/tests-cache-require-serial.R new file mode 100644 index 000000000..96c861247 --- /dev/null +++ b/tests/testthat/tests-cache-require-serial.R @@ -0,0 +1,41 @@ +test_that("top-level test: Caches top-level expressions efficiently on style_text()", { + local_test_setup(cache = TRUE) + text <- test_path("cache-with-r-cache/mlflow-1-in.R") %>% + readLines() + benchmark <- system.time(text_styled <- as.character(style_text(text))) + expect_equal(text, text_styled) + full_cached_benchmark <- system.time(text_styled2 <- as.character(style_text(text_styled))) + expect_equal(text, text_styled2) + + # modify one function declaration + text_styled[2] <- gsub(")", " )", text_styled[2], fixed = TRUE) + partially_cached_benchmark <- system.time( + text_cached_partially <- as.character(style_text(text_styled)) + ) + expect_equal(text, text_cached_partially) + cache_deactivate() + not_cached_benchmark <- system.time( + text_not_cached <- as.character(style_text(text_styled)) + ) + expect_equal(text, text_not_cached) + + skip_on_cran() + skip_on_covr() + expect_lt( + partially_cached_benchmark["elapsed"] * 1.5, + not_cached_benchmark["elapsed"] + ) + expect_lt(full_cached_benchmark["elapsed"] * 35, benchmark["elapsed"]) +}) + + +test_that("roxygen code examples are written to cache as whole expressions, bringing a speed gain", { + skip_on_cran() + local_test_setup(cache = TRUE) + text <- readLines(test_path("cache-with-r-cache/roxygen-cache-1.R")) + first <- system.time(styled <- style_text(text)) + # don't use full cache, only roxygen cache + styled[1] <- "#' This is a nother text" + second <- 
system.time(style_text(styled)) + expect_gt(first["elapsed"], second["elapsed"] * 2.5) +}) diff --git a/tests/testthat/tidyeval/bang_bang-in_tree b/tests/testthat/tidyeval/bang_bang-in_tree index 2252b926f..5e03ff01a 100644 --- a/tests/testthat/tidyeval/bang_bang-in_tree +++ b/tests/testthat/tidyeval/bang_bang-in_tree @@ -1,165 +1,166 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/1] {3} + ¦--expr: names [0/0] {1} + ¦ ¦--expr: names [0/1] {3} ¦ ¦ °--SYMBOL: names [0/0] {2} ¦ ¦--LEFT_ASSIGN: <- [0/1] {4} - ¦ °--expr: [0/0] {5} - ¦ ¦--expr: [0/0] {7} + ¦ °--expr: c(SL [0/0] {5} + ¦ ¦--expr: c [0/0] {7} ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {6} ¦ ¦--'(': ( [0/0] {8} ¦ ¦--SYMBOL_SUB: SL [0/1] {9} ¦ ¦--EQ_SUB: = [0/1] {10} - ¦ ¦--expr: [0/0] {12} + ¦ ¦--expr: 'Sepa [0/0] {12} ¦ ¦ °--STR_CONST: 'Sepa [0/0] {11} ¦ °--')': ) [0/0] {13} - ¦--expr: [1/0] {14} - ¦ ¦--expr: [0/0] {16} + ¦--expr: head( [1/0] {14} + ¦ ¦--expr: head [0/0] {16} ¦ ¦ °--SYMBOL_FUNCTION_CALL: head [0/0] {15} ¦ ¦--'(': ( [0/0] {17} - ¦ ¦--expr: [0/0] {18} - ¦ ¦ ¦--expr: [0/0] {19} + ¦ ¦--expr: dplyr [0/0] {18} + ¦ ¦ ¦--expr: dplyr [0/0] {19} ¦ ¦ ¦ ¦--SYMBOL_PACKAGE: dplyr [0/0] {20} ¦ ¦ ¦ ¦--NS_GET: :: [0/0] {21} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: renam [0/0] {22} ¦ ¦ ¦--'(': ( [0/0] {23} - ¦ ¦ ¦--expr: [0/0] {24} - ¦ ¦ ¦ ¦--expr: [0/0] {26} + ¦ ¦ ¦--expr: iris[ [0/0] {24} + ¦ ¦ ¦ ¦--expr: iris [0/0] {26} ¦ ¦ ¦ ¦ °--SYMBOL: iris [0/0] {25} ¦ ¦ ¦ ¦--'[': [ [0/0] {27} ¦ ¦ ¦ ¦--',': , [0/0] {28} - ¦ ¦ ¦ ¦--expr: [0/0] {29} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {31} + ¦ ¦ ¦ ¦--expr: 1:2 [0/0] {29} + ¦ ¦ ¦ ¦ ¦--expr: 1 [0/0] {31} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {30} ¦ ¦ ¦ ¦ ¦--':': : [0/0] {32} - ¦ ¦ ¦ ¦ °--expr: [0/0] {34} + ¦ ¦ ¦ ¦ °--expr: 2 [0/0] {34} ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {33} ¦ ¦ ¦ °--']': ] [0/0] {35} ¦ ¦ ¦--',': , [0/1] {36} - ¦ ¦ ¦--expr: [0/0] {37} + ¦ ¦ ¦--expr: ! ! ! [0/0] {37} ¦ ¦ ¦ ¦--'!': ! [0/1] {38} - ¦ ¦ ¦ °--expr: [0/0] {39} + ¦ ¦ ¦ °--expr: ! ! n [0/0] {39} ¦ ¦ ¦ ¦--'!': ! [0/1] {40} - ¦ ¦ ¦ °--expr: [0/0] {41} + ¦ ¦ ¦ °--expr: ! nam [0/0] {41} ¦ ¦ ¦ ¦--'!': ! [0/1] {42} - ¦ ¦ ¦ °--expr: [0/0] {44} + ¦ ¦ ¦ °--expr: names [0/0] {44} ¦ ¦ ¦ °--SYMBOL: names [0/0] {43} ¦ ¦ °--')': ) [0/0] {45} ¦ ¦--',': , [0/1] {46} - ¦ ¦--expr: [0/0] {48} + ¦ ¦--expr: 3 [0/0] {48} ¦ ¦ °--NUM_CONST: 3 [0/0] {47} ¦ °--')': ) [0/0] {49} - ¦--expr: [1/0] {50} - ¦ ¦--expr: [0/0] {52} + ¦--expr: head( [1/0] {50} + ¦ ¦--expr: head [0/0] {52} ¦ ¦ °--SYMBOL_FUNCTION_CALL: head [0/0] {51} ¦ ¦--'(': ( [0/0] {53} - ¦ ¦--expr: [0/0] {54} - ¦ ¦ ¦--expr: [0/0] {55} + ¦ ¦--expr: dplyr [0/0] {54} + ¦ ¦ ¦--expr: dplyr [0/0] {55} ¦ ¦ ¦ ¦--SYMBOL_PACKAGE: dplyr [0/0] {56} ¦ ¦ ¦ ¦--NS_GET: :: [0/0] {57} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: renam [0/0] {58} ¦ ¦ ¦--'(': ( [0/0] {59} - ¦ ¦ ¦--expr: [0/0] {60} - ¦ ¦ ¦ ¦--expr: [0/0] {62} + ¦ ¦ ¦--expr: iris[ [0/0] {60} + ¦ ¦ ¦ ¦--expr: iris [0/0] {62} ¦ ¦ ¦ ¦ °--SYMBOL: iris [0/0] {61} ¦ ¦ ¦ ¦--'[': [ [0/0] {63} ¦ ¦ ¦ ¦--',': , [0/0] {64} - ¦ ¦ ¦ ¦--expr: [0/0] {65} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {67} + ¦ ¦ ¦ ¦--expr: 1:2 [0/0] {65} + ¦ ¦ ¦ ¦ ¦--expr: 1 [0/0] {67} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {66} ¦ ¦ ¦ ¦ ¦--':': : [0/0] {68} - ¦ ¦ ¦ ¦ °--expr: [0/0] {70} + ¦ ¦ ¦ ¦ °--expr: 2 [0/0] {70} ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {69} ¦ ¦ ¦ °--']': ] [0/0] {71} ¦ ¦ ¦--',': , [0/0] {72} - ¦ ¦ ¦--expr: [0/0] {73} + ¦ ¦ ¦--expr: !! ! [0/0] {73} ¦ ¦ ¦ ¦--'!': ! [0/0] {74} - ¦ ¦ ¦ °--expr: [0/0] {75} + ¦ ¦ ¦ °--expr: ! ! n [0/0] {75} ¦ ¦ ¦ ¦--'!': ! 
[0/1] {76} - ¦ ¦ ¦ °--expr: [0/0] {77} + ¦ ¦ ¦ °--expr: ! nam [0/0] {77} ¦ ¦ ¦ ¦--'!': ! [0/1] {78} - ¦ ¦ ¦ °--expr: [0/0] {80} + ¦ ¦ ¦ °--expr: names [0/0] {80} ¦ ¦ ¦ °--SYMBOL: names [0/0] {79} ¦ ¦ °--')': ) [0/0] {81} ¦ ¦--',': , [0/1] {82} - ¦ ¦--expr: [0/0] {84} + ¦ ¦--expr: 3 [0/0] {84} ¦ ¦ °--NUM_CONST: 3 [0/0] {83} ¦ °--')': ) [0/0] {85} - ¦--expr: [1/0] {86} - ¦ ¦--expr: [0/0] {88} + ¦--expr: head( [1/0] {86} + ¦ ¦--expr: head [0/0] {88} ¦ ¦ °--SYMBOL_FUNCTION_CALL: head [0/0] {87} ¦ ¦--'(': ( [0/0] {89} - ¦ ¦--expr: [0/0] {90} - ¦ ¦ ¦--expr: [0/0] {91} + ¦ ¦--expr: dplyr [0/0] {90} + ¦ ¦ ¦--expr: dplyr [0/0] {91} ¦ ¦ ¦ ¦--SYMBOL_PACKAGE: dplyr [0/0] {92} ¦ ¦ ¦ ¦--NS_GET: :: [0/0] {93} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: renam [0/0] {94} ¦ ¦ ¦--'(': ( [0/0] {95} - ¦ ¦ ¦--expr: [0/0] {96} - ¦ ¦ ¦ ¦--expr: [0/0] {98} + ¦ ¦ ¦--expr: iris[ [0/0] {96} + ¦ ¦ ¦ ¦--expr: iris [0/0] {98} ¦ ¦ ¦ ¦ °--SYMBOL: iris [0/0] {97} ¦ ¦ ¦ ¦--'[': [ [0/0] {99} ¦ ¦ ¦ ¦--',': , [0/0] {100} - ¦ ¦ ¦ ¦--expr: [0/0] {101} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {103} + ¦ ¦ ¦ ¦--expr: 1:2 [0/0] {101} + ¦ ¦ ¦ ¦ ¦--expr: 1 [0/0] {103} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {102} ¦ ¦ ¦ ¦ ¦--':': : [0/0] {104} - ¦ ¦ ¦ ¦ °--expr: [0/0] {106} + ¦ ¦ ¦ ¦ °--expr: 2 [0/0] {106} ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {105} ¦ ¦ ¦ °--']': ] [0/0] {107} ¦ ¦ ¦--',': , [0/1] {108} - ¦ ¦ ¦--expr: [0/0] {109} + ¦ ¦ ¦--expr: !!!na [0/0] {109} ¦ ¦ ¦ ¦--'!': ! [0/0] {110} - ¦ ¦ ¦ °--expr: [0/0] {111} + ¦ ¦ ¦ °--expr: !!nam [0/0] {111} ¦ ¦ ¦ ¦--'!': ! [0/0] {112} - ¦ ¦ ¦ °--expr: [0/0] {113} + ¦ ¦ ¦ °--expr: !name [0/0] {113} ¦ ¦ ¦ ¦--'!': ! [0/0] {114} - ¦ ¦ ¦ °--expr: [0/0] {116} + ¦ ¦ ¦ °--expr: names [0/0] {116} ¦ ¦ ¦ °--SYMBOL: names [0/0] {115} ¦ ¦ °--')': ) [0/0] {117} ¦ ¦--',': , [0/1] {118} - ¦ ¦--expr: [0/0] {120} + ¦ ¦--expr: 3 [0/0] {120} ¦ ¦ °--NUM_CONST: 3 [0/0] {119} ¦ °--')': ) [0/0] {121} - °--expr: [1/0] {122} - ¦--expr: [0/1] {124} + °--expr: my_su [1/0] {122} + ¦--expr: my_su [0/1] {124} ¦ °--SYMBOL: my_su [0/0] {123} ¦--LEFT_ASSIGN: <- [0/1] {125} - °--expr: [0/0] {126} + °--expr: funct [0/0] {126} ¦--FUNCTION: funct [0/0] {127} ¦--'(': ( [0/0] {128} ¦--SYMBOL_FORMALS: df [0/0] {129} ¦--',': , [0/1] {130} ¦--SYMBOL_FORMALS: group [0/0] {131} ¦--')': ) [0/1] {132} - °--expr: [0/0] {133} + °--expr: { + d [0/0] {133} ¦--'{': { [0/2] {134} - ¦--expr: [1/0] {135} - ¦ ¦--expr: [0/1] {138} + ¦--expr: df %> [1/0] {135} + ¦ ¦--expr: df [0/1] {138} ¦ ¦ °--SYMBOL: df [0/0] {137} ¦ ¦--SPECIAL-PIPE: %>% [0/4] {139} - ¦ ¦--expr: [1/1] {140} - ¦ ¦ ¦--expr: [0/0] {142} + ¦ ¦--expr: group [1/1] {140} + ¦ ¦ ¦--expr: group [0/0] {142} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: group [0/0] {141} ¦ ¦ ¦--'(': ( [0/1] {143} - ¦ ¦ ¦--expr: [0/0] {144} + ¦ ¦ ¦--expr: ! ! g [0/0] {144} ¦ ¦ ¦ ¦--'!': ! [0/1] {145} - ¦ ¦ ¦ °--expr: [0/0] {146} + ¦ ¦ ¦ °--expr: ! gro [0/0] {146} ¦ ¦ ¦ ¦--'!': ! 
[0/1] {147} - ¦ ¦ ¦ °--expr: [0/0] {149} + ¦ ¦ ¦ °--expr: group [0/0] {149} ¦ ¦ ¦ °--SYMBOL: group [0/0] {148} ¦ ¦ °--')': ) [0/0] {150} ¦ ¦--SPECIAL-PIPE: %>% [0/4] {151} - ¦ °--expr: [1/0] {152} - ¦ ¦--expr: [0/0] {154} + ¦ °--expr: summa [1/0] {152} + ¦ ¦--expr: summa [0/0] {154} ¦ ¦ °--SYMBOL_FUNCTION_CALL: summa [0/0] {153} ¦ ¦--'(': ( [0/0] {155} ¦ ¦--SYMBOL_SUB: a [0/1] {156} ¦ ¦--EQ_SUB: = [0/1] {157} - ¦ ¦--expr: [0/0] {158} - ¦ ¦ ¦--expr: [0/0] {160} + ¦ ¦--expr: mean( [0/0] {158} + ¦ ¦ ¦--expr: mean [0/0] {160} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: mean [0/0] {159} ¦ ¦ ¦--'(': ( [0/0] {161} - ¦ ¦ ¦--expr: [0/0] {163} + ¦ ¦ ¦--expr: a [0/0] {163} ¦ ¦ ¦ °--SYMBOL: a [0/0] {162} ¦ ¦ °--')': ) [0/0] {164} ¦ °--')': ) [0/0] {165} diff --git a/tests/testthat/tidyeval/bang_bang-out.R b/tests/testthat/tidyeval/bang_bang-out.R index a53e4c215..755c3210f 100644 --- a/tests/testthat/tidyeval/bang_bang-out.R +++ b/tests/testthat/tidyeval/bang_bang-out.R @@ -1,9 +1,9 @@ nameshift <- c(SL = "Sepal.Length") -head(dplyr::rename(iris[, 1:2], ! ! ! nameshift), 3) -head(dplyr::rename(iris[, 1:2], !! ! nameshift), 3) -head(dplyr::rename(iris[, 1:2], !!! nameshift), 3) +head(dplyr::rename(iris[, 1:2], ! ! !nameshift), 3) +head(dplyr::rename(iris[, 1:2], !! !nameshift), 3) +head(dplyr::rename(iris[, 1:2], !!!nameshift), 3) my_summarise <- function(df, group_var) { df %>% - group_by(! ! group_var) %>% + group_by(! !group_var) %>% summarise(a = mean(a)) } diff --git a/tests/testthat/tidyeval/eq_sub_and_comma-in_tree b/tests/testthat/tidyeval/eq_sub_and_comma-in_tree index af0da18b6..f707a6e07 100644 --- a/tests/testthat/tidyeval/eq_sub_and_comma-in_tree +++ b/tests/testthat/tidyeval/eq_sub_and_comma-in_tree @@ -1,16 +1,17 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/1] {3} + °--expr: a <- [0/0] {1} + ¦--expr: a [0/1] {3} ¦ °--SYMBOL: a [0/0] {2} ¦--LEFT_ASSIGN: <- [0/1] {4} - °--expr: [0/0] {5} + °--expr: funct [0/0] {5} ¦--FUNCTION: funct [0/0] {6} ¦--'(': ( [0/0] {7} ¦--')': ) [0/1] {8} - °--expr: [0/0] {9} + °--expr: { + d [0/0] {9} ¦--'{': { [0/2] {10} - ¦--expr: [1/0] {11} - ¦ ¦--expr: [0/0] {13} + ¦--expr: data_ [1/0] {11} + ¦ ¦--expr: data_ [0/0] {13} ¦ ¦ °--SYMBOL_FUNCTION_CALL: data_ [0/0] {12} ¦ ¦--'(': ( [0/4] {14} ¦ ¦--SYMBOL_SUB: b [1/1] {15} diff --git a/tests/testthat/tidyeval/setting_var_names-in_tree b/tests/testthat/tidyeval/setting_var_names-in_tree index fe87157ae..32cc2cfda 100644 --- a/tests/testthat/tidyeval/setting_var_names-in_tree +++ b/tests/testthat/tidyeval/setting_var_names-in_tree @@ -1,46 +1,46 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/1] {4} + °--expr: mtcar [0/0] {1} + ¦--expr: mtcar [0/1] {4} ¦ °--SYMBOL: mtcar [0/0] {3} ¦--SPECIAL-PIPE: %>% [0/2] {5} - ¦--expr: [1/1] {6} - ¦ ¦--expr: [0/0] {8} + ¦--expr: group [1/1] {6} + ¦ ¦--expr: group [0/0] {8} ¦ ¦ °--SYMBOL_FUNCTION_CALL: group [0/0] {7} ¦ ¦--'(': ( [0/0] {9} - ¦ ¦--expr: [0/0] {11} + ¦ ¦--expr: am [0/0] {11} ¦ ¦ °--SYMBOL: am [0/0] {10} ¦ °--')': ) [0/0] {12} ¦--SPECIAL-PIPE: %>% [0/2] {13} - °--expr: [1/0] {14} - ¦--expr: [0/0] {16} + °--expr: summa [1/0] {14} + ¦--expr: summa [0/0] {16} ¦ °--SYMBOL_FUNCTION_CALL: summa [0/0] {15} ¦--'(': ( [0/4] {17} - ¦--expr: [1/0] {18} - ¦ ¦--expr: [0/0] {19} + ¦--expr: !!mea [1/0] {18} + ¦ ¦--expr: !!mea [0/0] {19} ¦ ¦ ¦--'!': ! [0/0] {20} - ¦ ¦ °--expr: [0/0] {21} + ¦ ¦ °--expr: !mean [0/0] {21} ¦ ¦ ¦--'!': ! 
[0/0] {22} - ¦ ¦ °--expr: [0/0] {24} + ¦ ¦ °--expr: mean_ [0/0] {24} ¦ ¦ °--SYMBOL: mean_ [0/0] {23} ¦ ¦--LEFT_ASSIGN: := [0/0] {25} - ¦ °--expr: [0/0] {26} - ¦ ¦--expr: [0/0] {28} + ¦ °--expr: mean( [0/0] {26} + ¦ ¦--expr: mean [0/0] {28} ¦ ¦ °--SYMBOL_FUNCTION_CALL: mean [0/0] {27} ¦ ¦--'(': ( [0/0] {29} - ¦ ¦--expr: [0/0] {31} + ¦ ¦--expr: cyl [0/0] {31} ¦ ¦ °--SYMBOL: cyl [0/0] {30} ¦ °--')': ) [0/0] {32} ¦--',': , [0/4] {33} - ¦--expr: [1/2] {34} - ¦ ¦--expr: [0/4] {35} + ¦--expr: !!cou [1/2] {34} + ¦ ¦--expr: !!cou [0/4] {35} ¦ ¦ ¦--'!': ! [0/0] {36} - ¦ ¦ °--expr: [0/0] {37} + ¦ ¦ °--expr: !coun [0/0] {37} ¦ ¦ ¦--'!': ! [0/0] {38} - ¦ ¦ °--expr: [0/0] {40} + ¦ ¦ °--expr: count [0/0] {40} ¦ ¦ °--SYMBOL: count [0/0] {39} ¦ ¦--LEFT_ASSIGN: := [0/0] {41} - ¦ °--expr: [0/0] {42} - ¦ ¦--expr: [0/0] {44} + ¦ °--expr: n() [0/0] {42} + ¦ ¦--expr: n [0/0] {44} ¦ ¦ °--SYMBOL_FUNCTION_CALL: n [0/0] {43} ¦ ¦--'(': ( [0/0] {45} ¦ °--')': ) [0/0] {46} diff --git a/tests/testthat/tidyeval/setting_var_names-out.R b/tests/testthat/tidyeval/setting_var_names-out.R index cdb7d53c4..5dc74ad3f 100644 --- a/tests/testthat/tidyeval/setting_var_names-out.R +++ b/tests/testthat/tidyeval/setting_var_names-out.R @@ -1,6 +1,6 @@ mtcars %>% group_by(am) %>% summarise( - !! mean_nm := mean(cyl), - !! count_nm := n() + !!mean_nm := mean(cyl), + !!count_nm := n() ) diff --git a/tests/testthat/token_adding_removing/add_brackets_in_pipe-in.R b/tests/testthat/token_adding_removing/add_brackets_in_pipe-in.R new file mode 100644 index 000000000..a803abadb --- /dev/null +++ b/tests/testthat/token_adding_removing/add_brackets_in_pipe-in.R @@ -0,0 +1,5 @@ +1 %>% 2 +1 %x% 1 +1 %x% y +1 %>% x +1 %s% 1 diff --git a/tests/testthat/token_adding_removing/add_brackets_in_pipe-in_tree b/tests/testthat/token_adding_removing/add_brackets_in_pipe-in_tree new file mode 100644 index 000000000..41937bcfc --- /dev/null +++ b/tests/testthat/token_adding_removing/add_brackets_in_pipe-in_tree @@ -0,0 +1,31 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: 1 %>% [0/0] {1} + ¦ ¦--expr: 1 [0/1] {3} + ¦ ¦ °--NUM_CONST: 1 [0/0] {2} + ¦ ¦--SPECIAL-PIPE: %>% [0/1] {4} + ¦ °--expr: 2 [0/0] {6} + ¦ °--NUM_CONST: 2 [0/0] {5} + ¦--expr: 1 %x% [1/0] {7} + ¦ ¦--expr: 1 [0/1] {9} + ¦ ¦ °--NUM_CONST: 1 [0/0] {8} + ¦ ¦--SPECIAL-OTHER: %x% [0/1] {10} + ¦ °--expr: 1 [0/0] {12} + ¦ °--NUM_CONST: 1 [0/0] {11} + ¦--expr: 1 %x% [1/0] {13} + ¦ ¦--expr: 1 [0/1] {15} + ¦ ¦ °--NUM_CONST: 1 [0/0] {14} + ¦ ¦--SPECIAL-OTHER: %x% [0/1] {16} + ¦ °--expr: y [0/0] {18} + ¦ °--SYMBOL: y [0/0] {17} + ¦--expr: 1 %>% [1/0] {19} + ¦ ¦--expr: 1 [0/1] {21} + ¦ ¦ °--NUM_CONST: 1 [0/0] {20} + ¦ ¦--SPECIAL-PIPE: %>% [0/1] {22} + ¦ °--expr: x [0/0] {24} + ¦ °--SYMBOL: x [0/0] {23} + °--expr: 1 %s% [1/0] {25} + ¦--expr: 1 [0/1] {27} + ¦ °--NUM_CONST: 1 [0/0] {26} + ¦--SPECIAL-OTHER: %s% [0/1] {28} + °--expr: 1 [0/0] {30} + °--NUM_CONST: 1 [0/0] {29} diff --git a/tests/testthat/token_adding_removing/add_brackets_in_pipe-out.R b/tests/testthat/token_adding_removing/add_brackets_in_pipe-out.R new file mode 100644 index 000000000..3bb029b9f --- /dev/null +++ b/tests/testthat/token_adding_removing/add_brackets_in_pipe-out.R @@ -0,0 +1,5 @@ +1 %>% 2 +1 %x% 1 +1 %x% y +1 %>% x() +1 %s% 1 diff --git a/tests/testthat/token_adding_removing/add_brackets_in_pipe-stylerignore-in.R b/tests/testthat/token_adding_removing/add_brackets_in_pipe-stylerignore-in.R new file mode 100644 index 000000000..345e62c6f --- /dev/null +++ 
b/tests/testthat/token_adding_removing/add_brackets_in_pipe-stylerignore-in.R @@ -0,0 +1,54 @@ +# styler: off +aflh({ + isfris %>% + # comment + tjnfaxasf12af7987A() %>% # comment + tjnfaxdfaasfaf7987A() %>% + tjnfxasfaf798fA() %>% + tf797A() %>% # more + # comment + yyexprzB() +}) +# styler: on + + +aflh({ + isfris %>% + # comment + tjnfaxasf12af7987A() %>% # comment + tjnfaxdfaasfaf7987A() %>% + tjnfxasfaf798fA() %>% + tf797A() %>% # more + # comment + yyexprzB() +}) + + +# styler: off +aflh({ + isfris %>% + # comment + tjnfaxasf12af7987A %>% + tjnfaxdfaasfaf7987A %>% + tjnfxasfaf798fA %>% + # more + tf797A %>% # comments + # here + yyexprzB # + # whatnot +}) +# styler: on + + +aflh({ + isfris %>% + # comment + tjnfaxasf12af7987A %>% + tjnfaxdfaasfaf7987A %>% + tjnfxasfaf798fA %>% + # more + tf797A %>% # comments + # here + yyexprzB # + # whatnot +}) diff --git a/tests/testthat/token_adding_removing/add_brackets_in_pipe-stylerignore-in_tree b/tests/testthat/token_adding_removing/add_brackets_in_pipe-stylerignore-in_tree new file mode 100644 index 000000000..0e89e475e --- /dev/null +++ b/tests/testthat/token_adding_removing/add_brackets_in_pipe-stylerignore-in_tree @@ -0,0 +1,163 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: # sty [0/0] {1} + ¦--expr: aflh( [1/0] {2} + ¦ ¦--expr: aflh [0/0] {4} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: aflh [0/0] {3} + ¦ ¦--'(': ( [0/0] {5} + ¦ ¦--expr: { + i [0/0] {6} + ¦ ¦ ¦--'{': { [0/2] {7} + ¦ ¦ ¦--expr: isfri [1/0] {8} + ¦ ¦ ¦ ¦--expr: isfri [0/1] {14} + ¦ ¦ ¦ ¦ °--SYMBOL: isfri [0/0] {13} + ¦ ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {15} + ¦ ¦ ¦ ¦--COMMENT: # com [1/4] {16} + ¦ ¦ ¦ ¦--expr: tjnfa [1/1] {17} + ¦ ¦ ¦ ¦ ¦--expr: tjnfa [0/0] {19} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: tjnfa [0/0] {18} + ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {20} + ¦ ¦ ¦ ¦ °--')': ) [0/0] {21} + ¦ ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {22} + ¦ ¦ ¦ ¦--COMMENT: # com [0/4] {23} + ¦ ¦ ¦ ¦--expr: tjnfa [1/1] {24} + ¦ ¦ ¦ ¦ ¦--expr: tjnfa [0/0] {26} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: tjnfa [0/0] {25} + ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {27} + ¦ ¦ ¦ ¦ °--')': ) [0/0] {28} + ¦ ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {29} + ¦ ¦ ¦ ¦--expr: tjnfx [1/1] {30} + ¦ ¦ ¦ ¦ ¦--expr: tjnfx [0/0] {32} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: tjnfx [0/0] {31} + ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {33} + ¦ ¦ ¦ ¦ °--')': ) [0/0] {34} + ¦ ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {35} + ¦ ¦ ¦ ¦--expr: tf797 [1/1] {36} + ¦ ¦ ¦ ¦ ¦--expr: tf797 [0/0] {38} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: tf797 [0/0] {37} + ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {39} + ¦ ¦ ¦ ¦ °--')': ) [0/0] {40} + ¦ ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {41} + ¦ ¦ ¦ ¦--COMMENT: # mor [0/4] {42} + ¦ ¦ ¦ ¦--COMMENT: # com [1/4] {43} + ¦ ¦ ¦ °--expr: yyexp [1/0] {44} + ¦ ¦ ¦ ¦--expr: yyexp [0/0] {46} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: yyexp [0/0] {45} + ¦ ¦ ¦ ¦--'(': ( [0/0] {47} + ¦ ¦ ¦ °--')': ) [0/0] {48} + ¦ ¦ °--'}': } [1/0] {49} + ¦ °--')': ) [0/0] {50} + ¦--COMMENT: # sty [1/0] {51} + ¦--expr: aflh( [3/0] {52} + ¦ ¦--expr: aflh [0/0] {54} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: aflh [0/0] {53} + ¦ ¦--'(': ( [0/0] {55} + ¦ ¦--expr: { + i [0/0] {56} + ¦ ¦ ¦--'{': { [0/2] {57} + ¦ ¦ ¦--expr: isfri [1/0] {58} + ¦ ¦ ¦ ¦--expr: isfri [0/1] {64} + ¦ ¦ ¦ ¦ °--SYMBOL: isfri [0/0] {63} + ¦ ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {65} + ¦ ¦ ¦ ¦--COMMENT: # com [1/4] {66} + ¦ ¦ ¦ ¦--expr: tjnfa [1/1] {67} + ¦ ¦ ¦ ¦ ¦--expr: tjnfa [0/0] {69} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: tjnfa [0/0] {68} + ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {70} + ¦ ¦ ¦ ¦ °--')': ) [0/0] {71} + ¦ ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {72} + ¦ ¦ ¦ ¦--COMMENT: # com [0/4] 
{73} + ¦ ¦ ¦ ¦--expr: tjnfa [1/1] {74} + ¦ ¦ ¦ ¦ ¦--expr: tjnfa [0/0] {76} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: tjnfa [0/0] {75} + ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {77} + ¦ ¦ ¦ ¦ °--')': ) [0/0] {78} + ¦ ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {79} + ¦ ¦ ¦ ¦--expr: tjnfx [1/1] {80} + ¦ ¦ ¦ ¦ ¦--expr: tjnfx [0/0] {82} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: tjnfx [0/0] {81} + ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {83} + ¦ ¦ ¦ ¦ °--')': ) [0/0] {84} + ¦ ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {85} + ¦ ¦ ¦ ¦--expr: tf797 [1/1] {86} + ¦ ¦ ¦ ¦ ¦--expr: tf797 [0/0] {88} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: tf797 [0/0] {87} + ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {89} + ¦ ¦ ¦ ¦ °--')': ) [0/0] {90} + ¦ ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {91} + ¦ ¦ ¦ ¦--COMMENT: # mor [0/4] {92} + ¦ ¦ ¦ ¦--COMMENT: # com [1/4] {93} + ¦ ¦ ¦ °--expr: yyexp [1/0] {94} + ¦ ¦ ¦ ¦--expr: yyexp [0/0] {96} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: yyexp [0/0] {95} + ¦ ¦ ¦ ¦--'(': ( [0/0] {97} + ¦ ¦ ¦ °--')': ) [0/0] {98} + ¦ ¦ °--'}': } [1/0] {99} + ¦ °--')': ) [0/0] {100} + ¦--COMMENT: # sty [3/0] {101} + ¦--expr: aflh( [1/0] {102} + ¦ ¦--expr: aflh [0/0] {104} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: aflh [0/0] {103} + ¦ ¦--'(': ( [0/0] {105} + ¦ ¦--expr: { + i [0/0] {106} + ¦ ¦ ¦--'{': { [0/2] {107} + ¦ ¦ ¦--expr: isfri [1/1] {108} + ¦ ¦ ¦ ¦--expr: isfri [0/1] {114} + ¦ ¦ ¦ ¦ °--SYMBOL: isfri [0/0] {113} + ¦ ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {115} + ¦ ¦ ¦ ¦--COMMENT: # com [1/4] {116} + ¦ ¦ ¦ ¦--expr: tjnfa [1/1] {118} + ¦ ¦ ¦ ¦ °--SYMBOL: tjnfa [0/0] {117} + ¦ ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {119} + ¦ ¦ ¦ ¦--expr: tjnfa [1/1] {121} + ¦ ¦ ¦ ¦ °--SYMBOL: tjnfa [0/0] {120} + ¦ ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {122} + ¦ ¦ ¦ ¦--expr: tjnfx [1/1] {124} + ¦ ¦ ¦ ¦ °--SYMBOL: tjnfx [0/0] {123} + ¦ ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {125} + ¦ ¦ ¦ ¦--COMMENT: # mor [1/4] {126} + ¦ ¦ ¦ ¦--expr: tf797 [1/1] {128} + ¦ ¦ ¦ ¦ °--SYMBOL: tf797 [0/0] {127} + ¦ ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {129} + ¦ ¦ ¦ ¦--COMMENT: # com [0/4] {130} + ¦ ¦ ¦ ¦--COMMENT: # her [1/4] {131} + ¦ ¦ ¦ °--expr: yyexp [1/0] {133} + ¦ ¦ ¦ °--SYMBOL: yyexp [0/0] {132} + ¦ ¦ ¦--COMMENT: # [0/2] {134} + ¦ ¦ ¦--COMMENT: # wha [1/0] {135} + ¦ ¦ °--'}': } [1/0] {136} + ¦ °--')': ) [0/0] {137} + ¦--COMMENT: # sty [1/0] {138} + °--expr: aflh( [3/0] {139} + ¦--expr: aflh [0/0] {141} + ¦ °--SYMBOL_FUNCTION_CALL: aflh [0/0] {140} + ¦--'(': ( [0/0] {142} + ¦--expr: { + i [0/0] {143} + ¦ ¦--'{': { [0/2] {144} + ¦ ¦--expr: isfri [1/1] {145} + ¦ ¦ ¦--expr: isfri [0/1] {151} + ¦ ¦ ¦ °--SYMBOL: isfri [0/0] {150} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {152} + ¦ ¦ ¦--COMMENT: # com [1/4] {153} + ¦ ¦ ¦--expr: tjnfa [1/1] {155} + ¦ ¦ ¦ °--SYMBOL: tjnfa [0/0] {154} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {156} + ¦ ¦ ¦--expr: tjnfa [1/1] {158} + ¦ ¦ ¦ °--SYMBOL: tjnfa [0/0] {157} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {159} + ¦ ¦ ¦--expr: tjnfx [1/1] {161} + ¦ ¦ ¦ °--SYMBOL: tjnfx [0/0] {160} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/4] {162} + ¦ ¦ ¦--COMMENT: # mor [1/4] {163} + ¦ ¦ ¦--expr: tf797 [1/1] {165} + ¦ ¦ ¦ °--SYMBOL: tf797 [0/0] {164} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {166} + ¦ ¦ ¦--COMMENT: # com [0/4] {167} + ¦ ¦ ¦--COMMENT: # her [1/4] {168} + ¦ ¦ °--expr: yyexp [1/0] {170} + ¦ ¦ °--SYMBOL: yyexp [0/0] {169} + ¦ ¦--COMMENT: # [0/2] {171} + ¦ ¦--COMMENT: # wha [1/0] {172} + ¦ °--'}': } [1/0] {173} + °--')': ) [0/0] {174} diff --git a/tests/testthat/token_adding_removing/add_brackets_in_pipe-stylerignore-out.R b/tests/testthat/token_adding_removing/add_brackets_in_pipe-stylerignore-out.R new file mode 100644 index 000000000..cdf6879f5 --- /dev/null +++ 
b/tests/testthat/token_adding_removing/add_brackets_in_pipe-stylerignore-out.R @@ -0,0 +1,54 @@ +# styler: off +aflh({ + isfris %>% + # comment + tjnfaxasf12af7987A() %>% # comment + tjnfaxdfaasfaf7987A() %>% + tjnfxasfaf798fA() %>% + tf797A() %>% # more + # comment + yyexprzB() +}) +# styler: on + + +aflh({ + isfris %>% + # comment + tjnfaxasf12af7987A() %>% # comment + tjnfaxdfaasfaf7987A() %>% + tjnfxasfaf798fA() %>% + tf797A() %>% # more + # comment + yyexprzB() +}) + + +# styler: off +aflh({ + isfris %>% + # comment + tjnfaxasf12af7987A %>% + tjnfaxdfaasfaf7987A %>% + tjnfxasfaf798fA %>% + # more + tf797A %>% # comments + # here + yyexprzB # + # whatnot +}) +# styler: on + + +aflh({ + isfris %>% + # comment + tjnfaxasf12af7987A() %>% + tjnfaxdfaasfaf7987A() %>% + tjnfxasfaf798fA() %>% + # more + tf797A() %>% # comments + # here + yyexprzB() # + # whatnot +}) diff --git a/tests/testthat/token_adding_removing/double_braces-in_tree b/tests/testthat/token_adding_removing/double_braces-in_tree index 901096bed..6ae6d16e2 100644 --- a/tests/testthat/token_adding_removing/double_braces-in_tree +++ b/tests/testthat/token_adding_removing/double_braces-in_tree @@ -1,27 +1,27 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} + ¦--expr: if (X [0/0] {1} ¦ ¦--IF: if [0/1] {2} ¦ ¦--'(': ( [0/0] {3} - ¦ ¦--expr: [0/0] {5} + ¦ ¦--expr: X [0/0] {5} ¦ ¦ °--SYMBOL: X [0/0] {4} ¦ ¦--')': ) [0/2] {6} - ¦ °--expr: [1/0] {7} - ¦ ¦--expr: [0/0] {9} + ¦ °--expr: retur [1/0] {7} + ¦ ¦--expr: retur [0/0] {9} ¦ ¦ °--SYMBOL_FUNCTION_CALL: retur [0/0] {8} ¦ ¦--'(': ( [0/0] {10} - ¦ ¦--expr: [0/0] {12} + ¦ ¦--expr: TRUE [0/0] {12} ¦ ¦ °--NUM_CONST: TRUE [0/0] {11} ¦ °--')': ) [0/0] {13} - °--expr: [2/0] {14} + °--expr: if (X [2/0] {14} ¦--IF: if [0/1] {15} ¦--'(': ( [0/0] {16} - ¦--expr: [0/0] {18} + ¦--expr: X [0/0] {18} ¦ °--SYMBOL: X [0/0] {17} ¦--')': ) [0/1] {19} - °--expr: [0/0] {20} - ¦--expr: [0/0] {22} + °--expr: retur [0/0] {20} + ¦--expr: retur [0/0] {22} ¦ °--SYMBOL_FUNCTION_CALL: retur [0/0] {21} ¦--'(': ( [0/0] {23} - ¦--expr: [0/0] {25} + ¦--expr: FALSE [0/0] {25} ¦ °--NUM_CONST: FALSE [0/0] {24} °--')': ) [0/0] {26} diff --git a/tests/testthat/token_adding_removing/double_braces-out.R b/tests/testthat/token_adding_removing/double_braces-out.R index 2bba0cc21..4df1c6536 100644 --- a/tests/testthat/token_adding_removing/double_braces-out.R +++ b/tests/testthat/token_adding_removing/double_braces-out.R @@ -2,4 +2,6 @@ if (X) { return(TRUE) } -if (X) return(FALSE) +if (X) { + return(FALSE) +} diff --git a/tests/testthat/token_adding_removing/else-pipe-in.R b/tests/testthat/token_adding_removing/else-pipe-in.R new file mode 100644 index 000000000..b0a932ec6 --- /dev/null +++ b/tests/testthat/token_adding_removing/else-pipe-in.R @@ -0,0 +1,9 @@ +mtcars %>% + mutate( + x = 1 + ) %>% + if (FALSE) { + mutate(., country = 2) + } else . + +# adding braces around . 
in else changes evaluation diff --git a/tests/testthat/token_adding_removing/else-pipe-in_tree b/tests/testthat/token_adding_removing/else-pipe-in_tree new file mode 100644 index 000000000..c8f1974f9 --- /dev/null +++ b/tests/testthat/token_adding_removing/else-pipe-in_tree @@ -0,0 +1,41 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: mtcar [0/0] {1} + ¦ ¦--expr: mtcar [0/1] {4} + ¦ ¦ °--SYMBOL: mtcar [0/0] {3} + ¦ ¦--SPECIAL-PIPE: %>% [0/2] {5} + ¦ ¦--expr: mutat [1/1] {6} + ¦ ¦ ¦--expr: mutat [0/0] {8} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: mutat [0/0] {7} + ¦ ¦ ¦--'(': ( [0/4] {9} + ¦ ¦ ¦--SYMBOL_SUB: x [1/1] {10} + ¦ ¦ ¦--EQ_SUB: = [0/1] {11} + ¦ ¦ ¦--expr: 1 [0/2] {13} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {12} + ¦ ¦ °--')': ) [1/0] {14} + ¦ ¦--SPECIAL-PIPE: %>% [0/2] {15} + ¦ °--expr: if (F [1/0] {16} + ¦ ¦--IF: if [0/1] {17} + ¦ ¦--'(': ( [0/0] {18} + ¦ ¦--expr: FALSE [0/0] {20} + ¦ ¦ °--NUM_CONST: FALSE [0/0] {19} + ¦ ¦--')': ) [0/1] {21} + ¦ ¦--expr: { + [0/1] {22} + ¦ ¦ ¦--'{': { [0/4] {23} + ¦ ¦ ¦--expr: mutat [1/2] {24} + ¦ ¦ ¦ ¦--expr: mutat [0/0] {26} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: mutat [0/0] {25} + ¦ ¦ ¦ ¦--'(': ( [0/0] {27} + ¦ ¦ ¦ ¦--expr: . [0/0] {29} + ¦ ¦ ¦ ¦ °--SYMBOL: . [0/0] {28} + ¦ ¦ ¦ ¦--',': , [0/1] {30} + ¦ ¦ ¦ ¦--SYMBOL_SUB: count [0/1] {31} + ¦ ¦ ¦ ¦--EQ_SUB: = [0/1] {32} + ¦ ¦ ¦ ¦--expr: 2 [0/0] {34} + ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {33} + ¦ ¦ ¦ °--')': ) [0/0] {35} + ¦ ¦ °--'}': } [1/0] {36} + ¦ ¦--ELSE: else [0/1] {37} + ¦ °--expr: . [0/0] {39} + ¦ °--SYMBOL: . [0/0] {38} + °--COMMENT: # add [2/0] {40} diff --git a/tests/testthat/token_adding_removing/else-pipe-out.R b/tests/testthat/token_adding_removing/else-pipe-out.R new file mode 100644 index 000000000..b0a932ec6 --- /dev/null +++ b/tests/testthat/token_adding_removing/else-pipe-out.R @@ -0,0 +1,9 @@ +mtcars %>% + mutate( + x = 1 + ) %>% + if (FALSE) { + mutate(., country = 2) + } else . + +# adding braces around . 
in else changes evaluation diff --git a/tests/testthat/token_adding_removing/for_while_stylerignore-in.R b/tests/testthat/token_adding_removing/for_while_stylerignore-in.R new file mode 100644 index 000000000..bd4dbd543 --- /dev/null +++ b/tests/testthat/token_adding_removing/for_while_stylerignore-in.R @@ -0,0 +1,78 @@ +while(TRUE) + 3 + +# styler: off +while(TRUE) + 3 + +# styler: on +while(TRUE) + # styler: off + 3 + +# styler: on + +for (i # styler: off + in 3) + 3 + +# styler: off +for (i + in 3) + 3 +# styler: on + + +# styler: off +for (i + in 3) { + 3} +# styler: on + + +for (i + in 3) { + 3} # styler: off + +for (i + in 3) {# styler: off + 3} + +for (i# styler: off + in 3) { + 3} + + +while( + FALSE +) { + # styler: off + 1 + # styler: on +} + +while( + FALSE # comment +) { + # styler: off + 1 + # styler: on +} + +while( # styler: off + FALSE +) { + + 1 + +} + +while( + # styler: off + FALSE +) { + + 1 + +} +# styler: on diff --git a/tests/testthat/token_adding_removing/for_while_stylerignore-in_tree b/tests/testthat/token_adding_removing/for_while_stylerignore-in_tree new file mode 100644 index 000000000..ef9ea4bbb --- /dev/null +++ b/tests/testthat/token_adding_removing/for_while_stylerignore-in_tree @@ -0,0 +1,180 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: while [0/0] {1} + ¦ ¦--WHILE: while [0/0] {2} + ¦ ¦--'(': ( [0/0] {3} + ¦ ¦--expr: TRUE [0/0] {5} + ¦ ¦ °--NUM_CONST: TRUE [0/0] {4} + ¦ ¦--')': ) [0/2] {6} + ¦ °--expr: 3 [1/0] {8} + ¦ °--NUM_CONST: 3 [0/0] {7} + ¦--COMMENT: # sty [2/0] {9} + ¦--expr: while [1/0] {10} + ¦ ¦--WHILE: while [0/0] {11} + ¦ ¦--'(': ( [0/0] {12} + ¦ ¦--expr: TRUE [0/0] {14} + ¦ ¦ °--NUM_CONST: TRUE [0/0] {13} + ¦ ¦--')': ) [0/2] {15} + ¦ °--expr: 3 [1/0] {17} + ¦ °--NUM_CONST: 3 [0/0] {16} + ¦--COMMENT: # sty [2/0] {18} + ¦--expr: while [1/0] {19} + ¦ ¦--WHILE: while [0/0] {20} + ¦ ¦--'(': ( [0/0] {21} + ¦ ¦--expr: TRUE [0/0] {23} + ¦ ¦ °--NUM_CONST: TRUE [0/0] {22} + ¦ ¦--')': ) [0/2] {24} + ¦ ¦--COMMENT: # sty [1/2] {25} + ¦ °--expr: 3 [1/0] {27} + ¦ °--NUM_CONST: 3 [0/0] {26} + ¦--COMMENT: # sty [2/0] {28} + ¦--expr: for ( [2/0] {29} + ¦ ¦--FOR: for [0/1] {30} + ¦ ¦--forcond: (i # [0/2] {31} + ¦ ¦ ¦--'(': ( [0/0] {32} + ¦ ¦ ¦--SYMBOL: i [0/1] {33} + ¦ ¦ ¦--COMMENT: # sty [0/5] {34} + ¦ ¦ ¦--IN: in [1/1] {35} + ¦ ¦ ¦--expr: 3 [0/0] {37} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {36} + ¦ ¦ °--')': ) [0/0] {38} + ¦ °--expr: 3 [1/0] {40} + ¦ °--NUM_CONST: 3 [0/0] {39} + ¦--COMMENT: # sty [2/0] {41} + ¦--expr: for ( [1/0] {42} + ¦ ¦--FOR: for [0/1] {43} + ¦ ¦--forcond: (i + [0/2] {44} + ¦ ¦ ¦--'(': ( [0/0] {45} + ¦ ¦ ¦--SYMBOL: i [0/5] {46} + ¦ ¦ ¦--IN: in [1/1] {47} + ¦ ¦ ¦--expr: 3 [0/0] {49} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {48} + ¦ ¦ °--')': ) [0/0] {50} + ¦ °--expr: 3 [1/0] {52} + ¦ °--NUM_CONST: 3 [0/0] {51} + ¦--COMMENT: # sty [1/0] {53} + ¦--COMMENT: # sty [3/0] {54} + ¦--expr: for ( [1/0] {55} + ¦ ¦--FOR: for [0/1] {56} + ¦ ¦--forcond: (i + [0/2] {57} + ¦ ¦ ¦--'(': ( [0/0] {58} + ¦ ¦ ¦--SYMBOL: i [0/5] {59} + ¦ ¦ ¦--IN: in [1/1] {60} + ¦ ¦ ¦--expr: 3 [0/0] {62} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {61} + ¦ ¦ °--')': ) [0/0] {63} + ¦ °--expr: { + 3 [0/0] {64} + ¦ ¦--'{': { [0/2] {65} + ¦ ¦--expr: 3 [1/0] {67} + ¦ ¦ °--NUM_CONST: 3 [0/0] {66} + ¦ °--'}': } [0/0] {68} + ¦--COMMENT: # sty [1/0] {69} + ¦--expr: for ( [3/1] {70} + ¦ ¦--FOR: for [0/1] {71} + ¦ ¦--forcond: (i + [0/2] {72} + ¦ ¦ ¦--'(': ( [0/0] {73} + ¦ ¦ ¦--SYMBOL: i [0/5] {74} + ¦ ¦ ¦--IN: in [1/1] {75} + ¦ ¦ ¦--expr: 3 [0/0] {77} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {76} + ¦ ¦ 
°--')': ) [0/0] {78} + ¦ °--expr: { + 3 [0/0] {79} + ¦ ¦--'{': { [0/2] {80} + ¦ ¦--expr: 3 [1/0] {82} + ¦ ¦ °--NUM_CONST: 3 [0/0] {81} + ¦ °--'}': } [0/0] {83} + ¦--COMMENT: # sty [0/0] {84} + ¦--expr: for ( [2/0] {85} + ¦ ¦--FOR: for [0/1] {86} + ¦ ¦--forcond: (i + [0/2] {87} + ¦ ¦ ¦--'(': ( [0/0] {88} + ¦ ¦ ¦--SYMBOL: i [0/5] {89} + ¦ ¦ ¦--IN: in [1/1] {90} + ¦ ¦ ¦--expr: 3 [0/0] {92} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {91} + ¦ ¦ °--')': ) [0/0] {93} + ¦ °--expr: {# st [0/0] {94} + ¦ ¦--'{': { [0/0] {95} + ¦ ¦--COMMENT: # sty [0/2] {96} + ¦ ¦--expr: 3 [1/0] {98} + ¦ ¦ °--NUM_CONST: 3 [0/0] {97} + ¦ °--'}': } [0/0] {99} + ¦--expr: for ( [2/0] {100} + ¦ ¦--FOR: for [0/1] {101} + ¦ ¦--forcond: (i# s [0/2] {102} + ¦ ¦ ¦--'(': ( [0/0] {103} + ¦ ¦ ¦--SYMBOL: i [0/0] {104} + ¦ ¦ ¦--COMMENT: # sty [0/5] {105} + ¦ ¦ ¦--IN: in [1/1] {106} + ¦ ¦ ¦--expr: 3 [0/0] {108} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {107} + ¦ ¦ °--')': ) [0/0] {109} + ¦ °--expr: { + 3 [0/0] {110} + ¦ ¦--'{': { [0/2] {111} + ¦ ¦--expr: 3 [1/0] {113} + ¦ ¦ °--NUM_CONST: 3 [0/0] {112} + ¦ °--'}': } [0/0] {114} + ¦--expr: while [3/0] {115} + ¦ ¦--WHILE: while [0/0] {116} + ¦ ¦--'(': ( [0/2] {117} + ¦ ¦--expr: FALSE [1/0] {119} + ¦ ¦ °--NUM_CONST: FALSE [0/0] {118} + ¦ ¦--')': ) [1/1] {120} + ¦ °--expr: { + # [0/0] {121} + ¦ ¦--'{': { [0/2] {122} + ¦ ¦--COMMENT: # sty [1/2] {123} + ¦ ¦--expr: 1 [1/2] {125} + ¦ ¦ °--NUM_CONST: 1 [0/0] {124} + ¦ ¦--COMMENT: # sty [1/0] {126} + ¦ °--'}': } [1/0] {127} + ¦--expr: while [2/0] {128} + ¦ ¦--WHILE: while [0/0] {129} + ¦ ¦--'(': ( [0/2] {130} + ¦ ¦--expr: FALSE [1/1] {132} + ¦ ¦ °--NUM_CONST: FALSE [0/0] {131} + ¦ ¦--COMMENT: # com [0/0] {133} + ¦ ¦--')': ) [1/1] {134} + ¦ °--expr: { + # [0/0] {135} + ¦ ¦--'{': { [0/2] {136} + ¦ ¦--COMMENT: # sty [1/2] {137} + ¦ ¦--expr: 1 [1/2] {139} + ¦ ¦ °--NUM_CONST: 1 [0/0] {138} + ¦ ¦--COMMENT: # sty [1/0] {140} + ¦ °--'}': } [1/0] {141} + ¦--expr: while [2/0] {142} + ¦ ¦--WHILE: while [0/0] {143} + ¦ ¦--'(': ( [0/1] {144} + ¦ ¦--COMMENT: # sty [0/2] {145} + ¦ ¦--expr: FALSE [1/0] {147} + ¦ ¦ °--NUM_CONST: FALSE [0/0] {146} + ¦ ¦--')': ) [1/1] {148} + ¦ °--expr: { + + [0/0] {149} + ¦ ¦--'{': { [0/2] {150} + ¦ ¦--expr: 1 [2/0] {152} + ¦ ¦ °--NUM_CONST: 1 [0/0] {151} + ¦ °--'}': } [2/0] {153} + ¦--expr: while [2/0] {154} + ¦ ¦--WHILE: while [0/0] {155} + ¦ ¦--'(': ( [0/2] {156} + ¦ ¦--COMMENT: # sty [1/2] {157} + ¦ ¦--expr: FALSE [1/0] {159} + ¦ ¦ °--NUM_CONST: FALSE [0/0] {158} + ¦ ¦--')': ) [1/1] {160} + ¦ °--expr: { + + [0/0] {161} + ¦ ¦--'{': { [0/2] {162} + ¦ ¦--expr: 1 [2/0] {164} + ¦ ¦ °--NUM_CONST: 1 [0/0] {163} + ¦ °--'}': } [2/0] {165} + °--COMMENT: # sty [1/0] {166} diff --git a/tests/testthat/token_adding_removing/for_while_stylerignore-out.R b/tests/testthat/token_adding_removing/for_while_stylerignore-out.R new file mode 100644 index 000000000..1234fec74 --- /dev/null +++ b/tests/testthat/token_adding_removing/for_while_stylerignore-out.R @@ -0,0 +1,79 @@ +while (TRUE) { + 3 +} + +# styler: off +while(TRUE) + 3 + +# styler: on +while (TRUE) + # styler: off + 3 + +# styler: on + +for (i # styler: off + in 3) + 3 + +# styler: off +for (i + in 3) + 3 +# styler: on + + +# styler: off +for (i + in 3) { + 3} +# styler: on + + +for (i + in 3) { + 3} # styler: off + +for (i + in 3) {# styler: off + 3 +} + +for (i# styler: off + in 3) { + 3 +} + + +while ( + FALSE +) { + # styler: off + 1 + # styler: on +} + +while ( + FALSE # comment +) { + # styler: off + 1 + # styler: on +} + +while( # styler: off + FALSE +) { + 1 +} + +while ( + # styler: off + FALSE +) { 
+ + 1 + +} +# styler: on diff --git a/tests/testthat/token_adding_removing/if-else-comma-in.R b/tests/testthat/token_adding_removing/if-else-comma-in.R new file mode 100644 index 000000000..40d144c09 --- /dev/null +++ b/tests/testthat/token_adding_removing/if-else-comma-in.R @@ -0,0 +1,15 @@ +call( + if (x) + y, + if(x) + z +) + +call(if (x) y , + if(x) z ) + +call(if (x) y, + if(x) z ) + +call(if (x) y, + if(x) z) diff --git a/tests/testthat/token_adding_removing/if-else-comma-in_tree b/tests/testthat/token_adding_removing/if-else-comma-in_tree new file mode 100644 index 000000000..00da35b6a --- /dev/null +++ b/tests/testthat/token_adding_removing/if-else-comma-in_tree @@ -0,0 +1,89 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: call( [0/0] {1} + ¦ ¦--expr: call [0/0] {3} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {2} + ¦ ¦--'(': ( [0/2] {4} + ¦ ¦--expr: if (x [1/0] {5} + ¦ ¦ ¦--IF: if [0/1] {6} + ¦ ¦ ¦--'(': ( [0/0] {7} + ¦ ¦ ¦--expr: x [0/0] {9} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {8} + ¦ ¦ ¦--')': ) [0/4] {10} + ¦ ¦ °--expr: y [1/0] {12} + ¦ ¦ °--SYMBOL: y [0/0] {11} + ¦ ¦--',': , [0/2] {13} + ¦ ¦--expr: if(x) [1/0] {14} + ¦ ¦ ¦--IF: if [0/0] {15} + ¦ ¦ ¦--'(': ( [0/0] {16} + ¦ ¦ ¦--expr: x [0/0] {18} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {17} + ¦ ¦ ¦--')': ) [0/4] {19} + ¦ ¦ °--expr: z [1/0] {21} + ¦ ¦ °--SYMBOL: z [0/0] {20} + ¦ °--')': ) [1/0] {22} + ¦--expr: call( [2/0] {23} + ¦ ¦--expr: call [0/0] {25} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {24} + ¦ ¦--'(': ( [0/0] {26} + ¦ ¦--expr: if (x [0/1] {27} + ¦ ¦ ¦--IF: if [0/1] {28} + ¦ ¦ ¦--'(': ( [0/0] {29} + ¦ ¦ ¦--expr: x [0/0] {31} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {30} + ¦ ¦ ¦--')': ) [0/1] {32} + ¦ ¦ °--expr: y [0/0] {34} + ¦ ¦ °--SYMBOL: y [0/0] {33} + ¦ ¦--',': , [0/5] {35} + ¦ ¦--expr: if(x) [1/1] {36} + ¦ ¦ ¦--IF: if [0/0] {37} + ¦ ¦ ¦--'(': ( [0/0] {38} + ¦ ¦ ¦--expr: x [0/0] {40} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {39} + ¦ ¦ ¦--')': ) [0/1] {41} + ¦ ¦ °--expr: z [0/0] {43} + ¦ ¦ °--SYMBOL: z [0/0] {42} + ¦ °--')': ) [0/0] {44} + ¦--expr: call( [2/0] {45} + ¦ ¦--expr: call [0/0] {47} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {46} + ¦ ¦--'(': ( [0/0] {48} + ¦ ¦--expr: if (x [0/0] {49} + ¦ ¦ ¦--IF: if [0/1] {50} + ¦ ¦ ¦--'(': ( [0/0] {51} + ¦ ¦ ¦--expr: x [0/0] {53} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {52} + ¦ ¦ ¦--')': ) [0/1] {54} + ¦ ¦ °--expr: y [0/0] {56} + ¦ ¦ °--SYMBOL: y [0/0] {55} + ¦ ¦--',': , [0/5] {57} + ¦ ¦--expr: if(x) [1/1] {58} + ¦ ¦ ¦--IF: if [0/0] {59} + ¦ ¦ ¦--'(': ( [0/0] {60} + ¦ ¦ ¦--expr: x [0/0] {62} + ¦ ¦ ¦ °--SYMBOL: x [0/0] {61} + ¦ ¦ ¦--')': ) [0/1] {63} + ¦ ¦ °--expr: z [0/0] {65} + ¦ ¦ °--SYMBOL: z [0/0] {64} + ¦ °--')': ) [0/0] {66} + °--expr: call( [2/0] {67} + ¦--expr: call [0/0] {69} + ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {68} + ¦--'(': ( [0/0] {70} + ¦--expr: if (x [0/0] {71} + ¦ ¦--IF: if [0/1] {72} + ¦ ¦--'(': ( [0/0] {73} + ¦ ¦--expr: x [0/0] {75} + ¦ ¦ °--SYMBOL: x [0/0] {74} + ¦ ¦--')': ) [0/1] {76} + ¦ °--expr: y [0/0] {78} + ¦ °--SYMBOL: y [0/0] {77} + ¦--',': , [0/5] {79} + ¦--expr: if(x) [1/0] {80} + ¦ ¦--IF: if [0/0] {81} + ¦ ¦--'(': ( [0/0] {82} + ¦ ¦--expr: x [0/0] {84} + ¦ ¦ °--SYMBOL: x [0/0] {83} + ¦ ¦--')': ) [0/1] {85} + ¦ °--expr: z [0/0] {87} + ¦ °--SYMBOL: z [0/0] {86} + °--')': ) [0/0] {88} diff --git a/tests/testthat/token_adding_removing/if-else-comma-out.R b/tests/testthat/token_adding_removing/if-else-comma-out.R new file mode 100644 index 000000000..4bbf4bdee --- /dev/null +++ b/tests/testthat/token_adding_removing/if-else-comma-out.R @@ -0,0 +1,23 @@ +call( + if (x) { + y + }, + if 
(x) { + z + } +) + +call( + if (x) y, + if (x) z +) + +call( + if (x) y, + if (x) z +) + +call( + if (x) y, + if (x) z +) diff --git a/tests/testthat/token_adding_removing/if_else_non_strict-in_tree b/tests/testthat/token_adding_removing/if_else_non_strict-in_tree index 785d5f6e4..ad8fd9ee7 100644 --- a/tests/testthat/token_adding_removing/if_else_non_strict-in_tree +++ b/tests/testthat/token_adding_removing/if_else_non_strict-in_tree @@ -1,165 +1,173 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} + ¦--expr: { + i [0/0] {1} ¦ ¦--'{': { [0/2] {2} - ¦ ¦--expr: [1/0] {3} + ¦ ¦--expr: if (T [1/0] {3} ¦ ¦ ¦--IF: if [0/1] {4} ¦ ¦ ¦--'(': ( [0/0] {5} - ¦ ¦ ¦--expr: [0/0] {7} + ¦ ¦ ¦--expr: TRUE [0/0] {7} ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {6} ¦ ¦ ¦--')': ) [0/4] {8} - ¦ ¦ ¦--expr: [1/0] {10} + ¦ ¦ ¦--expr: 3 [1/0] {10} ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {9} ¦ ¦ ¦--ELSE: else [1/0] {11} - ¦ ¦ °--expr: [1/0] {13} + ¦ ¦ °--expr: 5 [1/0] {13} ¦ ¦ °--NUM_CONST: 5 [0/0] {12} ¦ °--'}': } [1/0] {14} - ¦--expr: [3/0] {15} + ¦--expr: { + i [3/0] {15} ¦ ¦--'{': { [0/2] {16} - ¦ ¦--expr: [1/3] {17} + ¦ ¦--expr: if (T [1/3] {17} ¦ ¦ ¦--IF: if [0/1] {18} ¦ ¦ ¦--'(': ( [0/0] {19} - ¦ ¦ ¦--expr: [0/0] {21} + ¦ ¦ ¦--expr: TRUE [0/0] {21} ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {20} ¦ ¦ ¦--')': ) [0/1] {22} - ¦ ¦ ¦--expr: [0/0] {23} + ¦ ¦ ¦--expr: { + [0/0] {23} ¦ ¦ ¦ ¦--'{': { [0/4] {24} - ¦ ¦ ¦ ¦--expr: [1/4] {26} + ¦ ¦ ¦ ¦--expr: 3 [1/4] {26} ¦ ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {25} - ¦ ¦ ¦ ¦--expr: [1/3] {27} - ¦ ¦ ¦ ¦ ¦--expr: [0/1] {29} + ¦ ¦ ¦ ¦--expr: a + b [1/3] {27} + ¦ ¦ ¦ ¦ ¦--expr: a [0/1] {29} ¦ ¦ ¦ ¦ ¦ °--SYMBOL: a [0/0] {28} ¦ ¦ ¦ ¦ ¦--'+': + [0/1] {30} - ¦ ¦ ¦ ¦ °--expr: [0/0] {32} + ¦ ¦ ¦ ¦ °--expr: b [0/0] {32} ¦ ¦ ¦ ¦ °--SYMBOL: b [0/0] {31} ¦ ¦ ¦ °--'}': } [1/0] {33} ¦ ¦ ¦--ELSE: else [0/4] {34} - ¦ ¦ °--expr: [1/0] {36} + ¦ ¦ °--expr: 5 [1/0] {36} ¦ ¦ °--NUM_CONST: 5 [0/0] {35} - ¦ ¦--expr: [2/0] {37} - ¦ ¦ ¦--expr: [0/0] {39} + ¦ ¦--expr: c() [2/0] {37} + ¦ ¦ ¦--expr: c [0/0] {39} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {38} ¦ ¦ ¦--'(': ( [0/0] {40} ¦ ¦ °--')': ) [0/0] {41} ¦ °--'}': } [1/0] {42} - ¦--expr: [3/0] {43} + ¦--expr: { + i [3/0] {43} ¦ ¦--'{': { [0/2] {44} - ¦ ¦--expr: [1/0] {45} + ¦ ¦--expr: if (T [1/0] {45} ¦ ¦ ¦--IF: if [0/1] {46} ¦ ¦ ¦--'(': ( [0/0] {47} - ¦ ¦ ¦--expr: [0/0] {49} + ¦ ¦ ¦--expr: TRUE [0/0] {49} ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {48} ¦ ¦ ¦--')': ) [0/4] {50} - ¦ ¦ ¦--expr: [1/2] {52} + ¦ ¦ ¦--expr: 3 [1/2] {52} ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {51} ¦ ¦ ¦--ELSE: else [1/1] {53} - ¦ ¦ °--expr: [0/0] {54} + ¦ ¦ °--expr: { + [0/0] {54} ¦ ¦ ¦--'{': { [0/4] {55} - ¦ ¦ ¦--expr: [1/4] {56} - ¦ ¦ ¦ ¦--expr: [0/0] {58} + ¦ ¦ ¦--expr: h() [1/4] {56} + ¦ ¦ ¦ ¦--expr: h [0/0] {58} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: h [0/0] {57} ¦ ¦ ¦ ¦--'(': ( [0/0] {59} ¦ ¦ ¦ °--')': ) [0/0] {60} - ¦ ¦ ¦--expr: [1/1] {62} + ¦ ¦ ¦--expr: 5 [1/1] {62} ¦ ¦ ¦ °--NUM_CONST: 5 [0/0] {61} ¦ ¦ °--'}': } [0/0] {63} ¦ °--'}': } [1/0] {64} - ¦--expr: [3/0] {65} + ¦--expr: { + i [3/0] {65} ¦ ¦--'{': { [0/2] {66} - ¦ ¦--expr: [1/0] {67} + ¦ ¦--expr: if (T [1/0] {67} ¦ ¦ ¦--IF: if [0/1] {68} ¦ ¦ ¦--'(': ( [0/0] {69} - ¦ ¦ ¦--expr: [0/0] {71} + ¦ ¦ ¦--expr: TRUE [0/0] {71} ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {70} ¦ ¦ ¦--')': ) [0/1] {72} - ¦ ¦ ¦--expr: [0/0] {73} + ¦ ¦ ¦--expr: { + [0/0] {73} ¦ ¦ ¦ ¦--'{': { [0/4] {74} - ¦ ¦ ¦ ¦--expr: [1/2] {76} + ¦ ¦ ¦ ¦--expr: 3 [1/2] {76} ¦ ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {75} ¦ ¦ ¦ °--'}': } [1/0] {77} ¦ ¦ ¦--ELSE: else [0/1] {78} - ¦ ¦ °--expr: [0/0] {79} + ¦ ¦ °--expr: { + 
[0/0] {79} ¦ ¦ ¦--'{': { [0/4] {80} - ¦ ¦ ¦--expr: [1/4] {81} - ¦ ¦ ¦ ¦--expr: [0/0] {83} + ¦ ¦ ¦--expr: s() [1/4] {81} + ¦ ¦ ¦ ¦--expr: s [0/0] {83} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: s [0/0] {82} ¦ ¦ ¦ ¦--'(': ( [0/0] {84} ¦ ¦ ¦ °--')': ) [0/0] {85} - ¦ ¦ ¦--expr: [1/1] {87} + ¦ ¦ ¦--expr: 5 [1/1] {87} ¦ ¦ ¦ °--NUM_CONST: 5 [0/0] {86} ¦ ¦ °--'}': } [0/0] {88} ¦ °--'}': } [1/0] {89} - ¦--expr: [2/0] {90} + ¦--expr: if (T [2/0] {90} ¦ ¦--IF: if [0/1] {91} ¦ ¦--'(': ( [0/0] {92} - ¦ ¦--expr: [0/0] {94} + ¦ ¦--expr: TRUE [0/0] {94} ¦ ¦ °--NUM_CONST: TRUE [0/0] {93} ¦ ¦--')': ) [0/2] {95} - ¦ ¦--expr: [1/1] {97} + ¦ ¦--expr: 1 [1/1] {97} ¦ ¦ °--NUM_CONST: 1 [0/0] {96} ¦ ¦--ELSE: else [0/4] {98} - ¦ °--expr: [1/0] {100} + ¦ °--expr: 3 [1/0] {100} ¦ °--NUM_CONST: 3 [0/0] {99} - ¦--expr: [2/0] {101} + ¦--expr: if (F [2/0] {101} ¦ ¦--IF: if [0/1] {102} ¦ ¦--'(': ( [0/0] {103} - ¦ ¦--expr: [0/0] {105} + ¦ ¦--expr: FALSE [0/0] {105} ¦ ¦ °--NUM_CONST: FALSE [0/0] {104} ¦ ¦--')': ) [0/2] {106} - ¦ ¦--expr: [1/1] {107} - ¦ ¦ ¦--expr: [0/1] {109} + ¦ ¦--expr: 1 + a [1/1] {107} + ¦ ¦ ¦--expr: 1 [0/1] {109} ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {108} ¦ ¦ ¦--'+': + [0/1] {110} - ¦ ¦ °--expr: [0/0] {111} - ¦ ¦ ¦--expr: [0/1] {113} + ¦ ¦ °--expr: a * ( [0/0] {111} + ¦ ¦ ¦--expr: a [0/1] {113} ¦ ¦ ¦ °--SYMBOL: a [0/0] {112} ¦ ¦ ¦--'*': * [0/1] {114} - ¦ ¦ °--expr: [0/0] {115} + ¦ ¦ °--expr: ( 31/ [0/0] {115} ¦ ¦ ¦--'(': ( [0/1] {116} - ¦ ¦ ¦--expr: [0/0] {117} - ¦ ¦ ¦ ¦--expr: [0/0] {119} + ¦ ¦ ¦--expr: 31/2 [0/0] {117} + ¦ ¦ ¦ ¦--expr: 31 [0/0] {119} ¦ ¦ ¦ ¦ °--NUM_CONST: 31 [0/0] {118} ¦ ¦ ¦ ¦--'/': / [0/0] {120} - ¦ ¦ ¦ °--expr: [0/0] {122} + ¦ ¦ ¦ °--expr: 2 [0/0] {122} ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {121} ¦ ¦ °--')': ) [0/0] {123} ¦ ¦--ELSE: else [0/4] {124} - ¦ °--expr: [1/0] {125} - ¦ ¦--expr: [0/0] {127} + ¦ °--expr: 3^k [1/0] {125} + ¦ ¦--expr: 3 [0/0] {127} ¦ ¦ °--NUM_CONST: 3 [0/0] {126} ¦ ¦--'^': ^ [0/0] {128} - ¦ °--expr: [0/0] {130} + ¦ °--expr: k [0/0] {130} ¦ °--SYMBOL: k [0/0] {129} - ¦--expr: [3/0] {131} + ¦--expr: if (T [3/0] {131} ¦ ¦--IF: if [0/1] {132} ¦ ¦--'(': ( [0/0] {133} - ¦ ¦--expr: [0/0] {135} + ¦ ¦--expr: TRUE [0/0] {135} ¦ ¦ °--NUM_CONST: TRUE [0/0] {134} ¦ ¦--')': ) [0/2] {136} - ¦ ¦--expr: [1/1] {137} - ¦ ¦ ¦--expr: [0/0] {139} + ¦ ¦--expr: 1+1 [1/1] {137} + ¦ ¦ ¦--expr: 1 [0/0] {139} ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {138} ¦ ¦ ¦--'+': + [0/0] {140} - ¦ ¦ °--expr: [0/0] {142} + ¦ ¦ °--expr: 1 [0/0] {142} ¦ ¦ °--NUM_CONST: 1 [0/0] {141} ¦ ¦--ELSE: else [0/4] {143} - ¦ °--expr: [1/0] {145} + ¦ °--expr: 3 [1/0] {145} ¦ °--NUM_CONST: 3 [0/0] {144} - °--expr: [2/0] {146} + °--expr: if (T [2/0] {146} ¦--IF: if [0/1] {147} ¦--'(': ( [0/0] {148} - ¦--expr: [0/0] {150} + ¦--expr: TRUE [0/0] {150} ¦ °--NUM_CONST: TRUE [0/0] {149} ¦--')': ) [0/2] {151} - ¦--expr: [1/1] {152} - ¦ ¦--expr: [0/1] {154} + ¦--expr: 1 + 1 [1/1] {152} + ¦ ¦--expr: 1 [0/1] {154} ¦ ¦ °--NUM_CONST: 1 [0/0] {153} ¦ ¦--'+': + [0/1] {155} - ¦ °--expr: [0/0] {157} + ¦ °--expr: 1 [0/0] {157} ¦ °--NUM_CONST: 1 [0/0] {156} ¦--ELSE: else [0/1] {158} - °--expr: [0/0] {159} - ¦--expr: [0/1] {161} + °--expr: a +4 [0/0] {159} + ¦--expr: a [0/1] {161} ¦ °--SYMBOL: a [0/0] {160} ¦--'+': + [0/0] {162} - °--expr: [0/0] {164} + °--expr: 4 [0/0] {164} °--NUM_CONST: 4 [0/0] {163} diff --git a/tests/testthat/token_adding_removing/if_else_non_strict-out.R b/tests/testthat/token_adding_removing/if_else_non_strict-out.R index 6ebf7483c..26362f481 100644 --- a/tests/testthat/token_adding_removing/if_else_non_strict-out.R +++ 
b/tests/testthat/token_adding_removing/if_else_non_strict-out.R @@ -42,7 +42,7 @@ if (TRUE) if (FALSE) 1 + a * (31 / 2) else - 3 ^ k + 3^k if (TRUE) diff --git a/tests/testthat/token_adding_removing/if_else_strict-in_tree b/tests/testthat/token_adding_removing/if_else_strict-in_tree index 700808bf1..e5d3a6155 100644 --- a/tests/testthat/token_adding_removing/if_else_strict-in_tree +++ b/tests/testthat/token_adding_removing/if_else_strict-in_tree @@ -1,165 +1,173 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} + ¦--expr: { + i [0/0] {1} ¦ ¦--'{': { [0/2] {2} - ¦ ¦--expr: [1/0] {3} + ¦ ¦--expr: if (T [1/0] {3} ¦ ¦ ¦--IF: if [0/1] {4} ¦ ¦ ¦--'(': ( [0/0] {5} - ¦ ¦ ¦--expr: [0/0] {7} + ¦ ¦ ¦--expr: TRUE [0/0] {7} ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {6} ¦ ¦ ¦--')': ) [0/4] {8} - ¦ ¦ ¦--expr: [1/0] {10} + ¦ ¦ ¦--expr: 3 [1/0] {10} ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {9} ¦ ¦ ¦--ELSE: else [1/0] {11} - ¦ ¦ °--expr: [1/0] {13} + ¦ ¦ °--expr: 5 [1/0] {13} ¦ ¦ °--NUM_CONST: 5 [0/0] {12} ¦ °--'}': } [1/0] {14} - ¦--expr: [3/0] {15} + ¦--expr: { + i [3/0] {15} ¦ ¦--'{': { [0/2] {16} - ¦ ¦--expr: [1/3] {17} + ¦ ¦--expr: if (T [1/3] {17} ¦ ¦ ¦--IF: if [0/1] {18} ¦ ¦ ¦--'(': ( [0/0] {19} - ¦ ¦ ¦--expr: [0/0] {21} + ¦ ¦ ¦--expr: TRUE [0/0] {21} ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {20} ¦ ¦ ¦--')': ) [0/1] {22} - ¦ ¦ ¦--expr: [0/0] {23} + ¦ ¦ ¦--expr: { + [0/0] {23} ¦ ¦ ¦ ¦--'{': { [0/4] {24} - ¦ ¦ ¦ ¦--expr: [1/4] {26} + ¦ ¦ ¦ ¦--expr: 3 [1/4] {26} ¦ ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {25} - ¦ ¦ ¦ ¦--expr: [1/3] {27} - ¦ ¦ ¦ ¦ ¦--expr: [0/1] {29} + ¦ ¦ ¦ ¦--expr: a + b [1/3] {27} + ¦ ¦ ¦ ¦ ¦--expr: a [0/1] {29} ¦ ¦ ¦ ¦ ¦ °--SYMBOL: a [0/0] {28} ¦ ¦ ¦ ¦ ¦--'+': + [0/1] {30} - ¦ ¦ ¦ ¦ °--expr: [0/0] {32} + ¦ ¦ ¦ ¦ °--expr: b [0/0] {32} ¦ ¦ ¦ ¦ °--SYMBOL: b [0/0] {31} ¦ ¦ ¦ °--'}': } [1/0] {33} ¦ ¦ ¦--ELSE: else [0/4] {34} - ¦ ¦ °--expr: [1/0] {36} + ¦ ¦ °--expr: 5 [1/0] {36} ¦ ¦ °--NUM_CONST: 5 [0/0] {35} - ¦ ¦--expr: [2/0] {37} - ¦ ¦ ¦--expr: [0/0] {39} + ¦ ¦--expr: c() [2/0] {37} + ¦ ¦ ¦--expr: c [0/0] {39} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {38} ¦ ¦ ¦--'(': ( [0/0] {40} ¦ ¦ °--')': ) [0/0] {41} ¦ °--'}': } [1/0] {42} - ¦--expr: [3/0] {43} + ¦--expr: { + i [3/0] {43} ¦ ¦--'{': { [0/2] {44} - ¦ ¦--expr: [1/0] {45} + ¦ ¦--expr: if (T [1/0] {45} ¦ ¦ ¦--IF: if [0/1] {46} ¦ ¦ ¦--'(': ( [0/0] {47} - ¦ ¦ ¦--expr: [0/0] {49} + ¦ ¦ ¦--expr: TRUE [0/0] {49} ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {48} ¦ ¦ ¦--')': ) [0/4] {50} - ¦ ¦ ¦--expr: [1/2] {52} + ¦ ¦ ¦--expr: 3 [1/2] {52} ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {51} ¦ ¦ ¦--ELSE: else [1/1] {53} - ¦ ¦ °--expr: [0/0] {54} + ¦ ¦ °--expr: { + [0/0] {54} ¦ ¦ ¦--'{': { [0/4] {55} - ¦ ¦ ¦--expr: [1/4] {56} - ¦ ¦ ¦ ¦--expr: [0/0] {58} + ¦ ¦ ¦--expr: h() [1/4] {56} + ¦ ¦ ¦ ¦--expr: h [0/0] {58} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: h [0/0] {57} ¦ ¦ ¦ ¦--'(': ( [0/0] {59} ¦ ¦ ¦ °--')': ) [0/0] {60} - ¦ ¦ ¦--expr: [1/1] {62} + ¦ ¦ ¦--expr: 5 [1/1] {62} ¦ ¦ ¦ °--NUM_CONST: 5 [0/0] {61} ¦ ¦ °--'}': } [0/0] {63} ¦ °--'}': } [1/0] {64} - ¦--expr: [3/0] {65} + ¦--expr: { + i [3/0] {65} ¦ ¦--'{': { [0/2] {66} - ¦ ¦--expr: [1/0] {67} + ¦ ¦--expr: if (T [1/0] {67} ¦ ¦ ¦--IF: if [0/1] {68} ¦ ¦ ¦--'(': ( [0/0] {69} - ¦ ¦ ¦--expr: [0/0] {71} + ¦ ¦ ¦--expr: TRUE [0/0] {71} ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {70} ¦ ¦ ¦--')': ) [0/1] {72} - ¦ ¦ ¦--expr: [0/0] {73} + ¦ ¦ ¦--expr: { + [0/0] {73} ¦ ¦ ¦ ¦--'{': { [0/4] {74} - ¦ ¦ ¦ ¦--expr: [1/2] {76} + ¦ ¦ ¦ ¦--expr: 3 [1/2] {76} ¦ ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {75} ¦ ¦ ¦ °--'}': } [1/0] {77} ¦ ¦ ¦--ELSE: else [0/1] {78} - ¦ ¦ °--expr: [0/0] {79} + ¦ ¦ 
°--expr: { + [0/0] {79} ¦ ¦ ¦--'{': { [0/4] {80} - ¦ ¦ ¦--expr: [1/4] {81} - ¦ ¦ ¦ ¦--expr: [0/0] {83} + ¦ ¦ ¦--expr: s() [1/4] {81} + ¦ ¦ ¦ ¦--expr: s [0/0] {83} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: s [0/0] {82} ¦ ¦ ¦ ¦--'(': ( [0/0] {84} ¦ ¦ ¦ °--')': ) [0/0] {85} - ¦ ¦ ¦--expr: [1/1] {87} + ¦ ¦ ¦--expr: 5 [1/1] {87} ¦ ¦ ¦ °--NUM_CONST: 5 [0/0] {86} ¦ ¦ °--'}': } [0/0] {88} ¦ °--'}': } [1/0] {89} - ¦--expr: [2/0] {90} + ¦--expr: if (T [2/0] {90} ¦ ¦--IF: if [0/1] {91} ¦ ¦--'(': ( [0/0] {92} - ¦ ¦--expr: [0/0] {94} + ¦ ¦--expr: TRUE [0/0] {94} ¦ ¦ °--NUM_CONST: TRUE [0/0] {93} ¦ ¦--')': ) [0/2] {95} - ¦ ¦--expr: [1/1] {97} + ¦ ¦--expr: 1 [1/1] {97} ¦ ¦ °--NUM_CONST: 1 [0/0] {96} ¦ ¦--ELSE: else [0/2] {98} - ¦ °--expr: [1/0] {100} + ¦ °--expr: 3 [1/0] {100} ¦ °--NUM_CONST: 3 [0/0] {99} - ¦--expr: [2/0] {101} + ¦--expr: if (F [2/0] {101} ¦ ¦--IF: if [0/1] {102} ¦ ¦--'(': ( [0/0] {103} - ¦ ¦--expr: [0/0] {105} + ¦ ¦--expr: FALSE [0/0] {105} ¦ ¦ °--NUM_CONST: FALSE [0/0] {104} ¦ ¦--')': ) [0/2] {106} - ¦ ¦--expr: [1/1] {107} - ¦ ¦ ¦--expr: [0/1] {109} + ¦ ¦--expr: 1 + a [1/1] {107} + ¦ ¦ ¦--expr: 1 [0/1] {109} ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {108} ¦ ¦ ¦--'+': + [0/1] {110} - ¦ ¦ °--expr: [0/0] {111} - ¦ ¦ ¦--expr: [0/1] {113} + ¦ ¦ °--expr: a * ( [0/0] {111} + ¦ ¦ ¦--expr: a [0/1] {113} ¦ ¦ ¦ °--SYMBOL: a [0/0] {112} ¦ ¦ ¦--'*': * [0/1] {114} - ¦ ¦ °--expr: [0/0] {115} + ¦ ¦ °--expr: ( 31/ [0/0] {115} ¦ ¦ ¦--'(': ( [0/1] {116} - ¦ ¦ ¦--expr: [0/0] {117} - ¦ ¦ ¦ ¦--expr: [0/0] {119} + ¦ ¦ ¦--expr: 31/2 [0/0] {117} + ¦ ¦ ¦ ¦--expr: 31 [0/0] {119} ¦ ¦ ¦ ¦ °--NUM_CONST: 31 [0/0] {118} ¦ ¦ ¦ ¦--'/': / [0/0] {120} - ¦ ¦ ¦ °--expr: [0/0] {122} + ¦ ¦ ¦ °--expr: 2 [0/0] {122} ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {121} ¦ ¦ °--')': ) [0/0] {123} ¦ ¦--ELSE: else [0/2] {124} - ¦ °--expr: [1/0] {125} - ¦ ¦--expr: [0/0] {127} + ¦ °--expr: 3^k [1/0] {125} + ¦ ¦--expr: 3 [0/0] {127} ¦ ¦ °--NUM_CONST: 3 [0/0] {126} ¦ ¦--'^': ^ [0/0] {128} - ¦ °--expr: [0/0] {130} + ¦ °--expr: k [0/0] {130} ¦ °--SYMBOL: k [0/0] {129} - ¦--expr: [3/0] {131} + ¦--expr: if (T [3/0] {131} ¦ ¦--IF: if [0/1] {132} ¦ ¦--'(': ( [0/0] {133} - ¦ ¦--expr: [0/0] {135} + ¦ ¦--expr: TRUE [0/0] {135} ¦ ¦ °--NUM_CONST: TRUE [0/0] {134} ¦ ¦--')': ) [0/2] {136} - ¦ ¦--expr: [1/1] {137} - ¦ ¦ ¦--expr: [0/0] {139} + ¦ ¦--expr: 1+1 [1/1] {137} + ¦ ¦ ¦--expr: 1 [0/0] {139} ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {138} ¦ ¦ ¦--'+': + [0/0] {140} - ¦ ¦ °--expr: [0/0] {142} + ¦ ¦ °--expr: 1 [0/0] {142} ¦ ¦ °--NUM_CONST: 1 [0/0] {141} ¦ ¦--ELSE: else [0/4] {143} - ¦ °--expr: [1/0] {145} + ¦ °--expr: 3 [1/0] {145} ¦ °--NUM_CONST: 3 [0/0] {144} - °--expr: [3/0] {146} + °--expr: if (T [3/0] {146} ¦--IF: if [0/1] {147} ¦--'(': ( [0/0] {148} - ¦--expr: [0/0] {150} + ¦--expr: TRUE [0/0] {150} ¦ °--NUM_CONST: TRUE [0/0] {149} ¦--')': ) [0/2] {151} - ¦--expr: [1/1] {152} - ¦ ¦--expr: [0/1] {154} + ¦--expr: 1 + 1 [1/1] {152} + ¦ ¦--expr: 1 [0/1] {154} ¦ ¦ °--NUM_CONST: 1 [0/0] {153} ¦ ¦--'+': + [0/1] {155} - ¦ °--expr: [0/0] {157} + ¦ °--expr: 1 [0/0] {157} ¦ °--NUM_CONST: 1 [0/0] {156} ¦--ELSE: else [0/1] {158} - °--expr: [0/0] {159} - ¦--expr: [0/1] {161} + °--expr: a +4 [0/0] {159} + ¦--expr: a [0/1] {161} ¦ °--SYMBOL: a [0/0] {160} ¦--'+': + [0/0] {162} - °--expr: [0/0] {164} + °--expr: 4 [0/0] {164} °--NUM_CONST: 4 [0/0] {163} diff --git a/tests/testthat/token_adding_removing/if_else_strict-out.R b/tests/testthat/token_adding_removing/if_else_strict-out.R index 205069f4e..45cae9432 100644 --- a/tests/testthat/token_adding_removing/if_else_strict-out.R +++ 
b/tests/testthat/token_adding_removing/if_else_strict-out.R @@ -47,7 +47,7 @@ if (TRUE) { if (FALSE) { 1 + a * (31 / 2) } else { - 3 ^ k + 3^k } diff --git a/tests/testthat/token_adding_removing/if_else_stylerignore-in.R b/tests/testthat/token_adding_removing/if_else_stylerignore-in.R new file mode 100644 index 000000000..a52401cdb --- /dev/null +++ b/tests/testthat/token_adding_removing/if_else_stylerignore-in.R @@ -0,0 +1,64 @@ +a =1 +b=3 +k = 9 +h <- function() 1 +s <- h +{ + if (TRUE) # styler: off + 3 + else + 5 # styler: off +} + + +{ + if (TRUE) { # styler: off + 3 + a + b + }else + 5 # styler: off + + c() +} + +# styler: off +{ + if (TRUE) + 3 + else { + h() + 5 } +} +# styler: on + +{ + if (TRUE) { + 3 # styler: off + }else { + s() + 5 } +} + +if (TRUE) # styler: off + 1 else + 3 + +if (FALSE) # styler: off + 1 + a * ( 31/2) else + 3^k + + +if (TRUE) + 1+1 else # styler: off + 3 + +if (TRUE) + 1 + 1 else a +4 + +# styler: off +{if (TRUE) + 3 +else + 5 +} +# styler: on diff --git a/tests/testthat/token_adding_removing/if_else_stylerignore-in_tree b/tests/testthat/token_adding_removing/if_else_stylerignore-in_tree new file mode 100644 index 000000000..77a7ae18b --- /dev/null +++ b/tests/testthat/token_adding_removing/if_else_stylerignore-in_tree @@ -0,0 +1,232 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr_or_assign_or_help: a =1 [0/0] {1} + ¦ ¦--expr: a [0/1] {3} + ¦ ¦ °--SYMBOL: a [0/0] {2} + ¦ ¦--EQ_ASSIGN: = [0/0] {4} + ¦ °--expr: 1 [0/0] {6} + ¦ °--NUM_CONST: 1 [0/0] {5} + ¦--expr_or_assign_or_help: b=3 [1/0] {7} + ¦ ¦--expr: b [0/0] {9} + ¦ ¦ °--SYMBOL: b [0/0] {8} + ¦ ¦--EQ_ASSIGN: = [0/0] {10} + ¦ °--expr: 3 [0/0] {12} + ¦ °--NUM_CONST: 3 [0/0] {11} + ¦--expr_or_assign_or_help: k = 9 [1/0] {13} + ¦ ¦--expr: k [0/1] {15} + ¦ ¦ °--SYMBOL: k [0/0] {14} + ¦ ¦--EQ_ASSIGN: = [0/1] {16} + ¦ °--expr: 9 [0/0] {18} + ¦ °--NUM_CONST: 9 [0/0] {17} + ¦--expr: h <- [1/0] {19} + ¦ ¦--expr: h [0/1] {21} + ¦ ¦ °--SYMBOL: h [0/0] {20} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {22} + ¦ °--expr: funct [0/0] {23} + ¦ ¦--FUNCTION: funct [0/0] {24} + ¦ ¦--'(': ( [0/0] {25} + ¦ ¦--')': ) [0/1] {26} + ¦ °--expr: 1 [0/0] {28} + ¦ °--NUM_CONST: 1 [0/0] {27} + ¦--expr: s <- [1/0] {29} + ¦ ¦--expr: s [0/1] {31} + ¦ ¦ °--SYMBOL: s [0/0] {30} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {32} + ¦ °--expr: h [0/0] {34} + ¦ °--SYMBOL: h [0/0] {33} + ¦--expr: { + i [1/0] {35} + ¦ ¦--'{': { [0/2] {36} + ¦ ¦--expr: if (T [1/1] {37} + ¦ ¦ ¦--IF: if [0/1] {38} + ¦ ¦ ¦--'(': ( [0/0] {39} + ¦ ¦ ¦--expr: TRUE [0/0] {41} + ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {40} + ¦ ¦ ¦--')': ) [0/1] {42} + ¦ ¦ ¦--COMMENT: # sty [0/4] {43} + ¦ ¦ ¦--expr: 3 [1/2] {45} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {44} + ¦ ¦ ¦--ELSE: else [1/4] {46} + ¦ ¦ °--expr: 5 [1/0] {48} + ¦ ¦ °--NUM_CONST: 5 [0/0] {47} + ¦ ¦--COMMENT: # sty [0/0] {49} + ¦ °--'}': } [1/0] {50} + ¦--expr: { + i [3/0] {51} + ¦ ¦--'{': { [0/2] {52} + ¦ ¦--expr: if (T [1/1] {53} + ¦ ¦ ¦--IF: if [0/1] {54} + ¦ ¦ ¦--'(': ( [0/0] {55} + ¦ ¦ ¦--expr: TRUE [0/0] {57} + ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {56} + ¦ ¦ ¦--')': ) [0/1] {58} + ¦ ¦ ¦--expr: { # s [0/0] {59} + ¦ ¦ ¦ ¦--'{': { [0/1] {60} + ¦ ¦ ¦ ¦--COMMENT: # sty [0/4] {61} + ¦ ¦ ¦ ¦--expr: 3 [1/4] {63} + ¦ ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {62} + ¦ ¦ ¦ ¦--expr: a + b [1/2] {64} + ¦ ¦ ¦ ¦ ¦--expr: a [0/1] {66} + ¦ ¦ ¦ ¦ ¦ °--SYMBOL: a [0/0] {65} + ¦ ¦ ¦ ¦ ¦--'+': + [0/1] {67} + ¦ ¦ ¦ ¦ °--expr: b [0/0] {69} + ¦ ¦ ¦ ¦ °--SYMBOL: b [0/0] {68} + ¦ ¦ ¦ °--'}': } [1/0] {70} + ¦ ¦ ¦--ELSE: else [0/4] {71} + ¦ ¦ °--expr: 5 [1/0] {73} + ¦ ¦ °--NUM_CONST: 5 
[0/0] {72} + ¦ ¦--COMMENT: # sty [0/2] {74} + ¦ ¦--expr: c() [2/0] {75} + ¦ ¦ ¦--expr: c [0/0] {77} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {76} + ¦ ¦ ¦--'(': ( [0/0] {78} + ¦ ¦ °--')': ) [0/0] {79} + ¦ °--'}': } [1/0] {80} + ¦--COMMENT: # sty [2/0] {81} + ¦--expr: { + i [1/0] {82} + ¦ ¦--'{': { [0/2] {83} + ¦ ¦--expr: if (T [1/0] {84} + ¦ ¦ ¦--IF: if [0/1] {85} + ¦ ¦ ¦--'(': ( [0/0] {86} + ¦ ¦ ¦--expr: TRUE [0/0] {88} + ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {87} + ¦ ¦ ¦--')': ) [0/4] {89} + ¦ ¦ ¦--expr: 3 [1/2] {91} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {90} + ¦ ¦ ¦--ELSE: else [1/1] {92} + ¦ ¦ °--expr: { + [0/0] {93} + ¦ ¦ ¦--'{': { [0/4] {94} + ¦ ¦ ¦--expr: h() [1/4] {95} + ¦ ¦ ¦ ¦--expr: h [0/0] {97} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: h [0/0] {96} + ¦ ¦ ¦ ¦--'(': ( [0/0] {98} + ¦ ¦ ¦ °--')': ) [0/0] {99} + ¦ ¦ ¦--expr: 5 [1/1] {101} + ¦ ¦ ¦ °--NUM_CONST: 5 [0/0] {100} + ¦ ¦ °--'}': } [0/0] {102} + ¦ °--'}': } [1/0] {103} + ¦--COMMENT: # sty [1/0] {104} + ¦--expr: { + i [2/0] {105} + ¦ ¦--'{': { [0/2] {106} + ¦ ¦--expr: if (T [1/0] {107} + ¦ ¦ ¦--IF: if [0/1] {108} + ¦ ¦ ¦--'(': ( [0/0] {109} + ¦ ¦ ¦--expr: TRUE [0/0] {111} + ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {110} + ¦ ¦ ¦--')': ) [0/1] {112} + ¦ ¦ ¦--expr: { + [0/0] {113} + ¦ ¦ ¦ ¦--'{': { [0/4] {114} + ¦ ¦ ¦ ¦--expr: 3 [1/1] {116} + ¦ ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {115} + ¦ ¦ ¦ ¦--COMMENT: # sty [0/2] {117} + ¦ ¦ ¦ °--'}': } [1/0] {118} + ¦ ¦ ¦--ELSE: else [0/1] {119} + ¦ ¦ °--expr: { + [0/0] {120} + ¦ ¦ ¦--'{': { [0/4] {121} + ¦ ¦ ¦--expr: s() [1/4] {122} + ¦ ¦ ¦ ¦--expr: s [0/0] {124} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: s [0/0] {123} + ¦ ¦ ¦ ¦--'(': ( [0/0] {125} + ¦ ¦ ¦ °--')': ) [0/0] {126} + ¦ ¦ ¦--expr: 5 [1/1] {128} + ¦ ¦ ¦ °--NUM_CONST: 5 [0/0] {127} + ¦ ¦ °--'}': } [0/0] {129} + ¦ °--'}': } [1/0] {130} + ¦--expr: if (T [2/0] {131} + ¦ ¦--IF: if [0/1] {132} + ¦ ¦--'(': ( [0/0] {133} + ¦ ¦--expr: TRUE [0/0] {135} + ¦ ¦ °--NUM_CONST: TRUE [0/0] {134} + ¦ ¦--')': ) [0/1] {136} + ¦ ¦--COMMENT: # sty [0/2] {137} + ¦ ¦--expr: 1 [1/1] {139} + ¦ ¦ °--NUM_CONST: 1 [0/0] {138} + ¦ ¦--ELSE: else [0/4] {140} + ¦ °--expr: 3 [1/0] {142} + ¦ °--NUM_CONST: 3 [0/0] {141} + ¦--expr: if (F [2/0] {143} + ¦ ¦--IF: if [0/1] {144} + ¦ ¦--'(': ( [0/0] {145} + ¦ ¦--expr: FALSE [0/0] {147} + ¦ ¦ °--NUM_CONST: FALSE [0/0] {146} + ¦ ¦--')': ) [0/1] {148} + ¦ ¦--COMMENT: # sty [0/2] {149} + ¦ ¦--expr: 1 + a [1/1] {150} + ¦ ¦ ¦--expr: 1 [0/1] {152} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {151} + ¦ ¦ ¦--'+': + [0/1] {153} + ¦ ¦ °--expr: a * ( [0/0] {154} + ¦ ¦ ¦--expr: a [0/1] {156} + ¦ ¦ ¦ °--SYMBOL: a [0/0] {155} + ¦ ¦ ¦--'*': * [0/1] {157} + ¦ ¦ °--expr: ( 31/ [0/0] {158} + ¦ ¦ ¦--'(': ( [0/1] {159} + ¦ ¦ ¦--expr: 31/2 [0/0] {160} + ¦ ¦ ¦ ¦--expr: 31 [0/0] {162} + ¦ ¦ ¦ ¦ °--NUM_CONST: 31 [0/0] {161} + ¦ ¦ ¦ ¦--'/': / [0/0] {163} + ¦ ¦ ¦ °--expr: 2 [0/0] {165} + ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {164} + ¦ ¦ °--')': ) [0/0] {166} + ¦ ¦--ELSE: else [0/4] {167} + ¦ °--expr: 3^k [1/0] {168} + ¦ ¦--expr: 3 [0/0] {170} + ¦ ¦ °--NUM_CONST: 3 [0/0] {169} + ¦ ¦--'^': ^ [0/0] {171} + ¦ °--expr: k [0/0] {173} + ¦ °--SYMBOL: k [0/0] {172} + ¦--expr: if (T [3/0] {174} + ¦ ¦--IF: if [0/1] {175} + ¦ ¦--'(': ( [0/0] {176} + ¦ ¦--expr: TRUE [0/0] {178} + ¦ ¦ °--NUM_CONST: TRUE [0/0] {177} + ¦ ¦--')': ) [0/2] {179} + ¦ ¦--expr: 1+1 [1/1] {180} + ¦ ¦ ¦--expr: 1 [0/0] {182} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {181} + ¦ ¦ ¦--'+': + [0/0] {183} + ¦ ¦ °--expr: 1 [0/0] {185} + ¦ ¦ °--NUM_CONST: 1 [0/0] {184} + ¦ ¦--ELSE: else [0/1] {186} + ¦ ¦--COMMENT: # sty [0/4] {187} + ¦ °--expr: 3 [1/0] {189} + ¦ °--NUM_CONST: 3 
[0/0] {188} + ¦--expr: if (T [2/0] {190} + ¦ ¦--IF: if [0/1] {191} + ¦ ¦--'(': ( [0/0] {192} + ¦ ¦--expr: TRUE [0/0] {194} + ¦ ¦ °--NUM_CONST: TRUE [0/0] {193} + ¦ ¦--')': ) [0/2] {195} + ¦ ¦--expr: 1 + 1 [1/1] {196} + ¦ ¦ ¦--expr: 1 [0/1] {198} + ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {197} + ¦ ¦ ¦--'+': + [0/1] {199} + ¦ ¦ °--expr: 1 [0/0] {201} + ¦ ¦ °--NUM_CONST: 1 [0/0] {200} + ¦ ¦--ELSE: else [0/1] {202} + ¦ °--expr: a +4 [0/0] {203} + ¦ ¦--expr: a [0/1] {205} + ¦ ¦ °--SYMBOL: a [0/0] {204} + ¦ ¦--'+': + [0/0] {206} + ¦ °--expr: 4 [0/0] {208} + ¦ °--NUM_CONST: 4 [0/0] {207} + ¦--COMMENT: # sty [2/0] {209} + ¦--expr: {if ( [1/0] {210} + ¦ ¦--'{': { [0/0] {211} + ¦ ¦--expr: if (T [0/0] {212} + ¦ ¦ ¦--IF: if [0/1] {213} + ¦ ¦ ¦--'(': ( [0/0] {214} + ¦ ¦ ¦--expr: TRUE [0/0] {216} + ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {215} + ¦ ¦ ¦--')': ) [0/2] {217} + ¦ ¦ ¦--expr: 3 [1/0] {219} + ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {218} + ¦ ¦ ¦--ELSE: else [1/2] {220} + ¦ ¦ °--expr: 5 [1/0] {222} + ¦ ¦ °--NUM_CONST: 5 [0/0] {221} + ¦ °--'}': } [1/0] {223} + °--COMMENT: # sty [1/0] {224} diff --git a/tests/testthat/token_adding_removing/if_else_stylerignore-out.R b/tests/testthat/token_adding_removing/if_else_stylerignore-out.R new file mode 100644 index 000000000..2daff00f6 --- /dev/null +++ b/tests/testthat/token_adding_removing/if_else_stylerignore-out.R @@ -0,0 +1,68 @@ +a <- 1 +b <- 3 +k <- 9 +h <- function() 1 +s <- h +{ + if (TRUE) # styler: off + 3 + else + 5 # styler: off +} + + +{ + if (TRUE) { # styler: off + 3 + a + b + } else + 5 # styler: off + + c() +} + +# styler: off +{ + if (TRUE) + 3 + else { + h() + 5 } +} +# styler: on + +{ + if (TRUE) { + 3 # styler: off + } else { + s() + 5 + } +} + +if (TRUE) # styler: off + 1 else + 3 + +if (FALSE) # styler: off + 1 + a * (31 / 2) else + 3^k + + +if (TRUE) + 1+1 else # styler: off + 3 + +if (TRUE) { + 1 + 1 +} else { + a + 4 +} + +# styler: off +{if (TRUE) + 3 +else + 5 +} +# styler: on diff --git a/tests/testthat/token_adding_removing/mixed_token-in.R b/tests/testthat/token_adding_removing/mixed_token-in.R index 2d5c539e9..c984b805e 100644 --- a/tests/testthat/token_adding_removing/mixed_token-in.R +++ b/tests/testthat/token_adding_removing/mixed_token-in.R @@ -11,12 +11,6 @@ a; b ;c;d 'text with "quotes"' -# no linebreak after special if they fit in a line -a %>% b() %>%c() - -# linebreak after special -a %>% - b() %>%c() # adding brackets in pipes a %>% diff --git a/tests/testthat/token_adding_removing/mixed_token-in_tree b/tests/testthat/token_adding_removing/mixed_token-in_tree index 8f7efb769..2e91e3aaf 100644 --- a/tests/testthat/token_adding_removing/mixed_token-in_tree +++ b/tests/testthat/token_adding_removing/mixed_token-in_tree @@ -1,86 +1,54 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) ¦--COMMENT: # = r [0/0] {1} - ¦--expr: [1/0] {1.9} - ¦ ¦--expr: [0/1] {3} - ¦ ¦ °--SYMBOL: a [0/0] {2} - ¦ ¦--EQ_ASSIGN: = [0/1] {4} - ¦ °--expr: [0/0] {6} - ¦ °--NUM_CONST: 3 [0/0] {5} - ¦--expr: [1/0] {7} - ¦ ¦--expr: [0/0] {9} - ¦ ¦ °--SYMBOL_FUNCTION_CALL: data_ [0/0] {8} - ¦ ¦--'(': ( [0/0] {10} - ¦ ¦--SYMBOL_SUB: a [0/1] {11} - ¦ ¦--EQ_SUB: = [0/1] {12} - ¦ ¦--expr: [0/0] {14} - ¦ ¦ °--NUM_CONST: 3 [0/0] {13} - ¦ °--')': ) [0/0] {15} - ¦--COMMENT: # sem [2/0] {16} - ¦--expr: [1/0] {18} - ¦ °--SYMBOL: a [0/0] {17} - ¦--';': ; [0/1] {19} - ¦--expr: [0/1] {21} - ¦ °--SYMBOL: b [0/0] {20} - ¦--';': ; [0/0] {22} - ¦--expr: [0/0] {24} - ¦ °--SYMBOL: c [0/0] {23} - ¦--';': ; [0/0] {25} - ¦--expr: [0/0] {27} - ¦ °--SYMBOL: d [0/0] {26} - ¦--COMMENT: # quo [3/0] 
{28} - ¦--expr: [1/0] {30} - ¦ °--STR_CONST: "text [0/0] {29} - ¦--expr: [1/0] {32} - ¦ °--STR_CONST: 'text [0/0] {31} - ¦--COMMENT: # no [3/0] {33} - ¦--expr: [1/0] {34} - ¦ ¦--expr: [0/1] {37} - ¦ ¦ °--SYMBOL: a [0/0] {36} - ¦ ¦--SPECIAL-PIPE: %>% [0/1] {38} - ¦ ¦--expr: [0/1] {39} - ¦ ¦ ¦--expr: [0/0] {41} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {40} - ¦ ¦ ¦--'(': ( [0/0] {42} - ¦ ¦ °--')': ) [0/0] {43} - ¦ ¦--SPECIAL-PIPE: %>% [0/0] {44} - ¦ °--expr: [0/0] {45} - ¦ ¦--expr: [0/0] {47} - ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {46} - ¦ ¦--'(': ( [0/0] {48} - ¦ °--')': ) [0/0] {49} - ¦--COMMENT: # lin [2/0] {50} - ¦--expr: [1/0] {51} - ¦ ¦--expr: [0/1] {54} - ¦ ¦ °--SYMBOL: a [0/0] {53} - ¦ ¦--SPECIAL-PIPE: %>% [0/2] {55} - ¦ ¦--expr: [1/1] {56} - ¦ ¦ ¦--expr: [0/0] {58} - ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: b [0/0] {57} - ¦ ¦ ¦--'(': ( [0/0] {59} - ¦ ¦ °--')': ) [0/0] {60} - ¦ ¦--SPECIAL-PIPE: %>% [0/0] {61} - ¦ °--expr: [0/0] {62} - ¦ ¦--expr: [0/0] {64} - ¦ ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {63} - ¦ ¦--'(': ( [0/0] {65} - ¦ °--')': ) [0/0] {66} - ¦--COMMENT: # add [2/0] {67} - ¦--expr: [1/0] {68} - ¦ ¦--expr: [0/1] {71} - ¦ ¦ °--SYMBOL: a [0/0] {70} - ¦ ¦--SPECIAL-PIPE: %>% [0/2] {72} - ¦ ¦--expr: [1/1] {74} - ¦ ¦ °--SYMBOL: b [0/0] {73} - ¦ ¦--SPECIAL-PIPE: %>% [0/2] {75} - ¦ °--expr: [1/0] {77} - ¦ °--SYMBOL: c [0/0] {76} - ¦--COMMENT: # add [2/0] {78} - °--expr: [1/0] {79} - ¦--expr: [0/1] {82} - ¦ °--SYMBOL: a [0/0] {81} - ¦--SPECIAL-PIPE: %>% [0/1] {83} - ¦--expr: [0/1] {85} - ¦ °--SYMBOL: b [0/0] {84} - ¦--SPECIAL-PIPE: %>% [0/2] {86} - °--expr: [1/0] {88} - °--SYMBOL: c [0/0] {87} + ¦--expr_or_assign_or_help: a = 3 [1/0] {2} + ¦ ¦--expr: a [0/1] {4} + ¦ ¦ °--SYMBOL: a [0/0] {3} + ¦ ¦--EQ_ASSIGN: = [0/1] {5} + ¦ °--expr: 3 [0/0] {7} + ¦ °--NUM_CONST: 3 [0/0] {6} + ¦--expr: data_ [1/0] {8} + ¦ ¦--expr: data_ [0/0] {10} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: data_ [0/0] {9} + ¦ ¦--'(': ( [0/0] {11} + ¦ ¦--SYMBOL_SUB: a [0/1] {12} + ¦ ¦--EQ_SUB: = [0/1] {13} + ¦ ¦--expr: 3 [0/0] {15} + ¦ ¦ °--NUM_CONST: 3 [0/0] {14} + ¦ °--')': ) [0/0] {16} + ¦--COMMENT: # sem [2/0] {17} + ¦--expr: a [1/0] {19} + ¦ °--SYMBOL: a [0/0] {18} + ¦--';': ; [0/1] {20} + ¦--expr: b [0/1] {22} + ¦ °--SYMBOL: b [0/0] {21} + ¦--';': ; [0/0] {23} + ¦--expr: c [0/0] {25} + ¦ °--SYMBOL: c [0/0] {24} + ¦--';': ; [0/0] {26} + ¦--expr: d [0/0] {28} + ¦ °--SYMBOL: d [0/0] {27} + ¦--COMMENT: # quo [3/0] {29} + ¦--expr: "text [1/0] {31} + ¦ °--STR_CONST: "text [0/0] {30} + ¦--expr: 'text [1/0] {33} + ¦ °--STR_CONST: 'text [0/0] {32} + ¦--COMMENT: # add [4/0] {34} + ¦--expr: a %>% [1/0] {35} + ¦ ¦--expr: a [0/1] {38} + ¦ ¦ °--SYMBOL: a [0/0] {37} + ¦ ¦--SPECIAL-PIPE: %>% [0/2] {39} + ¦ ¦--expr: b [1/1] {41} + ¦ ¦ °--SYMBOL: b [0/0] {40} + ¦ ¦--SPECIAL-PIPE: %>% [0/2] {42} + ¦ °--expr: c [1/0] {44} + ¦ °--SYMBOL: c [0/0] {43} + ¦--COMMENT: # add [2/0] {45} + °--expr: a %>% [1/0] {46} + ¦--expr: a [0/1] {49} + ¦ °--SYMBOL: a [0/0] {48} + ¦--SPECIAL-PIPE: %>% [0/1] {50} + ¦--expr: b [0/1] {52} + ¦ °--SYMBOL: b [0/0] {51} + ¦--SPECIAL-PIPE: %>% [0/2] {53} + °--expr: c [1/0] {55} + °--SYMBOL: c [0/0] {54} diff --git a/tests/testthat/token_adding_removing/mixed_token-out.R b/tests/testthat/token_adding_removing/mixed_token-out.R index 3c003f944..e80aafb4d 100644 --- a/tests/testthat/token_adding_removing/mixed_token-out.R +++ b/tests/testthat/token_adding_removing/mixed_token-out.R @@ -14,13 +14,6 @@ d 'text with "quotes"' -# no linebreak after special if they fit in a line -a %>% b() %>% c() - -# linebreak after special -a %>% - b() %>% 
- c() # adding brackets in pipes a %>% diff --git a/tests/testthat/token_adding_removing/substitute-in.R b/tests/testthat/token_adding_removing/substitute-in.R new file mode 100644 index 000000000..83305008d --- /dev/null +++ b/tests/testthat/token_adding_removing/substitute-in.R @@ -0,0 +1,3 @@ +expr <- substitute(airquality %>% FUN_EXPR, env = list(FUN_EXPR = call("FUN_head"))) +a %>% + x diff --git a/tests/testthat/token_adding_removing/substitute-in_tree b/tests/testthat/token_adding_removing/substitute-in_tree new file mode 100644 index 000000000..70c35013e --- /dev/null +++ b/tests/testthat/token_adding_removing/substitute-in_tree @@ -0,0 +1,39 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--expr: expr [0/0] {1} + ¦ ¦--expr: expr [0/1] {3} + ¦ ¦ °--SYMBOL: expr [0/0] {2} + ¦ ¦--LEFT_ASSIGN: <- [0/1] {4} + ¦ °--expr: subst [0/0] {5} + ¦ ¦--expr: subst [0/0] {7} + ¦ ¦ °--SYMBOL_FUNCTION_CALL: subst [0/0] {6} + ¦ ¦--'(': ( [0/0] {8} + ¦ ¦--expr: airqu [0/0] {9} + ¦ ¦ ¦--expr: airqu [0/1] {11} + ¦ ¦ ¦ °--SYMBOL: airqu [0/0] {10} + ¦ ¦ ¦--SPECIAL-PIPE: %>% [0/1] {12} + ¦ ¦ °--expr: FUN_E [0/0] {14} + ¦ ¦ °--SYMBOL: FUN_E [0/0] {13} + ¦ ¦--',': , [0/1] {15} + ¦ ¦--SYMBOL_SUB: env [0/1] {16} + ¦ ¦--EQ_SUB: = [0/1] {17} + ¦ ¦--expr: list( [0/0] {18} + ¦ ¦ ¦--expr: list [0/0] {20} + ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: list [0/0] {19} + ¦ ¦ ¦--'(': ( [0/0] {21} + ¦ ¦ ¦--SYMBOL_SUB: FUN_E [0/1] {22} + ¦ ¦ ¦--EQ_SUB: = [0/1] {23} + ¦ ¦ ¦--expr: call( [0/0] {24} + ¦ ¦ ¦ ¦--expr: call [0/0] {26} + ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {25} + ¦ ¦ ¦ ¦--'(': ( [0/0] {27} + ¦ ¦ ¦ ¦--expr: "FUN_ [0/0] {29} + ¦ ¦ ¦ ¦ °--STR_CONST: "FUN_ [0/0] {28} + ¦ ¦ ¦ °--')': ) [0/0] {30} + ¦ ¦ °--')': ) [0/0] {31} + ¦ °--')': ) [0/0] {32} + °--expr: a %>% [1/0] {33} + ¦--expr: a [0/1] {35} + ¦ °--SYMBOL: a [0/0] {34} + ¦--SPECIAL-PIPE: %>% [0/2] {36} + °--expr: x [1/0] {38} + °--SYMBOL: x [0/0] {37} diff --git a/tests/testthat/token_adding_removing/substitute-out.R b/tests/testthat/token_adding_removing/substitute-out.R new file mode 100644 index 000000000..415d84d87 --- /dev/null +++ b/tests/testthat/token_adding_removing/substitute-out.R @@ -0,0 +1,3 @@ +expr <- substitute(airquality %>% FUN_EXPR, env = list(FUN_EXPR = call("FUN_head"))) +a %>% + x() diff --git a/tests/testthat/token_adding_removing/token_creation_find_pos-in_tree b/tests/testthat/token_adding_removing/token_creation_find_pos-in_tree index 319ef5aee..d7851f86c 100644 --- a/tests/testthat/token_adding_removing/token_creation_find_pos-in_tree +++ b/tests/testthat/token_adding_removing/token_creation_find_pos-in_tree @@ -1,186 +1,189 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/1] {3} + ¦--expr: print [0/0] {1} + ¦ ¦--expr: print [0/1] {3} ¦ ¦ °--SYMBOL: print [0/0] {2} ¦ ¦--LEFT_ASSIGN: <- [0/1] {4} - ¦ °--expr: [0/0] {5} + ¦ °--expr: funct [0/0] {5} ¦ ¦--FUNCTION: funct [0/0] {6} ¦ ¦--'(': ( [0/0] {7} ¦ ¦--SYMBOL_FORMALS: x [0/0] {8} ¦ ¦--',': , [0/1] {9} ¦ ¦--SYMBOL_FORMALS: ... 
[0/0] {10} ¦ ¦--')': ) [0/1] {11} - ¦ °--expr: [0/0] {12} + ¦ °--expr: { + l [0/0] {12} ¦ ¦--'{': { [0/2] {13} - ¦ ¦--expr: [1/2] {14} - ¦ ¦ ¦--expr: [0/1] {16} + ¦ ¦--expr: lines [1/2] {14} + ¦ ¦ ¦--expr: lines [0/1] {16} ¦ ¦ ¦ °--SYMBOL: lines [0/0] {15} ¦ ¦ ¦--LEFT_ASSIGN: <- [0/1] {17} - ¦ ¦ °--expr: [0/0] {18} - ¦ ¦ ¦--expr: [0/0] {20} + ¦ ¦ °--expr: m(y, [0/0] {18} + ¦ ¦ ¦--expr: m [0/0] {20} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: m [0/0] {19} ¦ ¦ ¦--'(': ( [0/0] {21} - ¦ ¦ ¦--expr: [0/0] {23} + ¦ ¦ ¦--expr: y [0/0] {23} ¦ ¦ ¦ °--SYMBOL: y [0/0] {22} ¦ ¦ ¦--',': , [0/1] {24} - ¦ ¦ ¦--expr: [0/0] {26} + ¦ ¦ ¦--expr: ... [0/0] {26} ¦ ¦ ¦ °--SYMBOL: ... [0/0] {25} ¦ ¦ ¦--',': , [0/1] {27} ¦ ¦ ¦--SYMBOL_SUB: print [0/1] {28} ¦ ¦ ¦--EQ_SUB: = [0/1] {29} - ¦ ¦ ¦--expr: [0/0] {31} + ¦ ¦ ¦--expr: TRUE [0/0] {31} ¦ ¦ ¦ °--NUM_CONST: TRUE [0/0] {30} ¦ ¦ °--')': ) [0/0] {32} - ¦ ¦--expr: [1/0] {33} - ¦ ¦ ¦--expr: [0/0] {35} + ¦ ¦--expr: paste [1/0] {33} + ¦ ¦ ¦--expr: paste [0/0] {35} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: paste [0/0] {34} ¦ ¦ ¦--'(': ( [0/0] {36} - ¦ ¦ ¦--expr: [0/0] {38} + ¦ ¦ ¦--expr: lines [0/0] {38} ¦ ¦ ¦ °--SYMBOL: lines [0/0] {37} ¦ ¦ ¦--',': , [0/1] {39} ¦ ¦ ¦--SYMBOL_SUB: sep [0/1] {40} ¦ ¦ ¦--EQ_SUB: = [0/1] {41} - ¦ ¦ ¦--expr: [0/0] {43} + ¦ ¦ ¦--expr: "\n" [0/0] {43} ¦ ¦ ¦ °--STR_CONST: "\n" [0/0] {42} ¦ ¦ °--')': ) [0/0] {44} ¦ °--'}': } [1/0] {45} ¦--COMMENT: # No [2/0] {46} - ¦--expr: [2/0] {47} - ¦ ¦--expr: [0/1] {49} + ¦--expr: kng < [2/0] {47} + ¦ ¦--expr: kng [0/1] {49} ¦ ¦ °--SYMBOL: kng [0/0] {48} ¦ ¦--LEFT_ASSIGN: <- [0/1] {50} - ¦ °--expr: [0/0] {51} + ¦ °--expr: funct [0/0] {51} ¦ ¦--FUNCTION: funct [0/0] {52} ¦ ¦--'(': ( [0/0] {53} ¦ ¦--SYMBOL_FORMALS: x [0/0] {54} ¦ ¦--',': , [0/1] {55} ¦ ¦--SYMBOL_FORMALS: y [0/0] {56} ¦ ¦--')': ) [0/1] {57} - ¦ °--expr: [0/0] {58} - ¦ ¦--expr: [0/0] {60} + ¦ °--expr: spm(f [0/0] {58} + ¦ ¦--expr: spm [0/0] {60} ¦ ¦ °--SYMBOL_FUNCTION_CALL: spm [0/0] {59} ¦ ¦--'(': ( [0/0] {61} ¦ ¦--SYMBOL_SUB: fmt [0/1] {62} ¦ ¦--EQ_SUB: = [0/1] {63} - ¦ ¦--expr: [0/0] {65} + ¦ ¦--expr: "%i" [0/0] {65} ¦ ¦ °--STR_CONST: "%i" [0/0] {64} ¦ ¦--',': , [0/1] {66} - ¦ ¦--expr: [0/0] {67} - ¦ ¦ ¦--expr: [0/0] {69} + ¦ ¦--expr: lgd(x [0/0] {67} + ¦ ¦ ¦--expr: lgd [0/0] {69} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: lgd [0/0] {68} ¦ ¦ ¦--'(': ( [0/0] {70} - ¦ ¦ ¦--expr: [0/0] {72} + ¦ ¦ ¦--expr: x [0/0] {72} ¦ ¦ ¦ °--SYMBOL: x [0/0] {71} ¦ ¦ °--')': ) [0/0] {73} ¦ ¦--',': , [0/1] {74} - ¦ ¦--expr: [0/0] {75} - ¦ ¦ ¦--expr: [0/0] {77} + ¦ ¦--expr: tds(y [0/0] {75} + ¦ ¦ ¦--expr: tds [0/0] {77} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: tds [0/0] {76} ¦ ¦ ¦--'(': ( [0/0] {78} - ¦ ¦ ¦--expr: [0/0] {80} + ¦ ¦ ¦--expr: y [0/0] {80} ¦ ¦ ¦ °--SYMBOL: y [0/0] {79} ¦ ¦ °--')': ) [0/0] {81} ¦ °--')': ) [0/0] {82} - ¦--expr: [1/0] {83} - ¦ ¦--expr: [0/1] {85} + ¦--expr: tka < [1/0] {83} + ¦ ¦--expr: tka [0/1] {85} ¦ ¦ °--SYMBOL: tka [0/0] {84} ¦ ¦--LEFT_ASSIGN: <- [0/1] {86} - ¦ °--expr: [0/0] {87} + ¦ °--expr: funct [0/0] {87} ¦ ¦--FUNCTION: funct [0/0] {88} ¦ ¦--'(': ( [0/0] {89} ¦ ¦--SYMBOL_FORMALS: my [0/0] {90} ¦ ¦--',': , [0/1] {91} ¦ ¦--SYMBOL_FORMALS: y [0/0] {92} ¦ ¦--')': ) [0/1] {93} - ¦ °--expr: [0/0] {94} - ¦ ¦--expr: [0/0] {96} + ¦ °--expr: ttt(g [0/0] {94} + ¦ ¦--expr: ttt [0/0] {96} ¦ ¦ °--SYMBOL_FUNCTION_CALL: ttt [0/0] {95} ¦ ¦--'(': ( [0/0] {97} ¦ ¦--SYMBOL_SUB: gmks [0/1] {98} ¦ ¦--EQ_SUB: = [0/1] {99} - ¦ ¦--expr: [0/0] {101} + ¦ ¦--expr: "%s" [0/0] {101} ¦ ¦ °--STR_CONST: "%s" [0/0] {100} ¦ ¦--',': , [0/1] {102} - ¦ ¦--expr: [0/0] {103} - ¦ ¦ ¦--expr: [0/0] {105} + ¦ 
¦--expr: slice [0/0] {103} + ¦ ¦ ¦--expr: slice [0/0] {105} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: slice [0/0] {104} ¦ ¦ ¦--'(': ( [0/0] {106} - ¦ ¦ ¦--expr: [0/0] {108} + ¦ ¦ ¦--expr: x [0/0] {108} ¦ ¦ ¦ °--SYMBOL: x [0/0] {107} ¦ ¦ °--')': ) [0/0] {109} ¦ ¦--',': , [0/1] {110} - ¦ ¦--expr: [0/0] {111} - ¦ ¦ ¦--expr: [0/0] {113} + ¦ ¦--expr: acast [0/0] {111} + ¦ ¦ ¦--expr: acast [0/0] {113} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: acast [0/0] {112} ¦ ¦ ¦--'(': ( [0/0] {114} - ¦ ¦ ¦--expr: [0/0] {116} + ¦ ¦ ¦--expr: d [0/0] {116} ¦ ¦ ¦ °--SYMBOL: d [0/0] {115} ¦ ¦ °--')': ) [0/0] {117} ¦ °--')': ) [0/0] {118} - °--expr: [2/0] {119} - ¦--expr: [0/1] {121} + °--expr: anoth [2/0] {119} + ¦--expr: anoth [0/1] {121} ¦ °--SYMBOL: anoth [0/0] {120} ¦--LEFT_ASSIGN: <- [0/1] {122} - °--expr: [0/0] {123} + °--expr: funct [0/0] {123} ¦--FUNCTION: funct [0/0] {124} ¦--'(': ( [0/0] {125} ¦--SYMBOL_FORMALS: x [0/0] {126} ¦--',': , [0/1] {127} ¦--SYMBOL_FORMALS: y [0/0] {128} ¦--')': ) [0/1] {129} - °--expr: [0/0] {130} + °--expr: { + i [0/0] {130} ¦--'{': { [0/2] {131} - ¦--expr: [1/2] {132} + ¦--expr: if (! [1/2] {132} ¦ ¦--IF: if [0/1] {133} ¦ ¦--'(': ( [0/0] {134} - ¦ ¦--expr: [0/0] {135} - ¦ ¦ ¦--expr: [0/1] {136} + ¦ ¦--expr: !fun( [0/0] {135} + ¦ ¦ ¦--expr: !fun( [0/1] {136} ¦ ¦ ¦ ¦--'!': ! [0/0] {137} - ¦ ¦ ¦ °--expr: [0/0] {138} - ¦ ¦ ¦ ¦--expr: [0/0] {140} + ¦ ¦ ¦ °--expr: fun(x [0/0] {138} + ¦ ¦ ¦ ¦--expr: fun [0/0] {140} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: fun [0/0] {139} ¦ ¦ ¦ ¦--'(': ( [0/0] {141} - ¦ ¦ ¦ ¦--expr: [0/0] {143} + ¦ ¦ ¦ ¦--expr: x [0/0] {143} ¦ ¦ ¦ ¦ °--SYMBOL: x [0/0] {142} ¦ ¦ ¦ °--')': ) [0/0] {144} ¦ ¦ ¦--AND2: && [0/1] {145} - ¦ ¦ °--expr: [0/0] {146} + ¦ ¦ °--expr: !not_ [0/0] {146} ¦ ¦ ¦--'!': ! [0/0] {147} - ¦ ¦ °--expr: [0/0] {148} - ¦ ¦ ¦--expr: [0/0] {150} + ¦ ¦ °--expr: not_i [0/0] {148} + ¦ ¦ ¦--expr: not_i [0/0] {150} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: not_i [0/0] {149} ¦ ¦ ¦--'(': ( [0/0] {151} - ¦ ¦ ¦--expr: [0/0] {153} + ¦ ¦ ¦--expr: y [0/0] {153} ¦ ¦ ¦ °--SYMBOL: y [0/0] {152} ¦ ¦ °--')': ) [0/0] {154} ¦ ¦--')': ) [0/1] {155} - ¦ °--expr: [0/0] {156} + ¦ °--expr: { + [0/0] {156} ¦ ¦--'{': { [0/4] {157} - ¦ ¦--expr: [1/2] {158} - ¦ ¦ ¦--expr: [0/0] {160} + ¦ ¦--expr: retur [1/2] {158} + ¦ ¦ ¦--expr: retur [0/0] {160} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: retur [0/0] {159} ¦ ¦ ¦--'(': ( [0/0] {161} - ¦ ¦ ¦--expr: [0/0] {163} + ¦ ¦ ¦--expr: s [0/0] {163} ¦ ¦ ¦ °--SYMBOL: s [0/0] {162} ¦ ¦ °--')': ) [0/0] {164} ¦ °--'}': } [1/0] {165} - ¦--expr: [1/0] {166} - ¦ ¦--expr: [0/0] {168} + ¦--expr: ident [1/0] {166} + ¦ ¦--expr: ident [0/0] {168} ¦ ¦ °--SYMBOL_FUNCTION_CALL: ident [0/0] {167} ¦ ¦--'(': ( [0/0] {169} - ¦ ¦--expr: [0/0] {170} - ¦ ¦ ¦--expr: [0/0] {172} + ¦ ¦--expr: kss(n [0/0] {170} + ¦ ¦ ¦--expr: kss [0/0] {172} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: kss [0/0] {171} ¦ ¦ ¦--'(': ( [0/0] {173} - ¦ ¦ ¦--expr: [0/0] {175} + ¦ ¦ ¦--expr: nmp [0/0] {175} ¦ ¦ ¦ °--SYMBOL: nmp [0/0] {174} ¦ ¦ °--')': ) [0/0] {176} ¦ ¦--',': , [0/1] {177} - ¦ ¦--expr: [0/0] {178} - ¦ ¦ ¦--expr: [0/0] {180} + ¦ ¦--expr: gsk(r [0/0] {178} + ¦ ¦ ¦--expr: gsk [0/0] {180} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: gsk [0/0] {179} ¦ ¦ ¦--'(': ( [0/0] {181} - ¦ ¦ ¦--expr: [0/0] {183} + ¦ ¦ ¦--expr: rdm [0/0] {183} ¦ ¦ ¦ °--SYMBOL: rdm [0/0] {182} ¦ ¦ °--')': ) [0/0] {184} ¦ °--')': ) [0/0] {185} diff --git a/tests/testthat/token_adding_removing/token_creation_find_pos-out.R b/tests/testthat/token_adding_removing/token_creation_find_pos-out.R index 233f92ca0..61459a3cf 100644 --- 
a/tests/testthat/token_adding_removing/token_creation_find_pos-out.R +++ b/tests/testthat/token_adding_removing/token_creation_find_pos-out.R @@ -1,4 +1,3 @@ - print_out <- function(x, ...) { lines <- m(y, ..., print = TRUE) paste(lines, sep = "\n") diff --git a/tests/testthat/unary_spacing/unary_complex-in_tree b/tests/testthat/unary_spacing/unary_complex-in_tree index d3ffb96be..42b3fce70 100644 --- a/tests/testthat/unary_spacing/unary_complex-in_tree +++ b/tests/testthat/unary_spacing/unary_complex-in_tree @@ -1,81 +1,81 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/0] {4} + ¦--expr: 1+(1 [0/0] {1} + ¦ ¦--expr: 1 [0/0] {4} ¦ ¦ °--NUM_CONST: 1 [0/0] {3} ¦ ¦--'+': + [0/0] {5} - ¦ ¦--expr: [0/0] {6} + ¦ ¦--expr: (1 [0/0] {6} ¦ ¦ ¦--'(': ( [0/0] {7} - ¦ ¦ ¦--expr: [0/0] {8} - ¦ ¦ ¦ ¦--expr: [0/3] {11} + ¦ ¦ ¦--expr: 1 - [0/0] {8} + ¦ ¦ ¦ ¦--expr: 1 [0/3] {11} ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {10} ¦ ¦ ¦ ¦--'-': - [0/1] {12} - ¦ ¦ ¦ ¦--expr: [0/5] {13} + ¦ ¦ ¦ ¦--expr: (- (- [0/5] {13} ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {14} - ¦ ¦ ¦ ¦ ¦--expr: [0/3] {15} + ¦ ¦ ¦ ¦ ¦--expr: - (- [0/3] {15} ¦ ¦ ¦ ¦ ¦ ¦--'-': - [0/1] {16} - ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {17} + ¦ ¦ ¦ ¦ ¦ °--expr: (- 3 [0/0] {17} ¦ ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {18} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {19} - ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/1] {21} + ¦ ¦ ¦ ¦ ¦ ¦--expr: - 3 + [0/0] {19} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: - 3 [0/1] {21} ¦ ¦ ¦ ¦ ¦ ¦ ¦ ¦--'-': - [0/1] {22} - ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {24} + ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--expr: 3 [0/0] {24} ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {23} ¦ ¦ ¦ ¦ ¦ ¦ ¦--'+': + [0/2] {25} - ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {27} + ¦ ¦ ¦ ¦ ¦ ¦ ¦--expr: 11 [0/0] {27} ¦ ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 11 [0/0] {26} ¦ ¦ ¦ ¦ ¦ ¦ ¦--'+': + [0/0] {28} - ¦ ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {29} + ¦ ¦ ¦ ¦ ¦ ¦ °--expr: + 1 [0/0] {29} ¦ ¦ ¦ ¦ ¦ ¦ ¦--'+': + [0/3] {30} - ¦ ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {32} + ¦ ¦ ¦ ¦ ¦ ¦ °--expr: 1 [0/0] {32} ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {31} ¦ ¦ ¦ ¦ ¦ °--')': ) [0/0] {33} ¦ ¦ ¦ ¦ °--')': ) [0/0] {34} ¦ ¦ ¦ ¦--'-': - [0/0] {35} - ¦ ¦ ¦ °--expr: [0/0] {37} + ¦ ¦ ¦ °--expr: 4 [0/0] {37} ¦ ¦ ¦ °--NUM_CONST: 4 [0/0] {36} ¦ ¦ °--')': ) [0/0] {38} ¦ ¦--'-': - [0/0] {39} - ¦ °--expr: [0/0] {40} + ¦ °--expr: -40 [0/0] {40} ¦ ¦--'-': - [0/0] {41} - ¦ °--expr: [0/0] {43} + ¦ °--expr: 40 [0/0] {43} ¦ °--NUM_CONST: 40 [0/0] {42} - °--expr: [1/0] {44} - ¦--expr: [0/0] {47} + °--expr: 1+(1- [1/0] {44} + ¦--expr: 1 [0/0] {47} ¦ °--NUM_CONST: 1 [0/0] {46} ¦--'+': + [0/0] {48} - ¦--expr: [0/0] {49} + ¦--expr: (1-(- [0/0] {49} ¦ ¦--'(': ( [0/0] {50} - ¦ ¦--expr: [0/0] {51} - ¦ ¦ ¦--expr: [0/0] {54} + ¦ ¦--expr: 1-(-( [0/0] {51} + ¦ ¦ ¦--expr: 1 [0/0] {54} ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {53} ¦ ¦ ¦--'-': - [0/0] {55} - ¦ ¦ ¦--expr: [0/0] {56} + ¦ ¦ ¦--expr: (-(-3 [0/0] {56} ¦ ¦ ¦ ¦--'(': ( [0/0] {57} - ¦ ¦ ¦ ¦--expr: [0/0] {58} + ¦ ¦ ¦ ¦--expr: -(-3+ [0/0] {58} ¦ ¦ ¦ ¦ ¦--'-': - [0/0] {59} - ¦ ¦ ¦ ¦ °--expr: [0/0] {60} + ¦ ¦ ¦ ¦ °--expr: (-3+1 [0/0] {60} ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {61} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {62} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {64} + ¦ ¦ ¦ ¦ ¦--expr: -3+11 [0/0] {62} + ¦ ¦ ¦ ¦ ¦ ¦--expr: -3 [0/0] {64} ¦ ¦ ¦ ¦ ¦ ¦ ¦--'-': - [0/0] {65} - ¦ ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {67} + ¦ ¦ ¦ ¦ ¦ ¦ °--expr: 3 [0/0] {67} ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {66} ¦ ¦ ¦ ¦ ¦ ¦--'+': + [0/0] {68} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {70} + ¦ ¦ ¦ ¦ ¦ ¦--expr: 11 [0/0] {70} ¦ ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 11 [0/0] {69} ¦ ¦ ¦ ¦ ¦ ¦--'+': + [0/0] {71} - ¦ ¦ ¦ ¦ ¦ °--expr: [0/0] {72} + ¦ ¦ ¦ ¦ ¦ °--expr: +1 [0/0] {72} ¦ ¦ ¦ ¦ ¦ ¦--'+': + [0/0] {73} - ¦ ¦ ¦ ¦ ¦ 
°--expr: [0/0] {75} + ¦ ¦ ¦ ¦ ¦ °--expr: 1 [0/0] {75} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 1 [0/0] {74} ¦ ¦ ¦ ¦ °--')': ) [0/0] {76} ¦ ¦ ¦ °--')': ) [0/0] {77} ¦ ¦ ¦--'-': - [0/0] {78} - ¦ ¦ °--expr: [0/0] {80} + ¦ ¦ °--expr: 4 [0/0] {80} ¦ ¦ °--NUM_CONST: 4 [0/0] {79} ¦ °--')': ) [0/0] {81} ¦--'-': - [0/0] {82} - °--expr: [0/0] {83} + °--expr: -40 [0/0] {83} ¦--'-': - [0/0] {84} - °--expr: [0/0] {86} + °--expr: 40 [0/0] {86} °--NUM_CONST: 40 [0/0] {85} diff --git a/tests/testthat/unary_spacing/unary_indention-in_tree b/tests/testthat/unary_spacing/unary_indention-in_tree index eedffd47f..a1d0470e5 100644 --- a/tests/testthat/unary_spacing/unary_indention-in_tree +++ b/tests/testthat/unary_spacing/unary_indention-in_tree @@ -1,52 +1,57 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} - ¦ ¦--expr: [0/1] {4} + ¦--expr: 1 + + [0/0] {1} + ¦ ¦--expr: 1 [0/1] {4} ¦ ¦ °--NUM_CONST: 1 [0/0] {3} ¦ ¦--'+': + [0/5] {5} - ¦ ¦--expr: [1/1] {6} - ¦ ¦ ¦--expr: [0/4] {9} + ¦ ¦--expr: 2 [1/1] {6} + ¦ ¦ ¦--expr: 2 [0/4] {9} ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {8} ¦ ¦ ¦--'/': / [0/2] {10} - ¦ ¦ ¦--expr: [1/0] {12} + ¦ ¦ ¦--expr: 8 [1/0] {12} ¦ ¦ ¦ °--NUM_CONST: 8 [0/0] {11} ¦ ¦ ¦--'/': / [0/5] {13} - ¦ ¦ °--expr: [1/0] {15} + ¦ ¦ °--expr: 5 [1/0] {15} ¦ ¦ °--NUM_CONST: 5 [0/0] {14} ¦ ¦--'+': + [0/0] {16} - ¦ °--expr: [1/0] {18} + ¦ °--expr: 13 [1/0] {18} ¦ °--NUM_CONST: 13 [0/0] {17} - ¦--expr: [3/0] {19} - ¦ ¦--expr: [0/1] {22} + ¦--expr: 1 + + [3/0] {19} + ¦ ¦--expr: 1 [0/1] {22} ¦ ¦ °--NUM_CONST: 1 [0/0] {21} ¦ ¦--'+': + [0/2] {23} - ¦ ¦--expr: [1/1] {24} + ¦ ¦--expr: + 1 [1/1] {24} ¦ ¦ ¦--'+': + [0/1] {25} - ¦ ¦ °--expr: [0/0] {27} + ¦ ¦ °--expr: 1 [0/0] {27} ¦ ¦ °--NUM_CONST: 1 [0/0] {26} ¦ ¦--'-': - [0/0] {28} - ¦ °--expr: [1/0] {29} - ¦ ¦--expr: [0/1] {31} + ¦ °--expr: -1 / + [1/0] {29} + ¦ ¦--expr: -1 [0/1] {31} ¦ ¦ ¦--'-': - [0/0] {32} - ¦ ¦ °--expr: [0/0] {34} + ¦ ¦ °--expr: 1 [0/0] {34} ¦ ¦ °--NUM_CONST: 1 [0/0] {33} ¦ ¦--'/': / [0/2] {35} - ¦ ¦--expr: [1/2] {37} + ¦ ¦--expr: 27 [1/2] {37} ¦ ¦ °--NUM_CONST: 27 [0/0] {36} ¦ ¦--'/': / [0/2] {38} - ¦ °--expr: [1/0] {39} + ¦ °--expr: - 3 [1/0] {39} ¦ ¦--'-': - [0/1] {40} - ¦ °--expr: [0/0] {42} + ¦ °--expr: 3 [0/0] {42} ¦ °--NUM_CONST: 3 [0/0] {41} - °--expr: [2/0] {43} - ¦--expr: [0/1] {46} + °--expr: 1 / + [2/0] {43} + ¦--expr: 1 [0/1] {46} ¦ °--NUM_CONST: 1 [0/0] {45} ¦--'/': / [0/2] {47} - ¦--expr: [1/1] {49} + ¦--expr: 2 [1/1] {49} ¦ °--NUM_CONST: 2 [0/0] {48} ¦--'+': + [0/2] {50} - °--expr: [1/0] {51} - ¦--expr: [0/1] {53} + °--expr: 33 * + [1/0] {51} + ¦--expr: 33 [0/1] {53} ¦ °--NUM_CONST: 33 [0/0] {52} ¦--'*': * [0/2] {54} - °--expr: [1/0] {56} + °--expr: 2 [1/0] {56} °--NUM_CONST: 2 [0/0] {55} diff --git a/tests/testthat/unary_spacing/unary_simple-in_tree b/tests/testthat/unary_spacing/unary_simple-in_tree index 23e2c2a95..bebf9c503 100644 --- a/tests/testthat/unary_spacing/unary_simple-in_tree +++ b/tests/testthat/unary_spacing/unary_simple-in_tree @@ -1,26 +1,26 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/0] {6} + °--expr: 1+-1/ [0/0] {1} + ¦--expr: 1 [0/0] {6} ¦ °--NUM_CONST: 1 [0/0] {5} ¦--'+': + [0/0] {7} - ¦--expr: [0/0] {8} - ¦ ¦--expr: [0/0] {9} + ¦--expr: -1/2 [0/0] {8} + ¦ ¦--expr: -1 [0/0] {9} ¦ ¦ ¦--'-': - [0/0] {10} - ¦ ¦ °--expr: [0/0] {12} + ¦ ¦ °--expr: 1 [0/0] {12} ¦ ¦ °--NUM_CONST: 1 [0/0] {11} ¦ ¦--'/': / [0/0] {13} - ¦ °--expr: [0/0] {15} + ¦ °--expr: 2 [0/0] {15} ¦ °--NUM_CONST: 2 [0/0] {14} ¦--'-': - [0/0] {16} - ¦--expr: [0/0] {18} + ¦--expr: 3 [0/0] {18} ¦ 
°--NUM_CONST: 3 [0/0] {17} ¦--'-': - [0/0] {19} - ¦--expr: [0/0] {20} + ¦--expr: -3 [0/0] {20} ¦ ¦--'-': - [0/0] {21} - ¦ °--expr: [0/0] {23} + ¦ °--expr: 3 [0/0] {23} ¦ °--NUM_CONST: 3 [0/0] {22} ¦--'+': + [0/0] {24} - °--expr: [0/0] {25} + °--expr: +3 [0/0] {25} ¦--'+': + [0/0] {26} - °--expr: [0/0] {28} + °--expr: 3 [0/0] {28} °--NUM_CONST: 3 [0/0] {27} diff --git a/tests/testthat/unindention/mixed-double-in.R b/tests/testthat/unindention/mixed-double-in.R new file mode 100644 index 000000000..44c161fb2 --- /dev/null +++ b/tests/testthat/unindention/mixed-double-in.R @@ -0,0 +1,88 @@ +# classical + +function(x, + y) { + 1 +} + + +function(x, + y, + k) { + 1 +} + + +function(x, + y) { + 1 +} + +function( + x, + y) { + 1 +} + + +function(x, y) { + 1 +} + +function(x, + # + y) { + 1 +} + + +# double +function(x, +y) { + 1 +} + + +function(x, +y, + k) { + 1 +} + + +function( + + x, + y) { + 1 +} + + +function( + x, y) { + 1 +} + +function(x, +# + y) { + 1 +} + + +# last brace +function( + x, y) NULL + +function( + x, y +) NULL + +function( + x, + y) NULL + +function( + x, + y +) NULL diff --git a/tests/testthat/unindention/mixed-double-in_tree b/tests/testthat/unindention/mixed-double-in_tree new file mode 100644 index 000000000..48a5a8d9b --- /dev/null +++ b/tests/testthat/unindention/mixed-double-in_tree @@ -0,0 +1,189 @@ +ROOT (token: short_text [lag_newlines/spaces] {pos_id}) + ¦--COMMENT: # cla [0/0] {1} + ¦--expr: funct [2/0] {2} + ¦ ¦--FUNCTION: funct [0/0] {3} + ¦ ¦--'(': ( [0/0] {4} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {5} + ¦ ¦--',': , [0/9] {6} + ¦ ¦--SYMBOL_FORMALS: y [1/0] {7} + ¦ ¦--')': ) [0/1] {8} + ¦ °--expr: { + 1 [0/0] {9} + ¦ ¦--'{': { [0/2] {10} + ¦ ¦--expr: 1 [1/0] {12} + ¦ ¦ °--NUM_CONST: 1 [0/0] {11} + ¦ °--'}': } [1/0] {13} + ¦--expr: funct [3/0] {14} + ¦ ¦--FUNCTION: funct [0/0] {15} + ¦ ¦--'(': ( [0/0] {16} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {17} + ¦ ¦--',': , [0/9] {18} + ¦ ¦--SYMBOL_FORMALS: y [1/0] {19} + ¦ ¦--',': , [0/9] {20} + ¦ ¦--SYMBOL_FORMALS: k [1/0] {21} + ¦ ¦--')': ) [0/1] {22} + ¦ °--expr: { + 1 [0/0] {23} + ¦ ¦--'{': { [0/2] {24} + ¦ ¦--expr: 1 [1/0] {26} + ¦ ¦ °--NUM_CONST: 1 [0/0] {25} + ¦ °--'}': } [1/0] {27} + ¦--expr: funct [3/0] {28} + ¦ ¦--FUNCTION: funct [0/0] {29} + ¦ ¦--'(': ( [0/0] {30} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {31} + ¦ ¦--',': , [0/9] {32} + ¦ ¦--SYMBOL_FORMALS: y [1/0] {33} + ¦ ¦--')': ) [0/1] {34} + ¦ °--expr: { + 1 [0/0] {35} + ¦ ¦--'{': { [0/2] {36} + ¦ ¦--expr: 1 [1/0] {38} + ¦ ¦ °--NUM_CONST: 1 [0/0] {37} + ¦ °--'}': } [1/0] {39} + ¦--expr: funct [2/0] {40} + ¦ ¦--FUNCTION: funct [0/0] {41} + ¦ ¦--'(': ( [0/9] {42} + ¦ ¦--SYMBOL_FORMALS: x [1/0] {43} + ¦ ¦--',': , [0/9] {44} + ¦ ¦--SYMBOL_FORMALS: y [1/0] {45} + ¦ ¦--')': ) [0/1] {46} + ¦ °--expr: { + 1 [0/0] {47} + ¦ ¦--'{': { [0/2] {48} + ¦ ¦--expr: 1 [1/0] {50} + ¦ ¦ °--NUM_CONST: 1 [0/0] {49} + ¦ °--'}': } [1/0] {51} + ¦--expr: funct [3/0] {52} + ¦ ¦--FUNCTION: funct [0/0] {53} + ¦ ¦--'(': ( [0/0] {54} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {55} + ¦ ¦--',': , [0/1] {56} + ¦ ¦--SYMBOL_FORMALS: y [0/0] {57} + ¦ ¦--')': ) [0/1] {58} + ¦ °--expr: { + 1 [0/0] {59} + ¦ ¦--'{': { [0/2] {60} + ¦ ¦--expr: 1 [1/0] {62} + ¦ ¦ °--NUM_CONST: 1 [0/0] {61} + ¦ °--'}': } [1/0] {63} + ¦--expr: funct [2/0] {64} + ¦ ¦--FUNCTION: funct [0/0] {65} + ¦ ¦--'(': ( [0/0] {66} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {67} + ¦ ¦--',': , [0/9] {68} + ¦ ¦--COMMENT: # [1/9] {69} + ¦ ¦--SYMBOL_FORMALS: y [1/0] {70} + ¦ ¦--')': ) [0/1] {71} + ¦ °--expr: { + 1 [0/0] {72} + ¦ ¦--'{': { [0/2] {73} + ¦ ¦--expr: 1 [1/0] {75} + ¦ ¦ 
°--NUM_CONST: 1 [0/0] {74} + ¦ °--'}': } [1/0] {76} + ¦--COMMENT: # dou [3/0] {77} + ¦--expr: funct [1/0] {78} + ¦ ¦--FUNCTION: funct [0/0] {79} + ¦ ¦--'(': ( [0/0] {80} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {81} + ¦ ¦--',': , [0/0] {82} + ¦ ¦--SYMBOL_FORMALS: y [1/0] {83} + ¦ ¦--')': ) [0/1] {84} + ¦ °--expr: { + 1 [0/0] {85} + ¦ ¦--'{': { [0/2] {86} + ¦ ¦--expr: 1 [1/0] {88} + ¦ ¦ °--NUM_CONST: 1 [0/0] {87} + ¦ °--'}': } [1/0] {89} + ¦--expr: funct [3/0] {90} + ¦ ¦--FUNCTION: funct [0/0] {91} + ¦ ¦--'(': ( [0/0] {92} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {93} + ¦ ¦--',': , [0/0] {94} + ¦ ¦--SYMBOL_FORMALS: y [1/0] {95} + ¦ ¦--',': , [0/9] {96} + ¦ ¦--SYMBOL_FORMALS: k [1/0] {97} + ¦ ¦--')': ) [0/1] {98} + ¦ °--expr: { + 1 [0/0] {99} + ¦ ¦--'{': { [0/2] {100} + ¦ ¦--expr: 1 [1/0] {102} + ¦ ¦ °--NUM_CONST: 1 [0/0] {101} + ¦ °--'}': } [1/0] {103} + ¦--expr: funct [3/0] {104} + ¦ ¦--FUNCTION: funct [0/0] {105} + ¦ ¦--'(': ( [0/4] {106} + ¦ ¦--SYMBOL_FORMALS: x [2/0] {107} + ¦ ¦--',': , [0/4] {108} + ¦ ¦--SYMBOL_FORMALS: y [1/0] {109} + ¦ ¦--')': ) [0/1] {110} + ¦ °--expr: { + 1 [0/0] {111} + ¦ ¦--'{': { [0/2] {112} + ¦ ¦--expr: 1 [1/0] {114} + ¦ ¦ °--NUM_CONST: 1 [0/0] {113} + ¦ °--'}': } [1/0] {115} + ¦--expr: funct [3/0] {116} + ¦ ¦--FUNCTION: funct [0/0] {117} + ¦ ¦--'(': ( [0/2] {118} + ¦ ¦--SYMBOL_FORMALS: x [1/0] {119} + ¦ ¦--',': , [0/1] {120} + ¦ ¦--SYMBOL_FORMALS: y [0/0] {121} + ¦ ¦--')': ) [0/1] {122} + ¦ °--expr: { + 1 [0/0] {123} + ¦ ¦--'{': { [0/2] {124} + ¦ ¦--expr: 1 [1/0] {126} + ¦ ¦ °--NUM_CONST: 1 [0/0] {125} + ¦ °--'}': } [1/0] {127} + ¦--expr: funct [2/0] {128} + ¦ ¦--FUNCTION: funct [0/0] {129} + ¦ ¦--'(': ( [0/0] {130} + ¦ ¦--SYMBOL_FORMALS: x [0/0] {131} + ¦ ¦--',': , [0/0] {132} + ¦ ¦--COMMENT: # [1/25] {133} + ¦ ¦--SYMBOL_FORMALS: y [1/0] {134} + ¦ ¦--')': ) [0/1] {135} + ¦ °--expr: { + 1 [0/0] {136} + ¦ ¦--'{': { [0/2] {137} + ¦ ¦--expr: 1 [1/0] {139} + ¦ ¦ °--NUM_CONST: 1 [0/0] {138} + ¦ °--'}': } [1/0] {140} + ¦--COMMENT: # las [3/0] {141} + ¦--expr: funct [1/0] {142} + ¦ ¦--FUNCTION: funct [0/0] {143} + ¦ ¦--'(': ( [0/4] {144} + ¦ ¦--SYMBOL_FORMALS: x [1/0] {145} + ¦ ¦--',': , [0/1] {146} + ¦ ¦--SYMBOL_FORMALS: y [0/0] {147} + ¦ ¦--')': ) [0/1] {148} + ¦ °--expr: NULL [0/0] {150} + ¦ °--NULL_CONST: NULL [0/0] {149} + ¦--expr: funct [2/0] {151} + ¦ ¦--FUNCTION: funct [0/0] {152} + ¦ ¦--'(': ( [0/4] {153} + ¦ ¦--SYMBOL_FORMALS: x [1/0] {154} + ¦ ¦--',': , [0/1] {155} + ¦ ¦--SYMBOL_FORMALS: y [0/0] {156} + ¦ ¦--')': ) [1/1] {157} + ¦ °--expr: NULL [0/0] {159} + ¦ °--NULL_CONST: NULL [0/0] {158} + ¦--expr: funct [2/0] {160} + ¦ ¦--FUNCTION: funct [0/0] {161} + ¦ ¦--'(': ( [0/4] {162} + ¦ ¦--SYMBOL_FORMALS: x [1/0] {163} + ¦ ¦--',': , [0/4] {164} + ¦ ¦--SYMBOL_FORMALS: y [1/0] {165} + ¦ ¦--')': ) [0/1] {166} + ¦ °--expr: NULL [0/0] {168} + ¦ °--NULL_CONST: NULL [0/0] {167} + °--expr: funct [2/0] {169} + ¦--FUNCTION: funct [0/0] {170} + ¦--'(': ( [0/4] {171} + ¦--SYMBOL_FORMALS: x [1/0] {172} + ¦--',': , [0/4] {173} + ¦--SYMBOL_FORMALS: y [1/0] {174} + ¦--')': ) [1/1] {175} + °--expr: NULL [0/0] {177} + °--NULL_CONST: NULL [0/0] {176} diff --git a/tests/testthat/unindention/mixed-double-out.R b/tests/testthat/unindention/mixed-double-out.R new file mode 100644 index 000000000..a1c7e24d2 --- /dev/null +++ b/tests/testthat/unindention/mixed-double-out.R @@ -0,0 +1,94 @@ +# classical + +function(x, + y) { + 1 +} + + +function(x, + y, + k) { + 1 +} + + +function(x, + y) { + 1 +} + +function(x, + y) { + 1 +} + + +function(x, y) { + 1 +} + +function(x, + # + y) { + 1 +} + + +# double 
+function( + x, + y) { + 1 +} + + +function( + x, + y, + k) { + 1 +} + + +function( + x, + y) { + 1 +} + + +function( + x, y) { + 1 +} + +function(x, + # + y) { + 1 +} + + +# last brace +function( + x, y) { + NULL +} + +function( + x, y) { + NULL +} + +function( + x, + y) { + NULL +} + +function( + x, + y) { + NULL +} diff --git a/tests/testthat/unindention/mixed-in_tree b/tests/testthat/unindention/mixed-in_tree index b9ea9cb88..275bbebef 100644 --- a/tests/testthat/unindention/mixed-in_tree +++ b/tests/testthat/unindention/mixed-in_tree @@ -1,48 +1,57 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - ¦--expr: [0/0] {1} + ¦--expr: { + ( [0/0] {1} ¦ ¦--'{': { [0/2] {2} - ¦ ¦--expr: [1/3] {3} + ¦ ¦--expr: ((( + [1/3] {3} ¦ ¦ ¦--'(': ( [0/0] {4} - ¦ ¦ ¦--expr: [0/0] {5} + ¦ ¦ ¦--expr: (( + 2 [0/0] {5} ¦ ¦ ¦ ¦--'(': ( [0/0] {6} - ¦ ¦ ¦ ¦--expr: [0/0] {7} + ¦ ¦ ¦ ¦--expr: ( + 2 + [0/0] {7} ¦ ¦ ¦ ¦ ¦--'(': ( [0/1] {8} - ¦ ¦ ¦ ¦ ¦--expr: [1/4] {10} + ¦ ¦ ¦ ¦ ¦--expr: 2 [1/4] {10} ¦ ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {9} ¦ ¦ ¦ ¦ °--')': ) [1/0] {11} ¦ ¦ ¦ °--')': ) [0/0] {12} ¦ ¦ °--')': ) [0/0] {13} ¦ °--'}': } [1/0] {14} - °--expr: [2/0] {15} + °--expr: { +{ + [2/0] {15} ¦--'{': { [0/0] {16} - ¦--expr: [1/0] {17} + ¦--expr: { + [1/0] {17} ¦ ¦--'{': { [0/8] {18} - ¦ ¦--expr: [1/5] {19} - ¦ ¦ ¦--expr: [0/0] {21} + ¦ ¦--expr: call( [1/5] {19} + ¦ ¦ ¦--expr: call [0/0] {21} ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call [0/0] {20} ¦ ¦ ¦--'(': ( [0/10] {22} - ¦ ¦ ¦--expr: [1/0] {23} - ¦ ¦ ¦ ¦--expr: [0/0] {25} + ¦ ¦ ¦--expr: call1 [1/0] {23} + ¦ ¦ ¦ ¦--expr: call1 [0/0] {25} ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: call1 [0/0] {24} ¦ ¦ ¦ ¦--'(': ( [0/0] {26} - ¦ ¦ ¦ ¦--expr: [0/0] {28} + ¦ ¦ ¦ ¦--expr: 2 [0/0] {28} ¦ ¦ ¦ ¦ °--NUM_CONST: 2 [0/0] {27} ¦ ¦ ¦ ¦--',': , [0/1] {29} - ¦ ¦ ¦ ¦--expr: [0/0] {31} + ¦ ¦ ¦ ¦--expr: 3 [0/0] {31} ¦ ¦ ¦ ¦ °--NUM_CONST: 3 [0/0] {30} ¦ ¦ ¦ °--')': ) [0/0] {32} ¦ ¦ ¦--',': , [0/10] {33} - ¦ ¦ ¦--expr: [1/1] {34} + ¦ ¦ ¦--expr: { + [1/1] {34} ¦ ¦ ¦ ¦--'{': { [0/15] {35} - ¦ ¦ ¦ ¦--expr: [1/2] {36} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {38} + ¦ ¦ ¦ ¦--expr: sin(c [1/2] {36} + ¦ ¦ ¦ ¦ ¦--expr: sin [0/0] {38} ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: sin [0/0] {37} ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {39} - ¦ ¦ ¦ ¦ ¦--expr: [0/0] {40} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {42} + ¦ ¦ ¦ ¦ ¦--expr: cos(p [0/0] {40} + ¦ ¦ ¦ ¦ ¦ ¦--expr: cos [0/0] {42} ¦ ¦ ¦ ¦ ¦ ¦ °--SYMBOL_FUNCTION_CALL: cos [0/0] {41} ¦ ¦ ¦ ¦ ¦ ¦--'(': ( [0/0] {43} - ¦ ¦ ¦ ¦ ¦ ¦--expr: [0/0] {45} + ¦ ¦ ¦ ¦ ¦ ¦--expr: pi [0/0] {45} ¦ ¦ ¦ ¦ ¦ ¦ °--SYMBOL: pi [0/0] {44} ¦ ¦ ¦ ¦ ¦ °--')': ) [0/0] {46} ¦ ¦ ¦ ¦ °--')': ) [0/0] {47} diff --git a/tests/testthat/unindention/mixed-out.R b/tests/testthat/unindention/mixed-out.R index 5dce16bf7..9c543fdf8 100644 --- a/tests/testthat/unindention/mixed-out.R +++ b/tests/testthat/unindention/mixed-out.R @@ -7,7 +7,8 @@ { { call( - call1(2, 3), { + call1(2, 3), + { sin(cos(pi)) } ) diff --git a/tests/testthat/unindention/vec_with_fun-in_tree b/tests/testthat/unindention/vec_with_fun-in_tree index b020c849e..d34be57bd 100644 --- a/tests/testthat/unindention/vec_with_fun-in_tree +++ b/tests/testthat/unindention/vec_with_fun-in_tree @@ -1,17 +1,18 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/0] {3} + °--expr: c(a= [0/0] {1} + ¦--expr: c [0/0] {3} ¦ °--SYMBOL_FUNCTION_CALL: c [0/0] {2} ¦--'(': ( [0/0] {4} ¦--SYMBOL_SUB: a [0/0] {5} ¦--EQ_SUB: = [0/1] {6} - ¦--expr: [0/0] {7} + ¦--expr: funct [0/0] {7} ¦ ¦--FUNCTION: funct [0/0] {8} ¦ ¦--'(': ( [0/0] {9} ¦ ¦--')': ) [0/1] {10} - ¦ 
°--expr: [0/0] {11} + ¦ °--expr: { + [0/0] {11} ¦ ¦--'{': { [0/5] {12} - ¦ ¦--expr: [1/2] {14} + ¦ ¦--expr: 33 [1/2] {14} ¦ ¦ °--NUM_CONST: 33 [0/0] {13} ¦ °--'}': } [1/0] {15} °--')': ) [0/0] {16} diff --git a/tests/testthat/unindention_regex/random_non_comment_indention-in_tree b/tests/testthat/unindention_regex/random_non_comment_indention-in_tree index b68288ffe..68bd330dc 100644 --- a/tests/testthat/unindention_regex/random_non_comment_indention-in_tree +++ b/tests/testthat/unindention_regex/random_non_comment_indention-in_tree @@ -1,16 +1,17 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) - °--expr: [0/0] {1} - ¦--expr: [0/1] {3} + °--expr: a <- [0/0] {1} + ¦--expr: a [0/1] {3} ¦ °--SYMBOL: a [0/0] {2} ¦--LEFT_ASSIGN: <- [0/1] {4} - °--expr: [0/0] {5} + °--expr: funct [0/0] {5} ¦--FUNCTION: funct [0/0] {6} ¦--'(': ( [0/0] {7} ¦--')': ) [0/1] {8} - °--expr: [0/0] {9} + °--expr: { + bb [0/0] {9} ¦--'{': { [0/1] {10} - ¦--expr: [1/2] {12} + ¦--expr: bbx [1/2] {12} ¦ °--SYMBOL: bbx [0/0] {11} - ¦--expr: [1/2] {14} + ¦--expr: x [1/2] {14} ¦ °--SYMBOL: x [0/0] {13} °--'}': } [1/0] {15} diff --git a/tests/testthat/unindention_regex/regex_force_no_pattern-in_tree b/tests/testthat/unindention_regex/regex_force_no_pattern-in_tree index fb06e915c..80aec2371 100644 --- a/tests/testthat/unindention_regex/regex_force_no_pattern-in_tree +++ b/tests/testthat/unindention_regex/regex_force_no_pattern-in_tree @@ -1,20 +1,21 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) ¦--COMMENT: # _ [0/0] {1} ¦--COMMENT: # l [1/0] {2} - °--expr: [1/0] {3} - ¦--expr: [0/1] {5} + °--expr: a <- [1/0] {3} + ¦--expr: a [0/1] {5} ¦ °--SYMBOL: a [0/0] {4} ¦--LEFT_ASSIGN: <- [0/1] {6} - °--expr: [0/0] {7} + °--expr: funct [0/0] {7} ¦--FUNCTION: funct [0/0] {8} ¦--'(': ( [0/0] {9} ¦--')': ) [0/1] {10} - °--expr: [0/0] {11} + °--expr: { + # [0/0] {11} ¦--'{': { [0/2] {12} ¦--COMMENT: ### . [1/2] {13} ¦--COMMENT: ### i [1/2] {14} - ¦--expr: [1/2] {15} - ¦ ¦--expr: [0/0] {17} + ¦--expr: q() [1/2] {15} + ¦ ¦--expr: q [0/0] {17} ¦ ¦ °--SYMBOL_FUNCTION_CALL: q [0/0] {16} ¦ ¦--'(': ( [0/0] {18} ¦ °--')': ) [0/0] {19} diff --git a/tests/testthat/unindention_regex/regex_force_with_pattern-in_tree b/tests/testthat/unindention_regex/regex_force_with_pattern-in_tree index 1084a9b3f..66c1cda0a 100644 --- a/tests/testthat/unindention_regex/regex_force_with_pattern-in_tree +++ b/tests/testthat/unindention_regex/regex_force_with_pattern-in_tree @@ -1,20 +1,21 @@ ROOT (token: short_text [lag_newlines/spaces] {pos_id}) ¦--COMMENT: # _ [0/0] {1} ¦--COMMENT: # l [1/0] {2} - °--expr: [1/0] {3} - ¦--expr: [0/1] {5} + °--expr: a <- [1/0] {3} + ¦--expr: a [0/1] {5} ¦ °--SYMBOL: a [0/0] {4} ¦--LEFT_ASSIGN: <- [0/1] {6} - °--expr: [0/0] {7} + °--expr: funct [0/0] {7} ¦--FUNCTION: funct [0/0] {8} ¦--'(': ( [0/0] {9} ¦--')': ) [0/1] {10} - °--expr: [0/0] {11} + °--expr: { + ## [0/0] {11} ¦--'{': { [0/1] {12} ¦--COMMENT: ### . [1/5] {13} ¦--COMMENT: ### i [1/5] {14} - ¦--expr: [1/4] {15} - ¦ ¦--expr: [0/0] {17} + ¦--expr: q() [1/4] {15} + ¦ ¦--expr: q [0/0] {17} ¦ ¦ °--SYMBOL_FUNCTION_CALL: q [0/0] {16} ¦ ¦--'(': ( [0/0] {18} ¦ °--')': ) [0/0] {19} diff --git a/tic.R b/tic.R deleted file mode 100644 index b5bcfcdf4..000000000 --- a/tic.R +++ /dev/null @@ -1,16 +0,0 @@ -add_package_checks(notes_are_errors = getRversion() >= "3.2") - -if (Sys.getenv("id_rsa") != "" && ci()$is_tag() && Sys.getenv("BUILD_PKGDOWN") != "") { - # pkgdown documentation can be built optionally. 
Other example criteria: - # - `inherits(ci(), "TravisCI")`: Only for Travis CI - # - `ci()$is_tag()`: Only for tags, not for branches - # - `Sys.getenv("BUILD_PKGDOWN") != ""`: If the env var "BUILD_PKGDOWN" is set - # - `Sys.getenv("TRAVIS_EVENT_TYPE") == "cron"`: Only for Travis cron jobs - get_stage("before_deploy") %>% - add_step(step_setup_ssh()) - - get_stage("deploy") %>% - add_step(step_build_pkgdown()) %>% - add_code_step(writeLines("styler.r-lib.org", "docs/CNAME")) %>% - add_step(step_push_deploy("docs", "gh-pages")) -} diff --git a/touchstone/.gitignore b/touchstone/.gitignore new file mode 100644 index 000000000..3cda5045e --- /dev/null +++ b/touchstone/.gitignore @@ -0,0 +1,4 @@ +* +!.gitignore +!config.json +!script.R diff --git a/touchstone/config.json b/touchstone/config.json new file mode 100644 index 000000000..84f9d3441 --- /dev/null +++ b/touchstone/config.json @@ -0,0 +1,9 @@ +{ + "os": "ubuntu-latest", + "r": "4.2", + "rspm": "https://packagemanager.rstudio.com/all/__linux__/focal/2023-03-01", + "benchmarking_repo": "lorenzwalthert/here", + "benchmarking_ref": "bf0167746da7fe4fb156082bad93c9e5cd3386bd", + "benchmarking_path": "touchstone/sources/here", + +} diff --git a/touchstone/script.R b/touchstone/script.R new file mode 100644 index 000000000..1bbcec1df --- /dev/null +++ b/touchstone/script.R @@ -0,0 +1,50 @@ +library(touchstone) + +branch_install() + +clear_branch_caches <- function() { + purrr::walk(c(branch_get_or_fail("GITHUB_BASE_REF"), branch_get_or_fail("GITHUB_HEAD_REF")), + styler::cache_clear, + ask = FALSE + ) +} + +clear_branch_caches() + + +benchmark_run( + expr_before_benchmark = { + library(styler) + cache_deactivate() + }, + without_cache = style_pkg("touchstone/sources/here", filetype = c("R", "rmd")), + n = 30 +) + +clear_branch_caches() +benchmark_run( + expr_before_benchmark = { + library(styler) + cache_activate(gert::git_branch()) + }, + cache_applying = style_pkg("touchstone/sources/here", filetype = c("R", "rmd")), + n = 30 +) + +clear_branch_caches() + +benchmark_run( + expr_before_benchmark = { + library(styler) + cache_activate(gert::git_branch()) + }, + cache_recording = { + gert::git_reset_hard(repo = "touchstone/sources/here") + style_pkg("touchstone/sources/here", filetype = c("R", "rmd")) + }, + n = 30 +) + +clear_branch_caches() + +benchmark_analyze() diff --git a/vignettes/.gitignore b/vignettes/.gitignore new file mode 100644 index 000000000..3c0f6bdf8 --- /dev/null +++ b/vignettes/.gitignore @@ -0,0 +1,2 @@ +*.R +*.html diff --git a/vignettes/caching.Rmd b/vignettes/caching.Rmd new file mode 100644 index 000000000..b15c7b92a --- /dev/null +++ b/vignettes/caching.Rmd @@ -0,0 +1,54 @@ +--- +title: "Caching" +output: rmarkdown::html_vignette +vignette: > + %\VignetteIndexEntry{Caching} + %\VignetteEngine{knitr::rmarkdown} + %\VignetteEncoding{UTF-8} +--- + +```{r, include = FALSE} +knitr::opts_chunk$set( + collapse = TRUE, + comment = "#>" +) +options(styler.colored_print.vertical = FALSE) +styler::cache_deactivate() +``` + +```{r setup} +library(styler) +``` + +This is a developer vignette to explain how caching works and what we learned on the way. + +The main caching features were implemented in the following two pull requests: + +- #538: Implemented simple caching and utilities for managing caches. Input text is styled as a whole and added to the cache afterwards. This makes most sense given that the very same expression will probably never be passed to styler, unless it is already compliant with the style guide. 
Apart from the (negligible) inode, caching text has a memory cost of 0. Speed boosts only result if the whole text passed to styler is compliant to the style guide in use. Changing one line in a file with hundreds of lines means each line will be styled again. This is a major drawback and makes the cache only useful for a use with a pre-commit framework (the initial motivation) or when functions like `style_pkg()` are run often and most files were not changed. + +- #578: Adds a second layer of caching by caching top-level expressions individually. This will bring speed boosts to the situation where very little is changed but there are many top-level expressions. Hence, changing one line in a big file will invalidate the cache for the expression the line is part of, i.e. when changing `x <- 2` to `x = 2` below, styler will have to restyle the function definition, but not `another(call)` and all other expressions that were not changed. + +```{r, eval = FALSE} +function() { + # a comment + x <- 2 # <- change this line +} + +another(call) +``` + +While #538 also required a lot of thought, this is not necessarily visible in the diff. The main challenge was to figure out how the caching should work conceptually and where we best insert the functionality as well as how to make caching work for edge cases like trailing blank lines etc. For details on the conceptual side and requirements, see #538. + +In comparison, the diff in #578 is much larger. We can walk through the main changes introduced here: + +- Each nest gained a column *is_cached* to indicate if an expression is cached. It's only ever set for the top-level nest, but set to `NA` for all other nests. Also, comments are not cached because they are essentially top level terminals which are very cheap to style (also because hardly any rule concerns them) and because each comment is a top-level expression, simply styling them is cheaper than checking for each of them if it is in the cache. + +- Each nest also gained a column *block* to denote the block to which it belongs for styling. Running each top-level expression through `parse_transform_serialize_r()` separately is relatively expensive. We prefer to put multiple top-level expressions into a block and process the block. This is done with `parse_transform_serialize_r_block()`. Note that before we implemented this PR, all top-level expressions were sent through `parse_transform_serialize_r()` as one block. Leaving out some exceptions in this explanation, we always put uncached top-level expressions in a block and cached top-level expressions into a block and then style the uncached ones. + +- Apart from the actual styling, a very costly part of formatting code with styler is to compute the nested parse data with `compute_parse_data_nested()`. When caching top-level expressions, it is evident that building up the nested structure for cached code is unnecessary because we don't actually style it, but simply return `text`. For this reason, we introduce the concept of a shallow nest. It can only occur at the top level. For the top-level expressions we know that they are cached, we remove all children before building up the nested parse table and let them act as `terminals` and will later simply return their `text`. Hence, in the nested parse table, no cached expressions have children. 
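From the user's side, the effect of the expression-level cache sketched in the bullets above can be illustrated roughly as follows (a hedged sketch, not one of the vignette's evaluated chunks; the cache name is arbitrary and only exported cache helpers are used):

```r
library(styler)
cache_activate("caching-demo") # throw-away cache with an arbitrary name

code <- c(
  "function() {",
  "  # a comment",
  "  x <- 2",
  "}",
  "",
  "another(call)"
)

style_text(code)                          # both top-level expressions are styled and cached
style_text(sub("x <- 2", "x = 2", code))  # only the edited function block is restyled;
                                          # `another(call)` is served from the cache
cache_info()                              # inspect cache size and activation state
cache_clear("caching-demo", ask = FALSE)  # clean up the throw-away cache
cache_deactivate()
```
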
+ +- Because we now style blocks of expressions and we want to preserve the line breaks between them, we need to keep track of all blank lines between expressions, which was not necessary previously because all expressions were in a block and the blank lines separating them were stored in `newlines` and `lag_newlines` except for all blank lines before the first expression. + +- Because we wanted to cache by expression, but process by block of expression, we needed to decompose the block into individual expressions and add them to the cache once we obtained the final text. We could probably also have added expressions to the cache before we put the text together, but the problem is that at some point we turn the nested structure into a flat structure and as this must happen with a `post_visit()` approach, we'd have to implement a complicated routine to check if we are now about to put together all top-level expressions and then if yes write them to the cache. A simple (but maybe not so elegant) parsing of the output as implemented in `cache_by_expression()` seemed reasonable in terms of limiting complexity and keeping efficiency. + +For more detailed explanation and documentation, please consult the help files of the internals. diff --git a/vignettes/customizing_styler.Rmd b/vignettes/customizing_styler.Rmd index 40d7b81fe..e566daabd 100644 --- a/vignettes/customizing_styler.Rmd +++ b/vignettes/customizing_styler.Rmd @@ -1,7 +1,5 @@ --- title: "Customizing styler" -author: "Lorenz Walthert" -date: "8/10/2017" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Customizing styler} @@ -9,101 +7,63 @@ vignette: > %\VignetteEncoding{UTF-8} --- -This vignette provides a high-level overview of how styler works and how you -can define your own style guide and format code according to it. +```{r} +knitr::opts_chunk$set( + collapse = TRUE, + comment = "#>" +) +options(styler.colored_print.vertical = FALSE) +styler::cache_deactivate() +``` + +This vignette provides a high-level overview of how styler works and how you can define your own style guide and format code according to it. If you simply want to customize the tidyverse style guide to your needs, check out `vignette("styler")`, to remove some rules, have a look at `vignette("remove_rules")`. How to distribute a custom style guide is described in `vignette("distribute_custom_style_guides")`. # How styler works There are three major steps that styler performs in order to style code: -1. Create an abstract syntax tree (AST) from `utils::getParseData()` that - contains positional information for every token. We call this a nested parse - table. You can learn more about how exactly this is done in the vignettes - "Data Structures" and "Manipulating the nested parse table". -2. Apply transformer functions at each level of the nested parse table. We use a - visitor approach, i.e. a function that takes functions as arguments and - applies them to every level of nesting. You can find out more about it on - the help file for `visit()`. Note that the function is not exported by styler. - The visitor will take care of applying the functions on every - level of nesting - and we can supply transformer functions that operate on - one level of nesting. In the sequel, we use the term *nest* to refer to - such a parse table at one level of nesting. A *nest* always represents a - complete expression. 
Before we apply the transformers, we have to initialize - two columns `lag_newlines` and `spaces`, which contain - the number of line breaks before the token and the number of spaces - after the token. These will be the columns that most of our transformer - functions will modify. -3. Serialize the nested parse table, that is, extract the terminal tokens from - the nested parse table and add spaces and line breaks between them as - specified in the nested parse table. - -The `transformers` argument is, apart from the code to style, the key argument -of functions such as `style_text()` and friends. By default, it is created -via the `style` argument. The transformers are a named list of transformer -functions and other arguments passed to styler. To use the default style guide -of styler ([the tidyverse style guide](http://style.tidyverse.org/)), call -`tidyverse_style()` to get the list of the transformer functions. Let's quickly -look at what those are. +1. Create an abstract syntax tree (AST) from `utils::getParseData()` that contains positional information for every token. We call this a nested parse table. + +2. Apply transformer functions at each level of the nested parse table. We use a visitor approach, i.e. a function that takes functions as arguments and applies them to every level of nesting. You can find out more about it on the help file for `visit()`. Note that the function is not exported by styler. The visitor will take care of applying the functions on every level of nesting - and we can supply transformer functions that operate on one level of nesting. In the sequel, we use the term *nest* to refer to such a parse table at one level of nesting. A *nest* always represents a complete expression. Before we apply the transformers, we have to initialize two columns `lag_newlines` and `spaces`, which contain the number of line breaks before the token and the number of spaces after the token. These will be the columns that most of our transformer functions will modify. + +3. Serialize the nested parse table, that is, extract the terminal tokens from the nested parse table and add spaces and line breaks between them as specified in the nested parse table. + +The `transformers` argument is, apart from the code to style, the key argument of functions such as `style_text()` and friends. By default, it is created via the `style` argument. The transformers are a named list of transformer functions and other arguments passed to styler. To use the default style guide of styler ([the tidyverse style guide](https://style.tidyverse.org/)), call `tidyverse_style()` to get the list of the transformer functions. Let's quickly look at what those are. ```{r, message = FALSE} library("styler") +cache_deactivate() library("dplyr") names(tidyverse_style()) str(tidyverse_style(), give.attr = FALSE, list.len = 3) ``` -We note that there are different types of transformer functions. `initialize` -initializes some variables in the nested parse table (so it is not actually a -transformer), and the other elements modify either spacing, line breaks or -tokens. `use_raw_indention` is not a function, it is just an option. All -transformer functions have a similar structure. Let's take a look at one: +We note that there are different types of transformer functions. `initialize` initializes some variables in the nested parse table (so it is not actually a transformer), and the other elements modify either spacing, line breaks or tokens. `use_raw_indention` is not a function, it is just an option. 
All transformer functions have a similar structure. Let's take a look at one: ```{r} tidyverse_style()$space$remove_space_after_opening_paren ``` -As the name says, this function removes spaces after the opening parenthesis. -But how? Its input is a *nest*. Since the visitor will go through all levels of -nesting, we just need a function that can be applied to a *nest*, that is, to a -parse table at one level of nesting. We can compute the nested parse table and -look at one of the levels of nesting that is interesting for us (more on the -data structure in the vignettes "Data structures" and "Manipulating the parse -table"): +As the name says, this function removes spaces after the opening parenthesis. But how? Its input is a *nest*. Since the visitor will go through all levels of nesting, we just need a function that can be applied to a *nest*, that is, to a parse table at one level of nesting. We can compute the nested parse table and look at one of the levels of nesting that is interesting for us (more on the data structure in the vignettes "Data structures" and "Manipulating the parse table"): ```{r} string_to_format <- "call( 3)" pd <- styler:::compute_parse_data_nested(string_to_format) %>% - styler:::pre_visit(c(default_style_guide_attributes)) + styler:::pre_visit_one(default_style_guide_attributes) pd$child[[1]] %>% select(token, terminal, text, newlines, spaces) ``` -`default_style_guide_attributes()` is called to initialize some variables, it does not actually -transform the parse table. - -All the function `remove_space_after_opening_paren()` now does is to look for -the opening bracket and set the column `spaces` of the token to zero. Note that -it is very important to check whether there is also a line break following after -that token. If so, `spaces` should not be touched because of the way `spaces` -and `newlines` are defined. `spaces` are the number of spaces after a token and -`newlines`. Hence, if a line break follows, spaces are not EOL spaces, but -rather the spaces directly before the next token. If there was a line break -after the token and the rule did not check for that, indention for the token -following `(` would be removed. This would be unwanted for example if -`use_raw_indention` is set to `TRUE` (which means indention should not be -touched). If we apply the rule to our parse table, we can see that the column -`spaces` changes and is now zero for all tokens: +`default_style_guide_attributes()` is called to initialize some variables, it does not actually transform the parse table. + +All the function `remove_space_after_opening_paren()` now does is to look for the opening bracket and set the column `spaces` of the token to zero. Note that it is very important to check whether there is also a line break following after that token. If so, `spaces` should not be touched because of the way `spaces` and `newlines` are defined. `spaces` are the number of spaces after a token and `newlines`. Hence, if a line break follows, spaces are not EOL spaces, but rather the spaces directly before the next token. If there was a line break after the token and the rule did not check for that, indention for the token following `(` would be removed. This would be unwanted for example if `use_raw_indention` is set to `TRUE` (which means indention should not be touched). 
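In rough terms, such a rule could be written as follows (an illustration only, assuming the columns described above; the real internal function printed earlier may handle more tokens and edge cases):

```r
# Illustrative sketch of a spacing rule operating on one nest `pd_flat`:
# drop the space after '(' and '[' unless a line break follows, so that
# indention (and `use_raw_indention`) stays untouched.
remove_space_after_opening_paren_sketch <- function(pd_flat) {
  paren <- pd_flat$token %in% c("'('", "'['")
  pd_flat$spaces[paren & pd_flat$newlines == 0L] <- 0L
  pd_flat
}
```
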
If we apply the rule to our parse table, we can see that the column `spaces` changes and is now zero for all tokens: ```{r} styler:::remove_space_after_opening_paren(pd$child[[1]]) %>% select(token, terminal, text, newlines, spaces) ``` -All top-level styling functions have a `style` argument (which defaults -to `tidyverse_style`). If you check out the help file, you can see that the -argument `style` is only used to create the default `transformers` argument, -which defaults to `style(...)`. This allows for the styling options to be -set without having to specify them inside the function passed to `transformers`. +All top-level styling functions have a `style` argument (which defaults to `tidyverse_style`). If you check out the help file, you can see that the argument `style` is only used to create the default `transformers` argument, which defaults to `style(...)`. This allows for the styling options to be set without having to specify them inside the function passed to `transformers`. Let's clarify this with an example. The following yields the same result: @@ -115,90 +75,66 @@ all.equal( ) ``` -Now let's do the whole styling of a string with just this one transformer -introduced above. We do this by first creating a style guide with the designated -wrapper function `create_style_guide()`. It takes transformer functions as input -and returns them in a named list that meets the formal requirements for styling -functions. +Now let's do the whole styling of a string with just this one transformer introduced above. We do this by first creating a style guide with the designated wrapper function `create_style_guide()`. It takes transformer functions as input and returns them in a named list that meets the formal requirements for styling functions. We also set a name and version of the style guide according to the convention outlined in `create_style_guide()`. ```{r} space_after_opening_style <- function(are_you_sure) { - create_style_guide(space = if (are_you_sure) styler:::remove_space_after_opening_paren) + create_style_guide( + space = list(remove_space_after_opening_paren = + if (are_you_sure) styler:::remove_space_after_opening_paren), + style_guide_name = "styler::space_after_opening_style@https://github.com/r-lib/styler", + style_guide_version = read.dcf(here::here("DESCRIPTION"))[, "Version"] + ) } -style_text("call( 1,1)", style = space_after_opening_style, are_you_sure = FALSE) ``` -Well, we probably want: +Make sure to also disable caching during development with `cache_deactivate()` because styling the same text with a different style guide that has the same version and name will fool the cache invalidation in the case your style guide has transformer functions with different function bodies. Make sure to increment the version number of your style guide with every release. It should correspond to the version of the package from which you export your style guide. + +We can not try the style guide: ```{r} style_text("call( 1,1)", style = space_after_opening_style, are_you_sure = TRUE) ``` -Note that the return value of your `style` function may not contain `NULL` -elements. +Note that the return value of your `style` function may not contain `NULL` elements. -I hope you have acquired a basic understanding of how styler transforms code. -You can provide your own transformer functions and use `create_style_guide()` -to create customized code styling. If you do so, there are a few more things you -should be aware of, which are described in the next section. 
+I hope you have acquired a basic understanding of how styler transforms code. You can provide your own transformer functions and use `create_style_guide()` to create customized code styling. If you do so, there are a few more things you should be aware of, which are described in the next section. # Implementation details -For both spaces and line break information in the nested parse table, we use -four attributes in total: `newlines`, `lag_newlines`, `spaces`, and -`lag_spaces`. `lag_spaces` is created from `spaces` only just before the parse -table is serialized, so it is not relevant for manipulating the parse table as -described above. These columns are to some degree redundant, but with just lag -or lead, we would lose information on the first or the last element -respectively, so we need both. - -The sequence in which styler applies rules on each level of nesting is given in -the list below: - -* call `default_style_guide_attributes()` to initialize some variables. -* modify the line breaks (modifying `lag_newlines` only based on - `token`, `token_before`, `token_after` and `text`). -* modify the spaces (modifying `spaces` only based on `lag_newlines`, - `newlines`, `multi_line`, `token`, `token_before`, `token_after` and `text`). -* modify the tokens (based on `newlines` `lag_newlines`, `spaces` `multi_line`, - `token`, `token_before`, `token_after` and `text`). -* modify the indention by changing `indention_ref_id` (based on `newlines` - `lag_newlines`, `spaces` `multi_line`, `token`, `token_before`, `token_after` - and `text`). - -You can also look this up in the function that applies the transformers: -`apply_transformers()`: +For both spaces and line break information in the nested parse table, we use four attributes in total: `newlines`, `lag_newlines`, `spaces`, and `lag_spaces`. `lag_spaces` is created from `spaces` only just before the parse table is serialized, so it is not relevant for manipulating the parse table as described above. These columns are to some degree redundant, but with just lag or lead, we would lose information on the first or the last element respectively, so we need both. + +The sequence in which styler applies rules on each level of nesting is given in the list below: + +- call `default_style_guide_attributes()` to initialize some variables. + +- modify the line breaks (modifying `lag_newlines` only based on `token`, `token_before`, `token_after` and `text`). + +- modify the spaces (modifying `spaces` only based on `lag_newlines`, `newlines`, `multi_line`, `token`, `token_before`, `token_after` and `text`). + +- modify the tokens (based on `newlines` `lag_newlines`, `spaces` `multi_line`, `token`, `token_before`, `token_after` and `text`). + +- modify the indention by changing `indention_ref_id` (based on `newlines` `lag_newlines`, `spaces` `multi_line`, `token`, `token_before`, `token_after` and `text`). + +You can also look this up in the function that applies the transformers: `apply_transformers()`: ```{r} styler:::apply_transformers ``` -This means that the order of the styling is clearly defined and it is for -example not possible to modify line breaks based on spacing, because spacing -will be set after line breaks are set. 
Do not rely on the column `col1`, -`col2`, `line1` and `line2` in the parse table in any of your functions since -these columns only reflect the position of tokens at the point of parsing, +This means that the order of the styling is clearly defined and it is for example not possible to modify line breaks based on spacing, because spacing will be set after line breaks are set. Do not rely on the column `col1`, `col2`, `line1` and `line2` in the parse table in any of your functions since these columns only reflect the position of tokens at the point of parsing, + i.e. they are not kept up to date throughout the process of styling. -Also, as indicated above, work with `lag_newlines` only in your line break -rules. For development purposes, you may also want to use the unexported -function `test_collection()` to help you with testing your style guide. You can -find more information in the help file for the function. +Also, as indicated above, work with `lag_newlines` only in your line break rules. For development purposes, you may also want to use the unexported function `test_collection()` to help you with testing your style guide. You can find more information in the help file for the function. -If you write functions that modify spaces, don't forget to make sure that you -don't modify EOL spacing, since that is needed for `use_raw_indention`, as -highlighted previously. +If you write functions that modify spaces, don't forget to make sure that you don't modify EOL spacing, since that is needed for `use_raw_indention`, as highlighted previously. -Finally, take note of the naming convention. All function names starting with -`set-*` correspond to the `strict` option, that is, setting some value to an -exact number. `add-*` is softer. For example, `add_spaces_around_op()`, only -makes sure that there is at least one space around operators, but if the -code to style contains multiple, the transformer will not change that. +Finally, take note of the naming convention. All function names starting with `set-*` correspond to the `strict` option, that is, setting some value to an exact number. `add-*` is softer. For example, `add_spaces_around_op()`, only makes sure that there is at least one space around operators, but if the code to style contains multiple, the transformer will not change that. # Showcasing the development of a styling rule -For illustrative purposes, we create a new style guide that has one rule only: -Curly braces are always on a new line. So for example: +For illustrative purposes, we create a new style guide that has one rule only: Curly braces are always on a new line. So for example: ```{r} add_one <- function(x) { @@ -215,9 +151,7 @@ add_one <- function(x) } ``` -We first need to get familiar with the structure of the nested parse table. -Note that the structure of the nested parse table is not affected by the -position of line breaks and spaces. Let's first create the nested parse table. +We first need to get familiar with the structure of the nested parse table. Note that the structure of the nested parse table is not affected by the position of line breaks and spaces. Let's first create the nested parse table. 
```{r} code <- c("add_one <- function(x) { x + 1 }") @@ -227,40 +161,38 @@ code <- c("add_one <- function(x) { x + 1 }") styler:::create_tree(code) ``` - ## levelName - ## 1 ROOT (token: short_text [lag_newlines/spaces] {id}) - ## 2 °--expr: [0/0] {23} - ## 3 ¦--expr: [0/1] {3} - ## 4 ¦ °--SYMBOL: add_o [0/0] {1} - ## 5 ¦--LEFT_ASSIGN: <- [0/1] {2} - ## 6 °--expr: [0/0] {22} - ## 7 ¦--FUNCTION: funct [0/0] {4} - ## 8 ¦--'(': ( [0/0] {5} - ## 9 ¦--SYMBOL_FORMALS: x [0/0] {6} - ## 10 ¦--')': ) [0/1] {7} - ## 11 °--expr: [0/0] {19} - ## 12 ¦--'{': { [0/1] {9} - ## 13 ¦--expr: [0/1] {16} - ## 14 ¦ ¦--expr: [0/1] {12} - ## 15 ¦ ¦ °--SYMBOL: x [0/0] {10} - ## 16 ¦ ¦--'+': + [0/1] {11} - ## 17 ¦ °--expr: [0/0] {14} - ## 18 ¦ °--NUM_CONST: 1 [0/0] {13} - ## 19 °--'}': } [0/0] {15} + ## levelName + ## 1 ROOT (token: short_text [lag_newlines/spaces] {id}) + ## 2 °--expr: [0/0] {23} + ## 3 ¦--expr: [0/1] {3} + ## 4 ¦ °--SYMBOL: add_o [0/0] {1} + ## 5 ¦--LEFT_ASSIGN: <- [0/1] {2} + ## 6 °--expr: [0/0] {22} + ## 7 ¦--FUNCTION: funct [0/0] {4} + ## 8 ¦--'(': ( [0/0] {5} + ## 9 ¦--SYMBOL_FORMALS: x [0/0] {6} + ## 10 ¦--')': ) [0/1] {7} + ## 11 °--expr: [0/0] {19} + ## 12 ¦--'{': { [0/1] {9} + ## 13 ¦--expr: [0/1] {16} + ## 14 ¦ ¦--expr: [0/1] {12} + ## 15 ¦ ¦ °--SYMBOL: x [0/0] {10} + ## 16 ¦ ¦--'+': + [0/1] {11} + ## 17 ¦ °--expr: [0/0] {14} + ## 18 ¦ °--NUM_CONST: 1 [0/0] {13} + ## 19 °--'}': } [0/0] {15} + ```{r} pd <- styler:::compute_parse_data_nested(code) ``` -The token of interest here has id number 10. Let's navigate there. Since -line break rules manipulate the lags *before* the token, we need to change -`lag_newlines` at the token "'{'". +The token of interest here has id number 10. Let's navigate there. Since line break rules manipulate the lags *before* the token, we need to change `lag_newlines` at the token "'{'". ```{r} pd$child[[1]]$child[[3]]$child[[5]] ``` -Remember what we said above: A transformer takes a flat parse table as input, -updates it and returns it. So here it's actually simple: +Remember what we said above: A transformer takes a flat parse table as input, updates it and returns it. So here it's actually simple: ```{r} set_line_break_before_curly_opening <- function(pd_flat) { @@ -270,12 +202,15 @@ set_line_break_before_curly_opening <- function(pd_flat) { } ``` -Almost done. Now, the last thing we need to do is to use `create_style_guide()` -to create our style guide consisting of that function. +Almost done. Now, the last thing we need to do is to use `create_style_guide()` to create our style guide consisting of that function. ```{r} set_line_break_before_curly_opening_style <- function() { - create_style_guide(line_break = set_line_break_before_curly_opening) + create_style_guide( + line_break = list(set_line_break_before_curly_opening), + style_guide_name = "styler::set_line_break_before_curly_opening_style@https://github.com/r-lib/styler", + style_guide_version = read.dcf(here::here("DESCRIPTION"))[, "Version"] + ) } ``` @@ -285,8 +220,7 @@ Now you can style your string according to it. 
style_text(code, style = set_line_break_before_curly_opening_style) ``` -Note that when removing line breaks, always take care of comments, since you -don't want: +Note that when removing line breaks, always take care of comments, since you don't want: ```{r, eval = FALSE} a <- function() # comments should remain EOL @@ -303,14 +237,14 @@ a <- function() # comments should remain EOL { } ``` -The easiest way of taking care of that is not applying the rule if there is a -comment before the token of interest, which can be checked for within your -transformer function. The transformer function from the tidyverse style that -removes line breaks before the curly opening bracket looks as follows: +The easiest way of taking care of that is not applying the rule if there is a comment before the token of interest, which can be checked for within your transformer function. The transformer function from the tidyverse style that removes line breaks before the round closing bracket that comes after a curly brace looks as follows: ```{r} -styler:::remove_line_break_before_curly_opening +styler:::remove_line_break_before_round_closing_after_curly ``` -With our example function `set_line_break_before_curly_opening()` we don't need -to worry about that as we are only adding line breaks, but we don't remove them. +With our example function `set_line_break_before_curly_opening()` we don't need to worry about that as we are only adding line breaks, but we don't remove them. + +# Cache invalidation + +Note that it if you re-distribute the style guide, it's your responsibility to set the version and the style guide name in `create_style_guide()` correctly. If you distribute a new version of your style guide and you don't increment the version number, it might have drastic consequences for your user: Under some circumstances (see `help("cache_make_key")`), your new style guide won't invalidate the cache although it should and applying your style guide to code that has previously been styled won't result in any change. There is currently no mechanism in styler that prevents you from making this mistake. diff --git a/vignettes/detect-alignment.Rmd b/vignettes/detect-alignment.Rmd new file mode 100644 index 000000000..b3872d7f5 --- /dev/null +++ b/vignettes/detect-alignment.Rmd @@ -0,0 +1,168 @@ +--- +title: "Alignment detection" +output: rmarkdown::html_vignette +vignette: > + %\VignetteIndexEntry{Alignment detection} + %\VignetteEngine{knitr::rmarkdown} + %\VignetteEncoding{UTF-8} +--- + +```{r, include=FALSE} +knitr::opts_chunk$set( + eval = FALSE, + collapse = TRUE, + comment = "#>" +) +styler::cache_deactivate() +``` + +# Overview + +Sometimes, you deliberately align code to make it more readable. + +```{r} +call( + a = 3, + bre = 3213232 +) +``` + +Until styler 1.1.1.9002 (with `strict = TRUE`, e.g. as in `styler::style_file(..., strict = TRUE)`), this was formatted as follows: + +```{r} +call( + a = 3, + bre = 3213232 +) +``` + +because no alignment detection was built in.[^1] + +[^1]: With `strict = FALSE`, the spacing would have been kept, however, `strict = FALSE` has a number of other implications because it is in general less invasive. For example, it would not add braces and line breaks to "if (TRUE) return()". + +styler \>= 1.1.1.9003 detects aforementioned alignment for function calls. This vignette describes how aligned code is defined in styler and gives some examples so users can format their aligned code to match the definition styler uses to ensure their code is not unintentionally reformatted. 
+ +## Examples + +These typical examples match *styler*'s definition of alignment. Note the spacing around operators and commas. + +```{r} +tibble::tribble( + ~key_here, ~right_aligned, + "left", "right", # comments are allowed + "long string", "shrt" # columns can overlap ('~' above ',') +) + +tibble::tribble( + ~key_here, ~left_aligned, + "left", "right", # comments are allowed + "long string", "shrt" # columns can overlap ('~' above ',') +) + +# right-aligned after = +purrr::map(x, fun, # arguments on same line as opening brace are not considered + arg2 = 2, + ar = f(k, x) +) + +# left aligned after = +purrr::map(x, fun, # arguments on same line as opening brace are not considered + arg2 = 2, + ar = f(k, x) +) +``` + +# Details + +An important definition used in the remainder is the one of a **column**. All arguments of a function call that have the same position but are placed on different lines form a column. The below call shows a call with two columns and two rows. Columns separate arguments of the function call, so the separator is the comma. The first row is named because all arguments are named, the second is unnamed: + +```{r} +call( + # column 1 | column 2 | + abkj = f(2), 7, # | row 1 + more_ = "a", 2 # | row 2 +) +``` + +**For alignment detection, the first column is omitted if not all arguments in that column are named** + +## Function calls + +Below, we try to explain in an intuitive way how your code should look like to be recognized as aligned. + +Make commas match position vertically and align everything right before commas: + +```{r} +# all arguments of first column named -> must right align values after `=`, +# one or more spaces around `=`, none before and at least one after the comma. +# aligned if the (imaginary) comma on the last line is in line with the commas +fell( + x = 1, + y = 23, + zz = NULL +) + +# this works also with more than one column +fell( + x = 1, annoying = 3, + y = 23, # nothing in column 2 for row 2 + zz = NULL, finally = "stuff" +) + +# or if not all arguments of the first column are named +gell( + p = 2, g = gg(x), n = 3 * 3, # + 31, fds = -1, gz = f / 3, +) +``` + +... or match position of `=` vertically and align everything after this operator left + +```{r} +# all arguments of first column named -> must left align values after `=`, +# at least one space before `=`, exactly one after, none before and at least one +# after the comma. +# aligned if the first values after `=` are aligned (and exactly one space after +# `=`) +fell( + x = 1, + y = 23, + zz = NULL +) + +# this works also with more than one column +fell( + x = 1, annoying = 3, + y = 23, # nothing in column 2 for row 2 + zz = NULL, finally = "stuff" +) + +# or if not all arguments of the first column are named +gell( + p = 2, g = gg(x), n = 3 * 3, # + 31, fds = -1, gz = f / 3 + 1, +) +``` + +... or match the start of the token after `,` + +```{r} +call( + x = 2, p = "another", + y = "hhjkjkbew", x = 3 +) + +tibble::tribble( + ~x, ~y, + "another", 1:3, + "b", 1211234 +) +``` + +## Comments + +not supported yet. + +## Assignment + +not supported yet. 
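For illustration, alignment around assignment operators such as in the snippet below is currently not detected, so with the default `strict = TRUE` the extra spaces are removed (the exact behaviour may change once support is added):

```{r}
# not detected (yet): the spaces before `<-` are reduced to one
x                <- 1
fairly_long_name <- 2
```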
diff --git a/vignettes/distribute_custom_style_guides.Rmd b/vignettes/distribute_custom_style_guides.Rmd new file mode 100644 index 000000000..031a13876 --- /dev/null +++ b/vignettes/distribute_custom_style_guides.Rmd @@ -0,0 +1,59 @@ +--- +title: "Distribute custom style guides" +output: rmarkdown::html_vignette +vignette: > + %\VignetteIndexEntry{Distribute custom style guides} + %\VignetteEngine{knitr::rmarkdown} + %\VignetteEncoding{UTF-8} +--- + +```{r, include = FALSE} +knitr::opts_chunk$set( + collapse = TRUE, + comment = "#>" +) +styler::cache_deactivate() +options(styler.colored_print.vertical = FALSE) +``` + +This vignette describes how you can distribute your own style guide. It builds on `vignette("customizing_styler")` and assumes you understand how to create a style guide with `create_style_guide()`. + +## Reference implementations + +There are a few packages that implement a third-party style guide that are maintained by styler contributors: + +- [lorenzwalthert/styler.nocomments](https://github.com/lorenzwalthert/styler.nocomments) +- [lorenzwalthert/semicoloner](https://github.com/lorenzwalthert/semicoloner) +- [lorenzwalthert/oneliner](https://github.com/lorenzwalthert/oneliner) +- [mlr-org/styler.mlr](https://github.com/mlr-org/styler.mlr) + +Other available style guides include: + +- [ropensci-review-tools/spaceout](https://github.com/ropensci-review-tools/spaceout) +- [gadenbuie/grkstyle](https://github.com/gadenbuie/grkstyle) + +To start out, you can use the [GitHub Template](https://github.com/lorenzwalthert/styler.yours) for third-party style guides that has already the right directory structure and patterns described below in place. + +You made one too? Please submit a PR to include it in the list. + +## Design patterns + +The style guides mentioned above follow best practices and can serve as a good and rather minimal example of how to implement your own style guide. Most importantly, these two packages: + +- export all functions that {styler} exports, but the `style` argument set to the custom style guide, plus custom style guides. The advantage of this is that you can use that namespace as a drop-in replacement for styler everywhere. In particular, if you want to use the tidyverse style guide, use `styler::style_pkg()`, if you want to use a third-party style guide, use the other namespace, e.g. `styler.mlr::style_pkg()` +- depend on {styler} and use {styler} internals via `:::`. These internals are subject to change without prior notice, which is why the packages also have unit tests. Also note that importing internals from another package means your package can't be added to CRAN because packages calling private methods from other packages don't pass CRAN checks. The only way around this would be to export some styler internals, e.g. via a {styler.infra} package, but that would be a lot of work on our side and therefore not currently on the roadmap. Another alternative for developers might be to use , which we have not explored so far. +- implement unit tests following {styler}'s testing convention with `*-in.R` and `*-out.R` files that are checked with `styler:::test_collection()`. + +When creating a custom style guide and distribute it, we want to quickly recall important arguments for `create_style_guide()` from the docs: + +- `style_guide_name`, `style_guide_version` and `more_specs_style_guide`: These arguments are relevant for caching and make sure the user's cache is invalidated on releasing a new version. 
The documentation specifies how to set these arguments. +- `transformers_drop`: This argument can be created with `specify_transformers_drop()` to define conditions under which a transformer can be removed from the style guide without an effect on the result. This makes styler faster. For example, if you have a transformer that removes the token `;` and replaces it with a line break, it is only required if the code to style contains this token. Since this will hardly be the case for people who adhere to the tidyverse style guide, we formulate such a rule like this + +```{r} +styler::specify_transformers_drop( + spaces = list(style_space_around_tilde = "'~'"), + tokens = list(resolve_semicolon = "';'") +) +``` + +Where the name must correspond to the transformer function in question and the value is the token that must be absent in order to drop the transformer. diff --git a/vignettes/gsoc_proposal/data_structures.Rmd b/vignettes/gsoc_proposal/data_structures.Rmd deleted file mode 100644 index 09f26e093..000000000 --- a/vignettes/gsoc_proposal/data_structures.Rmd +++ /dev/null @@ -1,208 +0,0 @@ ---- -title: "Data Structures" -author: "Lorenz Walthert" -date: "`r Sys.Date()`" -output: rmarkdown::html_vignette -vignette: > - %\VignetteIndexEntry{Vignette Title} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} ---- - -> This vignette is partly outdated since nested structure was implemented -> completely. - -This vignette illustrates how the core of `styler` currently^[at commit `e6ddee0f510d3c9e3e22ef68586068fa5c6bc140`] works, i.e. how -rules are applied to a parse table and how limitations of this approach can be -overcome with a refined approach. - -Status quo - the flat approach ------------------------------- - -Roughly speaking, a string containing code to be formatted is parsed -with `parse` and the output is passed to `getParseData` in order to -obtain a parse table with detailed information about every token. For a -simple example string -"`a <- function(x) { if(x > 1) { 1+1 } else {x} }`" to be formatted, the -parse table on which `styler` performs the manipulations looks similar -to the one presented below. - - library("styler") - library("dplyr") - - code <- "a <- function(x) { if(x > 1) { 1+1 } else {x} }" - - (parse_table <- styler:::compute_parse_data_flat_enhanced(code)) - - ## # A tibble: 24 x 14 - ## line1 col1 line2 col2 token text terminal short newlines - ## - ## 1 1 0 1 0 START NA 0 - ## 2 1 1 1 1 SYMBOL a TRUE a 0 - ## 3 1 3 1 4 LEFT_ASSIGN <- TRUE <- 0 - ## 4 1 6 1 13 FUNCTION function TRUE funct 0 - ## 5 1 14 1 14 '(' ( TRUE ( 0 - ## 6 1 15 1 15 SYMBOL_FORMALS x TRUE x 0 - ## 7 1 16 1 16 ')' ) TRUE ) 0 - ## 8 1 18 1 18 '{' { TRUE { 0 - ## 9 1 20 1 21 IF if TRUE if 0 - ## 10 1 22 1 22 '(' ( TRUE ( 0 - ## # ... with 14 more rows, and 5 more variables: lag_newlines , - ## # spaces , multi_line , indention_ref_id , indent - -The column `spaces` was computed from the columns `col1` and `col2`, -`newlines` was computed from `line1` and `line2` respectively. - -So far, styler can set the spaces around the operators correctly. In our -example, that involves adding spaces around `+`, so in the `spaces` -column, element nine and ten must be set to one. This means that a space -is added after `1` and after `+`. To get the spacing right and cover the -various cases, a set of functions has to be applied to the parse table -subsequently (and in the right order), which is essentially done via -`Reduce()`. 
After all modifications on the table are completed, -`serialize_parse_data()` collapses the `text` column and adds the number -of spaces and line breaks specified in `spaces` and `newlines` in -between the elements of `text`. If we serialize our table and don't -perform any modification, we obviously just get back what we started -with. - - styler:::serialize_parse_data_flat(parse_table) - - ## [1] "a <- function(x) { if(x > 1) { 1+1 } else {x} }" - -Refining the flat approach - nesting the parse table ----------------------------------------------------- - -Although the flat approach is good place to start, e.g. for fixing -spaces between operators, it has its limitations. In particular, it -treats each token the same way in the sense that it does not account for -the context of the token, i.e. in which sub-expression it appears. To -set the indention correctly, we need a hierarchical view on the parse -data, since all tokens in a sub-expression have the same indention -level. Hence, a natural approach would be to create a nested parse table -instead of a flat parse table and then take a recursion over all -elements in the table, so for each sub(-sub etc.)-expression, a separate -parse table would be created and the modifications would be applied to -this table before putting everything back together. A function to create -a nested parse table already exists in `styler`. Let's have a look at -the top level: - - (l1 <- styler:::compute_parse_data_nested(code)[-1]) - - ## # A tibble: 1 x 13 - ## col1 line2 col2 id parent token terminal text short token_before - ## - ## 1 1 1 47 49 0 expr FALSE - ## # ... with 3 more variables: token_after , internal , - ## # child - -The tibble contains the column `child`, which itself contains a tibble. -If we "enter" the first child, we can see that the expression was split -up further. - - l1$child[[1]] %>% - select(text, terminal, child, token) - - ## # A tibble: 3 x 4 - ## text terminal child token - ## - ## 1 FALSE expr - ## 2 <- TRUE LEFT_ASSIGN - ## 3 FALSE expr - -And further... - - l1$child[[1]]$child[[3]]$child[[5]] - - ## # A tibble: 3 x 14 - ## line1 col1 line2 col2 id parent token terminal text short - ## - ## 1 1 18 1 18 9 45 '{' TRUE { { - ## 2 1 20 1 45 42 45 expr FALSE - ## 3 1 47 1 47 40 45 '}' TRUE } } - ## # ... with 4 more variables: token_before , token_after , - ## # internal , child - -... and so on. Every child that is not a terminal contains another -tibble where the sub-expression is split up further - until we are left -with tibbles that only contain terminals. - -Recall the above example. -`a <- function(x) { if(x > 1) { 1+1 } else {x} }`. In the last printed -parse table, we can see that see that the whole if condition is a -sub-expression of `code`, surrounded by two curly brackets. Hence, one -would like to set the indention level for this sub-expression before -doing anything with it in more detail. Later, when we progressed deeper -into the nested table, we hit a similar pattern: - - l1$child[[1]]$child[[3]]$child[[5]]$child[[2]]$child[[5]] - - ## # A tibble: 3 x 14 - ## line1 col1 line2 col2 id parent token terminal text short - ## - ## 1 1 30 1 30 20 30 '{' TRUE { { - ## 2 1 32 1 34 27 30 expr FALSE - ## 3 1 36 1 36 26 30 '}' TRUE } } - ## # ... with 4 more variables: token_before , token_after , - ## # internal , child - -Again, we have two curly brackets and an expression inside. We would -like to set the indention level for the expression `1+1` in the same way -as for the whole if condition. 
- -The simple example above makes it evident that a recursive approach to -this problem would be the most natural. - -The code for a function that kind of sketches the idea and illustrates -such a recursion is given below. - -It takes a nested parse table as input and then does the recursion over -all children. If the child is a terminal, it returns the text, -otherwise, it "enters" the child to find the terminals inside of the -child and returns them. - - serialize <- function(x) { - out <- Map( - function(terminal, text, child) { - if (terminal) - text - else - serialize(child) - }, - x$terminal, x$text, x$child - ) - out - } - - x <- styler:::compute_parse_data_nested(code) - serialize(x) %>% unlist - - ## [1] "a" "<-" "function" "(" "x" ")" - ## [7] "{" "if" "(" "x" ">" "1" - ## [13] ")" "{" "1" "+" "1" "}" - ## [19] "else" "{" "x" "}" "}" - -How to exactly implement a similar recursion to not just return each -text token separately, but the styled text as one string (or one string -per line) is subject to future work, so would be the functions to be -applied to a sub-expression parse table that create correct indention. -Similar to `compute_parse_data_flat_enhanced`, the column `spaces` and -`newlines` would be required to be computed by -`compute_parse_data_nested` as well as a new column `indention`. - -Final Remarks -------------- - -Although a flat structure would possibly also allow us to solve the -problem of indention, it is a less elegant and flexible solution to the -problem. It would involve looking for an opening curly bracket in the -parse table, set the indention level for all subsequent rows in the -parse table until the next opening or closing curly bracket is hit and -then intending one level further or setting indention back to where it -was at the beginning of the table. - -Note that the vignette just addressed the question of indention caused -by curly brackets and has not dealt with other operators that would -trigger indention, such as `(` or `+`. - -[1] at commit `e6ddee0f510d3c9e3e22ef68586068fa5c6bc140` diff --git a/vignettes/gsoc_proposal/manipulating_nested_parse_data.Rmd b/vignettes/gsoc_proposal/manipulating_nested_parse_data.Rmd deleted file mode 100644 index 2b0cebd4b..000000000 --- a/vignettes/gsoc_proposal/manipulating_nested_parse_data.Rmd +++ /dev/null @@ -1,271 +0,0 @@ ---- -title: "Manipulating the nested parse table" -author: "Lorenz Walthert" -date: "`r Sys.Date()`" -output: rmarkdown::html_vignette -vignette: > - %\VignetteIndexEntry{Manipulating the nested parse table} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} ---- -> This vignette is partly outdated since nested structure was implemented -> completely. In particular, the serialization is now done differently. - - library("dplyr") - library("purrr") - pkgload::load_all() - - -This vignette builds on the vignette "Data Structures" and discusses how -to go forward with the nested structure of the parse data. -In order to compute the white space information in a nested data structure, we -use a [visitor approach](https://en.wikipedia.org/wiki/Visitor_pattern) to -separate the algorithm (computing white space information and later apply -transformations) from the object (nested data structure). -The function `create_filler()` (name depreciated, now called -`default_style_guide_attributes()`) can then be used to compute current -white space information on every level of nesting within the nested parse data -if applied in combination with the visitor. 
In the sequel, a parse table at -one level of nesting will be denoted with the term *nest*, which always -represents a complete expression. Our visiting functions `pre_visit()` and -`post_visit()` take an object to -operate on and a list of functions. Concretely, the object is the -nested parse table. Each function is applied at each level of -nesting nesting before the next level of nesting is entered. You can find out -more about the visitor on the help file for `visit` (note that this function -is not exported by styler). - pre_visit - - ## function(pd_nested, funs) { - ## if (is.null(pd_nested)) return() - ## pd_transformed <- visit_one(pd_nested, funs) - ## - ## pd_transformed$child <- map(pd_transformed$child, pre_visit, funs = funs) - ## pd_transformed - ## } - ## - - visit_one - - ## function(pd_flat, funs) { - ## reduce(funs, function(x, fun) fun(x), - ## .init = pd_flat) - ## } - ## -This comes with two advantages. - - -* We go through the whole structure only as many times as we call the visitor - (instead of every *_nested() function going through it once, which is more - efficient in terms of speed. - - -- We don't need a \*\_nested() version of every function we want to - apply to the parse tables, in particular the rules in R/rules.R -- We go through the whole structure only as many times as we call the visitor - (instead of every - \*\_nested() function going through it once), which is more efficient - in terms of speed. - -`create_filler()` was adapted to also initialize indention and -lag\_newlines. - - create_filler - - ## function(pd_flat) { - ## - ## pd_flat$line3 <- lead(pd_flat$line1, default = tail(pd_flat$line2, 1)) - ## pd_flat$col3 <- lead(pd_flat$col1, default = tail(pd_flat$col2, 1) + 1L) - ## pd_flat$newlines <- pd_flat$line3 - pd_flat$line2 - ## pd_flat$lag_newlines <- lag(pd_flat$newlines, default = 0L) - ## pd_flat$col2_nl <- if_else(pd_flat$newlines > 0L, 0L, pd_flat$col2) - ## pd_flat$spaces <- pd_flat$col3 - pd_flat$col2_nl - 1L - ## pd_flat$multi_line <- ifelse(pd_flat$terminal, FALSE, NA) - ## pd_flat$indention_ref_id <- NA - ## ret <- pd_flat[, !(names(pd_flat) %in% c("line3", "col3", "col2_nl"))] - ## - ## - ## if (!("indent" %in% names(ret))) { - ## ret$indent <- 0 - ## } - ## - ## if (any(ret$spaces < 0L)) { - ## stop("Invalid parse data") - ## } - ## - ## ret - ## } - ## - - code <- "a <- function(x) { if(x > 1) { 1+1 } else {x} }" - pd_nested <- compute_parse_data_nested(code) - pd_nested_enhanced <- pre_visit(pd_nested, c(create_filler)) - pd_nested_enhanced - - ## # A tibble: 1 x 20 - ## line1 col1 line2 col2 id parent token terminal text short - ## - ## 1 1 1 1 47 49 0 expr FALSE - ## # ... with 10 more variables: token_before , token_after , - ## # internal , child , newlines , lag_newlines , - ## # spaces , multi_line , indention_ref_id , indent - -As a next step, we need to find a way to serialize the nested tibble, or -in other words, to transform it to its character vector representation. -As a starting point, consider the function `serialize` that was -introduced in the vignette "Data Structures". - - serialize <- function(x) { - out <- Map( - function(terminal, text, child) { - if (terminal) - text - else - serialize(child) - }, - x$terminal, x$text, x$child - ) - out - } - - serialize(pd_nested) %>% unlist - - ## [1] "a" "<-" "function" "(" "x" ")" - ## [7] "{" "if" "(" "x" ">" "1" - ## [13] ")" "{" "1" "+" "1" "}" - ## [19] "else" "{" "x" "}" "}" - -`serialize` can be combined with `serialize_parse_data_flat`. 
The latter -pastes together the column "text" of a flat parse table by taking into -account space and line break information, splits the string by line -break and returns it. - - serialize_parse_data_flat - - ## function(pd_flat) { - ## pd_flat %>% - ## summarize_( - ## text_ws = ~paste0( - ## text, newlines_and_spaces(newlines, spaces), - ## collapse = "")) %>% - ## .[["text_ws"]] %>% - ## strsplit("\n", fixed = TRUE) %>% - ## .[[1L]] - ## } - ## - -However, things get a bit more complicated, mainly because line break -and white space information is not only contained in the terminal -tibbles of the nested parse data, but even before, as the following -example shows. - - pd_nested_enhanced$child[[1]] - - ## # A tibble: 3 x 20 - ## line1 col1 line2 col2 id parent token terminal text short - ## - ## 1 1 1 1 1 3 49 expr FALSE - ## 2 1 3 1 4 2 49 LEFT_ASSIGN TRUE <- <- - ## 3 1 6 1 47 48 49 expr FALSE - ## # ... with 10 more variables: token_before , token_after , - ## # internal , child , newlines , lag_newlines , - ## # spaces , multi_line , indention_ref_id , indent - - pd_nested_enhanced$child[[1]]$child[[1]] - - ## # A tibble: 1 x 20 - ## line1 col1 line2 col2 id parent token terminal text short - ## - ## 1 1 1 1 1 1 3 SYMBOL TRUE a a - ## # ... with 10 more variables: token_before , token_after , - ## # child , internal , newlines , lag_newlines , - ## # spaces , multi_line , indention_ref_id , indent - -After "a" in `code`, there is a space, but this information is not -contained in the tibble where we find the terminal "a". In general, we -must add newlines and spaces values *after* we computed character vector -representation of the expression. In our example: we know that there is -a space after the non-terminal "a" by looking at -`pd_nested_enhanced$child[[1]]`. Therefore, we need to add this space to -the very last terminal within `pd_nested_enhanced$child[[1]]` before we -collapse everything together. - - serialize_parse_data_nested_helper - - ## function(pd_nested, pass_indent) { - ## out <- pmap(list(pd_nested$terminal, pd_nested$text, pd_nested$child, - ## pd_nested$spaces, pd_nested$lag_newlines, pd_nested$indent), - ## function(terminal, text, child, spaces, lag_newlines, indent) { - ## total_indent <- pass_indent + indent - ## preceding_linebreak <- if_else(lag_newlines > 0, 1, 0) - ## if (terminal) { - ## c(add_newlines(lag_newlines), - ## add_spaces(total_indent * preceding_linebreak), - ## text, - ## add_spaces(spaces)) - ## } else { - ## c(add_newlines(lag_newlines), - ## add_spaces(total_indent * preceding_linebreak), - ## serialize_parse_data_nested_helper(child, total_indent), - ## add_spaces(spaces)) - ## } - ## } - ## ) - ## out - ## } - ## - - serialize_parse_data_nested - - ## function(pd_nested) { - ## out <- c(add_newlines(start_on_line(pd_nested) - 1), - ## serialize_parse_data_nested_helper(pd_nested, pass_indent = 0)) %>% - ## unlist() %>% - ## paste0(collapse = "") %>% - ## strsplit("\n", fixed = TRUE) %>% - ## .[[1L]] %>% - ## trimws(which = "right") - ## out - ## } - ## - -Before we are done, we need to add information regarding indention to -the parse table. We can add indention after every line break that comes -after a round bracket with `indent_round()`. And then serialize it. - - pre_visit(pd_nested, - c(create_filler, - purrr::partial(indent_round, indent_by = 2))) - - ## # A tibble: 1 x 20 - ## line1 col1 line2 col2 id parent token terminal text short - ## - ## 1 1 1 1 47 49 0 expr FALSE - ## # ... 
with 10 more variables: token_before , token_after , - ## # internal , child , newlines , lag_newlines , - ## # spaces , multi_line , indention_ref_id , indent - -We can see how indention works with a more complicated example - - indented <- c( - "call(", - " 1,", - " call2(", - " 2, 3,", - " call3(1, 2, 22),", - " 5", - " ),", - " 144", - ")" - ) - - not_indented <- trimws(indented) - back_and_forth <- not_indented %>% - compute_parse_data_nested() %>% - pre_visit(c(create_filler, - purrr::partial(indent_round, indent_by = 2))) %>% - serialize_parse_data_nested() - - identical(indented, back_and_forth) - - ## [1] TRUE diff --git a/vignettes/introducing_styler.Rmd b/vignettes/introducing_styler.Rmd deleted file mode 100644 index 3069001bf..000000000 --- a/vignettes/introducing_styler.Rmd +++ /dev/null @@ -1,205 +0,0 @@ ---- -title: "An introduction to styler" -author: "Lorenz Walthert" -date: "`r Sys.Date()`" -output: rmarkdown::html_vignette -vignette: > - %\VignetteIndexEntry{An introduction to styler} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} ---- - -This vignette introduces the basic functionality of styler and showcases -how styler applies a few rules of the -[tidyverse style guide](http://style.tidyverse.org/index.html) to example code. -Note that you can create your own style guide and customize styler even further, -as described in the vignette "Customizing styler". - -```{r, echo = FALSE} -knitr::opts_chunk$set(echo = TRUE, comment = "") -knitr::knit_engines$set(list( - styler = function(options) { - options$comment <- "" - knitr::engine_output( - options, - - c("# Before", options$code), - c("# After", styler::style_text(options$code)) - ) - } -)) - -``` - -It's possible to use different levels of 'invasiveness', as described in the -help file for the only style guide implemented so far, which is the -[tidyverse style guide](http://style.tidyverse.org/index.html). The style guide -in use is passed to the styling function (i.e `style_text()` and friends) via -the `style` argument, which defaults to `tidyverse_style`. In addition to this -argument, there are further customization options. For example, we can limit -ourselves to styling just spacing information by indicating this with the -`scope` argument: - -```{r} -library("styler") -library("magrittr") -style_text("a=3; 2", scope = "spaces") -``` - -Or, on the other extreme of the scale, styling spaces, indention, line breaks -and tokens (which is the default): - -```{r} -style_text("a=3; 2", scope = "tokens") -``` - - -`scope` always includes less-invasive styling than the option chosen, -e.g. `spaces = "line_breaks"` includes styling spaces and indention in addition -to line breaks. - - -We can also choose to style line breaks but not tokens: -```{r} -style_text("if(x) {66 } else {a=3}", scope = "line_breaks") -``` - -Note that `scope = "spaces"` does not touch indention -```{r} -code <- c( - "a <- function() { ", - " a=3", - "}" -) - -style_text(code, scope = "spaces") -``` - -But `scope = "indention"` - as the name says - does. -```{r} -style_text(code, scope = "indention") -``` - - -Another option that is helpful to determine the level of 'invasiveness' is -`strict`. If set to `TRUE`, spaces and line breaks before or after tokens are -set to either zero or one. 
However, in some situations this might be undesirable, -as the following example shows: - -```{r} -style_text( - "data_frame( - small = 2 , - medium = 4,#comment without space - large = 6 - )", strict = FALSE -) -``` - -We prefer to keep the equal sign after "small", "medium" and large aligned, -so we set `strict = FALSE` to set spacing to *at least* one around `=`. - -Also, spaces before comments are preserved with that option. - -```{r} -style_text( - "a <- 'one' #just one - abc <- 'three' # three", - strict = FALSE -) -``` - - -Though simple, hopefully the above examples convey some of the flexibility of -the configuration options available in `styler`. Let us for now focus on a -configuration with `strict = TRUE` and `scope = "tokens"` and illustrate a few -more examples of code before and after styling. - -`styler` can identify and handle unary operators and other math tokens: - -```{styler} -1++1-1-1/2 -``` - -This is tidyverse style. However, styler offers very granular control for -math token spacing. Assuming you like spacing around `+` and `-`, but not -around `/` and `*` and `^`, do the following: -```{r} -style_text( - "1++1/2*2^2", - math_token_spacing = specify_math_token_spacing(zero = c("'/'", "'*'", "'^'")) -) -``` - -It can also format complicated expressions that involve line breaking and -indention based on both brace expressions and operators: - -```{styler} -if (x >3) {stop("this is an error")} else { -c(there_are_fairly_long, -1 / 33 * -2 * long_long_variable_names)%>% k( - -) } -``` - -Lines are broken after `(` if a function call spans multiple lines: - -```{styler} -do_a_long_and_complicated_fun_cal("which", has, way, to, - "and longer then lorem ipsum in its full length" - ) -``` - -`styler` replaces `=` with `<-` for assignment: -```{styler} -one = "one string" -``` - -It converts single quotes within strings if necessary: -```{styler} -one <- 'one string' -two <- "one string in a 'string'" -``` - -And adds braces to function calls in pipes: - -```{styler} -a %>% - b %>% - c -``` - -Function declarations are indented if multi-line: - -```{styler} -my_fun <- function(x, -y, -z) { - just(z) -} -``` - -`styler` can also deal with tidyeval syntax: - -```{styler} -mtcars %>% - group_by( !!my_vars ) -``` - -If you, say, don't want comments starting with `###` to be indented, you can -formulate an unindention rule: -```{r} -style_text( - c( - "a <- function() {", - "### not to be indented", - "# indent normally", - "33", - "}" - ), - reindention = specify_reindention(regex_pattern = "###", indention = 0) - -) -``` - diff --git a/vignettes/performance_improvements.Rmd b/vignettes/performance_improvements.Rmd deleted file mode 100644 index d3e0a3077..000000000 --- a/vignettes/performance_improvements.Rmd +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: "Performance Improvements" -author: "Lorenz Walthert" -date: "7/24/2017" -output: rmarkdown::html_vignette -vignette: > - %\VignetteIndexEntry{Performance Improvements} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} - ---- - -We want to make styler faster. -```{r, echo = FALSE} -knitr::opts_chunk$set(eval = FALSE) -``` - - -```{r} -library(styler) -microbenchmark::microbenchmark( - base = style_file("tests/testthat/indention_multiple/overall-in.R"), - times = 2 -) -#> Unit: seconds -#> expr min lq mean median uq max neval -#> base 4.131253 4.131253 4.172017 4.172017 4.212781 4.212781 2 -``` - -Replacing mutate statments. 
-```{r} -microbenchmark::microbenchmark( - base = style_file("tests/testthat/indention_multiple/overall-in.R"), - times = 2 -) -#> Unit: seconds -#> expr min lq mean median uq max neval -#> base 2.13616 2.13616 2.223659 2.223659 2.311158 2.311158 2 -``` - -Move `opening` argument out of needs indention. -```{r} -microbenchmark::microbenchmark( - base = style_file("tests/testthat/indention_multiple/overall-in.R"), - times = 5 -) - -#> Unit: seconds -#> expr min lq mean median uq max neval -#> base 2.18097 2.184721 2.225294 2.200893 2.241799 2.318089 5 -``` - -Dropping unnecessary select and arrange stuffstatments -```{r} -microbenchmark::microbenchmark( - base = style_file("tests/testthat/indention_multiple/overall-in.R"), - times = 5 -) -#> Unit: seconds -#> expr min lq mean median uq max neval -#> base 2.109271 2.134377 2.147821 2.158567 2.165384 2.171505 5 -``` - - -Some more stuff (early return, purr) -```{r} -microbenchmark::microbenchmark( - base = style_file("tests/testthat/indention_multiple/overall-in.R"), - times = 5 -) -#> Unit: milliseconds -#> expr min lq mean median uq max neval -#> base 930.4391 944.9253 969.2838 951.4632 951.6571 1067.934 5 -``` - diff --git a/vignettes/remove_rules.Rmd b/vignettes/remove_rules.Rmd new file mode 100644 index 000000000..657538c76 --- /dev/null +++ b/vignettes/remove_rules.Rmd @@ -0,0 +1,206 @@ +--- +title: "Remove rules" +output: rmarkdown::html_vignette +vignette: > + %\VignetteIndexEntry{Remove rules} + %\VignetteEncoding{UTF-8} + %\VignetteEngine{knitr::rmarkdown} +editor_options: + markdown: + wrap: 79 +--- + +```{r, include = FALSE} +knitr::opts_chunk$set( + collapse = TRUE, + comment = "#>" +) +styler::cache_deactivate() +``` + +```{r, echo = FALSE, include = FALSE} +options(styler.colored_print.vertical = FALSE) +``` + +If you want to change the behavior of styler to match your desired style, there +are multiple ways: + +- Use the tidyverse style guide, but not with the default options. Starting + point for this approach is the `help("tidyverse_style")` for the + function `tidyverse_style()`, which returns the transformer functions that + prettify your code. Most of these options are explained in + `vignette("styler")`. + +- If you can't get styler behaving the way you want using the arguments of + `tidyverse_style()`, you have another option, which is described in a + `vignette("customizing_styler")`: Creating your own style guide from + scratch. Yes, I admit, it's pretty long and if you don't want to become a + *styler expert*, it may be a little bit overwhelming. + +- If you don't care about how to create new rules but you simply want to + *remove* a rule, I have good news for you: There is a quick way to do it. + And that's what the remainder of this vignette focuses on. + +Once you are happy with your style guide, you might want to have a look at how +to distribute it, which is described in +`vignette("distribute_custom_style_guides")`. + +# Theory + +Here are the steps required to deactivate a rule you don't like + +- Figure out which transformer function in the transformers returned by + `tidyerse_style()` corresponds to the rule you want to remove. + +- Set that element in the list to `NULL`, which is equivalent to removing it. + +- Pass the list to `style_text` as a transformer. + +# Practice + +Lets assume you want to remove the rule that turns `=` into `<-` for +assignment. That means you want + +``` +string = "hi there" +``` + +to remain unchanged after applying styler. 
This is not the case if you use the +default style guide of styler: + +```{r, comment = ""} +library(styler) +style_text("string = 'hi there'") +``` + +So you need to figure out which rule is responsible for this. Let's check the +transformer categories used with the tidyverse style guide. + +```{r} +transformers <- tidyverse_style() +names(transformers) +``` + +From the aforementioned +[vignette](https://styler.r-lib.org/articles/customizing_styler.html): + +> We note that there are different types of transformer functions. initialize +> initializes some variables in the nested parse table (so it is not actually a +> transformer), and the other elements modify either spacing, line breaks or +> tokens. use_raw_indention is not a function, it is just an option. + +Now, we can look at the names of the rules that are sub-elements of the +transformer categories. + +```{r} +library(magrittr) +levels <- c("space", "line_break", "indention", "token") +purrr::map( + levels, + ~ names(transformers[[.x]]) +) %>% + purrr::set_names(levels) +``` + +Spotted the rule we want to get rid of? It's under `token` and it's called +`force_assignment_op`. I agree, we could have chosen a better name. If you are +not sure if you can guess from the name of the rule what it does you can also +have a look at the function declaration of this (unexported) function. + +```{r} +styler:::force_assignment_op +``` + +Next, you simply set that element to `NULL`. + +```{r} +transformers$token$force_assignment_op <- NULL +``` + +And you can use the modified transformer list as input to `style_text()` + +```{r} +style_text("string = 'hi there'", transformers = transformers) +``` + +If you want to use it the same way as `tidyverse_style()`, here's the last +step: + +```{r} +eq_assign_style <- function(...) { + transformers <- tidyverse_style(...) + transformers$token$force_assignment_op <- NULL + transformers +} + +style_text("string = 'hi there'", style = eq_assign_style) +``` + +That's it. Note that the transformer functions and how they are returned by +`tidyverse_style()` is not part of the exposed API. This means that the order, +the naming etc. may change. Also, remember we did not add a rule to replace +`<-` with `=`, but we only removed a rule to replace `=` with `<-`, so `<-` +won't be touched: + +```{r} +style_text("string <- 'hi there'", style = eq_assign_style) +``` + +If you want to turn `<-` into `=`, you need to add a rule as described in +`vignette("customizing_styler")`. + +If you have trouble identifying a rule based on rule names, + +- First write an example whose results is not the one you wanted, e.g. + +``` r +code <- " +f <- function () { + +return (1) +}" +``` + +is code that will have the first empty line in the function body removed by +styler. + +- Then pinpoint the probable rule type (e.g. line breaks if you want less new + lines). +- In a local styler clone, add e.g. a `return(pd)` at the top of the body to + deactivate the rule quickly, or add a `print(pd)` or `browser()` call in + the functions of that type (e.g. the different functions of + `R/rules-line-breaks.R`), `load_all()`, run your example, see if that + function made the change. move the `print(pd)` or `browser()` call to + another function if not. +- Once you've identified the culprit (in this case + `style_line_break_around_curly`), set it to `NULL` as shown earlier. 
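Putting that last step into code, the end of such a debugging session might look like the sketch below. It reuses the `code` example from above; the rule name and its category are not part of the exposed API, so they may change between styler versions:

```{r, eval = FALSE}
# sketch: drop the rule identified above and re-style the example
transformers <- tidyverse_style()
transformers$line_break$style_line_break_around_curly <- NULL
style_text(code, transformers = transformers)
```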
+ +# Some other rules and their transformers + +- You don't like multi-line ifelse statements getting wrapped around curly + braces: `transformers$token$wrap_if_else_multi_line_in_curly`. + +- You don't like multi-line calls to be broken before the first named + argument: + `transformers$line_break$set_line_break_after_opening_if_call_is_multi_line` + (interacting with + `transformers$line_break$set_line_break_before_closing_call`). + +- You don't like the line being broken after the pipe: + `transformers$line_break$add_line_break_after_pipe` + +- You don't like single quotes to be replaced by double quotes: + `transformers$space$fix_quotes`. + +- You don't like comments to start with one space: + `transformers$space$start_comments_with_space` + +I think you get the idea. I nevertheless recommend using the [tidyverse style +guide](https://style.tidyverse.org/) as is since + +- it is a well-established, thought-through style. + +- using a consistent style (no matter which) reduces friction in the + community. + +If you have questions, don't hesitate to create an issue in the GitHub repo. diff --git a/vignettes/strict.Rmd b/vignettes/strict.Rmd new file mode 100644 index 000000000..880a5db10 --- /dev/null +++ b/vignettes/strict.Rmd @@ -0,0 +1,94 @@ +--- +title: "The effect of `strict = FALSE`" +output: rmarkdown::html_vignette +vignette: > + %\VignetteIndexEntry{The effect of `strict = FALSE`} + %\VignetteEngine{knitr::rmarkdown} + %\VignetteEncoding{UTF-8} +--- + +```{r, include = FALSE} +knitr::opts_chunk$set( + collapse = TRUE, + comment = "#>", + results = "hide" +) + +styler::cache_deactivate() + +knitr::knit_engines$set(list( + styler = function(options) { + options$comment <- "" + knitr::engine_output( + options, + { + before <- options$code + after <- as.character(styler::style_text(options$code, strict = FALSE)) + if (!identical(trimws(before, "right"), after)) { + stop( + "Before unlike after. Before:", paste(before, sep = "\n"), + "After: ", paste(after, sep = "\n") + ) + } + after + }, + "" + ) + } +)) +``` + +This vignette shows how output from styler might differ when `strict = FALSE`. For brevity, we don't show the output of `strict = TRUE`, but it should be pretty simple for the user to derive it from the bullet point(s) or simply paste the code in the console to see the output. + +```{r setup} +library(styler) +``` + +- multi-line function declarations without curly braces are tolerated. + +```{styler} +function() + NULL +``` + +- Spaces before opening parenthesis, tilde as well as around comments and math token must be at least one, not exactly one. + +```{styler} +1 + (1 + 3) +1 ~ more() # comment +``` + +- More than one line break is tolerated before closing curly brace and line breaks between curly and round braces are not removed. + +```{styler} +test({ + 1 + +} +) +``` + +- Multi-line calls don't put the closing brace on a new line nor trigger a line break after the opening brace. + +```{styler} +call( + this) +call(2, + more +) +``` + +- No line break inserted after pipes nor ggplot2 or pipe expressions. + +```{styler} +ggplot2::ggplot(data, aes(x, y)) + geom_line() + scale_x_continuous() + +this %>% is() %>% a() %>% long() %>% pipe() +``` + +- ifelse statements don't get curly braces added when multi-line. 
+ +```{styler} +if (TRUE) 3 else + 5 +``` diff --git a/vignettes/styler.Rmd b/vignettes/styler.Rmd new file mode 100644 index 000000000..dcb451adc --- /dev/null +++ b/vignettes/styler.Rmd @@ -0,0 +1,210 @@ +--- +title: "Get started" +output: rmarkdown::html_vignette +vignette: > + %\VignetteIndexEntry{Get started} + %\VignetteEngine{knitr::rmarkdown} + %\VignetteEncoding{UTF-8} +--- + +```{r, echo = FALSE} +knitr::opts_chunk$set(echo = TRUE, comment = "") +knitr::knit_engines$set(list( + styler = function(options) { + options$comment <- "" + knitr::engine_output( + options, + c("# Before", options$code), + c("# After", styler::style_text(options$code)) + ) + } +)) + +options(styler.colored_print.vertical = FALSE) +styler::cache_deactivate() +``` + +# Entry-points + +styler provides the following API to format code: + +- `style_file()` styles `.R`, `.qmd`, `.Rmd`, `.Rmarkdown`, `.Rnw`, and `.Rprofile` files. + +- `style_dir()` styles all these files in a directory. + +- `style_pkg()` styles the source files of an R package. + +- RStudio Addins for styling the active file, styling the current package and styling the highlighted selection, see `help("styler_addins")`. + +Beyond that, styler can be used through other tools documented in the `vignette("third-party-integrations")`. Let's get started. +```{r} +library(styler) +``` + + + +### Passing arguments to the style guide + +styler separates the abstract definition of a style guide from the application of it. That's why you must supply a style guide via `transformers` when styling (in case you don't want to rely on the defaults): + +```{r} +style_text("a + b", transformers = tidyverse_style(scope = "indention")) +``` + +The styler API was designed so that you can pass arguments to the style guide via the styling function (e.g. `style_file()`) to allow more concise syntax: + +```{r, results = 'hide'} +# equivalent +style_text("a + b", transformers = tidyverse_style(scope = "indention")) +style_text("a + b", scope = "indention") +``` + +The magic is possible thanks to `...`. See `style_text()` for details. + +# Invasiveness + +### `scope`: What to style? + +This argument of `tidyverse_style()` determines the invasiveness of styling. The following levels for `scope` are available (in increasing order): + +- "none": Performs no transformation at all. + +- "spaces": Manipulates spacing between token on the same line. + +- "indention": Manipulates the indention, i.e. number of spaces at the beginning of each line. + +- "line_breaks": Manipulates line breaks between tokens. + +- "tokens": manipulates tokens. + +There are two ways to specify the scope of styling. + +- As a string: In this case all less invasive scope levels are implied, e.g. `"line_breaks"` includes `"indention"`, `"spaces"`. This is brief and what most users need. This is supported in `styler >= 1.0.0`. + +- As vector of class `AsIs`: Each level has to be listed explicitly by wrapping one ore more levels of the scope in `I()`. This offers more granular control at the expense of more verbosity. This is supported in `styler > 1.3.2`. + +```{r} +# tokens and everything less invasive +style_text("a=2", scope = "tokens") + +# just tokens and indention +style_text("a=2", scope = I(c("tokens", "indention"))) +``` + +As you can see from the output, the assignment operator `=` is replaced with `<-` in both cases, but spacing remained unchanged in the second example. + +### How `strict` do you want styler to be? 
### How `strict` do you want styler to be?

Another option that helps to determine the level of 'invasiveness' is `strict` (defaulting to `TRUE`). Some rules won't be applied so strictly with `strict = FALSE`, assuming you deliberately formatted things the way they are. Please see `vignette("strict")` for details. Since `styler >= 1.2`, alignment in function calls is detected and preserved, so you don't need `strict = FALSE` for that, e.g.

```{r}
style_text(
  "tibble::tibble(
    small  = 2 ,
    medium = 4,#comment without space
    large  = 6
  )"
)
```

The details are in `vignette("detect-alignment")`.

# Ignoring certain lines

You can tell styler to ignore some lines if you want to keep their current formatting. You can mark whole blocks or inline expressions with `# styler: off` and `# styler: on` comments:

```{r}
styler::style_text(
  "
  #> blocks
  blibala= 3
  # styler: off
  I_have(good+reasons, to = turn_off,
    styler
  )
  # styler: on
  1+1

  #> inline
  ignore( this) # styler: off
  f( ) # not ignored anymore
"
)
```

You can also use custom markers as described in `help("stylerignore", package = "styler")`. As described above and in `vignette("detect-alignment")`, some alignment is recognized, so *stylerignore* should not be necessary in that context.

# Caching

styler is rather slow, so leveraging a cache for styled code brings big speedups in many situations. Starting with version `1.3.0`, you can benefit from it. For people using styler interactively (e.g. in RStudio), typing `styler::cache_info()` and then confirming the creation of a permanent cache is sufficient. Please refer to `help("caching")` for more information. The cache is by default tied to the version of styler, which means it will be rebuilt after you upgrade. Also, the cache takes up virtually no disk space because only hashes of styled code are stored.

# Dry mode

As of version `1.3.2`, styler has a dry mode which avoids writing output to the file(s) you want to format. The following options are available:

- *off* (default): Writes back to the file if applying styling changes the input.

- *on*: Applies styling and returns the result without writing changes (if any) back to the file(s).

- *fail*: Returns an error if the result of styling is not identical to the input.

In any case, you can use the (invisible) return value of `style_file()` and friends to learn how files were changed (or would have changed):

```{r, comment = "#>"}
out <- withr::with_tempfile(
  "code.R",
  {
    writeLines("1+1", "code.R")
    style_file("code.R", dry = "on")
  }
)
out
```

# More configuration options

### Roxygen code example styling

This is enabled by default; you can turn it off with `include_roxygen_examples = FALSE`.

### Custom math token spacing

`styler` can identify and handle unary operators and other math tokens:

```{styler}
1++1-1-1/2
```

This is tidyverse style. However, styler offers very granular control over math token spacing via `specify_math_token_spacing()`.
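For reference, the defaults can also be written out explicitly with `specify_math_token_spacing()`. A minimal sketch, assuming the tidyverse defaults (no space around `^`, one space around the other math operators, as in `tidyverse_math_token_spacing()`):

```{r}
# Assumed tidyverse defaults: zero spaces around `^`,
# one space around `+`, `-`, `*` and `/`
style_text(
  "1++1-1-1/2^2",
  math_token_spacing = specify_math_token_spacing(
    zero = "'^'",
    one = c("'+'", "'-'", "'*'", "'/'")
  )
)
```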
Assuming you like spacing around `+` and `-`, but not around `/`, `*`, and `^`, do the following:

```{r}
style_text(
  "1++1/2*2^2",
  math_token_spacing = specify_math_token_spacing(zero = c("'/'", "'*'", "'^'"))
)
```

### Custom indention

If, say, you don't want comments starting with `###` to be indented and you want indention to be four spaces instead of two, you can formulate an unindention rule and set `indent_by` to 4:

```{r}
style_text(
  c(
    "a <- function() {",
    "### not to be indented",
    "# indent normally",
    "33",
    "}"
  ),
  reindention = specify_reindention(regex_pattern = "###", indention = 0),
  indent_by = 4
)
```

### Custom style guides

These were some (but not all) of the configuration options exposed in `style_file()` and friends as well as in `tidyverse_style()`. If the above did not give you the flexibility you hoped for, you can create your own style guide and customize styler even further:

- either by removing rules from the tidyverse style guide as described in `vignette("remove_rules")`.
- or by creating your own style guide from scratch as described in `vignette("customizing_styler")`.

diff --git a/vignettes/third-party-integrations.Rmd b/vignettes/third-party-integrations.Rmd
new file mode 100644
index 000000000..679ad363f
--- /dev/null
+++ b/vignettes/third-party-integrations.Rmd
@@ -0,0 +1,41 @@
---
title: "Third-party integrations"
output: rmarkdown::html_vignette
vignette: >
  %\VignetteIndexEntry{Third-party integrations}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

```{r, include = FALSE}
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
```

styler functionality is available in other tools, most notably

- as the pre-commit hook `style-files` in the [precommit](https://github.com/lorenzwalthert/precommit) package to format files before committing (locally), enforceable as a continuous integration step in the cloud through [pre-commit.ci](https://pre-commit.ci).

- via `usethis::use_tidy_style()`, which styles your project according to the tidyverse style guide.

- through commenting a PR on GitHub with `\style` when the [GitHub Action](https://github.com/features/actions) [*Tidyverse CI*](https://github.com/r-lib/actions/tree/master/examples#tidyverse-ci-workflow) is used. The most convenient way to set this up is via [`usethis::use_tidy_github_actions()`](https://usethis.r-lib.org/reference/tidyverse.html).

- through the [GitHub Action workflow](https://github.com/r-lib/actions/tree/v2-branch/examples#style-package) that triggers `styler::style_pkg()` on changes in source files. Setting this up is easiest with `usethis::use_github_action("style")`.

- as a formatter for R Markdown without modifying the source. This feature is implemented as a code chunk option in knitr. Use `tidy = "styler"` in the header of a code chunk (e.g. ```` ```{r name-of-the-chunk, tidy = "styler"} ````), or `knitr::opts_chunk$set(tidy = "styler")` at the top of your R Markdown document.

- via the [R language server](https://github.com/REditorSupport/languageserver) to format your code in VS Code, Atom and others.

- as a fixer in the [ALE plug-in](https://github.com/dense-analysis/ale/pull/2401) for Vim.

- in `reprex::reprex(..., style = TRUE)` to prettify reprex code before printing. To permanently use `style = TRUE` without specifying it every time, you can add the following line to your `.Rprofile` (via `usethis::edit_r_profile()`): `options(reprex.styler = TRUE)`.

- in the *format-all* command for Emacs via [emacs-format-all-the-code](https://github.com/lassik/emacs-format-all-the-code).


- as a [JupyterLab code formatter](https://ryantam626.github.io/jupyterlab_code_formatter/index.html).

- for pretty-printing [drake](https://github.com/ropensci/drake) workflow data frames with `drake::drake_plan_source()`.

Do you know another way to use styler that is not listed here? Please let us know by [opening an issue](https://github.com/r-lib/styler/issues) and we'll extend the list.