pax_global_header00006660000000000000000000000064144313503260014513gustar00rootroot0000000000000052 comment=e559f0db61f5ad1446602253e0543568ff662bc8 asciidoctor-2.0.20/000077500000000000000000000000001443135032600140775ustar00rootroot00000000000000asciidoctor-2.0.20/.gitattributes000066400000000000000000000000421443135032600167660ustar00rootroot00000000000000* text=auto eol=lf *.rb diff=ruby asciidoctor-2.0.20/.github/000077500000000000000000000000001443135032600154375ustar00rootroot00000000000000asciidoctor-2.0.20/.github/FUNDING.yml000066400000000000000000000000351443135032600172520ustar00rootroot00000000000000open_collective: asciidoctor asciidoctor-2.0.20/.github/workflows/000077500000000000000000000000001443135032600174745ustar00rootroot00000000000000asciidoctor-2.0.20/.github/workflows/ci.yml000066400000000000000000000120311443135032600206070ustar00rootroot00000000000000name: CI on: push: branches: ['**'] paths-ignore: ['*.adoc', 'docs/**'] pull_request: paths-ignore: ['*.adoc', 'docs/**'] schedule: - cron: '0 2 * * *' workflow_dispatch: concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true permissions: read-all jobs: activate: if: | (github.event_name == 'schedule' && github.repository_owner == 'asciidoctor') || github.event_name == 'push' || github.event_name == 'workflow_dispatch' || (github.event_name == 'pull_request' && !startsWith(github.head_ref, 'docs/')) runs-on: ubuntu-latest steps: - name: Proceed run: echo ok go build: needs: activate strategy: matrix: os: [ubuntu-latest, windows-latest] ruby: [jruby-9.4.2.0, '2.3', '2.7', '3.0', '3.2'] asciimath-version: [~] haml-version: [~] pygments-version: ['~> 2.3.0'] rouge-version: [~] exclude: # remove 3.2 on ubuntu-latest to mark as primary - os: ubuntu-latest ruby: '3.2' include: - os: ubuntu-latest ruby: truffleruby-22.2.0 - os: ubuntu-latest ruby: jruby-9.1.17.0 pygments-version: ~ - os: windows-latest ruby: jruby-9.1.17.0 pygments-version: ~ - os: macos-latest ruby: '3.2' - os: ubuntu-latest ruby: '2.3' asciimath-version: '~> 1.0.0' - os: ubuntu-latest ruby: '2.3' haml-version: '~> 5.2.0' - os: ubuntu-latest ruby: '2.3' pygments-version: '~> 1.2.0' - os: ubuntu-latest ruby: '2.3' rouge-version: '~> 2.0.0' - os: ubuntu-latest ruby: '3.2' primary: primary runs-on: ${{ matrix.os }} env: BUNDLE_WITHOUT: coverage:docs SOURCE_DATE_EPOCH: '1521504000' # work around `Permission denied - NUL' error when installing/using the bundler gem on JRuby for Windows JRUBY_OPTS: '-J-Djdk.io.File.enableADS=true' steps: - name: Checkout uses: actions/checkout@v3 - name: Install prerequisites for Nokogiri (Linux) if: matrix.os == 'ubuntu-latest' run: sudo apt-get install libxslt1-dev - name: Configure Nokogiri installation (Linux, macOS) if: matrix.os == 'macos-latest' || matrix.os == 'ubuntu-latest' run: echo 'BUNDLE_BUILD__NOKOGIRI=--use-system-libraries' >> $GITHUB_ENV - name: Set AsciiMath version if: matrix.asciimath-version run: echo 'ASCIIMATH_VERSION=${{ matrix.asciimath-version }}' >> $GITHUB_ENV - name: Set Pygments version if: matrix.pygments-version run: echo 'PYGMENTS_VERSION=${{ matrix.pygments-version }}' >> $GITHUB_ENV - name: Install prerequisites for Pygments ~> 1.2.0 (Linux) if: matrix.os == 'ubuntu-latest' && matrix.pygments-version == '~> 1.2.0' run: sudo apt-get install python2 - name: Set Rouge version if: matrix.rouge-version run: echo 'ROUGE_VERSION=${{ matrix.rouge-version }}' >> $GITHUB_ENV - name: Enable coverage (primary only) if: matrix.primary run: | echo 'BUNDLE_WITHOUT=docs' >> 
$GITHUB_ENV echo 'COVERAGE=true' >> $GITHUB_ENV - name: Upgrade Java (JRuby for Windows) if: matrix.os == 'windows-latest' && matrix.ruby == 'jruby-9.4.2.0' uses: actions/setup-java@v3 with: distribution: temurin java-version: '17' - name: Install Ruby (uses cached dependencies for non-scheduled build) uses: ruby/setup-ruby@v1 with: ruby-version: ${{ matrix.ruby }} bundler: ${{ matrix.os == 'windows-latest' && '2.3.22' || 'default' }} bundler-cache: ${{ github.event_name != 'schedule' }} - name: Install dependencies (scheduled build only) if: github.event_name == 'schedule' run: | bundle config --local path vendor/bundle bundle --jobs 3 --retry 3 - name: Check default stylesheet if: matrix.primary run: | cd src/stylesheets npm i --silent --no-audit --no-fund --no-package-lock --no-progress npm run lint npm run build cd - if [ -n "$(git diff --name-only -- data/stylesheets)" ]; then echo 'Detected the following uncommitted changes to the default stylesheet:' git --no-pager diff exit 1 fi - name: Run tests run: bundle exec ruby -w $(bundle exec ruby -e 'print File.join Gem.bindir, %q(rake)') test:all build-dependents: name: build:dependents if: success() && github.event_name == 'push' && github.repository == 'asciidoctor/asciidoctor' needs: build runs-on: ubuntu-latest env: BUNDLE_WITHOUT: docs:coverage steps: - name: Checkout uses: actions/checkout@v3 - name: Install Ruby uses: ruby/setup-ruby@v1 with: ruby-version: '3.2' bundler-cache: true - name: Build dependents env: GITHUB_API_TOKEN: ${{ secrets._GITHUB_API_TOKEN }} run: bundle exec rake build:dependents asciidoctor-2.0.20/.github/workflows/deploy-docs.yml000066400000000000000000000005631443135032600224450ustar00rootroot00000000000000name: Deploy Docs on: push: branches: [ v2.0.x ] paths: [ 'docs/**' ] permissions: read-all jobs: build: if: github.repository_owner == 'asciidoctor' runs-on: ubuntu-latest steps: - name: Trigger env: GH_TOKEN: ${{ secrets.GH_TOKEN_SCOPE_REPO }} run: gh workflow run trigger.yml -R asciidoctor/docs.asciidoctor.org -r main asciidoctor-2.0.20/.github/workflows/release.yml000066400000000000000000000033041443135032600216370ustar00rootroot00000000000000name: Release run-name: ${{ github.workflow }} ${{ github.event.inputs.release-version }} on: workflow_dispatch: inputs: release-version: description: Enter version to release (e.g., 2.1.0). required: false release-beer: default: TBD description: Enter beer to mark the occasion. 
required: false jobs: activate: runs-on: ubuntu-latest if: github.repository_owner == 'asciidoctor' && github.event_name == 'workflow_dispatch' steps: - run: echo ok go perform: needs: activate runs-on: ubuntu-latest environment: releases env: SOURCE_DATE_EPOCH: '1521504000' PYGMENTS_VERSION: '~> 2.3.0' steps: - name: Checkout uses: actions/checkout@v3 with: token: ${{ secrets[format('GH_TOKEN_{0}', github.actor)] }} - name: Install Ruby uses: ruby/setup-ruby@v1 with: ruby-version: '3.2' bundler-cache: false - name: Configure Bundler run: | bundle config --local path .bundle/gems bundle config --local without coverage docs - name: Install dependencies run: bundle --jobs 3 --retry 3 - name: Run tests run: bundle exec rake test:all - name: Setup release environment run: | echo ${{ secrets[format('GH_TOKEN_{0}', github.actor)] }} | gh auth login --with-token echo RELEASE_VERSION=${{ github.event.inputs.release-version }} >> $GITHUB_ENV echo RELEASE_BEER=${{ toJSON(github.event.inputs.release-beer) }} >> $GITHUB_ENV echo RELEASE_RUBYGEMS_API_KEY=${{ secrets[format('RUBYGEMS_API_KEY_{0}', github.actor)] }} >> $GITHUB_ENV - name: Build, tag, and publish gem run: ./release.sh asciidoctor-2.0.20/.gitignore000066400000000000000000000001541443135032600160670ustar00rootroot00000000000000/Gemfile.lock /.bundle/ /*.gem /*.html /.idea/ /.ruby-gemset /.ruby-version /.yardoc/ /pkg/ /rdoc/ /vendor/ asciidoctor-2.0.20/.simplecov000066400000000000000000000006201443135032600160770ustar00rootroot00000000000000SimpleCov.start do load_profile 'test_frameworks' coverage_dir ENV['COVERAGE_REPORTS'] || 'tmp/coverage' if ENV['SHIPPABLE'] require 'simplecov-csv' formatter SimpleCov::Formatter::CSVFormatter else #formatter SimpleCov::Formatter::MultiFormatter[SimpleCov::Formatter::HTMLFormatter, SimpleCov::Formatter::CSVFormatter] formatter SimpleCov::Formatter::HTMLFormatter end end asciidoctor-2.0.20/.yardopts000066400000000000000000000002711443135032600157450ustar00rootroot00000000000000--charset UTF-8 --readme README.adoc --hide-api private --plugin tomdoc --title "Asciidoctor API Documentation" --output-dir rdoc lib/**/*.rb - CHANGELOG.adoc CONTRIBUTING.adoc LICENSE asciidoctor-2.0.20/CHANGELOG.adoc000066400000000000000000003754701443135032600162360ustar00rootroot00000000000000= Asciidoctor Changelog :url-asciidoctor: https://asciidoctor.org :url-asciidoc: https://docs.asciidoctor.org/asciidoc/latest/ :url-repo: https://github.com/asciidoctor/asciidoctor :icons: font :star: icon:star[role=red] ifndef::icons[] :star: ★ endif::[] {url-asciidoctor}[Asciidoctor] is a _fast_, open source text processor and publishing toolchain for converting {url-asciidoc}[AsciiDoc] content into HTML 5, DocBook 5, and other formats. This document provides a high-level view of the changes introduced in Asciidoctor by release. For an even more detailed look at what has changed, refer to the {url-repo}/commits/[commit history] on GitHub. This project utilizes semantic versioning. 
// tag::compact[] == 2.0.20 (2023-05-18) - @mojavelinux Bug Fixes:: * Update `release-version` attribute in READMEs and man page during release * Rebuild man page during release === Details {url-repo}/releases/tag/v2.0.20[git tag] | {url-repo}/compare/v2.0.19\...v2.0.20[full diff] // end::compact[] == 2.0.19 (2023-05-17) - @mojavelinux Improvements:: * Return empty string instead of nil if raw or verbatim block has no lines * Don't uppercase monospace span in section title in manpage output (#4402) * Simplify processing of implicit link (i.e., autolink) by separating implicit and explicit match * Generate partintro block consistently (#4450) * Add Kiswahili translation for built-in labels (PR #4454) (*@bkmgit*) Compliance:: * Fix call order so use of an include file with invalid encoding continues to raise error when using Ruby >= 3.2.0 * Fix test assertion for fallback Rouge stylesheet to be compatible with Rouge 4.1 (#4406) (*@tmzullinger*) * Support `notitle` option on section as alternative to `untitled` to hide title (#4437) * Add support for Haml 6 to template converter (#4429) Bug Fixes:: * Process constrained inline passthrough inside monospace span (#4458) * Catalog inline ref defined using anchor macro even when resolved reftext is empty * Use while loop rather than recursion to locate next line to process; prevents stack limit error (#4368) * Avoid matching numeric character references when searching for # in xref target (#4393) * Use correct selector to collapse margin on first and last child of sidebar * Don't allow target of include directive to start with a space (to distinguish it from a dlist item) or to end with a space * Manify alt text of block image in manpage output (#4401) * Adjust font size of term in horizontal dlist to match font size of term in regular dlist * Implicitly attach nested list that starts with block attribute lines to dlist entry (#4268) * Don't swallow square brackets when processing escaped URL macro * Treat `uri:classloader:` as an absolute path prefix when running on JRuby (#3929) * Apply reftext substitutions to value of `mantitle` attribute in DocBook output (#4448) * Enclose `` tag in `
` tag in DocBook output for man page (#4452) * Correctly handle compat role on monospace and constrained passthrough when box attrlist or formatted text is escaped Build / Infrastructure:: * Update latest CRuby in CI workflow to 3.2 * Update latest JRuby in CI workflow to 9.4.2.0 === Details {url-repo}/releases/tag/v2.0.19[git tag] | {url-repo}/compare/v2.0.18\...v2.0.19[full diff] == 2.0.18 (2022-10-15) - @mojavelinux Improvements:: * Propagate `:to_dir` option to document of AsciiDoc table cell (#4297) * Force encoding of attribute data passed via CLI to UTF-8 if transcoding fails (#4351) (*@zkaip*) * Add include role to link macro that replaces include directive when include is not enabled Bug Fixes:: * Change internal `uriish?` helper to only detect a URI pattern at start of a string; avoids misleading messages (#4357) * Prevent highlight.js warning when no language is set on source block; don't call `highlightBlock` if `data-lang` attribute is absent (#4263) * Don't raise error if `Asciidoctor::Extensions.unregister` is called before groups are initialized (#4270) * If path is included both partially and fully, store it with true value (included fully) in includes table of document catalog * Reset registry if activate is called on it again (#4256) * Format source location in exception message when extension code is malformed * Fix lineno on reader when `skip-front-matter` attribute is set but end of front matter is not found * Fix `Asciidoctor::Cli::Invoker` constructor when first argument is a hash * Update default stylesheet to honor marker on unordered list when marker is defined on ancestor unordered list (#4361) === Details {url-repo}/releases/tag/v2.0.18[git tag] | {url-repo}/compare/v2.0.17\...v2.0.18[full diff] == 2.0.17 (2022-01-05) - @mojavelinux Bug Fixes:: * Don't crash if process method for custom block returns an abstract block with context `:compound` that isn't of type `Block` (e.g., a list) * Ignore return value of process method for custom block or block macro if value matches parent argument * Remove unnamespaced selectors in Pygments stylesheet * Normalize output from Pygments to use `linenos` class for inline line numbering and trim space after number; update default stylesheet accordingly * Change `AbstractBlock#sections?` to return false when called on block that isn't a Section or Document (PR #3591) *@mogztter* * Hide built-in marker on HTML summary element in Safari when using default stylesheet (#4162) * Hide outline around HTML summary when activated in Safari (#4162) * Include primary video in value of `playlist` attribute when embedding YouTube video (#4156) * Honor `stripes=none` on nested table (#4165) * Update default stylesheet to fix spacing around empty list item (#4184) * Honor `:header_only` option when parsing document with manpage doctype (#4192) * Use numeric character reference for closing square bracket around alt text of icon * Process `author` or `authors` document attribute in document header when implicit doctitle is absent (#4206) * Patch open-uri-cached gem to work with Ruby 3.1 (update: drop patch now that open-uri-cached has been fixed) (#4227) Improvements:: * Prevent line numbers on source blocks in HTML output from being selected (applies to pygments and coderay) (#4128) * Allow hash to be specified for Vimeo video either in video ID or using `hash` attribute (#4176) * Remove unnecessary specificity in default stylesheet for styling p element inside list item * Remove obsolete gist embed styles from default stylesheet * Allow 
`--failure-level` to be set to default value, `FATAL` * Sort levels in help for `--failure-level` option in ascending order * Invert FR translations for caution & warning admonition labels (#4212) (*@cyChop*) * Add tests for open-uri-cached integration that is activated by the `cache-uri` attribute * Don't warn if negated tag is not found in include file (#4230) Documentation:: * Document how to extend an existing converter or create a new converter (#4136) * Document the syntax topic of the `--help` CLI option (#4175) * Document how to uninstall the Asciidoctor gem (#4154) * Document how to enable and use the sourcemap (the `:sourcemap` option) * Document how to catalog additional assets (the `:catalog_assets` option) == 2.0.16 (2021-08-03) - @mojavelinux Bug Fixes:: * Include all lines outside of specified tagged region when tag filter on include directive is a single negated tag (#4048) * Only interpret negated wildcard in tag filter on include directive as implicit globstar if it precedes other tags (#4086) * Change ifeval directive to resolve to false if comparison operation cannot be performed (#4046) * Don't crash if `:to_file` option is passed to `load` or `load_file` and value is not a string (#4055) * Use automatic link text if ID in shorthand xref is followed by dangling comma (e.g., `+<>+`) * Update default stylesheet to indent blocks attached to list item in checklist (#2550) * Update default stylesheet to re-enable styling of implicit lead role on first paragraph of preamble inside AsciiDoc table cell * Update default stylesheet to fix conflict between text decoration and bottom border on abbr[title] element * Change invalid font family "sans" in default stylesheet to "sans-serif" * Fix missing automatic reftext for internal xrefs in manpage output (#4110) * Replace numeric character reference for plus in manpage output (#4059) * Replace numeric character reference for degree sign in manpage output (#4059) * Convert apostrophe to the portable `+\*(Aq+` variable instead of the groff-specific escape `\(aq` (#4060) (*@felipec*) * Document the `-e, --embedded` option flag in the man page, which replaces the outdated `-e, --eruby` option flag Improvements:: * Use queue to iterate over lines in reader instead of stack (#4106) * Uppercase automatic reftext for level-2 section titles in manpage output if reftext matches section title (#4110) * Show safe modes in strictness order in CLI help (#4065) * Remove redundant styles from the default stylesheet * Update font styles for summary element in default stylesheet to match font styles of paragraph (#4114) * Update default stylesheet to indent content of details element (#4116) * Update default stylesheet to use custom marker for summary element to make appearance consistent (#4116) * Add Vietnamese translation of built-in attributes (PR #4066) (*@nguyenhoa93*) * Add Thai translation of built-in attributes (PR #4113) (*@ammaneena*) Build / Infrastructure:: * Import source of default stylesheet into this repository; use PostCSS with cssnano to minify (#4062) * Use autoprefixer to manage browser prefixes in default stylesheet (#4118) == 2.0.15 (2021-04-27) - @mojavelinux Bug Fixes:: * Don't include trailing period, question mark, or exclamation point in target (URL) of autolink (#3860) * Don't assign nil value to named attribute mapped to absent positional attribute when parsing attrlist (#4033) * Remove leading and trailing spaces around role on inline phrase (#4035) * Ignore empty role on inline phrase defined using legacy syntax and 
followed by comma (#4035) * Use xreftext on document as fallback link text in HTML output for inter-document xref that resolves to current document when no link text is provided (#4032) * Use xreftext on document as fallback link text in HTML output for internal xref with empty fragment when no link text is provided (#4032) * Use document ID as linkend in DocBook output for internal xref with empty fragment; auto-generating one if necessary (#4032) Improvements:: * Format keyboard references in monospace in manpage output Build / Infrastructure:: * Get remaining invoker tests working on JRuby 9.1 for Windows == 2.0.14 (2021-04-19) - @mojavelinux Bug Fixes:: * Don't allow AsciiDoc table cell to set document attribute that was unset from the API (exceptions include: `compat-mode`, `toc`, `showtitle`, and `notitle`) (#4017) * Ensure default document attributes unset in parent document remain unset in AsciiDoc table cell (#2586) * Allow the `showtitle` / `notitle` attribute to be toggled in an AsciiDoc table cell if set or unset in parent document (#4018) * Ensure mtime of input file honors TZ environment variable on JRuby for Windows (affects value of `docdatetime` attribute) (#3550) * Honor caption attribute on blocks that support captioned title even if corresponding `*-caption` document attribute (e.g., `example-caption`) is not set (#4023) * Suppress missing attribute warning when applying substitutions to implicit document title for assignment to intrinsic `doctitle` attribute (#4024) * Increment counter (but not the corresponding attribute) if attribute is locked (#4013) Improvements:: * Use attribute, if set, as seed value for counter even if not already registered as a counter (#4014) * Allow subs attribute value on Inline node returned by process method for custom inline macro to be a String (#3938) * Allow value of `user-home` attribute to be overridden by API or CLI (#3732) Build / Infrastructure:: * Run tests on JRuby for Windows (#3550) == 2.0.13 (2021-04-10) - @mojavelinux Bug Fixes:: * Rollback change for #3470, which added logic to remove leading and trailing empty lines in an AsciiDoc include file; instead skip empty lines before processing document header (#3997) * Don't allow `counter` and `counter2` attribute directives to override locked attributes (#3939) (*@mogztter*) * Fix crash when resolving next value in sequence for counter with non-numeric value (#3940) * Honor list of tags following negated wildcard on include directive (#3932) * Update default stylesheet to remove dash in front of cite on nested quote block (#3847) * Don't mangle formatting macros when uppercasing section titles in man page output (#3892) * Don't escape hyphen in `manname` in man page output * Remove extra `.sp` line before content of verse block in man page output * Fix layout of footnotes in man page output (#3989) * Fix formatting of footnote text with URL in man page output (#3988) * Remove redundant trailing space on URL followed by non-adjacent text in man page output (#4004) * Use `.bp` macro at location of page break in man page output (#3992) Improvements:: * Extract method to create lexer and formatter in Rouge adapter (#3953) (*@Oblomov*) * Add support for pygments.rb 2.x (#3969) (*@slonopotamus*) * Allow `NullLogger` to be enabled by setting the `:logger` option to a falsy value (#3982) * Substitute attributes in manpurpose part of NAME section in manpage doctype (#4000) * Output all mannames in name section of HTML output for manpage doctype (#3757) Build / Infrastructure:: * Enable 
running tests as root (PR #3874) (*@mikemckiernan*) * Run tests against both pygments.rb 1.x and 2.x (#3969) (*@slonopotamus*) * Speed up CI by using Bundler cache (PR #3901) (*@slonopotamus*) Documentation:: * Import documentation for processor into this repository (#3861) (*@graphitefriction*) * Add Belarusian translation of built-in attributes (PR #3928) (*@morganov*) == 2.0.12 (2020-11-10) - @mojavelinux Bug Fixes:: * Set type and target property on unresolved footnote reference and unset id property (fixes regression) (#3825) * Fix crash when inlining an SVG if the explicit width or height value on the image node is not a string (#3829) * Reset word wrap behavior to normal on tables, then re-enable again for admonition content, horizontal dlist description, and AsciiDoc table cells (#3833) Improvements:: * Pass through role to DocBook output for inline image (#3832) Compliance:: * Defer use of Ruby >= 2.3 constructs to restore compatibility with Ruby 2.0 until at least next minor release (#3827) * Don't append the default px unit identifier to the explicit width or height value when inlining an SVG (#3829) Build / Infrastructure:: * Migrate Linux CI jobs to GitHub Actions (#3837) * Migrate Windows CI jobs to GitHub Actions (#3839) * Run CI job on macOS (#3842) == 2.0.11 (2020-11-02) - @mojavelinux Bug Fixes:: * Fix infinite loop when callout list with obsolete syntax is found inside list item (#3472) * Fix infinite loop when xreftext contains a circular reference path in HTML and manpage converters (#3543) * Apply text formatting to table cells in implicit header row when column has the "a" or "l" style (#3760) * Fix errant reference warning for valid reference when running in compat mode (#3555) * Initialize backend traits for converter (if not previously initialized) using assigned basebackend; mimics Asciidoctor < 2 behavior (#3341) * Set source_location on preamble block when sourcemap option is enabled (#3799) * Link the notitle and showtitle attributes so they act as opposites for the same toggle (#3804) * Pass options to constructor of Rouge lexer instead of #lex method; restores compatibility with Rouge >= 3.4 (#3336) * Don't clobber cgi-style options on language when enabling start_inline option on the Rouge PHP lexer (#3336) * Fix parsing of wrapped link and xref text, including when an attrlist signature is detected (#3331) * Restore deprecated writable number property on AbstractBlock * Always use title as xreftext if target block has an empty caption, regardless of xrefstyle value (#3745) * Allow a bibliography reference to be used inside a footnote (#3325) * Fix bottom margin collapsing on AsciiDoc table cell (#3370) * Remove excess hard line break in multi-line AsciiMath blocks (#3407) * Only strip trailing spaces from lines of AsciiDoc include file (#3436) * Remove errant optional flag in regexp for menu macro that breaks Asciidoctor.js (#3433) * Preserve repeating backslashes when generating manpage output (#3456) * Honor percentage width specified on macro of inline SVG (#3464) * Removing leading and trailing blank lines in AsciiDoc include file to match assumption of parser (#3470) * Activate extensions when :extensions option is set even if Extensions API is not yet loaded (#3570) * Don't activate global extensions if :extensions option is false (#3570) * Escape ellipsis at start of line in manpage output (#3645) (*@jnavila*) * Don't register footnote with ID if a footnote is already registered with that ID (#3690) * Honor start attribute on ordered list in manpage 
output (#3714) * Warn instead of crashing if SVG to inline is empty (#3638) (*@mogztter*) * Compute highlight line ranges on source block relative to value of start attribute (#3519) (*@mogztter*) * Prevent collapsible block from incrementing example number by assigning an empty caption (#3639) * Use custom init function for highlight.js to select the correct `code` elements (#3761) * Fix resolved value of :to_dir when both :to_file and :to_dir options are set to absolute paths (#3778) * Fix crash if value of `stylesheets` attribute contains a folder and the destination directory for the stylesheet does not exist (even when the `:mkdirs` option is set) (#3808) * Fix crash if value passed by API for `copycss` attribute is not a string (#3592) * Restore label in front of each bibliography entry in DocBook output that was dropped by fix for #3085 (#3782) * Apply max width to each top-level container instead of body in HTML output (#3513) * Don't apply border-collapse: separate to HTML for table blocks; fixes double border at boundary of colspan/rowspan (#3793) (*@ahus1*) * Don't remove right border on last table cell in row (#2563) * Rework table borders to leverage border collapsing (apply frame border to table, grid border to cells, and selectively override border on cells to accommodate frame) (#3387) Compliance:: * Account for empty positional attribute when parsing attrlist (#3813) * Add support for muted option to self-hosted video (#3408) * Move style tag for convert-time syntax highlighters (coderay, rouge, pygments) into head (#3462) * Move style tag for client-side syntax highlighters (highlight.js, prettify) into head (#3503) * Define entry point API methods (load, convert, load_file, convert_file) as class methods instead of module_function to avoid conflict with Kernel.load (#3625) * Retain attribute order on HTML code tag for source block to remain consistent with output from 1.5.x (#3786) * Correct language code for Korean language file from kr to ko (#3807) (*@jnavila*) Improvements:: * Apply word wrap (i.e., `word-wrap: anywhere`) to body in default stylesheet (#3544) * Allow `nobreak` and `nowrap` roles to be used on any inline element (#3544) * Add CSS class to support pre-wrap role to preserve leading, trailing, and repeating spaces in phrase (#3815) * Preserve guard around XML-style callout when icons are not enabled (#3319) * Use `.fam C` command to switch font family for verbatim blocks to monospaced text in manpage output (#3561) * Remove redundant test for halign and valign attributes on table cell in DocBook converter * Allow encoding of include file to be specified using encoding attribute (#3248) * Allow template to be used to override outline by only specifying the outline template (#3491) * Upgrade MathJax from 2.7.5 to 2.7.9 * Upgrade highlight.js from 9.15.10 to 9.18.3 (note that this increases script size from 48.8 KB to 71.5 KB) * Skip unused default attribute assignments for embedded document * Allow a URL macro to have a preceding single or double quote (#3376) * Add support for erubi template engine; use it in place of erubis in test suite; note the use of erubis is deprecated (#3737) * Download and embed remote custom stylesheet if allow-uri-read is set (#3765) * Remove direction property from default stylesheet (#3753) (*@abdnh*) * remove max width setting on content column for print media in default stylesheet (#3802) * Normalize frame value "topbot" to "ends" in HTML output (consistently use frame-ends class) (#3797) * Add role setter method on 
AbstractNode (#3614) * Map chapter-signifier and part-signifier attributes in locale attribute files to replace chapter-label and part-label (#3817) Build / Infrastructure:: * Run test suite on TruffleRuby nightly (*@mogztter*, *@erebor*) * Upgrade TruffleRuby to 20.0.0 (*@mogztter*) * Trigger upstream builds for AsciidoctorJ on Github Actions (*@robertpanzer*) == 2.0.10 (2019-05-31) - @mojavelinux Bug Fixes:: * fix Asciidoctor.convert_file to honor `header_footer: false` option when writing to file (#3316) * fix placement of title on excerpt block (#3289) * always pass same options to SyntaxHighlighter#docinfo, regardless of value of location argument * fix signature of SyntaxHighlighter#docinfo method (#3300) * when `icons` is set to `image`, enable image icons, but don't use it as the value of the `icontype` attribute (#3308) == 2.0.9 (2019-04-30) - @mojavelinux Bug Fixes:: * process multiple single-item menu macros in same line (#3279) * register images in catalog correctly (#3283) * rename AbstractNode#options method to AbstractNode#enabled_options so it doesn't get shadowed by Document#options (#3282) * don't fail to convert document if alt attribute is not set on block or inline image (typically by an extension) * fix lineno of source location on blocks that follow a detached list continuation (#3281) * assume inline image type is "image" if not set (typically by an extension) == 2.0.8 (2019-04-22) - @mojavelinux Bug Fixes:: * restore background color applied to literal blocks by default stylesheet (#3258) * use portability constants (CC_ALL, CC_ANY) in regular expressions defined in built-in converters (DocBook5 and ManPage) * use portability constant (CC_ANY) in regular expression for custom inline macros * use smarter margin collapsing for AsciiDoc table cell content; prevent passthrough content from being cut off (#3256) * don't limit footnote ref to ASCII charset; allow any word character in Unicode to be used (#3269) Improvements:: * register_for methods accept arguments as symbols (#3274) * use Concurrent::Map instead of Concurrent::Hash in template converter * use module_function keyword to define methods in Helpers * move regular expression definitions to separate source file (internal change) == 2.0.7 (2019-04-13) - @mojavelinux Bug Fixes:: * fix crash when resolving ID from text and at least one candidate contains an unresolved xref (#3254) * fix compatibility with Rouge 2.0 Improvements:: * improve documentation for the `-a` CLI option; explain that `@` modifier can be placed at end of name as alternative to end of value * move source for main API entry points (load, load_file, convert, convert_file) to separate files (internal change) * define main API entry points (load, load_file, convert, convert_file) as module functions Also see https://github.com/asciidoctor/asciidoctor/milestone/33?closed=1[issues resolved in 2.0.x] (cumulative). == 2.0.6 (2019-04-04) - @mojavelinux Bug Fixes:: * assume implicit AsciiDoc extension on inter-document xref macro target with no extension (e.g., `document#`); restores 1.5.x behavior (#3231) * don't fail to load application if call to Dir.home fails; use a rescue with fallback values (#3238) * Helpers.rootname should only consider final path segment when dropping file extension Improvements:: * implement Helpers.extname as a more efficient and flexible File.extname method * check for AsciiDoc file extension using end_with? 
instead of resolving the extname and using a lookup Also see https://github.com/asciidoctor/asciidoctor/milestone/33?closed=1[issues resolved in 2.0.x] (cumulative). == 2.0.5 (2019-04-01) - @mojavelinux Bug Fixes:: * fix crash when source highlighter is Rouge and source language is not set on block (#3223) * update CLI and SyntaxHighlighter to allow Asciidoctor to load cleanly on Ruby 2.0 - 2.2 * CLI should use $stdin instead of STDIN to be consistent with the use of $stdout * mark encoding of stdio objects used in CLI as UTF-8 (#3225) * make Asciidoctor::SyntaxHighlighter::Config.register_for method public as documented Also see https://github.com/asciidoctor/asciidoctor/milestone/33?closed=1[issues resolved in 2.0.x] (cumulative). == 2.0.4 (2019-03-31) - @mojavelinux Bug Fixes:: * allow Asciidoctor to load cleanly on Ruby 2.0 - 2.2 for distributions that provide support for these older Ruby versions * make Asciidoctor::Converter::Config.register_for method public as documented * remove unused Asciidoctor::Converter::BackendTraits#derive_backend_traits private method * move Asciidoctor::Converter::BackendTraits.derive_backend_traits method to Asciidoctor::Converter * mark render and render_file methods as deprecated in API docs Also see https://github.com/asciidoctor/asciidoctor/milestone/33?closed=1[issues resolved in 2.0.x] (cumulative). == 2.0.3 (2019-03-28) - @mojavelinux Bug Fixes:: * fix crash when attrlist is used on literal monospace phrase (#3216) * update use of magic regexp variables to fix compatibility with Opal / Asciidoctor.js (#3214) Also see https://github.com/asciidoctor/asciidoctor/milestone/33?closed=1[issues resolved in 2.0.x] (cumulative). == 2.0.2 (2019-03-26) - @mojavelinux Bug Fixes:: * apply verbatim substitutions to literal paragraphs attached to list item (#3205) * implement #lines and #source methods on Table::Cell based on cell text (#3207) Also see https://github.com/asciidoctor/asciidoctor/milestone/33?closed=1[issues resolved in 2.0.x] (cumulative). == 2.0.1 (2019-03-25) - @mojavelinux Bug Fixes:: * convert titles of cataloged block and section nodes containing attribute references eagerly to resolve attributes while in scope (#3202) * customize MathJax (using a postfilter hook) to apply displaymath formatting to AsciiMath block (#2498) * fix misspelling of deprecated default_attrs DSL function (missing trailing "s") * remove unused location property (attr_accessor :location) on DocinfoProcessor class * look for deprecated extension option :pos_attrs if :positional_attrs option is missing (#3199) * add detail to load error message if path differs from gem name (#1884) Build / Infrastructure:: * bundle .yardopts in RubyGem (#3193) Also see https://github.com/asciidoctor/asciidoctor/milestone/33?closed=1[issues resolved in 2.0.x] (cumulative). 
== 2.0.0 (2019-03-22) - @mojavelinux Enhancements / Compliance:: * drop support for Ruby < 2.3 and JRuby < 9.1 and remove workarounds (#2764) * drop support for Slim < 3 (#2998) * drop the converter for the docbook45 backend from core; moved to https://github.com/asciidoctor/asciidoctor-docbook45 (#3005) * apply substitutions to section and block titles in normal substitution order (#1173) * make syntax highlighter pluggable; extract all logic into adapter classes (#2106) * add syntax highlighter adapter for Rouge (#1040) * redesign Converter API based on SyntaxHighlighter API; remap deprecated API to new API to ensure compatibility (#2891) * repurpose built-in converters as regular converters (#2891) * make registration and resolution of global converters thread-safe (#2891) * fold the default converter factory into the Converter module (#2891) * add a default implementation for Converter#convert in the Base converter (#2891) * rename Converter::BackendInfo to Converter::BackendTraits; map backend_info to new backend_traits method (#2891) * allow built-in converter classes to be resolved using Converter#for and instantiated using Converter#create (#2891) * allow converter factory to be passed using :converter_factory API option (#2891) * honor htmlsyntax if defined on converter (#2891) * add backend_traits_source keyword argument to CompositeConverter constructor (#2891) * add support for start attribute when using prettify to highlight source blocks with line numbering enabled * use String#encode to encode String as UTF-8 instead of using String#force_encoding (#2764) * add FILE_READ_MODE, URI_READ_MODE, and FILE_WRITE_MODE constants to control open mode when reading files and URIs and writing files (#2764) * set visibility of private and protected methods (#2764) * always run docinfo processor extensions regardless of safe mode (gives control to extension) (#2966) * use infinitive verb form for extension DSL method names; map deprecated method names where appropriate * add docinfo insertion slot for header location to built-in converters (#1720) * add support for the `muted` option on vimeo videos (allows autoplay to work in Chrome) (#3014) * use value of prettify-theme attribute as is if it starts with http:// or https:// (#3020) * allow icontype to be set using icons attribute (#2953) * when using a server-side syntax highlighter, highlight content of source block even if source language is not set (#3027) * automatically promote a listing block without an explicit style to a source block if language is set (#1117) * remove the 2-character (i.e., `""`) quote block syntax * don't allow block role to inherit from document attribute; only look for role in block attributes (#1944) * split out functionality of -w CLI flag (script warnings) from -v CLI flag (verbose logging) (#3030) * log possible invalid references at info level (#3030) * log dropped lines at info level when attribute-missing=drop-line (#2861) * honor attribute-missing setting when processing include directives and block macros (#2855) * log warning when include directive is not resolved due to missing attribute or blank target; always include warning in output document (#2868) * use the third argument of AbstractNode#attr / AbstractNode#attr? to set the name of a fallback attribute to look for on the document (#1934) * change default value of third argument to Abstractnode#attr / AbstractNode#attr? 
to nil so attribute doesn't inherit by default (#3059) * look for table-frame, table-grid, and table-stripes attributes on document as fallback for frame, grid, and stripes attributes on table (#3059) * add support for hover mode for table stripes (stripes=hover) (#3110) * always assume the target of a shorthand inter-document xref is a reference to an AsciiDoc document (source-to-source) (#3021) * if the target of a formal xref macro has a file extension, assume it's a path reference (#3021) * never assume target of a formal xref macro is a path reference unless a file extension or fragment is present (#3021) * encode characters in URI to comply with RFC-3986 * implement full support for styled xreftext in manpage converter (#3077) * allow the ID and role properties to be set on a list item of ordered and unordered lists via the API (#2840) * yield processor instance to registration block for document processor if block has non-zero arity (i.e., has parameters) * add Document#parsed? method to check whether document has been parsed * modify Cell class to extend from AbstractBlock instead of AbstractNode (#2963) * implement block? and inline? methods on Column, both which return false (#2963) * drop verse table cell style (treat as normal table cell) (#3111) * allow negated subs to be specified on inline pass macro (#2191) * log warning if footnoteref macro is found and compat mode is not enabled (#3114) * log info message if inline macro processor returns a String value (#3176) * apply subs to Inline node returned by inline macro processor if subs attribute is specified (#3178) * add create_inline_pass helper method to base extension processor class (#3178) * log debug message instead of warning if block style is unknown (#3092) * allow backend to delegate to a registered backend using the syntax synthetic:delegate when using custom templates (e.g., slides:html) (#891) * AbstractBlock#find_by looks inside AsciiDoc table cells if traverse_documents selector option is true (#3101) * AbstractBlock#find_by finds table cells, which can be selected using the :table_cell context in the selector (#2524) * allow ampersand to be used in e-mail address (#2553) * propagate ID assigned to inline passthrough (#2912) * rename control keywords in find_by to better align with the standard NodeFilter terminology * stop find_by iteration if filter block returns :stop directive * rename header_footer option to standalone (while still honoring header_footer for backwards compatibility) (#1444) * replace anchors and xrefs before footnotes (replace footnotes last in macros substitution group) * apply substitution for custom inline macro before all other macros * only promote index terms automatically (A, B, C becomes A > B > C + B > C + C) if indexterm-promotion option is set on document (#1487) * add support for see and see-also on index terms; parse attributes on indexterm macros if text contains `=` (#2047) * drop :indexterms table from document catalog (in preparation for solution to #450 in a 2.x release) * load additional languages for highlight.js as defined in the comma-separated highlightjs-languages attribute (#3036) * log warning if conditional expression in ifeval directive is invalid (#3161) * drop lines that contain an invalid preprocessor directive (#3161) * rename AbstractBlock#find_by directives; use :prune in place of :skip_children and :reject in place of :skip * convert example block into details/summary tag set if collapsible option is set; open by default if open option is set (#1699) * 
substitute replacements in author values used in document header (#2441) * require space after semi-colon that separates multiple authors (#2441) * catalog inline anchors at start of callout list items (#2818) (*@owenh000*) * add parse_attributes helper method to base extension Processor class (#2134) * require at least one character in the term position of a description list (#2766) Improvements:: * propagate document ID to DocBook output (#3011) * always store section numeral as string; compute roman numeral for part at assignment time (@vmj) * refactor code to use modern Hash syntax * define LIB_DIR constant; rename *_PATH constants to *_DIR constants to be consistent with RubyGems terminology (#2764) * only define ROOT_DIR if not already defined (for compatibility with Asciidoctor.js) * move custom docinfo content in footer below built-in docinfo content in footer in HTML converter (#3017) * read and write files using File methods instead of IO methods (#2995) * value comparison in AbstractNode#attr? is only performed if expected value is truthy * align default CodeRay style with style for other syntax highlighters (#2106) * ensure linenos class is added to linenos column when source highlighter is pygments and pygments-css=style * disable table stripes by default (#3110) * rename CSS class of Pygments line numbering table to linenotable (to align with Rouge) (#1040) * remove unused Converter#convert_with_options method (#2891) * add -e, --embedded CLI flag as alias for -s, --no-header-footer (require long option to specify eRuby impl) (#1444) * don't store the options attribute on the block once the options are parsed (#3051) * add an options method on AbstractNode to retrieve the set of option names (#3051) * pass :input_mtime option to Document constructor; let Document constructor assign docdate/time/year attributes (#3029) * never mutate strings; add a `frozen_string_literal: true` magic comment to top of all Ruby source files (#3054) * always use docdate and doctime to compute docyear and docdatetime (#3064) * rename PreprocessorReader#exceeded_max_depth? to PreprocessorReader#exceeds_max_depth? 
and return nil if includes are disabled * stop populating :ids table in document catalog (#3084) * always use :refs table in document catalog to look for registered IDs (#3084) * don't compute and store reference text in document catalog (#3084) * populate reference text table lazily for resolving ID by reference text (#3084) * don't store fallback reference text on :bibref node (#3085) * call AbstractNode#reftext instead of AbstractNode#text to retrieve reference text for bibref node (#3085) * only map unparsed attrlist of inline macro to target when format is short * add clearer exception message when source data is binary or has invalid encoding (#2884) * rename context for table cell and table column to :table_cell and :table_column, respectively * rename hardbreaks document attribute to hardbreaks-option; retain hardbreaks as a deprecated alias (#3123) * extend TLD for implicit e-mail addresses to 5 characters (#3154) * truncate with precision (instead of rounding) when computing absolute width for columns in DocBook output (#3131) * drop legacy LaTeX math delimiters (e.g, `$..$`) if present (#1339) * use proper terminology in warning message about mismatched preprocessor directive (#3165) * rename low-level extension attribute name :pos_attrs to :positional_attrs * mark default_attrs extension DSL method deprecated in favor of default_attributes * upgrade MathJax to 2.7.5 Bug Fixes:: * fix crash caused by inline passthrough macro with the macros sub clearing the remaining passthrough placeholders (#3089) * fix crash if ifeval directive is missing expression (#3164) * prevent relative leveloffset from making section level negative and causing hang (#3152) * don't fail to parse Markdown-style quote block that only contains attribution line (#2989) * enforce rule that Setext section title must have at least one alphanumeric character; fixes problem w/ block nested inside quote block (#3060) * apply header subs to doctitle value when assigning it back to the doctitle document attribute (#3106) * don't fail if value of pygments-style attribute is not recognized; gracefully fallback to default style (#2106) * do not alter the $LOAD_PATH (#2764) * fix crash if stem block is empty (#3118) * remove conditional comment for IE in output of built-in HTML converter; fixes sidebar table of contents (#2983) * fix styling of source blocks with linenums enabled when using prettify as syntax highlighter (#640) * update default stylesheet to support prettify themes (#3020) * remove hard-coded color values on source blocks in default stylesheet (#3020) * add fallback if relative path cannot be computed because the paths are located on different drives (#2944) * ignore explicit section level style (#1852) * don't eat space before callout number in source block if line-comment attribute is empty (#3121) * check if type is defined in a way that's compatible with autoload * fix invalid check for DSL in extension class (previously always returned true) * scope constant lookups (#2764) * use byteslice instead of slice to remove BOM from string (#2764) * don't fail if value of -a CLI option is empty string or equals sign (#2997) * allow failure level of CLI to be set to info * Reader#push_include should not fail if data is nil * fix deprecated ERB trim mode that was causing warning (#3006) * move time anchor after query string on vimeo video to avoid dropping options * allow color for generic text, line numbers, and line number border to inherit from Pygments style (#2106) * enforce and report relative include 
depth properly (depth=0 rather than depth=1 disables nested includes) * allow outfilesuffix to be soft set from API (#2640) * don't split paragraphs in table cell at line that resolves to blank if adjacent to other non-blank lines (#2963) * initialize the level to WARN when instantiating the NullLogger * next_adjacent_block should not fail when called on dlist item (#3133) * don't suppress browser styles for summary tag; add pointer cursor and panel margin bottom (#3155) * only consider TLDs in e-mail address that have ASCII alpha characters * allow underscore in domain of e-mail address Build / Infrastructure:: * clear SOURCE_DATE_EPOCH env var when testing timezones (PR #2969) (*@aerostitch*) * remove compat folder (removes the AsciiDoc.py config file that provides pseudo-compliance with Asciidoctor and a stylesheet for an old Font Awesome migration) * add Ruby 2.6.0 to build matrix * stop running CI job on unsupported versions of Ruby * exclude test suite, build script, and Gemfile from gem (#3044) * split build tasks out into individual files Also see https://github.com/asciidoctor/asciidoctor/milestone/33?closed=1[issues resolved in 2.0.x] (cumulative). == 1.5.8 (2018-10-28) - @mojavelinux Enhancements:: * if set, add value of part-signifier and chapter-signifier attributes to part and chapter titles (#2738) * allow position (float) and alignment (align) to be set on video block (#2425) * substitute attribute references in attrlist of include directive (#2761) * add Document#set_header_attribute method for adding method directly to document header during parsing (#2820) * add helper method to extension processor classes to create lists and list items * allow ordered and unordered lists to be nested to an arbitrary / unlimited depth (#2854) * add `prefer` DSL method to extension registry and document processor to flag extension as preferred (#2848) * allow manname and manpurpose to be set using document attributes; don't look for NAME section in this case (#2810) * substitute attribute references in target of custom block macro (honoring attribute-missing setting) (#2839) * interpret `<.>` as an auto-numbered callout in verbatim blocks and callout lists (#2871) * require marker for items in callout list to have circumfix brackets (e.g., `<1>` instead of `1>`) (#2871) * preserve comment guard in front of callout number in verbatim block if icons is not enabled (#1360) * add more conventional styles to quote block when it has the excerpt role (#2092) * colspecs can be separated by semi-colon instead of comma (#2798) * change AbstractBlock#find_by to respond to StopIteration exception; stop traversal after matching ID (#2900) * change AbstractBlock#find_by to honor return values :skip and :skip_children from filter block to skip node and its descendants or just its descendants, respectively (#2067) * add API to retrieve authors as array; use API in converters (#1042) (*@mogztter*) * add support for start attribute on source block to set starting line number when converting to DocBook (#2915) * track imagesdir for image on node and in catalog (#2779) * allow starting line number to be set using start attribute when highighting source block with Pygments or CodeRay (#1742) * add intrinsic attribute named `pp` that effectively resolves to `++` (#2807) * upgrade highlight.js to 9.13.1 Bug Fixes:: * don't hang on description list item that begins with /// (#2888) * don't crash when using AsciiDoc table cell style on column in CSV table (#2817) * show friendly error if CSV data for table 
contains unclosed quote (#2878) (*@zelivans*) * don't crash when attribute entry continuation is used on last line of file (#2880) (*@zelivans*) * treat empty/missing value of named block attribute followed by other attributes (e.g., caption=,cols=2*) as empty string * AbstractNode#set_option does nothing if option is already set (PR #2778) * allow revnumber to be an attribute reference in revision info line (#2785) * use ::File.open instead of ::IO.binread in Reader for Asciidoctor.js compatibility * add fallback for timezone when setting doctime * preserve UNC path that begins with a double backslash (Windows) (#2869) * fix formatting of quote block (indentation) in manpage output (#2792) * catalog inline anchors in ordered list items (#2812) * detect closing tag on last line with no trailing newline (#2830) * process `!name@` attribute syntax property; follow-up to #642 * change document extension processor DSL methods to return registered extension instance instead of array of instances * use fallback value for manname-title to prevent crash in manpage converter * consolidate inner whitespace in prose in manpage output (#2890) * only apply subs to node attribute value if enclosed in single quotes (#2905) * don't hide URI scheme if target of link macro is a bare URI scheme * fix crash when child section of part is out of sequence and section numbering is enabled (#2931) * fix crash when restoring passthroughs if passthrough role is enclosed in single quotes (#2882, #2883) * don't eagerly apply subs to inline attributes in general * make sure encoding of output file is UTF-8 * prevent warning about invalid `:asciidoc` option when using custom templates with Slim 4 (#2928) * use Pathname#relative_path_from to compute relative path to file outside of base directory (#2108) Improvements:: * change trailing delimiter on part number to colon (:) (#2738) * interpret open line range as infinite (#2914) * rename number property on AbstractBlock to numeral, but keep number as deprecated alias * use CSS class instead of hard-coded inline float style on tables and images (#2753) * use CSS class instead of hard-coded inline text-align style on block images (#2753) * allow hyphen to be used custom block macro name as long as it's not the first character (#2620) * use shorthands %F and %T instead of %Y-%m-%d and %H:%M:%S to format time * read file in binary mode whenever contents are being normalized * use .drop(0) to duplicate arrays (roughly 1.5x as fast as .dup) * only recognize a bullet glyph which is non-repeating as an unordered list marker * rename SyntaxDsl module to SyntaxProcessorDsl (internal) * fail if name given to block macro contains illegal characters * normalize all whitespace in value of manpurpose attribute * make space before callout number after custom line comment character optional * parse attrlist on inline passthrough as a shorthand attribute syntax or literal role (#2910) * add support for range syntax (.. 
delimiter) to highlight attribute on source block (#2918) * add support for unbounded range to highlight attribute on source block (#2918) * automatically assign title and caption on image block if title is set on custom block source (#2926) * use OS independent timezone (UTC or time offset) in doctime and localtime attributes (#2770) * report correct line number for inline anchor with id already in use (#2769) * generate manpage even if input is non-conforming or malformed (#1639) * allow authorinitials for single author to be overridden (#669) Documentation:: * translate README into German (#2829) (*@jwehmschulte*) * sync French translation of README (*@mogztter*) * add Swedish translation of built-in attributes (PR #2930) (*@jonasbjork*) Build / Infrastructure:: * replace thread_safe with concurrent-ruby (PR #2822) (*@junaruga*) == 1.5.7.1 (2018-05-10) - @mojavelinux Bug Fixes:: * fix regression where block attributes where being inherited by sibling blocks in a complex list item (#2771) * don't apply lead styling to first paragraph in nested document (AsciiDoc table cell) if role is present (#2624) Build / Infrastructure:: * drop obsolete logic in rake build (*@aerostitch*) * allow lib dir to be overridden for tests using an environment variable (PR #2758) (*@aerostitch*) * load asciidoctor/version from LOAD_PATH in gemspec if not found locally (PR #2760) (*@aerostitch*) == 1.5.7 (2018-05-02) - @mojavelinux Enhancements:: * BREAKING: drop XML tags, character refs, and non-word characters (except hyphen, dot, and space) when auto-generating section IDs (#794) ** hyphen, dot, and space are replaced with value of idseparator, if set; otherwise, spaces are dropped * BREAKING: disable inter-document xrefs in compat mode (#2740) * BREAKING: automatically parse attributes in link macro if equals is present, ignoring linkattrs (except in compat mode) (#2059) * pass non-AsciiDoc file extensions in target of xref through unprocessed (#2740) * process any known AsciiDoc file extension in target of shorthand inter-document xref if hash is also present (e.g., `<>`) (#2740) * only allow .adoc to be used in target of formal xref macro to create an inter-document xref (with or without a hash) (#2740) * allow attribute names to contain any word character defined by Unicode (#2376, PR #2393) * do not recognize attribute entry line if name contains a colon (PR #2377) * route all processor messages through a logger instead of using Kernel#warn (#44, PR #2660) * add MemoryLogger for capturing messages sent to logger into memory (#44, PR #2660) * add NullLogger to prevent messages from being logged (#44, PR #2660) * log message containing source location / cursor as an object; provides more context (#44, PR #2660) * pass cursor for include file to `:include_location` key in message context (PR #2729) * add `:logger` option to API to set logger instance (#44, PR #2660) * add `--failure-level=LEVEL` option to CLI to force non-zero exit code if specified logging level is reached (#2003, PR #2674) * parse text of xref macro as attributes if attribute signature found (equal sign) (#2381) * allow xrefstyle to be specified per xref by assigning the xrefstyle attribute on the xref macro (#2365) * recognize target with .adoc extension in xref macro as an inter-document xref * resolve nested includes in remote documents relative to URI (#2506, PR #2511) * allow `relfilesuffix` attribute to control file extension used for inter-document xrefs (#1273) * support `!name@` (preferred), `!name=@`, `name!@`, and `name!=@` 
Bug Fixes::
* set `:to_dir` option value correctly when output file is specified (#2382)
* preserve leading indentation in contents of AsciiDoc table cell if contents starts with a newline (#2712)
* the shorthand syntax on the style to set block attributes (id, roles, options)
no longer resets block style (#2174) * match include tags anywhere on line as long as offset by word boundary on left and space or newline on right (#2369, PR #2683) * warn if an include tag specified in the include directive is unclosed in the included file (#2361, PR #2696) * use correct parse mode when parsing blocks attached to list item (#1926) * fix typo in gemspec that removed README and CONTRIBUTING files from the generated gem (PR #2650) (*@aerostitch*) * preserve id, role, title, and reftext on open block when converting to DocBook; wrap in `` or `` (#2276) * don't turn bare URI scheme (no host) into a link (#2609, PR #2611) * don't convert inter-document xref to internal anchor unless entire target file is included into current file (#2200) * fix em dash replacement in manpage converter (#2604, PR #2607) * don't output e-mail address twice when replacing bare e-mail address in manpage output (#2654, PR #2665) * use alternate macro for monospaced text in manpage output to not conflict w/ AsciiDoc macros (#2751) * enforce that absolute start path passed to PathResolver#system_path is inside of jail path (#2642, PR #2644) * fix behavior of PathResolver#descends_from? when base path equals / (#2642, PR #2644) * automatically recover if start path passed to PathResolver#system_path is outside of jail path (#2642, PR #2644) * re-enable left justification after invoking tmac URL macro (#2400, PR #2409) * don't report warning about same level 0 section multiple times (#2572) * record timings when calling convert and write on Document (#2574, PR #2575) * duplicate header attributes when restoring; allows header attributes to be restored an arbitrary number of times (#2567, PR #2570) * propagate `:catalog_assets` option to nested document (#2564, PR #2565) * preserve newlines in quoted CSV data (#2041) * allow opening quote around quoted CSV field to be on a line by itself * output table footer after body rows (#2556, PR #2566) (*@PauloFrancaLacerda*) * move @page outside of @media print in default stylesheet (#2531, PR #2532) * don't throw exception if text of dd node is nil (#2529, PR #2530) * don't double escape ampersand in manpage output (#2525) (*@dimztimz*) * fix crash when author_1 attribute is assigned directly (#2481, PR #2487) * fix CSS for highlighted source block inside colist (#2474, PR #2490) * don't append file extension to data uri of admonition icon (#2465, PR #2466) * fix race condition in Helpers.mkdir_p (#2457, PR #2458) * correctly process nested passthrough inside unconstrained monospaced (#2442, PR #2443) * add test to ensure ampersand in author line is not double escaped (#2439, PR #2440) * prevent footnote ID from clashing with auto-generated footnote IDs (#2019) * fix alignment of icons in footnote (#2415, PR #2416) * add graceful fallback if pygments.rb fails to return a value (#2341, PR #2342) * escape specialchars in source if pygments fails to highlight (#2341) * do not recognize attribute entry line if name contains colon (PR #2377) * allow flow indexterm to be enclosed in round brackets (#2363, PR #2364) * set outfilesuffix to match file extension of output file (#2258, PR #2367) * add block title to dlist in manpage output (#1611, PR #2434) * scale text to 80% in print styles (#1484, PR #2576) * fix alignment of abstract title when using default stylesheet (PR #2732) * only set nowrap style on table caption for auto-width table (#2392) * output non-breaking space for man manual if absent in DocBook output (PR #2636) * don't crash if stem type is not 
recognized (instead, fallback to asciimath) Improvements / Refactoring:: * BREAKING: rename table spread role to stretch (#2589, PR #2591) * use cursor marks to track lines more accurately; record cursor at the start of each block, list item, or table cell (PR #2701, PR #2547) (*@seikichi*) * log a warning message if an unterminated delimited block is detected (#1133, PR #2612) * log a warning when nested section is found inside special section that doesn't support nested sections (#2433, PR #2672) * read files in binary mode to disable automatic endline coercion (then explicitly coerce to UTF-8) (PR #2583, PR #2694) * resolve / expand parent references in start path passed to PathResolver#system_path (#2642, PR #2644) * update PathResolver#expand_path to resolve parent references (#2642, PR #2644) * allow start path passed to PathResolver#system_path to be outside jail if target brings resolved path back inside jail (#2642, PR #2644) * don't run File.expand_path on Dir.pwd (assume Dir.pwd is absolute) (#2642, PR #2644) * posixify working_dir passed to PathResolver constructor if absolute (#2642, PR #2644) * optimize detection for footnote* and indexterm* macros (#2347, PR #2362) * log a warning if a footnote reference cannot be resolved (#2669) * set logger level to DEBUG when verbose is enabled * coerce value of `:template_dirs` option to an Array (PR #2621) * make block roles specified using shorthand syntax additive (#2174) * allow paragraph to masquerade as open block (PR #2412) * move callouts into document catalog (PR #2394) * document ID defined in block attribute line takes precedence over ID defined inside document title line * don't look for link and window attributes on document when resolving these attributes for an image * skip line comments in name section of manpage (#2584, PR #2585) * always activate extension registry passed to processor (PR #2379) * skip extension registry activation if no groups are registered (PR #2373) * don't apply lead styling to first paragraph if role is present (#2624, PR #2625) * raise clearer exception when extension class cannot be resolved (#2622, PR #2623) * add methods to read results from timings (#2578, PR #2580) * collapse bottom margin of last block in AsciiDoc table cell (#2568, PR #2593) * set authorcount to 0 if there are no authors (#2519, PR #2520) * validate fragment of inter-document xref that resolves to current doc (#2448, PR #2449) * put id attribute on tag around phrase instead of preceding anchor (#2445, PR #2446) * add .plist extension to XML circumfix comment family (#2430, PR #2431) (*@akosma*) * alias Document#title method to no args Document#doctitle method (#2429, PR #2432) * upgrade missing or unreadable include file to an error (#2424, PR #2426) * add compliance setting to disable natural cross references (#2405, PR #2460) * make hash in inter-document xref target optional if target has extension (#2404, PR #2406) * add CSS class to part that matches role (#2401, PR #2402) * add fit-content class to auto-width table (#2392) * automatically assign parent reference when adding node to parent (#2398, PR #2403) * leave inline anchor in section title as is if section has ID (#2243, PR #2427) * align and improve error message about invalid use of partintro between HTML5 and DocBook converters * rephrase warning when level 0 sections are found and the doctype is not book * report correct line number when duplicate bibliography anchor is found * only warn if thread_safe gem is missing when using built-in template cache * 
rename enumerate_section to assign_numeral; update API docs * drop deprecated compact option from CLI; remove from manpage * use more robust mechanism for lazy loading the asciimath gem * use consistent phrase to indicate the processor is automatically recovering from a problem * change Reader#skip_comment_lines to not return skipped lines * add styles to default stylesheet for display on Kindle (kf8) devices (PR #2475) * purge render method from test suite (except to verify alias) Documentation:: * translate 'section-refsig' for German language (PR #2633) (*@ahus1*) * synchronize French README with English version (PR #2637) (*@flashcode*) Build / Infrastructure:: * create an official logo for the project (#48) (*@mmajko*) * update Ruby versions in appveyor build matrix (PR #2388) (*@miltador*) * add mailinglist, changelog, source, and issues URI to gem spec * allow blocks and substitutions tests to be run directly * asciidoctor formula now available for Homebrew (*@zmwangx*) Distribution Packages:: * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (asciidoctor)] * https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] * https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] * https://pkgs.alpinelinux.org/packages?name=asciidoctor[Alpine Linux (asciidoctor)] * https://software.opensuse.org/package/rubygem-asciidoctor[OpenSUSE (rubygem-asciidoctor)] == 1.5.6.2 (2018-03-20) - @mojavelinux Bug Fixes:: * fix match for multiple xref macros w/ implicit text in same line (#2450) * PathResolver#root? returns true for absolute URL in browser env (#2595) Improvements / Refactoring:: * resolve include target correctly in browser (xmlhttprequest IO module) (#2599, #2602) * extract method to resolve include path (allowing Asciidoctor.js to override) (#2610) * don't expand docdir value passed to API (#2518) * check mandatory attributes when creating an image block (#2349, PR #2355) (*@mogztter*) * drop is_ prefix from boolean methods in PathResolver (PR #2587) * change Reader#replace_next_line to return true * organize methods in AbstractNode Build / Infrastructure:: * clean up dependencies * add Ruby 2.5.0 to CI build matrix (PR #2528) * update nokogiri to 1.8.0 for ruby >= 2.1 (PR #2380) (*@miltador*) Distribution Packages:: * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] * https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] * https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] * https://pkgs.alpinelinux.org/packages?name=asciidoctor[Alpine Linux (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?q=milestone%3Av1.5.6.2[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v1.5.6.2[git tag] | https://github.com/asciidoctor/asciidoctor/compare/v1.5.6.1...v1.5.6.2[full diff] == 1.5.6.1 (2017-07-23) - @mojavelinux Enhancements:: * Don't include title of special section in DocBook output if untitled option is set (e.g., dedication%untitled) Bug Fixes:: * continue to read blocks inside a delimited block after content is skipped (PR #2318) * don't create an empty paragraph for skipped content inside a delimited block (PR #2319) * allow the subs argument of Substitutors#apply_subs to be nil * coerce group name to symbol when registering extension (#2324) * eagerly substitute attributes in target of inline image 
macro (#2330)
* don't warn if source stylesheet can't be read but destination already exists (#2323)
* track include path correctly if path is absolute and outside of base directory (#2107)
* preprocess second line of setext section title (PR #2321)
* preprocess second line of setext discrete heading (PR #2332)
* return filename as relative path if filename doesn't share common root with base directory (#2107)

Improvements / Refactoring::
* change default text for inter-document xref (PR #2316)
* add additional tests to test behavior of Reader#peek_lines
* correctly parse revision info line that only has version and remark; add missing test for scenario
* rename AtxSectionRx constant to AtxSectionTitleRx for consistency with SetextSectionTitleRx constant
* use terms "atx" and "setext" to refer to section title syntax (PR #2334)
* rename HybridLayoutBreakRx constant to ExtLayoutBreakRx
* change terminology from "floating title" to "discrete heading"
* consolidate skip blank lines and check for end of reader (PR #2325)
* have Reader#skip_blank_lines report end of file (PR #2325)
* don't mix return type of Parser.build_block method (PR #2328)
* don't track eof state in reader (PR #2320)
* use shift instead of advance to consume line when return value isn't needed (PR #2322)
* replace terminology "floating title" with "discrete heading"
* remove unnecessary nil_or_empty? checks in substitutor
* leverage built-in assert / refute methods in test suite

Build / Infrastructure::
* config Travis CI job to release gem (PR #2333)
* add SHA1 hash to message used for triggered builds
* trigger build of AsciidoctorJ on every change to core
* trigger build of Asciidoctor Diagram on every change to core

Distribution Packages::
* https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)]
* https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)]
* https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)]
* https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)]
* https://pkgs.alpinelinux.org/packages?name=asciidoctor[Alpine Linux (asciidoctor)]
* https://software.opensuse.org/package/rubygem-asciidoctor[OpenSUSE (rubygem-asciidoctor)]

https://github.com/asciidoctor/asciidoctor/issues?q=milestone%3Av1.5.6.1[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v1.5.6.1[git tag] | https://github.com/asciidoctor/asciidoctor/compare/v1.5.6\...v1.5.6.1[full diff]

== 1.5.6 (2017-07-12) - @mojavelinux

Enhancements::
* use custom cross reference text if xrefstyle attribute is set (full, short, basic) (#858, #1132) (see the sketch after this list)
* store referenceable nodes under refs key in document catalog (PR #2220)
* apply reftext substitutions (specialchars, quotes, replacements) to value returned by reftext method (PR #2220)
* add xreftext method to AbstractBlock, Section, and Inline to produce formatted text for xref (PR #2220)
* introduce attributes chapter-refsig, section-refsig, and appendix-refsig to set reference signifier for chapter, section, and appendix, respectively (PR #2220)
* add rel="noopener" to links that target _blank or when noopener option is set (#2071)
* add option to exclude tags when including a file (#1516)
* add meta for shortcut icon if favicon attribute is set (#1574)
* allow use of linenums option to enable line numbers on a source block (#1981)
* allow extension groups to be unregistered individually (#1701)
* catalog bibliography anchors and capture reftext (#560, #1562)
* automatically add bibliography style to unordered list in bibliography section (#1924)
* disable startinline option when highlighting PHP if mixed option is set on source block (PR #2015) (*@ricpelo*)
* configure Slim to resolve includes in specified template dirs (#2214)
* dump manpage when -h manpage flag is passed to CLI (#2302)
* add resolves_attributes method to DSL for macros (#2122)
* invoke convert on result of custom inline macro if value is an inline node (#2132)
* resolve attributes for custom short inline macros if requested (#1797)
* add convenience method to create section from extension; use same initialization logic as parser (#1957)
* add handles? method to DSL for IncludeProcessor (#2119)
* pass through preload attribute to video tag (#2046)
* add start and end times for audio element (#1930)
* set localyear and docyear attributes (#1372)
* pass cloaked context to block extension via cloaked-context attribute (#1606)
* add support for covers in DocBook 5 converter (#1939)
* accept named pipe (fifo) as the input file (#1948)
* add AbstractBlock#next_adjacent_block helper method
* rename Document#references to catalog; alias references to catalog (PR #2237)
* rename extensions_registry option to extension_registry
* rename Extensions.build_registry method to create
* autoload extensions source file when Asciidoctor::Extensions is referenced (PR #2114, PR #2312)
* apply default_attrs to custom inline macro (PR #2127)
* allow tab separator for table to be specified using \t (#2073)
* add Cell#text= method
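The snippet below is an editorial sketch, not part of the original release notes, illustrating the xrefstyle attribute referenced at the top of this list; the sample document and chosen attribute values are assumptions for demonstration only.

[source,ruby]
----
require 'asciidoctor'

# xrefstyle only affects xrefs without explicit link text; sectnums is
# enabled so the full and short styles can include the section number
sample = <<'ADOC'
= Manual
:sectnums:
:xrefstyle: full

== Installation

See <<usage>> for details.

[#usage]
== Usage
ADOC

puts Asciidoctor.convert(sample, safe: :safe)
----
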
Improvements::
* significant improvements to performance, especially in parser and substitutors
* process include directive inside text of short form preprocessor conditional (#2146)
* add support for include tags in languages that only support circumfix comments (#1729)
* allow spaces in target of block image; target must start and end with non-space (#1943)
* add warning in verbose mode if xref is not found (#2268) (*@fapdash*)
* add warning if duplicate ID is detected (#2244)
* validate that output file will not overwrite input file (#1956)
* include docfile in warning when stylesheet cannot be read (#2089)
* warn if doctype=inline is used and block has unexpected content model (#1890)
* set built-in docfilesuffix attribute (#1673)
* make sourcemap field on Document read/write (#1916)
* allow target of xref to begin with attribute reference (#2007)
* allow target of xref to be expressed with leading # (#1546)
* allow kbd and btn macros to wrap across multiple lines (#2249)
* allow menu macro to span multiple lines; unescape escaped closing bracket
* make menu macro less greedy
* allow ampersand to be used as the first character of the first segment of a menu (#2171)
* enclose menu caret in HTML tag (#2165)
* use black text for menu reference; tighten word spacing (#2148)
* fix parsing of keys in kbd macro (PR #2222)
* add support for the window option for the link on a block image (#2172)
* set correct level for special sections in parser (#1261)
* always set numbered property on appendix to true
* store number for formal block on node (#2208)
* set sectname of header section to header (#1996)
* add the remove_attr method to AbstractNode (#2227)
* use empty string as default value for set_attr method (#1967)
* make start argument to system_path optional (#1965)
* allow API to control subs applied to ListItem text (#2035)
* allow text of ListItem to be assigned (in an extension) (#2033)
* make generate_id method on section a static method (#1929)
* validate name of custom inline macro; cache inline macro rx (#2136)
*
align number in conum list to top by default (#1999) * fix CSS positioning of interactive checkbox (#1840) * fix indentation of list items when markers are disabled (none, no-bullet, unnumbered, unstyled) (PR #2286) * instruct icon to inherit cursor if inside a link * close all files opened internally (#1897) * be more precise about splitting kbd characters (#1660) * rename limit method on String to limit_bytesize (#1889) * leverage Ruby's match? method to speed up non-capturing regexps (PR #1938) * preserve inline break in manpages (*@letheed*) * check for presence of SOURCE_DATE_EPOCH instead of value; fail if value is malformed * add Rows#by_section method to return table sections (#2219) * cache which template engines have been loaded to avoid unnecessary processing * rename assign_index method to enumerate_section (PR #2242) * don't process double quotes in xref macro (PR #2241) * optimize attr and attr? methods (PR #2232) * use IO.write instead of File.open w/ block; backport for Opal * backport IO.binread to Ruby 1.8.7 to avoid runtime check * cache backend and doctype values on document * allow normalize option to be set on PreprocessorReader; change default to false * move regular expression constants for Opal to Asciidoctor.js build (PR #2070) * add missing comma in warning message for callout list item out of sequence * combine start_with? / end_with? checks into a single method call * rename UriTerminator constant to UriTerminatorRx * promote subs to top-level constants; freeze arrays * rename PASS_SUBS constant to NONE_SUBS * rename EOL constant to LF (retain EOL as alias) * rename macro regexp constants so name follows type (e.g., InlineImageMacroRx) Compliance:: * retain block content in items of callout list when converting to HTML and man page (#1478) * only substitute specialchars for content in literal table cells (#1912) * fix operator logic for ifndef directive with multiple attributes to align with behavior of AsciiDoc.py; when attributes are separated by commas, content is only included if none of the attributes listed are set; when attributes are separated by pluses, content is included if at least one of the attributes is not set (#1983) * only recognize uniform underline for setext section title (#2083) * don't match headings with mixed leading characters (#2074) * fix layout break from matching lines it shouldn't * fix behavior of attribute substitution in docinfo content (PR #2296) * encode spaces in URI (PR #2274) * treat empty string as a valid block title * preprocess lines of a simple block (#1923) * don't drop trailing blank lines when splitting source into lines (PR #2045) * only drop known AsciiDoc extensions from the inter-document xref path (#2217) * don't number special sections or special subsections by default (#2234) * assign sectname based on name of manuscript element (#2206) * honor leveloffset when resolving implicit doctitle (#2140) * permit leading, trailing, and repeat operators in target of preprocessor conditional (PR #2279) * don't match link macro in block form (i.e., has two colons after prefix) (#2202) * do not match bibliography anchor that begins with digit (#2247) * use [ \t] (or \s) instead of \p{Blank} to match spaces (#2204) * allow named entity to have trailing digits (e.g., there4) (#2144) * only assign style to image alt text if alt text is not specified * substitute replacements in non-generated alt text of block image (PR #2285) * keep track of whether alt text is auto-generated by assigning default-alt attribute (PR #2287) * 
suppress info element in docbook output if noheader attribute is set (#2155) * preserve leading indentation in literal and verse table cells (#2037) * preserve whitespace in literal and verse table cells (#2029) * set doctype-related attributes in AsciiDoc table cell (#2159) * fix comparison logic when preprocessing first line of AsciiDoc table cell * set filetype to man when backend is manpage (#2055) * respect image scaling in DocBook converter (#1059) * share counters between AsciiDoc table cells and main document (#1942) * generate ID for floating title from converted title (#2016) * split "treeprocessor" into two words; add aliases for compatibility (PR #2179) * allow trailing hyphen in attribute name used in attribute reference * allow escaped closing bracket in text of xref macro * process pass inline macro with empty text; invert extract logic * drop support for reftext document attribute (must be specified on node) * fix compliance with Haml >= 5 (load Haml eagerly; remove ugly option) * don't match inline image macro if target contains endline or leading or trailing spaces * assign id instead of target on ref/bibref node (PR #2307) * remove regexp hacks for Opal (#2110) * drop outdated quoting exceptions for Opal (PR #2081) Bug Fixes:: * don't allow table borders to cascade to nested tables (#2151) * escape special characters in reftext of anchor (#1694) * sanitize content of authors meta tag in HTML output (#2112) * use correct line number in warning for invalid callout item reference (#2275) * fix stray marks added when unescaping unconstrained passthroughs (PR #2079) * don't confuse escaped quotes in CSV data as enclosing quotes (#2008) * don't activate implicit header if cell in first line of table contains a blank line (#1284, #644) * allow compat-mode in AsciiDoc table cell to inherit from parent document (#2153) * manify all normal table cell content (head, body, foot) in manpage output * add missing newline after table caption in manpage output (#2253) * correctly format block title on video in manpage output * don't crash if substitution list resolves to nil (#2183) * fail with informative message if converter cannot be resolved (#2161) * fix regression of not matching short form of custom block macro * encode double quotes in image alt text when used in an attribute (#2061) * encode double quote and strip XML tags in value of xreflabel attribute in DocBook converter (PR #2220) * fix typo in base64 data (PR #2094) (*@mogztter*) * permit pass macro to surround a multi-line attribute value with hard line breaks (#2211) * fix sequential inline anchor macros with empty reftext (#1689) * don't mangle compound names when document has multiple authors (#663) * don't drop last line of verbatim block if it contains only a callout number (#2043) * prevent leading & trailing round brackets from getting caught in indexterm (#1581) * remove cached title when title is set on block (#2022) * remove max-width on the callout number icon (#1895) * eagerly add hljs class for highlight.js (#2221) * fix SOURCE_DATE_EPOCH lookup in Opal * fix paths with file URI scheme are inevitably absolute (PR #1925) (*@mogztter*) * only resolve file URLs when JavaScript IO module is xmlhttprequest (PR #1898) (*@mogztter*) * fix formatting of video title in manpage converter * don't increment line number if peek_lines overruns buffer (fixes some cases when line number is off) * freeze extension processor instance, not class * fix numbering bug in reindex_sections * handle cases when there are no lines for 
include directive to select Documentation:: * enable admonition icons in README when displayed on GitHub * add German translation of chapter-label (PR #1920) (*@fapdash*) * add Ukrainian translation of built-in attributes (PR #1955) (*@hedrok*) * add Norwegian Nynorsk translation; updated Norwegian Bokmål translation of built-in attributes (PR #2142) (*@huftis*) * add Polish translation of built-in attributes (PR #2131) (*@ldziedziul*) * add Romanian translation of built-in attributes (PR #2125) (*@vitaliel*) * fix Japanese translation of built-in attributes (PR #2116) (*@haradats*) * add Bahasa Indonesia translation of built-in labels (*@triyanwn*) Build / Infrastructure:: * upgrade highlight.js to 9.12.0 (#1652) * include entire test suite in gem (PR #1952) (*@voxik*) * upgrade Slim development dependency to 3.0.x (PR #1953) (*@voxik*) * upgrade Haml development dependency to 5.0.x * upgrade Nokogiri to 1.6.x (except on Ruby 1.8) (PR #1213) * add Ruby 2.4 to CI test matrix (PR #1980) * upgrade cucumber and JRuby in CI build (PR #2005) * fix reference to documentation in attributes.adoc (PR #1901) (*@stonio*) * trap and verify all warnings when tests are run with warnings enabled * set default task in build to test:all * configure run-tests.sh script to run all tests * configure feature tests to only show progress * configure Slim in feature tests to use html as format instead of deprecated html5 * lock version of yard to fix invalid byte sequence in Ruby 1.9.3 * modify rake build to trigger dependent builds (specifically, Asciidoctor.js) (PR #2305) (*@mogztter*) Distribution Packages:: * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] * https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] * https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] * https://pkgs.alpinelinux.org/packages?name=asciidoctor[Alpine Linux (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?q=milestone%3Av1.5.6[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v1.5.6[git tag] | https://github.com/asciidoctor/asciidoctor/compare/v1.5.5\...v1.5.6[full diff] == 1.5.5 (2016-10-05) - @mojavelinux Enhancements:: * Add preference to limit the maximum size of an attribute value (#1861) * Honor SOURCE_DATE_EPOCH environment variable to accommodate reproducible builds (#1721) (*@JojoBoulix*) * Add reversed attribute to ordered list if reversed option is enabled (#1830) * Add support for additional docinfo locations (e.g., :header) * Configure default stylesheet to break monospace word if exceeds length of line using `word-wrap: break-word`; add `nobreak` and `nowrap` roles to prevent breaks (#1814) * Introduce translation file for built-in labels (*@ciampix*) * Provide translations for built-in labels (*@JmyL* - kr, *@ciampix* - it, *@ivannov* - bg, *@maxandersen* - da, *@radcortez* - pt, *@eddumelendez* - es, *@leathersole* - jp, *@aslakknutsen* - no, *@shahryareiv* - fa, *@AlexanderZobkov* - ru, *@dongwq* - zh, *@rmpestano* - pt_BR, *@ncomet* - fr, *@lgvz* - fi, *@patoi* - hu, *@BojanStipic* - sr, *@fwilhe* - de, *@rahmanusta* - tr, *@abelsromero* - ca, *@aboullaite* - ar, *@roelvs* - nl) * Translate README to Chinese (*@diguage*) * Translate README to Japanese (*@Mizuho32*) Improvements:: * Style nested emphasized phrases properly when using default stylesheet (#1691) * Honor explicit table width even when autowidth option is set (#1843) * Only 
explicit noheader option on table should disable implicit table header (#1849) * Support docbook orient="land" attribute on tables (#1815) * Add alias named list to retrieve parent List of ListItem * Update push_include method to support chaining (#1836) * Enable font smoothing on Firefox on OSX (#1837) * Support combined use of sectanchors and sectlinks in HTML5 output (#1806) * fix API docs for find_by * Upgrade to Font Awesome 4.6.3 (#1723) (*@allenan*, *@mogztter*) * README: add install instructions for Alpine Linux * README: Switch yum commands to dnf in README * README: Mention Mint as a Debian distro that packages Asciidoctor * README: Add caution advising against using gem update to update a system-managed gem (*@oddhack*) * README: sync French version with English version (*@flashcode*) * Add missing endline after title element when converting open block to HTML * Move list_marker_keyword method from AbstractNode to AbstractBlock * Rename definition list to description list internally Compliance:: * Support 6-digit decimal char refs, 5-digit hexadecimal char refs (#1824) * Compatibility fixes for Opal * Check for number using Integer instead of Fixnum class for compatibility with Ruby 2.4 Bug Fixes:: * Use method_defined? instead of respond_to? to check if method is already defined when patching (#1838) * Fix invalid conditional in HTML5 converter when handling of SVG * Processor#parse_content helper no longer shares attribute list between blocks (#1651) * Fix infinite loop if unordered list marker is immediately followed by a dot (#1679) * Don't break SVG source when cleaning if svg start tag name is immediately followed by endline (#1676) * Prevent template converter from crashing if .rb file found in template directory (#1827) * Fix crash when generating section ID when both idprefix & idseparator are blank (#1821) * Use stronger CSS rule for general text color in Pygments stylesheet (#1802) * Don't duplicate forward slash for path relative to root (#1822) Infrastructure:: * Build gem properly in the absence of a git workspace, make compatible with JRuby (#1779) * Run tests in CI using latest versions of Ruby, including Ruby 2.3 (*@ferdinandrosario*) Distribution Packages:: * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] * https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] * https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] * https://pkgs.alpinelinux.org/packages?name=asciidoctor[Alpine Linux (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?q=milestone%3Av1.5.5[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v1.5.5[git tag] | https://github.com/asciidoctor/asciidoctor/compare/v1.5.4\...v1.5.5[full diff] == 1.5.4 (2016-01-03) - @mojavelinux Enhancements:: * translate README into French (#1630) (*@anthonny*, *@mogztter*, *@gscheibel*, *@mgreau*) * allow linkstyle in manpage output to be configured (#1610) Improvements:: * upgrade to MathJax 2.6.0 and disable loading messages * upgrade to Font Awesome 4.5.0 * disable toc if document has no sections (#1633) * convert inline asciimath to MathML (using asciimath gem) in DocBook converter (#1622) * add attribute to control build reproducibility (#1453) (*@bk2204*) * recognize \file:/// as a file root in Opal browser env (#1561) * honor icon attribute on admonition block when font-based icons are enabled (#1593) (*@robertpanzer*) * resolve custom 
icon relative to iconsdir; add file extension if absent (#1634) * allow asciidoctor cli to resolve library path when invoked without leading ./ Compliance:: * allow special section to be nested at any depth (#1591) * ensure colpcwidth values add up to 100%; increase precision of values to 4 decimal places (#1647) * ignore blank cols attribute on table (#1647) * support shorthand syntax for block attributes on document title (#1650) Bug Fixes:: * don't include default toc in AsciiDoc table cell; don't pass toc location attributes to nested document (#1582) * guard against nil dlist list item in find_by (#1618) * don't swallow trailing line when include file is not readable (#1602) * change xlink namespace to xl in DocBook 5 output to prevent parse error (#1597) * make callouts globally unique within document, including AsciiDoc table cells (#1626) * initialize Slim-related attributes regardless of when Slim was loaded (#1576) (*@terceiro*) * differentiate literal backslash from escape sequence in manpage output (#1604) (*@ds26gte*) * don't mistake line beginning with \. for troff macro in manpage output (#1589) (*@ds26gte*) * escape leading dots so user content doesn't trigger troff macros in manpage output (#1631) (*@ds26gte*) * use \c after .URL macro to remove extraneous space in manpage output (#1590) (*@ds26gte*) * fix missing endline after .URL macro in manpage output (#1613) * properly handle spacing around .URL/.MTO macro in manpage output (#1641) (*@ds26gte*) * don't swallow doctitle attribute followed by block title (#1587) * change strategy for splitting names of author; fixes bug in Opal/Asciidoctor.js * don't fail if library is loaded more than once Infrastructure:: * remove trailing newlines in project source code * update contributing guidelines * explicitly test ifeval scenario raised in issue #1585 * remove backreference substitution hack for Opal/Asciidoctor.js * fix assignment of default Hash value for Opal/Asciidoctor.js * add JRuby 9.0.4.0 and Ruby 2.3.0 to the Travis CI build matrix Distribution Packages:: * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] * https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] * https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?q=milestone%3Av1.5.4[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v1.5.4[git tag] | https://github.com/asciidoctor/asciidoctor/compare/v1.5.3\...v1.5.4[full diff] == 1.5.3 (2015-10-31) - @mojavelinux Enhancements:: * add support for interactive & inline SVGs (#1301, #1224) * add built-in manpage backend (#651) (*@davidgamba*) * create Mallard backend; asciidoctor/asciidoctor-mallard (#425) (*@bk2204*) * add AsciiMath to MathML converter to support AsciiMath in DocBook converter (#954) (*@pepijnve*) * allow text of selected lines to be highlighted in source block by Pygments or CodeRay (#1429) * use value of `docinfo` attribute to control docinfo behavior (#1510) * add `docinfosubs` attribute to control which substitutions are performed on docinfo files (#405) (*@mogztter*) * drop ability to specify multiple attributes with a single `-a` flag when using the CLI (#405) (*@mogztter*) * make subtitle separator chars for document title configurable (#1350) (*@rmannibucau*) * make XrefInlineRx regexp more permissive (Mathieu Boespflug) (#844) Improvements:: * load JavaScript and CSS at bottom of 
HTML document (#1238) (*@mogztter*) * list available backends in help text (#1271) (*@plaindocs*) * properly expand tabs in literal text (#1170, #841) * add `source-indent` as document attribute (#1169) (*@mogztter*) * upgrade MathJax to 2.5.3 (#1329) * upgrade Font Awesome to 4.4.0 (#1465) (*@mogztter*) * upgrade highlight.js to 8.6 (now 8.9.1) (#1390) * don't abort if syntax highlighter isn't available (#1253) * insert docinfo footer below footer div (#1503) * insert toc at default location in embeddable HTML (#1443) * replace _ and - in generated alt text for inline images * restore attributes to header attributes after parse (#1255) * allow docdate and doctime to be overridden (#1495) * add CSS class `.center` for center block alignment (#1456) * recognize U+2022 (bullet) as alternative marker for unordered lists (#1177) (*@mogztter*) * allow videos to work for local files by prepending asset-uri-scheme (Chris) (#1320) * always assign playlist param when loop option is enabled for YouTube video * parse isolated version in revision line (#790) (*@bk2204*) * autoload Tilt when template converter is instantiated (#1313) * don't overwrite existing id entry in references table (#1256) * use outfilesuffix attribute defined in header when resolving outfile (#1412) * make AsciiDoc safe mode option on Slim engine match document (#1347) * honor htmlsyntax attribute when backend is html/html5 (#1530) * tighten spacing of wrapped lines in TOC (#1542) * tune padding around table cells in horizontal dlist (#1418) * load Droid Sans Mono 700 in default stylesheet * set line height of table cells used for syntax highlighting * set font-family of kbd; refine styling (#1423) * extract condition into `quote_lines?` method (*@mogztter*) * extract inline code into `read_paragraph` method (*@mogztter*) * parent of block in ListItem should be ListItem (#1359) * add helper methods to List and ListItem (#1551) * add method `AbstractNode#add_role` and `AbstractNode#remove_role` (#1366) (*@robertpanzer*) * introduce helper methods for sniffing URIs (#1422) * add helper to calculate basename without file extension * document `-I` and `-r` options in the manual page (*@bk2204*) * fix `+--help+` output text for `-I` (*@bk2204*) * don't require open-uri-cached if already loaded * do not attempt to scan pattern of non-existent directory in template converter * prevent CodeRay from bolding every 10th line number Compliance:: * use `` for footnote reference in text instead of `` (#1523) * fix alignment of wrapped text in footnote (#1524) * include full stop after footnote number in embeddable HTML * show manpage title & name section in embeddable HTML (#1179) * resolve missing attribute in ifeval to empty string (#1387) * support unbreakable & breakable options on table (rockyallen) (#1140) Bug Fixes:: * don't truncate exception stack in `Asciidoctor.load` (#1248) * don't fail to save cause of Java exception (#1458) (*@robertpanzer*) * fix precision error in timings report (#1342) * resolve regexp for inline macro lazily (#1336) * block argument to `find_by` should filter results (#1393) * strip comment lines in indented text of dlist item (#1537) * preserve escaped delimiter at end of line in a table (#1306) * correctly calculate colnames for implicit columns (#1556) * don't crash if colspan exceeds colspec (#1460) * account for empty records in colspec (#1375) * ignore empty cols attribute on table * use `.inspect` to print MathJax delimiters (again) (#1198) * use while loop instead of begin/while loop to address bug 
in Asciidoctor.js (#1408) * force encoding of attribute values passed from cli (#1191) * don't copy css if stylesheet or stylesdir is a URI (#1400) * fix invalid color value in default CodeRay theme * built-in writer no longer fails if output is nil (#1544) * custom template engine options should take precedence * fallback to require with a non-relative path to support Debian package (*@mogztter*) * pass opts to recursive invocations of `PathResolver#system_path` * fix and test external links in docbook backend * use format symbol `:html` instead of `:html5` for Slim to fix warnings * fix documentation for inline_macro and block_macro (Andrea Bedini) * fix grammar in warning messages regarding thread_safe gem Infrastructure:: * migrate opal_ext from core to Asciidoctor.js (#1517) * add Ruby 2.2 to CI build; only specify minor Ruby versions * enable containerized builds on Travis CI * add config to run CI build on AppVeyor * exclude benchmark folder from gem (#1522) Distribution Packages:: * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] * https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] * https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?q=milestone%3Av1.5.3[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v1.5.3[git tag] | https://github.com/asciidoctor/asciidoctor/compare/v1.5.2\...v1.5.3[full diff] == 1.5.2 (2014-11-27) - @mojavelinux Enhancements:: * add docinfo extension (#1162) (*@mogztter*) * allow docinfo to be in separate directory from content, specified by `docinfodir` attribute (#511) (*@mogztter*) * enable TeX equation auto-numbering if `eqnums` attribute is set (#1110) (*@jxxcarlson*) Improvements:: * recognize `--` as valid line comment for callout numbers; make line comment configurable (#1068) * upgrade highlight.js to version 8.4 (#1216) * upgrade Font Awesome to version 4.2.0 (#1201) (*@clojens*) * define JAVASCRIPT_PLATFORM constant to simplify conditional logic in the JavaScript environment (#897) * provide access to destination directory, outfile and outdir via Document object (#1203) * print encoding information in version report produced by `asciidoctor -v` (#1210) * add intrinsic attribute named `cpp` that effectively resolves to `C++` (#1208) * preserve URI targets passed to `stylesheet` and related attributes (#1192) * allow numeric characters in block attribute name (#1103) * support custom YouTube playlists (#1105) * make start number for unique id generation configurable (#1148) * normalize and force UTF-8 encoding of docinfo content (#831) * allow subs and default_subs to be specified in Block constructor (#749) * enhance error message when reading binary input files (#1158) (*@mogztter*) * add `append` method as alias to `<<` method on AbstractBlock (#1085) * assign value of `preface-title` as title of preface node (#1090) * fix spacing around checkbox in checklist (#1138) * automatically load Slim's include plugin when using slim templates (#1151) (*@jirutka*) * mixin Slim helpers into execution scope of slim templates (#1143) (*@jirutka*) * improve DocBook output for manpage doctype (#1134, #1142) (*@bk2204*) Compliance:: * substitute attribute entry value in attributes defined outside of header (#1130) * allow empty cell to appear at end of table row (#1106) * only produce one row for table in CSV or DSV format with a single cell 
(#1180) Bug Fixes:: * add explicit to_s call to generate delimiter settings for MathJax config (#1198) * fix includes that reference absolute Windows paths (#1144) * apply DSL to extension block in a way compatible with Opal Distribution Packages:: * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] * https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] * https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?q=milestone%3Av1.5.2[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v1.5.2[git tag] | https://github.com/asciidoctor/asciidoctor/compare/v1.5.1\...v1.5.2[full diff] == 1.5.1 (2014-09-29) - @mojavelinux Bug Fixes:: * recognize tag directives inside comments within XML files for including tagged regions * restore passthroughs inside footnotes when more than one footnote appears on the same line * -S flag in cli recognizes safe mode name as lowercase string * do not match # in character reference when looking for marked text * add namespace to lang attribute in DocBook 5 backend * restore missing space before conum on last line of listing when highlighting with Pygments * place conums on correct lines when line numbers are enabled when highlighting with Pygments * don't expand mailto links in print styles Improvements:: * implement File.read in Node (JavaScript) environment * assign sectnumlevels and toclevels values to maxdepth attribute on AsciiDoc processing instructions in DocBook output * add test for usage of image block macro with data URI * use badges from shields.io in README Distribution Packages:: * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] * https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] * https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?q=milestone%3Av1.5.1[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v1.5.1[git tag] | https://github.com/asciidoctor/asciidoctor/compare/v1.5.0\...v1.5.1[full diff] == 1.5.0 (2014-08-12) - @mojavelinux Performance:: * 10% increase in speed compared to 0.1.4 * rewrite built-in converters in Ruby instead of ERB Enhancements:: * {star} introduce new curved quote syntax (pass:["`double quotes`"], pass:['`single quotes`']) if compat-mode attribute not set (#1046) * {star} add single curved quote replacement for pass:[`'] (#715) * {star} use backtick (`) for monospaced text if compat-mode attribute not set (#714, #718) * {star} use single and double plus (pass:[+], pass:[++]) for inline passthrough if compat-mode attribute not set (#714, #718) * {star} disable single quotes as formatting marks for emphasized text if compat-mode attribute not set (#717) * {star} enable compat-mode by default if document has atx-style doctitle * {star} output phrase surrounded by # as marked text (i.e., ) (#225) * {star} add MathJax integration and corresponding blocks and macros (#492, #760) * {star} switch to open source fonts (Open Sans, Noto Serif and Droid Sans Mono) in default stylesheet, major refinements to theme (#879) * {star} embed remote images when data-uri and allow-uri-read attributes are set (#612) * {star} support leveloffset on include directive and honor relative leveloffset values (#530) * {star} 
switch default docbook backend to docbook5 (#554) (*@bk2204*) * {star} added hide-uri-scheme attribute to hide uri scheme in automatic links (#800) * {star} allow substitutions to be incrementally added & removed (#522) * {star} add compatibility with Opal, add shim compat library, use compatibility regexp, require libraries properly (#679, #836, #846) (*@mogztter*) * {star} output XHTML when backend is xhtml or xhtml5 (#494) * {star} add shorthand subs and specialchars as an alias for specialcharacters (#579) * {star} deprecate toc2 attribute in favor of position and placement values on toc attribute (e.g., toc=left) (#706) * {star} add source map (file and line number) information to blocks (#861) * {star} write to file by default if input is file (#907) * {star} add -r and -I flags from ruby command to asciidoctor command for loading additional libraries (#574) * support backslash (\) as line continuation character in the value of an attribute entry (#1022) * disable subs on pass block by default (#737) * add basic support for resolving xref target from reftext (#589) * add time range anchor to video element (#886) * match implicit URLs that use the file scheme (#853) * added sectnumlevels to control depth of section numbering (#549) * add hardbreaks option to block (#630) * substitute attribute references in manname * warn on reference to missing attribute if attribute-missing is "warn" * only enable toc macro if toc is enabled and toc-placement attribute has the value macro (#706) * add sectnums attribute as alternative alias to numbered attribute (#684) Improvements:: * {star} don't select lines that contain a tag directive when including tagged lines, make tag regexp more strict (#1027) * {star} use https scheme for assets by default * {star} upgrade to Font Awesome 4.1 (#752) (*@mogztter*) * {star} improve print styles, add print styles for book doctype (#997, #952) (*@leif81*) * {star} add proper grid and frame styles for tables (#569) (*@leif81*) * {star} use glyphs for checkboxes when not using font icons (#878) * {star} prefer source-language attribute over language attribute for defining default source language (#888) * {star} pass document as first argument to process method on Preprocessor * don't parse link attributes when linkattrs is set unless text contains equal sign * detect bare links, mark with bare class; don't repeat URL of bare link in print styles * allow Treeprocessor#process method to replace tree (#1035) * add AbstractNode#find_by method to locate nodes in tree (#862) * add API for parsing title and subtitle (#1000) * add use_fallback option to doctitle, document method * constrain subscript & superscript markup (#564, #936) * match cell specs when cell separator is customized (#985) * use stylesheet to set default table width (#975) * display nested elements correctly in toc (#967) (*@kenfinnigan*) * add support for id attribute on links (#935) (*@mogztter*) * add support for title attribute on links (*@aslakknutsen*) * add -t flag to cli to control output of timing information (#909) (*@mogztter*) * rewrite converter API (#778) * rewrite extensions to support extension instances for AsciidoctorJ (#804) * integrate thread_safe gem (#638) * allow inline macro extensions that define a custom regexp to be matched (#792) * make Reader#push_include work with default file, path and dir (#743) (*@bk2204*) * honor custom outfilesuffix and introduce relfileprefix (#801) * add author and copyright to meta in HTML5 backend (#838) * output attribution in front of 
citetitle for quote and verse blocks * recognize float style with shorthand syntax outside block (#818) * honor background color in syntax highlighting themes (#813) * print runtime environment in version output, support -v as version flag (#785) * unwrap preamble if standalone (#533) * drop leading & trailing blank lines in verbatim & raw content (#724) * remove trailing newlines from source data (#727) * add flag to cli to suppress warnings (#557) * emit warning if tag(s) not found in include file (#639) * use element for vertical table headers instead of header class (#738) (*@davidgamba*) * share select references between AsciiDoc-style cell & main document (#729) * number chapters sequentially, always (#685) * add vbar attribute, make brvbar resolve properly (#643) * add implicit user-home attribute that resolves to user's home directory (#629) * enable sidebar toc for small screens (#628) * add square brackets around button in HTML output (#631) * make language hover text work for all languages in listing block * set background color on toc2 to cover scrolling content (*@neher*) * make document parsing a discrete step, make Reader accessible as property on Document * allow custom converter to set backend info such as outfilesuffix and htmlsyntax * report an informative error message when a converter cannot be resolved (*@mogztter*) * add conum class to b element when icons are disabled, make conum CSS selector more specific * expose Document object to extension point IncludeProcessor (*@aslakknutsen*) * style audioblock title, simplify rules for block titles * alias :name_attributes to :positional_attributes in extension DSL * upgrade to highlight.js 7.4 (and later 8.0) (#756) (*@mogztter*) Compliance:: * only include xmlns in docbook45 backend if xmlns attribute is specified (#929) * add xmlns attribute for xhtml output (*@bk2204*) * warn if table without a body is converted to DocBook (#961) * wrap around admonition inside example block in DocBook 4.5 (#931) * use if block image doesn't have a title (#927) * fix invalid docbook when adding role to formatted text (#956) * move all compliance flags to Compliance module (#624) * add compliance setting to control use of shorthand property syntax (#789) * wrap top-level content inside preamble in DocBook backend when doctype is book (#971) * escape special chars in image alt text (#972) * set starting number in ordered list for docbook (#925) (*@megathaum*) * match word characters in regular expressions as defined by Unicode (#892) * put source language class names on child code element of pre element (#921) * ignore case of attribute in conditional directives (#903) * allow attribute entry to reset / reseed counter (#870) * allow doctype to be set in AsciiDoc table cell (#863) * match URL macro following entity (#819) (*@jmbruel*) * handle BOM when normalizing source (#824) * don't output revhistory if revdate is not set (#802) * perform normal subs on verse content (#799) * automatically wrap part intro content in partintro block, emit warning if part is invalid (#768) * force encoding of docinfo content to UTF-8 (#773) * add scaling & alignment attributes to block image in DocBook backend (#763) * add support for \anchor:[] macro (#531) * substitute anchor and xref macros in footnotes (#676) * remove all string mutation operations for compatibility with Opal (#735) * honor reftext defined in embedded section title anchor (#697) * allow spaces in reftext defined in block anchor (#695) * use reftext of section or block in text of xref 
link (#693) * number sections in appendix using appendix number (#683) * unescape escaped square closing bracket in footnote text (#677) * support quoted index terms that may contain commas (#597) * don't assign role attribute if quoted text has no roles (#647) * disallow quoted values in block and inline anchors * add % to scaledwidth if no units given * ignore block attribute with unquoted value None * preserve entity references with 5 digits Bug Fixes:: * resolve relative paths relative to base_dir in unsafe mode (#690) * properly handle nested passthroughs (#1034) * don't clobber outfilesuffix attribute if locked (#1024) * correctly calculate columns if colspan used in first row of table (#924) * pass theme to Pygments when pygments-css=style (#919) * fallback to text lexer when using pygments for source highlighting (#987) * only make special section if style is specified (#917) * an unresolved footnote ref should not crash processor (#876) * rescue failure to resolve ::Dir.home (#896) * recognize Windows UNC path as absolute and preserve it (#806) * adjust file glob to account for backslash in Windows paths (#805) * don't match e-mail address inside URL (#866) * test include directive resolves file with space in name (#798) * return nil from Reader#push_include and Reader#pop_include methods (#745) * fixed broken passthroughs caused by source highlighting (#720) * copy custom stylesheet if linkcss is set (#300) * honor list continuations for indented, nested list items (#664) * fix syntax errors in converters (*@jljouannic*) * fix iconfont-remote setting * fix syntax error (target -> node.target) in Docbook 5 converter (*@jf647*) * output and style HTML for toc macro correctly Infrastructure:: * add Ruby 2.1 to list of supported platforms * re-enable rbx in Travis build * switch tests to minitest (*@ktdreyer*) * update RPM for Fedora Rawhide (*@ktdreyer*) * refactor unit tests so they work in RubyMine (*@cmoulliard*) * add preliminary benchmark files to repository (#1021) * clean out old fixtures from test suite (#960) * add initial Cucumber test infrastructure (#731) * use gem tasks from Bundler in Rakefile (#654) * build gemspec files using git ls-tree (#653) * use in-process web server for URI tests * update manpage to reflect updates in 1.5.0 * rework README (#651) (*@mogztter*) Distribution Packages:: * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] * https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] * https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?milestone=8&state=closed[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v1.5.0[git tag] | https://github.com/asciidoctor/asciidoctor/compare/v0.1.4\...v1.5.0[full diff] == 0.1.4 (2013-09-05) - @mojavelinux Performance:: * 15% increase in speed compared to 0.1.3 Enhancements:: * updated xref inline macro to support inter-document references (#417) * added extension API for document processing (#79) * added include directive processor extension (#100) * added id and role shorthand for formatted (quoted) text (#517) * added shorthand syntax for specifying block options (#481) * added support for checklists in unordered list (#200) * added support for inline style for unordered lists (#620) * added DocBook 5 backend (#411) * added docinfo option for footer (#486) * added Pygments as source highlighter option 
(pygments) (#538) * added icon inline macro (#529) * recognize implicit table header row (#387) * uri can be used in inline image (#470) * add float attribute to inline image (#616) * allow role to be specified on text enclosed in backticks (#419) * added XML comment-style callouts for use in XML listings (#582) * made callout bullets non-selectable in HTML output (#478) * pre-wrap literal blocks, added nowrap option to listing blocks (#303) * skip (retain) missing attribute references by default (#523) * added attribute-missing attribute to control how a missing attribute is handled (#495) * added attribute-undefined attribute to control how an undefined attribute is handled (#495) * permit !name syntax for undefining attribute (#498) * ignore front matter used by static site generators if skip-front-matter attribute is set (#502) * sanitize contents of HTML title element in html5 backend (#504) * support toc position for toc2 (#467) * cli accepts multiple files as input (#227) (*@lordofthejars*) * added Markdown-style horizontal rules and pass Markdown tests (#455) * added float clearing classes (.clearfix, .float-group) (#602) * don't disable syntax highlighting when explicit subs is used on listing block * asciidoctor package now available in Debian Sid and Ubuntu Saucy (#216) (*@avtobiff*) Compliance:: * embed CSS by default, copy stylesheet when linkcss is set unless copycss! is set (#428) * refactor reader to track include stack (#572) * made include directive resolve relative to current file (#572) * track include stack to enforce maximum depth (#581) * fixed greedy comment blocks and paragraphs (#546) * enable toc and numbered by default in DocBook backend (#540) * ignore comment lines when matching labeled list item (#524) * correctly parse footnotes that contain a URL (#506) * parse manpage metadata, output manpage-specific HTML, set docname and outfilesuffix (#488, #489) * recognize preprocessor directives on first line of AsciiDoc table cell (#453) * include directive can retrieve data from uri if allow-uri-read attribute is set (#445) * support escaping attribute list that precedes formatted (quoted) text (#421) * made improvements to list processing (#472, #469, #364) * support percentage for column widths (#465) * substitute attributes in docinfo files (#403) * numbering no longer increments on unnumbered sections (#393) * fixed false detection of list item with hyphen marker * skip include directives when processing comment blocks * added xmlns to root element in docbook45 backend, set noxmlns attribute to disable * added a Compliance module to control compliance-related behavior * added linkattrs feature to AsciiDoc.py compatibility file (#441) * added level-5 heading to AsciiDoc.py compatibility file (#388) * added new XML-based callouts to AsciiDoc.py compatibility file * added absolute and uri image target matching to AsciiDoc.py compatibility file * added float attribute on inline image macro to AsciiDoc.py compatibility file * removed linkcss in AsciiDoc.py compatibility file * fixed fenced code entry in AsciiDoc.py compatibility file Bug Fixes:: * lowercase attribute names passed to API (#508) * numbered can still be toggled even when enabled in API (#393) * allow JRuby Map as attributes (#396) * don't attempt to highlight callouts when using CodeRay and Pygments (#534) * correctly calculate line length in Ruby 1.8 (#167) * write to specified outfile even when input is stdin (#500) * only split quote attribution on first comma in Markdown blockquotes (#389) * don't 
attempt to print render times when doc is not rendered * don't recognize line with four backticks as a fenced code block (#611) Improvements:: * upgraded Font Awesome to 3.2.1 (#451) * improved the built-in CodeRay theme to match Asciidoctor styles * link to CodeRay stylesheet if linkcss is set (#381) * style the video block (title & margin) (#590) * added Groovy, Clojure, Python and YAML to floating language hint * only process callouts for blocks in which callouts are found * added content_model to AbstractBlock, rename buffer to lines * use Untitled as document title in rendered output if document has no title * rename include-depth attribute to max-include-depth, set 64 as default value (#591) * the tag attribute can be used on the include directive to identify a single tagged region * output multiple authors in HTML backend (#399) * allow multiple template directories to be specified, document in usage and manpage (#437) * added option to cli to specify template engine (#406) * added support for external video hosting services in video block macro (#587) (*@xcoulon*) * strip leading separator(s) on section id if idprefix is blank (#551) * customized styling of toc placed inside body content (#507) * consolidate toc attribute so toc with or without toc-position can make sidebar toc (#618) * properly style floating images (inline & block) (#460) * add float attribute to inline images (#616) * use ul list for TOC in HTML5 backend (#431) * support multiple terms per labeled list item in model (#532) * added role?, has_role?, option? and roles methods to AbstractNode (#423, 474) * added captioned_title method to AbstractBlock * honor showtitle attribute as alternate to notitle! (#457) * strip leading indent from literal paragraph blocks assigned the style normal * only process lines in AsciiDoc files * emit message that tilt gem is required to use custom backends if missing (#433) * use attributes for version and last updated messages in footer (#596) * added a basic template cache (#438) * include line info in several of the warnings (for lists and tables) * print warning/error messages using warn (#556) * lines are not preprocessed when peeking ahead for section underline * introduced Cursor object to track line info * fixed table valign classes, no underline on image link * removed dependency on pending library, lock Nokogiri version to 1.5.10 * removed require rubygems line in asciidoctor.rb, add to cli if RUBY_VERSION < 1.9 * added tests for custom backends * added test that shorthand doesn't clobber explicit options (#481) * removed unnecessary monospace class from literal and listing blocks Distribution Packages:: * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] * https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] * https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?milestone=7&state=closed[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v0.1.4[git tag] | https://github.com/asciidoctor/asciidoctor/compare/v0.1.3\...v0.1.4[full diff] == 0.1.3 (2013-05-30) - @mojavelinux Performance:: * 10% increase in speed compared to 0.1.2 Enhancements:: * added support for inline rendering by setting doctype to inline (#328) * added support for using font-based icons (#115) * honor haml/slim/jade-style shorthand for id and role attributes (#313) * support Markdown-style 
headings as section titles (#373) * support Markdown-style quote blocks * added section level 5 (maps to h6 element in the html5 backend) (#334) * added btn inline macro (#259) * added menu inline menu to identify a menu selection (#173) (*@bleathem*) * added kbd inline macro to identify a key or key combination (#172) (*@bleathem*) * support alternative quote forms (#196) * added indent attribute to verbatim blocks (#365) * added prettify source-highlighter (#202) * link section titles (#122) * introduce shorthand syntax for table format (#350) * parse attributes in link when use-link-attrs attribute is set (#214) * support preamble toc-placement (#295) * exclude attribute div if quote has no attribution (#309) * support attributes passed to API as string or string array (#289) * allow safe mode to be set using string, symbol or int in API (#290) * make level 0 section titles more prominent in TOC (#369) Compliance:: * ~ 99.5% compliance with AsciiDoc.py * drop line if target of include directive is blank (#376) * resolve attribute references in target of include directive (#367) * added irc scheme to link detection (#314) * toc should honor numbered attribute (#341) * added toc2 layout to default stylesheet (#285) * consecutive terms in labeled list share same entry (#315) * support set:name:value attribute syntax (#228) * block title not allowed above document title (#175) * assign caption even if no title (#321) * horizontal dlist layout in docbook backend (#298) * set doctitle attribute (#337) * allow any backend to be specified in cli (#320) (*@lightguard*) * support for abstract and partintro (#297) Bug Fixes:: * fixed file path resolution on Windows (#330) * fixed bad variable name that was causing crash, add test for it (#335) * set proper encoding on input data (#308) * don't leak doctitle into nested document (#382) * handle author(s) defined using attributes (#301) Improvements:: * added tests for all special sections (#80) * added test for attributes defined as string or string array (#291) (*@lightguard*) Distribution Packages:: * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] https://asciidoctor.org/news/2013/05/31/asciidoctor-0-1-3-released[release notes] | https://github.com/asciidoctor/asciidoctor/issues?milestone=4&state=closed[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v0.1.3[git tag] | https://github.com/asciidoctor/asciidoctor/compare/v0.1.2\...v0.1.3[full diff] == 0.1.2 (2013-04-25) - @mojavelinux Performance:: * 28% increase in speed compared to 0.1.1, 32% increase compared to 0.1.0 Enhancements:: * new website at https://asciidoctor.org * added a default stylesheet (#76) * added viewport meta tag for mobile browsers (#238) * set attributes based on safe mode (#244) * added admonition name as style class (#265) * removed hardcoded CSS, no one likes hardcoded CSS (#165) * support multiple authors in document header (#223) * include footnotes block in embedded document (#206) * allow comma delimiter in include attribute values (#226) * support including tagged lines (#226) * added line selection to include directive (#226) * Asciidoctor#render APIs return Document when document is written to file Compliance:: * added compatibility file to make AsciiDoc.py behave like Asciidoctor (#257) * restore alpha-based xml entities (#211) * implement video and audio block macros (#155) * implement toc block macro (#269) * correctly handle 
multi-part books (#222) * complete masquerade functionality for blocks & paragraphs (#187) * support explicit subs on blocks (#220) * use code element instead of tt (#260) * honor toc2 attribute (#221) * implement leveloffset feature (#212) * include docinfo files in header when safe mode < SERVER (#116) * support email links and mailto inline macros (#213) * question must be wrapped in simpara (#231) * allow round bracket in link (#218) Bug Fixes:: * trailing comma shouldn't be included in link (#280) * warn if file in include directive doesn't exist (#262) * negative case for inline ifndef should only affect current line (#241) * don't compact nested documents (#217) * nest revision info inside revision element (#236) Distribution Packages:: * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] https://asciidoctor.org/news/2013/04/25/asciidoctor-0-1-2-released[release notes] | https://github.com/asciidoctor/asciidoctor/issues?milestone=3&state=closed[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v0.1.2[git tag] | https://github.com/asciidoctor/asciidoctor/compare/v0.1.1\...v0.1.2[full diff] == 0.1.1 (2013-02-26) - @erebor Performance:: * 15% increase in speed compared to 0.1.0 Enhancements:: * migrated repository to asciidoctor organization on GitHub (#77) * include document title when header/footer disabled and notitle attribute is unset (#103) * honor GitHub-flavored Markdown fenced code blocks (#118) * added :doctype and :backend keys to options hash in API (#163) * added :to_dir option to the Asciidoctor#render API * added option :header_only to stop parsing after reading the header * preliminary line number tracking * auto-select backend sub-folder containing custom templates * rubygem-asciidoctor package now available in Fedora (#92) Compliance:: * refactor reader, process attribute entries and conditional blocks while parsing (#143) * support limited value comparison functionality of ifeval (#83) * added support for multiple attributes in ifdef and ifndef directives * don't attempt to embed image with uri reference when data-uri is set (#157) * accommodate trailing dot in author name (#156) * don't hardcode language attribute in html backend (#185) * removed language from DocBook root node (#188) * fixed revinfo line swallowing attribute entry * auto-generate caption for listing blocks if listing-caption attribute is set * support nested includes * support literal and listing paragraphs * support em dash shorthand at the end of a line * added ftp support to link inline macro * added support for the page break block macro Bug Fixes:: * pass through image with uri reference when data-uri is set (#157) * print message for failed arg (#152) * normalize whitespace at the end of lines (improved) * properly load custom templates and required libraries Improvements:: * parse document header in distinct parsing step * moved hardcoded english captions to attributes Distribution Packages:: * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?milestone=1&state=closed[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v0.1.1[git tag] | https://github.com/asciidoctor/asciidoctor/compare/v0.1.0\...v0.1.1[full diff] == 0.1.0 (2013-02-04) - @erebor Enhancements:: * introduced Asciidoctor API (Asciidoctor#load and Asciidoctor#render methods) (#34) * added SERVER safe mode level (minimum recommended security for serverside usage) (#93) * added the asciidoctor commandline 
interface (cli) * added asciidoctor-safe command, enables safe mode by default * added man page for the asciidoctor command * use blockquote tag for quote block content (#124) * added hardbreaks option to preserve line breaks in paragraph text (#119) * :header_footer option defaults to false when using the API, unless rendering to file * added idseparator attribute to customized separator used in generated section ids * do not number special sections (differs from AsciiDoc.py) Compliance:: * use callout icons if icons are enabled, unless safe mode is SECURE * added support for name=value@ attribute syntax passed via cli (#97) * attr refs no longer case sensitive (#109) * fixed several cases of incorrect list handling * don't allow links to consume newlines or surrounding angled brackets * recognize single quote in author name * support horizontal labeled list style * added support for the d cell style * added support for bibliography anchors * added support for special sections (e.g., appendix) * added support for index term inline macros * added support for footnote and footnoteref inline macros * added auto-generated numbered captions for figures, tables and examples * added counter inline macros * added support for floating (discrete) section titles Bug Fixes:: * fixed UTF-8 encoding issue by adding magic encoding line to ERB templates (#144) * resolved Windows compatibility issues * clean CRLF from end of lines (#125) * enabled warnings when running tests, fixed warnings (#69) Improvements:: * renamed iconstype attribute to icontype Distribution Packages:: * https://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?milestone=12&state=closed[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v0.1.0[git tag] | https://github.com/asciidoctor/asciidoctor/compare/v0.0.9\...v0.1.0[full diff] == Older releases (pre-0.0.1) For information about older releases, refer to the https://github.com/asciidoctor/asciidoctor/tags[commit history] on GitHub. asciidoctor-2.0.20/CODE-OF-CONDUCT.adoc000066400000000000000000000002201443135032600170320ustar00rootroot00000000000000== Code of Conduct Please read the https://github.com/asciidoctor/.github/blob/main/CODE-OF-CONDUCT.md[Asciidoctor Community Code of Conduct]. asciidoctor-2.0.20/CONTRIBUTING.adoc000066400000000000000000000226561443135032600166510ustar00rootroot00000000000000= Contributing // settings: :idprefix: :idseparator: - :source-language: ruby :language: {source-language} ifdef::env-github,env-browser[:outfilesuffix: .adoc] // URIs: :uri-repo: https://github.com/asciidoctor/asciidoctor :uri-help-base: https://help.github.com/articles :uri-issues: {uri-repo}/issues :uri-fork-help: {uri-help-base}/fork-a-repo :uri-branch-help: {uri-fork-help}#create-branches :uri-pr-help: {uri-help-base}/using-pull-requests :uri-gist: https://gist.github.com :uri-yard: https://yardoc.org :uri-tomdoc: http://tomdoc.org == License Agreement By contributing changes to this repository, you agree to license your contributions under the MIT license. This ensures your contributions have the same license as the project and that the community is free to use your contributions. You also assert that you are the original author of the work that you are contributing unless otherwise stated. == Submitting an Issue We use the {uri-issues}[issue tracker on GitHub] associated with this project to track bugs and features (i.e., issues). 
We very much appreciate the time and effort you take to report an issue.
Before submitting an issue, make sure it hasn't already been submitted by using the {uri-issues}[search feature].
Please be sure to check closed issues as well as the issue may have been recently fixed.

If you've determined that your issue has not already been reported, please follow these guidelines when submitting an issue:

. Use an actionable title that identifies the behavior you want, such as "`Allow attributes to be defined per list item`".
. Add a description that explains your use case and why this behavior will help you achieve your goal.
. If your change involves AsciiDoc syntax, please provide a sample AsciiDoc document that can be used to better understand the scenario and for use in testing.
Also include any details that may help reproduce the bug, including your gem version, Ruby version, and operating system.
+
Since Asciidoctor is a text processor, it's really important that you submit a sample document so we can reproduce the scenario.
If the sample document or code sample is very long, you can put it in a {uri-gist}[Gist] and link to it.
. An ideal bug report would also include a pull request with at least one failing spec.
However, we recognize that not everyone who uses Asciidoctor is a Ruby programmer, or even a programmer.
So we do not expect you to include a pull request with your issue.

Condescending or disparaging remarks have no place in this issue tracker and will result in your issue being rejected.
You can be critical, but keep it positive and constructive.
Stick with actionable language that describes what you would like the software to do.

Be mindful of the fact that this project is maintained by volunteers and built on a foundation of trust.
Please respect the work of those who have volunteered their time and effort to develop this project, and we will respect the time and effort you have taken to file an issue.

== Submitting a Pull Request

. {uri-fork-help}[Fork the repository].
. Run `bundle config --local path .bundle/gems` to configure Bundler to install development dependencies inside the project.
** If the `bundle` command is not available, run `gem install bundler` to install it.
** If the libxml2 and libxslt development libraries are available on your system (Ubuntu: `sudo apt install libxml2-dev libxslt-dev`, Fedora: `sudo dnf install libxml2-devel libxslt-devel`), you can speed up installation of the `nokogiri` gem by linking directly against these libraries by running `bundle config --local build__nokogiri --use-system-libraries` first.
. Run `bundle` to install development dependencies.
. {uri-branch-help}[Create a topic branch] (preferably using the pattern `issue-XYZ`, where `XYZ` is the issue number).
. Add tests for your unimplemented feature or bug fix. (See <<writing-and-executing-tests,Writing and Executing Tests>>.)
. Run `bundle exec rake` to run the tests. If your tests pass, return to step 5.
. Implement your feature or bug fix.
. Run `bundle exec rake` to run the tests. If your tests fail, return to step 7.
. Add documentation for your feature or bug fix.
. If your changes are not 100% documented, go back to step 9.
. Add, commit, and push your changes.
. {uri-pr-help}[Submit a pull request].
For ideas about how to use pull requests, see the post http://blog.quickpeople.co.uk/2013/07/10/useful-github-patterns[Useful GitHub Patterns].

=== Background Knowledge

As Asciidoctor is built using Ruby, some basic knowledge of Ruby, RubyGems, and Minitest is beneficial.
The following resources provide a good starting point for contributors who may not be completely comfortable with these tools:

* https://www.ruby-lang.org/en/documentation/quickstart/[Ruby in 20 minutes]
* https://www.ruby-lang.org/en/documentation/ruby-from-other-languages/[Ruby from other languages]
* http://guides.rubygems.org/rubygems-basics/[RubyGems basics]
* http://guides.rubygems.org/what-is-a-gem/[What is a Gem?]
* http://blog.teamtreehouse.com/short-introduction-minitest[How to use Minitest]
* http://www.rubyinside.com/a-minitestspec-tutorial-elegant-spec-style-testing-that-comes-with-ruby-5354.html[Minitest spec tutorial]
* https://github.com/seattlerb/minitest#readme[Minitest Project Documentation]

While these resources don't cover everything needed, they serve as a good starting point for beginners.

=== Writing and Executing Tests

Tests live inside the test directory and are named with a `_test.rb` suffix.
For instance, tests for the different types of blocks can be found in the file test/blocks_test.rb.

Within a test file, individual test cases are organized inside of contexts.
A context is a type of logical container that groups related tests together.
Each test case follows the same structure:

[source]
----
test 'description of test' do
  # test logic
end
----

At the moment, the tests are quite primitive.
Here's how a typical test operates:

. Defines sample AsciiDoc source
. Renders the document to HTML or DocBook
. Uses XPath and CSS expressions to verify expected output

Here's how we might test the open block syntax:

[source]
----
test 'should render content bounded by two consecutive hyphens as an open block' do
  input = <<-EOS
--
This is an open block.
--
  EOS

  result = render_embedded_string input

  assert_css '.openblock', result, 1
  assert_css '.openblock p', result, 1
  assert_xpath '/div[@class="openblock"]//p[text()="This is an open block."]', result, 1
end
----

As you can see, several helpers are used to facilitate the test scenario.
The `render_embedded_string` method invokes Asciidoctor's render method with the header and footer option disabled.
This method is ideal for unit-level tests.
If you need to test the whole document, use `render_string` instead.

The `assert_css` and `assert_xpath` assertion methods take a CSS or XPath selector, respectively, the rendered result, and the number of expected matches.
You can also use built-in assertions in Ruby's test library.

To run all the tests, execute `rake`:

 $ rake

NOTE: The tests should only take a few seconds to run using Ruby 2.1.

If you want to run a single test file, you can use `ruby`:

 $ ruby test/blocks_test.rb

To test a single test case, first add the string "wip" to the beginning of the description.
For example:

[source]
----
test 'wip should render ...' do
  ...
end
----

Then, run `ruby` again, but this time pass a selector argument so it finds matching tests:

 $ ruby test/blocks_test.rb -n /wip/

You can also turn on verbose mode if you want to see more output:

 $ ruby test/blocks_test.rb -n /wip/ -v

Once you are done with your test, make sure to remove `wip` from the description and run all the tests again using `rake`.

We plan on switching to a more elegant testing framework in the future, such as RSpec or Cucumber, in order to make the tests more clear and robust.

=== Running Asciidoctor in Development Mode

Asciidoctor is designed so that you can run the script directly out of the cloned repository.
Execute the `asciidoctor` command directly (referencing it either by relative or absolute path).
There's no need to install it using the `gem` command first. For example, to convert the README file, switch to the root of the project and run: $ ./bin/asciidoctor README.adoc IMPORTANT: You'll need to make sure you reference the correct relative path to the `asciidoctor` command. If you want to be able to execute the `asciidoctor` command from any directory without worrying about the relative (or absolute) path, you can setup the following Bash alias: alias asciidoctor-dev="/path/to/asciidoctor/bin/asciidoctor" Now you can execute the `asciidoctor` command from any folder as follows: $ asciidoctor-dev README.adoc == Building the API Documentation The API documentation is written in the {uri-tomdoc}[TomDoc] dialect and built using {uri-yard}[Yard]. The options for Yard are configured in the [.path]_.yardopts_ file at the root of the project. To build the API documentation locally, run the following command: $ bundle exec yard The documentation will be built into the [.path]_rdoc_ folder. == Supporting Additional Ruby Versions If you would like this library to support another Ruby version, you may volunteer to be a maintainer. Being a maintainer entails making sure all tests run and pass on that implementation. When something breaks on your implementation, you will be expected to provide patches in a timely fashion. If critical issues for a particular implementation exist at the time of a major release, support for that Ruby version may be dropped. asciidoctor-2.0.20/Gemfile000066400000000000000000000031251443135032600153730ustar00rootroot00000000000000# frozen_string_literal: true source 'https://rubygems.org' # Look in asciidoctor.gemspec for runtime and development dependencies gemspec group :development do # asciimath is needed for testing AsciiMath in DocBook backend; Asciidoctor supports asciimath >= 1.0.0 gem 'asciimath', (ENV.fetch 'ASCIIMATH_VERSION', '~> 2.0') # coderay is needed for testing source highlighting gem 'coderay', '~> 1.1.0' gem 'haml', ENV['HAML_VERSION'] if ENV.key? 'HAML_VERSION' gem 'net-ftp' if (Gem::Version.new RUBY_VERSION) >= (Gem::Version.new '3.1.0') gem 'open-uri-cached', '~> 1.0.0' # pygments.rb is needed for testing source highlighting; Asciidoctor supports pygments.rb >= 1.2.0 gem 'pygments.rb', ENV['PYGMENTS_VERSION'] if ENV.key? 
'PYGMENTS_VERSION' # rouge is needed for testing source highlighting; Asciidoctor supports rouge >= 2 gem 'rouge', (ENV.fetch 'ROUGE_VERSION', '~> 3.0') if RUBY_ENGINE == 'truffleruby' || (Gem::Version.new RUBY_VERSION) < (Gem::Version.new '2.5.0') gem 'nokogiri', '~> 1.10.0' elsif (Gem::Version.new RUBY_VERSION) < (Gem::Version.new '2.6.0') gem 'nokogiri', '~> 1.12.0' end end group :docs do gem 'yard' gem 'yard-tomdoc' end # enable this group to use Guard for continuous testing # after removing comments, run `bundle install` then `guard` #group :guardtest do # gem 'guard' # gem 'guard-test' # gem 'libnotify' # gem 'listen', :github => 'guard/listen' #end group :coverage do gem 'json', '~> 2.2.0' if RUBY_ENGINE == 'truffleruby' gem 'simplecov', '~> 0.16.0' end unless (Gem::Version.new RUBY_VERSION) < (Gem::Version.new '2.5.0') asciidoctor-2.0.20/Guardfile000066400000000000000000000006461443135032600157320ustar00rootroot00000000000000# use `guard start -n f` to disable notifications # or set the environment variable GUARD_NOTIFY=false notification :libnotify, :display_message => true, :timeout => 5, # in seconds :append => false, :transient => true, :urgency => :critical guard :test do watch(%r{^lib/(.+)\.rb$}) do |m| "test/#{m[1]}_test.rb" end watch(%r{^test.+_test\.rb$}) watch('test/test_helper.rb') do "test" end end asciidoctor-2.0.20/LICENSE000066400000000000000000000021761443135032600151120ustar00rootroot00000000000000MIT License Copyright (C) 2012-present Dan Allen, Sarah White, Ryan Waldron, and the individual contributors to Asciidoctor. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
asciidoctor-2.0.20/README-de.adoc000066400000000000000000000501131443135032600162520ustar00rootroot00000000000000= Asciidoctor Dan Allen ; Sarah White v2.0.20, 2023-05-18 // settings: :idprefix: :idseparator: - :source-language: ruby :language: {source-language} ifndef::env-github[:icons: font] ifdef::env-github[] :status: :caution-caption: :fire: :important-caption: :exclamation: :note-caption: :paperclip: :tip-caption: :bulb: :warning-caption: :warning: endif::[] // Variables: :release-version: 2.0.20 // URIs: :uri-org: https://github.com/asciidoctor :uri-repo: {uri-org}/asciidoctor :uri-asciidoctorj: {uri-org}/asciidoctorj :uri-asciidoctorjs: {uri-org}/asciidoctor.js :uri-project: https://asciidoctor.org ifdef::env-site[:uri-project: link:] :uri-docs: {uri-project}/docs :uri-news: {uri-project}/news :uri-manpage: {uri-project}/man/asciidoctor :uri-issues: {uri-repo}/issues :uri-contributors: {uri-repo}/graphs/contributors :uri-rel-file-base: link: :uri-rel-tree-base: link: ifdef::env-site[] :uri-rel-file-base: {uri-repo}/blob/HEAD/ :uri-rel-tree-base: {uri-repo}/tree/HEAD/ endif::[] :uri-changelog: {uri-rel-file-base}CHANGELOG.adoc :uri-contribute: {uri-rel-file-base}CONTRIBUTING.adoc :uri-license: {uri-rel-file-base}LICENSE :uri-tests: {uri-rel-tree-base}test :uri-discuss: https://discuss.asciidoctor.org :uri-chat: https://asciidoctor.zulipchat.com :uri-rubygem: https://rubygems.org/gems/asciidoctor :uri-what-is-asciidoc: {uri-docs}/what-is-asciidoc :uri-user-manual: {uri-docs}/user-manual :uri-install-docker: https://github.com/asciidoctor/docker-asciidoctor //:uri-install-doc: {uri-docs}/install-toolchain :uri-install-macos-doc: {uri-docs}/install-asciidoctor-macos :uri-render-doc: {uri-docs}/render-documents :uri-themes-doc: {uri-docs}/produce-custom-themes-using-asciidoctor-stylesheet-factory :uri-gitscm-repo: https://github.com/git/git-scm.com :uri-freesoftware: https://www.gnu.org/philosophy/free-sw.html :uri-foundation: https://foundation.zurb.com :uri-opal: https://opalrb.com :uri-tilt: https://github.com/rtomayko/tilt :uri-ruby: https://ruby-lang.org // images: :image-uri-screenshot: https://cdn.jsdelivr.net/gh/asciidoctor/asciidoctor/screenshot.png {uri-project}[Asciidoctor] ist ein _schneller_, {uri-license}[Open Source] Textverarbeitungs- und Publishing-Toolchain für die Konvertierung von {uri-what-is-asciidoc}[AsciiDoc]-Inhalten in HTML 5, DocBook 5, PDF und andere Formate. Asciidoctor ist in Ruby geschrieben und läuft auf allen gängigen Betriebsystemen. Um die Installation zu vereinfachen wird Asciidoctor als Gem auf {uri-rubygem}[RubyGems.org] verpackt und ist als Paket für gängige Linux-Distributionen und MacOS erhältlich. Asciidoctor kann auch in einer JVM mit {uri-asciidoctorj}[AsciidoctorJ] oder einer beliebigen Javascript-Umgebung mit {uri-asciidoctorjs}[Asciidoctor.js] ausgeführt werden. Das Asciidoctor-Projekt wird {uri-repo}[auf GitHub] gehostet. ifndef::env-site[] Dieses Dokument ist auch in folgenden Sprachen erhältlich: + {uri-rel-file-base}README-zh_CN.adoc[汉语] | {uri-rel-file-base}README.adoc[English] | {uri-rel-file-base}README-fr.adoc[Français] | {uri-rel-file-base}README-jp.adoc[日本語] endif::[] .Wichtige Dokumentation [.compact] * {uri-docs}/what-is-asciidoc[Was ist AsciiDoc?] 
* {uri-docs}/asciidoc-writers-guide[Asciidoctor Benutzerhandbuch] * {uri-docs}/user-manual[Asciidoctor Gebrauchshandbuch] * {uri-docs}/asciidoc-syntax-quick-reference[AsciiDoc Syntax-Referenz] ifdef::status[] image:https://img.shields.io/gem/v/asciidoctor.svg[Latest Release, link={uri-gem}] image:https://img.shields.io/badge/rubydoc.info-{release-version}-blue.svg[library (API) docs,link=https://www.rubydoc.info/gems/asciidoctor/{release-version}] image:https://github.com/asciidoctor/asciidoctor/workflows/CI/badge.svg[Build Status (GitHub Actions),link={uri-repo}/actions] image:https://img.shields.io/badge/zulip-join_chat-brightgreen.svg[Project Chat (Zulip),link={uri-chat}] endif::[] == Sponsoren Wir möchten unseren großzügigen Sponsoren danken, ohne deren Unterstützung Asciidoctor nicht möglich wäre. Vielen Dank an die Sponsoren für ihr Engagement zur Verbesserung der technischen Dokumentation! Zusätzliche Mittel werden von unseren https://asciidoctor.org/supporters[Community Backers] zur Verfügung gestellt. Sie können dieses Projekt unterstützen, indem Sie Sponsor bei https://opencollective.com/asciidoctor[OpenCollective] werden. == Das große Ganze Asciidoctor liest Inhalte, die im Klartext geschrieben wurden, wie im Feld links im Bild unten gezeigt, und wandelt Sie in HTML 5 um, wie im rechten Feld dargestellt. Asciidoctor wendet ein Standard-Stylesheet auf das HTML 5-Dokument an, um ein angenehmes Out-of-the-Box-Erlebnis zu bieten. image::{image-uri-screenshot}[Preview of AsciiDoc source and corresponding rendered HTML] == AsciiDoc Verarbeitung Asciidoctor liest und analysiert Text, der in der AsciiDoc-Syntax geschrieben wurde, und leitet dann den Parse-Tree durch eine Reihe von eingebauten Konvertern, um HTML 5, DocBook 5 und man-pages zu erzeugen. Sie haben die Möglichkeit, eigene Konverter zu verwenden oder {uri-tilt}[Tilt]-gestützte Vorlagen zu laden, um die generierte Ausgabe anzupassen oder zusätzliche Formate zu erzeugen. Asciidoctor ist ein Ersatz für den Original AsciiDoc Python Prozessor (`asciidoc.py`). Die Asciidoctor-Testsuite verfügt über {uri-tests}[mehr als 2,000 Tests], um die Kompatibilität mit der AsciiDoc-Syntax sicherzustellen. Neben der klassischen AsciiDoc-Syntax erkennt Asciidoctor zusätzliche Markup- und Formatierungsoptionen, wie z.B. fontbasierte Icons (z.B. `+icon:fire[]+`) und UI-Elemente (z.B. `+button:[Save]+`). Asciidoctor bietet auch ein modernes, __responsive Theme__, das auf {uri-foundation}[Foundation] basiert, um die HTML 5-Ausgabe zu gestalten. == Wo Ruby hingeht, folgt Asciidoctor Sie können Asciidoctor in einer JVM mit JRuby ausführen. Um die Asciidoctor API direkt aus Java und anderen JVM-Sprachen aufzurufen, verwenden Sie {uri-asciidoctorj}[AsciidoctorJ]. Es stehen Ihnen auf {uri-asciidoctorj}[AsciidoctorJ] basierende Plugins zur Verfügung, die den Asciidoctor Prozessor in Apache Maven, Gradle oder Javadoc Builds integrieren. Asciidoctor läuft auch in JavaScript. {uri-opal}[Opal] wird verwendet, um den Ruby-Source in JavaScript umzukompilieren, um {uri-asciidoctorjs}[Asciidoctor.js] zu erzeugen. Asciidoctor.js ist eine voll funktionsfähige Version von Asciidoctor, die in jeder JavaScript-Umgebung wie z.B. einem Webbrowser oder Node.js funktioniert. Es wird für die AsciiDoc Vorschau-Erweiterungen für Chrome, Atom, Brackets und andere webbasierte Werkzeuge verwendet. 
== Anforderungen Asciidoctor arbeitet unter Linux, MacOS und Windows und benötigt eine der folgenden Implementierungen von {uri-ruby}[Ruby]: * CRuby (aka MRI) 2.3 - 2.6 * JRuby 9.1 - 9.2 * TruffleRuby (GraalVM) * Opal (JavaScript) [CAUTION] ==== Wenn Sie eine nicht-englische Windows-Umgebung verwenden, können Sie auf einen `Encoding::UndefinedConversionError` stoßen, wenn Sie Asciidoctor aufrufen. Um dieses Problem zu beheben, empfehlen wir, die aktive Codepage in Ihrer Konsole auf UTF-8 umzustellen: chcp 65001 Sobald Sie diese Änderung vorgenommen haben, haben Sie alle Ihre Unicode-Kopfschmerzen hinter sich. Wenn Sie eine IDE wie Eclipse verwenden, stellen Sie sicher, dass Sie dort auch die Kodierung auf UTF-8 setzen. Asciidoctor funktioniert am besten, wenn Sie UTF-8 überall verwenden. ==== == Installation Asciidoctor kann mit (a) Paketmanagern für gängige Linux-Distributionen, (b) Homebrew für MacOS, (c) dem Befehl `gem install` (empfohlen für Windows-Benutzer), (d) dem Asciidoctor Docker-Image oder (e) Bundler installiert werden. Der Vorteil der Verwendung des Paketmanagers Ihres Betriebssystems zur Installation des Gem ist, dass er die Installation von Ruby und der RubyGems-Bibliothek übernimmt, wenn diese Pakete nicht bereits auf Ihrem Rechner installiert sind. === (a) Linux Paketmanager Die vom Paketmanager installierte Version von Asciidoctor entspricht möglicherweise nicht der neuesten Version von Asciidoctor. Konsultieren Sie das Paket-Repository für Ihre Distribution, um herauszufinden, welche Version in der Distribution gepackt ist. * https://pkgs.alpinelinux.org/packages?name=asciidoctor[Alpine Linux (asciidoctor)] * https://www.archlinux.org/packages/?name=asciidoctor[Arch Linux (asciidoctor)] * https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (asciidoctor)] * https://software.opensuse.org/package/rubygem-asciidoctor[OpenSUSE (rubygem-asciidoctor)] * https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] Wenn Sie eine Version von Asciidoctor verwenden möchten, die neuer ist als die, die vom Paketmanager installiert wurde, lesen Sie bitte die <>. ==== apk (Alpine Linux) Um ein Gem auf Alpine Linux zu installieren, öffnen Sie ein Terminal und geben Sie folgendes ein: $ sudo apk add asciidoctor ==== pacman (Arch Linux) Um ein Gem auf Arch-basierten Distributionen zu installieren, öffnen Sie ein Terminal und geben Sie folgendes ein: $ sudo pacman -S asciidoctor ==== APT Auf Debian und Debian-basierten Distributionen wie Ubuntu nutzen Sie APT um Asciidoctor zu installieren. Um das Paket zu installieren, öffnen Sie ein Terminal und geben Sie folgendes ein: $ sudo apt-get install -y asciidoctor ==== DNF Auf RPM-basierten Linux-Distributionen, wie Fedora, CentOS und RHEL, nutzen Sie den DNF Paketmanager um Asciidoctor zu installieren. Um das Paket zu installieren, öffnen Sie ein Terminal und geben Sie folgendes ein: $ sudo dnf install -y asciidoctor === (b) Homebrew (macOS) Sie können Homebrew, den macOS-Paketmanager, verwenden, um Asciidoctor zu installieren. Wenn Sie Homebrew nicht auf Ihrem Computer haben, führen Sie zuerst die Installationsanweisungen unter https://brew.sh/[brew.sh] aus. Sobald Homebrew installiert ist, können Sie das Asciidoctor gem installieren. Öffnen Sie ein Terminal und geben Sie folgendes ein: $ brew install asciidoctor Homebrew installiert das `asciidoctor` Gem in ein exklusives Präfix, das unabhängig von den System-Gems ist. 
=== (c) Windows Um Asciidoctor unter Windows zu installieren, gibt es zwei einfache Möglichkeiten. ==== Chocolatey Wenn Sie bereits https://chocolatey.org[chocolatey] verwenden, können Sie folgenden Befehl verwenden: [source] ---- choco install ruby ---- Danach folgen Sie der <>. ==== Rubyinstaller Oder Sie benutzen den https://rubyinstaller.org/downloads/[Rubyinstaller], laden Sie das für Ihre Windows Version passende Paket herunter und nach der Installation folgen Sie ebenfalls der <>. [#gem-install] === (d) gem install Bevor Sie Asciidoctor mit `gem install` installieren, sollten Sie https://rvm.io[RVM] verwenden, um Ruby in Ihrem Home-Verzeichnis zu installieren (z.B. Userspace). Dann können Sie den Befehl `gem` sicher verwenden, um den Asciidoctor Gem zu installieren oder zu aktualisieren. Bei der Verwendung von RVM werden Gems an einem vom System isolierten Ort installiert. Öffnen Sie ein Terminal und geben Sie folgendes ein: $ gem install asciidoctor Wenn Sie eine Vorabversion (z.B. einen Release-Kandidaten) installieren möchten, verwenden Sie: $ gem install asciidoctor --pre === (e) Docker Siehe {uri-install-docker}[Installing Asciidoctor using Docker]. === (f) Bundler . Erstellen Sie ein Gemfile im Stammordner Ihres Projekts (oder im aktuellen Verzeichnis). . Fügen Sie den `asciidoctor` Gem wie folgt zu Ihrem Gemfile hinzu: + [source,subs=attributes+] ---- source 'https://rubygems.org' gem 'asciidoctor' # oder spezifizieren Sie die Version explizit # gem 'asciidoctor', '{release-version}' ---- . Speichern Sie das Gemfile . Öffnen Sie ein Terminal und installieren Sie das Gem mit: $ bundle Um das Gem zu aktualisieren, geben Sie die neue Version im Gemfile an und führen Sie `bundle` erneut aus. Die Verwendung von `bundle update` (ohne Angabe eines Gem) wird *nicht* empfohlen, da es auch andere Gems aktualisiert, was möglicherweise nicht das gewünschte Ergebnis ist. == Upgrade Wenn Sie Asciidoctor mit einem Paketmanager installiert haben, ist ihr Betriebssystem wahrscheinlich so konfiguriert, dass es Pakete automatisch aktualisiert. In diesem Fall müssen Sie das Gem nicht manuell aktualisieren. === apk (Alpine Linux) Um das Gem zu aktualisieren, nutzen Sie: $ sudo apk add -u asciidoctor === APT Um das Gem zu aktualisieren, nutzen Sie: $ sudo apt-get upgrade -y asciidoctor === DNF Um das Gem zu aktualisieren, nutzen Sie: $ sudo dnf update -y asciidoctor === Homebrew (macOS) Um das Gem zu aktualisieren, nutzen Sie: $ brew update $ brew upgrade asciidoctor === gem install Wenn Sie Asciidoctor zuvor mit dem Befehl `gem` installiert haben, müssen Sie Asciidoctor manuell aktualisieren, wenn eine neue Version veröffentlicht wird. Sie können mit folgendem Befehl aktualisieren: $ gem install asciidoctor Wenn Sie eine neue Version des Edelsteins mit `gem install` installieren, werden mehrere Versionen installiert. Verwenden Sie den folgenden Befehl, um die alten Versionen zu entfernen: $ gem cleanup asciidoctor == Verwendung Wenn der Asciidoctor Gem erfolgreich installiert wurde, ist das `asciidoctor` Kommandozeilen-Interface (CLI) in Ihrem PATH verfügbar. Um die Verfügbarkeit zu überprüfen, führen Sie den folgenden Befehl in Ihrem Terminal aus: $ asciidoctor --version Sie sollten Informationen über die Asciidoctor-Version und Ihre Ruby-Umgebung im Terminal sehen. [.output,subs=attributes+] .... Asciidoctor 1.5.7 [https://asciidoctor.org] Laufzeitumgebung (ruby 2.6.0p0 [x86_64-linux]) (lc:UTF-8 fs:UTF-8 in:- ex:UTF-8) .... Asciidoctor bietet auch eine API. 
Die API ist für die Integration mit anderer Ruby-Software wie Rails, Sinatra und GitHub und anderen Sprachen wie Java (über {uri-asciidoctorj}[AsciidoctorJ]) und JavaScript (über {uri-asciidoctorjs}[Asciidoctor.js]) vorgesehen. === Kommandozeile Mit dem Befehl `asciidoctor` können Sie Asciidoctor von der Kommandozeile (z.B. einem Terminal) aus aufrufen. Der folgende Befehl konvertiert die Datei README.adoc nach HTML und speichert das Ergebnis in der Datei README.html im gleichen Verzeichnis. Der Name der erzeugten HTML-Datei wird aus der Quelldatei abgeleitet, indem die Dateierweiterung auf `.html` geändert wird. $ asciidoctor README.adoc Sie können den Asciidoctor-Prozessor steuern, indem Sie verschiedene Flags und Schalter hinzufügen, über die Sie sich mittels folgendem Befehl informieren können: $ asciidoctor --help Zum Beispiel, um die Datei in ein anderes Verzeichnis zu schreiben, verwenden Sie: $ asciidoctor -D output README.adoc Die `asciidoctor` {uri-manpage}[man page] bietet eine vollständige Referenz der Kommandozeile. Lesen Sie die folgenden Ressourcen, um mehr über die Verwendung des `asciidoctor`-Befehls zu erfahren. * {uri-render-doc}[How do I convert a document?] * {uri-themes-doc}[How do I use the Asciidoctor stylesheet factory to produce custom themes?] === Ruby API Um Asciidoctor in Ihrer Anwendung verwenden zu können, benötigen Sie zunächst das Gem: [source] ---- require 'asciidoctor' ---- Sie können dann eine AsciiDoc-Quelldatei in eine HTML-Datei konvertieren: [source] ---- Asciidoctor.convert_file 'README.adoc', to_file: true, safe: :safe ---- WARNING: Bei Verwendung von Asciidoctor über die API ist der Standard-Sicherheitsmodus `:secure`. Im sicheren Modus sind mehrere Kernfunktionen deaktiviert, darunter die `include`-Direktive. Wenn Sie diese Funktionen aktivieren möchten, müssen Sie den Sicherheitsmodus explizit auf `:server` (empfohlen) oder `:safe` setzen. Sie können einen AsciiDoc-String auch in ein integrierbares HTML (zum Einfügen in eine HTML-Seite) konvertieren, mit: [source] ---- content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' Asciidoctor.convert content, safe: :safe ---- Wenn Sie das komplette HTML-Dokument wünschen, aktivieren Sie die Option `head_footer` wie folgt: [source] ---- content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' html = Asciidoctor.convert content, header_footer: true, safe: :safe ---- Wenn Sie Zugriff auf das analysierte Dokument benötigen, können Sie die Konvertierung in einzelne Schritte aufteilen: [source] ---- content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' document = Asciidoctor.load content, header_footer: true, safe: :safe puts document.doctitle html = document.convert ---- Denken Sie daran, dass __Sie die Ausgabe__ von Asciidoctor __ändern können__, wenn Sie sie nicht mögen! Asciidoctor unterstützt benutzerdefinierte Konverter, die die Konvertierung vom geparsten Dokument in die generierte Ausgabe übernehmen können. Eine einfache Möglichkeit, die Ausgabe stückweise anzupassen, ist die Verwendung des Template-Konverters. Der Template-Konverter ermöglicht es Ihnen, eine von {uri-tilt}[Tilt]-gestützte Template-Datei zur Verfügung zu stellen, um die Konvertierung eines beliebigen Knotens im Dokument zu handhaben. Wie auch immer Sie vorgehen, Sie können die Ausgabe zu 100% kontrollieren. Weitere Informationen zur Verwendung der API oder zur Anpassung der Ausgabe finden Sie im {uri-user-manual}[Benutzerhandbuch]. 
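Zur Veranschaulichung eine minimale Skizze eines eigenen Konverters, der ein Dokument als einfachen Text ausgibt; der Klassenname `TextConverter` und der Backend-Name `text` sind hier frei gewählte Beispiele und kein fester Bestandteil von Asciidoctor:

[source]
----
require 'asciidoctor'

# Skizze: gibt Blöcke als einfachen Text ohne Markup aus.
class TextConverter
  include Asciidoctor::Converter
  register_for 'text'

  def convert node, transform = node.node_name, opts = nil
    case transform
    when 'document'
      node.content
    when 'section'
      [node.title, node.content].join %(\n\n)
    when 'paragraph'
      (node.content.tr ?\n, ' ') << ?\n
    else
      # Inline-Knoten liefern ihren Text, alle übrigen Blöcke ihren Inhalt.
      (transform.start_with? 'inline_') ? node.text : node.content
    end
  end
end

content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].'
puts Asciidoctor.convert content, backend: 'text', safe: :safe
----

Sobald der Konverter registriert ist, wählt die Option `backend: 'text'` ihn anstelle des eingebauten HTML 5-Konverters aus.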
== Mitwirken Neue Mitwirkende sind immer willkommen! Wenn Sie Fehler oder Auslassungen im Quellcode, in der Dokumentation oder im Inhalt der Website entdecken, zögern Sie bitte nicht, ein Problem zu melden oder eine Pull Request mit einem Fix zu öffnen. Hier sind einige Möglichkeiten, wie *Sie* dazu beitragen können: * durch Verwendung von Vorabversionen (Alpha-, Beta- oder Preview-Versionen) * durch das Melden von Fehlern * durch Vorschläge für neue Funktionen * durch das Verfassen oder Bearbeiten von Dokumentationen * durch Schreiben von Code mit Tests -- _Kein Patch ist zu klein._ ** Tippfehler beheben ** Kommentare hinzufügen ** inkonsistente Leerzeichen bereinigen ** Tests schreiben! * Refactoring von Code * durch die Behebung von {uri-issues}[Problemen] * durch Überprüfung von Patches Der {uri-contribute}[Contributing Guide] bietet Informationen darüber, wie man Probleme, Feature Requests, Code und Dokumentation für das Asciidoctor Projekt erstellt, gestaltet und einreicht. == Hilfe finden Asciidoctor wurde entwickelt, um Ihnen das Schreiben und Veröffentlichen Ihrer Inhalte zu erleichtern. Aber wir können es nicht ohne ihr Feedback machen! Wir ermutigen Sie, Fragen zu stellen und alle Aspekte des Projekts auf der Diskussionsliste, auf Twitter oder im Chatroom zu diskutieren. Chat (Zulip):: {uri-chat} Discussionsliste (Nabble):: {uri-discuss} Twitter:: Follow https://twitter.com/asciidoctor[@asciidoctor] or search for the https://twitter.com/search?f=tweets&q=%23asciidoctor[#asciidoctor] hashtag ifdef::env-github[] Weitere Informationen und Dokumentation zu Asciidoctor finden Sie auf der Website des Projekts. {uri-project}[Home] | {uri-news}[News] | {uri-docs}[Docs] endif::[] Die Asciidoctor-Organisation auf GitHub hostet den Quellcode des Projekts, den Issue Tracker und Unterprojekte. Source repository (git):: {uri-repo} Issue tracker:: {uri-issues} Asciidoctor Organization auf GitHub:: {uri-org} == Lizenz Copyright (C) 2012-present Dan Allen, Sarah White, Ryan Waldron, und die einzelnen Mitarbeiter von Asciidoctor. Die Nutzung dieser Software wird unter den Bedingungen der MIT-Lizenz gewährt. Siehe die {uri-license}[LIZENZ] für den vollen Lizenztext. == Authoren *Asciidoctor* wird von https://github.com/mojavelinux[Dan Allen] und https://github.com/graphitefriction[Sarah White] geleitet und hat Beiträge von {uri-contributors}[vielen Personen] in Asciidoctors großartiger Gemeinschaft erhalten. Das Projekt wurde 2012 von https://github.com/erebor[Ryan Waldron] initiiert und basiert auf einem prototyp von https://github.com/nickh[Nick Hengeveld]. *AsciiDoc* wurde von Stuart Rackham gegründet und hat Beiträge von vielen Personen aus der AsciiDoc-Community erhalten. ifndef::env-site[] == Changelog ifeval::[{safe-mode-level} < 20] include::CHANGELOG.adoc[tag=compact,leveloffset=+1] endif::[] Eine vollständige Liste der Änderungen in älteren Versionen finden Sie im {uri-changelog}[CHANGELOG]. 
endif::[] asciidoctor-2.0.20/README-fr.adoc000066400000000000000000000472361443135032600163050ustar00rootroot00000000000000= Asciidoctor Dan Allen ; Sarah White v2.0.20, 2023-05-18 // settings: :idprefix: :idseparator: - :source-language: ruby :language: {source-language} ifndef::env-github[:icons: font] ifdef::env-github[] :status: :caution-caption: :fire: :important-caption: :exclamation: :note-caption: :paperclip: :tip-caption: :bulb: :warning-caption: :warning: endif::[] // Variables: :release-version: 2.0.20 // URIs: :uri-org: https://github.com/asciidoctor :uri-repo: {uri-org}/asciidoctor :uri-asciidoctorj: {uri-org}/asciidoctorj :uri-asciidoctorjs: {uri-org}/asciidoctor.js :uri-project: https://asciidoctor.org ifdef::env-site[:uri-project: link:] :uri-docs: {uri-project}/docs :uri-news: {uri-project}/news :uri-manpage: {uri-project}/man/asciidoctor :uri-issues: {uri-repo}/issues :uri-contributors: {uri-repo}/graphs/contributors :uri-rel-file-base: link: :uri-rel-tree-base: link: ifdef::env-site[] :uri-rel-file-base: {uri-repo}/blob/HEAD/ :uri-rel-tree-base: {uri-repo}/tree/HEAD/ endif::[] :uri-changelog: {uri-rel-file-base}CHANGELOG.adoc :uri-contribute: {uri-rel-file-base}CONTRIBUTING.adoc :uri-license: {uri-rel-file-base}LICENSE :uri-tests: {uri-rel-tree-base}test :uri-discuss: https://discuss.asciidoctor.org :uri-chat: https://asciidoctor.zulipchat.com :uri-rubygem: https://rubygems.org/gems/asciidoctor :uri-what-is-asciidoc: {uri-docs}/what-is-asciidoc :uri-user-manual: {uri-docs}/user-manual :uri-install-docker: https://github.com/asciidoctor/docker-asciidoctor //:uri-install-doc: {uri-docs}/install-toolchain :uri-install-macos-doc: {uri-docs}/install-asciidoctor-macos :uri-render-doc: {uri-docs}/render-documents :uri-themes-doc: {uri-docs}/produce-custom-themes-using-asciidoctor-stylesheet-factory :uri-gitscm-repo: https://github.com/git/git-scm.com :uri-freesoftware: https://www.gnu.org/philosophy/free-sw.html :uri-foundation: https://foundation.zurb.com :uri-tilt: https://github.com/rtomayko/tilt :uri-ruby: https://ruby-lang.org // images: :image-uri-screenshot: https://cdn.jsdelivr.net/gh/asciidoctor/asciidoctor/screenshot.png {uri-project}/[Asciidoctor] est un processeur de texte et une chaîne de publication _rapide_ et {uri-license}[open source] permettant de convertir du contenu {uri-what-is-asciidoc}[AsciiDoc] en HTML 5, DocBook 5, PDF et d'autres formats. Asciidoctor est écrit en Ruby et fonctionne sur les principaux systèmes d'exploitation. Pour simplifier l'installation, Asciidoctor est publié au format gem sur {uri-rubygem}[RubyGems.org], et il est également disponible en tant que paquet système sur les principales distributions Linux ainsi que sur macOS. Asciidoctor fonctionne aussi sur la JVM avec {uri-asciidoctorj}[AsciidoctorJ] et dans n'importe quel environnement JavaScript avec {uri-asciidoctorjs}[Asciidoctor.js]. Le projet Asciidoctor est {uri-repo}[hébergé sur GitHub]. ifndef::env-site[] Ce document est traduit dans les langues suivantes : + {uri-rel-file-base}README.adoc[Anglais] | {uri-rel-file-base}README-zh_CN.adoc[Chinois] | {uri-rel-file-base}README-jp.adoc[Japonais] endif::[] .Documentation clé [.compact] * {uri-docs}/what-is-asciidoc[Qu'est ce qu'AsciiDoc ?] 
* {uri-docs}/asciidoc-writers-guide[Guide pour Rédacteur AsciiDoc] * {uri-docs}/asciidoc-syntax-quick-reference[Syntaxe de Référence AsciiDoc] * {uri-docs}/user-manual[Manuel Utilisateur Asciidoctor] ifdef::status[] image:https://img.shields.io/gem/v/asciidoctor.svg[Latest Release, link={uri-gem}] image:https://img.shields.io/badge/rubydoc.info-{release-version}-blue.svg[library (API) docs,link=https://www.rubydoc.info/gems/asciidoctor/{release-version}] image:https://github.com/asciidoctor/asciidoctor/workflows/CI/badge.svg[Build Status (GitHub Actions),link={uri-repo}/actions] image:https://img.shields.io/badge/zulip-join_chat-brightgreen.svg[Project Chat,link={uri-chat}] endif::[] == Sponsors Nous souhaitons exprimer toute notre reconnaissance à nos généreux sponsors, sans qui Asciidoctor ne pourrait pas exister. Merci à vous pour votre engagement dans l'amélioration de la documentation technique ! Un apport financier supplémentaire est assuré par https://asciidoctor.org/supporters[la communauté]. Vous pouvez aider ce projet en devant un sponsor sur https://opencollective.com/asciidoctor[OpenCollective]. == En un mot Asciidoctor lit du contenu écrit en texte brut, comme présenté dans la partie gauche de l'image ci-dessous, et le convertit en HTML 5, comme présenté dans la partie droite. Asciidoctor applique une feuille de style par défaut au document HTML 5 afin de fournir une expérience de lecture agréable, clé en main. image::{image-uri-screenshot}[Prévisualisation d'une source AsciiDoc et le rendu HTML correspondant] == Le traitement d'AsciiDoc Asciidoctor lit et analyse la syntaxe du texte écrit en AsciiDoc afin de créer une représentation, sous forme d'arbre, à partir de laquelle des templates sont appliqués pour produire de l'HTML 5, du DocBook 5 et des pages de man(uel). Vous avez la possibilité d'écrire votre propre convertisseur ou de fournir des templates supportant {uri-tilt}[Tilt] pour personnaliser le résultat généré ou pour produire des formats alternatifs. Asciidoctor remplace le processeur AsciiDoc original écrit en Python (`asciidoc.py`). La suite de tests Asciidoctor possède {uri-tests}[plus de 2,000 tests] afin de garantir la compatibilité avec la syntaxe AsciiDoc. En plus de la syntaxe AsciiDoc standard, Asciidoctor reconnaît des balises additionnelles ainsi que des options de formatage, comme les polices d'icônes (par exemple `+icon:fire[]+`) et des éléments d'interface (par exemple `+button:[Enregistrer]+`). Asciidoctor offre aussi un thème moderne et « responsive » basé sur {uri-foundation}[Foundation] pour styliser le document HTML 5 généré. == Asciidoctor est disponible partout où Ruby est disponible Vous pouvez exécuter Asciidoctor dans la JVM en utilisant JRuby. Pour invoquer l'API Asciidoctor directement depuis Java ou d'autres langages de la JVM, utilisez {uri-asciidoctorj}[AsciidoctorJ]. Des plugins basés sur AsciidoctorJ permettent d'intégrer le processeur Asciidoctor avec Apache Maven, Gradle ou Javadoc. Asciidoctor s'exécute également au sein de JavaScript. Nous utilisons https://opalrb.com[Opal] pour transcrire le code source Ruby en JavaScript afin de produire {uri-asciidoctorjs}[Asciidoctor.js], une version pleinement fonctionnelle d’Asciidoctor qui s’intègre dans tout environnement JavaScript, comme un navigateur web ou Node.js. Asciidoctor.js est utilisé pour faire fonctionner les extensions AsciiDoc Preview pour Chrome, Atom, Brackets et autres outils web. 
== Prérequis Asciidoctor fonctionne sur Linux, macOS et Windows et requiert une des implémentations suivantes de {uri-ruby}[Ruby] : * CRuby (aka MRI) 2.3 - 2.6 * JRuby 9.1 - 9.2 * TruffleRuby (GraalVM) * Opal (JavaScript) [CAUTION] ==== Si vous utilisez un environnement Windows dans une autre langue que l'anglais, vous pourriez tomber sur l'erreur `Encoding::UndefinedConversionError` lors du lancement d'Asciidoctor. Pour corriger ce problème, nous recommandons de changer la page de code en UTF-8 dans votre console : chcp 65001 Après ce changement, tous les maux de tête liés à l'Unicode seront derrière vous. Si vous utilisez un environnement de développement comme Eclipse, assurez-vous de définir l'encodage en UTF-8. Asciidoctor fonctionne mieux lorsque vous utilisez UTF-8 partout. ==== == Installation Asciidoctor peut être installé en utilisant (a) un gestionnaire de paquets Linux, (b) Homebrew pour macOS, (c) la commande `gem install` (recommandé pour les utilisateurs Windows), (d) l'image officielle Docker, ou (e) Bundler. L'avantage d'utiliser le gestionnaire de paquets pour installer la gemme est que l'installation englobe celle des librairies Ruby et RubyGems si elles ne sont pas déjà installées. === (a) Gestionnaires de paquets Linux La version installée par votre gestionnaire de paquets peut ne pas correspondre à la dernière version d'Asciidoctor. Consulter le dépôt de votre distribution Linux pour connaitre la dernière version disponible d'Asciidoctor en fonction de la version de votre distribution. * https://pkgs.alpinelinux.org/packages?name=asciidoctor[Alpine Linux (asciidoctor)] * https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (asciidoctor)] * https://software.opensuse.org/package/rubygem-asciidoctor[OpenSUSE (rubygem-asciidoctor)] * https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] Si vous souhaitez installer une version plus récente d'Asciidoctor que celle proposée par votre gestionnaire de paquets, suivre <>. ==== apk (Alpine Linux) Pour installer le paquet sur Alpine Linux, ouvrez un terminal et tapez : $ sudo apk add asciidoctor ==== APT Sur Debian et les distributions dérivées de Debian, comme Ubuntu, utilisez APT pour installer Asciidoctor. Pour installer le paquet, ouvrez un terminal et tapez : $ sudo apt-get install -y asciidoctor ==== DNF Sur les distributions Linux qui utilisent des RPM, comme Fedora, CentOS, et RHEL, utilisez le gestionnaire de paquets DNF pour installer Asciidoctor. Pour installer le paquet, ouvrez un terminal et tapez : $ sudo dnf install -y asciidoctor === (b) Homebrew (macOS) Vous pouvez utiliser Homebrew, le gestionnaire de paquets sur macOS, pour installer Asciidoctor. Si vous n'avez pas encore installé Homebrew, suivez les instructions sur https://brew.sh/[brew.sh]. Une fois Homebrew installé, vous pouvez installer Asciidoctor. Ouvrez un terminal et tapez : $ brew install asciidoctor Homebrew installe la gemme `asciidoctor` dans un répertoire spécifique qui est indépendant des gemmes système. [#gem-install] === (c) gem install Avant d'installer Asciidoctor en utilisant `gem install`, il est recommandé d'utiliser https://rvm.io[RVM] pour installer Ruby dans votre « home » (c'est-à-dire, votre espace utilisateur). Ensuite, vous pouvez utiliser la commande `gem` pour installer ou mettre à jour la gemme Asciidoctor. Quand vous utilisez RVM, les gemmes sont installées dans un répertoire isolé du système. 
Ouvrez un terminal et tapez : $ gem install asciidoctor Si vous souhaitez installer une version pre-release (c'est-à-dire, une « release candidate »), utilisez : $ gem install asciidoctor --pre === (d) Docker Lire {uri-install-docker}[Installer Asciidoctor en utilisant Docker]. === (e) Bundler . Créez un fichier Gemfile à la racine de votre projet (ou du répertoire courant) . Ajoutez la gemme `asciidoctor` dans votre fichier Gemfile comme ci-dessous : + [source,subs=attributes+] ---- source 'https://rubygems.org' gem 'asciidoctor' # ou spécifier la version explicitement # gem 'asciidoctor', '{release-version}' ---- . Sauvegardez le fichier Gemfile . Ouvrez un terminal et installez la gemme en utilisant : $ bundle Pour mettre à jour la gemme, spécifiez la nouvelle version dans le fichier Gemfile et exécutez `bundle` à nouveau. Utiliser `bundle update` *n*'est *pas* recommandé car les autres gemmes seront également mises à jour, ce qui n'est pas forcément le résultat voulu. == Mise à jour Si vous avez installé Asciidoctor en utilisant votre gestionnaire de paquets, votre système d'exploitation est surement configuré pour mettre à jour automatiquement les paquets, si tel est le cas vous n'avez pas besoin de mettre à jour manuellement Asciidoctor. === apk (Alpine Linux) Pour mettre à jour Asciidoctor, tapez : $ sudo apk add -u asciidoctor === APT Pour mettre à jour Asciidoctor, tapez : $ sudo apt-get upgrade -y asciidoctor === DNF Pour mettre à jour Asciidoctor, tapez : $ sudo dnf update -y asciidoctor === Homebrew (macOS) Pour mettre à jour Asciidoctor, tapez : $ brew update $ brew upgrade asciidoctor === gem install Si vous avez précédemment installé Asciidoctor en utilisant la commande `gem`, vous devez manuellement mettre à jour Asciidoctor quand une nouvelle version est publiée. Vous pouvez mettre à jour Asciidoctor en tappant : $ gem install asciidoctor Quand vous installez une nouvelle version en utilisant `gem install`, vous vous retrouvez avec plusieurs versions installées. Utilisez la commande ci-dessous pour supprimer les anciennes versions : $ gem cleanup asciidoctor == Utilisation Si la gemme Asciidoctor s'est installée correctement, la ligne de commande (CLI) `asciidoctor` sera disponible dans votre PATH. Pour vérifier sa disponibilité, exécutez la commande suivante dans votre terminal : $ asciidoctor --version Vous devriez voir les informations concernant la version d'Asciidoctor et celle de votre environnement Ruby s'afficher dans le terminal. [.output,subs=attributes+] .... Asciidoctor {release-version} [https://asciidoctor.org] Runtime Environment (ruby 2.4.1p111 [x86_64-linux]) (lc:UTF-8 fs:UTF-8 in:- ex:UTF-8) .... Asciidoctor fournit aussi une API. Cette API permet une intégration avec d'autres logiciels Ruby, comme Rails, Sinatra et GitHub, ainsi que d'autres langages comme Java (via {uri-asciidoctorj}[AsciidoctorJ]) ou JavaScript (via {uri-asciidoctorjs}[Asciidoctor.js]). === Interface de Ligne de Commande (CLI) La commande `asciidoctor` vous permet d'invoquer Asciidoctor à partir de la ligne de commande (c'est-à-dire, un terminal). La commande suivante convertit le fichier README.adoc en HTML et sauvegarde le résultat dans le fichier README.html dans le même répertoire. Le nom du fichier HTML généré est tiré de celui du fichier source, l'extension a été changée pour `.html`. 
$ asciidoctor README.adoc Vous pouvez contrôler le processeur Asciidoctor en ajoutant plusieurs paramètres, vous pouvez en apprendre plus sur ces derniers en utilisant la commande : $ asciidoctor --help Par exemple, pour écrire le fichier dans un répertoire différent, utilisez : $ asciidoctor -D output README.adoc La {uri-manpage}[page man] `asciidoctor` fournit une référence complète sur l'interface de ligne de commande. Référez-vous aux ressources suivantes pour en apprendre davantage sur la façon d'utiliser la commande `asciidoctor`. * {uri-render-doc}[Comment convertir un document ?] * {uri-themes-doc}[Comment utiliser la fabrique de feuilles de style Asciidoctor pour produire des thèmes personnalisés ?] === API Ruby Pour utiliser Asciidoctor dans votre application, vous avez tout d'abord besoin de faire un « require » sur la gemme : [source] require 'asciidoctor' Vous pouvez ensuite convertir un fichier AsciiDoc en fichier HTML en utilisant : [source] Asciidoctor.convert_file 'README.adoc', to_file: true, safe: :safe WARNING: Quand vous utilisez Asciidoctor via l'API, le mode de sûreté par défaut est `:secure`. Dans le mode « secure », plusieurs fonctionnalités centrales sont désactivées, comme la directive `include`. Si vous souhaitez activer ces fonctionnalités, vous aurez besoin de définir explicitement le mode de sûreté avec une la valeur `:server` (recommandée) ou `:safe`. Vous pouvez aussi convertir une chaîne de texte en fragment HTML (pour une insertion dans une page HTML) en utilisant : [source] ---- content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' Asciidoctor.convert content, safe: :safe ---- Si vous voulez le document HTML complet, activez l'option `header_footer` comme ci-dessous : [source] ---- content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' html = Asciidoctor.convert content, header_footer: true, safe: :safe ---- Si vous avez besoin d'accéder au document analysé, vous pouvez séparer la conversion en deux étapes distinctes : [source] ---- content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' document = Asciidoctor.load content, header_footer: true, safe: :safe puts document.doctitle html = document.convert ---- Gardez en tête que si vous n'aimez pas le contenu généré par Asciidoctor, _vous pouvez le changer !_ Asciidoctor supporte des convertisseurs personnalisés qui peuvent prendre en charge la conversion depuis le document analysé jusqu'au contenu généré. Une façon simple de personnaliser les morceaux de contenu générés est d'utiliser le convertisseur de template. Le convertisseur de template vous permet, en utilisant un template supporté par {uri-tilt}[Tilt], de prendre en charge la conversion de n'importe quel élément dans le document. Vous l'aurez compris, vous _pouvez_ complètement prendre le contrôle sur le contenu généré. Pour plus d'informations sur comment utiliser l'API ou personnaliser le contenu généré, référez-vous au {uri-user-manual}[manuel utilisateur]. == Contributions Les contributeurs et contributrices sont toujours les bienvenus ! Si vous découvrez des erreurs ou des oublis dans le code source, la documentation, ou le contenu du site web, s'il vous plaît n'hésitez pas à ouvrir un ticket ou une « pull request » avec un correctif. 
Voici quelques façons de contribuer : * en utilisant les versions prerelease (alpha, beta ou preview), * en rapportant des anomalies, * en suggérant de nouvelles fonctionnalités, * en écrivant ou éditant la documentation, * en écrivant du code avec des tests -- _Aucun patch n'est trop petit_ ** corriger une coquille, ** ajouter des commentaires, ** nettoyer des espaces inutiles, ** écrire des tests ! * en refactorant le code, * en corrigeant des {uri-issues}[anomalies], * en effectuant des relectures des patches. Le guide du {uri-contribute}[parfait Contributeur] fournit des informations sur comment créer, styliser et soumettre des tickets, des demandes de fonctionnalités, du code et de la documentation pour le projet Asciidoctor. == Être aidé Asciidoctor est développé dans le but de vous aider à écrire et publier du contenu. Mais nous ne pouvons pas le faire sans vos avis ! Nous vous encourageons à poser vos questions et à discuter de n'importe quels aspects du projet sur la liste de discussion, Twitter ou dans le salon de discussion. Chat (Zulip):: {uri-chat} Forum (Nabble):: {uri-discuss} Twitter:: hashtag https://twitter.com/search?f=tweets&q=%23asciidoctor[#asciidoctor] ou la mention https://twitter.com/asciidoctor[@asciidoctor] ifdef::env-github[] De plus amples informations et documentations sur Asciidoctor peuvent être trouvées sur le site web du projet. {uri-project}/[Home] | {uri-news}[News] | {uri-docs}[Docs] endif::[] L'organisation Asciidoctor sur GitHub héberge le code source du projet, le gestionnaire de tickets ainsi que des sous-projets. Dépôt des sources (git):: {uri-repo} Gestionnaire de tickets:: {uri-issues} L'organisation Asciidoctor sur GitHub:: {uri-org} == Licence Copyright (C) 2012-present Dan Allen, Sarah White, Ryan Waldron, et les contributeurs individuels d'Asciidoctor. Une utilisation libre de ce logiciel est autorisée sous les termes de la licence MIT. Consultez le fichier {uri-license}[LICENSE] pour plus de détails. == Auteurs *Asciidoctor* est mené par https://github.com/mojavelinux[Dan Allen] et https://github.com/graphitefriction[Sarah White] et reçoit de nombreuses contributions de la part de la {uri-contributors}[géniale communauté] Asciidoctor. Le projet a été initié en 2012 par https://github.com/erebor[Ryan Waldron] et est basé sur un prototype écrit par https://github.com/nickh[Nick Hengeveld]. *AsciiDoc* a été démarré par Stuart Rackham et a reçu de nombreuses contributions de la part de la communauté AsciiDoc. ifndef::env-site[] == Changelog ifeval::[{safe-mode-level} < 20] include::CHANGELOG.adoc[tag=compact,leveloffset=+1] endif::[] Référez-vous au fichier {uri-changelog}[CHANGELOG] pour une liste complète des changements des versions précédentes. 
endif::[] asciidoctor-2.0.20/README-jp.adoc000066400000000000000000000567661443135032600163170ustar00rootroot00000000000000= Asciidoctor Dan Allen ; Sarah White v2.0.20, 2023-05-18 // settings: :idprefix: :idseparator: - :source-language: ruby :language: {source-language} ifndef::env-github[:icons: font] ifdef::env-github[] :status: :caution-caption: :fire: :important-caption: :exclamation: :note-caption: :paperclip: :tip-caption: :bulb: :warning-caption: :warning: endif::[] // Variables: :release-version: 2.0.20 // URIs: :uri-org: https://github.com/asciidoctor :uri-repo: {uri-org}/asciidoctor :uri-asciidoctorj: {uri-org}/asciidoctorj :uri-asciidoctorjs: {uri-org}/asciidoctor.js :uri-gradle-plugin: {uri-org}/asciidoctor-gradle-plugin :uri-maven-plugin: {uri-org}/asciidoctor-maven-plugin :uri-asciidoclet: {uri-org}/asciidoclet :uri-project: https://asciidoctor.org :uri-gem: https://rubygems.org/gems/asciidoctor ifdef::env-site[:uri-project: link:] :uri-docs: {uri-project}/docs :uri-news: {uri-project}/news :uri-manpage: {uri-project}/man/asciidoctor :uri-issues: {uri-repo}/issues :uri-contributors: {uri-repo}/graphs/contributors :uri-rel-file-base: link: :uri-rel-tree-base: link: ifdef::env-site,env-yard[] :uri-rel-file-base: {uri-repo}/blob/HEAD/ :uri-rel-tree-base: {uri-repo}/tree/HEAD/ endif::[] :uri-changelog: {uri-rel-file-base}CHANGELOG.adoc :uri-contribute: {uri-rel-file-base}CONTRIBUTING.adoc :uri-license: {uri-rel-file-base}LICENSE :uri-tests: {uri-rel-tree-base}test :uri-discuss: https://discuss.asciidoctor.org :uri-chat: https://asciidoctor.zulipchat.com :uri-rubygem: https://rubygems.org/gems/asciidoctor :uri-what-is-asciidoc: {uri-docs}/what-is-asciidoc :uri-user-manual: {uri-docs}/user-manual :uri-install-docker: https://github.com/asciidoctor/docker-asciidoctor //:uri-install-doc: {uri-docs}/install-toolchain :uri-install-macos-doc: {uri-docs}/install-asciidoctor-macos :uri-convert-doc: {uri-docs}/convert-documents :uri-themes-doc: {uri-docs}/produce-custom-themes-using-asciidoctor-stylesheet-factory :uri-gitscm-repo: https://github.com/git/git-scm.com :uri-freesoftware: https://www.gnu.org/philosophy/free-sw.html :uri-foundation: https://foundation.zurb.com :uri-opal: https://opalrb.com :uri-tilt: https://github.com/rtomayko/tilt :uri-ruby: https://www.ruby-lang.org // images: :image-uri-screenshot: https://cdn.jsdelivr.net/gh/asciidoctor/asciidoctor/screenshot.png {uri-project}[Asciidoctor]は, {uri-what-is-asciidoc}[AsciiDoc] で書かれたコンテンツをHTML5, DocBook, PDFなどのフォーマットに変換する, _高速で_ {uri-license}[オープンソース] のテキストプロセッサおよびパブリッシングツールチェインです. AsciidoctorはRubyで書かれており, すべての主要オペレーティングシステムで動作します. Asciidoctorプロジェクトは {uri-repo}[GitHubにホスティング] されています. インストールをシンプルにするため, AsciidoctorはRubyGem(gem)パッケージとして, {uri-rubygem}[RubyGems.org] で配布されています. さらに, Asciidoctorは主要なLinuxディストリビューション用およびmacOS用パッケージとしても配布されています. AsciidoctorはRubyで動作するだけでなく, {uri-asciidoctorj}[AsciidoctorJ]としてJVM上でも動作します. また, {uri-asciidoctorjs}[Asciidoctor.js]としてどのようなJavaScript環境(ブラウザを含む)でも実行できます. ifndef::env-site,env-yard[] このドキュメントには以下の言語版が存在します: + {uri-rel-file-base}README.adoc[English] | {uri-rel-file-base}README-zh_CN.adoc[汉语] | {uri-rel-file-base}README-de.adoc[Deutsch] | {uri-rel-file-base}README-fr.adoc[Français] endif::[] .主なドキュメント [.compact] * {uri-docs}/what-is-asciidoc[What is AsciiDoc?] 
* {uri-docs}/asciidoc-writers-guide[AsciiDoc Writer's Guide] * {uri-docs}/user-manual[Asciidoctor User Manual] * {uri-docs}/asciidoc-syntax-quick-reference[AsciiDoc Syntax Reference] ifdef::status[] image:https://img.shields.io/gem/v/asciidoctor.svg[Latest Release, link={uri-gem}] image:https://img.shields.io/badge/rubydoc.info-{release-version}-blue.svg[library (API) docs,link=https://www.rubydoc.info/gems/asciidoctor/{release-version}] image:https://github.com/asciidoctor/asciidoctor/workflows/CI/badge.svg[Build Status (GitHub Actions),link={uri-repo}/actions] image:https://img.shields.io/badge/zulip-join_chat-brightgreen.svg[Project Chat,link={uri-chat}] endif::[] == スポンサー {uri-project}/supporters[スポンサー] のみなさまが, このプロジェクトをサポートし, より良いテクニカルドキュメンテーションの実現にコミットメントをしてくださっていることに感謝します. スポンサーのみなさま, ありがとうございます! みなさまの多くのサポートなくしてAsciidoctorは実現不可能です. https://opencollective.com/asciidoctor[OpenCollective] を通じてスポンサーになることにより, このプロジェクトを支援することができます. == 全体像 Asciidoctorは, 下図左側のようなプレーンテキストを読み込んで, 右側のようなHTML5に変換します. 特別な設定をしなくてもきれいな表示が得られるよう, HTML5の出力にはデフォルトのスタイルシートが適用されます. image::{image-uri-screenshot}[AsciiDocソースとレンダリングされたHTMLのプレビュー] == AsciiDocの処理 Asciidoctorは, AsciiDoc文法で書かれたテキストを読み込んでパースします. 次に内蔵コンバータにパースツリーを渡します. これによりHTML5, DocBook 5やman(マニュアルmanページ)が出力されます. 出力をカスタマイズしたりフォーマットを追加したりしたいときは, ユーザ独自のコンバータや {uri-tilt}[Tilt] 対応テンプレートを使用することができます. AsciidoctorはオリジナルのAsciiDoc Pythonプロセッサ(`asciidoc.py`)に完全互換です. Asciidoctorのテストスイートには, AsciiDoc文法との互換性を保証するために {uri-tests}[2350個を超えるテスト] が入っています. Asciidoctorでは, AsciiDocの従来の文法のほかに, Asciidoctorで追加されたマークアップとフォーマッティングオプションが使用できます. フォントベースのアイコン (例えば, `+icon:fire[]+`) やUIエレメント(`+button:[Save]+`)がそれにあたります. またAsciidoctorは, HTML5出力時のスタイルとして {uri-foundation}[Foundation] に基づいたモダンでレスポンシブなテーマも提供します. == RubyのあるところAsciidoctorも動く AsciidoctorはJRubyを用いてJVM上でも実行できます. Javaや他のJVM言語からAsciidoctor APIを直接呼び出すには, {uri-asciidoctorj}[AsciidoctorJ] を使ってください. AsciidoctorJを使ったAsciiDocの処理をビルドに直接組み込むビルドツール用プラグインとして, {uri-maven-plugin}[Apache Maven用], {uri-gradle-plugin}[Gradle用], および {uri-asciidoclet}[Javadoc用] が存在します. AsciidoctorはJavaScriptでも実行可能です. Rubyで書かれたソースを {uri-opal}[Opal] を使ってJavaScriptにトランスパイルすることで {uri-asciidoctorjs}[Asciidoctor.js] が作成されています. Asciidoctor.jsはどんなJavaScript環境(WebブラウザやNode.jsを含む)でも動作する, JavaScript版の完全なAsciidoctorです. Chrome, Atom, Bracketsやその他のウェブベースのツールで, AsciiDocをプレビューするための拡張機能にAsciidoctor.jsが使われています. == 必要条件 AsciidoctorはLinux, macOS, およびWindowsで動作し, 下記の {uri-ruby}[Ruby]実装の一つを必要とします. * CRuby (aka MRI) 2.3 - 2.6 * JRuby 9.1 - 9.2 * TruffleRuby (GraalVM) * Opal (JavaScript) [CAUTION] ==== もし非英語環境のWindowsを使っているなら, Asciidoctorを起動した時に `Encoding::UndefinedConversionError` に遭遇するかもしれません. これを解決するには, 以下のコマンドにより, 使っているコンソールの有効なコードページをUTF-8に変更することを推奨します: chcp 65001 一度この変更をすると, Unicode関連の頭痛の種は消えるでしょう. もしEclipseのようなIDEを使っているなら, 同様にエンコーディングをUTF-8にするのを忘れないでください. AsciidoctorはUTF-8の環境において最も良好に動作します. ==== == インストール Asciidoctorは, (a) 主なLinuxディストリビューションのパッケージマネージャ, (b) macOSのHomebrew, (c) `gem install` コマンド(Windowsユーザに推奨), (d) Asciidoctor Dockerイメージ, あるいは(e) Bundlerを用いてインストールできます. Linuxパッケージマネージャを用いてインストールする利点は, もしRubyやRubyGemsライブラリがまだインストールされていなかったら, それらをインストールしてくれることです. === (a) Linuxのパッケージマネージャ パッケージマネージャによってインストールされるAsciidoctorは最新バージョンではないかもしれません. ディストリビューションの各リリースにおいてどのバージョンのAsciidoctorがパッケージされているかを確認するには, パッケージリポジトリを参照してください. 
* https://pkgs.alpinelinux.org/packages?name=asciidoctor[Alpine Linux (asciidoctor)] * https://www.archlinux.org/packages/?name=asciidoctor[Arch Linux (asciidoctor)] * https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (asciidoctor)] * https://software.opensuse.org/package/rubygem-asciidoctor[OpenSUSE (rubygem-asciidoctor)] * https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)] パッケージマネージャによってインストールされるバージョンよりも新しいAsciidoctorを使用したい場合は, <> を参照してください. ==== apk (Alpine Linux) Alpine Linuxにgemをインストールするには, ターミナルを開き, 以下を入力してください: $ sudo apk add asciidoctor ==== pacman (Arch Linux) Archベースのディストリビューションにgemをインストールするには, ターミナルを開き, 以下を入力してください: $ sudo pacman -S asciidoctor ==== APT Debian, またはUbuntuなどDebianベースのディストリビューションでは, APTを使ってAsciidoctorをインストールしてください. Asciidoctorパッケージをインストールするには, ターミナルを開き, 以下を入力してください: $ sudo apt-get install -y asciidoctor ==== DNF Fedora, CentOS, RHELなどRPMベースのLinuxディストリビューションでは, DNFパッケージマネージャを使ってAsciidoctorをインストールしてください. Asciidoctorパッケージをインストールするには, ターミナルを開き, 以下を入力してください: $ sudo dnf install -y asciidoctor === (b) Homebrew (macOS) macOSでは, パッケージマネージャHomebrewを使用してAsciidoctorをインストールすることができます. Homebrewをお持ちでない場合は, まず https://brew.sh/[brew.sh] の説明に従ってHomebrewをインストールしてください. Homebrewをインストールできたら, `asciidoctor` gemをインストールすることができます. ターミナルを開き, 以下を入力してください: $ brew install asciidoctor Homebrewにより, システムレベルのgemとは別の独立したprefixのパスに `asciidoctor` gemがインストールされます. === (c) Windows WindowsでAsciidoctorを使う場合は, 簡単な方法が2つあります. ==== Chocolatey すでにお使いのマシンで https://chocolatey.org[chocolatey] を使用しているなら, 以下の方法を使用することができます: [source] ---- choco install ruby ---- そのあとは <> に従ってください. ==== Rubyinstaller https://rubyinstaller.org/downloads/[Rubyinstaller] を使用したい場合は, お使いのWindowsのバージョンに適したRubyinstallerをダウンロードしてRubyをインストールしたあと, <> に従ってください. [#gem-install] === (d) gem install Asciidoctorを `gem install` を使ってインストールするのであれば, その前に https://rvm.io[RVM] を使ってhomeディレクトリ(つまりユーザ領域)にRubyをインストールしておくべきです. そうすれば, `gem` コマンドを使用して安全にAsciidoctor gemのインストールやアップデートができます. RVMを使用すると, システムから隔離された場所にgemがインストールされます. ターミナルを開き, 以下のように入力してください: $ gem install asciidoctor もし, 先行リリースバージョン(例えばリリース候補版)をインストールしたければ以下のようにします. $ gem install asciidoctor --pre === (e) Docker {uri-install-docker}[Installing Asciidoctor using Docker]を参照してください. === (f) Bundler . プロジェクトのルートフォルダ(またはカレントディレクトリ)にGemfileを作成 . `asciidoctor` gemをGemfileに以下のように追加: + [source,subs=attributes+] ---- source 'https://rubygems.org' gem 'asciidoctor' # または明示的にバージョンを指定 # gem 'asciidoctor', '{release-version}' ---- . Gemfileを保存 . ターミナルを開き, gemをインストール: $ bundle gemをアップグレードするには, Gemfileで新バージョンを指定し, `bundle` を再び実行してください. `bundle update` を(gemを指定せずに)行うことは推奨 *されません* . 他のgemもアップデートされて思わぬ結果になるかもしれないためです. == アップグレード オペレーティングシステムのパッケージマネージャでAsciidoctorをインストールしたのであれば, おそらくパッケージは自動的にアップデートされるように設定されています. その場合は, gemを手動でアップデートする必要はありません. === apk (Alpine Linux) gemをアップグレードするには, 以下を使用してください: $ sudo apk add -u asciidoctor === APT gemをアップグレードするには, 以下を使用してください: $ sudo apt-get upgrade -y asciidoctor === DNF gemをアップグレードするには, 以下を使用してください: $ sudo dnf update -y asciidoctor === Homebrew (macOS) gemをアップグレードするには, 以下を使用してください: $ brew update $ brew upgrade asciidoctor === gem install `gem` コマンドを使ってAsciidoctorをインストールした場合は, 新しいバージョンのAsciidoctorがリリースされたら手動でアップグレードする必要があります. 以下を入力することでアップグレードできます: $ gem install asciidoctor `gem install` を使って新しいバージョンのgemをインストールすると, 複数のバージョンがインストールされた状態になります. 以下のコマンドを使って古いバージョンを削除してください. 
$ gem cleanup asciidoctor == 使い方 Asciidoctorのインストールが成功すると, `asciidoctor` コマンドがPATHに存在するようになり, Asciidoctorのコマンドラインインターフェース(CLI)が使用できるようになります. 確認のために, ターミナルで以下を実行しましょう: $ asciidoctor --version AsciidoctorのバージョンとRuby環境についての情報がターミナルに出力されるはずです. [.output,subs=attributes+] .... Asciidoctor {release-version} [https://asciidoctor.org] Runtime Environment (ruby 2.6.0p0 [x86_64-linux]) (lc:UTF-8 fs:UTF-8 in:- ex:UTF-8) .... AsciidoctorはAPIも提供します. APIは他のRubyソフトウェア, たとえばRails, Sinatra, GitHub, そして他の言語, たとえばJava ({uri-asciidoctorj}[AsciidoctorJ] 経由)やJavaScript ({uri-asciidoctorjs}[Asciidoctor.js] 経由)と組み合わせて使用するためのものです. === コマンドラインインターフェース (CLI) `asciidoctor` コマンドによりコマンドライン(つまりターミナル)からAsciidoctorを起動することができます. 次のコマンドにより, README.adocというファイルがHTMLに変換され, 結果が同じディレクトリのREADME.htmlとして保存されます. 生成されるHTMLファイルの名前は, ソースファイルのファイル名の拡張子を `.html` に替えたものとなります. $ asciidoctor README.adoc さまざまなフラグやスイッチを与えることでAsciidoctorプロセッサをコントロールすることができます. フラグやスイッチの説明は以下のコマンドで表示されます: $ asciidoctor --help 例えば, ファイルを異なるディレクトリに書き出すには以下を使用します: $ asciidoctor -D output README.adoc コマンドラインインタフェースの完全なリファレンスは `asciidoctor` の {uri-manpage}[manページ] にあります. `asciidoctor` コマンドの使い方の詳細については以下を参照してください. * {uri-convert-doc}[How do I convert a document?] * {uri-themes-doc}[How do I use the Asciidoctor stylesheet factory to produce custom themes?] === Ruby API Asciidoctorをアプリケーションの中で使うには, まずgemをrequireする必要があります: [source] require 'asciidoctor' そうすると, 以下のようにしてAsciiDocソースファイルをHTMLファイルに変換できます: [source] Asciidoctor.convert_file 'README.adoc', to_file: true, safe: :safe WARNING: AsciidoctorをAPI経由で使っているとき, デフォルトのセーフモードは `:secure` (セキュアモード)です. セキュアモードでは, `include` ディレクティブを含むいくつかのコア機能が無効化されています. これらの機能を有効化したい場合, 明示的にセーフモードを `:server` (推奨)か `:safe` にする必要があります. AsciiDoc文字列を, 埋め込み用HTML(HTMLページヘの挿入用)に変換することもできます: [source] ---- content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' Asciidoctor.convert content, safe: :safe ---- もし完全なHTMLドキュメントが必要であれば, 以下のように `header_footer` オプションを有効にしてください: [source] ---- content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' html = Asciidoctor.convert content, header_footer: true, safe: :safe ---- パースされたドキュメントにアクセスしたい場合は, 変換を複数のステップに分割します: [source] ---- content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' document = Asciidoctor.load content, header_footer: true, safe: :safe puts document.doctitle html = document.convert ---- Asciidoctorの生成する出力が気に入らない場合は, _あなたはそれを変更できる_ ことを忘れないでください! パースされたドキュメントを出力形式に変換するコンバータは, カスタマイズが可能です. 出力を部分的にカスタマイズする簡単な方法としてはテンプレートコンバータがあります. テンプレートコンバータでは, ドキュメントの各ノードの変換に {uri-tilt}[Tilt]対応テンプレートファイルを使うことができます. さまざまな方法を使って出力は100%制御することが _できます_ . APIの使い方や出力のカスタマイズ方法についてのより詳しい情報は {uri-user-manual}[ユーザマニュアル] を参照してください. == コントリビューション 新しいコントリビューションを常に歓迎します! もしソースコード, ドキュメント, あるいはウェブサイトに間違いや不備を見つけたら遠慮なく, イシューを作成するか, 修正をおこなってpull requestを作成してください. *あなた* にもできることがあります: * 先行バージョン(alpha, beta, またはpreview版)の使用 * バグレポート * 新機能提案 * ドキュメントの執筆または編集 * テストをつけてコードを書くこと -- _どのようなパッチであれ小さすぎるなどということはありません_ ** typoの修正 ** コメントの追加 ** 一貫性のないホワイトスペースの除去 ** テストの記述! * リファクタリング * {uri-issues}[イシュー] の解決 * パッチのレビュー Asciidoctorプロジェクトにイシュー, 機能リクエスト, コード, ドキュメントを送る際の, 作成方法, スタイル, および送り方は, {uri-contribute}[Contributing] ガイドに記載されています. == 助けを得る Asciidoctorは, コンテンツの執筆と公開を簡単にするために開発されています. しかしあなたからのフィードバックがなくてはAsciidoctorの開発は進みません! ディスカッションリスト, Twitter, チャットルームを使って, 質問をしたりプロジェクトのさまざまな側面について話し合ったりすることをお勧めします. 
チャット(Zulip):: {uri-chat} ディスカッションリスト(Nabble):: {uri-discuss} Twitter:: ハッシュタグ https://twitter.com/search?f=tweets&q=%23asciidoctor[#asciidoctor] またはメンション https://twitter.com/asciidoctor[@asciidoctor] ifdef::env-github[] 以下のプロジェクトサイトに, Asciidoctorに関するさらに詳しい情報やドキュメントがあります. {uri-project}[Home] | {uri-news}[News] | {uri-docs}[Docs] endif::[] GitHub上のAsciidoctorのorganizationではプロジェクトのソースコード, イシュートラッカー, サブプロジェクトが管理されています. ソースリポジトリ(git):: {uri-repo} イシュートラッカー:: {uri-issues} GitHub上のAsciidoctorのorganization:: {uri-org} == ライセンス Copyright (C) 2012-present Dan Allen, Sarah White, Ryan Waldron, and the individual contributors to Asciidoctor. 本ソフトウェアはMITライセンスのもとで使用できます. ライセンスの詳細については {uri-license}[LICENSE] ファイルを参照してください. == 作者 *Asciidoctor* は https://github.com/mojavelinux[Dan Allen] と https://github.com/graphitefriction[Sarah White] がリードし, Asciidoctorの素晴らしきコミュニティの {uri-contributors}[数多くのメンバ] からコントリビューションを受けてきました. このプロジェクトは https://github.com/nickh[Nick Hengeveld] のプロトタイプをベースに https://github.com/erebor[Ryan Waldron] により2012年から創始されました. *AsciiDoc* は Stuart Rackham により創始され, AsciiDocコミュニティの数多くのメンバからコントリビューションを受けてきました. ifndef::env-site[] == 変更履歴 ifeval::[{safe-mode-level} < 20] include::CHANGELOG.adoc[tag=compact,leveloffset=+1] endif::[] 過去のリリースの完全な変更点リストについては {uri-changelog}[CHANGELOG] を参照してください. endif::[] asciidoctor-2.0.20/README-zh_CN.adoc000066400000000000000000000411031443135032600166620ustar00rootroot00000000000000= Asciidoctor Dan Allen ; Sarah White v2.0.20, 2023-05-18 // settings: :page-layout: base :idprefix: :idseparator: - :source-language: ruby :language: {source-language} ifndef::env-github[:icons: font] ifdef::env-github[] :status: :caution-caption: :fire: :important-caption: :exclamation: :note-caption: :paperclip: :tip-caption: :bulb: :warning-caption: :warning: endif::[] // Variables: :release-version: 2.0.20 // URIs: :uri-org: https://github.com/asciidoctor :uri-repo: {uri-org}/asciidoctor :uri-asciidoctorj: {uri-org}/asciidoctorj :uri-asciidoctorjs: {uri-org}/asciidoctor.js :uri-project: https://asciidoctor.org ifdef::env-site[:uri-project: link:] :uri-docs: {uri-project}/docs :uri-news: {uri-project}/news :uri-manpage: {uri-project}/man/asciidoctor :uri-issues: {uri-repo}/issues :uri-contributors: {uri-repo}/graphs/contributors :uri-rel-file-base: link: :uri-rel-tree-base: link: ifdef::env-site[] :uri-rel-file-base: {uri-repo}/blob/HEAD/ :uri-rel-tree-base: {uri-repo}/tree/HEAD/ endif::[] :uri-changelog: {uri-rel-file-base}CHANGELOG.adoc :uri-contribute: {uri-rel-file-base}CONTRIBUTING.adoc :uri-license: {uri-rel-file-base}LICENSE :uri-tests: {uri-rel-tree-base}test :uri-discuss: https://discuss.asciidoctor.org :uri-chat: https://asciidoctor.zulipchat.com :uri-rubygem: https://rubygems.org/gems/asciidoctor :uri-what-is-asciidoc: {uri-docs}/what-is-asciidoc :uri-user-manual: {uri-docs}/user-manual :uri-install-docker: https://github.com/asciidoctor/docker-asciidoctor //:uri-install-doc: {uri-docs}/install-toolchain :uri-install-osx-doc: {uri-docs}/install-asciidoctor-macosx :uri-render-doc: {uri-docs}/render-documents :uri-themes-doc: {uri-docs}/produce-custom-themes-using-asciidoctor-stylesheet-factory :uri-gitscm-repo: https://github.com/git/git-scm.com :uri-freesoftware: https://www.gnu.org/philosophy/free-sw.html :uri-foundation: https://foundation.zurb.com :uri-tilt: https://github.com/rtomayko/tilt :uri-ruby: https://ruby-lang.org // images: :image-uri-screenshot: https://cdn.jsdelivr.net/gh/asciidoctor/asciidoctor/screenshot.png {uri-project}/[Asciidoctor] 是一个 _快速_ 
文本处理器和发布工具链,它可以将 {uri-what-is-asciidoc}[AsciiDoc] 文档转化成 HTML 5、 DocBook 5 以及其他格式。 Asciidoctor 由 Ruby 编写,打包成 RubyGem,然后发布到 {uri-rubygem}[RubyGems.org] 上。 这个 gem 还被包含到几个 Linux 发行版中,其中包括 Fedora、Debian 和 Ubuntu。 Asciidoctor 是开源的,link:{uri-repo}[代码托管在 GitHub],遵从 {uri-license}[MIT] 协议。 该文档有如下语言的翻译版: * {uri-rel-file-base}README.adoc[English] * {uri-rel-file-base}README-fr.adoc[Français] * {uri-rel-file-base}README-jp.adoc[日本語] .关键文档 [.compact] * {uri-docs}/what-is-asciidoc[Asciidoctor 是什么?] * {uri-docs}/asciidoc-writers-guide[AsciiDoc 写作指南] * {uri-docs}/asciidoc-syntax-quick-reference[AsciiDoc 语法快速参考] * {uri-docs}/user-manual[Asciidoctor 用户手册] ifdef::status[] image:https://img.shields.io/gem/v/asciidoctor.svg[Latest Release, link={uri-gem}] image:https://img.shields.io/badge/rubydoc.info-{release-version}-blue.svg[library (API) docs,link=https://www.rubydoc.info/gems/asciidoctor/{release-version}] image:https://github.com/asciidoctor/asciidoctor/workflows/CI/badge.svg[Build Status (GitHub Actions),link={uri-repo}/actions] image:https://img.shields.io/badge/zulip-join_chat-brightgreen.svg[Project Chat,link={uri-chat}] endif::[] .Ruby 所至, Asciidoctor 相随 **** 使用 JRuby 让 Asciidoctor 运行在 Java 虚拟机上。 使用 {uri-asciidoctorj}[AsciidoctorJ] 直接调用 Asciidoctor 的 API 运行在 Java 或者其他 Java 虚拟机中。 基于 AsciidoctorJ 有好多插件可用,这些插件可以将 Asciidoctor 整合到 Apache Maven,Gradle 或 Javadoc 构建中。 Asciidoctor 也可以运行在 JavaScript 上。 我们可以使用 https://opalrb.com[Opal] 将 Ruby 源码编译成 JavaScript 生成 {uri-asciidoctorjs}[Asciidoctor.js] 文件,这是一个全功能版的 Asciidoctor,可以运行在任意的 JavaScript 环境中,比如 Web 浏览器 或 Node.js。 Asciidoctor.js 被用于预览 AsciiDoc,支持 Chrome 扩展,Atom,Brackets 或其他基于 Web 的工具。 **** [#the-big-picture] == 整体概况 Asciidoctor 以纯文本格式读取内容,见下图左边的面板,并将它转换成 HTML 5 呈现在右侧面板中。 Asciidoctor 将默认的样式表应用到 HTML 5 文档上,提供一个愉快的开箱即用的体验。 image::{image-uri-screenshot}[AsciiDoc 源文预览和相应的 HTML 渲染] [#asciidoc-processing] == AsciiDoc Processing Asciidoctor 会读取并处理用 AsciiDoc 语法写的文件,然后将解析出来的解析树参数交给内置的转化器去生成 HTML 5,DocBook 5 或帮助手册页面输出。 你可以选择使用你自己的转化器或者加载 {uri-tilt}[Tilt] - 支持通过模板来自定义输出或产生附加的格式。 NOTE: Asciidoctor是为了直接替换原 AsciiDoc Python 处理器(`asciidoc.py`)。 Asciidoctor 测试套件含有 {uri-tests}[> 1,600 测试示例] 来确保和 AsciiDoc 语法的兼容性。 除了传统的 AsciiDoc 语法,Asciidoctor 还添加额外的标记和格式设置选项,例如 font-based 图标(例如: `+icon:fire[]+`)和 UI 元素(例如: `+button:[Save]+`)。 Asciidoctor 还提供了一个基于 {uri-foundation}[Foundation] 的现代化的、响应式主题来美化 HTML 5 输出。 [#requirements] == 要求 Asciidoctor 可以运行在 Linux,OSX (Mac) 和 Windows 系统,但需要安装下面任意一个 {uri-ruby}[Ruby] 环境去实现: * CRuby (aka MRI) 2.3 - 2.6 * JRuby 9.1 - 9.2 * TruffleRuby (GraalVM) * Opal (JavaScript) 我们欢迎你来帮助在这些以及其他平台测试 Asciidoctor。 请参考 <<{idprefix}contributing,Contributing>> 来了解如何参与。 [CAUTION] ==== 如果在非英语的 Windows 环境,当你去调用 Asciidoctor 时,可能会碰到 `Encoding::UndefinedConversionError` 的错误提示。 为了解决这个问题,我们建议将控制台的编码更改为 UTF-8: chcp 65001 一旦你做了这个改变,所有的编码问题,都将迎刃而解。 如果你使用的是像 Eclipse 这样的 IDE 集成开发工具,你也需要确保他被你设置为 UTF-8 编码。 使用 UTF-8 能使 Asciidoctor 在任何地方都能正常工作。 ==== [#installation] == 安装 Asciidoctor 可以通过三种方式安装(a)`gem install` 命令;(b)Bundler打包编译;(c)流行的 Linux 发行版的包管理器 TIP: 使用 Linux 包管理器安装的好处是如果你机器在之前没有安装 Ruby 和 RubyGems 库,当你选择这种方式安装时它们会一并安装上去。 不利的是在 gem 发布之后,这类安装包并不是立即可用。 如果你需要安装最新版,你应该总是优先使用 `gem` 命令安装。 [#a-gem-install] === (a) gem 安装 打开一个终端输入如下命令(不含开头的 `$`): $ gem install asciidoctor 如果想安装一个预览版(比如:候选发布版),请使用: $ gem install asciidoctor --pre .升级 [TIP] ==== 如果你安装有的是旧版本 Asciidoctor,你可以使用下面的命令来升级: $ gem update asciidoctor 如果使用 `gem install` 命令来安装一个新版本的 gem 来代替升级,会安装多个版本。 这种情况,你可以使用下面的 gem 命令来移除旧版本: $ gem cleanup asciidoctor ==== [#b-bundler] === (b) Bundler . 在项目的根目录(或者当前路径),创建一个 `Gemfile` 文件; . 
在这个文件中添加 `asciidoctor` gem 如下: + [source,subs=attributes+] ---- source 'https://rubygems.org' gem 'asciidoctor' # 或者明确指明版本 # gem 'asciidoctor', '{release-version}' ---- . 保存 `Gemfile` 文件 . 打开终端,使用如下命令安装 gem: $ bundle 要升级 gem 的话,在 `Gemfile` 文件中,指明新版本,然后再次运行 `bundle` 即可。 *不推荐* 直接使用 `bundle update` 命令,因为它还会升级其他 gem,也许会造成不可预料的结果。 [#c-linux-package-managers] === (c) Linux 包管理 [#dnf-fedora-21-or-greater] ==== DNF (Fedora 21 或更高版本) 在 Fedora 21 或更高版本中安装这个 gem,可以使用 dnf。打开终端并输入如下命令: $ sudo dnf install -y asciidoctor 升级则使用: $ sudo dnf update -y asciidoctor TIP: 如果你的 Fedora 系统配置的是自动升级包,在这种情况下,不需要你亲自动手升级。 [#apt-get-debian-ubuntu-mint] ==== apt-get (Debian, Ubuntu, Mint) 在 Debian,Ubuntu 或 Mint 中安装这个 gem,请打开终端并输入如下命令: $ sudo apt-get install -y asciidoctor 升级则使用: $ sudo apt-get upgrade -y asciidoctor TIP: 如果你的 Debian 或 Ubuntu 系统配置的是自动升级包,在这种情况下,不需要你亲自动手升级。 使用包管理器( apt-get )安装的 Asciidoctor 的版本也许不是最新发布版。 请查看发行版的包库,来确定每个发行版是打包的哪个版本。 * https://packages.debian.org/search?keywords=asciidoctor&searchon=names&exact=1&suite=all§ion=all[Debian 发行版中的 asciidoctor] * https://packages.ubuntu.com/search?keywords=asciidoctor&searchon=names&exact=1&suite=all§ion=all[Ubuntu 发行版中的 asciidoctor] * https://community.linuxmint.com/software/view/asciidoctor[Mint 发行版中的 asciidoctor] [CAUTION] ==== 我们建议不要使用 `gem update` 来升级包管理的 gem。 这样做会使系统进入不一致的状态,包管理工具将不再跟踪相关文件(通常安装在 /usr/local 下。) 简单地说,系统的 gem 只能由包管理器进行管理。 如果你想使用一个比包管理器安装的更新版本的 Asciidoctor,你应该使用 https://rvm.io[RVM] 在你的用户家目录(比如:用户空间)下安装 Ruby。 然后,你就可以放心地使用 `gem` 命令来安装或者更新 Asciidoctor gem。 当使用 RVM 时,gem 将被安装到与系统隔离的位置。 ==== [#apk-alpine-linux] ==== apk (Alpine Linux) 在 Alpine Linux 中安装这个 gem,请打开终端并输入如下命令: $ sudo apk add asciidoctor 升级则使用: $ sudo apk add -u asciidoctor TIP: 如果你的 Alpine Linux 系统配置的是自动升级包,在这种情况下,不需要你亲自动手升级。 [#other-installation-options] === 其他安装选项 * {uri-install-docker}[使用 Docker 安装 Asciidoctor ] * {uri-install-osx-doc}[在 Mac OS X 安装 Asciidoctor ] [#usage] == 使用 如果成功安装 Asciidoctor,则在可执行程序路径中,`asciidoctor` 就可用了。 为了验证它的可用性,你可以在终端中执行如下命令: $ asciidoctor --version 你应该看到关于 Asciidoctor 和 Ruby 环境信息将打印到你的终端上。 [.output,subs=attributes+] .... Asciidoctor {release-version} [https://asciidoctor.org] Runtime Environment (ruby 2.4.1p111 [x86_64-linux]) (lc:UTF-8 fs:UTF-8 in:- ex:UTF-8) .... Asciidoctor 还提供了一套 API。 这套 API 是为了整合其他的 Ruby 软件,例如 Rails、Sinatra、GitHub,甚至其他语言,比如 Java (通过 {uri-asciidoctorj}[AsciidoctorJ]) 和 JavaScript (通过 {uri-asciidoctorjs}[Asciidoctor.js])。 [#command-line-interface-cli] === 命令行(CLI) `asciidoctor` 命令可以让你通过命令行(比如:终端)来调用 Asciidoctor。 下面的命令将 README.adoc 文件转化为 HTML,并且保存到同一目录下的 README.html 文件中。 生成的 HTML 文件名源自源文件名,只是将其扩展名改为了 `.html`。 $ asciidoctor README.adoc 您可以通过添加各种标志和开关控制 Asciidoctor 处理器,通过下面的命令你可以学习它的更多用法: $ asciidoctor --help 比如,将文件写入到不同路径里,使用如下命令: $ asciidoctor -D output README.adoc `asciidoctor` {uri-manpage}[帮助页面] 提供了这个命令的完整参考。 点击下面的资源,学习更多关于 `asciidoctor` 命令的用法。 * {uri-render-doc}[如何转化文档?] * {uri-themes-doc}[如何使用 Asciidoctor 样式工厂来创建自定义主题?] [#ruby-api] === Ruby API 为了在你应用中使用 Asciidoctor,首先需要引入这个 gem: [source] require 'asciidoctor' 然后,你可以通过下面的代码将 AsciiDoc 源文件转化成一个 HTML 文件: [source] Asciidoctor.convert_file 'README.adoc', to_file: true, safe: :safe WARNING: 当你通过 API 使用 Asciidoctor 时,默认的安全模式是 `:secure`。 在 secure 模式下,很多核心特性将不可用,包括 `include` 特性。 如果你想启用这些特性,你需要明确设置安全模式为 `:server` (推荐)或 `:safe`。 你也可以将 AsciiDoc 字符串转化为可内嵌的 HTML (为了插入到一个 HTML 页面),用法如下: [source] ---- content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' 
Asciidoctor.convert content, safe: :safe ---- 如果你想得到完整的 HTML 文档,只需要启用 `header_footer` 选项即可。如下: [source] ---- content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' html = Asciidoctor.convert content, header_footer: true, safe: :safe ---- 如果你想访问已经处理过的文档,可以将转化过程拆分成离散的几步: [source] ---- content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' document = Asciidoctor.load content, header_footer: true, safe: :safe puts document.doctitle html = document.convert ---- 请注意:如果你不喜欢 Asciidoctor 输出结果,_你完全可以改变它。_ Asciidoctor 支持自定义转化器,它可以操作从待处理文件到生成文档整个环节。 一个简单的、细微地自定义输出的方式是使用模板转化器。 模板转化器运行你提供一个 {uri-tilt}[Tilt] 模板,这样通过模板文件来操作转化出的文档的每个节点。 这样,你就 _可以_ 百分之百地控制你的输出。 关于更多关于 API 或自定义输出信息,请参考 {uri-user-manual}[用户帮助手册]。 [#contributing] == 贡献 自由软件的精神鼓励 _每个人_ 来帮助改善这个项目。 如果你在源码、文档或网站内容中发现错误或漏洞,请不要犹豫,提交一个议题或者推送一个修复请求。 随时欢迎新的贡献者! 这里有几种 *你* 可以做出贡献的方式: * 使用预发布版本(alpha, beta 或 preview) * 报告 Bug * 提议新功能 * 编写文档 * 编写规范 * 编写 -- _任何补丁都不小。_ ** 修正错别字 ** 添加评论 ** 清理多余空白 ** 编写测试! * 重构代码 * 修复 {uri-issues}[issues] * 审查补丁 {uri-contribute}[贡献指南]提供了如何提供贡献,包括如何创建、修饰和提交问题、特性、需求、代码和文档给 Asciidoctor 项目。 [#getting-help] == 获得帮助 开发 Asciidoctor 项目是未来了帮助你更容易地书写和发布你的内容。 但是,如果没有反馈,我们将寸步难行。 我们鼓励你在讨论组、Twitter或聊天室里,提问为题,讨论项目的方方面面, 聊天 (Zulip):: {uri-chat} 讨论组 (Nabble):: {uri-discuss} Twitter:: https://twitter.com/search?f=tweets&q=%23asciidoctor[#asciidoctor] 来加入话题 或 https://twitter.com/asciidoctor[@asciidoctor] at并提醒我们 ifdef::env-github[] Further information and documentation about Asciidoctor can be found on the project's website. {uri-project}/[Home] | {uri-news}[News] | {uri-docs}[Docs] endif::[] Asciidoctor 组织在 GitHub 托管代码、议案跟踪和相关子项目。 代码库 (git):: {uri-repo} 议案跟踪:: {uri-issues} 在 GitHub 的 Asciidoctor 组织:: {uri-org} [#copyright-and-licensing] == 版权和协议 Copyright (C) 2012-present Dan Allen, Sarah White, Ryan Waldron, and the individual contributors to Asciidoctor. 
这个软件的免费使用是在MIT许可条款授予的。 请看 {uri-license}[版权声明] 文件来获取更多详细信息。 [#authors] == 作者 *Asciidoctor* 由 https://github.com/mojavelinux[Dan Allen] 和 https://github.com/graphitefriction[Sarah White] 领导,并从 Asciidoctor 社区的 {uri-contributors}[很多其他独立开发者] 上收到了很多贡献。 项目最初由 https://github.com/erebor[Ryan Waldron] 于 2012年基于 https://github.com/nickh[Nick Hengeveld] 的原型创建。 *AsciiDoc* 由 Stuart Rackham 启动,并从 AsciiDoc 社区的其他独立开发者上收到很多贡献。 == Changelog 请看 {uri-changelog}[CHANGELOG]。 asciidoctor-2.0.20/README.adoc000066400000000000000000000500171443135032600156670ustar00rootroot00000000000000= Asciidoctor Dan Allen ; Sarah White v2.0.20, 2023-05-18 // settings: :idprefix: :idseparator: - :source-language: ruby :language: {source-language} ifndef::env-github[:icons: font] ifdef::env-github[] :status: :caution-caption: :fire: :important-caption: :exclamation: :note-caption: :paperclip: :tip-caption: :bulb: :warning-caption: :warning: endif::[] // Variables: :release-version: 2.0.20 // URLs: :url-org: https://github.com/asciidoctor :url-repo: {url-org}/asciidoctor :url-asciidoctorj: {url-org}/asciidoctorj :url-asciidoctorjs: {url-org}/asciidoctor.js :url-gradle-plugin: {url-org}/asciidoctor-gradle-plugin :url-maven-plugin: {url-org}/asciidoctor-maven-plugin :url-asciidoclet: {url-org}/asciidoclet :url-project: https://asciidoctor.org ifdef::env-site[:url-project: link:] :url-docs: https://docs.asciidoctor.org :url-news: {url-project}/news :url-manpage: {url-project}/man/asciidoctor :url-issues: {url-repo}/issues :url-contributors: {url-repo}/graphs/contributors :url-rel-file-base: link: :url-rel-tree-base: link: ifdef::env-site,env-yard[] :url-rel-file-base: {url-repo}/blob/HEAD/ :url-rel-tree-base: {url-repo}/tree/HEAD/ endif::[] :url-changelog: {url-rel-file-base}CHANGELOG.adoc :url-contribute: {url-rel-file-base}CONTRIBUTING.adoc :url-license: {url-rel-file-base}LICENSE :url-tests: {url-rel-tree-base}test :url-discuss: https://discuss.asciidoctor.org :url-chat: https://asciidoctor.zulipchat.com :url-rubygem: https://rubygems.org/gems/asciidoctor :url-what-is-asciidoc: {url-docs}/asciidoctor/latest/#relationship-to-asciidoc :url-install-docker: https://github.com/asciidoctor/docker-asciidoctor :url-opal: https://opalrb.com :url-tilt: https://github.com/rtomayko/tilt :url-ruby: https://www.ruby-lang.org // images: :image-url-screenshot: https://cdn.jsdelivr.net/gh/asciidoctor/asciidoctor/screenshot.png {url-project}[Asciidoctor] is a fast, open source, Ruby-based text processor for parsing AsciiDoc(R) into a document model and converting it to output formats such as HTML 5, DocBook 5, manual pages, PDF, EPUB 3, and other formats. Asciidoctor also has an ecosystem of extensions, converters, build plugins, and tools to help you author and publish content written in {url-what-is-asciidoc}[AsciiDoc]. You can find the documentation for these projects at {url-docs}. In addition to running on Ruby, Asciidoctor can be executed on a JVM using {url-asciidoctorj}[AsciidoctorJ] or in any JavaScript environment using {url-asciidoctorjs}[Asciidoctor.js]. 
ifndef::env-site,env-yard[] This document is also available in the following languages: + {url-rel-file-base}README-zh_CN.adoc[汉语] | {url-rel-file-base}README-de.adoc[Deutsch] | {url-rel-file-base}README-fr.adoc[Français] | {url-rel-file-base}README-jp.adoc[日本語] endif::[] .Key documentation [.compact] * {url-docs}/asciidoctor/latest/[Asciidoctor Documentation] * {url-docs}/asciidoc/latest/[AsciiDoc Language Documentation] * {url-docs}/asciidoc/latest/syntax-quick-reference/[AsciiDoc Syntax Quick Reference] ifdef::status[] image:https://img.shields.io/gem/v/asciidoctor.svg[Latest Release, link={url-rubygem}] image:https://img.shields.io/badge/rubydoc.info-{release-version}-blue.svg[library (API) docs,link=https://www.rubydoc.info/gems/asciidoctor/{release-version}] image:https://github.com/asciidoctor/asciidoctor/workflows/CI/badge.svg[Build Status (GitHub Actions),link={url-repo}/actions] image:https://img.shields.io/badge/zulip-join_chat-brightgreen.svg[Project Chat (Zulip),link={url-chat}] endif::[] == Sponsors We want to recognize our {url-project}/supporters[sponsors] for their commitment to improving the state of technical documentation by supporting this project. Thank you sponsors! Without your generous support, Asciidoctor would not be possible. You can support this project by becoming a sponsor through https://opencollective.com/asciidoctor[OpenCollective]. == AsciiDoc Processing and Built-in Converters AsciiDoc is the language. + Asciidoctor is the processor. Asciidoctor reads the AsciiDoc source, as shown in the panel on the left in the image below, and converts it to publishable formats, such as HTML 5, as shown rendered in the panel on the right. image::{image-url-screenshot}[Preview of AsciiDoc source and corresponding rendered HTML] Asciidoctor provides built-in {url-docs}/asciidoctor/latest/converters/[converters] for three output formats by default: {url-docs}/asciidoctor/latest/html-backend/[HTML 5], {url-docs}/asciidoctor/latest/docbook-backend/[DocBook 5], and {url-docs}/asciidoctor/latest/manpage-backend/[man page] (short for manual page). Additional converters, such as PDF and EPUB 3, are provided by separate gems. Asciidoctor also provides an out-of-the-box HTML experience complete with a {url-docs}/asciidoctor/latest/html-backend/default-stylesheet/[default stylesheet] and built-in integrations like Font Awesome (for icons), highlight.js, Rouge, and Pygments (for source highlighting), and MathJax (for STEM processing). == Asciidoctor Ecosystem Although Asciidoctor is written in Ruby, it does not mean you need Ruby to use it. Asciidoctor can be executed on a JVM using {url-docs}/asciidoctorj/latest/[AsciidoctorJ] or in any JavaScript environment (including the browser) using {url-docs}/asciidoctor.js/latest/[Asciidoctor.js]. Installing an Asciidoctor processor is just the beginning of your publishing experience. 
Asciidoctor gives you access to an ecosystem of extensions and tools, ranging from add-on converters, to extended syntax, to build plugins, to integrated writing and preview environments:

* {url-docs}/diagram-extension/latest/[Asciidoctor Diagram]
* {url-docs}/maven-tools/latest/[Maven plugin and site module]
* {url-gradle-plugin}[Gradle plugin]
* {url-docs}/asciidoclet/latest/[Asciidoclet]
* {url-docs}/reveal.js-converter/latest/[reveal.js converter]
* {url-docs}/epub3-converter/latest/[EPUB 3 converter]
* https://intellij-asciidoc-plugin.ahus1.de/docs[IntelliJ plugin]
* {url-docs}/asciidoctor/latest/tooling/#web-browser-add-ons-preview-only[web browser extensions]
* {url-org}[and more]

Asciidoctor is the successor to AsciiDoc.py.
If you're using AsciiDoc.py, see {url-docs}/asciidoctor/latest/migrate/asciidoc-py/[Migrate from AsciiDoc.py] to learn how to upgrade to Asciidoctor.

== Requirements

Asciidoctor works on Linux, macOS, and Windows and requires one of the following implementations of {url-ruby}[Ruby]:

* CRuby (aka MRI) 2.3 - 3.2
* JRuby 9.1 - 9.4
* TruffleRuby (GraalVM)

[CAUTION]
====
If you're using a non-English Windows environment, you may bump into an `Encoding::UndefinedConversionError` when invoking Asciidoctor.
To solve this issue, we recommend overriding the default external and internal character encodings to `utf-8`.
You can do so by setting the `RUBYOPT` environment variable as follows:

 RUBYOPT="-E utf-8:utf-8"

Once you make this change, all your Unicode headaches should be behind you.
If you're using an IDE like Eclipse, make sure you set the encoding to UTF-8 there as well.
Asciidoctor is optimized to work with UTF-8 as the default encoding.
====

== Installation

Asciidoctor is packaged and distributed to RubyGems.org as a RubyGem (aka gem) named {url-rubygem}[asciidoctor^].
The asciidoctor gem can be installed on all major operating systems using Ruby packaging tools (gem or bundle).
Asciidoctor is also distributed as a Docker image, as a package for numerous Linux distributions, and as a package for macOS (via Homebrew and MacPorts).

=== Linux package managers

The version of Asciidoctor installed by the package manager may not match the latest release of Asciidoctor.
Consult the package repository for your distribution to find out which version is packaged per distribution release.

* https://pkgs.alpinelinux.org/packages?name=asciidoctor[Alpine Linux (asciidoctor)]
* https://www.archlinux.org/packages/?name=asciidoctor[Arch Linux (asciidoctor)]
* https://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)]
* https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (asciidoctor)]
* https://software.opensuse.org/package/rubygem-asciidoctor[OpenSUSE (rubygem-asciidoctor)]
* https://packages.ubuntu.com/search?keywords=asciidoctor[Ubuntu (asciidoctor)]

If you want to use a version of Asciidoctor that's newer than what is installed by the package manager, see the <<gem-install>> instructions.

==== apk (Alpine Linux)

To install the gem on Alpine Linux, open a terminal and type:

 $ sudo apk add asciidoctor

==== pacman (Arch Linux)

To install the gem on Arch-based distributions, open a terminal and type:

 $ sudo pacman -S asciidoctor

==== APT

On Debian and Debian-based distributions such as Ubuntu, use APT to install Asciidoctor.
To install the package, open a terminal and type:

 $ sudo apt-get install -y asciidoctor

==== DNF

On RPM-based Linux distributions, such as Fedora, CentOS, and RHEL, use the DNF package manager to install Asciidoctor.
To install the package, open a terminal and type:

 $ sudo dnf install -y asciidoctor

=== macOS

==== Homebrew

You can use https://brew.sh[Homebrew], the macOS package manager, to install Asciidoctor.
If you don't have Homebrew on your computer, complete the https://brew.sh[installation instructions] first.
Once Homebrew is installed, you're ready to install the `asciidoctor` gem.
Open a terminal and type:

 $ brew install asciidoctor

Homebrew installs the `asciidoctor` gem into an exclusive prefix that's independent of system gems.

==== MacPorts

You can also use https://www.macports.org[MacPorts], another package manager for macOS, to install Asciidoctor.
If you don't have MacPorts on your computer, complete the https://www.macports.org/install.php[installation instructions] first.
Once MacPorts is installed, you're ready to install the `asciidoctor` gem via the https://ports.macports.org/port/asciidoctor/[Asciidoctor port].
Open a terminal and type:

 $ sudo port install asciidoctor

=== Windows

To use Asciidoctor with Windows, you have two options.

==== Chocolatey

If you already use https://chocolatey.org[chocolatey] on your machine, you can use:

[source]
----
choco install ruby
----

Then follow the <<gem-install>> instructions.

==== Rubyinstaller

Or you can use the https://rubyinstaller.org/downloads/[Rubyinstaller]: download the package for your Windows version and, after the installation, go ahead with the <<gem-install>> instructions.

[#gem-install]
=== gem install

Before installing Asciidoctor using `gem install`, you should set up https://rvm.io[RVM] (or similar) to install Ruby in your home directory (i.e., user space).
Then, you can safely use the `gem` command to install or update the Asciidoctor gem, or any other gem for that matter.
When using RVM, gems are installed in a location isolated from the system.
(You should never use the `gem` command to install system-wide gems.)

Once you've installed Ruby using RVM, and you have activated it using `rvm use 3.0`, open a terminal and type:

 $ gem install asciidoctor

If you want to install a pre-release version (e.g., a release candidate), use:

 $ gem install asciidoctor --pre

=== Docker

See {url-install-docker}[Installing Asciidoctor using Docker].

=== Bundler

. Create a Gemfile in the root folder of your project (or the current directory)
. Add the `asciidoctor` gem to your Gemfile as follows:
+
[source,subs=attributes+]
----
source 'https://rubygems.org'
gem 'asciidoctor'
# or specify the version explicitly
# gem 'asciidoctor', '{release-version}'
----

. Save the Gemfile
. Open a terminal and install the gem using:

 $ bundle

To upgrade the gem, specify the new version in the Gemfile and run `bundle` again.
Using `bundle update` (without specifying a gem) is *not* recommended as it will also update other gems, which may not be the desired result.

== Upgrade

If you installed Asciidoctor using a package manager, your operating system is probably configured to automatically update packages, in which case you don't need to update the gem manually.

=== apk (Alpine Linux)

To upgrade the gem, use:

 $ sudo apk add -u asciidoctor

=== APT

To upgrade the gem, use:

 $ sudo apt-get upgrade -y asciidoctor

=== DNF

To upgrade the gem, use:

 $ sudo dnf update -y asciidoctor

=== Homebrew (macOS)

To upgrade the gem, use:

 $ brew update
 $ brew upgrade asciidoctor

=== MacPorts (macOS)

To upgrade the gem, use:

 $ sudo port selfupdate
 $ sudo port upgrade asciidoctor

=== gem install

If you previously installed Asciidoctor using the `gem` command, you'll need to manually upgrade Asciidoctor when a new version is released.
You can upgrade the gem by typing: $ gem install asciidoctor When you install a new version of the gem using `gem install`, you end up with multiple versions installed. Use the following command to remove the old versions: $ gem cleanup asciidoctor == Usage If the Asciidoctor gem installed successfully, the `asciidoctor` command line interface (CLI) will be available on your PATH. To verify it's available, run the following in your terminal: $ asciidoctor --version You should see information about the Asciidoctor version and your Ruby environment printed in the terminal. [.output,subs=attributes+] .... Asciidoctor {release-version} [https://asciidoctor.org] Runtime Environment (ruby 3.0.1p64 [x86_64-linux]) (lc:UTF-8 fs:UTF-8 in:UTF-8 ex:UTF-8) .... === Command line interface (CLI) The `asciidoctor` command allows you to invoke Asciidoctor from the command line (i.e., a terminal). The following command converts the file README.adoc to HTML and saves the result to the file README.html in the same directory. The name of the generated HTML file is derived from the source file by changing its file extension to `.html`. $ asciidoctor README.adoc You can control the Asciidoctor processor by adding various flags and switches, which you can learn about using: $ asciidoctor --help For instance, to write the file to a different directory, use: $ asciidoctor -D output README.adoc The `asciidoctor` {url-manpage}[man page] provides a complete reference of the command line interface. Refer to the following resources to learn more about how to use the `asciidoctor` command. * {url-docs}/asciidoctor/latest/cli/[Process AsciiDoc using the CLI] * {url-docs}/asciidoctor/latest/cli/options/[CLI options] === Ruby API Asciidoctor also provides an API. The API is intended for integration with other Ruby software, such as Rails, GitHub, and GitLab, as well as other languages, such as Java (via AsciidoctorJ) and JavaScript (via Asciidoctor.js). To use Asciidoctor in your application, you first need to require the gem: [source] require 'asciidoctor' You can then convert an AsciiDoc source file to an HTML file using: [source] Asciidoctor.convert_file 'README.adoc', to_file: true, safe: :safe WARNING: When using Asciidoctor via the API, the default safe mode is `:secure`. In secure mode, several core features are disabled, including the `include` directive. If you want to enable these features, you'll need to explicitly set the safe mode to `:server` (recommended) or `:safe`. You can also convert an AsciiDoc string to embeddable HTML (for inserting in an HTML page) using: [source] ---- content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' Asciidoctor.convert content, safe: :safe ---- If you want the full HTML document, enable the `header_footer` option as follows: [source] ---- content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' html = Asciidoctor.convert content, header_footer: true, safe: :safe ---- If you need access to the parsed document, you can split the conversion into discrete steps: [source] ---- content = '_Zen_ in the art of writing https://asciidoctor.org[AsciiDoc].' document = Asciidoctor.load content, header_footer: true, safe: :safe puts document.doctitle html = document.convert ---- Keep in mind that if you don't like the output Asciidoctor produces, _you can change it!_ Asciidoctor supports custom converters that can handle converting from the parsed document to the generated output. 
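For example, here's a minimal sketch of a custom converter that emits plain text; the class name and backend name are arbitrary choices for illustration, and error handling is omitted:

[source]
----
require 'asciidoctor'

# A bare-bones converter that produces plain text instead of HTML.
class PlainTextConverter
  include Asciidoctor::Converter
  register_for 'plaintext'

  def convert node, transform = node.node_name, opts = nil
    case transform
    when 'document', 'section'
      [node.title, node.content].compact.join %(\n\n)
    when 'paragraph'
      node.content.tr("\n", ' ') + ?\n
    else
      # Use the text of inline nodes and the converted content of everything else.
      (transform.start_with? 'inline_') ? node.text : node.content
    end
  end
end

puts Asciidoctor.convert '*Zen* in the art of writing AsciiDoc.', backend: 'plaintext', safe: :safe
----

Once registered, the converter is selected like any built-in one, for example by passing `backend: 'plaintext'` to the API or `-b plaintext` to the CLI (after loading the converter file with `-r`).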
One easy way to customize the output piecemeal is by using the template converter. The template converter allows you to supply a {url-tilt}[Tilt]-supported template file to handle converting any node in the document. However you go about it, you _can_ have 100% control over the output. For more information about how to use the API or to customize the output, see: * {url-docs}/asciidoctor/latest/api/[Process AsciiDoc using the API] * {url-docs}/asciidoctor/latest/api/options/[API options] * {url-docs}/asciidoctor/latest/safe-modes/[Safe modes] == Contributing New contributors are always welcome! If you discover errors or omissions in the source code, documentation, or website content, please don't hesitate to submit an issue or open a pull request with a fix. Here are some ways *you* can contribute: * by using prerelease (alpha, beta or preview) versions * by reporting bugs * by suggesting new features * by writing or editing documentation * by writing code with tests -- _No patch is too small._ ** fix typos ** add comments ** clean up inconsistent whitespace ** write tests! * by refactoring code * by fixing {url-issues}[issues] * by reviewing patches The {url-contribute}[Contributing] guide provides information on how to create, style, and submit issues, feature requests, code, and documentation to Asciidoctor. == Getting Help Asciidoctor is developed to help you easily write and publish your content. But we can't do it without your feedback! We encourage you to ask questions and discuss any aspects of the project on the discussion list, on Twitter or in the chat room. Chat (Zulip):: {url-chat} Discussion list (Nabble):: {url-discuss} Twitter:: Follow https://twitter.com/asciidoctor[@asciidoctor] or search for the https://twitter.com/search?f=tweets&q=%23asciidoctor[#asciidoctor] hashtag ifdef::env-github[] Further information and documentation about Asciidoctor can be found on the project's website. {url-project}[Home] | {url-news}[News] | {url-docs}[Docs] endif::[] The Asciidoctor organization on GitHub hosts the project's source code, issue tracker, and sub-projects. Source repository (git):: {url-repo} Issue tracker:: {url-issues} Asciidoctor organization on GitHub:: {url-org} == Code of Conduct The core Asciidoctor project is governed by the https://github.com/asciidoctor/.github/blob/HEAD/CODE-OF-CONDUCT.md[Code of Conduct] for the Asciidoctor community of projects. By participating, you're agreeing to honor this code. Let's work together to make this a welcoming, professional, inclusive, and safe environment for everyone. == Versioning and Release Policy This project adheres to semantic versioning (*major.minor.patch*). Typically, patch releases are only made for the current minor release. However, exceptions are made on a case-by-case basis to address security vulnerabilities and other high-priority issues. == Copyright and License Copyright (C) 2012-present Dan Allen, Sarah White, Ryan Waldron, and the individual contributors to Asciidoctor. Use of this software is granted under the terms of the MIT License. See the {url-license}[LICENSE] for the full license text. == Authors *Asciidoctor* is led by https://github.com/mojavelinux[Dan Allen] and https://github.com/graphitefriction[Sarah White] and has received contributions from {url-contributors}[many individuals] in Asciidoctor's awesome community. The project was initiated in 2012 by https://github.com/erebor[Ryan Waldron] based on a prototype written by https://github.com/nickh[Nick Hengeveld] for the Git website. 
*AsciiDoc.py* was started and maintained by Stuart Rackham from https://github.com/asciidoc-py/asciidoc-py2/blob/HEAD/CHANGELOG.txt[2002 to 2013] and has received contributions from many individuals in the https://github.com/asciidoc-py/asciidoc-py2/graphs/contributors[AsciiDoc.py community]. == Trademarks AsciiDoc(R) and AsciiDoc Language(TM) are trademarks of the Eclipse Foundation, Inc. ifndef::env-site[] == Changelog ifeval::[{safe-mode-level} < 20] include::CHANGELOG.adoc[tag=compact,leveloffset=+1] endif::[] Refer to the {url-changelog}[CHANGELOG] for a complete list of changes in older releases. endif::[] asciidoctor-2.0.20/Rakefile000066400000000000000000000001531443135032600155430ustar00rootroot00000000000000# frozen_string_literal: true Dir.glob('tasks/*.rake').each {|file| load file } task default: %w(test:all) asciidoctor-2.0.20/asciidoctor.gemspec000066400000000000000000000043741443135032600177570ustar00rootroot00000000000000begin require_relative 'lib/asciidoctor/version' rescue LoadError require 'asciidoctor/version' end Gem::Specification.new do |s| s.name = 'asciidoctor' s.version = Asciidoctor::VERSION s.summary = 'An implementation of the AsciiDoc text processor and publishing toolchain' s.description = 'A fast, open source text processor and publishing toolchain for converting AsciiDoc content to HTML 5, DocBook 5, and other formats.' s.authors = ['Dan Allen', 'Sarah White', 'Ryan Waldron', 'Jason Porter', 'Nick Hengeveld', 'Jeremy McAnally'] s.email = ['dan.j.allen@gmail.com'] s.homepage = 'https://asciidoctor.org' s.license = 'MIT' # NOTE required ruby version is informational only; it's not enforced since it can't be overridden and can cause builds to break #s.required_ruby_version = '>= 2.3.0' s.metadata = { 'bug_tracker_uri' => 'https://github.com/asciidoctor/asciidoctor/issues', 'changelog_uri' => 'https://github.com/asciidoctor/asciidoctor/blob/HEAD/CHANGELOG.adoc', 'mailing_list_uri' => 'https://chat.asciidoctor.org', 'source_code_uri' => 'https://github.com/asciidoctor/asciidoctor' } # NOTE the logic to build the list of files is designed to produce a usable package even when the git command is not available begin files = (result = `git ls-files -z`.split ?\0).empty? ? 
Dir['**/*'] : result rescue files = Dir['**/*'] end s.files = files.grep %r/^(?:(?:data|lib|man)\/.+|LICENSE|(?:CHANGELOG|README(?:-\w+)?)\.adoc|\.yardopts|#{s.name}\.gemspec)$/ s.executables = (files.grep %r/^bin\//).map {|f| File.basename f } s.require_paths = ['lib'] #s.test_files = files.grep %r/^(?:features|test)\/.+$/ # concurrent-ruby, haml, slim, and tilt are needed for testing custom templates s.add_development_dependency 'concurrent-ruby', '~> 1.1.0' s.add_development_dependency 'cucumber', '~> 3.1.0' # erubi is needed for testing alternate eRuby impls s.add_development_dependency 'erubi', '~> 1.10.0' s.add_development_dependency 'haml', '~> 6.1.0' s.add_development_dependency 'minitest', '~> 5.14.0' s.add_development_dependency 'nokogiri', '~> 1.13.0' s.add_development_dependency 'rake', '~> 12.3.0' s.add_development_dependency 'slim', '~> 4.1.0' s.add_development_dependency 'tilt', '~> 2.0.0' end asciidoctor-2.0.20/benchmark/000077500000000000000000000000001443135032600160315ustar00rootroot00000000000000asciidoctor-2.0.20/benchmark/.gitignore000066400000000000000000000000671443135032600200240ustar00rootroot00000000000000/sample-data/userguide.adoc /sample-data/customers.csv asciidoctor-2.0.20/benchmark/.ruby-gemset000066400000000000000000000000221443135032600202670ustar00rootroot00000000000000asciidoctor-bench asciidoctor-2.0.20/benchmark/.ruby-version000066400000000000000000000000041443135032600204700ustar00rootroot000000000000003.1 asciidoctor-2.0.20/benchmark/benchmark.rb000077500000000000000000000125241443135032600203170ustar00rootroot00000000000000#!/usr/bin/env ruby =begin Use this script to monitor changes in performance when making code changes to Asciidoctor. $ ruby benchmark.rb The most common benchmark is the userguide-loop. It will download the AsciiDoc User Guide automatically the first time, then convert it in memory. Running it 10 times provides a good picture. $ ruby benchmark.rb userguide-loop 10 Only worry about the relative change to the numbers before and after the code change. Absolute times are highly dependent on the capabilities of the machine the the version of Ruby. To get the best results under MRI, tune Ruby using environment variables as follows: .Ruby < 2.1 $ RUBY_GC_MALLOC_LIMIT=90000000 RUBY_FREE_MIN=650000 ruby benchmark.rb userguide-loop 10 .Ruby >= 2.1 $ RUBY_GC_MALLOC_LIMIT=128000000 RUBY_GC_OLDMALLOC_LIMIT=128000000 RUBY_GC_HEAP_INIT_SLOTS=10000000 RUBY_GC_HEAP_FREE_SLOTS=10000000 RUBY_GC_HEAP_GROWTH_MAX_SLOTS=250000 RUBY_GC_HEAP_GROWTH_FACTOR=2 ruby benchmark.rb userguide-loop 10 $ RUBY_GC_MALLOC_LIMIT=128000000 RUBY_GC_OLDMALLOC_LIMIT=128000000 RUBY_GC_HEAP_INIT_SLOTS=20000000 RUBY_GC_HEAP_FREE_SLOTS=1000000 RUBY_GC_HEAP_GROWTH_MAX_SLOTS=250000 RUBY_GC_HEAP_GROWTH_FACTOR=2 ruby benchmark.rb userguide-loop 10 Asciidoctor starts with ~ 12,500 objects, adds ~ 300,000 each run, so tune RUBY_GC_HEAP_* accordingly See http://globaldev.co.uk/2014/05/ruby-2-1-in-detail/#gc-tuning-environment-variables Execute Ruby using the `--disable=gems` flag to speed up the initial load time, as shown below: $ ruby --disable=gems ... =end require 'benchmark' include Benchmark bench = ARGV[0] $repeat = ARGV[1].to_i || 10000 if bench.nil? raise 'You must specify a benchmark to run.' 
end def fetch_userguide require 'open-uri' userguide_uri = 'https://raw.githubusercontent.com/asciidoc/asciidoc/d43faae38c4a8bf366dcba545971da99f2b2d625/doc/asciidoc.txt' customers_uri = 'https://raw.githubusercontent.com/asciidoc/asciidoc/d43faae38c4a8bf366dcba545971da99f2b2d625/doc/customers.csv' userguide_content = OpenURI.open_uri(userguide_uri) {|fd2| fd2.read } customers_content = OpenURI.open_uri(customers_uri) {|fd2| fd2.read } File.write 'sample-data/userguide.adoc', userguide_content, mode: 'w:utf-8' File.write 'sample-data/customers.csv', customers_content, mode: 'w:utf-8' end case bench =begin # benchmark template when 'name' sample = 'value' Benchmark.bmbm(12) {|bm| bm.report('operation a') { $repeat.times { call_a_on sample } } bm.report('operation b') { $repeat.times { call_b_on sample } } } =end when 'userguide' require '../lib/asciidoctor.rb' Asciidoctor::Compliance.markdown_syntax = false Asciidoctor::Compliance.shorthand_property_syntax = false if Asciidoctor::VERSION > '0.1.4' sample_file = ENV['BENCH_TEST_FILE'] || 'sample-data/userguide.adoc' backend = ENV['BENCH_BACKEND'] || 'html5' fetch_userguide if sample_file == 'sample-data/userguide.adoc' && !(File.exist? sample_file) result = Benchmark.bmbm {|bm| bm.report(%(Convert #{sample_file} (x#{$repeat}))) { $repeat.times { Asciidoctor.render_file sample_file, :backend => backend, :safe => Asciidoctor::SafeMode::SAFE, :eruby => 'erubis', :header_footer => true, :to_file => false, :attributes => {'stylesheet' => nil, 'toc' => nil, 'numbered' => nil, 'icons' => nil, 'compat-mode' => ''} } } } # prints average for real run puts %(>avg: #{result.first.real / $repeat}) when 'userguide-loop' require '../lib/asciidoctor.rb' GC.start Asciidoctor::Compliance.markdown_syntax = false Asciidoctor::Compliance.shorthand_property_syntax = false if Asciidoctor::VERSION > '0.1.4' sample_file = ENV['BENCH_TEST_FILE'] || 'sample-data/userguide.adoc' backend = ENV['BENCH_BACKEND'] || 'html5' fetch_userguide if sample_file == 'sample-data/userguide.adoc' && !(File.exist? sample_file) timings = [] 2.times.each do loop_timings = [] (1..$repeat).each do start = Time.now Asciidoctor.render_file sample_file, :backend => backend, :safe => Asciidoctor::SafeMode::SAFE, :eruby => 'erubis', :header_footer => true, :to_file => false, :attributes => { 'stylesheet' => nil, 'toc' => nil, 'numbered' => nil, 'icons' => nil, 'compat-mode' => '' } loop_timings << (Time.now - start) end timings << loop_timings end best_time = nil timings.each do |loop_timings| puts %(#{loop_timings * "\n"}\nRun Total: #{loop_timings.reduce :+}) best_time = best_time ? [best_time, loop_timings.min].min : loop_timings.min end puts %(Best Time: #{best_time}) when 'mdbasics-loop' require '../lib/asciidoctor.rb' GC.start sample_file = ENV['BENCH_TEST_FILE'] || 'sample-data/mdbasics.adoc' backend = ENV['BENCH_BACKEND'] || 'html5' timings = [] 2.times do loop_timings = [] (1..$repeat).each do start = Time.now Asciidoctor.render_file sample_file, :backend => backend, :safe => Asciidoctor::SafeMode::SAFE, :header_footer => false, :to_file => false, :attributes => { 'stylesheet' => nil, 'idprefix' => '', 'idseparator' => '-', 'showtitle' => '' } loop_timings << (Time.now - start) end timings << loop_timings end best_time = nil timings.each do |loop_timings| puts %(#{loop_timings * "\n"}\nRun Total: #{loop_timings.reduce :+}) best_time = best_time ? 
[best_time, loop_timings.min].min : loop_timings.min end puts %(Best Time: #{best_time}) end asciidoctor-2.0.20/benchmark/sample-data/000077500000000000000000000000001443135032600202215ustar00rootroot00000000000000asciidoctor-2.0.20/benchmark/sample-data/mdbasics.adoc000066400000000000000000000172351443135032600226460ustar00rootroot00000000000000// converted to AsciiDoc from https://github.com/gettalong/kramdown/blob/HEAD/benchmark/mdbasics.text # Markdown: Basics John Gruber :s: link:/projects/markdown/syntax :d: link:/projects/markdown/dingus :src: link:/projects/markdown/basics.text ++++ ++++ ## Getting the Gist of Markdown's Formatting Syntax This page offers a brief overview of what it's like to use Markdown. The {s}[syntax page] provides complete, detailed documentation for every feature, but Markdown should be very easy to pick up simply by looking at a few examples of it in action. The examples on this page are written in a before/after style, showing example syntax and the HTML output produced by Markdown. It's also helpful to simply try Markdown out; the {d}[Dingus] is a web application that allows you type your own Markdown-formatted text and translate it to XHTML. NOTE: This document is itself written using Markdown; you can {src}[see the source for it by adding \'.text' to the URL]. ### Paragraphs, Headers, Blockquotes A paragraph is simply one or more consecutive lines of text, separated by one or more blank lines. (A blank line is any line that looks like a blank line -- a line containing nothing spaces or tabs is considered blank.) Normal paragraphs should not be intended with spaces or tabs. Markdown offers two styles of headers: _Setext_ and _atx_. Setext-style headers for +

<h1>+ and +<h2>

+ are created by "underlining" with equal signs (+=+) and hyphens (+-+), respectively. To create an atx-style header, you put 1-6 hash marks (+#+) at the beginning of the line -- the number of hashes equals the resulting HTML header level. Blockquotes are indicated using email-style \'+>+' angle brackets. .Markdown: [listing] .... A First Level Header ==================== A Second Level Header --------------------- Now is the time for all good men to come to the aid of their country. This is just a regular paragraph. The quick brown fox jumped over the lazy dog's back. ### Header 3 > This is a blockquote. > > This is the second paragraph in the blockquote. > > ## This is an H2 in a blockquote .... .Output: ....

<h1>A First Level Header</h1>

<h2>A Second Level Header</h2>

<p>Now is the time for all good men to come to the aid of their country. This is just a regular paragraph.</p>

<p>The quick brown fox jumped over the lazy dog's back.</p>

<h3>Header 3</h3>

<blockquote>
    <p>This is a blockquote.</p>

    <p>This is the second paragraph in the blockquote.</p>

    <h2>This is an H2 in a blockquote</h2>
</blockquote>

.... ### Phrase Emphasis Markdown uses asterisks and underscores to indicate spans of emphasis. .Markdown: ---- Some of these words *are emphasized*. Some of these words _are emphasized also_. Use two asterisks for **strong emphasis**. Or, if you prefer, __use two underscores instead__. ---- .Output: ....

<p>Some of these words <em>are emphasized</em>. Some of these words <em>are emphasized also</em>.</p>

<p>Use two asterisks for <strong>strong emphasis</strong>. Or, if you prefer, <strong>use two underscores instead</strong>.</p>

.... ### Lists Unordered (bulleted) lists use asterisks, pluses, and hyphens (+*+, +++, and +-+) as list markers. These three markers are interchangable; this: ---- * Candy. * Gum. * Booze. ---- this: ---- + Candy. + Gum. + Booze. ---- and this: ---- - Candy. - Gum. - Booze. ---- all produce the same output: ....
<ul>
<li>Candy.</li>
<li>Gum.</li>
<li>Booze.</li>
</ul>
.... Ordered (numbered) lists use regular numbers, followed by periods, as list markers: ---- 1. Red 2. Green 3. Blue ---- .Output: ....
<ol>
<li>Red</li>
<li>Green</li>
<li>Blue</li>
</ol>
.... If you put blank lines between items, you'll get +
<p>
+ tags for the list item text. You can create multi-paragraph list items by indenting the paragraphs by 4 spaces or 1 tab: ---- * A list item. With multiple paragraphs. * Another item in the list. ---- .Output: ....

<ul>
<li><p>A list item.</p>

<p>With multiple paragraphs.</p></li>

<li><p>Another item in the list.</p></li>
</ul>

.... ### Links Markdown supports two styles for creating links: _inline_ and _reference_. With both styles, you use square brackets to delimit the text you want to turn into a link. Inline-style links use parentheses immediately after the link text. For example: ---- This is an [example link](http://example.com/). ---- .Output: ....

<p>This is an <a href="http://example.com/">example link</a>.</p>

.... Optionally, you may include a title attribute in the parentheses: ---- This is an [example link](http://example.com/ "With a Title"). ---- .Output: ....

<p>This is an <a href="http://example.com/" title="With a Title">example link</a>.</p>

.... Reference-style links allow you to refer to your links by names, which you define elsewhere in your document: ---- I get 10 times more traffic from [Google][1] than from [Yahoo][2] or [MSN][3]. [1]: http://google.com/ "Google" [2]: http://search.yahoo.com/ "Yahoo Search" [3]: http://search.msn.com/ "MSN Search" ---- .Output: ....

<p>I get 10 times more traffic from <a href="http://google.com/" title="Google">Google</a> than from <a href="http://search.yahoo.com/" title="Yahoo Search">Yahoo</a> or <a href="http://search.msn.com/" title="MSN Search">MSN</a>.</p>

.... The title attribute is optional. Link names may contain letters, numbers and spaces, but are _not_ case sensitive: ---- I start my morning with a cup of coffee and [The New York Times][NY Times]. [ny times]: http://www.nytimes.com/ ---- .Output: ....

<p>I start my morning with a cup of coffee and <a href="http://www.nytimes.com/">The New York Times</a>.</p>

.... ### Images Image syntax is very much like link syntax. .Inline (titles are optional): ---- ![alt text](/path/to/img.jpg "Title") ---- .Reference-style: ---- ![alt text][id] [id]: /path/to/img.jpg "Title" ---- Both of the above examples produce the same output: .... <img src="/path/to/img.jpg" alt="alt text" title="Title" /> .... ### Code In a regular paragraph, you can create code span by wrapping text in backtick quotes. Any ampersands (+&+) and angle brackets (+<+ or +>+) will automatically be translated into HTML entities. This makes it easy to use Markdown to write about HTML example code: ---- I strongly recommend against using any `<blink>` tags. I wish SmartyPants used named entities like `&mdash;` instead of decimal-encoded entites like `&#8212;`. ---- .Output: ....

<p>I strongly recommend against using any <code>&lt;blink&gt;</code> tags.</p>

<p>I wish SmartyPants used named entities like <code>&amp;mdash;</code> instead of decimal-encoded entites like <code>&amp;#8212;</code>.</p>

.... To specify an entire block of pre-formatted code, indent every line of the block by 4 spaces or 1 tab. Just like with code spans, +&+, +<+, and +>+ characters will be escaped automatically. .Markdown: ---- If you want your page to validate under XHTML 1.0 Strict, you've got to put paragraph tags in your blockquotes:

    <blockquote>
        <p>For example.</p>
    </blockquote>

---- .Output: ....

<p>If you want your page to validate under XHTML 1.0 Strict, you've got to put paragraph tags in your blockquotes:</p>

<pre><code>&lt;blockquote&gt;
    &lt;p&gt;For example.&lt;/p&gt;
&lt;/blockquote&gt;
</code></pre>
.... asciidoctor-2.0.20/bin/000077500000000000000000000000001443135032600146475ustar00rootroot00000000000000asciidoctor-2.0.20/bin/asciidoctor000077500000000000000000000005701443135032600171020ustar00rootroot00000000000000#!/usr/bin/env ruby # frozen_string_literal: true asciidoctor = File.absolute_path '../lib/asciidoctor.rb', __dir__ if File.exist? asciidoctor require asciidoctor require File.join Asciidoctor::LIB_DIR, 'asciidoctor/cli' else require 'asciidoctor' require 'asciidoctor/cli' end invoker = Asciidoctor::Cli::Invoker.new ARGV GC.start invoker.invoke! exit invoker.code asciidoctor-2.0.20/data/000077500000000000000000000000001443135032600150105ustar00rootroot00000000000000asciidoctor-2.0.20/data/locale/000077500000000000000000000000001443135032600162475ustar00rootroot00000000000000asciidoctor-2.0.20/data/locale/attributes-ar.adoc000066400000000000000000000014161443135032600216670ustar00rootroot00000000000000// Arabic translation, courtesy of Aboullaite Mohammed :appendix-caption: ملحق :appendix-refsig: {appendix-caption} :caution-caption: تنبيه //:chapter-signifier: ??? //:chapter-refsig: {chapter-signifier} :example-caption: مثال :figure-caption: الشكل :important-caption: مهم :last-update-label: اخر تحديث ifdef::listing-caption[:listing-caption: قائمة] ifdef::manname-title[:manname-title: اسم] :note-caption: ملاحظة //:part-signifier: ??? //:part-refsig: {part-signifier} ifdef::preface-title[:preface-title: تمهيد] //:section-refsig: ??? :table-caption: جدول :tip-caption: تلميح :toc-title: فهرس :untitled-label: بدون عنوان :version-label: نسخة :warning-caption: تحذير asciidoctor-2.0.20/data/locale/attributes-be.adoc000066400000000000000000000015571443135032600216610ustar00rootroot00000000000000// Belarusian translation, courtesy of Dexter Morganov :appendix-caption: Дадатак :appendix-refsig: {appendix-caption} :caution-caption: Увага :chapter-signifier: Глава :chapter-refsig: {chapter-signifier} :example-caption: Прыклад :figure-caption: Малюнак :important-caption: Важна :last-update-label: Апошняе абнаўленне ifdef::listing-caption[:listing-caption: Лістынг] ifdef::manname-title[:manname-title: Назва] :note-caption: Заўвага :part-signifier: Частка :part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Прадмова] :section-refsig: Раздзел :table-caption: Табліца :tip-caption: Падказка :toc-title: Змест :untitled-label: Без назвы :version-label: Версія :warning-caption: Папярэджанне asciidoctor-2.0.20/data/locale/attributes-bg.adoc000066400000000000000000000015541443135032600216600ustar00rootroot00000000000000// Bulgarian translation, courtesy of Ivan St. Ivanov :appendix-caption: Приложение :appendix-refsig: {appendix-caption} :caution-caption: Внимание //:chapter-signifier: ??? //:chapter-refsig: {chapter-signifier} :example-caption: Пример :figure-caption: Фигура :important-caption: Важно :last-update-label: Последно обновен ifdef::listing-caption[:listing-caption: Листинг] ifdef::manname-title[:manname-title: Име] :note-caption: Забележка //:part-signifier: ??? //:part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Предговор] //:section-refsig: ??? 
:table-caption: Таблица :tip-caption: Подсказка :toc-title: Съдържание :untitled-label: Без заглавие :version-label: Версия :warning-caption: Внимание asciidoctor-2.0.20/data/locale/attributes-ca.adoc000066400000000000000000000013711443135032600216500ustar00rootroot00000000000000// Catalan translation, courtesy of Abel Salgado Romero and Alex Soto :appendix-caption: Apèndix :appendix-refsig: {appendix-caption} :caution-caption: Atenció :chapter-signifier: Capítol :chapter-refsig: {chapter-signifier} :example-caption: Exemple :figure-caption: Figura :important-caption: Important :last-update-label: Última actualització ifdef::listing-caption[:listing-caption: Llista] ifdef::manname-title[:manname-title: Nom] :note-caption: Nota :part-signifier: Part :part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Prefaci] :section-refsig: Secció :table-caption: Taula :tip-caption: Suggeriment :toc-title: Índex :untitled-label: Sense títol :version-label: Versió :warning-caption: Advertència asciidoctor-2.0.20/data/locale/attributes-cs.adoc000066400000000000000000000013501443135032600216670ustar00rootroot00000000000000// czech translation, for reference only; matches the built-in behavior of core :appendix-caption: Příloha :appendix-refsig: {appendix-caption} :caution-caption: Upozornění :chapter-signifier: Kapitola :chapter-refsig: {chapter-signifier} :example-caption: Příklad :figure-caption: Obrázek :important-caption: Důležité :last-update-label: Změněno ifdef::listing-caption[:listing-caption: Seznam] ifdef::manname-title[:manname-title: Název] :note-caption: Poznámka :part-signifier: Část :part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Úvod] :section-refsig: Oddíl :table-caption: Tabulka :tip-caption: Tip :toc-title: Obsah :untitled-label: Nepojmenovaný :version-label: Verze :warning-caption: Varování asciidoctor-2.0.20/data/locale/attributes-da.adoc000066400000000000000000000014211443135032600216450ustar00rootroot00000000000000// Danish translation, courtesy of Max Rydahl Andersen , with updates from Morten Høfft :appendix-caption: Appendix :appendix-refsig: {appendix-caption} :caution-caption: Forsigtig :chapter-signifier: Kapitel :chapter-refsig: {chapter-signifier} :example-caption: Eksempel :figure-caption: Figur :important-caption: Vigtig :last-update-label: Sidst opdateret ifdef::listing-caption[:listing-caption: List] ifdef::manname-title[:manname-title: Navn] :note-caption: Notat :part-signifier: Del :part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Forord] :section-refsig: Sektion :table-caption: Tabel :tip-caption: Tips :toc-title: Indholdsfortegnelse :untitled-label: Unavngivet :version-label: Version :warning-caption: Advarsel asciidoctor-2.0.20/data/locale/attributes-de.adoc000066400000000000000000000013361443135032600216560ustar00rootroot00000000000000// German translation, courtesy of Florian Wilhelm :appendix-caption: Anhang :appendix-refsig: {appendix-caption} :caution-caption: Achtung :chapter-signifier: Kapitel :chapter-refsig: {chapter-signifier} :example-caption: Beispiel :figure-caption: Abbildung :important-caption: Wichtig :last-update-label: Zuletzt aktualisiert ifdef::listing-caption[:listing-caption: Listing] ifdef::manname-title[:manname-title: Bezeichnung] :note-caption: Anmerkung :part-signifier: Teil :part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Vorwort] :section-refsig: Abschnitt :table-caption: Tabelle :tip-caption: Hinweis :toc-title: Inhaltsverzeichnis :untitled-label: Ohne Titel 
:version-label: Version :warning-caption: Warnung asciidoctor-2.0.20/data/locale/attributes-en.adoc000066400000000000000000000013361443135032600216700ustar00rootroot00000000000000// English translation, for reference only; matches the built-in behavior of core :appendix-caption: Appendix :appendix-refsig: {appendix-caption} :caution-caption: Caution :chapter-signifier: Chapter :chapter-refsig: {chapter-signifier} :example-caption: Example :figure-caption: Figure :important-caption: Important :last-update-label: Last updated ifdef::listing-caption[:listing-caption: Listing] ifdef::manname-title[:manname-title: Name] :note-caption: Note :part-signifier: Part :part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Preface] :section-refsig: Section :table-caption: Table :tip-caption: Tip :toc-title: Table of Contents :untitled-label: Untitled :version-label: Version :warning-caption: Warning asciidoctor-2.0.20/data/locale/attributes-es.adoc000066400000000000000000000014551443135032600216770ustar00rootroot00000000000000// Spanish translation, courtesy of Eddú Meléndez with updates from Fede Mendez :appendix-caption: Apéndice :appendix-refsig: {appendix-caption} :caution-caption: Precaución :chapter-signifier: Capítulo :chapter-refsig: {chapter-signifier} :example-caption: Ejemplo :figure-caption: Figura :important-caption: Importante :last-update-label: Ultima actualización ifdef::listing-caption[:listing-caption: Lista] ifdef::manname-title[:manname-title: Nombre] :note-caption: Nota :part-signifier: Parte :part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Prefacio] :section-refsig: Sección :table-caption: Tabla :tip-caption: Sugerencia :toc-title: Tabla de Contenido :untitled-label: Sin título :version-label: Versión :warning-caption: Aviso asciidoctor-2.0.20/data/locale/attributes-fa.adoc000066400000000000000000000014641443135032600216560ustar00rootroot00000000000000// Persian (Farsi) translation, courtesy of Shahryar Eivazzadeh :appendix-caption: پیوست :appendix-refsig: {appendix-caption} :caution-caption: گوشزد //:chapter-signifier: ??? //:chapter-refsig: {chapter-signifier} :example-caption: نمونه :figure-caption: نمودار :important-caption: مهم :last-update-label: آخرین به روز رسانی ifdef::listing-caption[:listing-caption: فهرست] ifdef::manname-title[:manname-title: نام] :note-caption: یادداشت //:part-signifier: ??? //:part-refsig: {part-signifier} ifdef::preface-title[:preface-title: پیشگفتار] //:section-refsig: ??? :table-caption: جدول :tip-caption: نکته :toc-title: فهرست مطالب :untitled-label: بی‌نام :version-label: نگارش :warning-caption: هشدار asciidoctor-2.0.20/data/locale/attributes-fi.adoc000066400000000000000000000013051443135032600216600ustar00rootroot00000000000000// Finnish translation by Tero Hänninen :appendix-caption: Liitteet :appendix-refsig: {appendix-caption} :caution-caption: Huom //:chapter-signifier: ??? //:chapter-refsig: {chapter-signifier} :example-caption: Esimerkki :figure-caption: Kuvio :important-caption: Tärkeää :last-update-label: Viimeksi päivitetty ifdef::listing-caption[:listing-caption: Listaus] ifdef::manname-title[:manname-title: Nimi] :note-caption: Huomio //:part-signifier: ??? //:part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Esipuhe] //:section-refsig: ??? 
:table-caption: Taulukko :tip-caption: Vinkki :toc-title: Sisällysluettelo :untitled-label: Nimetön :version-label: Versio :warning-caption: Varoitus asciidoctor-2.0.20/data/locale/attributes-fr.adoc000066400000000000000000000014561443135032600217000ustar00rootroot00000000000000// French translation, courtesy of Nicolas Comet with updates from Maheva Bagard Laursen :appendix-caption: Annexe :appendix-refsig: {appendix-caption} :caution-caption: Attention :chapter-signifier: Chapitre :chapter-refsig: {chapter-signifier} :example-caption: Exemple :figure-caption: Figure :important-caption: Important :last-update-label: Dernière mise à jour ifdef::listing-caption[:listing-caption: Liste] ifdef::manname-title[:manname-title: Nom] :note-caption: Note :part-signifier: Partie :part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Préface] :section-refsig: Section :table-caption: Tableau :tip-caption: Astuce :toc-title: Table des matières :untitled-label: Sans titre :version-label: Version :warning-caption: Avertissement asciidoctor-2.0.20/data/locale/attributes-hu.adoc000066400000000000000000000013621443135032600217010ustar00rootroot00000000000000// Hungarian translation, courtesy of István Pató :appendix-caption: függelék :appendix-refsig: {appendix-caption} :caution-caption: Figyelmeztetés //:chapter-signifier: ??? //:chapter-refsig: {chapter-signifier} :example-caption: Példa :figure-caption: Ábra :important-caption: Fontos :last-update-label: Utolsó frissítés ifdef::listing-caption[:listing-caption: Lista] ifdef::manname-title[:manname-title: Név] :note-caption: Megjegyzés //:part-signifier: ??? //:part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Előszó] //:section-refsig: ??? :table-caption: Táblázat :tip-caption: Tipp :toc-title: Tartalomjegyzék :untitled-label: Névtelen :version-label: Verzió :warning-caption: Figyelem asciidoctor-2.0.20/data/locale/attributes-id.adoc000066400000000000000000000013261443135032600216610ustar00rootroot00000000000000// Bahasa Indonesia, courtesy of Triyan W. Nugroho :appendix-caption: Lampiran :appendix-refsig: {appendix-caption} :caution-caption: Perhatian :chapter-signifier: Bab :chapter-refsig: {chapter-signifier} :example-caption: Contoh :figure-caption: Gambar :important-caption: Penting :last-update-label: Pembaruan terakhir ifdef::listing-caption[:listing-caption: Daftar] ifdef::manname-title[:manname-title: Nama] :note-caption: Catatan //:part-signifier: ??? //:part-refsig: {part-signifier} //ifdef::preface-title[:preface-title: ???] //:section-refsig: ??? 
:table-caption: Tabel :tip-caption: Tips :toc-title: Daftar Isi :untitled-label: Tak Berjudul :version-label: Versi :warning-caption: Peringatan asciidoctor-2.0.20/data/locale/attributes-it.adoc000066400000000000000000000013531443135032600217010ustar00rootroot00000000000000// Italian translation, courtesy of Marco Ciampa :appendix-caption: Appendice :appendix-refsig: {appendix-caption} :caution-caption: Attenzione :chapter-signifier: Capitolo :chapter-refsig: {chapter-signifier} :example-caption: Esempio :figure-caption: Figura :important-caption: Importante :last-update-label: Ultimo aggiornamento ifdef::listing-caption[:listing-caption: Elenco] ifdef::manname-title[:manname-title: Nome] :note-caption: Nota :part-signifier: Parte :part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Prefazione] :section-refsig: Sezione :table-caption: Tabella :tip-caption: Suggerimento :toc-title: Indice :untitled-label: Senza titolo :version-label: Versione :warning-caption: Attenzione asciidoctor-2.0.20/data/locale/attributes-ja.adoc000066400000000000000000000013341443135032600216560ustar00rootroot00000000000000// Japanese translation, courtesy of Takayuki Konishi :appendix-caption: 付録 :appendix-refsig: {appendix-caption} :caution-caption: 注意 //:chapter-signifier: ??? //:chapter-refsig: {chapter-signifier} :example-caption: 例 :figure-caption: 図 :important-caption: 重要 :last-update-label: 最終更新 ifdef::listing-caption[:listing-caption: リスト] ifdef::manname-title[:manname-title: 名前] :note-caption: 注記 //:part-signifier: ??? //:part-refsig: {part-signifier} ifdef::preface-title[:preface-title: まえがき] //:section-refsig: ??? :table-caption: 表 :tip-caption: ヒント :toc-title: 目次 :untitled-label: 無題 :version-label: バージョン :warning-caption: 警告 asciidoctor-2.0.20/data/locale/attributes-ko.adoc000066400000000000000000000013041443135032600216720ustar00rootroot00000000000000// Korean translation, courtesy of Sungsik Nam :appendix-caption: 부록 :appendix-refsig: {appendix-caption} :caution-caption: 주의 //:chapter-signifier: ??? //:chapter-refsig: {chapter-signifier} :example-caption: 예시 :figure-caption: 그림 :important-caption: 중요 :last-update-label: 마지막 업데이트 ifdef::listing-caption[:listing-caption: 목록] ifdef::manname-title[:manname-title: 이름] :note-caption: 노트 //:part-signifier: ??? //:part-refsig: {part-signifier} ifdef::preface-title[:preface-title: 머리말] //:section-refsig: ??? :table-caption: 표 :tip-caption: 힌트 :toc-title: 차례 :untitled-label: 익명 :version-label: 버전 :warning-caption: 경고 asciidoctor-2.0.20/data/locale/attributes-nb.adoc000066400000000000000000000013741443135032600216670ustar00rootroot00000000000000// Norwegian Bokmål, courtesy of Aslak Knutsen , with updates from Karl Ove Hufthammer :appendix-caption: Vedlegg :appendix-refsig: {appendix-caption} :caution-caption: OBS :chapter-signifier: Kapittel :chapter-refsig: {chapter-signifier} :example-caption: Eksempel :figure-caption: Figur :important-caption: Viktig :last-update-label: Sist oppdatert ifdef::listing-caption[:listing-caption: Programkode] ifdef::manname-title[:manname-title: Navn] :note-caption: Merk //:part-signifier: ??? //:part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Forord] //:section-refsig: ??? 
:table-caption: Tabell :tip-caption: Tips :toc-title: Innhold :untitled-label: Navnløs :version-label: Versjon :warning-caption: Advarsel asciidoctor-2.0.20/data/locale/attributes-nl.adoc000066400000000000000000000013571443135032600217020ustar00rootroot00000000000000// Dutch translation, courtesy of Roel Van Steenberghe :appendix-caption: Bijlage :appendix-refsig: {appendix-caption} :caution-caption: Opgelet :chapter-signifier: Hoofdstuk :chapter-refsig: {chapter-signifier} :example-caption: Voorbeeld :figure-caption: Figuur :important-caption: Belangrijk :last-update-label: Laatste aanpassing ifdef::listing-caption[:listing-caption: Lijst] ifdef::manname-title[:manname-title: Naam] :note-caption: Noot :part-signifier: Deel :part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Inleiding] :section-refsig: Paragraaf :table-caption: Tabel :tip-caption: Tip :toc-title: Inhoudsopgave :untitled-label: Naamloos :version-label: Versie :warning-caption: Waarschuwing asciidoctor-2.0.20/data/locale/attributes-nn.adoc000066400000000000000000000013151443135032600216760ustar00rootroot00000000000000// Norwegian Nynorsk, courtesy of Karl Ove Hufthammer :appendix-caption: Vedlegg :appendix-refsig: {appendix-caption} :caution-caption: OBS :chapter-signifier: Kapittel :chapter-refsig: {chapter-signifier} :example-caption: Eksempel :figure-caption: Figur :important-caption: Viktig :last-update-label: Sist oppdatert ifdef::listing-caption[:listing-caption: Programkode] ifdef::manname-title[:manname-title: Namn] :note-caption: Merk //:part-signifier: ??? //:part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Forord] //:section-refsig: ??? :table-caption: Tabell :tip-caption: Tips :toc-title: Innhald :untitled-label: Namnlaus :version-label: Versjon :warning-caption: Åtvaring asciidoctor-2.0.20/data/locale/attributes-pl.adoc000066400000000000000000000014451443135032600217020ustar00rootroot00000000000000// Polish translation, courtesy of Łukasz Dziedziul with updates via Matthew Blissett :appendix-caption: Dodatek :appendix-refsig: {appendix-caption} :caution-caption: Uwaga :chapter-signifier: Rozdział :chapter-refsig: {chapter-signifier} :example-caption: Przykład :figure-caption: Rysunek :important-caption: Ważne :last-update-label: Ostatnio zmodyfikowany ifdef::listing-caption[:listing-caption: Listing] ifdef::manname-title[:manname-title: Nazwa] :note-caption: Notka :part-signifier: Część :part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Wstęp] :section-refsig: Sekcja :table-caption: Tabela :tip-caption: Sugestia :toc-title: Spis treści :untitled-label: Bez tytułu :version-label: Wersja :warning-caption: Ostrzeżenie asciidoctor-2.0.20/data/locale/attributes-pt.adoc000066400000000000000000000014441443135032600217110ustar00rootroot00000000000000// Portuguese translation, courtesy of Roberto Cortez with updates from Andrew Rodrigues :appendix-caption: Apêndice :appendix-refsig: {appendix-caption} :caution-caption: Atenção :chapter-signifier: Capítulo :chapter-refsig: {chapter-signifier} :example-caption: Exemplo :figure-caption: Figura :important-caption: Importante :last-update-label: Última actualização ifdef::listing-caption[:listing-caption: Listagem] ifdef::manname-title[:manname-title: Nome] :note-caption: Nota :part-signifier: Parte :part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Prefácio] :section-refsig: Secção :table-caption: Tabela :tip-caption: Sugestão :toc-title: Índice :untitled-label: Sem título :version-label: Versão 
:warning-caption: Aviso asciidoctor-2.0.20/data/locale/attributes-pt_BR.adoc000066400000000000000000000014451443135032600222750ustar00rootroot00000000000000// Brazilian Portuguese translation, courtesy of Rafael Pestano with updates from Andrew Rodrigues :appendix-caption: Apêndice :appendix-refsig: {appendix-caption} :caution-caption: Cuidado :chapter-signifier: Capítulo :chapter-refsig: {chapter-signifier} :example-caption: Exemplo :figure-caption: Figura :important-caption: Importante :last-update-label: Última atualização ifdef::listing-caption[:listing-caption: Listagem] ifdef::manname-title[:manname-title: Nome] :note-caption: Nota :part-signifier: Parte :part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Prefácio] :section-refsig: Seção :table-caption: Tabela :tip-caption: Dica :toc-title: Índice :untitled-label: Sem título :version-label: Versão :warning-caption: Aviso asciidoctor-2.0.20/data/locale/attributes-ro.adoc000066400000000000000000000013461443135032600217070ustar00rootroot00000000000000// Romanian translation, courtesy of Vitalie Lazu :appendix-caption: Apendix :appendix-refsig: {appendix-caption} :caution-caption: Precauție //:chapter-signifier: ??? //:chapter-refsig: {chapter-signifier} :example-caption: Exemplu :figure-caption: Figură :important-caption: Important :last-update-label: Ultima actualizare ifdef::listing-caption[:listing-caption: Listare] ifdef::manname-title[:manname-title: Nume] :note-caption: Notă //:part-signifier: ??? //:part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Prefață] //:section-refsig: ??? :table-caption: Tabela :tip-caption: Sfat :toc-title: Cuprins :untitled-label: Fără denumire :version-label: Versiunea :warning-caption: Atenție asciidoctor-2.0.20/data/locale/attributes-ru.adoc000066400000000000000000000016401443135032600217120ustar00rootroot00000000000000// Russian translation, courtesy of Alexander Zobkov :appendix-caption: Приложение :appendix-refsig: {appendix-caption} :caution-caption: Внимание :chapter-signifier: Глава :chapter-refsig: {chapter-signifier} :example-caption: Пример :figure-caption: Рисунок :important-caption: Важно :last-update-label: Последнее обновление ifdef::listing-caption[:listing-caption: Листинг] ifdef::manname-title[:manname-title: Название] :note-caption: Примечание :part-signifier: Часть :part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Предисловие] :section-refsig: Раздел :table-caption: Таблица :tip-caption: Подсказка :toc-title: Содержание :untitled-label: Без названия :version-label: Версия :warning-caption: Предупреждение asciidoctor-2.0.20/data/locale/attributes-sr.adoc000066400000000000000000000015371443135032600217150ustar00rootroot00000000000000// Serbian Cyrillic translation, courtesy of Bojan Stipic :appendix-caption: Додатак :appendix-refsig: {appendix-caption} :caution-caption: Опрез :chapter-signifier: Поглавље :chapter-refsig: {chapter-signifier} :example-caption: Пример :figure-caption: Слика :important-caption: Важно :last-update-label: Последње ажурирано ifdef::listing-caption[:listing-caption: Листинг] ifdef::manname-title[:manname-title: Назив] :note-caption: Белешка :part-signifier: Део :part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Предговор] :section-refsig: Секција :table-caption: Табела :tip-caption: Савет :toc-title: Садржај :untitled-label: Без назива :version-label: Верзија :warning-caption: Упозорење 
asciidoctor-2.0.20/data/locale/attributes-sr_Latn.adoc000066400000000000000000000013351443135032600226670ustar00rootroot00000000000000// Serbian Latin translation, courtesy of Bojan Stipic :appendix-caption: Dodatak :appendix-refsig: {appendix-caption} :caution-caption: Oprez :chapter-signifier: Poglavlje :chapter-refsig: {chapter-signifier} :example-caption: Primer :figure-caption: Slika :important-caption: Važno :last-update-label: Poslednje ažurirano ifdef::listing-caption[:listing-caption: Listing] ifdef::manname-title[:manname-title: Naziv] :note-caption: Beleška :part-signifier: Deo :part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Predgovor] :section-refsig: Sekcija :table-caption: Tabela :tip-caption: Savet :toc-title: Sadržaj :untitled-label: Bez naziva :version-label: Verzija :warning-caption: Upozorenje asciidoctor-2.0.20/data/locale/attributes-sv.adoc000066400000000000000000000013251443135032600217140ustar00rootroot00000000000000// Swedish translation, Jonas Björk :appendix-caption: Bilaga :appendix-refsig: {appendix-caption} :caution-caption: Var uppmärksam :chapter-signifier: Kapitel :chapter-refsig: {chapter-signifier} :example-caption: Exempel :figure-caption: Figur :important-caption: Viktigt :last-update-label: Senast uppdaterad ifdef::listing-caption[:listing-caption: Lista] ifdef::manname-title[:manname-title: Namn] :note-caption: Notera :part-signifier: Del :part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Förord] :section-refsig: Avsnitt :table-caption: Tabell :tip-caption: Tips :toc-title: Innehållsförteckning :untitled-label: Odöpt :version-label: Version :warning-caption: Varning asciidoctor-2.0.20/data/locale/attributes-sw.adoc000066400000000000000000000012531443135032600217150ustar00rootroot00000000000000// Kiswahili translation, Benson Muite :appendix-caption: Kiambatisho :appendix-refsig: {appendix-caption} :caution-caption: Hatari :chapter-signifier: Somo :chapter-refsig: {chapter-signifier} :example-caption: Mfano :figure-caption: Picha :important-caption: Muhimu :last-update-label: Geuza la mwisho ifdef::listing-caption[:listing-caption: Orodha] ifdef::manname-title[:manname-title: Jina] :note-caption: Muhtasari :part-signifier: Sehemu :part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Dibaji] :section-refsig: Fungu :table-caption: Ratiba :tip-caption: Shauri :toc-title: Fahirisi :untitled-label: Bila kichwa :version-label: Toleo :warning-caption: Onyo asciidoctor-2.0.20/data/locale/attributes-th.adoc000066400000000000000000000016771443135032600217110ustar00rootroot00000000000000// Thai translation :appendix-caption: อ้างอิง :appendix-refsig: {appendix-caption} :caution-caption: ระวัง :chapter-signifier: บท :chapter-refsig: {chapter-signifier} :example-caption: ตัวอย่าง :figure-caption: คำอธิบายลักษณะจำลอง :important-caption: สำคัญ :last-update-label: ตัวอัพเดตล่าสุด ifdef::listing-caption[:listing-caption: รายการ] ifdef::manname-title[:manname-title: ซื่อ] :note-caption: บันทึก :part-signifier: ส่วน :part-refsig: {part-signifier} ifdef::preface-title[:preface-title: คำนำ] :section-refsig: รายการย่อย :table-caption: ตาราง :tip-caption: เคล็ดลับ :toc-title: สารบัญ :untitled-label: ยังไม่มีชื่อ :version-label: เวอร์ชัน :warning-caption: คำเตือน asciidoctor-2.0.20/data/locale/attributes-tr.adoc000066400000000000000000000013341443135032600217110ustar00rootroot00000000000000// Turkish translation, courtesy of Rahman Usta :appendix-caption: Ek bölüm :appendix-refsig: {appendix-caption} :caution-caption: Dikkat 
//:chapter-signifier: ??? //:chapter-refsig: {chapter-signifier} :example-caption: Örnek :figure-caption: Görsel :important-caption: Önemli :last-update-label: Son güncelleme ifdef::listing-caption[:listing-caption: Listeleme] ifdef::manname-title[:manname-title: İsim] :note-caption: Not //:part-signifier: ??? //:part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Ön söz] //:section-refsig: ??? :table-caption: Tablo :tip-caption: İpucu :toc-title: İçindekiler :untitled-label: İsimsiz :version-label: Versiyon :warning-caption: Uyarı asciidoctor-2.0.20/data/locale/attributes-uk.adoc000066400000000000000000000015751443135032600217120ustar00rootroot00000000000000// Ukrainian translation, courtesy of Kyrylo Yatsenko :appendix-caption: Додаток :appendix-refsig: {appendix-caption} :caution-caption: Обережно :chapter-signifier: Розділ :chapter-refsig: {chapter-signifier} :example-caption: Приклад :figure-caption: Рисунок :important-caption: Важливо :last-update-label: Востаннє оновлено ifdef::listing-caption[:listing-caption: Лістинг] ifdef::manname-title[:manname-title: Назва] :note-caption: Зауваження :part-signifier: Частина :part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Передмова] :section-refsig: Підрозділ :table-caption: Таблиця :tip-caption: Підказка :toc-title: Зміст :untitled-label: Без назви :version-label: Версія :warning-caption: Попередження asciidoctor-2.0.20/data/locale/attributes-vi.adoc000066400000000000000000000014271443135032600217050ustar00rootroot00000000000000// Vietnamese translation, courtesy of Hoa Nguyen :appendix-caption: Phụ lục :appendix-refsig: {appendix-caption} :caution-caption: Cảnh báo :chapter-signifier: Chương :chapter-refsig: {chapter-signifier} :example-caption: Ví dụ :figure-caption: Hình :important-caption: Quan trọng :last-update-label: Cập nhật lần cuối ifdef::listing-caption[:listing-caption: Danh sách] ifdef::manname-title[:manname-title: Tên] :note-caption: Ghi chú :part-signifier: Phần :part-refsig: {part-signifier} ifdef::preface-title[:preface-title: Lời nói đầu] :section-refsig: Mục :table-caption: Bảng :tip-caption: Lời khuyên :toc-title: Mục lục :untitled-label: Không có tiêu đề :version-label: Phiên bản :warning-caption: Chú ýasciidoctor-2.0.20/data/locale/attributes-zh_CN.adoc000066400000000000000000000013201443135032600222600ustar00rootroot00000000000000// Simplified Chinese translation, courtesy of John Dong :appendix-caption: 附录 :appendix-refsig: {appendix-caption} :caution-caption: 注意 //:chapter-signifier: ??? //:chapter-refsig: {chapter-signifier} :example-caption: 示例 :figure-caption: 图表 :important-caption: 重要 :last-update-label: 最后更新 ifdef::listing-caption[:listing-caption: 列表] ifdef::manname-title[:manname-title: 名称] :note-caption: 笔记 //:part-signifier: ??? //:part-refsig: {part-signifier} ifdef::preface-title[:preface-title: 序言] //:section-refsig: ??? :table-caption: 表格 :tip-caption: 提示 :toc-title: 目录 :untitled-label: 暂无标题 :version-label: 版本 :warning-caption: 警告 asciidoctor-2.0.20/data/locale/attributes-zh_TW.adoc000066400000000000000000000013211443135032600223130ustar00rootroot00000000000000// Traditional Chinese translation, courtesy of John Dong :appendix-caption: 附錄 :appendix-refsig: {appendix-caption} :caution-caption: 注意 //:chapter-signifier: ??? //:chapter-refsig: {chapter-signifier} :example-caption: 示例 :figure-caption: 圖表 :important-caption: 重要 :last-update-label: 最後更新 ifdef::listing-caption[:listing-caption: 列表] ifdef::manname-title[:manname-title: 名稱] :note-caption: 筆記 //:part-signifier: ??? 
//:part-refsig: {part-signifier} ifdef::preface-title[:preface-title: 序言] //:section-refsig: ??? :table-caption: 表格 :tip-caption: 提示 :toc-title: 目錄 :untitled-label: 暫無標題 :version-label: 版本 :warning-caption: 警告 asciidoctor-2.0.20/data/locale/attributes.adoc000066400000000000000000000013751443135032600212730ustar00rootroot00000000000000// This directory provides translations for all built-in attributes in Asciidoctor that emit translatable strings. // See http://asciidoctor.org/docs/user-manual/#customizing-labels to learn how to apply this file. // // If you're introducing a new translation, create a file named attributes-.adoc, where is the IANA subtag for the language. // Next, assign a translation for each attribute, using attributes-en.adoc as a reference. // // IMPORTANT: Do not include any blank lines in the transation file. // // NOTE: Please wrap the listing-caption and preface-title entries in a preprocessor conditional directive. // These attributes should only be updated if set explicitly by the user. ifdef::lang[include::attributes-{lang}.adoc[]] asciidoctor-2.0.20/data/reference/000077500000000000000000000000001443135032600167465ustar00rootroot00000000000000asciidoctor-2.0.20/data/reference/syntax.adoc000066400000000000000000000145211443135032600211270ustar00rootroot00000000000000= AsciiDoc Syntax :icons: font :stem: :toc: left :url-docs: https://asciidoctor.org/docs :url-gem: https://rubygems.org/gems/asciidoctor A brief reference of the most commonly used AsciiDoc syntax. You can find the full documentation for the AsciiDoc syntax at {url-docs}. == Paragraphs A normal paragraph. Line breaks are not preserved. // line comments, which are lines that start with //, are skipped A blank line separates paragraphs. [%hardbreaks] This paragraph is marked with the `hardbreaks` option. Notice how line breaks are now preserved. An indented (literal) paragraph disables text formatting, preserves spaces and line breaks, and is displayed in a monospaced font. [sidebar#id.role] Adding a style, ID, and/or role gives a paragraph (or block) special meaning, like this sidebar. NOTE: An admonition paragraph, like this note, grabs the reader's attention. TIP: Convert this document using the `asciidoctor` command to see the output produced from it. == Text Formatting :hardbreaks-option: .Constrained (applied at word boundaries) *strong importance* (aka bold) _stress emphasis_ (aka italic) `monospaced` (aka typewriter text) "`double`" and '`single`' typographic quotes +passthrough text+ (substitutions disabled) `+literal text+` (monospaced with substitutions disabled) a #mark# to remember (highlighted for notation) .Unconstrained (applied anywhere) **C**reate, **R**ead, **U**pdate, and **D**elete (CRUD) fan__freakin__tastic ``mono``culture ##mark##up your text .Replacements A long time ago in a galaxy far, far away... (C) 1976 Arty Artisan I believe I shall--no, actually I won't. .ID and roles for phrases [.line-through]#delete me# the [.path]_images_ directory a [#wibble.term]*wibble* does wobble .Macros // where c=specialchars, q=quotes, a=attributes, r=replacements, m=macros, p=post_replacements, etc. The European icon:flag[role=blue] is blue & contains pass:[************] arranged in a icon:circle-o[role=yellow]. The pass:c[->] operator is often referred to as the stabby lambda. Since `pass:[++]` has strong priority in AsciiDoc, you can rewrite pass:c,a,r[C++ => C{pp}]. 
// activate stem support by adding `:stem:` to the document header stem:[sqrt(4) = 2] :!hardbreaks-option: == Attributes // define attributes in the document header; must be flush with left margin :name: value You can download and install Asciidoctor {asciidoctor-version} from {url-gem}. C{pp} is not required, only Ruby. Use a leading backslash to output a word enclosed in curly braces, like \{name}. == Links [%hardbreaks] https://example.org/page[A webpage] link:../path/to/file.txt[A local file] xref:document.adoc[A sibling document] mailto:hello@example.org[Email to say hello!] == Anchors [[idname,reference text]] // or written using normal block attributes as `[#idname,reftext=reference text]` A paragraph (or any block) with an anchor (aka ID) and reftext. See <> or <>. xref:document.adoc#idname[Jumps to anchor in another document]. This paragraph has a footnote.footnote:[This is the text of the footnote.] == Lists === Unordered * level 1 ** level 2 *** level 3 **** level 4 ***** etc. * back at level 1 + Attach a block or paragraph to a list item using a list continuation (which you can enclose in an open block). .Some Authors [circle] - Edgar Allen Poe - Sheri S. Tepper - Bill Bryson === Ordered . Step 1 . Step 2 .. Step 2a .. Step 2b . Step 3 .Remember your Roman numerals? [upperroman] . is one . is two . is three === Checklist * [x] checked * [ ] not checked === Callout // enable callout bubbles by adding `:icons: font` to the document header [,ruby] ---- puts 'Hello, World!' # <1> ---- <1> Prints `Hello, World!` to the console. === Description first term:: description of first term second term:: description of second term == Document Structure === Header // header must be flush with left margin = Document Title Author Name v1.0, 2019-01-01 === Sections // must be flush with left margin = Document Title (Level 0) == Level 1 === Level 2 ==== Level 3 ===== Level 4 ====== Level 5 == Back at Level 1 === Includes // must be flush with left margin include::basics.adoc[] // define -a allow-uri-read to allow content to be read from URI include::https://example.org/installation.adoc[] == Blocks -- open - a general-purpose content wrapper; useful for enclosing content to attach to a list item -- // recognized types include CAUTION, IMPORTANT, NOTE, TIP, and WARNING // enable admonition icons by setting `:icons: font` in the document header [NOTE] ==== admonition - a notice for the reader, ranging in severity from a tip to an alert ==== ==== example - a demonstration of the concept being documented ==== .Toggle Me [%collapsible] ==== collapsible - these details are revealed by clicking the title ==== **** sidebar - auxiliary content that can be read independently of the main content **** .... literal - an exhibit that features program output .... 
---- listing - an exhibit that features program input, source code, or the contents of a file ---- [,language] ---- source - a listing that is embellished with (colorized) syntax highlighting ---- ```language fenced code - a shorthand syntax for the source block ``` [,attribution,citetitle] ____ quote - a quotation or excerpt; attribution with title of source are optional ____ [verse,attribution,citetitle] ____ verse - a literary excerpt, often a poem; attribution with title of source are optional ____ ++++ pass - content passed directly to the output document; often raw HTML ++++ // activate stem support by adding `:stem:` to the document header [stem] ++++ x = y^2 ++++ //// comment - content which is not included in the output document //// == Tables .Table Attributes [cols=>1h;2d,width=50%,frame=ends] |=== | Attribute Name | Values | options | header,footer,autowidth | cols | colspec[;colspec;...] | grid | all \| cols \| rows \| none | frame | all \| sides \| ends \| none | stripes | all \| even \| odd \| none | width | (0%..100%) | format | psv {vbar} csv {vbar} dsv |=== == Multimedia image::screenshot.png[block image,800,450] Press image:reload.svg[reload,16,opts=interactive] to reload the page. video::movie.mp4[width=640,start=60,end=140,options=autoplay] video::aHjpOzsQ9YI[youtube] video::300817511[vimeo] == Breaks // thematic break (aka horizontal rule) --- // page break <<< asciidoctor-2.0.20/data/stylesheets/000077500000000000000000000000001443135032600173645ustar00rootroot00000000000000asciidoctor-2.0.20/data/stylesheets/asciidoctor-default.css000066400000000000000000000705351443135032600240350ustar00rootroot00000000000000/*! Asciidoctor default stylesheet | MIT License | https://asciidoctor.org */ /* Uncomment the following line when using as a custom stylesheet */ /* @import "https://fonts.googleapis.com/css?family=Open+Sans:300,300italic,400,400italic,600,600italic%7CNoto+Serif:400,400italic,700,700italic%7CDroid+Sans+Mono:400,700"; */ html{font-family:sans-serif;-webkit-text-size-adjust:100%} a{background:none} a:focus{outline:thin dotted} a:active,a:hover{outline:0} h1{font-size:2em;margin:.67em 0} b,strong{font-weight:bold} abbr{font-size:.9em} abbr[title]{cursor:help;border-bottom:1px dotted #dddddf;text-decoration:none} dfn{font-style:italic} hr{height:0} mark{background:#ff0;color:#000} code,kbd,pre,samp{font-family:monospace;font-size:1em} pre{white-space:pre-wrap} q{quotes:"\201C" "\201D" "\2018" "\2019"} small{font-size:80%} sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline} sup{top:-.5em} sub{bottom:-.25em} img{border:0} svg:not(:root){overflow:hidden} figure{margin:0} audio,video{display:inline-block} audio:not([controls]){display:none;height:0} fieldset{border:1px solid silver;margin:0 2px;padding:.35em .625em .75em} legend{border:0;padding:0} button,input,select,textarea{font-family:inherit;font-size:100%;margin:0} button,input{line-height:normal} button,select{text-transform:none} button,html input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer} button[disabled],html input[disabled]{cursor:default} input[type=checkbox],input[type=radio]{padding:0} button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0} textarea{overflow:auto;vertical-align:top} table{border-collapse:collapse;border-spacing:0} *,::before,::after{box-sizing:border-box} html,body{font-size:100%} body{background:#fff;color:rgba(0,0,0,.8);padding:0;margin:0;font-family:"Noto Serif","DejaVu 
Serif",serif;line-height:1;position:relative;cursor:auto;-moz-tab-size:4;-o-tab-size:4;tab-size:4;word-wrap:anywhere;-moz-osx-font-smoothing:grayscale;-webkit-font-smoothing:antialiased} a:hover{cursor:pointer} img,object,embed{max-width:100%;height:auto} object,embed{height:100%} img{-ms-interpolation-mode:bicubic} .left{float:left!important} .right{float:right!important} .text-left{text-align:left!important} .text-right{text-align:right!important} .text-center{text-align:center!important} .text-justify{text-align:justify!important} .hide{display:none} img,object,svg{display:inline-block;vertical-align:middle} textarea{height:auto;min-height:50px} select{width:100%} .subheader,.admonitionblock td.content>.title,.audioblock>.title,.exampleblock>.title,.imageblock>.title,.listingblock>.title,.literalblock>.title,.stemblock>.title,.openblock>.title,.paragraph>.title,.quoteblock>.title,table.tableblock>.title,.verseblock>.title,.videoblock>.title,.dlist>.title,.olist>.title,.ulist>.title,.qlist>.title,.hdlist>.title{line-height:1.45;color:#7a2518;font-weight:400;margin-top:0;margin-bottom:.25em} div,dl,dt,dd,ul,ol,li,h1,h2,h3,#toctitle,.sidebarblock>.content>.title,h4,h5,h6,pre,form,p,blockquote,th,td{margin:0;padding:0} a{color:#2156a5;text-decoration:underline;line-height:inherit} a:hover,a:focus{color:#1d4b8f} a img{border:0} p{line-height:1.6;margin-bottom:1.25em;text-rendering:optimizeLegibility} p aside{font-size:.875em;line-height:1.35;font-style:italic} h1,h2,h3,#toctitle,.sidebarblock>.content>.title,h4,h5,h6{font-family:"Open Sans","DejaVu Sans",sans-serif;font-weight:300;font-style:normal;color:#ba3925;text-rendering:optimizeLegibility;margin-top:1em;margin-bottom:.5em;line-height:1.0125em} h1 small,h2 small,h3 small,#toctitle small,.sidebarblock>.content>.title small,h4 small,h5 small,h6 small{font-size:60%;color:#e99b8f;line-height:0} h1{font-size:2.125em} h2{font-size:1.6875em} h3,#toctitle,.sidebarblock>.content>.title{font-size:1.375em} h4,h5{font-size:1.125em} h6{font-size:1em} hr{border:solid #dddddf;border-width:1px 0 0;clear:both;margin:1.25em 0 1.1875em} em,i{font-style:italic;line-height:inherit} strong,b{font-weight:bold;line-height:inherit} small{font-size:60%;line-height:inherit} code{font-family:"Droid Sans Mono","DejaVu Sans Mono",monospace;font-weight:400;color:rgba(0,0,0,.9)} ul,ol,dl{line-height:1.6;margin-bottom:1.25em;list-style-position:outside;font-family:inherit} ul,ol{margin-left:1.5em} ul li ul,ul li ol{margin-left:1.25em;margin-bottom:0} ul.circle{list-style-type:circle} ul.disc{list-style-type:disc} ul.square{list-style-type:square} ul.circle ul:not([class]),ul.disc ul:not([class]),ul.square ul:not([class]){list-style:inherit} ol li ul,ol li ol{margin-left:1.25em;margin-bottom:0} dl dt{margin-bottom:.3125em;font-weight:bold} dl dd{margin-bottom:1.25em} blockquote{margin:0 0 1.25em;padding:.5625em 1.25em 0 1.1875em;border-left:1px solid #ddd} blockquote,blockquote p{line-height:1.6;color:rgba(0,0,0,.85)} @media screen and (min-width:768px){h1,h2,h3,#toctitle,.sidebarblock>.content>.title,h4,h5,h6{line-height:1.2} h1{font-size:2.75em} h2{font-size:2.3125em} h3,#toctitle,.sidebarblock>.content>.title{font-size:1.6875em} h4{font-size:1.4375em}} table{background:#fff;margin-bottom:1.25em;border:1px solid #dedede;word-wrap:normal} table thead,table tfoot{background:#f7f8f7} table thead tr th,table thead tr td,table tfoot tr th,table tfoot tr td{padding:.5em .625em .625em;font-size:inherit;color:rgba(0,0,0,.8);text-align:left} table tr th,table tr 
td{padding:.5625em .625em;font-size:inherit;color:rgba(0,0,0,.8)} table tr.even,table tr.alt{background:#f8f8f7} table thead tr th,table tfoot tr th,table tbody tr td,table tr td,table tfoot tr td{line-height:1.6} h1,h2,h3,#toctitle,.sidebarblock>.content>.title,h4,h5,h6{line-height:1.2;word-spacing:-.05em} h1 strong,h2 strong,h3 strong,#toctitle strong,.sidebarblock>.content>.title strong,h4 strong,h5 strong,h6 strong{font-weight:400} .center{margin-left:auto;margin-right:auto} .stretch{width:100%} .clearfix::before,.clearfix::after,.float-group::before,.float-group::after{content:" ";display:table} .clearfix::after,.float-group::after{clear:both} :not(pre).nobreak{word-wrap:normal} :not(pre).nowrap{white-space:nowrap} :not(pre).pre-wrap{white-space:pre-wrap} :not(pre):not([class^=L])>code{font-size:.9375em;font-style:normal!important;letter-spacing:0;padding:.1em .5ex;word-spacing:-.15em;background:#f7f7f8;border-radius:4px;line-height:1.45;text-rendering:optimizeSpeed} pre{color:rgba(0,0,0,.9);font-family:"Droid Sans Mono","DejaVu Sans Mono",monospace;line-height:1.45;text-rendering:optimizeSpeed} pre code,pre pre{color:inherit;font-size:inherit;line-height:inherit} pre>code{display:block} pre.nowrap,pre.nowrap pre{white-space:pre;word-wrap:normal} em em{font-style:normal} strong strong{font-weight:400} .keyseq{color:rgba(51,51,51,.8)} kbd{font-family:"Droid Sans Mono","DejaVu Sans Mono",monospace;display:inline-block;color:rgba(0,0,0,.8);font-size:.65em;line-height:1.45;background:#f7f7f7;border:1px solid #ccc;border-radius:3px;box-shadow:0 1px 0 rgba(0,0,0,.2),inset 0 0 0 .1em #fff;margin:0 .15em;padding:.2em .5em;vertical-align:middle;position:relative;top:-.1em;white-space:nowrap} .keyseq kbd:first-child{margin-left:0} .keyseq kbd:last-child{margin-right:0} .menuseq,.menuref{color:#000} .menuseq b:not(.caret),.menuref{font-weight:inherit} .menuseq{word-spacing:-.02em} .menuseq b.caret{font-size:1.25em;line-height:.8} .menuseq i.caret{font-weight:bold;text-align:center;width:.45em} b.button::before,b.button::after{position:relative;top:-1px;font-weight:400} b.button::before{content:"[";padding:0 3px 0 2px} b.button::after{content:"]";padding:0 2px 0 3px} p a>code:hover{color:rgba(0,0,0,.9)} #header,#content,#footnotes,#footer{width:100%;margin:0 auto;max-width:62.5em;*zoom:1;position:relative;padding-left:.9375em;padding-right:.9375em} #header::before,#header::after,#content::before,#content::after,#footnotes::before,#footnotes::after,#footer::before,#footer::after{content:" ";display:table} #header::after,#content::after,#footnotes::after,#footer::after{clear:both} #content{margin-top:1.25em} #content::before{content:none} #header>h1:first-child{color:rgba(0,0,0,.85);margin-top:2.25rem;margin-bottom:0} #header>h1:first-child+#toc{margin-top:8px;border-top:1px solid #dddddf} #header>h1:only-child,body.toc2 #header>h1:nth-last-child(2){border-bottom:1px solid #dddddf;padding-bottom:8px} #header .details{border-bottom:1px solid #dddddf;line-height:1.45;padding-top:.25em;padding-bottom:.25em;padding-left:.25em;color:rgba(0,0,0,.6);display:flex;flex-flow:row wrap} #header .details span:first-child{margin-left:-.125em} #header .details span.email a{color:rgba(0,0,0,.85)} #header .details br{display:none} #header .details br+span::before{content:"\00a0\2013\00a0"} #header .details br+span.author::before{content:"\00a0\22c5\00a0";color:rgba(0,0,0,.85)} #header .details br+span#revremark::before{content:"\00a0|\00a0"} #header #revnumber{text-transform:capitalize} #header 
#revnumber::after{content:"\00a0"} #content>h1:first-child:not([class]){color:rgba(0,0,0,.85);border-bottom:1px solid #dddddf;padding-bottom:8px;margin-top:0;padding-top:1rem;margin-bottom:1.25rem} #toc{border-bottom:1px solid #e7e7e9;padding-bottom:.5em} #toc>ul{margin-left:.125em} #toc ul.sectlevel0>li>a{font-style:italic} #toc ul.sectlevel0 ul.sectlevel1{margin:.5em 0} #toc ul{font-family:"Open Sans","DejaVu Sans",sans-serif;list-style-type:none} #toc li{line-height:1.3334;margin-top:.3334em} #toc a{text-decoration:none} #toc a:active{text-decoration:underline} #toctitle{color:#7a2518;font-size:1.2em} @media screen and (min-width:768px){#toctitle{font-size:1.375em} body.toc2{padding-left:15em;padding-right:0} #toc.toc2{margin-top:0!important;background:#f8f8f7;position:fixed;width:15em;left:0;top:0;border-right:1px solid #e7e7e9;border-top-width:0!important;border-bottom-width:0!important;z-index:1000;padding:1.25em 1em;height:100%;overflow:auto} #toc.toc2 #toctitle{margin-top:0;margin-bottom:.8rem;font-size:1.2em} #toc.toc2>ul{font-size:.9em;margin-bottom:0} #toc.toc2 ul ul{margin-left:0;padding-left:1em} #toc.toc2 ul.sectlevel0 ul.sectlevel1{padding-left:0;margin-top:.5em;margin-bottom:.5em} body.toc2.toc-right{padding-left:0;padding-right:15em} body.toc2.toc-right #toc.toc2{border-right-width:0;border-left:1px solid #e7e7e9;left:auto;right:0}} @media screen and (min-width:1280px){body.toc2{padding-left:20em;padding-right:0} #toc.toc2{width:20em} #toc.toc2 #toctitle{font-size:1.375em} #toc.toc2>ul{font-size:.95em} #toc.toc2 ul ul{padding-left:1.25em} body.toc2.toc-right{padding-left:0;padding-right:20em}} #content #toc{border:1px solid #e0e0dc;margin-bottom:1.25em;padding:1.25em;background:#f8f8f7;border-radius:4px} #content #toc>:first-child{margin-top:0} #content #toc>:last-child{margin-bottom:0} #footer{max-width:none;background:rgba(0,0,0,.8);padding:1.25em} #footer-text{color:hsla(0,0%,100%,.8);line-height:1.44} #content{margin-bottom:.625em} .sect1{padding-bottom:.625em} @media screen and (min-width:768px){#content{margin-bottom:1.25em} .sect1{padding-bottom:1.25em}} .sect1:last-child{padding-bottom:0} .sect1+.sect1{border-top:1px solid #e7e7e9} #content h1>a.anchor,h2>a.anchor,h3>a.anchor,#toctitle>a.anchor,.sidebarblock>.content>.title>a.anchor,h4>a.anchor,h5>a.anchor,h6>a.anchor{position:absolute;z-index:1001;width:1.5ex;margin-left:-1.5ex;display:block;text-decoration:none!important;visibility:hidden;text-align:center;font-weight:400} #content h1>a.anchor::before,h2>a.anchor::before,h3>a.anchor::before,#toctitle>a.anchor::before,.sidebarblock>.content>.title>a.anchor::before,h4>a.anchor::before,h5>a.anchor::before,h6>a.anchor::before{content:"\00A7";font-size:.85em;display:block;padding-top:.1em} #content h1:hover>a.anchor,#content h1>a.anchor:hover,h2:hover>a.anchor,h2>a.anchor:hover,h3:hover>a.anchor,#toctitle:hover>a.anchor,.sidebarblock>.content>.title:hover>a.anchor,h3>a.anchor:hover,#toctitle>a.anchor:hover,.sidebarblock>.content>.title>a.anchor:hover,h4:hover>a.anchor,h4>a.anchor:hover,h5:hover>a.anchor,h5>a.anchor:hover,h6:hover>a.anchor,h6>a.anchor:hover{visibility:visible} #content h1>a.link,h2>a.link,h3>a.link,#toctitle>a.link,.sidebarblock>.content>.title>a.link,h4>a.link,h5>a.link,h6>a.link{color:#ba3925;text-decoration:none} #content h1>a.link:hover,h2>a.link:hover,h3>a.link:hover,#toctitle>a.link:hover,.sidebarblock>.content>.title>a.link:hover,h4>a.link:hover,h5>a.link:hover,h6>a.link:hover{color:#a53221} 
details,.audioblock,.imageblock,.literalblock,.listingblock,.stemblock,.videoblock{margin-bottom:1.25em} details{margin-left:1.25rem} details>summary{cursor:pointer;display:block;position:relative;line-height:1.6;margin-bottom:.625rem;outline:none;-webkit-tap-highlight-color:transparent} details>summary::-webkit-details-marker{display:none} details>summary::before{content:"";border:solid transparent;border-left:solid;border-width:.3em 0 .3em .5em;position:absolute;top:.5em;left:-1.25rem;transform:translateX(15%)} details[open]>summary::before{border:solid transparent;border-top:solid;border-width:.5em .3em 0;transform:translateY(15%)} details>summary::after{content:"";width:1.25rem;height:1em;position:absolute;top:.3em;left:-1.25rem} .admonitionblock td.content>.title,.audioblock>.title,.exampleblock>.title,.imageblock>.title,.listingblock>.title,.literalblock>.title,.stemblock>.title,.openblock>.title,.paragraph>.title,.quoteblock>.title,table.tableblock>.title,.verseblock>.title,.videoblock>.title,.dlist>.title,.olist>.title,.ulist>.title,.qlist>.title,.hdlist>.title{text-rendering:optimizeLegibility;text-align:left;font-family:"Noto Serif","DejaVu Serif",serif;font-size:1rem;font-style:italic} table.tableblock.fit-content>caption.title{white-space:nowrap;width:0} .paragraph.lead>p,#preamble>.sectionbody>[class=paragraph]:first-of-type p{font-size:1.21875em;line-height:1.6;color:rgba(0,0,0,.85)} .admonitionblock>table{border-collapse:separate;border:0;background:none;width:100%} .admonitionblock>table td.icon{text-align:center;width:80px} .admonitionblock>table td.icon img{max-width:none} .admonitionblock>table td.icon .title{font-weight:bold;font-family:"Open Sans","DejaVu Sans",sans-serif;text-transform:uppercase} .admonitionblock>table td.content{padding-left:1.125em;padding-right:1.25em;border-left:1px solid #dddddf;color:rgba(0,0,0,.6);word-wrap:anywhere} .admonitionblock>table td.content>:last-child>:last-child{margin-bottom:0} .exampleblock>.content{border:1px solid #e6e6e6;margin-bottom:1.25em;padding:1.25em;background:#fff;border-radius:4px} .sidebarblock{border:1px solid #dbdbd6;margin-bottom:1.25em;padding:1.25em;background:#f3f3f2;border-radius:4px} .sidebarblock>.content>.title{color:#7a2518;margin-top:0;text-align:center} .exampleblock>.content>:first-child,.sidebarblock>.content>:first-child{margin-top:0} .exampleblock>.content>:last-child,.exampleblock>.content>:last-child>:last-child,.exampleblock>.content .olist>ol>li:last-child>:last-child,.exampleblock>.content .ulist>ul>li:last-child>:last-child,.exampleblock>.content .qlist>ol>li:last-child>:last-child,.sidebarblock>.content>:last-child,.sidebarblock>.content>:last-child>:last-child,.sidebarblock>.content .olist>ol>li:last-child>:last-child,.sidebarblock>.content .ulist>ul>li:last-child>:last-child,.sidebarblock>.content .qlist>ol>li:last-child>:last-child{margin-bottom:0} .literalblock pre,.listingblock>.content>pre{border-radius:4px;overflow-x:auto;padding:1em;font-size:.8125em} @media screen and (min-width:768px){.literalblock pre,.listingblock>.content>pre{font-size:.90625em}} @media screen and (min-width:1280px){.literalblock pre,.listingblock>.content>pre{font-size:1em}} .literalblock pre,.listingblock>.content>pre:not(.highlight),.listingblock>.content>pre[class=highlight],.listingblock>.content>pre[class^="highlight "]{background:#f7f7f8} .literalblock.output pre{color:#f7f7f8;background:rgba(0,0,0,.9)} .listingblock>.content{position:relative} .listingblock 
code[data-lang]::before{display:none;content:attr(data-lang);position:absolute;font-size:.75em;top:.425rem;right:.5rem;line-height:1;text-transform:uppercase;color:inherit;opacity:.5} .listingblock:hover code[data-lang]::before{display:block} .listingblock.terminal pre .command::before{content:attr(data-prompt);padding-right:.5em;color:inherit;opacity:.5} .listingblock.terminal pre .command:not([data-prompt])::before{content:"$"} .listingblock pre.highlightjs{padding:0} .listingblock pre.highlightjs>code{padding:1em;border-radius:4px} .listingblock pre.prettyprint{border-width:0} .prettyprint{background:#f7f7f8} pre.prettyprint .linenums{line-height:1.45;margin-left:2em} pre.prettyprint li{background:none;list-style-type:inherit;padding-left:0} pre.prettyprint li code[data-lang]::before{opacity:1} pre.prettyprint li:not(:first-child) code[data-lang]::before{display:none} table.linenotable{border-collapse:separate;border:0;margin-bottom:0;background:none} table.linenotable td[class]{color:inherit;vertical-align:top;padding:0;line-height:inherit;white-space:normal} table.linenotable td.code{padding-left:.75em} table.linenotable td.linenos,pre.pygments .linenos{border-right:1px solid;opacity:.35;padding-right:.5em;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none} pre.pygments span.linenos{display:inline-block;margin-right:.75em} .quoteblock{margin:0 1em 1.25em 1.5em;display:table} .quoteblock:not(.excerpt)>.title{margin-left:-1.5em;margin-bottom:.75em} .quoteblock blockquote,.quoteblock p{color:rgba(0,0,0,.85);font-size:1.15rem;line-height:1.75;word-spacing:.1em;letter-spacing:0;font-style:italic;text-align:justify} .quoteblock blockquote{margin:0;padding:0;border:0} .quoteblock blockquote::before{content:"\201c";float:left;font-size:2.75em;font-weight:bold;line-height:.6em;margin-left:-.6em;color:#7a2518;text-shadow:0 1px 2px rgba(0,0,0,.1)} .quoteblock blockquote>.paragraph:last-child p{margin-bottom:0} .quoteblock .attribution{margin-top:.75em;margin-right:.5ex;text-align:right} .verseblock{margin:0 1em 1.25em} .verseblock pre{font-family:"Open Sans","DejaVu Sans",sans-serif;font-size:1.15rem;color:rgba(0,0,0,.85);font-weight:300;text-rendering:optimizeLegibility} .verseblock pre strong{font-weight:400} .verseblock .attribution{margin-top:1.25rem;margin-left:.5ex} .quoteblock .attribution,.verseblock .attribution{font-size:.9375em;line-height:1.45;font-style:italic} .quoteblock .attribution br,.verseblock .attribution br{display:none} .quoteblock .attribution cite,.verseblock .attribution cite{display:block;letter-spacing:-.025em;color:rgba(0,0,0,.6)} .quoteblock.abstract blockquote::before,.quoteblock.excerpt blockquote::before,.quoteblock .quoteblock blockquote::before{display:none} .quoteblock.abstract blockquote,.quoteblock.abstract p,.quoteblock.excerpt blockquote,.quoteblock.excerpt p,.quoteblock .quoteblock blockquote,.quoteblock .quoteblock p{line-height:1.6;word-spacing:0} .quoteblock.abstract{margin:0 1em 1.25em;display:block} .quoteblock.abstract>.title{margin:0 0 .375em;font-size:1.15em;text-align:center} .quoteblock.excerpt>blockquote,.quoteblock .quoteblock{padding:0 0 .25em 1em;border-left:.25em solid #dddddf} .quoteblock.excerpt,.quoteblock .quoteblock{margin-left:0} .quoteblock.excerpt blockquote,.quoteblock.excerpt p,.quoteblock .quoteblock blockquote,.quoteblock .quoteblock p{color:inherit;font-size:1.0625rem} .quoteblock.excerpt .attribution,.quoteblock .quoteblock 
.attribution{color:inherit;font-size:.85rem;text-align:left;margin-right:0} p.tableblock:last-child{margin-bottom:0} td.tableblock>.content{margin-bottom:1.25em;word-wrap:anywhere} td.tableblock>.content>:last-child{margin-bottom:-1.25em} table.tableblock,th.tableblock,td.tableblock{border:0 solid #dedede} table.grid-all>*>tr>*{border-width:1px} table.grid-cols>*>tr>*{border-width:0 1px} table.grid-rows>*>tr>*{border-width:1px 0} table.frame-all{border-width:1px} table.frame-ends{border-width:1px 0} table.frame-sides{border-width:0 1px} table.frame-none>colgroup+*>:first-child>*,table.frame-sides>colgroup+*>:first-child>*{border-top-width:0} table.frame-none>:last-child>:last-child>*,table.frame-sides>:last-child>:last-child>*{border-bottom-width:0} table.frame-none>*>tr>:first-child,table.frame-ends>*>tr>:first-child{border-left-width:0} table.frame-none>*>tr>:last-child,table.frame-ends>*>tr>:last-child{border-right-width:0} table.stripes-all>*>tr,table.stripes-odd>*>tr:nth-of-type(odd),table.stripes-even>*>tr:nth-of-type(even),table.stripes-hover>*>tr:hover{background:#f8f8f7} th.halign-left,td.halign-left{text-align:left} th.halign-right,td.halign-right{text-align:right} th.halign-center,td.halign-center{text-align:center} th.valign-top,td.valign-top{vertical-align:top} th.valign-bottom,td.valign-bottom{vertical-align:bottom} th.valign-middle,td.valign-middle{vertical-align:middle} table thead th,table tfoot th{font-weight:bold} tbody tr th{background:#f7f8f7} tbody tr th,tbody tr th p,tfoot tr th,tfoot tr th p{color:rgba(0,0,0,.8);font-weight:bold} p.tableblock>code:only-child{background:none;padding:0} p.tableblock{font-size:1em} ol{margin-left:1.75em} ul li ol{margin-left:1.5em} dl dd{margin-left:1.125em} dl dd:last-child,dl dd:last-child>:last-child{margin-bottom:0} li p,ul dd,ol dd,.olist .olist,.ulist .ulist,.ulist .olist,.olist .ulist{margin-bottom:.625em} ul.checklist,ul.none,ol.none,ul.no-bullet,ol.no-bullet,ol.unnumbered,ul.unstyled,ol.unstyled{list-style-type:none} ul.no-bullet,ol.no-bullet,ol.unnumbered{margin-left:.625em} ul.unstyled,ol.unstyled{margin-left:0} li>p:empty:only-child::before{content:"";display:inline-block} ul.checklist>li>p:first-child{margin-left:-1em} ul.checklist>li>p:first-child>.fa-square-o:first-child,ul.checklist>li>p:first-child>.fa-check-square-o:first-child{width:1.25em;font-size:.8em;position:relative;bottom:.125em} ul.checklist>li>p:first-child>input[type=checkbox]:first-child{margin-right:.25em} ul.inline{display:flex;flex-flow:row wrap;list-style:none;margin:0 0 .625em -1.25em} ul.inline>li{margin-left:1.25em} .unstyled dl dt{font-weight:400;font-style:normal} ol.arabic{list-style-type:decimal} ol.decimal{list-style-type:decimal-leading-zero} ol.loweralpha{list-style-type:lower-alpha} ol.upperalpha{list-style-type:upper-alpha} ol.lowerroman{list-style-type:lower-roman} ol.upperroman{list-style-type:upper-roman} ol.lowergreek{list-style-type:lower-greek} .hdlist>table,.colist>table{border:0;background:none} .hdlist>table>tbody>tr,.colist>table>tbody>tr{background:none} td.hdlist1,td.hdlist2{vertical-align:top;padding:0 .625em} td.hdlist1{font-weight:bold;padding-bottom:1.25em} td.hdlist2{word-wrap:anywhere} .literalblock+.colist,.listingblock+.colist{margin-top:-.5em} .colist td:not([class]):first-child{padding:.4em .75em 0;line-height:1;vertical-align:top} .colist td:not([class]):first-child img{max-width:none} .colist td:not([class]):last-child{padding:.25em 0} .thumb,.th{line-height:0;display:inline-block;border:4px solid #fff;box-shadow:0 0 
0 1px #ddd} .imageblock.left{margin:.25em .625em 1.25em 0} .imageblock.right{margin:.25em 0 1.25em .625em} .imageblock>.title{margin-bottom:0} .imageblock.thumb,.imageblock.th{border-width:6px} .imageblock.thumb>.title,.imageblock.th>.title{padding:0 .125em} .image.left,.image.right{margin-top:.25em;margin-bottom:.25em;display:inline-block;line-height:0} .image.left{margin-right:.625em} .image.right{margin-left:.625em} a.image{text-decoration:none;display:inline-block} a.image object{pointer-events:none} sup.footnote,sup.footnoteref{font-size:.875em;position:static;vertical-align:super} sup.footnote a,sup.footnoteref a{text-decoration:none} sup.footnote a:active,sup.footnoteref a:active{text-decoration:underline} #footnotes{padding-top:.75em;padding-bottom:.75em;margin-bottom:.625em} #footnotes hr{width:20%;min-width:6.25em;margin:-.25em 0 .75em;border-width:1px 0 0} #footnotes .footnote{padding:0 .375em 0 .225em;line-height:1.3334;font-size:.875em;margin-left:1.2em;margin-bottom:.2em} #footnotes .footnote a:first-of-type{font-weight:bold;text-decoration:none;margin-left:-1.05em} #footnotes .footnote:last-of-type{margin-bottom:0} #content #footnotes{margin-top:-.625em;margin-bottom:0;padding:.75em 0} div.unbreakable{page-break-inside:avoid} .big{font-size:larger} .small{font-size:smaller} .underline{text-decoration:underline} .overline{text-decoration:overline} .line-through{text-decoration:line-through} .aqua{color:#00bfbf} .aqua-background{background:#00fafa} .black{color:#000} .black-background{background:#000} .blue{color:#0000bf} .blue-background{background:#0000fa} .fuchsia{color:#bf00bf} .fuchsia-background{background:#fa00fa} .gray{color:#606060} .gray-background{background:#7d7d7d} .green{color:#006000} .green-background{background:#007d00} .lime{color:#00bf00} .lime-background{background:#00fa00} .maroon{color:#600000} .maroon-background{background:#7d0000} .navy{color:#000060} .navy-background{background:#00007d} .olive{color:#606000} .olive-background{background:#7d7d00} .purple{color:#600060} .purple-background{background:#7d007d} .red{color:#bf0000} .red-background{background:#fa0000} .silver{color:#909090} .silver-background{background:#bcbcbc} .teal{color:#006060} .teal-background{background:#007d7d} .white{color:#bfbfbf} .white-background{background:#fafafa} .yellow{color:#bfbf00} .yellow-background{background:#fafa00} span.icon>.fa{cursor:default} a span.icon>.fa{cursor:inherit} .admonitionblock td.icon [class^="fa icon-"]{font-size:2.5em;text-shadow:1px 1px 2px rgba(0,0,0,.5);cursor:default} .admonitionblock td.icon .icon-note::before{content:"\f05a";color:#19407c} .admonitionblock td.icon .icon-tip::before{content:"\f0eb";text-shadow:1px 1px 2px rgba(155,155,0,.8);color:#111} .admonitionblock td.icon .icon-warning::before{content:"\f071";color:#bf6900} .admonitionblock td.icon .icon-caution::before{content:"\f06d";color:#bf3400} .admonitionblock td.icon .icon-important::before{content:"\f06a";color:#bf0000} .conum[data-value]{display:inline-block;color:#fff!important;background:rgba(0,0,0,.8);border-radius:50%;text-align:center;font-size:.75em;width:1.67em;height:1.67em;line-height:1.67em;font-family:"Open Sans","DejaVu Sans",sans-serif;font-style:normal;font-weight:bold} .conum[data-value] *{color:#fff!important} .conum[data-value]+b{display:none} .conum[data-value]::after{content:attr(data-value)} pre .conum[data-value]{position:relative;top:-.125em} b.conum *{color:inherit!important} .conum:not([data-value]):empty{display:none} 
dt,th.tableblock,td.content,div.footnote{text-rendering:optimizeLegibility} h1,h2,p,td.content,span.alt,summary{letter-spacing:-.01em} p strong,td.content strong,div.footnote strong{letter-spacing:-.005em} p,blockquote,dt,td.content,td.hdlist1,span.alt,summary{font-size:1.0625rem} p{margin-bottom:1.25rem} .sidebarblock p,.sidebarblock dt,.sidebarblock td.content,p.tableblock{font-size:1em} .exampleblock>.content{background:#fffef7;border-color:#e0e0dc;box-shadow:0 1px 4px #e0e0dc} .print-only{display:none!important} @page{margin:1.25cm .75cm} @media print{*{box-shadow:none!important;text-shadow:none!important} html{font-size:80%} a{color:inherit!important;text-decoration:underline!important} a.bare,a[href^="#"],a[href^="mailto:"]{text-decoration:none!important} a[href^="http:"]:not(.bare)::after,a[href^="https:"]:not(.bare)::after{content:"(" attr(href) ")";display:inline-block;font-size:.875em;padding-left:.25em} abbr[title]{border-bottom:1px dotted} abbr[title]::after{content:" (" attr(title) ")"} pre,blockquote,tr,img,object,svg{page-break-inside:avoid} thead{display:table-header-group} svg{max-width:100%} p,blockquote,dt,td.content{font-size:1em;orphans:3;widows:3} h2,h3,#toctitle,.sidebarblock>.content>.title{page-break-after:avoid} #header,#content,#footnotes,#footer{max-width:none} #toc,.sidebarblock,.exampleblock>.content{background:none!important} #toc{border-bottom:1px solid #dddddf!important;padding-bottom:0!important} body.book #header{text-align:center} body.book #header>h1:first-child{border:0!important;margin:2.5em 0 1em} body.book #header .details{border:0!important;display:block;padding:0!important} body.book #header .details span:first-child{margin-left:0!important} body.book #header .details br{display:block} body.book #header .details br+span::before{content:none!important} body.book #toc{border:0!important;text-align:left!important;padding:0!important;margin:0!important} body.book #toc,body.book #preamble,body.book h1.sect0,body.book .sect1>h2{page-break-before:always} .listingblock code[data-lang]::before{display:block} #footer{padding:0 .9375em} .hide-on-print{display:none!important} .print-only{display:block!important} .hide-for-print{display:none!important} .show-for-print{display:inherit!important}} @media amzn-kf8,print{#header>h1:first-child{margin-top:1.25rem} .sect1{padding:0!important} .sect1+.sect1{border:0} #footer{background:none} #footer-text{color:rgba(0,0,0,.6);font-size:.9em}} @media amzn-kf8{#header,#content,#footnotes,#footer{padding:0}} asciidoctor-2.0.20/data/stylesheets/coderay-asciidoctor.css000066400000000000000000000066341443135032600240360ustar00rootroot00000000000000/*! 
Stylesheet for CodeRay to loosely match GitHub themes | MIT License */ pre.CodeRay{background:#f7f7f8} .CodeRay .line-numbers{border-right:1px solid;opacity:.35;padding:0 .5em 0 0;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none} .CodeRay span.line-numbers{display:inline-block;margin-right:.75em} .CodeRay .line-numbers strong{color:#000} table.CodeRay{border-collapse:separate;border:0;margin-bottom:0;background:none} table.CodeRay td{vertical-align:top;line-height:inherit} table.CodeRay td.line-numbers{text-align:right} table.CodeRay td.code{padding:0 0 0 .75em} .CodeRay .debug{color:#fff!important;background:navy!important} .CodeRay .annotation{color:#007} .CodeRay .attribute-name{color:navy} .CodeRay .attribute-value{color:#700} .CodeRay .binary{color:#509} .CodeRay .comment{color:#998;font-style:italic} .CodeRay .char{color:#04d} .CodeRay .char .content{color:#04d} .CodeRay .char .delimiter{color:#039} .CodeRay .class{color:#458;font-weight:bold} .CodeRay .complex{color:#a08} .CodeRay .constant,.CodeRay .predefined-constant{color:teal} .CodeRay .color{color:#099} .CodeRay .class-variable{color:#369} .CodeRay .decorator{color:#b0b} .CodeRay .definition{color:#099} .CodeRay .delimiter{color:#000} .CodeRay .doc{color:#970} .CodeRay .doctype{color:#34b} .CodeRay .doc-string{color:#d42} .CodeRay .escape{color:#666} .CodeRay .entity{color:#800} .CodeRay .error{color:#808} .CodeRay .exception{color:inherit} .CodeRay .filename{color:#099} .CodeRay .function{color:#900;font-weight:bold} .CodeRay .global-variable{color:teal} .CodeRay .hex{color:#058} .CodeRay .integer,.CodeRay .float{color:#099} .CodeRay .include{color:#555} .CodeRay .inline{color:#000} .CodeRay .inline .inline{background:#ccc} .CodeRay .inline .inline .inline{background:#bbb} .CodeRay .inline .inline-delimiter{color:#d14} .CodeRay .inline-delimiter{color:#d14} .CodeRay .important{color:#555;font-weight:bold} .CodeRay .interpreted{color:#b2b} .CodeRay .instance-variable{color:teal} .CodeRay .label{color:#970} .CodeRay .local-variable{color:#963} .CodeRay .octal{color:#40e} .CodeRay .predefined{color:#369} .CodeRay .preprocessor{color:#579} .CodeRay .pseudo-class{color:#555} .CodeRay .directive{font-weight:bold} .CodeRay .type{font-weight:bold} .CodeRay .predefined-type{color:inherit} .CodeRay .reserved,.CodeRay .keyword{color:#000;font-weight:bold} .CodeRay .key{color:#808} .CodeRay .key .delimiter{color:#606} .CodeRay .key .char{color:#80f} .CodeRay .value{color:#088} .CodeRay .regexp .delimiter{color:#808} .CodeRay .regexp .content{color:#808} .CodeRay .regexp .modifier{color:#808} .CodeRay .regexp .char{color:#d14} .CodeRay .regexp .function{color:#404;font-weight:bold} .CodeRay .string{color:#d20} .CodeRay .string .string .string{background:#ffd0d0} .CodeRay .string .content{color:#d14} .CodeRay .string .char{color:#d14} .CodeRay .string .delimiter{color:#d14} .CodeRay .shell{color:#d14} .CodeRay .shell .delimiter{color:#d14} .CodeRay .symbol{color:#990073} .CodeRay .symbol .content{color:#a60} .CodeRay .symbol .delimiter{color:#630} .CodeRay .tag{color:teal} .CodeRay .tag-special{color:#d70} .CodeRay .variable{color:#036} .CodeRay .insert{background:#afa} .CodeRay .delete{background:#faa} .CodeRay .change{color:#aaf;background:#007} .CodeRay .head{color:#f8f;background:#505} .CodeRay .insert .insert{color:#080} .CodeRay .delete .delete{color:#800} .CodeRay .change .change{color:#66f} .CodeRay .head .head{color:#f4f} 
asciidoctor-2.0.20/docs/000077500000000000000000000000001443135032600150275ustar00rootroot00000000000000asciidoctor-2.0.20/docs/antora.yml000066400000000000000000000016121443135032600170360ustar00rootroot00000000000000name: asciidoctor title: Asciidoctor version: '2.0.20' asciidoc: attributes: xrefstyle: short@ listing-caption: Example@ release-version: '2.0.20' ruby-description: 'ruby 3.1.2p20 [x86_64-linux]' ruby-version: '3.1' url-api-gems: https://www.rubydoc.info/gems url-ruby: https://www.ruby-lang.org url-rubygems: https://rubygems.org/gems url-rvm: https://rvm.io nav: - modules/ROOT/nav-top.adoc - modules/install/nav.adoc - modules/get-started/nav.adoc - modules/convert/nav.adoc - modules/html-backend/nav.adoc - modules/docbook-backend/nav.adoc - modules/manpage-backend/nav.adoc - modules/cli/nav.adoc - modules/api/nav.adoc - modules/ROOT/nav-safe-modes.adoc - modules/tooling/nav.adoc - modules/syntax-highlighting/nav.adoc - modules/stem/nav.adoc - modules/extensions/nav.adoc - modules/ROOT/nav-lang.adoc - modules/ROOT/nav-errors.adoc - modules/migrate/nav.adoc asciidoctor-2.0.20/docs/modules/000077500000000000000000000000001443135032600164775ustar00rootroot00000000000000asciidoctor-2.0.20/docs/modules/ROOT/000077500000000000000000000000001443135032600172625ustar00rootroot00000000000000asciidoctor-2.0.20/docs/modules/ROOT/images/000077500000000000000000000000001443135032600205275ustar00rootroot00000000000000
[asciidoctor-2.0.20/docs/modules/ROOT/images/source-vs-output.png: binary PNG image data omitted]
_lϮT|xQ gƽk/o}B I%}\7eFD(1#:q\g.vwql;c(:WPv:'Gǃբ>a[r`S8*ٯ#fIq_wnM]#5z`gӿ6I{<[Kksu !uwo-/%{O<:\6!uٽewhҫ&jPׂ_EDDDDNlǫ7DDDDD8ɻÿ/,]ʼyHMIEѸq 0HNN/׶-m۶哏?A 0jBmNtTIPR%ضmeqnV$&&IY69agٷcॽg>]MЋ0\!?m o&'!v~o&wa.\|.l`<˓<[lN+hݾCg#Sͻdٲ"y'O}^~w9ϿIk|[Ϟ8kNXlYc yg>-}󉈈\RSS_0`YVPPx9 q;^ОK>GsaǜLxǷdI^xU,ꤵgnJ7UFDDDDu֥_8 t-l^8 t:p8y p8N9L8nLmgw:Qh_OP>/[ƍz>Rn3EDDD>t֍ݺ MQtX6V@)R?Kq݁þKyv߶HZq^/n 7hhkY ?<S|fg'=#:uj{s{z鳒F?- O""""rZ,_SG3m"#yⱩ;@L:?/ L`*;&>0;Xf AXh(aaa|`eqwڵc&111b>3}^=3zH{ԮU+Pfƍq}jofx=U׏b |KjRRRx<8&>0/>von. ϩS6Wa\5|5k`_r cʤO*'H):sg{cO<ɳO>qpC\ԣ{GaK%>..>v =Z`?Y"+?ү2:wć/ [oxxi7Jg\>*V)Tp^WX VWy7kΝ;X/ԩ ^/6O{M<3^9-[p\zУs\4g""""""""""rlVp!A/y+UHڵٰSrHW$s#&^s;2ŽE">YY,^೔+"/?¯չ}mlٺ?..0Xn+.BZz:yy7JE~9-Zsǭ7}dE| @~MG-IDD~ʈҠYE"##ٵ{)9+#**x|#ZhΌצ cSOm۶M : w?:Nz;޳wI&ddfAuض}|=..ك)>7~GJJ 5W#$$={̬Uv2i{mCӂ6$**9 J)zq..8 wIINGoi꠩Nh|' gdz rЦukmm0Mݻ1{nZ:H{- @noɷȈKlCI*l~ʈ˅ ߿u 0 vߠ|}Q6 {mq CΨڥ3]8c S^pӇ3g:=%?Z[.W223iP~9f} ^S4IVf?YNnnP3gcKDDDDDDDDDt(udky呑ɔ'9%}BBp\ƛ0:/MKӦmvvSm+0< :|գ[Wbٴb,Q`=êY#-[ꇹsᬚGzn+ӐY{U>Eݻv]*h~L6M#[[խKھcl64cM_ML3wg4ҷ1׷3RRRռYS 2D~~t0.jKTZF55QŊ NDCzt'c5oByd3=cyd3sk єѥN?aZz_ G_ZFij,|4kC?-%%Ő2xYԣ>0^]r5\.:wn]oBP.*ϳ\K1Fep8TZ2*??0\:?~#___*(&*;WUWreq9}`@uCx ^:@!uvK0 Yu\Z~ExK0r# IDATVcٶחs/Ok^ܶ/,9߶E m -%%Őr'r&3?~gg2VY mwctuCx ^:@!uCx ^:@!uCx ^:@!b/\WF%  \-rrrKj\1arr8x\qGa\.\saWJ*RJT "arŰS^:@!uCx ^A$eH6n]x{Ða$&>.:ԏa0rd/K{e\av;AR + vſPvlSy^q͟MIy=^=f+U Qe6K_U2dٔҮtۛGjڷwZnvڥ 7?r:3g#:TϣxC|Aww6JJ7!!AqqqW5('_G"鿽BJ}N;?wKкkuk…K0t:/i6)??̆a(77Wr:۸\.痺<93|9 =vIvOڿH Pff$ĉ:tǶ<{Knӂ㦥)//ONSjғO>7lбG%Iyyy%֗{>օfR˖-xb9s6uj[ҙ3g]l50#Fh:R$RRֆH;`.YtE8aJkYr7@qkh[&//Ϻ6iSt%Iyމ&3mDy.U_E)x 8"B/BBB'h֭@ 1B7p^z%լYz#a ad.߿ym9bC,YX_L19w-ylI>>>>C>ϲf~7jرڽ{v/E{jժYͷd^g6M2 JJJ*73ZjyU}6ƍdՋ_3I=@|}}KlϹuv fϽ̼vXb>Sp?ݯ;N/6˗kĉy*gHHBk;N>FzS*WLmb!ijt  kJ>^_WTWK#TUVMנAdەE[nl6Ҵ`_NT',L+W:tfft*,խWO*U… br8qBՓ6mڤoFyyдit+?;;[&M3dٔ&MhŊQ^^իggӦMZho.*|?j#?egg+::Z%%aÆłA6MG[chmL5lP4ivd͟7OUf͚[j޼yU^]ժU+>OuJM4)qΉ?L:uR˖-eٔ%KhժUڻwURʇYޟ7lcT~}J45p jZz<͜1Cׯj֬իWkV ::ZGQÆ kOjjf͚C)!!A>I!!!V̙3Z`֭]xթSG+WVjjM[*88X5k/_ *vڰaym(--MUUӱc4c %''+,,L?֮] U^])))۶fpݬYZz4k֬$+?{UVu|^}]J*kGYܿ?~ ݻvQڶ}zAeddB :w$? @*X/Wm6Ǎqc*fM^пo >s?e˖)##C=Ӛ:u233駟#yyyc5kV3ga$I5Ҷ[5|pEGG̙3~_z*ҹsJ*I*9_/k̛o ȗ_ԯ.8NÆ?}͙3GgΜѨQ޻Ar s: PTPA۵k.{=z3L+=3aѣx})=u8Ct߀ڰav6nܨc2 C#zJ3f̐$yMMu]w~['OVժUba}?sAJIIу<;vnk޽4p;ic9gUʕ|r}1yԮʕ++V-kZz$aÆZb1BfRff3Fc|Ӫgө }y&M?^ *UlN: ҶmԨqc޵K>KڶmvBCC5w4o\<ʒ!sA?~\=>;zTܩ{G{UW; nz5|ʒ0]M*UX 8_bp%]g|0 cÆoIS1 0.W8N0 cCa|Z{n#//ϸ ɓa7_~[7|0 x饗a"/_޽qYg4iزeYmii 8ӻ2QصkazjAƁ 0iu;999aƱcnj^k,\:^xXo^0 #&&hܨq10 #..ha-\o3l0:d\ۤnZ+_3Zli$%%acyheFm Wa] 0O?ޭ[6j=zak妛~0 ߌ6[111>>f7>>ިl|3s?l<aF;4^5kݤ?7ڷkgapF*3ի摑y*l׶1 03f[2Ξ=k{*ã>jNӸS'?ѨaCc߾}ÇO(M7h|1yg'Z36 Lis ؊j.W,n 'v:|ka?I>wNuRNNd鉡CUf͂B}_ryTIދۜeT;_]C[={:f ΂!nVիWOvN PNN:w}̙3XT*á{WM5g'ȣZsm42acnl6Ξ=k֨^zvG̶/z^9tvs[SE6fMU&꙯Nv|6/F/by%@µ^F1cO4jX <7_ũQF$)_ K '+9%U$%9"áZ!!%c͉Gm6|||SrqD~g]۴li.7_./513[rsstℂ%IGfSh>>>V=(p\q8}Zu=㒤z)66V۷mөSJgXXV^x]v5ax>|Xӿ>HwqjIe}ffu(.jr֐YNS))):PBzРA#n:͜9SƎ^۷OGx)+SjUh…JJJ_wղeKuM&LP||<?P}Uƍt:,`('N(!I}nKXX~=|XVofڻgUYlկ__w}^{U-?_Wk{;wTNN-Z.շ~{$ 6L3fЗ_|?qiy7|SS*##CwPӦMUR%)++K/ofxA;vLw$ 8P>%%%i͚5;gun=1tV\%ɓϔz޻Wڵk4|ӧnѣ:vbwa%&&өd+!2Ϟn~Af+V?:3fΝ:t$rjժ&}֯[Ӧ)55UqqqV~õvZ)--j/___}wZjL~I ]??EEE~O^,ݰp]F┒"Izz=mϨQ4oHJ1ҾQ0o)Y]-ZE\jqoVT:Uթ;zݸq̟@%$$Cr8j,woVZԩV\͛7ɓzԲeKIRdd2Ϟ՚5k﯈k!_Vtam޴Ia]z꥓Zbvܩݺ瞳1g'Z=>ΝST|!GSjJ:t ݮ'O*>>^͛7Wڵu-h=ZnΜ9VZZ`lެhbڷo_rժU+ժUK u1EFFjZt233U-(Hj׮ӧOkھ}:p^xEvM6MǏSHHM^YɓuI*TQ8$5jHv]Vá5kO?UXX222XׯS'OcլY3KUTIKQqCҥKճgO=6dhΝzbPw}2 Cԏ?X;TŊնm[mڸQ?o 5jfH~Z rXGjҤI]vکW^ںuoؠ艡C[rԨQ#EEEiҥڲeϝӛcƨ~ڽ{+""BIII  SnT^=-[LyyZr\߿_{UxX8N:)//O'OTBBІ T^=:}Z+WԌ3t뭷* *a ֍;kUڴqN$%W^Qm uu)!!Ak׮ ;o͛e(=zT7nThhÕUVgѲe˴{. 
ݻumuIZvnFfm޴Iv+DmڸQCCCqq^:wl-aKII1$Y`N}5<{\VҋҖgkӲ{s=`~S.]4ԵkW!.w>ƍZbj׮m5.ك9@x`sP$/um5}o1Ѵ9V8itїv2̴J8ƽW2s7~ 3dpA*21ef}n{hZ%z.\.沢۔V}y!Z4/{y*%ý[;КHmYZ{aM`[pejjZs޸6JJ;G̴̹=u\Pv)Mg wq5W8=ZիWW@HJ҃>nݺyMsm6;V{Q.]{g ~ׇ=ӧeW.D"##C999 יLURE*UQ@= x0{Z[_ɞEUR ʅ>=+ {ʃ(x .*ކ: J>u?G9@Ռ!!uCx ^:xjxzC.B IDATx ^:@!uC~gѣ2aj߾ǿDS=4~?4/۶mSzzu5i֧gϞAw8TZ55nX7xnvU\.ݗl6)44T[Vp 駟4ydM6o|mZzu-];ѣGKvZf&LPϟ?_+W.1 /| P>}tI2 =@x<-YDO?̙3MyXXڷo \jժiƍze$I~l٢-[hҥ?@{:7nܨٳg{ULL~͚5O]???u]^>)OImZJEGGk1cUf-;v6lؠPIuI&i̙9sIR߾}e%I%C,Y"y2l޼#quiѢEzW2nذA&L ̙3eF@xիW+??_uQ^^~?4?3g>3itb$R:tqW^QJJFm۶qQiv]߿y*T)OyiΝ)^ZuEozvU [om۶jҤ4i"` kY.]/ZG֭eٔu֝7111jӦ߾}*88#ݻN: $M4 ^o޼yj߾:w,IZ`U_+w]wݥ={*??_u3 /7?N֮].mz RJsssՠAy~EK*p&''G+VP߾}/4vJ:uJ?F͚5KKEaJLLԴiӴe%''RJj֬qlٲL͛7 T&Mԯ_?r-{oɒ%QE/kٲe:y/^~h[NIIIl[z쩁z~1eoY:m4}Gaaa3g믿EY[?Jl{G#GԔ)S*==]6lnrIRRKmذANtcǎ\^9==m.}j6m4i$͞=[9"___iFÇWDDDۺuk*??_׀Եk y{#_Jy;Uٳg+44z6ol XlUV)//O={رc =@x bŊ޽ڵk`kyi}7رƎ1c(,,L~yeddKbz>e߲мyԴi?fEGGP ݫoF6lSO=u*11Q~a,ܹsգG;V=nݪzJgC/,z?X?QFYfZv 'N;OjÆ _NfΜyQSRs%UVM!!!fSKѽ{wUTIZlY/zjWϞ=_5m4hС%v>}Ν;{ҨN:ڴi/^TU^]dFsi̘1V㨨(%%%iʔ)Zn.IZ P0\Z_V:ubcceaW_UZZ~i ؽiy ݟ>+!|ӣG+}wׯWZZ|||}ԨQ#}ݥRz{)ivءǏ{[x9gk͟?_uQ6mejԨ: x35iDk׮p8tM7I$qr8R9C~ϼ'K)լo߾Kj:qsE{w U`9ѣmw.___\.-ZcѣG{,\}QIS!..N'N(XG_}$)$$D7|C{bJHHPffl6XeUoq9Z':uT-q3gp\ zYf͚I*!^=޽{KJ<_Jyf_㣌 ;jѢïȱ?^lLU͛7W@@@UV &,Y۷NLLTԦMEDDHƍWl(X=j%RBϷ4}tOr83fQQQR^^bFGGK*f͚T֒~˛WIj޼1nnŊJ孓HkNR0l1bd~/IC/^}j!d;wU*++KX㱬W:IRs!օ3_pvEϕo7nT0y|NSa\0_z;w|x7tjڴiZlԵkWURf4}teff*%%EjRDDl6ۧ;wjݪQrJ}Gט1c<+[ֲetA͛7OҦM4}tM8Q;vM7$IԩS/l6ڴi#__ߋZt:vZ͞=[ԴiSuEa(##C;wԇ~oVAAAzwձcbo׮VX~I:|&Mկ__Ǐ-OYKS-o^8]V+VTrr-Zɓ'kڱcRSSձcGըQC6M/S*V( *ko^WV||,Xcǎi۶m1c>#lR?{lذAf͒aJII_ի[JHHP||t9h8sz^Z999jܸ֭+.áPXBׯWvv233}v?^w}o߮\k.D^ݕ5K-OimnAAAAϔauYeffiӦS5 <-_\ .T0l)YJKKSnBBB{IM2EׯWRRׯ>}hѴiGѣG+--o&ٳ;UZU 6ԍ7ި;믿֚5k$ݮ0C ]LYϧ<[޼jĉZtN:o^Æ SVVz!k.]hȐ!zKL|Y̙3ʕ+uql6խ[W}}W!xiiiޗW^]K.8qVX$U^]111e/\|FU1ϟoM7|3ZѓzӦM2eݫ<5h@Uyyyիڴi?֮]ۿ?oKӴiSM>U:uCx ^:@Uk a IENDB`asciidoctor-2.0.20/docs/modules/ROOT/nav-errors.adoc000066400000000000000000000000421443135032600222040ustar00rootroot00000000000000* xref:errors-and-warnings.adoc[] asciidoctor-2.0.20/docs/modules/ROOT/nav-lang.adoc000066400000000000000000000000431443135032600216120ustar00rootroot00000000000000* xref:localization-support.adoc[] asciidoctor-2.0.20/docs/modules/ROOT/nav-safe-modes.adoc000066400000000000000000000000741443135032600227200ustar00rootroot00000000000000* xref:safe-modes.adoc[] ** xref:reference-safe-mode.adoc[] asciidoctor-2.0.20/docs/modules/ROOT/nav-top.adoc000066400000000000000000000000661443135032600215000ustar00rootroot00000000000000[] * xref:features.adoc[] [] * xref:whats-new.adoc[] asciidoctor-2.0.20/docs/modules/ROOT/pages/000077500000000000000000000000001443135032600203615ustar00rootroot00000000000000asciidoctor-2.0.20/docs/modules/ROOT/pages/docinfo.adoc000066400000000000000000000001421443135032600226270ustar00rootroot00000000000000= Docinfo Files :page-location: asciidoc:docinfo:index.adoc Relocated to xref:{page-location}[]. asciidoctor-2.0.20/docs/modules/ROOT/pages/errors-and-warnings.adoc000066400000000000000000000207211443135032600251150ustar00rootroot00000000000000= Errors and Warnings All application messages (i.e., debug, info, warning, error, or fatal) emitted by Asciidoctor are written to the logger. By default, only warning, error, and fatal messages are logged. When using the CLI, the logger routes messages to the console via stderr (i.e., standard error). == Notification types DEBUG:: Debug messages provide insight into what the software is doing and can be useful for tracking down problems or unexpected behavior. 
Currently, Asciidoctor does not log any messages at this level. However, extensions may do so. <>:: Info messages do not stop conversion, but they indicate possible problems, and the output may not be what you were expecting. <> (WARNING):: Warnings do not stop conversion, but they indicate likely problems, and the output probably won't be what you were expecting. In the CLI output, Asciidoctor labels these messages as `WARNING` rather than `WARN`. <>:: Errors do not stop conversion, but the output document will almost certainly be wrong. <> (FAILED):: Failures are fatal; no output document will be produced. If the `--trace` CLI option is specified, the message may be accompanied by a stacktrace. In the CLI output, Asciidoctor labels these messages as `FAILED` rather than `FATAL`. The messages listed in the following sections may contain some of the following representative placeholders: :: Represents the basename of the source file being processed (e.g., `sample.adoc`). :: Represents a path to the input file or other referenced file. :: Represents a URI (i.e., URL) being referenced. or :: Placeholders for other contextual information in the message. [#info] == Info messages [cols=2*] |=== |Message |Troubleshooting |possible invalid reference: |The processor could not validate the internal reference named . This message doesn't definitively indicate a missing reference. Rather, it indicates the reference (i.e., anchor) is not in a location where the reference can be validated. |=== [#warning] == Warning messages [cols=2*] |=== |Message |Troubleshooting |abstract block cannot be used in a document without a title when doctype is book. Excluding block content. |Invalid book document structure. |cannot retrieve contents of at URI: (allow-uri-read attribute not enabled) |Reading from a URI is only allowed if the allow-uri-read attribute is set from the CLI or API. |could not retrieve contents of at URI: |Web address not found. |could not retrieve image data from URI: |Web address not found. Only occurs with `allow-uri-read` and `data-uri`. Check the URI. |dropping line containing reference to missing attribute: |An attribute cannot be resolved and the `attribute-missing` attribute is set to `drop-line`. |file does not exist or cannot be read: |You specified a stylesheet (`-a stylesheet=`) but does not exist or is not readable. |gem 'concurrent-ruby' is not installed. This gem is recommended when registering custom converters. |You have registered a custom converter, and you have not installed the concurrent-ruby gem. |gem 'concurrent-ruby' is not installed. This gem is recommended when using custom backend templates. |You are using custom templates (`-T `), but you have not installed the concurrent-ruby gem. |image to embed not found or not readable: |You used `:data-uri:` but the file could not be found. |include file not readable: |You do not have permission to access the file. |input path is a , not a file |The path is not a file (perhaps it is a socket or a block device). |optional gem 'asciimath' is not installed. Functionality disabled. |asciimath is a required library when converting AsciiMath expressions in AsciiDoc to DocBook. |optional gem 'rouge' is not installed. Functionality disabled. |Rouge is used for source code highlighting when `source-highlighter` is set to `rouge`. |skipping reference to missing attribute: |An attribute cannot be resolved and the `attribute-missing` attribute is set to `skip`. 
|tables must have at least one body row | |tag '' not found in include file: |You tried to include by tagged region, but the included document does not have that tag. |: id assigned to already in use: | is a duplicate ID, meaning it has already been assigned to a node of (e.g., section, block, anchor). If you don't see the problem in , check that the duplicate ID isn't coming from a file which is being included. | callout list item index: expected got |Callouts are expected to be in numerical order, just like any ordered list. | include not readable: |If is a file, do you have read permissions for it? If it is a URI and `-a allow-uri-read` is set, does it exist? | include file not found: |Probably a typo or missing file. If not, make sure you understand the search process. | invalid empty detected in style attribute |The first positional attribute in the block attributes could not be parsed. | invalid style for block: |You have added a custom style to a block, but you haven't registered a custom block extension to handle it. | invalid style for paragraph: |You have a line `[]` before a paragraph, but `name-of-style` isn't a recognized built-in style. | list item index: expected , got |You gave explicit numbers on an ordered list, but they were not sequential. Asciidoctor renumbers them for you, and gives this warning. | multiple ids detected in style attribute |Multiple IDs cannot be specified in the block style (e.g., `[#cat#dog]`). | no callouts refer to list item |The callout is missing or not recognized. In source listings, is the callout the last thing on the line? | section title out of sequence |Invalid document structure. Check section levels. |=== [#error] == Error messages [cols=2*] |=== |Message |Troubleshooting |input file missing or cannot be read |Check that the file exists and that the filename is not misspelled. |include file has illegal reference to ancestor of jail; recovering automatically |The safe mode is restricting access to an include file outside of the base directory. |input file and output file cannot be the same: |Choose a different output directory or filename. |partintro block can only be used when doctype is book and it\'s a child of a part section. Excluding block content. |Invalid book document structure. |unmatched macro: endif::[] |`endif::[]` with no unclosed preceding `ifdef::[]`. | dropping cell because it exceeds specified number of columns // The extra cells are dropped, but this message is not produced ??? | | illegal block content outside of partintro block |Invalid book document structure. | invalid part, must have at least one section (e.g., chapter, appendix, etc.) |Invalid book document structure. | malformed manpage title |Document does not conform to the structure required by the declared manpage doctype. | malformed name section body |Document does not conform to the structure required by the declared manpage doctype. | maximum include depth of 64 exceeded |Does your file include itself, directly or indirectly? | mismatched macro: endif::[], expected endif::[] |ifdef/endif blocks must be strictly nested. | name section expected |Document does not conform to the structure required by the declared manpage doctype. | name section title must be at level 1 |Document does not conform to the structure required by the declared manpage doctype. | only book doctypes can contain level 0 sections |Illegal use of a level-0 section when doctype is not book. 
| table missing leading separator, recovering automatically |Check for missing cell separator characters at the start of the line. |=== [#fatal] == Fatal messages [cols=2*] |=== |Message |Troubleshooting |missing converter for backend ''. Processing aborted. |You used -b with an invalid or missing backend. //|Failed to load AsciiDoc document - undefined method `convert' for nil:NilClass //| |'tilt' could not be loaded |You must have the tilt gem installed (`gem install tilt`) to use custom templates. |=== //// API only |ERROR |IOError, %(target directory does not exist: #{to_dir}) |API, the mkdirs option is not set, and the target directory does not already exist. //// asciidoctor-2.0.20/docs/modules/ROOT/pages/features.adoc000066400000000000000000000244431443135032600230360ustar00rootroot00000000000000= Features :description: This page highlights the features of Asciidoctor that make it a great choice for processing and publishing your AsciiDoc content. {description} == Readily available with no dependencies Asciidoctor is written in Ruby, which means it must be run on a Ruby language runtime (including JRuby). But that's its only requirement. Asciidoctor is packaged and distributed as a gem named {url-rubygems}/asciidoctor[asciidoctor^] to the package repository at {url-rubygems}[RubyGems.org^]. The gem can be installed on *any operating system* that Ruby supports using Ruby's package management tools (gem or bundle). Asciidoctor itself has no dependencies. But what if you aren't familiar with Ruby or, for whatever reason, prefer not to install it? No problem! Thanks to xref:asciidoctorj::index.adoc[AsciidoctorJ] and xref:asciidoctor.js::index.adoc[Asciidoctor.js], you can run the same exact version of Asciidoctor on a Java Virtual Machine (JVM) or JavaScript runtime, respectively. That means you don't need a Ruby runtime installed on your machine after all. AsciidoctorJ uses JRuby internally, whereas Asciidoctor.js relies on a Ruby-like runtime written in JavaScript. Whether you're working in Ruby, Java, or JavaScript, Asciidoctor is readily available for you to start using. Only when you get into add-on converters and extensions do you need to install dependencies. == Quick wins Asciidoctor provides a nice out-of-the-box xref:html-backend:index.adoc[HTML experience], complete with a default stylesheet and built-in integrations like Font Awesome (for icons), Highlight.js (for source highlighting), and MathJax (for STEM processing). When you're just getting into using AsciiDoc for writing, Asciidoctor's HTML output should be sufficient for all of your publishing needs. The following before and after picture gives you an idea of what to expect: image::source-vs-output.png[AsciiDoc source vs HTML output] If you're looking for more advanced output, or you have an existing DocBook toolchain in place, you can instead xref:docbook-backend:index.adoc[convert to DocBook] and feed the result into that pipeline. Once you get more familiar with AsciiDoc and Asciidoctor, you can explore customizing the built-in converter using templates or using add-on converters to produce other output formats such as PDF and EPUB 3. So there is plenty of room to grow. == Built-in and add-on converters Asciidoctor provides converters for three output formats by default: xref:html-backend:index.adoc[HTML 5], xref:docbook-backend:index.adoc[DocBook 5], and xref:manpage-backend:index.adoc[man page] (short for manual page). 
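To give a sense of how one of these backends is selected when driving the processor from Ruby, here is a minimal sketch (the input file name and output directory are placeholders, not part of the official examples):

[source,ruby]
----
require 'asciidoctor'

# The -b/--backend CLI flag maps to the :backend option in the API.
# 'html5' is the default; 'docbook5' and 'manpage' select the other built-in converters.
Asciidoctor.convert_file 'document.adoc',
  safe: :safe,
  backend: 'docbook5',
  to_dir: 'build',
  mkdirs: true
----
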
These converters are designed to cover a majority of users`' needs for basic preview and publishing. The *HTML* converter provides a result you can publish the web straight away without any tweaking. The *DocBook* converter allows you to leverage an existing publishing toolchain or migrate the content to a different authoring format (without the tool needing to know how to parse AsciiDoc). The *man page* converter drastically lowers the barrier to making system help files. But it doesn't end there. The converter interface in Asciidoctor is an extension point. That means it can be used to create converters for any output format imaginable. And there's an ecosystem of additional converters already available in the Asciidoctor project. You can find converters for creating *PDF*, *EPUB 3*, *Reveal.js slides*, and more from AsciiDoc. Asciidoctor also provides advanced docinfo support for injecting colophon (such as content scripts) into the header and footer of the output file. A single input format, AsciiDoc, gains you a plethora of output formats. == Custom converter or templates While Asciidoctor provides a built-in converter for producing publish-ready HTML, *all the HTML that Asciidoctor generates can be changed.* There are two ways to modify the HTML that Asciidoctor produces: a custom converter or converter templates. If you're an experienced programmer, you may lean towards the *custom converter*. You can extend the built-in HTML converter and override the methods that handle the conversion for any node in the document tree. If your expertise is more on the technical writing side, you may find the *converter templates* to be more approachable. The templates can be written in any template language supported by the Tilt template abstraction library, such as ERB, Haml, or Slim. These templates augment the built-in converter by replacing the processing for a node in the document. You introduce one template for each type of node for which you want to control conversion. Templates allow you to apply logic around chunks of HTML (or HTML-like) markup. Just know that if the HTML that Asciidoctor produces isn't working for you, you can change it. == Syntax highlighting If you're writing technical documentation that presents snippets of source code or configuration, you can enhance the display of those source blocks using syntax highlighting (aka source highlighting). Syntax highlighting is the practice of colorizing (or otherwise emphasizing) keywords and syntax elements in a structured programming or configuration language. Here's an example to give you an idea: [source,ruby] ---- phrase = "I love AsciiDoc" puts phrase # now say it like you mean it 5.times { puts %(#{phrase}!) } ---- Asciidoctor provides adapters for several popular syntax highlighters, including Rouge and Highlight.js. Aside from installing the library (if necessary), all you need to do is set a document attribute on your document and Asciidoctor will handle the rest. From there, you can configure the behavior of the syntax highlighter, such as changing the style/theme, enabling line numbers, and block highlighting select lines. == Multiple interfaces: CLI and API Asciidoctor offers two interfaces for processing AsciiDoc content: a commandline interface (CLI) and an application programming interface (API). The *CLI* is designed as a simple tool for non-programmers who want to convert AsciiDoc without having to write a program or for converting content in an automated environment such as CI. 
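For instance, a CI job might invoke the processor with a command along these lines (the file and directory names here are placeholders used purely for illustration):

 $ asciidoctor -b docbook -D build article.adoc
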
Many of the processing options are accessible from the CLI using option flags. When you're first starting out with Asciidoctor, you'll mostly likely interact with it via the CLI. Although the CLI itself does not require any programming, it can still load extension code that augments processing. NOTE: If you're migrating from AsciiDoc.py, the `asciidoctor` CLI is a drop-in replacement for the `asciidoc` CLI. The *API* is designed for programmers who want to take their AsciiDoc processing further. Like with the CLI, you can use the API to convert documents. But it's not all about conversion to an output format. //Alternately, you can load the document just to inspect its contents. Asciidoctor parses and converts the source document in discrete steps. This makes conversion optional and gives programs the opportunity to extract, add, or replace content in the document by interacting with the document object model. //Developers can use the full power of the Ruby programming language to play with the content in the document. Or you may want to leverage the ability to convert to an embedded document for integrating with other applications, such as a static site generator. The API also provides an extension SPI that you can use to augment the processor, such as to introduce new syntax, mutate the parsed document before conversion, or tweak the output after conversion. TIP: The API is written in Ruby, but also accessible from JVM languages or JavaScript when using AsciidoctorJ or Asciidoctor.js, respectively. Both the CLI and API have the ability to process both AsciiDoc files and AsciiDoc source passed in as a string. == Impressive performance and strong security No coverage of Asciidoctor would be complete without mention of its speed. Asciidoctor is about as fast as any program that runs in Ruby can be. It can load, parse, and convert a 100K AsciiDoc document in about a tenth of a second (~ 1MB/s). That's more than 100x as fast as AsciiDoc.py, the original AsciiDoc implementation. Asciidoctor's speed is good news for developer productivity and good news for server-side applications that need to convert AsciiDoc markup. It also means that preview tools like the browser extension can present a preview of the AsciiDoc content in HTML in near real time. Asciidoctor also has the ability to run securely by offering several xref:safe-modes.adoc[security (aka safe) modes]. By using one of these safe modes, you don't have to worry about the processor accessing sensitive files or even the file system in highly secure environments. In addition to its performance, these security levels make Asciidoctor well-suited for server-side deployments. == Access to an ecosystem of extensions and tools Installing Asciidoctor is just the beginning of your publishing experience. Asciidoctor gives you access to a {url-org}[healthy ecosystem of extensions and tools^], ranging from add-on converters, to extended syntax, to build plugins, to integrated writing and preview environments. One popular extension is xref:diagram-extension::index.adoc[Asciidoctor Diagram]. When loaded, Asciidoctor Diagram allows you to make diagrams from plain text (much like AsciiDoc does for writing). Asciidoctor Diagram does this by extending the syntax of AsciiDoc to recognize specially marked literal blocks. It takes the text inside those blocks, passes it through one of the diagramming tools it integrates with, and reinserts the image back into the document as it is being processed. 
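A specially marked block of that kind might look something like the following sketch (the `plantuml` block name assumes the PlantUML integration is in use; any of the supported diagramming tools can be named instead):

[source,asciidoc]
----
[plantuml]
....
Alice -> Bob: request
Bob --> Alice: response
....
----
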
The result is that the diagram source in the AsciiDoc document becomes an image in the generated output. Another popular tool is the {url-org}/asciidoctor-browser-extension[browser extension^]. When this extension is installed, you can browse to an AsciiDoc file on your local storage or on the web and the browser will show you the converted HTML *instead of* the AsciiDoc source. That means you can get the out-of-the-box HTML experience that Asciidoctor provides without even having to run a command or script. The extension running in the browser does everything for you. These are just two examples. There are plenty more possibilities to explore in the ever-growing Asciidoctor ecosystem. All the components of this ecosystem work together to achieve one goal, to make writing in AsciiDoc a rewarding and productive experience. asciidoctor-2.0.20/docs/modules/ROOT/pages/index.adoc000066400000000000000000000135121443135032600223220ustar00rootroot00000000000000= Asciidoctor Documentation :navtitle: Introduction :description: A brief introduction to Asciidoctor, its ecosystem, its relationship to AsciiDoc, and the language platforms on which it runs. This is the documentation for the Ruby-based AsciiDoc processor named Asciidoctor. Asciidoctor currently serves as the reference implementation for how to interpret the AsciiDoc language. If you're looking for the documentation for the AsciiDoc language, see the xref:asciidoc::index.adoc[AsciiDoc section] of this website. == What is Asciidoctor? Asciidoctor is a fast, open source, text processor for parsing AsciiDoc into a document model, then converting it to output formats such as [.nowrap]#HTML 5#, [.nowrap]#DocBook 5#, man(ual) pages, PDF, and [.nowrap]#EPUB 3#. Asciidoctor is written in the Ruby programming language. When we use the name "`Asciidoctor`" in this area of the documentation, we're referring to the core Asciidoctor Ruby processor, abbreviated as _Asciidoctor core_ or _Asciidoctor Ruby_. We add the qualifier "`core`" because Asciidoctor is also the name of the suite of projects that provides runtimes, extensions, and tools for processing AsciiDoc. To simplify installation, Asciidoctor is packaged as a gem named {url-rubygems}/asciidoctor[asciidoctor^] and published to the gem hosting service located at RubyGems.org. A https://guides.rubygems.org/what-is-a-gem/[gem^] is a packaged Ruby application or library (in this case, both). The *asciidoctor* gem can be installed directly from RubyGems.org on all major operating systems using Ruby packaging tools (gem or bundle). Asciidoctor is also distributed as a Docker image, as a package for numerous Linux distributions, and as a package for macOS (via Homebrew and MacPorts). Asciidoctor is open source software available under the terms of the MIT license and {url-org}[hosted on GitHub^]. Asciidoctor is the successor to AsciiDoc.py, the legacy Python-based processor for the AsciiDoc language. If you're using AsciiDoc.py, follow xref:migrate:asciidoc-py.adoc[] to learn how to upgrade to Asciidoctor. == Basic usage Asciidoctor provides two interfaces for processing AsciiDoc documents, a xref:cli:index.adoc[CLI] named `asciidoctor` and a xref:api:index.adoc[Ruby API] named `Asciidoctor`. The following table gives you an idea of how to use these interfaces. |=== ^|CLI ^|API a| $ asciidoctor document.adoc a| [source,ruby] ---- require 'asciidoctor' Asciidoctor.convert_file \ 'document.adoc', safe: :safe ---- |See xref:cli:index.adoc[] to learn more about this interface. 
|See xref:api:index.adoc[] to learn more about this interface. |=== == Relationship to AsciiDoc AsciiDoc is the language. + Asciidoctor is the processor. You compose documents using the xref:asciidoc::index.adoc[AsciiDoc language], which is a text-based writing format. The AsciiDoc language was designed to be unobtrusive and concise to simplify writing and make it more pleasant. But AsciiDoc itself is not a publishing format. It's more like a shorthand. That's where an AsciiDoc processor comes in. //Asciidoctor reads and parses text written in the AsciiDoc syntax, then feeds the parse tree into a set of built-in templates to produce HTML, PDF, DocBook, man page, and other output formats. //You have the option of writing your own converter or loading Tilt-supported templates to customize the generated output or produce alternative formats. //Asciidoctor also offers a responsive theme based on Foundation to style the HTML5 output. An AsciiDoc processor, such as Asciidoctor, reads the AsciiDoc source and converts it to publishable formats such as HTML 5 or PDF. It can also convert it to formats which themselves can be processed by a publishing toolchain, such as DocBook. Since AsciiDoc is not published as is, the processing step provides an opportunity to augment the document by expanding shorthand codes, layering in integrations, and applying a pleasant visual style. That augmentation is a large part of what Asciidoctor does. In brief, you give AsciiDoc source to Asciidoctor and it gives you a result you can publish. == Ecosystem When we talk about Asciidoctor in this section, we are talking about the core processor and built-in converters. Asciidoctor also has an ecosystem of extensions, converters, build plugins, and tools to help you author and publish content written in AsciiDoc. You can find the documentation for these projects elsewhere on this site or in the Asciidoctor organization on GitHub. == Beyond Ruby Although Asciidoctor is written in Ruby, it does not mean you need Ruby to use it. You can also run Asciidoctor using a JVM via AsciidoctorJ or JavaScript via Asciidoctor.js. === Java / JVM Thanks to xref:asciidoctorj::index.adoc[AsciidoctorJ], Asciidoctor can be used in any program that runs on a Java Virtual Machine (JVM). Under the covers, AsciidoctorJ uses JRuby to run Asciidoctor, but this is hidden behind a Java API. AsciidoctorJ even provides an alternate CLI, which means you can get the full Asciidoctor experience with only a Java runtime. Building atop AsciidoctorJ, there are also plugins that integrate Asciidoctor into Apache Maven, Gradle, or Javadoc builds. === JavaScript Thanks to xref:asciidoctor.js::index.adoc[Asciidoctor.js], Asciidoctor can also be used in JavaScript. The Asciidoctor.js project uses Opal to transpile the Ruby source in Asciidoctor to JavaScript. The result is a fully-functional version of Asciidoctor that works on any JavaScript runtime, such as a modern browser or Node.js. Asciidoctor.js even provides an alternate CLI, which means you can get the full Asciidoctor experience with only a JavaScript runtime. Asciidoctor.js is used to power the AsciiDoc language preview extensions for Chrome, Atom, Brackets, and other web-based tooling. To be accurate, you can run Asciidoctor using Ruby, a JVM, or JavaScript. This documentation focuses on the Ruby version of Asciidoctor. 
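As a concrete illustration of that Ruby workflow, the following minimal sketch (the file name is a placeholder) loads a document into the document model first and only converts it afterwards:

[source,ruby]
----
require 'asciidoctor'

# Parse the AsciiDoc source into a document model without converting it yet.
doc = Asciidoctor.load_file 'document.adoc', safe: :safe

# Inspect the parsed document, then convert it when ready.
puts doc.doctitle
html = doc.convert
----
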
asciidoctor-2.0.20/docs/modules/ROOT/pages/localization-support.adoc000066400000000000000000000135121443135032600254150ustar00rootroot00000000000000= Localization Support :keywords: translations, labels, l10n, internationalization, i18n :url-docbook-i8n: http://www.sagehill.net/docbookxsl/Localizations.html :url-lang-attributes: {url-org}/asciidoctor/blob/{page-origin-refname}/data/locale/attributes.adoc Asciidoctor is not restricted to working with English-only content. Asciidoctor can process the full range of the UTF-8 character set. That means you can write your document in any language, save the file with UTF-8 encoding, and expect Asciidoctor to convert the text properly. Furthermore, you can customize the built-in labels (e.g., "`Appendix`") to match the language in which you are writing. This process is known as localization (l10n). The built-in labels used in the output generated by Asciidoctor are English by default. These labels can be modified by changing the value of the <>. There are some caveats to know about: * Currently, the official HTML and PDF converters only fully support left-to-right (and top-to-bottom) reading. Support for right-to-left (RTL) is being considered. See {url-org}/asciidoctor/issues/1601[issue #1601^] for details. In the interim, you can leverage the DocBook toolchain to get right-to-left support. * Attributes that store dates and times (e.g., `docdatetime`) are always formatted like `2019-01-04 19:26:06 -0600`. * Application messages (i.e., log messages) are always in English. * Asciidoctor does not support the language conf files used by AsciiDoc.py. However, Asciidoctor does provide {url-lang-attributes}[a translation file^] that can be used for a similar purpose. [#customizing-labels] == Translating built-in labels When converting to DocBook, you can rely on the DocBook toolchain to translate (most) built-in labels. To activate this feature, set the `lang` attribute to a valid country code (which defaults to `en` for English). For example: $ asciidoctor -a lang=es -b docbook article.adoc The list of supported languages, as well as additional language considerations for DocBook, are described in {url-docbook-i8n}[DocBook XSL: The Complete Guide^]. The `lang` attribute _does not_ enable automatic translation of built-in labels when converting directly to HTML or PDF. It's merely a hint to configure the DocBook toolchain. If you're not using the DocBook toolchain for publishing, you must translate each built-in label yourself. One way is to set the following attributes in the document header or by passing the attributes via the API or CLI: .Attributes that control built-in labels [#label-attributes,cols="~,~,30%"] |=== |Attribute |Default English Value |Notes |`appendix-caption` |Appendix | |`appendix-refsig` |Appendix |Used when referencing an appendix. |`caution-caption` |Caution | |`chapter-signifier` |Chapter |Only applies when `doctype` is `book`. |`chapter-refsig` |Chapter |Used when referencing a chapter. |`example-caption` |Example | |`figure-caption` |Figure | |`important-caption` |Important | |`last-update-label` |Last updated | |`listing-caption` |_not set_ |By default, listing blocks do not have captions. Specify `listing-caption` to turn on captions for listing blocks. |`manname-title` |NAME |Only applies when `doctype` is `manpage`. |`note-caption` |Note | |`part-signifier` |Part | |`part-refsig` |Part |Used when referencing a part. |`preface-title` |_not set_ |Only applies when `doctype` is `book`. 
|`section-refsig` |Section |Used when referencing a section. |`table-caption` |Table | |`tip-caption` |Tip | |`toc-title` |Table of Contents | |`untitled-label` |Untitled | |`version-label` |Version | |`warning-caption` |Warning | |=== If you plan to support multiple languages, you'll want to define the attributes for each language inside a xref:asciidoc:directives:conditionals.adoc[conditional preprocessor directive]. For example: [source,asciidoc] ---- \ifeval::["{lang}" == "de"] :caution-caption: Achtung ... \endif::[] ---- Of course, you're probably hoping this has already been done for you. Indeed, it has! You can find an {url-lang-attributes}[AsciiDoc file^] in the Asciidoctor repository that provides translations of these attributes for most major languages. Thus far, the built-in labels have been translated into the following languages: Arabic, Belarusian, Bulgarian, Catalan, Czech, Danish, Dutch, German, Spanish, Persian (Farsi), Finnish, French, Hungarian, Bahasa Indonesian, Italian, Japanese, Korean, Norwegian Bokmål, Norwegian Nynorsk, Polish, Portuguese, Brazilian Portuguese, Romanian, Russian, Serbian Cyrillic, Serbian Latin, Swedish, Turkish, Ukrainian, Vietnamese, Simplified Chinese, and Traditional Chinese. The translations are defined using AsciiDoc attribute entries inside conditional preprocessor blocks, just as suggested above. To use this file to translate the built-in labels according the value of the `lang` attribute (just like the DocBook toolchain does), follow these steps: . Download the AsciiDoc file {url-lang-attributes}[attributes.adoc^] from the Asciidoctor repository. . Put the file in the folder [.path]_locale_ relative to your document. . Add the following line to the header of your AsciiDoc document: + [source,asciidoc] ---- \include::locale/attributes.adoc[] ---- . Set the language using the `lang` attribute. This attribute must be set before the include directive gets processed. For example: -a lang=es The built-in labels will now be translated automatically based on the value of the `lang` attribute. There's an ongoing discussion about how to make language support even simpler ({url-org}/asciidoctor/issues/1129[issue #1129^]). Input is welcome. == Translation Asciidoctor (or DocBook) currently does not support translation of content out of the box. There's a proposal to integrate gettext (https://discuss.asciidoctor.org/Professional-providers-translating-Asciidoc-tt2692.html#none[discussion^]), and suggestions are welcome. asciidoctor-2.0.20/docs/modules/ROOT/pages/reference-safe-mode.adoc000066400000000000000000000050561443135032600250130ustar00rootroot00000000000000= Safe Mode Specific Content // anchor: set-safe-attrs Asciidoctor provides access to the current safe mode through built-in attributes. You can use these attributes to enable or disable content based on the current safe mode of the processor. == Referencing safe modes The xref:safe-modes.adoc[safe mode] can be referenced by one of the following document attributes: * The value of the `safe-mode-name` attribute (e.g., unsafe, safe, etc.) * The value of the `safe-mode-level` attribute (e.g., 0, 10, etc.) * The presence of the `safe-mode-` attribute, where `` is the safe mode name. The attributes in the next example define replacement text for features that are disabled in high security environments: [source,asciidoc] ---- \ifdef::safe-mode-secure[] Link to chapters instead of including them. 
\endif::safe-mode-secure[] ---- This feature is particularly handy for displaying content on GitHub, where the safe mode is set to its most restrictive setting, xref:safe-modes.adoc#secure[SECURE]. You can set the xref:cli:set-safe-mode.adoc[safe mode from the CLI] and the xref:api:set-safe-mode.adoc[API]. //// Allow the include directive to import a file from a URI. Example: include::https://cdn.jsdelivr.net/gh/asciidoctor/asciidoctor/README.adoc[] To be secure by default, the allow-uri-read attribute must be set in the API or CLI (not document) for this feature to be enabled. It's also completely disabled if the safe mode is SECURE or greater. Since this is a potentially dangerous feature, it’s disabled if the safe mode is SECURE or greater. Assuming the safe mode is less than SECURE, you must also set the allow-uri-read attribute to permit Asciidoctor to read content from a URI. I decided the following defaults for the standalone option make the most sense: true if using the cli (use -s to disable, consistent with asciidoc) false if using the API, unless converting directly to a file, in which case true is the default The basic logic is that if you are writing to a file, you probably want to create a standalone document. If you are converting to a string, then you probably want an embedded document. Of course, you can always set it explicitly, this is just a default setting. The reason I think the standalone default is important is because we don't want people switching from Markdown to AsciiDoc and be totally taken by surprise when they start getting a full HTML document. On the other hand, if you are converting to a file (or using the cli), then it makes a lot of sense to write a standalone document. To me, it just feels natural now. //// asciidoctor-2.0.20/docs/modules/ROOT/pages/safe-modes.adoc000066400000000000000000000071161443135032600232410ustar00rootroot00000000000000= Safe Modes Asciidoctor provides security levels that control the read and write access of attributes, include directives, macros, and scripts while a document is processing. Each level includes the restrictions enabled in the prior security level. .Security assumptions [#security-assumptions] **** Asciidoctor's safe modes are primarily focused on what the processor is permitted to do. The safe modes do not provide a comprehensive security framework. In particular, there's no safe mode that restricts the kind of content the author can pass through to the output document. In other words, the safe mode setting does not provide a way to sanitize the output. Asciidoctor performs sensible escaping to allow an author to safely input text, but does not limit the content that can be included in the output using passthrough blocks or custom substitutions. The reason for this policy is that we assume the document will be passed through a sanitizer if the HTML must be embedded directly into a web page, precisely what GitHub and GitLab do. This postprocessing (which could be done using a postprocessor extension) is better handled by a separate tool since there are many permutations to consider and only a separate tool would know which restrictions to apply for a given situation. **** The safe mode can be set from the xref:cli:set-safe-mode.adoc[CLI] and the xref:api:set-safe-mode.adoc[API]. You can also xref:reference-safe-mode.adoc[enable or disable content based on the current safe mode]. [#unsafe] == UNSAFE The `UNSAFE` safe mode level disables any security features enforced by Asciidoctor. 
Ruby is still subject to its own restrictions. *This is the default safe mode for the CLI.* Its integer value is `0`. [#safe] == SAFE The `SAFE` safe mode level prevents access to files which reside outside of the parent directory of the source file. Include directives (`+include::[]+`) are enabled, but paths to include files must be within the parent directory. This mode allows assets (such as the stylesheet) to be embedded in the document. Its integer value is `1`. [#server] == SERVER The `SERVER` safe mode level disallows the document from setting attributes that would affect conversion of the document. This level trims `docfile` to its relative path and prevents the document from: * setting `source-highlighter`, `doctype`, `docinfo` and `backend` * seeing `docdir` (as it can reveal information about the host filesystem) It allows `icons` and `linkcss`. Its integer value is `10`. [#secure] == SECURE The `SECURE` safe mode level disallows the document from attempting to read files from the file system and including their contents into the document. Additionally, it: * disables icons * disables include directives (`+include::[]+`) * data can not be retrieved from URIs * prevents access to stylesheets and JavaScript files * sets the backend to `html5` * disables `docinfo` files * disables `data-uri` * disables interactive (`opts=interactive`) and inline (`opts=inline`) modes for SVGs * disables `docdir` and `docfile` (as these can reveal information about the host filesystem) * disables source highlighting xref:extensions:index.adoc[Asciidoctor extensions] may still embed content into the document depending whether they honor the safe mode setting. *This is the default safe mode for the API.* Its integer value is `20`. TIP: GitHub processes AsciiDoc files using the `SECURE` mode. //// |=== |{empty} |Unsafe |Safe |Server |Secure |URI access |system access |base directory access |docdir |docfile |docinfo |backend |doctype |source-highlighter |macros |include |data-uri |linkcss |icons |=== //// asciidoctor-2.0.20/docs/modules/ROOT/pages/whats-new.adoc000066400000000000000000001201231443135032600231250ustar00rootroot00000000000000//= What's New (Asciidoctor {page-component-version}) = What's New in {page-component-version} :doctype: book :description: The new features, improvements, and bug fixes made in each patch release of the Asciidoctor {page-component-version} release line. :page-toclevels: 0 :url-releases-asciidoctor: {url-org}/asciidoctor/releases :url-milestone: {url-org}/asciidoctor/milestone/33?closed=1 {description} The releases are ordered from newest to oldest. 
_**Cumulative issues resolved:** {url-milestone}[2.0.x^]_ = Asciidoctor 2.0.19 _**Release date:** 2023.05.15_ == Improvements * Return empty string instead of nil if raw or verbatim block has no lines * Don't uppercase monospace span in section title in manpage output (#4402) * Simplify processing of implicit link (i.e., autolink) by separating implicit and explicit match * Generate partintro block consistently (#4450) * Add Kiswahili translation for built-in labels (PR #4454) (*@bkmgit*) == Compliance * Fix call order so use of an include file with invalid encoding continues to raise error when using Ruby >= 3.2.0 * Fix test assertion for fallback Rouge stylesheet to be compatible with Rouge 4.1 (#4406) (*@tmzullinger*) * Support `notitle` option on section as alternative to `untitled` to hide title (#4437) * Add support for Haml 6 to template converter (#4429) == Bug Fixes * Process constrained inline passthrough inside monospace span (#4458) * Catalog inline ref defined using anchor macro even when resolved reftext is empty * Use while loop rather than recursion to locate next line to process; prevents stack limit error (#4368) * Avoid matching numeric character references when searching for # in xref target (#4393) * Use correct selector to collapse margin on first and last child of sidebar * Don't allow target of include directive to start with a space (to distinguish it from a dlist item) or to end with a space * Manify alt text of block image in manpage output (#4401) * Adjust font size of term in horizontal dlist to match font size of term in regular dlist * Implicitly attach nested list that starts with block attribute lines to dlist entry (#4268) * Don't swallow square brackets when processing escaped URL macro * Treat `uri:classloader:` as an absolute path prefix when running on JRuby (#3929) * Apply reftext substitutions to value of `mantitle` attribute in DocBook output (#4448) * Enclose `` tag in `
` tag in DocBook output for man page (#4452) * Correctly handle compat role on monospace and constrained passthrough when box attrlist or formatted text is escaped == Build / Infrastructure * Update latest CRuby in CI workflow to 3.2 * Update latest JRuby in CI workflow to 9.4.2.0 = Asciidoctor 2.0.18 _**Release date:** 2022.10.15_ == Bug Fixes * Change internal `uriish?` helper to only detect a URI pattern at start of a string; avoids misleading messages (#4357) * Prevent highlight.js warning when no language is set on source block; don't call `highlightBlock` if `data-lang` attribute is absent (#4263) * Don't raise error if `Asciidoctor::Extensions.unregister` is called before groups are initialized (#4270) * If path is included both partially and fully, store it with true value (included fully) in includes table of document catalog * Reset registry if activate is called on it again (#4256) * Format source location in exception message when extension code is malformed * Fix lineno on reader when `skip-front-matter` attribute is set but end of front matter is not found * Fix `Asciidoctor::Cli::Invoker` constructor when first argument is a hash * Update default stylesheet to honor marker on unordered list when marker is defined on ancestor unordered list (#4361) == Improvements * Propagate `:to_dir` option to document of AsciiDoc table cell (#4297) * Force encoding of attribute data passed via CLI to UTF-8 if transcoding fails (#4351) (*@zkaip*) = Asciidoctor 2.0.17 _**Release date:** 2022.01.05_ == Bug Fixes * Don't crash if process method for custom block returns an abstract block with context :compound that isn't of type Block (e.g., a list) * Ignore return value of process method for custom block or block macro if value matches parent argument * Remove unnamespaced selectors in Pygments stylesheet * Normalize output from Pygments to use `linenos` class for inline line numbering and trim space after number; update default stylesheet accordingly * Change `AbstractBlock#sections?` to return false when called on block that isn't a Section or Document (PR #3591) (*@Mogztter*) * Hide built-in marker on HTML summary element in Safari when using default stylesheet (#4162) * Hide outline around HTML summary when activated in Safari (#4162) * Include primary video in value of playlist attribute when embedding YouTube video (#4156) * Honor `stripes=none` on nested table (#4165) * Update default stylesheet to fix spacing around empty list item (#4184) * Honor `:header_only` option when parsing document with manpage doctype (#4192) * Use numeric character reference for closing square bracket around alt text of icon * Process `author` or `authors` document attribute in document header when implicit doctitle is absent (#4206) * Patch open-uri-cached gem to work with Ruby 3.1 (update: drop patch now that open-uri-cached has been fixed) (#4227) == Improvements * Prevent line numbers on source blocks in HTML output from being selected (applies to pygments and coderay) (#4128) * Allow hash to be specified for Vimeo video either in video ID or using hash attribute (#4176) * Remove unnecessary specificity in default stylesheet for styling p element inside list item * Remove obsolete gist embed styles from default stylesheet * Allow `--failure-level` to be set to default value, `FATAL` * Sort levels in help for `--failure-level` option in ascending order * Invert FR translations for caution & warning admonition labels (#4212) (*@cyChop*) * Add tests for open-uri-cached integration that's activated by the 
`cache-uri` attribute * Don't warn if negated tag is not found in include file (#4230) == Build / Infrastructure * Add Ruby 3.1 to CI matrix = Asciidoctor 2.0.16 _**Release date:** 2021.08.03_ == Bug Fixes * Include all lines outside of specified tagged region when tag filter on include directive is a single negated tag (#4048) * Only interpret negated wildcard in tag filter on include directive as implicit globstar if it precedes other tags (#4086) * Change `ifeval` directive to resolve to false if comparison operation cannot be performed (#4046) * Don't crash if `:to_file` option is passed to `load` or `load_file` and value is not a string (#4055) * Use automatic link text if ID in shorthand xref is followed by dangling comma (e.g., `+<>+`) * Update default stylesheet to indent blocks attached to list item in checklist (#2550) * Update default stylesheet to re-enable styling of implicit lead role on first paragraph of preamble inside AsciiDoc table cell * Update default stylesheet to fix conflict between text decoration and bottom border on abbr[title] element * Change invalid font family "sans" in default stylesheet to "sans-serif" * Fix missing automatic reftext for internal xrefs in manpage output (#4110) * Replace numeric character reference for plus in manpage output (#4059) * Replace numeric character reference for degree sign in manpage output (#4059) * Convert apostrophe to the portable `+\*(Aq+` variable instead of the groff-specific escape `\(aq` (#4060) (*@felipec*) * Document the `-e, --embedded` option flag in the man page, which replaces the outdated `-e, --eruby` option flag == Improvements * Use queue to iterate over lines in reader instead of stack (#4106) * Uppercase automatic reftext for level-2 section titles in manpage output if reftext matches section title (#4110) * Show safe modes in strictness order in CLI help (#4065) * Remove redundant styles from the default stylesheet * Update font styles for summary element in default stylesheet to match font styles of paragraph (#4114) * Update default stylesheet to indent content of details element (#4116) * Update default stylesheet to use custom marker for summary element to make appearance consistent (#4116) * Add Vietnamese translation of built-in attributes (PR #4066) (*@nguyenhoa93*) * Add Thai translation of built-in attributes (PR #4113) (*@ammaneena*) == Build / Infrastructure * Import source of default stylesheet into this repository; use PostCSS with cssnano to minify (#4062) * Use autoprefixer to manage browser prefixes in default stylesheet (#4118) = Asciidoctor 2.0.15 _**Release date:** 2021.04.27_ == Bug Fixes * Don't include trailing period, question mark, or exclamation point in target (URL) of autolink (#3860) * Don't assign nil value to named attribute mapped to absent positional attribute when parsing attrlist (#4033) * Remove leading and trailing spaces around role on inline phrase (#4035) * Ignore empty role on inline phrase defined using legacy syntax and followed by comma (#4035) * Use xreftext on document as fallback link text in HTML output for inter-document xref that resolves to current document when no link text is provided (#4032) * Use xreftext on document as fallback link text in HTML output for internal xref with empty fragment when no link text is provided (#4032) * Use document ID as linkend in DocBook output for internal xref with empty fragment; auto-generating one if necessary (#4032) == Improvements * Format keyboard references in monospace in man page output == Build and 
infrastructure * Get remaining invoker tests working on JRuby 9.1 for Windows = Asciidoctor 2.0.14 _**Release date:** 2021.04.19_ == Bug fixes * Don't allow AsciiDoc table cell to set document attribute that was unset from the API (exceptions include: compat-mode, toc, showtitle, and notitle) (#4017) * Ensure default document attributes unset in parent document remain unset in AsciiDoc table cell (#2586) * Allow `showtitle` and `notitle` to be toggled in AsciiDoc table cell if set in parent document (#4018) * Ensure mtime of input file honors TZ environment variable on JRuby for Windows (affects value of `docdatetime` attribute) (#3550) * Honor caption attribute on blocks that support captioned title even if corresponding `*-caption` document attribute (e.g., `example-caption`) is unset (#4023) * Suppress missing attribute warning when applying substitutions to implicit document title for assignment to intrinsic `doctitle` attribute (#4024) + If you want to use an attribute reference in the document title (i.e., level-0 section title), and you also need to reference the `doctitle` attribute somewhere in the document, then any attributes you reference in the document title must be defined before that line (aka follow document order, just like any other attribute entry). Otherwise, they will remain unresolved in the value of the `doctitle` attribute (though they will still work in the document title itself). == Improvements * Use attribute, if set, as seed value for counter even if not already registered as a counter (#4014) * Allow subs attribute value on Inline node returned by process method for custom inline macro to be a String (#3938) * Allow value of `user-home` attribute to be overridden by API or CLI (#3732) == Build and infrastructure * Run tests on JRuby for Windows (#3550) = Asciidoctor 2.0.13 _**Release date:** 2021.04.10_ == Bug fixes * Rollback change for #3470, which added logic to remove leading and trailing empty lines in an AsciiDoc include file; instead skip empty lines before processing document header (#3997) * Don't allow `counter` and `counter2` attribute directives to override locked attributes (#3939) (*@mogztter*) * Fix crash when resolving next value in sequence for counter with non-numeric value (#3940) * Honor list of tags following negated wildcard on include directive (#3932) * Update default stylesheet to remove the dash in front of cite on nested quote block (#3847) * Don't mangle formatting macros when uppercasing section titles in man page output (#3892) * Don't escape hyphen in `manname` in man page output * Remove extra `.sp` line before content of verse block in man page output * Fix layout of footnotes in man page output (#3989) * Fix formatting of footnote text with URL in man page output (#3988) * Remove redundant trailing space on URL followed by non-adjacent text in man page output (#4004) * Use `.bp` macro at location of page break in man page output (#3992) == Improvements * Extract method to create lexer and formatter in Rouge adapter (#3953) (*@Oblomov*) * Add support for pygments.rb 2.x (#3969) (*@slonopotamus*) * Allow `NullLogger` to be enabled by setting the `:logger` option to a falsy value (#3982) * Substitute attributes in manpurpose part of NAME section in man page doctype (#4000) * Output all mannames in name section of HTML output for man page doctype (#3757) == Build and infrastructure * Enable running tests as root (PR #3874) (*@mikemckiernan*) * Import documentation for processor into Asciidoctor core repository (#3861) 
(*@graphitefriction*) * Speed up CI by using Bundler cache (PR #3901) (*@slonopotamus*) * Run tests against both pygments.rb 1.x and 2.x (#3969) (*@slonopotamus*) == Documentation * Multiple copyedits and typo fixes (PR #3858)(PR #3912)(PR #3913) (*@mogztter*) * Improve Dutch translation by removing diacritic from first letter of `toc-title` and adding translations for `part` and `section` (PR #3895) (*@jdevreese*) * Update _attributes-it.adoc_ (PR #3886) (*@ciampix*) * Correct extension registry errors in _options.adoc_ (PR #3902) (*@djencks*) * Minor improvements to docs features page (PR #3917) (*@Younes-L*) * Add Belarusian translation of built-in attributes (PR #3928) (*@morganov*) * Document the `header_only` option (PR #3934) (*@mogztter*) * Add a pandoc command using docker and update command on MS Word migration page (PR #3956) (*@dacog*) * Apply minor typo and flow change to default stylesheet docs (PR #3977) (*@chrisperrault*) * Update instructions for Migrating from Confluence XHTML (#3994) (*@juliojgd*) = Asciidoctor 2.0.12 _**Release date:** 2020.11.10_ == Bug Fixes * Set `type` and `target` property on unresolved footnote reference and unset `id` property (fixes regression) (#3825) * Fix crash when inlining an SVG if the explicit width or height value on the image node is not a string (#3829) * Reset word wrap behavior to normal on tables, then re-enable again for admonition content, horizontal dlist description, and AsciiDoc table cells (#3833) == Improvements * Pass through role to DocBook output for inline image (#3832) == Compliance * Defer use of Ruby >= 2.3 constructs to restore compatibility with Ruby 2.0 until at least next minor release (#3827) * Don't append the default px unit identifier to the explicit width or height value when inlining an SVG (#3829) == Build and infrastructure * Migrate Linux CI jobs to GitHub Actions (#3837) * Migrate Windows CI jobs to GitHub Actions (#3839) * Run CI job on macOS (#3842) = Asciidoctor 2.0.11 _**Release date:** 2020.11.02_ == Bug fixes * Fix infinite loop when callout list with obsolete syntax is found inside list item (#3472) * Fix infinite loop when xreftext contains a circular reference path in HTML and man page converters (#3543) * Apply text formatting to table cells in implicit header row when column has the `a` or `l` style (#3760) * Fix errant reference warning for valid reference when running in compat mode (#3555) * Initialize backend traits for converter (if not previously initialized) using assigned basebackend; mimics Asciidoctor < 2 behavior (#3341) * Set `source_location` on preamble block when `sourcemap` option is enabled (#3799) * Link the `notitle` and `showtitle` attributes so they act as opposites for the same toggle (#3804) * Pass options to constructor of Rouge lexer instead of `#lex` method; restores compatibility with Rouge >= 3.4 (#3336) * Don't clobber `cgi-style` options on language when enabling `start_inline` option on the Rouge PHP lexer (#3336) * Fix parsing of wrapped link and xref text, including when an attrlist signature is detected (#3331) * Restore deprecated writable number property on `AbstractBlock` * Always use title as xreftext if target block has an empty caption, regardless of `xrefstyle` value (#3745) * Allow a bibliography reference to be used inside a footnote (#3325) * Fix bottom margin collapsing on AsciiDoc table cell (#3370) * Remove excess hard line break in multi-line AsciiMath blocks (#3407) * Only strip trailing spaces from lines of AsciiDoc include file (#3436) * Remove 
errant optional flag in regexp for menu macro that breaks Asciidoctor.js (#3433) * Preserve repeating backslashes when generating man page output (#3456) * Honor percentage width specified on macro of inline SVG (#3464) * Removing leading and trailing blank lines in AsciiDoc include file to match assumption of parser (#3470) * Activate extensions when `:extensions` option is set, even if Extensions API is not yet loaded (#3570) * Don't activate global extensions if `:extensions` option is `false` (#3570) * Escape ellipsis at start of line in man page output (#3645) (*@jnavila*) * Don't register footnote with ID if a footnote is already registered with that ID (#3690) * Honor `start` attribute on ordered list in man page output (#3714) * Warn instead of crashing if SVG to inline is empty (#3638) (*@mogztter*) * Compute highlight line ranges on source block relative to value of `start` attribute (#3519) (*@mogztter*) * Prevent collapsible block from incrementing example number by assigning an empty caption (#3639) * Use custom init function for highlight.js to select the correct `code` elements (#3761) * Fix resolved value of `:to_dir` when both `:to_file` and `:to_dir` options are set to absolute paths (#3778) * Fix crash if value of `stylesheets` attribute contains a folder and the destination directory for the stylesheet does not exist (even when the `:mkdirs` option is set) (#3808) * Fix crash if value passed by API for `copycss` attribute is not a string (#3592) * Restore label in front of each bibliography entry in DocBook output that was dropped by fix for #3085 (#3782) * Apply max width to each top-level container instead of body in HTML output (#3513) * Don't apply `border-collapse:` separate to HTML for table blocks; fixes double border at boundary of `colspan` and `rowspan` (#3793) (*@ahus1*) * Don't remove right border on last table cell in row (#2563) * Rework table borders to leverage border collapsing (apply frame border to table, grid border to cells, and selectively override border on cells to accommodate frame) (#3387) == Compliance * Add support for `muted` option to self-hosted video (#3408) * Move ` ---- Now tell Asciidoctor to look for and load the docinfo file using the `docinfo` attribute: $ asciidoctor -a docinfo=shared document.adoc The `) end end end asciidoctor-2.0.20/docs/modules/syntax-highlighting/nav.adoc000066400000000000000000000002111443135032600240760ustar00rootroot00000000000000* xref:index.adoc[] ** xref:highlightjs.adoc[] ** xref:rouge.adoc[] ** xref:coderay.adoc[] ** xref:pygments.adoc[] ** xref:custom.adoc[] asciidoctor-2.0.20/docs/modules/syntax-highlighting/pages/000077500000000000000000000000001443135032600235675ustar00rootroot00000000000000asciidoctor-2.0.20/docs/modules/syntax-highlighting/pages/coderay.adoc000066400000000000000000000043061443135032600260500ustar00rootroot00000000000000= CodeRay :url-coderay: http://coderay.rubychan.de/ :url-coderay-gem: https://rubygems.org/gems/coderay {url-coderay}[CodeRay^] is an encoding-aware, syntax highlighter that supports the languages listed below. [%autowidth,cols="3*",grid=none,frame=none] |=== | C | C++ | Clojure | CSS | Delphi | diff | ERB | Go | Groovy | HAML | HTML | Java | JavaScript | JSON | Lua | PHP | Python | Ruby | Sass | SQL | Taskpaper | XML | YAML | |=== == Install CodeRay To use CodeRay with Asciidoctor, you need the {url-coderay-gem}[coderay gem^]. You can use one of the following methods to install CodeRay. 
Install using gem (all systems):: + $ gem install coderay Install using apt-get (Debian-based systems):: + $ sudo apt-get install ruby-coderay Install using dnf (Fedora-based systems):: + $ sudo dnf install rubygem-coderay == Activate CodeRay Once you've installed the gem, assign the `coderay` value to the `source-highlighter` attribute in the document header to activate it. [source,asciidoc] ---- :source-highlighter: coderay ---- == CodeRay attributes You can further customize the source block output with additional CodeRay attributes. coderay-css:: Controls what method is used for applying CSS to the tokens. Can be `class` or `style`. Default: `class`. coderay-linenums-mode:: Controls how line numbers are laid out. Can be `table` or `inline`. If line wrapping is enabled on preformatted blocks (i.e., `prewrap`), and you want to use line numbering on source blocks, you must set the value of this attribute to `inline` in order for the numbers to line up properly with their target lines. Default: `table`. .Customizing a source block with CodeRay line numbers [source,asciidoc] .... :source-highlighter: coderay :coderay-linenums-mode: inline [source%linenums,ruby] ---- ORDERED_LIST_KEYWORDS = { 'loweralpha' => 'a', 'lowerroman' => 'i', 'upperalpha' => 'A', 'upperroman' => 'I' #'lowergreek' => 'a' #'arabic' => '1' #'decimal' => '1' } ---- .... See the xref:html-backend:source-highlighting-stylesheets.adoc#coderay[CodeRay stylesheet section] to learn about the `coderay-css` attribute. //// Note: I'm not getting this to work. Need to come back and do some quality assurance. //// asciidoctor-2.0.20/docs/modules/syntax-highlighting/pages/custom.adoc000066400000000000000000000124501443135032600257330ustar00rootroot00000000000000= Custom Syntax Highlighter Adapter :navtitle: Custom Adapter :apidoc-base: {url-api-gems}/asciidoctor/{release-version}/Asciidoctor :apidoc-syntax-highlighter: {apidoc-base}/SyntaxHighlighter :apidoc-syntax-highlighter-base: {apidoc-syntax-highlighter}/Base :apidoc-syntax-highlighter-for: {apidoc-syntax-highlighter}/Factory#for-instance_method You can integrate additional syntax highlighters into Asciidoctor by implementing and registering a syntax highlighter adapter. You can either write a new adapter from scratch or you can extend and even replace one of the built-in adapters. [#new] == Create a new adapter To implement a new adapter, you must create a class that extends the {apidoc-syntax-highlighter-base}[Asciidoctor::SyntaxHighlighter::Base] class, register the adapter for a value of the `source-highlighter` attribute, and implement the required methods. Which methods are required depends on whether the adapter is for a client-side (runs in the browser) or build-time (runs when the document is converted) syntax highlighter. Here's an example of how to write and register a syntax highlighter adapter for the Prism.js syntax highlighting library. Prism.js is a client-side syntax highlighter, meaning it runs in the browser. That means the adapter only has to implement methods that pertain to client-side syntax highlighting, which include `format`, `docinfo?`, and `docinfo`. .Syntax highlighter adapter for Prism.js [,ruby] ---- include::example$prism-syntax-highlighter.rb[] ---- Save this code to a file named [.path]_prism-syntax-highlighter.rb_. 
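If you don't have that example file at hand, the following sketch gives the general shape of such an adapter.
It's a simplified approximation rather than the bundled example: the markup emitted by `format` is deliberately minimal (it ignores options such as `nowrap` that the built-in adapters honor), and the asset URLs are placeholders you'd point at your copy of Prism.js.

[,ruby]
----
require 'asciidoctor'

class PrismSyntaxHighlighter < Asciidoctor::SyntaxHighlighter::Base
  register_for 'prism'

  # Emit the <pre>/<code> structure for the source block; the language-* class
  # tells Prism.js which grammar to apply when it runs in the browser.
  def format node, lang, opts
    %(<pre class="highlight"><code class="language-#{lang || 'none'}">#{node.content}</code></pre>)
  end

  # Assets only need to be injected into the footer of a standalone document.
  def docinfo? location
    location == :footer
  end

  # Placeholder URLs; replace them with the location of your Prism.js assets.
  def docinfo location, doc, opts
    %(<link rel="stylesheet" href="https://cdn.example.org/prism.min.css">
<script src="https://cdn.example.org/prism.min.js"></script>
<script>Prism.highlightAll()</script>)
  end
end
----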
Then, require this file when invoking Asciidoctor and set `source-highlighter=prism` to activate it: $ asciidoctor -r ./prism-syntax-highlighter -a source-highlighter=prism document.adoc You can also define an adapter for a syntax highlighter that runs during conversion. We'll look at doing that while also extending a built-in adapter. [#extend] == Extend an existing adapter Instead of creating a new adapter, you can customize a built-in adapter by extending it, overriding its behavior, and optionally replacing it. To extend an adapter, you need to look up a reference to the built-in adapter by name using the {apidoc-syntax-highlighter-for}[Asciidoctor::SyntaxHighlighter.for] method, create a class that extends it, register the adapter with a unique name (or the same name, if you want to replace it), and override any methods that provide the behavior you want to modify. Here's the basic template for customizing an existing adapter: [,ruby] ---- class CustomAdapter < (Asciidoctor::SyntaxHighlighter.for 'rouge') register_for 'rouge' # override methods go here end ---- Let's look at some examples of how to customize a built-in adapter. === docinfo Let's override the adapter for Pygments to prevent it from adding a stylesheet to the HTML (presumably because the styles will be provided by a different stylesheet). .Extended syntax highlighter adapter for Pygments [,ruby] ---- include::example$extended-pygments-syntax-highlighter.rb[] ---- Save this code to a file named [.path]_extended-pygments-syntax-highlighter.rb_. Then, require this file when invoking Asciidoctor, setting `source-highlighter=pygments` to activate it, as you would normally do: $ asciidoctor -r ./extended-pygments-syntax-highlighter.rb -a source-highlighter=pygments document.adoc If, instead, you wanted to modify the built-in adapter to honor the location of a custom stylesheet specified by the `pygments-stylesheet` attribute, you can do so by extending the adapter and overriding the `docinfo` method. [,ruby] ---- include::example$pygments-syntax-highlighter-with-custom-stylesheet.rb[] ---- If you want to decorate built-in behavior, you can invoke the `super` method anywhere inside the method to delegate to the behavior provided by the built-in adapter. === highlight Let's say you always want lines to be numbered, regardless of the setting in the document. You can do so by overriding the `highlight` method, setting the `:number_lines` key on the `opts` argument, then delegating back to the built-in adapter using `super`. [,ruby] ---- include::example$always-number-lines.rb[] ---- === create_formatter (Rouge) When using Rouge as the syntax highlighter, you can customize the formatter by overriding the `create_formatter` method. This allows you to add custom logic for handling certain tokens in the source language. Let's assume that you want to look for bare URLs in code comments and translate them into links (i.e., autolinks), just like in AsciiDoc. You can do that by weaving extra logic into the formatter that looks for tokens in the `Comment` category and applies a substitution to the value. [,ruby] ---- include::example$autolink-urls-in-comments.rb[] ---- Since the formatter has access to all the tokens in the code identified by the syntax highlighter, this technique opens up a lot of possibilities. For example, you could look for the `Type` token in the `Keyword` category in Java code and create a link to the API docs. 
The `lang` argument to the `create_formatter` method lets you know the source language (e.g., `java`) to which the tokens belong. To study the logic you may be interesting in overriding, browse the code for the https://github.com/asciidoctor/asciidoctor/tree/{page-origin-refname}/lib/asciidoctor/syntax_highlighter[built-in syntax highlighter adapters]. asciidoctor-2.0.20/docs/modules/syntax-highlighting/pages/highlightjs.adoc000066400000000000000000000051711443135032600267270ustar00rootroot00000000000000= Highlight.js :url-highlightjs: https://highlightjs.org/ :url-highlightjs-lang: https://highlightjs.org/download/ :url-highlightjs-cdn: https://cdnjs.com/libraries/highlight.js {url-highlightjs}[Highlight.js^] is a popular client-side syntax highlighter that supports a broad range of {url-highlightjs-lang}[languages^]. == Activate highlight.js To activate highlight.js, add the following attribute entry to the header of your AsciiDoc file: [source,asciidoc] ---- :source-highlighter: highlight.js ---- By default, Asciidoctor will link to the highlight.js library and stylesheet hosted on {url-highlightjs-cdn}[cdnjs^]. The version of the highlight.js library Asciidoctor loads from the CDN only includes support for languages in the common language bundle (apache, bash, coffeescript, cpp, cs, css, diff, http, ini, java, javascript, json, makefile, markdown, nginx, objectivec, perl, php, properties, python, ruby, shell, sql, xml, and yaml). == Load support for additional languages To load additional languages supported by highlight.js, list them in the value of the `highlightjs-languages` document attribute. Separate each language by a comma followed by an optional space. The common highlight.js bundle does not include support for Rust and Swift. Let's set the `highlightjs-languages` attribute so the HTML converter loads support for them into the HTML page. [source,asciidoc] ---- :source-highlighter: highlight.js :highlightjs-languages: rust, swift ---- The `highlightjs-languages` attribute only applies when generating a standalone HTML document (i.e., backend: html, standalone: true). It does not work when generating embedded HTML, which is used by site generator integrations such as Antora. == Use a custom highlight.js library If you'd rather use a personal copy of highlight.js instead of the one hosted on the CDN, follow these steps: . Create your custom bundle on the {url-highlightjs-lang}[download page^]. . Download and unpack the zip into a folder called [.path]_highlight_ adjacent to your AsciiDoc file (or in the output directory, if different) . Rename [.path]_highlight/highlight.pack.js_ to [.path]_highlight/highlight.min.js_ . Rename [.path]_highlight/styles/github.css_ to [.path]_highlight/styles/github.min.css_ ** Replace `github` with the name of the `highlightjs-theme` you are using, if different. . Add the attribute entry `:highlightjsdir: highlight` to the header of your AsciiDoc file. ** Alternatively, you can pass the `-a highlightjsdir=highlight` flag when invoking the Asciidoctor CLI. The output file will use your personal copy of the highlight.js library and stylesheet instead of the one hosted on cdnjs. asciidoctor-2.0.20/docs/modules/syntax-highlighting/pages/index.adoc000066400000000000000000000135711443135032600255350ustar00rootroot00000000000000= Syntax Highlighting AsciiDoc defines a style of listing block known as a xref:asciidoc:verbatim:source-blocks.adoc[source block] for adding source code snippets intended to be colorized by a syntax highlighter to a document. 
Here's a simple example of a source block: [source,asciidoc] .... [source,ruby] ---- puts "Hello, World!" ---- .... Here's how we expect it to appear: [source,ruby] ---- puts "Hello, World!" ---- It's up to an AsciiDoc processor such as Asciidoctor to apply the syntax highlighting to this source block, a process referred to as xref:asciidoc:verbatim:source-highlighter.adoc[source highlighting]. Asciidoctor provides adapters that integrate with several popular syntax highlighter libraries to perform this task. It also provides an interface for implementing a custom syntax highlighter adapter. == Syntax highlighter types Asciidoctor supports two types of syntax highlighters: client-side and build-time. Let's explore each type and how they work. A [.term]*client-side syntax highlighter* performs syntax highlighting in the browser as the page is loading. Asciidoctor does not invoke the syntax highlighter itself. Instead, it focuses on adding the assets to the generated HTML so the browser can load the syntax highlighter and run it. For this type of syntax highlighter, Asciidoctor passes the contents of the source block through to the output as is. It also adds metadata to the element so that the syntax highlighter knows to highlight it and which language it is. Unfortunately, Asciidoctor does not process callout numbers in the source block in this case, so they may cause the syntax highlighter to get tripped up. A [.term]*build-time syntax highlighter* performs syntax highlighting during AsciiDoc conversion. Asciidoctor does invoke the syntax highlighter in this case. It also takes care of hiding the callout numbers from the syntax highlighter, ensuring they are put back in the proper place afterwards. What Asciidoctor emits into the output is the result produced by the syntax highlighter, which are the tokens enclosed in `` elements to apply color and other formatting (either inline or via CSS classes). These syntax highlighters tend to support more features because Asciidoctor has greater control over the process. === Client-side vs build-time There are benefits and drawbacks of each type. The benefit of a client-side syntax highlighter is that does not require installing any additional libraries. It also makes conversion faster and makes it produce smaller output since the syntax highlighting is deferred until page load. The main drawback is that callouts in the source block can be mangled by the syntax highlighter or confuse it. The benefits of a build-time syntax highlighter are that you have more control over syntax highlighting and can enable additional features such as line numbers and line highlighting. The main drawback is that it requires installing an extra library, it slows down conversion, and causes the output to be larger. You should try each syntax highlighter and find the one that works best for you. == Built-in syntax highlighter adapters [%autowidth] |=== |Type |Syntax Highlighter |Required Gem |Compatible Converters h|Client-side |Highlight.js |_n/a_ |HTML, Reveal.js .3+h|Build-time |CodeRay |coderay (>= 1.1) |HTML, PDF, EPUB3, Reveal.js |Pygments |pygments.rb (>= 1.2) |HTML, PDF, EPUB3, Reveal.js |Rouge |rouge (>= 2) |HTML, PDF, EPUB3, Reveal.js |=== Asciidoctor does not apply syntax highlighting when generating DocBook. The assumption is that this is a task the DocBook toolchain can handle. The DocBook converter generates a `` element for the source block and passes through the source language as specified in the AsciiDoc. 
It's then up to the DocBook toolchain to apply syntax highlighting to the contents of that tag. You can explore these integrations in depth on the xref:highlightjs.adoc[], xref:rouge.adoc[], xref:pygments.adoc[], and xref:coderay.adoc[] pages. You can also create your own integration by making a xref:custom.adoc[]. == Custom subs on source blocks You should not mix syntax highlighting with AsciiDoc text formatting (i.e., the `quotes` and `macros` substitutions). In many converters, these two operations are mutually exclusive. .Improper use of custom substitutions on a source block [source,asciidoc] .... [,java,subs=+quotes] ---- interface OrderRepository extends CrudRepository { *List* findByCategory(String category); Order findById(long id); } ---- .... The additional markup introduced by AsciiDoc text formatting may confuse the syntax highlighter and lead to unexpected results. While it may work in some syntax highlighters in the HTML backend (which perhaps know how to work around the formatting tags), it will most certainly fail when converting to other formats such as PDF. If you're going to customize the substitutions on a source block using the `subs` attribute, you should limit those substitutions to attribute replacements (`attributes`). .Valid use of custom substitutions on a source block [source,asciidoc] .... [,java,subs=attributes+] ---- interface {model}Repository extends CrudRepository<{model},Long> { {model} findById(long id); } ---- .... If you still need to emphasize certain tokens in the block of code, you should do so by creating a custom lexer or formatter for the syntax highlighting library that understands these additional semantics and perhaps even hints. In other words, work through the syntax highlighter so it's added during the highlighting process, giving the syntax highlighter full knowledge as to what's going on. Another option is to use a xref:custom.adoc[custom syntax highlighter adapter]. Before trying to get the syntax highlighter to recognize new tokens, make sure it doesn't already recognize them. If it does, it may just be a matter of customizing the syntax highlighter theme to apply different formatting to those tokens. asciidoctor-2.0.20/docs/modules/syntax-highlighting/pages/pygments.adoc000066400000000000000000000142731443135032600262740ustar00rootroot00000000000000= Pygments :url-pygments: https://pygments.org :url-pygments-lang: https://pygments.org/languages/ :url-python: https://www.python.org :url-pygments-gem: https://rubygems.org/gems/pygments.rb {url-pygments}[Pygments^] is a popular syntax highlighter that supports a broad range of {url-pygments-lang}[programming and template languages^]. == Install Pygments In order to use Pygments with Asciidoctor, you need {url-python}[Python] and the {url-pygments-gem}[pygments.rb gem^]. The pygments.rb gem manages calls to Pygments, which is an external program that runs using Python. TIP: You do not need to install Pygments itself. It comes bundled with the pygments.rb gem. IMPORTANT: You must have Python installed to use pygments.rb. The version of Python required depends on which pygments.rb release you're using: * pygments.rb 1.x requires Python 2. Check that you have a `python2` (Linux), `python` (macOS), or `py -2` (Windows) executable on your PATH. (On macOS, verify that the `python` executable uses Python 2 by running `python -V`). * pygments.rb 2.x requires Python 3. Check that you have a `python3` (Linux/macOS) or `py -3` (Windows) executable on your PATH. 
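If you're not sure which interpreters are available, a quick check from the command line settles it (Windows users would run `py -2 -V` or `py -3 -V` instead):

[,console]
----
$ python2 -V # needed for pygments.rb 1.x
$ python3 -V # needed for pygments.rb 2.x
----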
.Installing Python and the pygments.rb gem via the CLI (cross platform) [,console] ---- $ "`\which apt-get || \which dnf || \which yum || \which brew`" install python # <.> $ gem install pygments.rb # <.> ---- <.> Install Python using your package manager <.> Install the pygments.rb gem == Activate Pygments Once you've installed these libraries, assign `pygments` to the `source-highlighter` attribute in your document's header. [,asciidoc] ---- :source-highlighter: pygments ---- == Pygments attributes You can further customize the source block output with additional Pygments attributes. pygments-style:: Sets the name of the color theme Pygments uses. To see the list of available style names, see <>. Default: `pastie`. pygments-css:: Controls what method is used for applying CSS to the tokens. Can be `class` (CSS classes) or `style` (inline styles). See the xref:html-backend:source-highlighting-stylesheets.adoc#pygments[Pygments stylesheet section] to learn more about how the value `class` is handled. Default: `class`. pygments-linenums-mode:: Controls how line numbers are arranged when line numbers are enabled on the source block. Can be `table` or `inline`. If line wrapping is enabled on preformatted blocks (i.e., `prewrap`), and you want to use line numbering on source blocks, you must set the value of this attribute to `inline` in order for the numbers to line up properly with their target lines. Default: `table`. .Customizing a source block with Pygments attributes [source,asciidoc] .... :source-highlighter: pygments :pygments-style: manni :pygments-linenums-mode: inline [source%linenums,ruby] ---- ORDERED_LIST_KEYWORDS = { 'loweralpha' => 'a', 'lowerroman' => 'i', 'upperalpha' => 'A', 'upperroman' => 'I' #'lowergreek' => 'a' #'arabic' => '1' #'decimal' => '1' } ---- .... //// .Result: Source block using inline line numbers and the manni theme ==== image::custom-pygments.png[Line numbers and a custom Pygments theme for a source block.] ==== //// [#listing-pygments-style-names] === Available Pygments style names To list the available Pygments styles, run the following command in a terminal: $ $(dirname $(gem which pygments.rb))/../vendor/pygments-main/pygmentize -L styles The pygments.rb gem uses a bundled version of Pygments (often ahead of the latest release). This command ensures that you are invoking the `pygmentize` command from the Pygments used by that gem. [#pygments-timeout] == Pygments timeout (pygments.rb 1.x only) If you're using pygments.rb 1.x, you may need to adjust the timeout. This configuration step is not necessary if you're using pygments.rb 2.x with Python 3. Since Pygments is an external program, the call to that command in pygments.rb 1.x is managed by a timeout to safeguard against a hanging process. By default, this timeout is 8 seconds. If you discover that the call is failing to complete within this timeout period, you can increase the timeout (in seconds) by setting the `MENTOS_TIMEOUT` environment variable. export MENTOS_TIMEOUT=30 Now the call to Pygments (via pygments.rb 1.x) will be allocated up to 30 seconds to complete. == Use a custom Pygments installation If you already have Pygments installed on your system, you want to use your own fork, or you want to customize how Pygments is configured, you can get Asciidoctor to use a custom version of Pygments instead of the one bundled with the pygments.rb gem. First, install your own version of Pygments. 
You can do this, for instance, by cloning the upstream Pygments repository: $ hg clone https://bitbucket.org/birkenfeld/pygments-main pygments Find the directory that contains the file [.path]_pygmentize_ or the [.path]_Makefile_. That's your Pygments installation path. Make note of it. Next, create a script to run _before_ invoking Asciidoctor for the first time. Let's call it [.path]_pygments_init.rb_. Populate the script with the following content: .pygments_init.rb [source,ruby] ---- require 'pygments' # use a custom Pygments installation (directory that contains pygmentize) Pygments.start '/path/to/pygments' # example of registering a missing or additional lexer #Pygments::Lexer.create name: 'Turtle', aliases: ['turtle'], # filenames: ['*.ttl'], mimetypes: ['text/turtle', 'application/x-turtle'] ---- TIP: You could enhance this script to read the Pygments installation path from an environment variable (or configuration file). Now just require this script before your invoke Asciidoctor the first time. When using the `asciidoctor` command, pass the script using the `-r` flag: $ asciidoctor -r ./pygments_init.rb document.adoc When using the Asciidoctor API, require the script using `require` or `require_relative`: [source,ruby] ---- require 'asciidoctor' require_relative './pygments_init.rb' Asciidoctor.convert_file 'document.adoc', safe: :safe ---- Now Asciidoctor is using your custom installation of Pygments instead of the one bundled with the pygments.rb gem. Alternately, you can xref:custom.adoc#extend[extend the adapter for Pygments] and put this logic inside that adapter. asciidoctor-2.0.20/docs/modules/syntax-highlighting/pages/rouge.adoc000066400000000000000000000053471443135032600255510ustar00rootroot00000000000000= Rouge :url-rouge: http://rouge.jneen.net :url-rouge-gem: https://rubygems.org/gems/rouge :url-rouge-repo: https://github.com/rouge-ruby/rouge/tree/HEAD/lib/rouge/themes {url-rouge}[Rouge^] is an extendable code highlighter written in Ruby that supports a vast array of languages. == Install Rouge To use Rouge with Asciidoctor, you need the {url-rouge-gem}[rouge gem^]. You can use one of the following methods to install Rouge. Install using `gem` (all systems):: + $ gem install rouge Install using `apt-get` (Debian-based systems):: + $ sudo apt-get install ruby-rouge Install using `dnf` (Fedora-based systems):: + $ sudo dnf install rubygem-rouge Install using `pacman` (Arch Linux-based systems):: + $ sudo pacman -S ruby-rouge == Assign rouge to source-highlighter Once you've installed the gem, assign the `rouge` value to the `source-highlighter` attribute in the document header to activate it. [source,asciidoc] ---- :source-highlighter: rouge ---- == Rouge attributes You can further customize the source block output with additional Rouge attributes. rouge-css:: Controls what method is used for applying CSS to the tokens. Can be `class` or `style`. Default: `class`. [.line-through]#rouge-linenums-mode# (not currently implemented, see https://github.com/asciidoctor/asciidoctor/issues/3641[#3641]):: Controls how line numbers are laid out. Can be `table` or `inline`. If line wrapping is enabled on preformatted blocks (i.e., `prewrap`), and you want to use line numbering on source blocks, you must set the value of this attribute to `inline` in order for the numbers to line up properly with their target lines. Default: `table`. rouge-style:: Controls the color theme used to for highlighting. You can find the list of themes in the {url-rouge-repo}[Rouge code repository^]. 
Typically, you set these attributes using the CLI or API (e.g., `-a source-highlighter=rouge -a rouge-style=colorful`) so you don't have to define them in each document. However, you can define them per document as shown in the following example. .Activating the Rouge syntax highlighter and applying a different style [source,asciidoc] .... :source-highlighter: rouge :rouge-style: monokai [source,ruby] ---- puts "Hello, Rouge!" ---- .... You can enable line numbering using the linenums option on the block. .Customizing a source block with Rouge line numbers [source,asciidoc] .... :source-highlighter: rouge [source%linenums,ruby] ---- ORDERED_LIST_KEYWORDS = { 'loweralpha' => 'a', 'lowerroman' => 'i', 'upperalpha' => 'A', 'upperroman' => 'I' #'lowergreek' => 'a' #'arabic' => '1' #'decimal' => '1' } ---- .... It's also possible to enable linenums for all source blocks by setting the `source-linenums-option` attribute on the document. asciidoctor-2.0.20/docs/modules/tooling/000077500000000000000000000000001443135032600201525ustar00rootroot00000000000000asciidoctor-2.0.20/docs/modules/tooling/nav.adoc000066400000000000000000000000241443135032600215620ustar00rootroot00000000000000* xref:index.adoc[] asciidoctor-2.0.20/docs/modules/tooling/pages/000077500000000000000000000000001443135032600212515ustar00rootroot00000000000000asciidoctor-2.0.20/docs/modules/tooling/pages/index.adoc000066400000000000000000000152001443135032600232060ustar00rootroot00000000000000= AsciiDoc Tooling :url-chrome-extension: https://chrome.google.com/webstore/detail/asciidoctorjs-live-previe/iaalpfgpbocpdfblpnhhgllgbdbchmia :url-edge-addon: https://microsoftedge.microsoft.com/addons/detail/asciidoctorjs-live-previ/pefkelkanablhjdekgdahplkccnbdggd :url-firefox-addon: https://addons.mozilla.org/en/firefox/addon/asciidoctorjs-live-preview :url-opera-extension: https://addons.opera.com/en/extensions/details/asciidoctorjs-live-preview/ :url-asciidocfx: https://www.asciidocfx.com :url-asciidocfx-docs: https://www.asciidocfx.com/#truehow-to-install-asciidocfx :url-eclipse-marketplace: https://marketplace.eclipse.org/content/asciidoctor-editor :url-eclipse-plugin-github: https://github.com/de-jcup/eclipse-asciidoctor-editor :url-intellij-plugin-quickstart: https://intellij-asciidoc-plugin.ahus1.de/docs/users-guide/quick-start.html :url-intellij-plugin-features: https://intellij-asciidoc-plugin.ahus1.de/docs/users-guide/features.html :url-scite: https://www.scintilla.org/SciTE.html Since AsciiDoc syntax is just plain text, you can write an AsciiDoc document using any text editor. You don't need complex word processing programs like Microsoft Word or Google Docs. In fact, you shouldn't use these programs because they add cruft to your document that you can't see that makes conversion tedious. Here's an overview of the different ways to preview and edit your AsciiDoc documents. == Hosted git repositories The two most popular git repository hosts, GitHub and GitLab, support editing and previewing AsciiDoc files. In order to activate this support, the file must have a supported AsciiDoc extension, such as .adoc. When you browse to an AsciiDoc file in the repository view, you will see an HTML preview of the AsciiDoc content. That preview is powered by Asciidoctor. The HTML that's rendered is the embedded output produced by the built-in HTML converter. That means it won't look like a standalone HTML document generated by Asciidoctor. 
Rather, the HTML is styled to match the theme of the hosting service and sanitized, so it does not impact the rendering of the page. That means that certain features may not be available. Both services apply syntax highlighting to source blocks. Both services also add a floating anchor link next to section titles. You can deep link to a specific section or other anchor by adding a fragment identifier to the URL. The AsciiDoc preview on GitLab currently supports more AsciiDoc features than on GitHub. On GitLab, you can find support for include directives, admonition icons, diagrams, and STEM expressions. GitHub, on the other hand, provides a table of contents browser, whereas on GitLab you must add a TOC explicitly using the `toc` attribute. On GitHub, if you want to see the source of the AsciiDoc file instead of the preview, append `?plain=1` to the end of the URL. If you click the Edit button, you will see the syntax highlighted source of the AsciiDoc file. In edit mode, you can view a preview of the modified source before you commit the changes. GitLab allows you to edit multiple files at once using its Web IDE. == Web browser add-ons (preview only) To preview an AsciiDoc document in a web browser, install the {url-chrome-extension}[Chrome extension^], the {url-edge-addon}[Edge add-on^], the {url-firefox-addon}[Firefox add-on^], or the {url-opera-extension}[Opera extension^] (all produced from the same code base). Then you can see the AsciiDoc file rendered as HTML just by visiting it. == IDEs and text editors The following IDEs and text editors support the AsciiDoc syntax and most provide document preview rendering with Asciidoctor. === AsciiDocFX {url-asciidocfx}[AsciiDoc FX^] is an editor and terminal emulator written with JavaFX 8. See its documentation for {url-asciidocfx-docs}[download and installation instructions^]. === Atom [IMPORTANT] ==== The Atom editor's official site has announced that Atom is being sunset at the end of 2022. See https://github.blog/2022-06-08-sunsetting-atom/[Sunsetting Atom] ==== Install https://atom.io/[Atom^]. Then from the Atom editor menus, navigate to menu:Atom[Preferences]. From there, open the menu:Packages[] tab and install: https://atom.io/packages/asciidoc-preview[AsciiDoc Preview^] :: enables live preview https://atom.io/packages/language-asciidoc[AsciiDoc Language^] :: enables syntax highlighting (AsciiDoc language support) https://atom.io/packages/asciidoc-image-helper[AsciiDoc Image Helper^] :: provides the ability to paste images from the clipboard https://atom.io/packages/autocomplete-asciidoc[AsciiDoc Autocomplete^] :: automatically completes AsciiDoc language items https://atom.io/packages/asciidoc-assistant[AsciiDoc Assistant^] :: Installs useful components to Atom for editing AsciiDoc files (including the above packages) === Brackets Install http://brackets.io/[Brackets^]. Then from the Brackets file menu, open the extension manager. Browse available extensions and install `AsciiDoc Preview`. More information: * https://github.com/asciidoctor/brackets-asciidoc-preview[AsciiDoc Preview for Brackets^] === Eclipse Install the open source plugin `Asciidoctor Editor` from the {url-eclipse-marketplace}[Eclipse Marketplace^] or visit the {url-eclipse-plugin-github}[project repository^]. === IntelliJ IDEA The community AsciiDoc plugin adds support to edit AsciiDoc files in the IDE with syntax highlighting, auto-completion and a live preview. It includes support for Antora and Spring REST Docs and creates PDFs using Asciidoctor PDF. 
It works with the free community editions as well as with the paid editions of IntelliJ IDEA, CLion, PhpStorm, RubyMine, Android Studio etc. {url-intellij-plugin-quickstart}[Install the AsciiDoc plugin^] or {url-intellij-plugin-features}[learn more about its features^]. === SciTE Install {url-scite}[SciTE^] 5.2.3 or later. To enable the AsciiDoc lexer, follow these steps: . Edit the user properties file and remove `asciidoc` from the list in `imports.exclude`. . Restart SciTE. . SciTE will automatically apply AsciiDoc syntax highlighting to files ending in `.adoc` and `.asciidoc`. SciTE does not provide document preview rendering like other editors listed on this page. === Visual Studio Code Visual Studio Code provides rich language support for AsciiDoc and a live preview as you type. . Install https://code.visualstudio.com/[Visual Studio Code^]. . Launch Visual Studio Code and open the Quick Open dialog by pressing kbd:[Ctrl,P]. . Type the following command to locate and install the AsciiDoc extension (by João Pinto): + ext install asciidoctor.asciidoctor-vscode More information: * https://marketplace.visualstudio.com/items?itemName=asciidoctor.asciidoctor-vscode[AsciiDoc extension for Visual Studio Code^] asciidoctor-2.0.20/features/000077500000000000000000000000001443135032600157155ustar00rootroot00000000000000asciidoctor-2.0.20/features/open_block.feature000066400000000000000000000036011443135032600214050ustar00rootroot00000000000000# language: en Feature: Open Blocks In order to group content in a generic container As a writer I want to be able to wrap content in an open block Scenario: Render an open block that contains a paragraph to HTML Given the AsciiDoc source """ -- A paragraph in an open block. -- """ When it is converted to html Then the result should match the HTML source """
<div class="openblock">
<div class="content">
<div class="paragraph">
<p>A paragraph in an open block.</p>
</div>
</div>
</div>
""" Scenario: Render an open block that contains a paragraph to DocBook Given the AsciiDoc source """ -- A paragraph in an open block. -- """ When it is converted to docbook Then the result should match the XML source """ A paragraph in an open block. """ Scenario: Render an open block that contains a paragraph to HTML (alt) Given the AsciiDoc source """ -- A paragraph in an open block. -- """ When it is converted to html Then the result should match the HTML structure """ .openblock .content .paragraph p A paragraph in an open block. """ Scenario: Render an open block that contains a paragraph to DocBook (alt) Given the AsciiDoc source """ -- A paragraph in an open block. -- """ When it is converted to docbook Then the result should match the XML structure """ simpara A paragraph in an open block. """ Scenario: Render an open block that contains a list to HTML Given the AsciiDoc source """ -- * one * two * three -- """ When it is converted to html Then the result should match the HTML structure """ .openblock .content .ulist ul li: p one li: p two li: p three """ asciidoctor-2.0.20/features/pass_block.feature000066400000000000000000000024021443135032600214100ustar00rootroot00000000000000# language: en Feature: Open Blocks In order to pass content through unprocessed As a writer I want to be able to mark passthrough content using a pass block Scenario: Render a pass block without performing substitutions by default to HTML Given the AsciiDoc source """ :name: value ++++
<p>
{name}
</p>
image:tiger.png[] ++++ """ When it is converted to html Then the result should match the HTML source """
<p>
{name}
</p>
image:tiger.png[] """ Scenario: Render a pass block without performing substitutions by default to DocBook Given the AsciiDoc source """ :name: value ++++ {name} image:tiger.png[] ++++ """ When it is converted to docbook Then the result should match the XML source """ {name} image:tiger.png[] """ Scenario: Render a pass block performing explicit substitutions to HTML Given the AsciiDoc source """ :name: value [subs="attributes,macros"] ++++
<p>
{name}
</p>
image:tiger.png[] ++++ """ When it is converted to html Then the result should match the HTML source """
<p>
value
</p>
tiger """ asciidoctor-2.0.20/features/step_definitions.rb000066400000000000000000000031621443135032600216120ustar00rootroot00000000000000# frozen_string_literal: true ASCIIDOCTOR_FEATURES_DIR = File.absolute_path __dir__ ASCIIDOCTOR_LIB_DIR = ENV['ASCIIDOCTOR_LIB_DIR'] || File.join(ASCIIDOCTOR_FEATURES_DIR, '../lib') require 'simplecov' if ENV['COVERAGE'] == 'true' require File.join ASCIIDOCTOR_LIB_DIR, 'asciidoctor' Dir.chdir Asciidoctor::ROOT_DIR require 'minitest' require 'tilt' require 'slim' assertions = Class.new do include Minitest::Assertions attr_accessor :assertions def initialize @assertions = 0 end end.new Given %r/the AsciiDoc source/ do |source| @source = source end When %r/it is converted to html/ do @output = Asciidoctor.convert @source end When %r/it is converted to docbook/ do @output = Asciidoctor.convert @source, backend: :docbook end Then %r/the result should (match|contain) the (HTML|XML) source/ do |matcher, _, expected| matcher == 'match' ? (assertions.assert_equal expected, @output) : (assertions.assert_includes @output, expected) end Then %r/the result should (match|contain) the (HTML|XML) structure/ do |matcher, format, expected| result = @output if format == 'HTML' options = { format: :html, disable_escape: true, sort_attrs: false } else # format == 'XML' options = { format: :xhtml, disable_escape: true, sort_attrs: false } result = result.gsub '"/>', '" />' if result.include? '"/>' end result = Slim::Template.new(options) { result.each_line.map {|l| (l.start_with? '<') ? l : %(|#{l}) }.join }.render expected = Slim::Template.new(options) { expected }.render matcher == 'match' ? (assertions.assert_equal expected, result) : (assertions.assert_includes result, expected) end asciidoctor-2.0.20/features/text_formatting.feature000066400000000000000000000031611443135032600225110ustar00rootroot00000000000000# language: en Feature: Text Formatting In order to apply formatting to the text As a writer I want to be able to markup inline text with formatting characters Scenario: Convert text that contains superscript and subscript characters Given the AsciiDoc source """ _v_~rocket~ is the value ^3^He is the isotope log~4~x^n^ is the expression M^me^ White is the address the 10^th^ point has coordinate (x~10~, y~10~) """ When it is converted to html Then the result should match the HTML source """
<div class="paragraph">
<p><em>v</em><sub>rocket</sub> is the value
<sup>3</sup>He is the isotope
log<sub>4</sub>x<sup>n</sup> is the expression
M<sup>me</sup> White is the address
the 10<sup>th</sup> point has coordinate (x<sub>10</sub>, y<sub>10</sub>)</p>
</div>
""" Scenario: Convert text that has ex-inline literal formatting Given the AsciiDoc source """ Use [x-]`{asciidoctor-version}` to print the version of Asciidoctor. """ When it is converted to html Then the result should match the HTML source """
<div class="paragraph">
<p>Use <code>{asciidoctor-version}</code> to print the version of Asciidoctor.</p>
</div>
""" Scenario: Convert text that has ex-inline monospaced formatting Given the AsciiDoc source """ :encoding: UTF-8 The document is assumed to be encoded as [x-]+{encoding}+. """ When it is converted to html Then the result should match the HTML source """
<div class="paragraph">
<p>The document is assumed to be encoded as <code>UTF-8</code>.</p>
</div>
""" asciidoctor-2.0.20/features/xref.feature000066400000000000000000000665571443135032600202610ustar00rootroot00000000000000# language: en Feature: Cross References In order to create cross references between sections and blocks in the current or neighboring document As a writer I want to be able to use the cross reference macro to compose these references Scenario: Create a cross reference to a block that has explicit reftext Given the AsciiDoc source """ :xrefstyle: full See <> to learn how it works. .Parameterized Type [[param-type-t,that "" thing]] **** This sidebar describes what that thing is all about. **** """ When it is converted to html Then the result should contain the HTML structure """ |See a<> href='#param-type-t' that "<T>" thing |to learn how it works. """ When it is converted to docbook Then the result should match the XML structure """ simpara |See xref<> linkend='param-type-t'/ |to learn how it works. sidebar xml:id='param-type-t' xreflabel='that "<T>" thing' title Parameterized Type <T> simpara This sidebar describes what that <T> thing is all about. """ Scenario: Create a cross reference to a block that has explicit reftext with formatting Given the AsciiDoc source """ :xrefstyle: full There are cats, then there are the <>. [[big-cats,*big* cats]] == Big Cats So ferocious. """ When it is converted to html Then the result should contain the HTML structure """ |There are cats, then there are the a< href='#big-cats' big cats |. """ When it is converted to docbook Then the result should match the XML structure """ simpara |There are cats, then there are the xref< linkend='big-cats'/ |. section xml:id='big-cats' xreflabel='big cats' title Big Cats simpara So ferocious. """ Scenario: Create a full cross reference to a numbered section Given the AsciiDoc source """ :sectnums: :xrefstyle: full See <> to find a complete list of features. == About [#sect-features] === Features All the features are listed in this section. """ When it is converted to html Then the result should contain the HTML structure """ |See a<> href='#sect-features' Section 1.1, “Features” |to find a complete list of features. """ Scenario: Create a short cross reference to a numbered section Given the AsciiDoc source """ :sectnums: :xrefstyle: short See <> to find a complete list of features. [#sect-features] == Features All the features are listed in this section. """ When it is converted to html Then the result should contain the HTML structure """ |See a<> href='#sect-features' Section 1 |to find a complete list of features. """ Scenario: Create a basic cross reference to an unnumbered section Given the AsciiDoc source """ :xrefstyle: full See <> to find a complete list of features. [#sect-features] == Features All the features are listed in this section. """ When it is converted to html Then the result should contain the HTML structure """ |See a<> href='#sect-features' Features |to find a complete list of features. """ Scenario: Create a basic cross reference to a numbered section when the section reference signifier is disabled Given the AsciiDoc source """ :sectnums: :xrefstyle: full :!section-refsig: See <> to find a complete list of features. [#sect-features] == Features All the features are listed in this section. """ When it is converted to html Then the result should contain the HTML structure """ |See a<> href='#sect-features' 1, “Features” |to find a complete list of features. 
""" Scenario: Create a full cross reference to a numbered chapter Given the AsciiDoc source """ :doctype: book :sectnums: :xrefstyle: full See <> to find a complete list of features. [#chap-features] == Features All the features are listed in this chapter. """ When it is converted to html Then the result should contain the HTML structure """ |See a<> href='#chap-features' Chapter 1, Features |to find a complete list of features. """ Scenario: Create a short cross reference to a numbered chapter Given the AsciiDoc source """ :doctype: book :sectnums: :xrefstyle: short See <> to find a complete list of features. [#chap-features] == Features All the features are listed in this chapter. """ When it is converted to html Then the result should contain the HTML structure """ |See a<> href='#chap-features' Chapter 1 |to find a complete list of features. """ Scenario: Create a basic cross reference to a numbered chapter Given the AsciiDoc source """ :doctype: book :sectnums: :xrefstyle: basic See <> to find a complete list of features. [#chap-features] == Features All the features are listed in this chapter. """ When it is converted to html Then the result should contain the HTML structure """ |See a<> href='#chap-features' Features |to find a complete list of features. """ Scenario: Create a basic cross reference to an unnumbered chapter Given the AsciiDoc source """ :doctype: book :xrefstyle: full See <> to find a complete list of features. [#chap-features] == Features All the features are listed in this chapter. """ When it is converted to html Then the result should contain the HTML structure """ |See a<> href='#chap-features' Features |to find a complete list of features. """ Scenario: Create a cross reference to a chapter using a custom chapter reference signifier Given the AsciiDoc source """ :doctype: book :sectnums: :xrefstyle: full :chapter-refsig: Ch See <> to find a complete list of features. [#chap-features] == Features All the features are listed in this chapter. """ When it is converted to html Then the result should contain the HTML structure """ |See a<> href='#chap-features' Ch 1, Features |to find a complete list of features. """ Scenario: Create a full cross reference to a numbered part Given the AsciiDoc source """ :doctype: book :sectnums: :partnums: :xrefstyle: full [preface] = Preface See <> for an introduction to the language. [#p1] = Language == Syntax This chapter covers the syntax. """ When it is converted to html Then the result should contain the HTML structure """ |See a<> href='#p1' Part I, “Language” |for an introduction to the language. """ Scenario: Create a short cross reference to a numbered part Given the AsciiDoc source """ :doctype: book :sectnums: :partnums: :xrefstyle: short [preface] = Preface See <> for an introduction to the language. [#p1] = Language == Syntax This chapter covers the syntax. """ When it is converted to html Then the result should contain the HTML structure """ |See a<> href='#p1' Part I |for an introduction to the language. """ Scenario: Create a basic cross reference to a numbered part Given the AsciiDoc source """ :doctype: book :sectnums: :partnums: :xrefstyle: basic [preface] = Preface See <> for an introduction to the language. [#p1] = Language == Syntax This chapter covers the syntax. """ When it is converted to html Then the result should contain the HTML structure """ |See a<> href='#p1' Language |for an introduction to the language. 
""" Scenario: Create a basic cross reference to an unnumbered part Given the AsciiDoc source """ :doctype: book :sectnums: :xrefstyle: full [preface] = Preface See <> for an introduction to the language. [#p1] = Language == Syntax This chapter covers the syntax. """ When it is converted to html Then the result should contain the HTML structure """ |See a<> href='#p1' Language |for an introduction to the language. """ @wip Scenario: Create a cross reference to a part using a custom part reference signifier Given the AsciiDoc source """ :doctype: book :sectnums: :partnums: :xrefstyle: full :part-refsig: P [preface] = Preface See <> for an introduction to the language. [#p1] = Language == Syntax This chapter covers the syntax. """ When it is converted to html Then the result should contain the HTML structure """ |See a<> href='#p1' P I, “Language” |for an introduction to the language. """ Scenario: Create a full cross reference to a numbered appendix Given the AsciiDoc source """ :sectnums: :xrefstyle: full See <> to find a complete list of features. [appendix#app-features] == Features All the features are listed in this appendix. """ When it is converted to html Then the result should contain the HTML structure """ |See a<> href='#app-features' Appendix A, Features |to find a complete list of features. """ Scenario: Create a short cross reference to a numbered appendix Given the AsciiDoc source """ :sectnums: :xrefstyle: short See <> to find a complete list of features. [appendix#app-features] == Features All the features are listed in this appendix. """ When it is converted to html Then the result should contain the HTML structure """ |See a<> href='#app-features' Appendix A |to find a complete list of features. """ Scenario: Create a full cross reference to an appendix even when section numbering is disabled Given the AsciiDoc source """ :xrefstyle: full See <> to find a complete list of features. [appendix#app-features] == Features All the features are listed in this appendix. """ When it is converted to html Then the result should contain the HTML structure """ |See a<> href='#app-features' Appendix A, Features |to find a complete list of features. """ Scenario: Create a full cross reference to a numbered formal block Given the AsciiDoc source """ :xrefstyle: full See <> to find a table of features. .Features [#tbl-features%autowidth] |=== |Text formatting |Formats text for display. |=== """ When it is converted to html Then the result should contain the HTML structure """ |See a<> href='#tbl-features' Table 1, “Features” |to find a table of features. """ Scenario: Create a short cross reference to a numbered formal block Given the AsciiDoc source """ :xrefstyle: short See <> to find a table of features. .Features [#tbl-features%autowidth] |=== |Text formatting |Formats text for display. |=== """ When it is converted to html Then the result should contain the HTML structure """ |See a<> href='#tbl-features' Table 1 |to find a table of features. """ Scenario: Create a basic cross reference to a numbered formal block when the caption prefix is disabled Given the AsciiDoc source """ :xrefstyle: full :!table-caption: See <> to find a table of features. .Features [#tbl-features%autowidth] |=== |Text formatting |Formats text for display. |=== """ When it is converted to html Then the result should contain the HTML structure """ |See a<> href='#tbl-features' Features |to find a table of features. 
""" Scenario: Create a cross reference to a numbered formal block with a custom caption prefix Given the AsciiDoc source """ :xrefstyle: full :table-caption: Tbl See <> to find a table of features. .Features [#tbl-features%autowidth] |=== |Text formatting |Formats text for display. |=== """ When it is converted to html Then the result should contain the HTML structure """ |See a<> href='#tbl-features' Tbl 1, “Features” |to find a table of features. """ Scenario: Create a basic cross reference to a formal paragraph Given the AsciiDoc source """ <> apply. .Terms and conditions [#terms] These are the terms and conditions. """ When it is converted to html Then the result should match the HTML structure """ .paragraph: p a> href='#terms' Terms and conditions |apply. #terms.paragraph .title Terms and conditions p These are the terms and conditions. """ Scenario: Create a full cross reference to a formal image block Given the AsciiDoc source """ :xrefstyle: full Behold, <>! .The ferocious Ghostscript tiger [#tiger] image::tiger.svg[Ghostscript tiger] """ When it is converted to html Then the result should match the HTML structure """ .paragraph: p |Behold, a< href='#tiger' Figure 1, “The ferocious Ghostscript tiger” |! #tiger.imageblock .content: img src='tiger.svg' alt='Ghostscript tiger' .title Figure 1. The ferocious Ghostscript tiger """ Scenario: Create a short cross reference to a formal image block Given the AsciiDoc source """ :xrefstyle: short Behold, <>! .The ferocious Ghostscript tiger [#tiger] image::tiger.svg[Ghostscript tiger] """ When it is converted to html Then the result should match the HTML structure """ .paragraph: p |Behold, a< href='#tiger' Figure 1 |! #tiger.imageblock .content: img src='tiger.svg' alt='Ghostscript tiger' .title Figure 1. The ferocious Ghostscript tiger """ Scenario: Create a full cross reference to a block with an explicit caption Given the AsciiDoc source """ :xrefstyle: full See <> and <>. .Managing Orders [#diagram-1,caption="Diagram {counter:diag-number}. "] image::managing-orders.png[Managing Orders] .Managing Inventory [#diagram-2,caption="Diagram {counter:diag-number}. "] image::managing-inventory.png[Managing Inventory] """ When it is converted to html Then the result should match the HTML structure """ .paragraph: p |See a<> href='#diagram-1' Diagram 1, “Managing Orders” |and a< href='#diagram-2' Diagram 2, “Managing Inventory” |. #diagram-1.imageblock .content: img src='managing-orders.png' alt='Managing Orders' .title Diagram 1. Managing Orders #diagram-2.imageblock .content: img src='managing-inventory.png' alt='Managing Inventory' .title Diagram 2. Managing Inventory """ Scenario: Create a full cross reference to a block with an empty caption Given the AsciiDoc source """ :xrefstyle: full See <>. .Title [#ex1,caption=] ==== content ==== """ When it is converted to html Then the result should match the HTML structure """ .paragraph: p |See a< href='#ex1' Title |. #ex1.exampleblock .title Title .content: .paragraph: p |content """ Scenario: Create a short cross reference to a block with an explicit caption Given the AsciiDoc source """ :xrefstyle: short See <> and <>. .Managing Orders [#diagram-1,caption="Diagram {counter:diag-number}. "] image::managing-orders.png[Managing Orders] .Managing Inventory [#diagram-2,caption="Diagram {counter:diag-number}. 
"] image::managing-inventory.png[Managing Inventory] """ When it is converted to html Then the result should match the HTML structure """ .paragraph: p |See a<> href='#diagram-1' Diagram 1 |and a< href='#diagram-2' Diagram 2 |. #diagram-1.imageblock .content: img src='managing-orders.png' alt='Managing Orders' .title Diagram 1. Managing Orders #diagram-2.imageblock .content: img src='managing-inventory.png' alt='Managing Inventory' .title Diagram 2. Managing Inventory """ Scenario: Create a short cross reference to a block with an empty caption Given the AsciiDoc source """ :xrefstyle: short See <>. .Title [#ex1,caption=] ==== content ==== """ When it is converted to html Then the result should match the HTML structure """ .paragraph: p |See a< href='#ex1' Title |. #ex1.exampleblock .title Title .content: .paragraph: p |content """ Scenario: Create a basic cross reference to an unnumbered formal block Given the AsciiDoc source """ :xrefstyle: full See <> to find the data used in this report. .Data [#data] .... a b c .... """ When it is converted to html Then the result should contain the HTML structure """ |See a<> href='#data' Data |to find the data used in this report. """ Scenario: Use title as cross reference text to refer to a formal admonition block Given the AsciiDoc source """ :xrefstyle: full Recall in <>, we told you how to speed up this process. .Essential tip #1 [#essential-tip-1] TIP: You can speed up this process by pressing the turbo button. """ When it is converted to html Then the result should contain the HTML structure """ |Recall in a< href='#essential-tip-1' Essential tip #1 |, we told you how to speed up this process. """ Scenario: Create a cross reference from an AsciiDoc cell to a section Given the AsciiDoc source """ |=== a|See <<_install>> |=== == Install Instructions go here. """ When it is converted to html Then the result should match the HTML structure """ table.tableblock.frame-all.grid-all.stretch colgroup col style='width: 100%;' tbody tr td.tableblock.halign-left.valign-top div.content .paragraph: p |See a< href='#_install' Install .sect1 h2#_install Install .sectionbody .paragraph: p Instructions go here. """ Scenario: Create a cross reference using the title of the target section Given the AsciiDoc source """ == Section One content == Section Two, continued from <
refer to <<Section One>>
> """ When it is converted to html Then the result should match the HTML structure """ .sect1 h2#_section_one Section One .sectionbody: .paragraph: p content .sect1 h2#_section_two_continued_from_section_one |Section Two, continued from a< href='#_section_one' Section One .sectionbody: .paragraph: p |refer to a< href='#_section_one' Section One """ Scenario: Create a cross reference using the reftext of the target section Given the AsciiDoc source """ [reftext="the first section"] == Section One content == Section Two refer to <> """ When it is converted to html Then the result should match the HTML structure """ .sect1 h2#_section_one Section One .sectionbody: .paragraph: p content .sect1 h2#_section_two Section Two .sectionbody: .paragraph: p |refer to a< href='#_section_one' the first section """ When it is converted to docbook Then the result should match the XML structure """ section xml:id='_section_one' xreflabel='the first section' title Section One simpara content section xml:id='_section_two' title Section Two simpara |refer to xref< linkend='_section_one'/ """ Scenario: Create a cross reference using the formatted title of the target section Given the AsciiDoc source """ == Section *One* content == Section Two refer to <
> """ When it is converted to html Then the result should match the HTML structure """ .sect1 h2#_section_one |Section One .sectionbody: .paragraph: p content .sect1 h2#_section_two Section Two .sectionbody: .paragraph: p |refer to a< href='#_section_one' Section One """ Scenario: Does not process a natural cross reference in compat mode Given the AsciiDoc source """ :compat-mode: == Section One content == Section Two refer to <
> """ When it is converted to html Then the result should match the HTML structure """ .sect1 h2#_section_one |Section One .sectionbody: .paragraph: p content .sect1 h2#_section_two Section Two .sectionbody: .paragraph: p |refer to a< href='#Section One' [Section One] """ Scenario: Parses text of xref macro as attributes if attribute signature found Given the AsciiDoc source """ == Section One content == Section Two refer to xref:_section_one[role=next] """ When it is converted to html Then the result should match the HTML structure """ .sect1 h2#_section_one |Section One .sectionbody: .paragraph: p content .sect1 h2#_section_two Section Two .sectionbody: .paragraph: p |refer to a< href='#_section_one' class='next' Section One """ Scenario: Does not parse text of xref macro as attribute if attribute signature not found Given the AsciiDoc source """ == Section One content == Section Two refer to xref:_section_one[One, Section One] """ When it is converted to html Then the result should match the HTML structure """ .sect1 h2#_section_one |Section One .sectionbody: .paragraph: p content .sect1 h2#_section_two Section Two .sectionbody: .paragraph: p |refer to a< href='#_section_one' One, Section One """ Scenario: Uses whole text of xref macro as link text if attribute signature found and text is enclosed in double quotes Given the AsciiDoc source """ == Section One content == Section Two refer to xref:_section_one["Section One == Starting Point"] """ When it is converted to html Then the result should match the HTML structure """ .sect1 h2#_section_one |Section One .sectionbody: .paragraph: p content .sect1 h2#_section_two Section Two .sectionbody: .paragraph: p |refer to a< href='#_section_one' |Section One == Starting Point """ Scenario: Does not parse text of xref macro as text if enclosed in double quotes but attribute signature not found Given the AsciiDoc source """ == Section One content == Section Two refer to xref:_section_one["The Premier Section"] """ When it is converted to html Then the result should match the HTML structure """ .sect1 h2#_section_one |Section One .sectionbody: .paragraph: p content .sect1 h2#_section_two Section Two .sectionbody: .paragraph: p |refer to a< href='#_section_one' "The Premier Section" """ Scenario: Does not parse text of xref macro as attribute if no attributes found Given the AsciiDoc source """ == Section One content == Section Two refer to xref:_section_one[Section One = First Section] """ When it is converted to html Then the result should match the HTML structure """ .sect1 h2#_section_one |Section One .sectionbody: .paragraph: p content .sect1 h2#_section_two Section Two .sectionbody: .paragraph: p |refer to a< href='#_section_one' |Section One |= First Section """ Scenario: Does not parse formatted text of xref macro as attributes Given the AsciiDoc source """ == Section One content == Section Two refer to xref:_section_one[[.role]#Section One#] """ When it is converted to html Then the result should match the HTML structure """ .sect1 h2#_section_one |Section One .sectionbody: .paragraph: p content .sect1 h2#_section_two Section Two .sectionbody: .paragraph: p |refer to a< href='#_section_one' span.role |Section |One """ Scenario: Can escape double quotes in text of xref macro using backslashes when text is parsed as attributes Given the AsciiDoc source """ == Section One content == Section Two refer to xref:_section_one["\"The Premier Section\"",role=spotlight] """ When it is converted to html Then the result should match the HTML structure 
""" .sect1 h2#_section_one |Section One .sectionbody: .paragraph: p content .sect1 h2#_section_two Section Two .sectionbody: .paragraph: p |refer to a< href='#_section_one' class='spotlight' "The Premier Section" """ Scenario: Override xrefstyle for a given part of the document Given the AsciiDoc source """ :xrefstyle: full :doctype: book :sectnums: == Foo refer to <<#_bar>> == Bar :xrefstyle: short refer to xref:#_foo[xrefstyle=short] """ When it is converted to html Then the result should match the HTML structure """ .sect1 h2#_foo 1. Foo .sectionbody: .paragraph: p |refer to a< href='#_bar' Chapter 2, Bar .sect1 h2#_bar 2. Bar .sectionbody: .paragraph: p |refer to a< href='#_foo' Chapter 1 """ Scenario: Override xrefstyle for a specific reference by assigning the xrefstyle attribute on the xref macro Given the AsciiDoc source """ :xrefstyle: full :doctype: book :sectnums: == Foo content == Bar refer to <<#_foo>> refer to xref:#_foo[xrefstyle=short] """ When it is converted to html Then the result should match the HTML structure """ .sect1 h2#_foo 1. Foo .sectionbody: .paragraph: p content .sect1 h2#_bar 2. Bar .sectionbody .paragraph: p |refer to a< href='#_foo' Chapter 1, Foo .paragraph: p |refer to a< href='#_foo' Chapter 1 """ asciidoctor-2.0.20/lib/000077500000000000000000000000001443135032600146455ustar00rootroot00000000000000asciidoctor-2.0.20/lib/asciidoctor.rb000066400000000000000000000513201443135032600174760ustar00rootroot00000000000000# frozen_string_literal: true require 'set' # NOTE RUBY_ENGINE == 'opal' conditional blocks like this are filtered by the Opal preprocessor if RUBY_ENGINE == 'opal' # this require is satisfied by the Asciidoctor.js build; it augments the Ruby environment for Asciidoctor.js require 'asciidoctor/js' else autoload :Base64, 'base64' require 'cgi/util' autoload :OpenURI, 'open-uri' autoload :Pathname, 'pathname' autoload :StringScanner, 'strscan' autoload :URI, 'uri' end # Public: The main application interface (API) for Asciidoctor. This API provides methods to parse AsciiDoc content and # convert it to various output formats using built-in or third-party converters or Tilt-supported templates. # # An AsciiDoc document can be as simple as a single line of content, though it more commonly starts with a document # header that declares the document title and document attribute definitions. The document header is then followed by # zero or more section titles, optionally nested, to organize the paragraphs, blocks, lists, etc. of the document. # # By default, the processor converts the AsciiDoc document to HTML 5 using a built-in converter. However, this behavior # can be changed by specifying a different backend (e.g., +docbook+). A backend is a keyword for an output format (e.g., # DocBook). That keyword, in turn, is used to select a converter, which carries out the request to convert the document # to that format. # # In addition to this API, Asciidoctor also provides a command-line interface (CLI) named +asciidoctor+ for converting # AsciiDoc content. See the provided man(ual) page for usage and options. 
# # Examples # # # Convert an AsciiDoc file # Asciidoctor.convert_file 'document.adoc', safe: :safe # # # Convert an AsciiDoc string # puts Asciidoctor.convert "I'm using *Asciidoctor* version {asciidoctor-version}.", safe: :safe # # # Convert an AsciiDoc file using Tilt-supported templates # Asciidoctor.convert_file 'document.adoc', safe: :safe, template_dir: '/path/to/templates' # # # Parse an AsciiDoc file into a document object # doc = Asciidoctor.load_file 'document.adoc', safe: :safe # # # Parse an AsciiDoc string into a document object # doc = Asciidoctor.load "= Document Title\n\nfirst paragraph\n\nsecond paragraph", safe: :safe # module Asciidoctor # alias the RUBY_ENGINE constant inside the Asciidoctor namespace and define a precomputed alias for runtime RUBY_ENGINE_OPAL = (RUBY_ENGINE = ::RUBY_ENGINE) == 'opal' module SafeMode # A safe mode level that disables any of the security features enforced # by Asciidoctor (Ruby is still subject to its own restrictions). UNSAFE = 0 # A safe mode level that closely parallels safe mode in AsciiDoc. This value # prevents access to files which reside outside of the parent directory of # the source file and disables any macro other than the include::[] directive. SAFE = 1 # A safe mode level that disallows the document from setting attributes # that would affect the conversion of the document, in addition to all the # security features of SafeMode::SAFE. For instance, this level forbids # changing the backend or source-highlighter using an attribute defined # in the source document header. This is the most fundamental level of # security for server deployments (hence the name). SERVER = 10 # A safe mode level that disallows the document from attempting to read # files from the file system and including the contents of them into the # document, in additional to all the security features of SafeMode::SERVER. # For instance, this level disallows use of the include::[] directive and the # embedding of binary content (data uri), stylesheets and JavaScripts # referenced by the document. (Asciidoctor and trusted extensions may still # be allowed to embed trusted content into the document). # # Since Asciidoctor is aiming for wide adoption, this level is the default # and is recommended for server deployments. SECURE = 20 # A planned safe mode level that disallows the use of passthrough macros and # prevents the document from setting any known attributes, in addition to all # the security features of SafeMode::SECURE. # # Please note that this level is not currently implemented (and therefore not # enforced)! #PARANOID = 100 @names_by_value = (constants false).map {|sym| [(const_get sym), sym.to_s.downcase] }.sort {|(a), (b)| a <=> b }.to_h def self.value_for_name name const_get name.upcase, false end def self.name_for_value value @names_by_value[value] end def self.names @names_by_value.values end end # Flags to control compliance with the behavior of AsciiDoc module Compliance @keys = ::Set.new class << self attr_reader :keys # Defines a new compliance key and assigns an initial value. def define key, value instance_variable_set %(@#{key}), value singleton_class.send :attr_accessor, key @keys << key nil end end # AsciiDoc terminates paragraphs adjacent to # block content (delimiter or block attribute list) # This option allows this behavior to be modified # TODO what about literal paragraph? 
# Compliance value: true define :block_terminates_paragraph, true # AsciiDoc does not parse paragraphs with a verbatim style # (i.e., literal, listing, source, verse) as verbatim content. # This options allows this behavior to be modified # Compliance value: false define :strict_verbatim_paragraphs, true # AsciiDoc supports both atx (single-line) and setext (underlined) section titles. # This option can be used to disable the setext variant. # Compliance value: true define :underline_style_section_titles, true # Asciidoctor will unwrap the content in a preamble if the document has a # title and no sections, then discard the empty preamble. # Compliance value: false define :unwrap_standalone_preamble, true # AsciiDoc drops lines that contain references to missing attributes. # This behavior is not intuitive to most writers. # Asciidoctor allows this behavior to be configured. # Possible options are 'skip', 'drop', 'drop-line', and 'warn'. # Compliance value: 'drop-line' define :attribute_missing, 'skip' # AsciiDoc drops lines that contain an attribute unassignment. # This behavior may need to be tuned depending on the circumstances. # Compliance value: 'drop-line' define :attribute_undefined, 'drop-line' # Asciidoctor will allow the id, role and options to be set # on blocks using a shorthand syntax (e.g., #idname.rolename%optionname) # Compliance value: false define :shorthand_property_syntax, true # Asciidoctor will attempt to resolve the target of a cross reference by # matching its reference text (reftext or title) (e.g., <
>) # Compliance value: false define :natural_xrefs, true # Asciidoctor will start counting at the following number # when creating a unique id when there is a conflict # Compliance value: 2 define :unique_id_start_index, 2 # Asciidoctor will recognize commonly-used Markdown syntax # to the degree it does not interfere with existing # AsciiDoc syntax and behavior. # Compliance value: false define :markdown_syntax, true end # The absolute root directory of the Asciidoctor RubyGem ROOT_DIR = ::File.dirname ::File.absolute_path __dir__ unless defined? ROOT_DIR # The absolute lib directory of the Asciidoctor RubyGem LIB_DIR = ::File.join ROOT_DIR, 'lib' # The absolute data directory of the Asciidoctor RubyGem DATA_DIR = ::File.join ROOT_DIR, 'data' # The user's home directory, as best we can determine it # IMPORTANT this rescue is required for running Asciidoctor on GitHub.com USER_HOME = ::Dir.home rescue (::ENV['HOME'] || ::Dir.pwd) # The newline character used for output; stored in constant table as an optimization LF = ?\n # The null character to use for splitting attribute values NULL = ?\0 # String for matching tab character TAB = ?\t # Maximum integer value for "boundless" operations; equal to MAX_SAFE_INTEGER in JavaScript MAX_INT = 9007199254740991 # Alias UTF_8 encoding for convenience / speed UTF_8 = ::Encoding::UTF_8 # Byte arrays for UTF-* Byte Order Marks BOM_BYTES_UTF_8 = [0xef, 0xbb, 0xbf] BOM_BYTES_UTF_16LE = [0xff, 0xfe] BOM_BYTES_UTF_16BE = [0xfe, 0xff] # The mode to use when opening a file for reading FILE_READ_MODE = RUBY_ENGINE_OPAL ? 'r' : 'rb:utf-8:utf-8' # The mode to use when opening a URI for reading URI_READ_MODE = FILE_READ_MODE # The mode to use when opening a file for writing FILE_WRITE_MODE = RUBY_ENGINE_OPAL ? 'w' : 'w:utf-8' # The default document type # Can influence markup generated by the converters DEFAULT_DOCTYPE = 'article' # The backend determines the format of the converted output, default to html5 DEFAULT_BACKEND = 'html5' DEFAULT_STYLESHEET_KEYS = ['', 'DEFAULT'].to_set DEFAULT_STYLESHEET_NAME = 'asciidoctor.css' # Pointers to the preferred version for a given backend. BACKEND_ALIASES = { 'html' => 'html5', 'docbook' => 'docbook5' } # Default page widths for calculating absolute widths DEFAULT_PAGE_WIDTHS = { 'docbook' => 425 } # Default extensions for the respective base backends DEFAULT_EXTENSIONS = { 'html' => '.html', 'docbook' => '.xml', 'pdf' => '.pdf', 'epub' => '.epub', 'manpage' => '.man', 'asciidoc' => '.adoc' } # A map of file extensions that are recognized as AsciiDoc documents # TODO .txt should be deprecated ASCIIDOC_EXTENSIONS = { '.adoc' => true, '.asciidoc' => true, '.asc' => true, '.ad' => true, # TODO .txt should be deprecated '.txt' => true } SETEXT_SECTION_LEVELS = { '=' => 0, '-' => 1, '~' => 2, '^' => 3, '+' => 4 } ADMONITION_STYLES = ['NOTE', 'TIP', 'IMPORTANT', 'WARNING', 'CAUTION'].to_set ADMONITION_STYLE_HEADS = ::Set.new.tap {|accum| ADMONITION_STYLES.each {|s| accum << s.chr } } PARAGRAPH_STYLES = ['comment', 'example', 'literal', 'listing', 'normal', 'open', 'pass', 'quote', 'sidebar', 'source', 'verse', 'abstract', 'partintro'].to_set VERBATIM_STYLES = ['literal', 'listing', 'source', 'verse'].to_set DELIMITED_BLOCKS = { '--' => [:open, ['comment', 'example', 'literal', 'listing', 'pass', 'quote', 'sidebar', 'source', 'verse', 'admonition', 'abstract', 'partintro'].to_set], '----' => [:listing, ['literal', 'source'].to_set], '....' 
=> [:literal, ['listing', 'source'].to_set], '====' => [:example, ['admonition'].to_set], '****' => [:sidebar, ::Set.new], '____' => [:quote, ['verse'].to_set], '++++' => [:pass, ['stem', 'latexmath', 'asciimath'].to_set], '|===' => [:table, ::Set.new], ',===' => [:table, ::Set.new], ':===' => [:table, ::Set.new], '!===' => [:table, ::Set.new], '////' => [:comment, ::Set.new], '```' => [:fenced_code, ::Set.new] } DELIMITED_BLOCK_HEADS = {}.tap {|accum| DELIMITED_BLOCKS.each_key {|k| accum[k.slice 0, 2] = true } } DELIMITED_BLOCK_TAILS = {}.tap {|accum| DELIMITED_BLOCKS.each_key {|k| accum[k] = k[k.length - 1] if k.length == 4 } } # NOTE the 'figure' key as a string is historical and used by image blocks CAPTION_ATTRIBUTE_NAMES = { example: 'example-caption', 'figure' => 'figure-caption', listing: 'listing-caption', table: 'table-caption' } LAYOUT_BREAK_CHARS = { '\'' => :thematic_break, '<' => :page_break } MARKDOWN_THEMATIC_BREAK_CHARS = { '-' => :thematic_break, '*' => :thematic_break, '_' => :thematic_break } HYBRID_LAYOUT_BREAK_CHARS = LAYOUT_BREAK_CHARS.merge MARKDOWN_THEMATIC_BREAK_CHARS #LIST_CONTEXTS = [:ulist, :olist, :dlist, :colist] NESTABLE_LIST_CONTEXTS = [:ulist, :olist, :dlist] # TODO validate use of explicit style name above ordered list (this list is for selecting an implicit style) ORDERED_LIST_STYLES = [:arabic, :loweralpha, :lowerroman, :upperalpha, :upperroman] #, :lowergreek] ORDERED_LIST_KEYWORDS = { #'arabic' => '1', #'decimal' => '1', 'loweralpha' => 'a', 'lowerroman' => 'i', #'lowergreek' => 'a', 'upperalpha' => 'A', 'upperroman' => 'I' } ATTR_REF_HEAD = '{' LIST_CONTINUATION = '+' # NOTE AsciiDoc.py allows + to be preceded by TAB; Asciidoctor does not HARD_LINE_BREAK = ' +' LINE_CONTINUATION = ' \\' LINE_CONTINUATION_LEGACY = ' +' BLOCK_MATH_DELIMITERS = { asciimath: ['\$', '\$'], latexmath: ['\[', '\]'], } INLINE_MATH_DELIMITERS = { asciimath: ['\$', '\$'], latexmath: ['\(', '\)'], } (STEM_TYPE_ALIASES = { 'latexmath' => 'latexmath', 'latex' => 'latexmath', 'tex' => 'latexmath' }).default = 'asciimath' FONT_AWESOME_VERSION = '4.7.0' HIGHLIGHT_JS_VERSION = '9.18.3' MATHJAX_VERSION = '2.7.9' DEFAULT_ATTRIBUTES = { 'appendix-caption' => 'Appendix', 'appendix-refsig' => 'Appendix', 'caution-caption' => 'Caution', 'chapter-refsig' => 'Chapter', #'encoding' => 'UTF-8', 'example-caption' => 'Example', 'figure-caption' => 'Figure', 'important-caption' => 'Important', 'last-update-label' => 'Last updated', #'listing-caption' => 'Listing', 'note-caption' => 'Note', 'part-refsig' => 'Part', #'preface-title' => 'Preface', 'prewrap' => '', 'sectids' => '', 'section-refsig' => 'Section', 'table-caption' => 'Table', 'tip-caption' => 'Tip', 'toc-placement' => 'auto', 'toc-title' => 'Table of Contents', 'untitled-label' => 'Untitled', 'version-label' => 'Version', 'warning-caption' => 'Warning', } # attributes which be changed throughout the flow of the document (e.g., sectnums) FLEXIBLE_ATTRIBUTES = ['sectnums'] INTRINSIC_ATTRIBUTES = { 'startsb' => '[', 'endsb' => ']', 'vbar' => '|', 'caret' => '^', 'asterisk' => '*', 'tilde' => '~', 'plus' => '+', 'backslash' => '\\', 'backtick' => '`', 'blank' => '', 'empty' => '', 'sp' => ' ', 'two-colons' => '::', 'two-semicolons' => ';;', 'nbsp' => ' ', 'deg' => '°', 'zwsp' => '​', 'quot' => '"', 'apos' => ''', 'lsquo' => '‘', 'rsquo' => '’', 'ldquo' => '“', 'rdquo' => '”', 'wj' => '⁠', 'brvbar' => '¦', 'pp' => '++', 'cpp' => 'C++', 'amp' => '&', 'lt' => '<', 'gt' => '>' } # Regular expression character classes (to ensure regexp 
compatibility between Ruby and JavaScript) # CC stands for "character class", CG stands for "character class group" unless RUBY_ENGINE == 'opal' # CC_ALL is any character, including newlines (must be accompanied by multiline regexp flag) CC_ALL = '.' # CC_ANY is any character except newlines CC_ANY = '.' CC_EOL = '$' CC_ALPHA = CG_ALPHA = '\p{Alpha}' CC_ALNUM = CG_ALNUM = '\p{Alnum}' CG_BLANK = '\p{Blank}' CC_WORD = CG_WORD = '\p{Word}' end QUOTE_SUBS = {}.tap do |accum| # unconstrained quotes:: can appear anywhere # constrained quotes:: must be bordered by non-word characters # NOTE these substitutions are processed in the order they appear here and # the order in which they are replaced is important accum[false] = normal = [ # **strong** [:strong, :unconstrained, /\\?(?:\[([^\]]+)\])?\*\*(#{CC_ALL}+?)\*\*/m], # *strong* [:strong, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+)\])?\*(\S|\S#{CC_ALL}*?\S)\*(?!#{CG_WORD})/m], # "`double-quoted`" [:double, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+)\])?"`(\S|\S#{CC_ALL}*?\S)`"(?!#{CG_WORD})/m], # '`single-quoted`' [:single, :constrained, /(^|[^#{CC_WORD};:`}])(?:\[([^\]]+)\])?'`(\S|\S#{CC_ALL}*?\S)`'(?!#{CG_WORD})/m], # ``monospaced`` [:monospaced, :unconstrained, /\\?(?:\[([^\]]+)\])?``(#{CC_ALL}+?)``/m], # `monospaced` [:monospaced, :constrained, /(^|[^#{CC_WORD};:"'`}])(?:\[([^\]]+)\])?`(\S|\S#{CC_ALL}*?\S)`(?![#{CC_WORD}"'`])/m], # __emphasis__ [:emphasis, :unconstrained, /\\?(?:\[([^\]]+)\])?__(#{CC_ALL}+?)__/m], # _emphasis_ [:emphasis, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+)\])?_(\S|\S#{CC_ALL}*?\S)_(?!#{CG_WORD})/m], # ##mark## (referred to in AsciiDoc.py as unquoted) [:mark, :unconstrained, /\\?(?:\[([^\]]+)\])?##(#{CC_ALL}+?)##/m], # #mark# (referred to in AsciiDoc.py as unquoted) [:mark, :constrained, /(^|[^#{CC_WORD}&;:}])(?:\[([^\]]+)\])?#(\S|\S#{CC_ALL}*?\S)#(?!#{CG_WORD})/m], # ^superscript^ [:superscript, :unconstrained, /\\?(?:\[([^\]]+)\])?\^(\S+?)\^/], # ~subscript~ [:subscript, :unconstrained, /\\?(?:\[([^\]]+)\])?~(\S+?)~/] ] accum[true] = compat = normal.drop 0 # ``quoted'' compat[2] = [:double, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+)\])?``(\S|\S#{CC_ALL}*?\S)''(?!#{CG_WORD})/m] # `quoted' compat[3] = [:single, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+)\])?`(\S|\S#{CC_ALL}*?\S)'(?!#{CG_WORD})/m] # ++monospaced++ compat[4] = [:monospaced, :unconstrained, /\\?(?:\[([^\]]+)\])?\+\+(#{CC_ALL}+?)\+\+/m] # +monospaced+ compat[5] = [:monospaced, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+)\])?\+(\S|\S#{CC_ALL}*?\S)\+(?!#{CG_WORD})/m] # #unquoted# #compat[8] = [:unquoted, *compat[8][1..-1]] # ##unquoted## #compat[9] = [:unquoted, *compat[9][1..-1]] # 'emphasis' compat.insert 3, [:emphasis, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+)\])?'(\S|\S#{CC_ALL}*?\S)'(?!#{CG_WORD})/m] end # NOTE order of replacements is significant REPLACEMENTS = [ # (C) [/\\?\(C\)/, '©', :none], # (R) [/\\?\(R\)/, '®', :none], # (TM) [/\\?\(TM\)/, '™', :none], # foo -- bar (where either space character can be a newline) # NOTE this necessarily drops the newline if replacement appears at end of line [/(?: |\n|^|\\)--(?: |\n|$)/, ' — ', :none], # foo--bar [/(#{CG_WORD})\\?--(?=#{CG_WORD})/, '—​', :leading], # ellipsis [/\\?\.\.\./, '…​', :none], # right single quote [/\\?`'/, '’', :none], # apostrophe (inside a word) [/(#{CG_ALNUM})\\?'(?=#{CG_ALPHA})/, '’', :leading], # right arrow -> [/\\?->/, '→', :none], # right double arrow => [/\\?=>/, '⇒', :none], # left arrow <- [/\\?<-/, '←', :none], # left double arrow <= 
[/\\?<=/, '⇐', :none], # restore entities [/\\?(&)amp;((?:[a-zA-Z][a-zA-Z]+\d{0,2}|#\d\d\d{0,4}|#x[\da-fA-F][\da-fA-F][\da-fA-F]{0,3});)/, '', :bounding] ] # Internal: Automatically load the Asciidoctor::Extensions module. # # Requires the Asciidoctor::Extensions module if the name is :Extensions. # Otherwise, delegates to the super method. # # This method provides the same functionality as using autoload on # Asciidoctor::Extensions, except that the constant isn't recognized as # defined prior to it being loaded. # # Returns the resolved constant, if resolved, otherwise nothing. def self.const_missing name if name == :Extensions require_relative 'asciidoctor/extensions' Extensions else super end end unless RUBY_ENGINE == 'opal' unless RUBY_ENGINE == 'opal' autoload :SyntaxHighlighter, %(#{__dir__}/asciidoctor/syntax_highlighter) autoload :Timings, %(#{__dir__}/asciidoctor/timings) end end # core extensions require_relative 'asciidoctor/core_ext' # modules and helpers require_relative 'asciidoctor/helpers' require_relative 'asciidoctor/logging' require_relative 'asciidoctor/rx' require_relative 'asciidoctor/substitutors' require_relative 'asciidoctor/version' # abstract classes require_relative 'asciidoctor/abstract_node' require_relative 'asciidoctor/abstract_block' # concrete classes require_relative 'asciidoctor/attribute_list' require_relative 'asciidoctor/block' require_relative 'asciidoctor/callouts' require_relative 'asciidoctor/converter' require_relative 'asciidoctor/document' require_relative 'asciidoctor/inline' require_relative 'asciidoctor/list' require_relative 'asciidoctor/parser' require_relative 'asciidoctor/path_resolver' require_relative 'asciidoctor/reader' require_relative 'asciidoctor/section' require_relative 'asciidoctor/stylesheets' require_relative 'asciidoctor/table' require_relative 'asciidoctor/writer' # main API entry points require_relative 'asciidoctor/load' require_relative 'asciidoctor/convert' if RUBY_ENGINE == 'opal' require_relative 'asciidoctor/syntax_highlighter' require_relative 'asciidoctor/timings' # this require is satisfied by the Asciidoctor.js build; it supplies compile and runtime overrides for Asciidoctor.js require 'asciidoctor/js/postscript' end asciidoctor-2.0.20/lib/asciidoctor/000077500000000000000000000000001443135032600171505ustar00rootroot00000000000000asciidoctor-2.0.20/lib/asciidoctor/abstract_block.rb000066400000000000000000000443771443135032600224710ustar00rootroot00000000000000# frozen_string_literal: true module Asciidoctor class AbstractBlock < AbstractNode # Public: Get the Array of {AbstractBlock} child blocks for this block. Only applies if content model is :compound. attr_reader :blocks # Public: Set the caption for this block. attr_writer :caption # Public: Describes the type of content this block accepts and how it should be converted. Acceptable values are: # * :compound - this block contains other blocks # * :simple - this block holds a paragraph of prose that receives normal substitutions # * :verbatim - this block holds verbatim text (displayed "as is") that receives verbatim substitutions # * :raw - this block holds unprocessed content passed directly to the output with no substitutions applied # * :empty - this block has no content attr_accessor :content_model # Public: Set the Integer level of this {Section} or the level of the Section to which this {AbstractBlock} belongs. attr_accessor :level # Public: Get/Set the String numeral of this block (if section, relative to parent, otherwise absolute). 
# Only assigned to section if automatic section numbering is enabled. # Only assigned to formal block (block with title) if corresponding caption attribute is present. attr_accessor :numeral # Public: Gets/Sets the location in the AsciiDoc source where this block begins. # Tracking source location is not enabled by default, and is controlled by the sourcemap option. attr_accessor :source_location # Public: Get/Set the String style (block type qualifier) for this block. attr_accessor :style # Public: Substitutions to be applied to content in this block. attr_reader :subs def initialize parent, context, opts = {} super @content_model = :compound @blocks = [] @subs = [] @id = @title = @caption = @numeral = @style = @default_subs = @source_location = nil if context == :document || context == :section @level = @next_section_index = 0 @next_section_ordinal = 1 elsif AbstractBlock === parent @level = parent.level else @level = nil end end def block? true end def inline? false end # Public: Get the source file where this block started def file @source_location && @source_location.file end # Public: Get the source line number where this block started def lineno @source_location && @source_location.lineno end # Public: Get the converted String content for this Block. If the block # has child blocks, the content method should cause them to be # converted and returned as content that can be included in the # parent block's template. def convert @document.playback_attributes @attributes converter.convert self end # Deprecated: Use {AbstractBlock#convert} instead. alias render convert # Public: Get the converted result of the child blocks by converting the # children appropriate to content model that this block supports. def content @blocks.map {|b| b.convert }.join LF end # Public: Update the context of this block. # # This method changes the context of this block. It also updates the node name accordingly. # # context - the context Symbol context to assign to this block # # Returns the specified Symbol context def context= context @node_name = (@context = context).to_s end # Public: Append a content block to this block's list of blocks. # # block - The new child block. # # Examples # # block = Block.new(parent, :preamble, content_model: :compound) # # block << Block.new(block, :paragraph, source: 'p1') # block << Block.new(block, :paragraph, source: 'p2') # block.blocks? # # => true # block.blocks.size # # => 2 # # Returns The parent Block def << block block.parent = self unless block.parent == self @blocks << block self end # NOTE append alias required for adapting to a Java API alias append << # Public: Determine whether this Block contains block content # # Returns A Boolean indicating whether this Block has block content def blocks? @blocks.empty? ? false : true end # Public: Check whether this block has any child Section objects. # # Acts an an abstract method that always returns false unless this block is an # instance of Document or Section. # Both Document and Section provide overrides for this method. # # Returns false def sections? false end # Deprecated: Legacy property to get the String or Integer numeral of this section. def number (Integer @numeral) rescue @numeral end # Deprecated: Legacy property to set the numeral of this section by coercing the value to a String. def number= val @numeral = val.to_s end # Public: Walk the document tree and find all block-level nodes that match the specified selector (context, style, id, # role, and/or custom filter). 
# # If a Ruby block is given, it's applied as a supplemental filter. If the filter returns true (which implies :accept), # the node is accepted and node traversal continues. If the filter returns false (which implies :skip), the node is # skipped, but its children are still visited. If the filter returns :reject, the node and all its descendants are # rejected. If the filter returns :prune, the node is accepted, but its descendants are rejected. If no selector # or filter block is supplied, all block-level nodes in the tree are returned. # # Examples # # doc.find_by context: :section # #=> Asciidoctor::Section@14459860 { level: 0, title: "Hello, AsciiDoc!", blocks: 0 } # #=> Asciidoctor::Section@14505460 { level: 1, title: "First Section", blocks: 1 } # # doc.find_by(context: :section) {|section| section.level == 1 } # #=> Asciidoctor::Section@14505460 { level: 1, title: "First Section", blocks: 1 } # # doc.find_by context: :listing, style: 'source' # #=> Asciidoctor::Block@13136720 { context: :listing, content_model: :verbatim, style: "source", lines: 1 } # # Returns An Array of block-level nodes that match the filter or an empty Array if no matches are found #-- # TODO support jQuery-style selector (e.g., image.thumb) def find_by selector = {}, &block find_by_internal selector, (result = []), &block rescue ::StopIteration result end alias query find_by # Move to the next adjacent block in document order. If the current block is the last # item in a list, this method will return the following sibling of the list block. def next_adjacent_block unless @context == :document if (p = @parent).context == :dlist && @context == :list_item (sib = p.items[(p.items.find_index {|terms, desc| (terms.include? self) || desc == self }) + 1]) ? sib : p.next_adjacent_block else (sib = p.blocks[(p.blocks.find_index self) + 1]) ? sib : p.next_adjacent_block end end end # Public: Get the Array of child Section objects # # Only applies to Document and Section instances # # Examples # # doc << (sect1 = Section.new doc, 1) # sect1.title = 'Section 1' # para1 = Block.new sect1, :paragraph, source: 'Paragraph 1' # para2 = Block.new sect1, :paragraph, source: 'Paragraph 2' # sect1 << para1 << para2 # sect1 << (sect1_1 = Section.new sect1, 2) # sect1_1.title = 'Section 1.1' # sect1_1 << (Block.new sect1_1, :paragraph, source: 'Paragraph 3') # sect1.blocks? # # => true # sect1.blocks.size # # => 3 # sect1.sections.size # # => 1 # # Returns an [Array] of Section objects def sections @blocks.select {|block| block.context == :section } end # Public: Returns the converted alt text for this block image. # # Returns the [String] value of the alt attribute with XML special character # and replacement substitutions applied. def alt if (text = @attributes['alt']) if text == @attributes['default-alt'] sub_specialchars text else text = sub_specialchars text (ReplaceableTextRx.match? text) ? (sub_replacements text) : text end else '' end end # Gets the caption for this block. # # This method routes the deprecated use of the caption method on an # admonition block to the textlabel attribute. # # Returns the [String] caption for this block (or the value of the textlabel # attribute if this is an admonition block). def caption @context == :admonition ? @attributes['textlabel'] : @caption end # Public: Convenience method that returns the interpreted title of the Block # with the caption prepended. # # Concatenates the value of this Block's caption instance variable and the # return value of this Block's title method. 
No space is added between the # two values. If the Block does not have a caption, the interpreted title is # returned. # # Returns the converted String title prefixed with the caption, or just the # converted String title if no caption is set def captioned_title %(#{@caption}#{title}) end # Public: Retrieve the list marker keyword for the specified list type. # # For use in the HTML type attribute. # # list_type - the type of list; default to the @style if not specified # # Returns the single-character [String] keyword that represents the marker for the specified list type def list_marker_keyword list_type = nil ORDERED_LIST_KEYWORDS[list_type || @style] end # Public: Get the String title of this Block with title substitutions applied # # The following substitutions are applied to block and section titles: # # :specialcharacters, :quotes, :replacements, :macros, :attributes and :post_replacements # # Examples # # block.title = "Foo 3^ # {two-colons} Bar(1)" # block.title # => "Foo 3^ # :: Bar(1)" # # Returns the converted String title for this Block, or nil if the source title is falsy def title # prevent substitutions from being applied to title multiple times @converted_title ||= @title && (apply_title_subs @title) end # Public: A convenience method that checks whether the title of this block is defined. # # Returns a [Boolean] indicating whether this block has a title. def title? @title ? true : false end # Public: Set the String block title. # # Returns the specified String title def title= val @converted_title = nil @title = val end # Public: A convenience method that checks whether the specified # substitution is enabled for this block. # # name - The Symbol substitution name # # Returns A Boolean indicating whether the specified substitution is # enabled for this block def sub? name @subs.include? name end # Public: Remove a substitution from this block # # sub - The Symbol substitution name # # Returns nothing def remove_sub sub @subs.delete sub nil end # Public: Generate cross reference text (xreftext) that can be used to refer # to this block. # # Use the explicit reftext for this block, if specified, retrieved from the # {#reftext} method. Otherwise, if this is a section or captioned block (a # block with both a title and caption), generate the xreftext according to # the value of the xrefstyle argument (e.g., full, short). This logic may # leverage the {Substitutors#sub_quotes} method to apply formatting to the # text. If this is not a captioned block, return the title, if present, or # nil otherwise. # # xrefstyle - An optional String that specifies the style to use to format # the xreftext ('full', 'short', or 'basic') (default: nil). # # Returns the generated [String] xreftext used to refer to this block or # nothing if there isn't sufficient information to generate one. def xreftext xrefstyle = nil if (val = reftext) && !val.empty? val # NOTE xrefstyle only applies to blocks with a title and a caption or number elsif xrefstyle && @title && !@caption.nil_or_empty? case xrefstyle when 'full' quoted_title = sub_placeholder (sub_quotes @document.compat_mode ? %q(``%s'') : '"`%s`"'), title if @numeral && (caption_attr_name = CAPTION_ATTRIBUTE_NAMES[@context]) && (prefix = @document.attributes[caption_attr_name]) %(#{prefix} #{@numeral}, #{quoted_title}) else %(#{@caption.chomp '. 
'}, #{quoted_title}) end when 'short' if @numeral && (caption_attr_name = CAPTION_ATTRIBUTE_NAMES[@context]) && (prefix = @document.attributes[caption_attr_name]) %(#{prefix} #{@numeral}) else @caption.chomp '. ' end else # 'basic' title end else title end end # Public: Generate and assign caption to block if not already assigned. # # If the block has a title and a caption prefix is available for this block, # then build a caption from this information, assign it a number and store it # to the caption attribute on the block. # # If a caption has already been assigned to this block, do nothing. # # The parts of a complete caption are: . # This partial caption represents the part the precedes the title. # # value - The String caption to assign to this block or nil to use document attribute. # caption_context - The Symbol context to use when resolving caption-related attributes. If not provided, the name of # the context for this block is used. Only certain contexts allow the caption to be looked up. # (default: @context) # # Returns nothing. def assign_caption value, caption_context = @context unless @caption || !@title || (@caption = value || @document.attributes['caption']) if (attr_name = CAPTION_ATTRIBUTE_NAMES[caption_context]) && (prefix = @document.attributes[attr_name]) @caption = %(#{prefix} #{@numeral = @document.increment_and_store_counter %(#{caption_context}-number), self}. ) nil end end end # Internal: Assign the next index (0-based) and numeral (1-based) to the section. # If the section is an appendix, the numeral is a letter (starting with A). This # method also assigns the appendix caption. # # section - The section to which to assign the next index and numeral. # # Assign to the specified section the next index and, if the section is # numbered, the numeral within this block (its parent). # # Returns nothing def assign_numeral section @next_section_index = (section.index = @next_section_index) + 1 if (like = section.numbered) if (sectname = section.sectname) == 'appendix' section.numeral = @document.counter 'appendix-number', 'A' section.caption = (caption = @document.attributes['appendix-caption']) ? %(#{caption} #{section.numeral}: ) : %(#{section.numeral}. ) # NOTE currently chapters in a book doctype are sequential even for multi-part books (see #979) elsif sectname == 'chapter' || like == :chapter section.numeral = (@document.counter 'chapter-number', 1).to_s else section.numeral = sectname == 'part' ? (Helpers.int_to_roman @next_section_ordinal) : @next_section_ordinal.to_s @next_section_ordinal += 1 end end nil end # Internal: Reassign the section indexes # # Walk the descendents of the current Document or Section # and reassign the section 0-based index value to each Section # as it appears in document order. # # IMPORTANT You must invoke this method on a node after removing # child sections or else the internal counters will be off. # # Returns nothing def reindex_sections @next_section_index = 0 @next_section_ordinal = 1 @blocks.each do |block| if block.context == :section assign_numeral block block.reindex_sections end end end protected # Internal: Performs the work for find_by, but does not handle the StopIteration exception. def find_by_internal selector = {}, result = [], &block if ((any_context = (context_selector = selector[:context]) ? nil : true) || context_selector == @context) && (!(style_selector = selector[:style]) || style_selector == @style) && (!(role_selector = selector[:role]) || (has_role? 
role_selector)) && (!(id_selector = selector[:id]) || id_selector == @id) if block_given? if (verdict = yield self) case verdict when :prune result << self raise ::StopIteration if id_selector return result when :reject raise ::StopIteration if id_selector return result when :stop raise ::StopIteration else result << self raise ::StopIteration if id_selector end elsif id_selector raise ::StopIteration end else result << self raise ::StopIteration if id_selector end end case @context when :document unless context_selector == :document # process document header as a section, if present if header? && (any_context || context_selector == :section) @header.find_by_internal selector, result, &block end @blocks.each do |b| next if context_selector == :section && b.context != :section # optimization b.find_by_internal selector, result, &block end end when :dlist # dlist has different structure than other blocks if any_context || context_selector != :section # optimization # NOTE the list item of a dlist can be nil, so we have to check @blocks.flatten.each {|b| b.find_by_internal selector, result, &block if b } end when :table if selector[:traverse_documents] rows.head.each {|r| r.each {|c| c.find_by_internal selector, result, &block } } selector = selector.merge context: :document if context_selector == :inner_document (rows.body + rows.foot).each do |r| r.each do |c| c.find_by_internal selector, result, &block c.inner_document.find_by_internal selector, result, &block if c.style == :asciidoc end end else (rows.head + rows.body + rows.foot).each {|r| r.each {|c| c.find_by_internal selector, result, &block } } end else @blocks.each do |b| next if context_selector == :section && b.context != :section # optimization b.find_by_internal selector, result, &block end end result end end end �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������asciidoctor-2.0.20/lib/asciidoctor/abstract_node.rb�������������������������������������������������0000664�0000000�0000000�00000056514�14431350326�0022320�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������# frozen_string_literal: true module Asciidoctor # Public: An abstract base class that provides state and methods for managing a # node of AsciiDoc content. The state and methods on this class are common to # all content segments in an AsciiDoc document. 
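#
# Example (illustrative only: the source text, id, and role below are arbitrary sample values,
# but every method called is a public accessor defined on this class or on AbstractBlock):
#
#   doc = Asciidoctor.load "[#intro.lead]\nfirst paragraph", safe: :safe
#   node = doc.blocks[0]
#   node.context      # => :paragraph
#   node.id           # => "intro"
#   node.role?        # => true
#   node.attr 'role'  # => "lead"
#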
class AbstractNode include Logging include Substitutors # Public: Get the Hash of attributes for this node attr_reader :attributes # Public: Get the Symbol context for this node attr_reader :context # Public: Get the Asciidoctor::Document to which this node belongs attr_reader :document # Public: Get/Set the String id of this node attr_accessor :id # Public: Get the String name of this node attr_reader :node_name # Public: Get the AbstractBlock parent element of this node attr_reader :parent def initialize parent, context, opts = {} # document is a special case, should refer to itself if context == :document @document = self elsif parent @document = (@parent = parent).document end @node_name = (@context = context).to_s # NOTE the value of the :attributes option may be nil on an Inline node @attributes = (attrs = opts[:attributes]) ? attrs.merge : {} @passthroughs = [] end # Public: Returns whether this {AbstractNode} is an instance of {Block} # # Returns [Boolean] def block? # :nocov: raise ::NotImplementedError # :nocov: end # Public: Returns whether this {AbstractNode} is an instance of {Inline} # # Returns [Boolean] def inline? # :nocov: raise ::NotImplementedError # :nocov: end # Public: Get the Asciidoctor::Converter instance being used to convert the # current Asciidoctor::Document. def converter @document.converter end # Public: Associate this Block with a new parent Block # # parent - The Block to set as the parent of this Block # # Returns the the specified Block parent def parent= parent @parent, @document = parent, parent.document end # Public: Get the value of the specified attribute. If the attribute is not found on this node, fallback_name is set, # and this node is not the Document node, get the value of the specified attribute from the Document node. # # Look for the specified attribute in the attributes on this node and return the value of the attribute, if found. # Otherwise, if fallback_name is set (default: same as name) and this node is not the Document node, look for that # attribute on the Document node and return its value, if found. Otherwise, return the default value (default: nil). # # name - The String or Symbol name of the attribute to resolve. # default_value - The Object value to return if the attribute is not found (default: nil). # fallback_name - The String or Symbol of the attribute to resolve on the Document if the attribute is not found on # this node (default: same as name). # # Returns the [Object] value (typically a String) of the attribute or default_value if the attribute is not found. def attr name, default_value = nil, fallback_name = nil @attributes[name.to_s] || (fallback_name && @parent && @document.attributes[(fallback_name == true ? name : fallback_name).to_s] || default_value) end # Public: Check if the specified attribute is defined using the same logic as {#attr}, optionally performing a # comparison with the expected value if specified. # # Look for the specified attribute in the attributes on this node. If not found, fallback_name is specified (default: # same as name), and this node is not the Document node, look for that attribute on the Document node. In either case, # if the attribute is found, and the comparison value is truthy, return whether the two values match. Otherwise, # return whether the attribute was found. # # name - The String or Symbol name of the attribute to resolve. # expected_value - The expected Object value of the attribute (default: nil). 
# fallback_name - The String or Symbol of the attribute to resolve on the Document if the attribute is not found on # this node (default: same as name). # # Returns a [Boolean] indicating whether the attribute exists and, if a truthy comparison value is specified, whether # the value of the attribute matches the comparison value. def attr? name, expected_value = nil, fallback_name = nil if expected_value expected_value == (@attributes[name.to_s] || (fallback_name && @parent ? @document.attributes[(fallback_name == true ? name : fallback_name).to_s] : nil)) else (@attributes.key? name.to_s) || (fallback_name && @parent ? (@document.attributes.key? (fallback_name == true ? name : fallback_name).to_s) : false) end end # Public: Assign the value to the attribute name for the current node. # # name - The String attribute name to assign # value - The Object value to assign to the attribute (default: '') # overwrite - A Boolean indicating whether to assign the attribute # if currently present in the attributes Hash (default: true) # # Returns a [Boolean] indicating whether the assignment was performed def set_attr name, value = '', overwrite = true if overwrite == false && (@attributes.key? name) false else @attributes[name] = value true end end # Public: Remove the attribute from the current node. # # name - The String attribute name to remove # # Returns the previous [String] value, or nil if the attribute was not present. def remove_attr name @attributes.delete name end # Public: A convenience method to check if the specified option attribute is # enabled on the current node. # # Check if the option is enabled. This method simply checks to see if the # <name>-option attribute is defined on the current node. # # name - the String or Symbol name of the option # # return a Boolean indicating whether the option has been specified def option? name @attributes[%(#{name}-option)] ? true : false end # Public: Set the specified option on this node. # # This method sets the specified option on this node by setting the <name>-option attribute. # # name - the String name of the option # # Returns nothing def set_option name @attributes[%(#{name}-option)] = '' nil end # Public: Retrieve the Set of option names that are enabled on this node # # Returns a [Set] of option names def enabled_options ::Set.new.tap {|accum| @attributes.each_key {|k| accum << (k.slice 0, k.length - 7) if k.to_s.end_with? '-option' } } end # Public: Update the attributes of this node with the new values in # the attributes argument. # # If an attribute already exists with the same key, it's value will # be overwritten. # # new_attributes - A Hash of additional attributes to assign to this node. # # Returns the updated attributes [Hash] on this node. def update_attributes new_attributes @attributes.update new_attributes end # Public: Retrieves the space-separated String role for this node. # # Returns the role as a space-separated [String]. def role @attributes['role'] end # Public: Retrieves the String role names for this node as an Array. # # Returns the role names as a String [Array], which is empty if the role attribute is absent on this node. def roles (val = @attributes['role']) ? val.split : [] end # Public: Checks if the role attribute is set on this node and, if an expected value is given, whether the # space-separated role matches that value. 
# # expected_value - The expected String value of the role (optional, default: nil) # # Returns a [Boolean] indicating whether the role attribute is set on this node and, if an expected value is given, # whether the space-separated role matches that value. def role? expected_value = nil expected_value ? expected_value == @attributes['role'] : (@attributes.key? 'role') end # Public: Checks if the specified role is present in the list of roles for this node. # # name - The String name of the role to find. # # Returns a [Boolean] indicating whether this node has the specified role. def has_role? name # NOTE center + include? is faster than split + include? (val = @attributes['role']) ? (%( #{val} ).include? %( #{name} )) : false end # Public: Sets the value of the role attribute on this node. # # names - A single role name, a space-separated String of role names, or an Array of role names # # Returns the specified String role name or Array of role names def role= names @attributes['role'] = (::Array === names) ? (names.join ' ') : names end # Public: Adds the given role directly to this node. # # Returns a [Boolean] indicating whether the role was added. def add_role name if (val = @attributes['role']) # NOTE center + include? is faster than split + include? if %( #{val} ).include? %( #{name} ) false else @attributes['role'] = %(#{val} #{name}) true end else @attributes['role'] = name true end end # Public: Removes the given role directly from this node. # # Returns a [Boolean] indicating whether the role was removed. def remove_role name if (val = @attributes['role']) && ((val = val.split).delete name) if val.empty? @attributes.delete 'role' else @attributes['role'] = val.join ' ' end true else false end end # Public: A convenience method that returns the value of the reftext attribute with substitutions applied. def reftext (val = @attributes['reftext']) ? (apply_reftext_subs val) : nil end # Public: A convenience method that checks if the reftext attribute is defined. def reftext? @attributes.key? 'reftext' end # Public: Construct a reference or data URI to an icon image for the # specified icon name. # # If the 'icon' attribute is set on this block, the name is ignored and the # value of this attribute is used as the target image path. Otherwise, # construct a target image path by concatenating the value of the 'iconsdir' # attribute, the icon name, and the value of the 'icontype' attribute # (defaulting to 'png'). # # The target image path is then passed through the #image_uri() method. If # the 'data-uri' attribute is set on the document, the image will be # safely converted to a data URI. # # The return value of this method can be safely used in an image tag. # # name - The String name of the icon # # Returns A String reference or data URI for an icon image def icon_uri name if attr? 'icon' icon = attr 'icon' # QUESTION should we be adding the extension if the icon is an absolute URI? icon = %(#{icon}.#{@document.attr 'icontype', 'png'}) unless Helpers.extname? icon else icon = %(#{name}.#{@document.attr 'icontype', 'png'}) end image_uri icon, 'iconsdir' end # Public: Construct a URI reference or data URI to the target image. # # If the target image is a URI reference, then leave it untouched. # # The target image is resolved relative to the directory retrieved from the # specified attribute key, if provided. 
# # If the 'data-uri' attribute is set on the document, and the safe mode level # is less than SafeMode::SECURE, the image will be safely converted to a data URI # by reading it from the same directory. If neither of these conditions # are satisfied, a relative path (i.e., URL) will be returned. # # The return value of this method can be safely used in an image tag. # # target_image - A String path to the target image # asset_dir_key - The String attribute key used to lookup the directory where # the image is located (default: 'imagesdir') # # Returns A String reference or data URI for the target image def image_uri(target_image, asset_dir_key = 'imagesdir') if (doc = @document).safe < SafeMode::SECURE && (doc.attr? 'data-uri') if ((Helpers.uriish? target_image) && (target_image = Helpers.encode_spaces_in_uri target_image)) || (asset_dir_key && (images_base = doc.attr asset_dir_key) && (Helpers.uriish? images_base) && (target_image = normalize_web_path target_image, images_base, false)) (doc.attr? 'allow-uri-read') ? (generate_data_uri_from_uri target_image, (doc.attr? 'cache-uri')) : target_image else generate_data_uri target_image, asset_dir_key end else normalize_web_path target_image, (asset_dir_key ? (doc.attr asset_dir_key) : nil) end end # Public: Construct a URI reference to the target media. # # If the target media is a URI reference, then leave it untouched. # # The target media is resolved relative to the directory retrieved from the # specified attribute key, if provided. # # The return value can be safely used in a media tag (img, audio, video). # # target - A String reference to the target media # asset_dir_key - The String attribute key used to lookup the directory where # the media is located (default: 'imagesdir') # # Returns A String reference for the target media def media_uri(target, asset_dir_key = 'imagesdir') normalize_web_path target, (asset_dir_key ? @document.attr(asset_dir_key) : nil) end # Public: Generate a data URI that can be used to embed an image in the output document # # First, and foremost, the target image path is cleaned if the document safe mode level # is set to at least SafeMode::SAFE (a condition which is true by default) to prevent access # to ancestor paths in the filesystem. The image data is then read and converted to # Base64. Finally, a data URI is built which can be used in an image tag. # # target_image - A String path to the target image # asset_dir_key - The String attribute key used to lookup the directory where # the image is located (default: nil) # # Returns A String data URI containing the content of the target image def generate_data_uri(target_image, asset_dir_key = nil) if (ext = Helpers.extname target_image, nil) mimetype = ext == '.svg' ? 'image/svg+xml' : %(image/#{ext.slice 1, ext.length}) else mimetype = 'application/octet-stream' end if asset_dir_key image_path = normalize_system_path(target_image, @document.attr(asset_dir_key), nil, target_name: 'image') else image_path = normalize_system_path(target_image) end if ::File.readable? 
image_path # NOTE base64 is autoloaded by reference to ::Base64 %(data:#{mimetype};base64,#{::Base64.strict_encode64 ::File.binread image_path}) else logger.warn %(image to embed not found or not readable: #{image_path}) %(data:#{mimetype};base64,) # uncomment to return 1 pixel white dot instead #'data:image/gif;base64,R0lGODlhAQABAAAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw==' end end # Public: Read the image data from the specified URI and generate a data URI # # The image data is read from the URI and converted to Base64. A data URI is # constructed from the content_type header and Base64 data and returned, # which can then be used in an image tag. # # image_uri - The URI from which to read the image data. Can be http://, https:// or ftp:// # cache_uri - A Boolean to control caching. When true, the open-uri-cached library # is used to cache the image for subsequent reads. (default: false) # # Returns A data URI string built from Base64 encoded data read from the URI # and the mime type specified in the Content Type header. def generate_data_uri_from_uri image_uri, cache_uri = false if cache_uri # caching requires the open-uri-cached gem to be installed # processing will be automatically aborted if these libraries can't be opened Helpers.require_library 'open-uri/cached', 'open-uri-cached' elsif !RUBY_ENGINE_OPAL # autoload open-uri ::OpenURI end begin mimetype, bindata = ::OpenURI.open_uri(image_uri, URI_READ_MODE) {|f| [f.content_type, f.read] } # NOTE base64 is autoloaded by reference to ::Base64 %(data:#{mimetype};base64,#{::Base64.strict_encode64 bindata}) rescue logger.warn %(could not retrieve image data from URI: #{image_uri}) image_uri # uncomment to return empty data (however, mimetype needs to be resolved) #%(data:#{mimetype}:base64,) # uncomment to return 1 pixel white dot instead #'data:image/gif;base64,R0lGODlhAQABAAAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw==' end end # Public: Normalize the asset file or directory to a concrete and rinsed path # # Delegates to normalize_system_path, with the start path set to the value of # the base_dir instance variable on the Document object. def normalize_asset_path(asset_ref, asset_name = 'path', autocorrect = true) normalize_system_path(asset_ref, @document.base_dir, nil, target_name: asset_name, recover: autocorrect) end # Public: Resolve and normalize a secure path from the target and start paths # using the PathResolver. # # See {PathResolver#system_path} for details. # # The most important functionality in this method is to prevent resolving a # path outside of the jail (which defaults to the directory of the source # file, stored in the base_dir instance variable on Document) if the document # safe level is set to SafeMode::SAFE or greater (a condition which is true # by default). # # target - the String target path # start - the String start (i.e., parent) path # jail - the String jail path to confine the resolved path # opts - an optional Hash of options to control processing (default: {}): # * :recover is used to control whether the processor should # automatically recover when an illegal path is encountered # * :target_name is used in messages to refer to the path being resolved # # raises a SecurityError if a jail is specified and the resolved path is # outside the jail. # # Returns the [String] path resolved from the start and target paths, with any # parent references resolved and self references removed. If a jail is provided, # this path will be guaranteed to be contained within the jail. 
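#
# Examples
#
#   # An illustrative sketch; the paths are assumptions and the exact result
#   # depends on the document's base_dir and safe mode. With base_dir /docs and
#   # safe mode SAFE or above, the jail defaults to /docs:
#   node.normalize_system_path 'images/tiger.adoc'
#   # => "/docs/images/tiger.adoc"
#
#   # a target that references an ancestor of the jail is recovered (with a
#   # logged warning) rather than escaping it, since :recover defaults to true
#   node.normalize_system_path '../secrets.adoc'
#   # => "/docs/secrets.adoc"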
def normalize_system_path target, start = nil, jail = nil, opts = {} if (doc = @document).safe < SafeMode::SAFE if start start = ::File.join doc.base_dir, start unless doc.path_resolver.root? start else start = doc.base_dir end else start ||= doc.base_dir jail ||= doc.base_dir end doc.path_resolver.system_path target, start, jail, opts end # Public: Normalize the web path using the PathResolver. # # See {PathResolver#web_path} for details about path resolution and encoding. # # target - the String target path # start - the String start (i.e, parent) path (optional, default: nil) # preserve_uri_target - a Boolean indicating whether target should be preserved if contains a URI (default: true) # # Returns the resolved [String] path def normalize_web_path(target, start = nil, preserve_uri_target = true) if preserve_uri_target && (Helpers.uriish? target) Helpers.encode_spaces_in_uri target else @document.path_resolver.web_path target, start end end # Public: Read the contents of the file at the specified path. # This method assumes that the path is safe to read. It checks # that the file is readable before attempting to read it. # # path - the String path from which to read the contents # opts - a Hash of options to control processing (default: {}) # * :warn_on_failure a Boolean that controls whether a warning # is issued if the file cannot be read (default: false) # * :normalize a Boolean that controls whether the lines # are normalized and coerced to UTF-8 (default: false) # # Returns the [String] content of the file at the specified path, or nil # if the file does not exist. def read_asset path, opts = {} # remap opts for backwards compatibility opts = { warn_on_failure: (opts != false) } unless ::Hash === opts if ::File.readable? path # QUESTION should we chomp content if normalize is false? opts[:normalize] ? ((Helpers.prepare_source_string ::File.read path, mode: FILE_READ_MODE).join LF) : (::File.read path, mode: FILE_READ_MODE) elsif opts[:warn_on_failure] logger.warn %(#{(attr 'docfile') || '<stdin>'}: #{opts[:label] || 'file'} does not exist or cannot be read: #{path}) nil end end # Public: Resolve the URI or system path to the specified target, then read and return its contents # # The URI or system path of the target is first resolved. If the resolved path is a URI, read the # contents from the URI if the allow-uri-read attribute is set, enabling caching if the cache-uri # attribute is also set. If the resolved path is not a URI, read the contents of the file from the # file system. If the normalize option is set, the data will be normalized. # # target - The URI or local path from which to read the data. # opts - a Hash of options to control processing (default: {}) # * :label the String label of the target to use in warning messages (default: 'asset') # * :normalize a Boolean that indicates whether the data should be normalized (default: false) # * :start the String relative base path to use when resolving the target (default: nil) # * :warn_on_failure a Boolean that indicates whether warnings are issued if the target cannot be read (default: true) # * :warn_if_empty a Boolean that indicates whether a warning is issued if contents of target is empty (default: false) # Returns the contents of the resolved target or nil if the resolved target cannot be read # -- # TODO refactor other methods in this class to use this method were possible (repurposing if necessary) def read_contents target, opts = {} doc = @document if (Helpers.uriish? 
target) || ((start = opts[:start]) && (Helpers.uriish? start) && (target = doc.path_resolver.web_path target, start)) if doc.attr? 'allow-uri-read' Helpers.require_library 'open-uri/cached', 'open-uri-cached' if doc.attr? 'cache-uri' begin if opts[:normalize] contents = (Helpers.prepare_source_string ::OpenURI.open_uri(target, URI_READ_MODE) {|f| f.read }).join LF else contents = ::OpenURI.open_uri(target, URI_READ_MODE) {|f| f.read } end rescue logger.warn %(could not retrieve contents of #{opts[:label] || 'asset'} at URI: #{target}) if opts.fetch :warn_on_failure, true end elsif opts.fetch :warn_on_failure, true logger.warn %(cannot retrieve contents of #{opts[:label] || 'asset'} at URI: #{target} (allow-uri-read attribute not enabled)) end else target = normalize_system_path target, opts[:start], nil, target_name: (opts[:label] || 'asset') contents = read_asset target, normalize: opts[:normalize], warn_on_failure: (opts.fetch :warn_on_failure, true), label: opts[:label] end logger.warn %(contents of #{opts[:label] || 'asset'} is empty: #{target}) if contents && opts[:warn_if_empty] && contents.empty? contents end # Deprecated: Check whether the specified String is a URI by # matching it against the Asciidoctor::UriSniffRx regex. # # In use by Asciidoctor PDF # # @deprecated Use Helpers.uriish? instead def is_uri? str Helpers.uriish? str end end end ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������asciidoctor-2.0.20/lib/asciidoctor/attribute_list.rb������������������������������������������������0000664�0000000�0000000�00000013624�14431350326�0022541�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������# frozen_string_literal: true module Asciidoctor # Public: Handles parsing AsciiDoc attribute lists into a Hash of key/value # pairs. By default, attributes must each be separated by a comma and quotes # may be used around the value. If a key is not detected, the value is assigned # to a 1-based positional key, The positional attributes can be "rekeyed" when # given a positional_attrs array either during parsing or after the fact. # # Examples # # attrlist = Asciidoctor::AttributeList.new('astyle') # # attrlist.parse # => { 0 => 'astyle' } # # attrlist.rekey(['style']) # => { 'style' => 'astyle' } # # attrlist = Asciidoctor::AttributeList.new('quote, Famous Person, Famous Book (2001)') # # attrlist.parse(['style', 'attribution', 'citetitle']) # => { 'style' => 'quote', 'attribution' => 'Famous Person', 'citetitle' => 'Famous Book (2001)' } # class AttributeList APOS = '\'' BACKSLASH = '\\' QUOT = '"' # Public: Regular expressions for detecting the boundary of a value BoundaryRx = { QUOT => /.*?[^\\](?=")/, APOS => /.*?[^\\](?=')/, ',' => /.*?(?=[ \t]*(,|$))/ } # Public: Regular expressions for unescaping quoted characters EscapedQuotes = { QUOT => '\\"', APOS => '\\\'' } # Public: A regular expression for an attribute name (approx. 
name token from XML) # TODO named attributes cannot contain dash characters NameRx = /#{CG_WORD}[#{CC_WORD}\-.]*/ BlankRx = /[ \t]+/ # Public: Regular expressions for skipping delimiters SkipRx = { ',' => /[ \t]*(,|$)/ } def initialize source, block = nil, delimiter = ',' @scanner = ::StringScanner.new source @block = block @delimiter = delimiter @delimiter_skip_pattern = SkipRx[delimiter] @delimiter_boundary_pattern = BoundaryRx[delimiter] @attributes = nil end def parse_into attributes, positional_attrs = [] attributes.update parse positional_attrs end def parse positional_attrs = [] # return if already parsed return @attributes if @attributes @attributes = {} index = 0 while parse_attribute index, positional_attrs break if @scanner.eos? skip_delimiter index += 1 end @attributes end def rekey positional_attrs AttributeList.rekey @attributes, positional_attrs end def self.rekey attributes, positional_attrs positional_attrs.each_with_index do |key, index| if key && (val = attributes[index + 1]) # QUESTION should we delete the positional key? attributes[key] = val end end attributes end private def parse_attribute index, positional_attrs continue = true skip_blank case @scanner.peek 1 # example: "quote" || "foo when QUOT name = parse_attribute_value @scanner.get_byte # example: 'quote' || 'foo when APOS name = parse_attribute_value @scanner.get_byte single_quoted = true unless name.start_with? APOS else skipped = ((name = scan_name) && skip_blank) || 0 if @scanner.eos? return unless name || (@scanner.string.rstrip.end_with? @delimiter) # example: quote (at eos) continue = nil # example: quote, elsif (c = @scanner.get_byte) == @delimiter @scanner.unscan elsif name # example: foo=... if c == '=' skip_blank case (c = @scanner.get_byte) # example: foo="bar" || foo="ba\"zaar" || foo="bar when QUOT value = parse_attribute_value c # example: foo='bar' || foo='ba\'zaar' || foo='ba"zaar' || foo='bar when APOS value = parse_attribute_value c single_quoted = true unless value.start_with? APOS # example: foo=, when @delimiter value = '' @scanner.unscan # example: foo= (at eos) when nil value = '' # example: foo=bar || foo=None else value = %(#{c}#{scan_to_delimiter}) return true if value == 'None' end # example: foo bar else name = %(#{name}#{' ' * skipped}#{c}#{scan_to_delimiter}) end # example: =foo= || !foo else name = %(#{c}#{scan_to_delimiter}) end end if value # example: options="opt1,opt2,opt3" || opts="opts1,opt2,opt3" case name when 'options', 'opts' if value.include? ',' value = value.delete ' ' if value.include? ' ' (value.split ',').each {|opt| @attributes[%(#{opt}-option)] = '' unless opt.empty? } else @attributes[%(#{value}-option)] = '' unless value.empty? end else if single_quoted && @block case name when 'title', 'reftext' @attributes[name] = value else @attributes[name] = @block.apply_subs value end else @attributes[name] = value end end else name = @block.apply_subs name if single_quoted && @block if (positional_attr_name = positional_attrs[index]) && name @attributes[positional_attr_name] = name end # QUESTION should we assign the positional key even when it's claimed by a positional attribute? @attributes[index + 1] = name end continue end def parse_attribute_value quote # empty quoted value if (@scanner.peek 1) == quote @scanner.get_byte '' elsif (value = scan_to_quote quote) @scanner.get_byte (value.include? BACKSLASH) ? 
(value.gsub EscapedQuotes[quote], quote) : value # leading quote only else %(#{quote}#{scan_to_delimiter}) end end def skip_blank @scanner.skip BlankRx end def skip_delimiter @scanner.skip @delimiter_skip_pattern end def scan_name @scanner.scan NameRx end def scan_to_delimiter @scanner.scan @delimiter_boundary_pattern end def scan_to_quote quote @scanner.scan BoundaryRx[quote] end end end ������������������������������������������������������������������������������������������������������������asciidoctor-2.0.20/lib/asciidoctor/block.rb���������������������������������������������������������0000664�0000000�0000000�00000012715�14431350326�0020575�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������# frozen_string_literal: true module Asciidoctor # Public: Methods for managing AsciiDoc content blocks. # # Examples # # block = Asciidoctor::Block.new(parent, :paragraph, source: '_This_ is a <test>') # block.content # => "<em>This</em> is a <test>" class Block < AbstractBlock (DEFAULT_CONTENT_MODEL = { # TODO should probably fill in all known blocks audio: :empty, image: :empty, listing: :verbatim, literal: :verbatim, stem: :raw, open: :compound, page_break: :empty, pass: :raw, thematic_break: :empty, video: :empty, }).default = :simple # Public: Create alias for context to be consistent w/ AsciiDoc alias blockname context # Public: Get/Set the original Array content for this block, if applicable attr_accessor :lines # Public: Initialize an Asciidoctor::Block object. # # parent - The parent AbstractBlock with a compound content model to which this Block will be appended. # context - The Symbol context name for the type of content (e.g., :paragraph). # opts - a Hash of options to customize block initialization: (default: {}) # * :content_model indicates whether blocks can be nested in this Block (:compound), otherwise # how the lines should be processed (:simple, :verbatim, :raw, :empty). (default: :simple) # * :attributes a Hash of attributes (key/value pairs) to assign to this Block. (default: {}) # * :source a String or Array of raw source for this Block. (default: nil) # # IMPORTANT: If you don't specify the `:subs` option, you must explicitly call # the `commit_subs` method to resolve and assign the substitutions to this # block (which are resolved from the `subs` attribute, if specified, or the # default substitutions based on this block's context). If you want to use the # default subs for a block, pass the option `subs: :default`. You can # override the default subs using the `:default_subs` option. #-- # QUESTION should we store source_data as lines for blocks that have compound content models? def initialize parent, context, opts = {} super @content_model = opts[:content_model] || DEFAULT_CONTENT_MODEL[context] if opts.key? 
:subs # FIXME feels funky; we have to be defensive to get commit_subs to honor override # FIXME does not resolve substitution groups inside Array (e.g., [:normal]) if (subs = opts[:subs]) case subs # e.g., subs: :default # subs attribute is honored; falls back to opts[:default_subs], then built-in defaults based on context when :default @default_subs = opts[:default_subs] # e.g., subs: [:quotes] # subs attribute is not honored when ::Array @default_subs = subs.drop 0 @attributes.delete 'subs' # e.g., subs: :normal or subs: 'normal' # subs attribute is not honored else @default_subs = nil @attributes['subs'] = subs.to_s end # resolve the subs eagerly only if subs option is specified # QUESTION should we skip subsequent calls to commit_subs? commit_subs # e.g., subs: nil else # NOTE @subs is initialized as empty array by super constructor # prevent subs from being resolved @default_subs = [] @attributes.delete 'subs' end # defer subs resolution; subs attribute is honored else # NOTE @subs is initialized as empty array by super constructor # QUESTION should we honor :default_subs option (i.e., @default_subs = opts[:default_subs])? @default_subs = nil end if (raw_source = opts[:source]).nil_or_empty? @lines = [] elsif ::String === raw_source @lines = Helpers.prepare_source_string raw_source else @lines = raw_source.drop 0 end end # Public: Get the converted result of the child blocks by converting the # children appropriate to content model that this block supports. # # Examples # # doc = Asciidoctor::Document.new # block = Asciidoctor::Block.new(doc, :paragraph, # source: '_This_ is what happens when you <meet> a stranger in the <alps>!') # block.content # => "<em>This</em> is what happens when you <meet> a stranger in the <alps>!" def content case @content_model when :compound super when :simple apply_subs((@lines.join LF), @subs) when :verbatim, :raw # QUESTION could we use strip here instead of popping empty lines? # maybe apply_subs can know how to strip whitespace? result = apply_subs @lines, @subs if result.size < 2 result[0] || '' else result.shift while (first = result[0]) && first.rstrip.empty? result.pop while (last = result[-1]) && last.rstrip.empty? result.join LF end else logger.warn %(unknown content model '#{@content_model}' for block: #{self}) unless @content_model == :empty nil end end # Public: Returns the preprocessed source of this block # # Returns the a String containing the lines joined together or empty string # if there are no lines def source @lines.join LF end def to_s content_summary = @content_model == :compound ? %(blocks: #{@blocks.size}) : %(lines: #{@lines.size}) %(#<#{self.class}@#{object_id} {context: #{@context.inspect}, content_model: #{@content_model.inspect}, style: #{@style.inspect}, #{content_summary}}>) end end end ���������������������������������������������������asciidoctor-2.0.20/lib/asciidoctor/callouts.rb������������������������������������������������������0000664�0000000�0000000�00000006101�14431350326�0021321�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������# frozen_string_literal: true module Asciidoctor # Public: Maintains a catalog of callouts and their associations. 
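#
# Examples
#
#   # An illustrative sketch of the parsing phase; the list item ordinals are
#   # assumptions for a document that contains two callout lists.
#   callouts = Asciidoctor::Callouts.new
#   callouts.register 1
#   # => "CO1-1"
#   callouts.register 2
#   # => "CO1-2"
#   callouts.next_list
#   callouts.register 1
#   # => "CO2-1"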
class Callouts def initialize @lists = [] @list_index = 0 next_list end # Public: Register a new callout for the given list item ordinal. # # Generates a unique id for this callout based on the index of the next callout # list in the document and the index of this callout since the end of the last # callout list. # # li_ordinal - the Integer ordinal (1-based) of the list item to which this # callout is to be associated # # Examples # # callouts = Asciidoctor::Callouts.new # callouts.register(1) # # => "CO1-1" # callouts.next_list # callouts.register(2) # # => "CO2-1" # # Returns The unique String id of this callout def register li_ordinal current_list << { ordinal: li_ordinal.to_i, id: (id = generate_next_callout_id) } @co_index += 1 id end # Public: Get the next callout index in the document # # Reads the next callout index in the document and advances the pointer. # This method is used during conversion to retrieve the unique id of the # callout that was generated during parsing. # # Returns The unique String id of the next callout in the document def read_next_id id = nil list = current_list if @co_index <= list.size id = list[@co_index - 1][:id] end @co_index += 1 id end # Public: Get a space-separated list of callout ids for the specified list item # # li_ordinal - the Integer ordinal (1-based) of the list item for which to # retrieve the callouts # # Returns A space-separated String of callout ids associated with the specified list item def callout_ids li_ordinal current_list.map {|it| it[:ordinal] == li_ordinal ? %(#{it[:id]} ) : '' }.join.chop end # Public: The current list for which callouts are being collected # # Returns The Array of callouts at the position of the list index pointer def current_list @lists[@list_index - 1] end # Public: Advance to the next callout list in the document # # Returns nothing def next_list @list_index += 1 if @lists.size < @list_index @lists << [] end @co_index = 1 nil end # Public: Rewind the list index pointer, intended to be used when switching # from the parsing to conversion phase. 
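#
# Examples
#
#   # A sketch of the conversion phase; assumes two callouts were registered in
#   # the first callout list while parsing (so ids "CO1-1" and "CO1-2" exist).
#   callouts.rewind
#   callouts.read_next_id
#   # => "CO1-1"
#   callouts.read_next_id
#   # => "CO1-2"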
# # Returns nothing def rewind @list_index = 1 @co_index = 1 nil end private # Internal: Generate a unique id for the callout based on the internal indexes # # Returns A unique String id for this callout def generate_next_callout_id generate_callout_id @list_index, @co_index end # Internal: Generate a unique id for the callout at the specified position # # list_index - The 1-based Integer index of the callout list within the document # co_index - The 1-based Integer index of the callout since the end of the last callout list # # Returns A unique String id for a callout def generate_callout_id list_index, co_index %(CO#{list_index}-#{co_index}) end end end ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������asciidoctor-2.0.20/lib/asciidoctor/cli.rb�����������������������������������������������������������0000664�0000000�0000000�00000000157�14431350326�0020247�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������# frozen_string_literal: true require 'optparse' require_relative 'cli/options' require_relative 'cli/invoker' �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������asciidoctor-2.0.20/lib/asciidoctor/cli/�������������������������������������������������������������0000775�0000000�0000000�00000000000�14431350326�0017717�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������asciidoctor-2.0.20/lib/asciidoctor/cli/invoker.rb���������������������������������������������������0000664�0000000�0000000�00000012753�14431350326�0021731�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������# frozen_string_literal: true module Asciidoctor module Cli # Public Invocation class for starting Asciidoctor via CLI class Invoker include Logging attr_reader :options attr_reader :documents attr_reader :code def initialize *options @documents = [] @out = nil @err = nil @code = 0 options = options.flatten case (first_option = options[0]) when Options @options = first_option when ::Hash 
@options = Options.new first_option else if ::Integer === (result = Options.parse! options) @code = result @options = nil else @options = result end end end def invoke! return unless @options old_logger = old_logger_level = nil old_verbose, $VERBOSE = $VERBOSE, @options[:warnings] opts = {} infiles = [] outfile = nil abs_srcdir_posix = nil non_posix_env = ::File::ALT_SEPARATOR == RS err = @err || $stderr show_timings = false # NOTE in Ruby 2.7, RubyGems sets SOURCE_DATE_EPOCH if it's not set ::ENV.delete 'SOURCE_DATE_EPOCH' if (::ENV.key? 'IGNORE_SOURCE_DATE_EPOCH') && (::Gem.respond_to? :source_date_epoch) @options.map do |key, val| case key when :input_files infiles = val when :output_file outfile = val when :source_dir if val abs_srcdir_posix = ::File.expand_path val abs_srcdir_posix = abs_srcdir_posix.tr RS, FS if non_posix_env && (abs_srcdir_posix.include? RS) end when :destination_dir opts[:to_dir] = val if val when :attributes # NOTE processor will dup attributes internally opts[:attributes] = val when :timings show_timings = val when :trace # no assignment when :verbose case val when 0 $VERBOSE = nil old_logger, LoggerManager.logger = logger, NullLogger.new when 2 old_logger_level, logger.level = logger.level, ::Logger::Severity::DEBUG end else opts[key] = val unless val.nil? end end if infiles.size == 1 if (infile0 = infiles[0]) == '-' outfile ||= infile0 stdin = true elsif ::File.pipe? infile0 outfile ||= '-' end end if outfile == '-' (tofile = @out) || ((tofile = $stdout).set_encoding UTF_8) elsif outfile opts[:mkdirs] = true tofile = outfile else opts[:mkdirs] = true # automatically calculate outfile based on infile end if stdin # allows use of block to supply stdin, particularly useful for tests # NOTE set_encoding returns nil on JRuby 9.1 block_given? ? (input = yield) : ((input = $stdin).set_encoding UTF_8, UTF_8) input_opts = opts.merge to_file: tofile if show_timings @documents << (::Asciidoctor.convert input, (input_opts.merge timings: (timings = Timings.new))) timings.print_report err, '-' else @documents << (::Asciidoctor.convert input, input_opts) end else infiles.each do |infile| input_opts = opts.merge to_file: tofile if abs_srcdir_posix && (input_opts.key? :to_dir) abs_indir = ::File.dirname ::File.expand_path infile if non_posix_env abs_indir_posix = (abs_indir.include? RS) ? (abs_indir.tr RS, FS) : abs_indir else abs_indir_posix = abs_indir end if abs_indir_posix.start_with? %(#{abs_srcdir_posix}/) input_opts[:to_dir] += abs_indir.slice abs_srcdir_posix.length, abs_indir.length end end if show_timings @documents << (::Asciidoctor.convert_file infile, (input_opts.merge timings: (timings = Timings.new))) timings.print_report err, infile else @documents << (::Asciidoctor.convert_file infile, input_opts) end end end @code = 1 if (logger.respond_to? :max_severity) && logger.max_severity && logger.max_severity >= opts[:failure_level] rescue ::Exception => e if ::SignalException === e @code = e.signo # add extra newline if Ctrl+C is used err.puts if ::Interrupt === e else @code = (e.respond_to? :status) ? e.status : 1 if @options[:trace] raise e else err.puts ::RuntimeError === e ? %(#{e.message} (#{e.class})) : e.message err.puts ' Use --trace to show backtrace' end end nil ensure $VERBOSE = old_verbose if old_logger LoggerManager.logger = old_logger elsif old_logger_level logger.level = old_logger_level end end def document @documents[0] end def redirect_streams out, err = nil @out = out @err = err end def read_output @out ? @out.string : '' end def read_error @err ? 
@err.string : '' end def reset_streams @out = nil @err = nil end end end end ���������������������asciidoctor-2.0.20/lib/asciidoctor/cli/options.rb���������������������������������������������������0000664�0000000�0000000�00000034536�14431350326�0021752�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������# frozen_string_literal: true module Asciidoctor module Cli FS = ?/ RS = ?\\ # Public: List of options that can be specified on the command line class Options < ::Hash def initialize(options = {}) self[:attributes] = options[:attributes] || {} self[:input_files] = options[:input_files] self[:output_file] = options[:output_file] self[:safe] = options[:safe] || SafeMode::UNSAFE self[:standalone] = options.fetch :standalone, true self[:template_dirs] = options[:template_dirs] self[:template_engine] = options[:template_engine] self[:attributes]['doctype'] = options[:doctype] if options[:doctype] self[:attributes]['backend'] = options[:backend] if options[:backend] self[:eruby] = options[:eruby] self[:verbose] = options.fetch :verbose, 1 self[:warnings] = options.fetch :warnings, false self[:load_paths] = options[:load_paths] self[:requires] = options[:requires] self[:base_dir] = options[:base_dir] self[:source_dir] = options[:source_dir] self[:destination_dir] = options[:destination_dir] self[:failure_level] = ::Logger::Severity::FATAL self[:trace] = false self[:timings] = false end def self.parse!(args) Options.new.parse! args end def parse!(args) opts_parser = ::OptionParser.new do |opts| # NOTE don't use squiggly heredoc to maintain compatibility with Ruby < 2.3 opts.banner = <<-'EOS'.gsub ' ', '' Usage: asciidoctor [OPTION]... FILE... Convert the AsciiDoc input FILE(s) to the backend output format (e.g., HTML 5, DocBook 5, etc.) Unless specified otherwise, the output is written to a file whose name is derived from the input file. Application log messages are printed to STDERR. 
Example: asciidoctor input.adoc EOS opts.on('-b', '--backend BACKEND', 'set backend output format: [html5, xhtml5, docbook5, manpage] (default: html5)', 'additional backends are supported via extended converters (e.g., pdf, epub3)') do |backend| self[:attributes]['backend'] = backend end opts.on('-d', '--doctype DOCTYPE', ['article', 'book', 'manpage', 'inline'], 'document type to use when converting document: [article, book, manpage, inline] (default: article)') do |doctype| self[:attributes]['doctype'] = doctype end opts.on('-e', '--embedded', 'suppress enclosing document structure and output an embedded document (default: false)') do self[:standalone] = false end opts.on('-o', '--out-file FILE', 'output file (default: based on path of input file); use - to output to STDOUT') do |output_file| self[:output_file] = output_file end opts.on('--safe', 'set safe mode level to safe (default: unsafe)', 'enables include directives, but prevents access to ancestor paths of source file', 'provided for compatibility with the asciidoc command') do self[:safe] = SafeMode::SAFE end opts.on('-S', '--safe-mode SAFE_MODE', (safe_mode_names = SafeMode.names), %(set safe mode level explicitly: [#{safe_mode_names.join ', '}] (default: unsafe)), 'disables potentially dangerous macros in source files, such as include::[]') do |name| self[:safe] = SafeMode.value_for_name name end opts.on('-s', '--no-header-footer', 'suppress enclosing document structure and output an embedded document (default: false)') do self[:standalone] = false end opts.on('-n', '--section-numbers', 'auto-number section titles in the HTML backend; disabled by default') do self[:attributes]['sectnums'] = '' end opts.on('--eruby ERUBY', ['erb', 'erubi', 'erubis'], 'specify eRuby implementation to use when rendering custom ERB templates: [erb, erubi, erubis] (default: erb)') do |eruby| self[:eruby] = eruby end opts.on('-a', '--attribute name[=value]', 'a document attribute to set in the form of name, name!, or name=value pair', 'this attribute takes precedence over the same attribute defined in the source document', 'unless either the name or value ends in @ (i.e., name@=value or name=value@)') do |attr| next if (attr = attr.rstrip).empty? || attr == '=' begin attr = attr.encode UTF_8 rescue ::EncodingError attr = attr.force_encoding UTF_8 end unless attr.encoding == UTF_8 name, _, val = attr.partition '=' self[:attributes][name] = val end opts.on('-T', '--template-dir DIR', 'a directory containing custom converter templates that override the built-in converter (requires tilt gem)', 'may be specified multiple times') do |template_dir| if self[:template_dirs].nil? 
self[:template_dirs] = [template_dir] elsif ::Array === self[:template_dirs] self[:template_dirs] << template_dir else self[:template_dirs] = [self[:template_dirs], template_dir] end end opts.on('-E', '--template-engine NAME', 'template engine to use for the custom converter templates (loads gem on demand)') do |template_engine| self[:template_engine] = template_engine end opts.on('-B', '--base-dir DIR', 'base directory containing the document and resources (default: directory of source file)') do |base_dir| self[:base_dir] = base_dir end opts.on('-R', '--source-dir DIR', 'source root directory (used for calculating path in destination directory)') do |src_dir| self[:source_dir] = src_dir end opts.on('-D', '--destination-dir DIR', 'destination output directory (default: directory of source file)') do |dest_dir| self[:destination_dir] = dest_dir end opts.on('-IDIRECTORY', '--load-path DIRECTORY', 'add a directory to the $LOAD_PATH', 'may be specified more than once') do |path| (self[:load_paths] ||= []).concat(path.split ::File::PATH_SEPARATOR) end opts.on('-rLIBRARY', '--require LIBRARY', 'require the specified library before executing the processor (using require)', 'may be specified more than once') do |path| (self[:requires] ||= []).concat(path.split ',') end opts.on('--failure-level LEVEL', %w(info INFO warning WARNING error ERROR fatal FATAL), 'set minimum log level that yields a non-zero exit code: [INFO, WARN, ERROR, FATAL] (default: FATAL)') do |level| level = 'WARN' if (level = level.upcase) == 'WARNING' self[:failure_level] = ::Logger::Severity.const_get level end opts.on('-q', '--quiet', 'silence application log messages and script warnings (default: false)') do self[:verbose] = 0 end opts.on('--trace', 'include backtrace information when reporting errors (default: false)') do self[:trace] = true end opts.on('-v', '--verbose', 'directs application messages logged at DEBUG or INFO level to STDERR (default: false)') do self[:verbose] = 2 end opts.on('-w', '--warnings', 'turn on script warnings (default: false)') do self[:warnings] = true end opts.on('-t', '--timings', 'print timings report (default: false)') do self[:timings] = true end opts.on_tail('-h', '--help [TOPIC]', 'print a help message', 'show this usage if TOPIC is not specified or recognized', 'show an overview of the AsciiDoc syntax if TOPIC is syntax', 'dump the Asciidoctor man page (in troff/groff format) if TOPIC is manpage') do |topic| case topic # use `asciidoctor -h manpage | man -l -` to view with man pager when 'manpage' if (manpage_path = ::ENV['ASCIIDOCTOR_MANPAGE_PATH']) if ::File.exist? manpage_path if manpage_path.end_with? '.gz' require 'zlib' unless defined? ::Zlib::GzipReader $stdout.puts ::Zlib::GzipReader.open(manpage_path) {|gz| gz.read } else $stdout.puts ::File.read manpage_path end else $stderr.puts %(asciidoctor: FAILED: manual page not found: #{manpage_path}) return 1 end # Ruby 2.3 requires the extra brackets around the ::File.join method call elsif ::File.exist? (manpage_path = (::File.join ROOT_DIR, 'man', 'asciidoctor.1')) $stdout.puts ::File.read manpage_path else manpage_path = %x(man -w asciidoctor).chop rescue '' if manpage_path.empty? $stderr.puts 'asciidoctor: FAILED: manual page not found; try `man asciidoctor`' return 1 elsif manpage_path.end_with? '.gz' require 'zlib' unless defined? 
::Zlib::GzipReader $stdout.puts ::Zlib::GzipReader.open(manpage_path) {|gz| gz.read } else $stdout.puts ::File.read manpage_path end end when 'syntax' # Ruby 2.3 requires the extra brackets around the ::File.join method call if ::File.exist? (syntax_path = (::File.join ROOT_DIR, 'data', 'reference', 'syntax.adoc')) $stdout.puts ::File.read syntax_path else $stderr.puts 'asciidoctor: FAILED: syntax page not found; visit https://asciidoctor.org/docs' return 1 end else $stdout.puts opts end return 0 end opts.on_tail('-V', '--version', 'display the version and runtime environment (or -v if no other flags or arguments)') do return print_version $stdout end end old_verbose, $VERBOSE = $VERBOSE, (args.include? '-w') opts_parser.parse! args if args.empty? if self[:verbose] == 2 # -v flag was specified return print_version $stdout else $stderr.puts opts_parser return 1 end end infiles = [] # shave off the file to process so that options errors appear correctly if args.size == 1 && args[0] == '-' infiles << args.pop else args.each do |file| if file.start_with? '-' # warn, but don't panic; we may have enough to proceed, so we won't force a failure $stderr.puts %(asciidoctor: WARNING: extra arguments detected (unparsed arguments: '#{args.join "', '"}') or incorrect usage of stdin) elsif ::File.file? file infiles << file # NOTE only attempt to glob if file is not found else # Tilt backslashes in Windows paths the Ruby-friendly way if ::File::ALT_SEPARATOR == RS && (file.include? RS) file = file.tr RS, FS end if (matches = ::Dir.glob file).empty? # NOTE if no matches, assume it's just a missing file and proceed infiles << file else infiles.concat matches end end end end infiles.reject {|file| file == '-' }.each do |file| begin fstat = ::File.stat file if fstat.file? || fstat.pipe? unless fstat.readable? $stderr.puts %(asciidoctor: FAILED: input file #{file} is not readable) return 1 end else $stderr.puts %(asciidoctor: FAILED: input path #{file} is a #{fstat.ftype}, not a file) return 1 end rescue ::Errno::ENOENT $stderr.puts %(asciidoctor: FAILED: input file #{file} is missing) return 1 end end self[:input_files] = infiles delete :attributes if self[:attributes].empty? if self[:template_dirs] begin require 'tilt' unless defined? ::Tilt.new rescue ::LoadError raise $! if self[:trace] $stderr.puts 'asciidoctor: FAILED: \'tilt\' could not be loaded' $stderr.puts ' You must have the tilt gem installed (gem install tilt) to use custom backend templates' $stderr.puts ' Use --trace to show backtrace' return 1 rescue ::SystemExit # not permitted here end end if (load_paths = self[:load_paths]) load_paths.uniq! load_paths.reverse_each {|path| $:.unshift ::File.expand_path path } end if (requires = self[:requires]) requires.uniq! requires.each do |path| begin require path rescue ::LoadError raise $! 
if self[:trace] $stderr.puts %(asciidoctor: FAILED: '#{path}' could not be loaded) $stderr.puts ' Use --trace to show backtrace' return 1 rescue ::SystemExit # not permitted here end end end self rescue ::OptionParser::MissingArgument $stderr.puts %(asciidoctor: option #{$!.message}) $stdout.puts opts_parser 1 rescue ::OptionParser::InvalidOption, ::OptionParser::InvalidArgument $stderr.puts %(asciidoctor: #{$!.message}) $stdout.puts opts_parser 1 ensure $VERBOSE = old_verbose end def print_version os = $stdout os.puts %(Asciidoctor #{::Asciidoctor::VERSION} [https://asciidoctor.org]) encoding_info = { 'lc' => 'locale', 'fs' => 'filesystem', 'in' => 'internal', 'ex' => 'external' }.map do |k, v| %(#{k}:#{v == 'internal' ? (::File.open(__FILE__) {|f| f.getc.encoding }) : (::Encoding.find v)}) end os.puts %(Runtime Environment (#{::RUBY_DESCRIPTION}) (#{encoding_info.join ' '})) 0 end end end end ������������������������������������������������������������������������������������������������������������������������������������������������������������������asciidoctor-2.0.20/lib/asciidoctor/convert.rb�������������������������������������������������������0000664�0000000�0000000�00000022332�14431350326�0021157�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������# frozen_string_literal: true module Asciidoctor class << self # Public: Parse the AsciiDoc source input into an Asciidoctor::Document and # convert it to the specified backend format. # # Accepts input as an IO (or StringIO), String or String Array object. If the # input is a File, the object is expected to be opened for reading and is not # closed afterwards by this method. Information about the file (filename, # directory name, etc) gets assigned to attributes on the Document object. # # If the :to_file option is true, and the input is a File, the output is # written to a file adjacent to the input file, having an extension that # corresponds to the backend format. Otherwise, if the :to_file option is # specified, the file is written to that file. If :to_file is not an absolute # path, it is resolved relative to :to_dir, if given, otherwise the # Document#base_dir. If the target directory does not exist, it will not be # created unless the :mkdirs option is set to true. If the file cannot be # written because the target directory does not exist, or because it falls # outside of the Document#base_dir in safe mode, an IOError is raised. # # If the output is going to be written to a file, the header and footer are # included unless specified otherwise (writing to a file implies creating a # standalone document). Otherwise, the header and footer are not included by # default and the converted result is returned. # # input - the String AsciiDoc source filename # options - a String, Array or Hash of options to control processing (default: {}) # String and Array values are converted into a Hash. # See Asciidoctor::Document#initialize for details about options. 
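#
# Examples
#
#   # A usage sketch; the input string, option values, and output file name are
#   # illustrative assumptions, and the HTML shown is the typical embedded output
#   # of the built-in html5 converter.
#   Asciidoctor.convert 'Hello, _Asciidoctor_!', safe: :safe
#   # => "<div class=\"paragraph\">\n<p>Hello, <em>Asciidoctor</em>!</p>\n</div>"
#
#   # writing to a file implies a standalone document; the Document object is
#   # returned and the converted output is written to out.html
#   Asciidoctor.convert 'Hello, _Asciidoctor_!', safe: :safe, to_file: 'out.html'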
# # Returns the Document object if the converted String is written to a # file, otherwise the converted String def convert input, options = {} (options = options.merge).delete :parse to_dir = options.delete :to_dir mkdirs = options.delete :mkdirs case (to_file = options.delete :to_file) when true, nil unless (write_to_target = to_dir) sibling_path = ::File.absolute_path input.path if ::File === input end to_file = nil when false to_file = nil when '/dev/null' return load input, options else options[:to_file] = write_to_target = to_file unless (stream_output = to_file.respond_to? :write) end unless options.key? :standalone if sibling_path || write_to_target options[:standalone] = options.fetch :header_footer, true elsif options.key? :header_footer options[:standalone] = options[:header_footer] end end # NOTE outfile may be controlled by document attributes, so resolve outfile after loading if sibling_path options[:to_dir] = outdir = ::File.dirname sibling_path elsif write_to_target if to_dir if to_file options[:to_dir] = ::File.dirname ::File.expand_path to_file, to_dir else options[:to_dir] = ::File.expand_path to_dir end elsif to_file options[:to_dir] = ::File.dirname ::File.expand_path to_file end end # NOTE :to_dir is always set when outputting to a file # NOTE :to_file option only passed if assigned an explicit path doc = load input, options if sibling_path # write to file in same directory outfile = ::File.join outdir, %(#{doc.attributes['docname']}#{doc.outfilesuffix}) raise ::IOError, %(input file and output file cannot be the same: #{outfile}) if outfile == sibling_path elsif write_to_target # write to explicit file or directory working_dir = (options.key? :base_dir) ? (::File.expand_path options[:base_dir]) : ::Dir.pwd # QUESTION should the jail be the working_dir or doc.base_dir??? jail = doc.safe >= SafeMode::SAFE ? working_dir : nil if to_dir outdir = doc.normalize_system_path(to_dir, working_dir, jail, target_name: 'to_dir', recover: false) if to_file outfile = doc.normalize_system_path(to_file, outdir, nil, target_name: 'to_dir', recover: false) # reestablish outdir as the final target directory (in the case to_file had directory segments) outdir = ::File.dirname outfile else outfile = ::File.join outdir, %(#{doc.attributes['docname']}#{doc.outfilesuffix}) end elsif to_file outfile = doc.normalize_system_path(to_file, working_dir, jail, target_name: 'to_dir', recover: false) # establish outdir as the final target directory (in the case to_file had directory segments) outdir = ::File.dirname outfile end if ::File === input && outfile == (::File.absolute_path input.path) raise ::IOError, %(input file and output file cannot be the same: #{outfile}) end if mkdirs Helpers.mkdir_p outdir else # NOTE we intentionally refer to the directory as it was passed to the API raise ::IOError, %(target directory does not exist: #{to_dir} (hint: set :mkdirs option)) unless ::File.directory? outdir end else # write to stream outfile = to_file outdir = nil end if outfile && !stream_output output = doc.convert 'outfile' => outfile, 'outdir' => outdir else output = doc.convert end if outfile doc.write output, outfile # NOTE document cannot control this behavior if safe >= SafeMode::SERVER # NOTE skip if stylesdir is a URI if !stream_output && doc.safe < SafeMode::SECURE && (doc.attr? 'linkcss') && (doc.attr? 'copycss') && (doc.basebackend? 'html') && !((stylesdir = (doc.attr 'stylesdir')) && (Helpers.uriish? stylesdir)) if (stylesheet = doc.attr 'stylesheet') if DEFAULT_STYLESHEET_KEYS.include? 
stylesheet copy_asciidoctor_stylesheet = true elsif !(Helpers.uriish? stylesheet) copy_user_stylesheet = true end end copy_syntax_hl_stylesheet = (syntax_hl = doc.syntax_highlighter) && (syntax_hl.write_stylesheet? doc) if copy_asciidoctor_stylesheet || copy_user_stylesheet || copy_syntax_hl_stylesheet stylesoutdir = doc.normalize_system_path(stylesdir, outdir, doc.safe >= SafeMode::SAFE ? outdir : nil) if mkdirs Helpers.mkdir_p stylesoutdir else raise ::IOError, %(target stylesheet directory does not exist: #{stylesoutdir} (hint: set :mkdirs option)) unless ::File.directory? stylesoutdir end if copy_asciidoctor_stylesheet Stylesheets.instance.write_primary_stylesheet stylesoutdir # FIXME should Stylesheets also handle the user stylesheet? elsif copy_user_stylesheet if (stylesheet_src = doc.attr 'copycss') == '' || stylesheet_src == true stylesheet_src = doc.normalize_system_path stylesheet else # NOTE in this case, copycss is a source location (but cannot be a URI) stylesheet_src = doc.normalize_system_path stylesheet_src.to_s end stylesheet_dest = doc.normalize_system_path stylesheet, stylesoutdir, (doc.safe >= SafeMode::SAFE ? outdir : nil) # NOTE don't warn if src can't be read and dest already exists (see #2323) if stylesheet_src != stylesheet_dest && (stylesheet_data = doc.read_asset stylesheet_src, warn_on_failure: !(::File.file? stylesheet_dest), label: 'stylesheet') if (stylesheet_outdir = ::File.dirname stylesheet_dest) != stylesoutdir && !(::File.directory? stylesheet_outdir) if mkdirs Helpers.mkdir_p stylesheet_outdir else raise ::IOError, %(target stylesheet directory does not exist: #{stylesheet_outdir} (hint: set :mkdirs option)) end end ::File.write stylesheet_dest, stylesheet_data, mode: FILE_WRITE_MODE end end syntax_hl.write_stylesheet doc, stylesoutdir if copy_syntax_hl_stylesheet end end doc else output end end # Public: Parse the contents of the AsciiDoc source file into an # Asciidoctor::Document and convert it to the specified backend format. # # input - the String AsciiDoc source filename # options - a String, Array or Hash of options to control processing (default: {}) # String and Array values are converted into a Hash. # See Asciidoctor::Document#initialize for details about options. # # Returns the Document object if the converted String is written to a # file, otherwise the converted String def convert_file filename, options = {} ::File.open(filename, FILE_READ_MODE) {|file| convert file, options } end # Deprecated: Use {Asciidoctor.convert} instead. alias render convert # Deprecated: Use {Asciidoctor.convert_file} instead. 
alias render_file convert_file end end ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������asciidoctor-2.0.20/lib/asciidoctor/converter.rb�����������������������������������������������������0000664�0000000�0000000�00000037733�14431350326�0021521�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������# frozen_string_literal: true module Asciidoctor # A module for defining converters that are used to convert {AbstractNode} objects in a parsed AsciiDoc document to an # output (aka backend) format such as HTML or DocBook. # # A {Converter} is typically instantiated each time an AsciiDoc document is processed (i.e., parsed and converted). # Implementing a custom converter entails: # # * Including the {Converter} module in a converter class and implementing the {Converter#convert} method or extending # the {Converter::Base Base} class and implementing the dispatch methods that map to each node. # * Optionally registering the converter with one or more backend names statically using the +register_for+ DSL method # contributed by the {Converter::Config Config} module. # # Examples # # class TextConverter # include Asciidoctor::Converter # register_for 'text' # def initialize *args # super # outfilesuffix '.txt' # end # def convert node, transform = node.node_name, opts = nil # case transform # when 'document', 'section' # [node.title, node.content].join %(\n\n) # when 'paragraph' # (node.content.tr ?\n, ' ') << ?\n # else # (transform.start_with? 'inline_') ? node.text : node.content # end # end # end # puts Asciidoctor.convert_file 'sample.adoc', backend: :text, safe: :safe # # class Html5Converter < (Asciidoctor::Converter.for 'html5') # register_for 'html5' # def convert_paragraph node # %(<p>#{node.content}</p>) # end # end # puts Asciidoctor.convert_file 'sample.adoc', safe: :safe module Converter autoload :CompositeConverter, %(#{__dir__}/converter/composite) autoload :TemplateConverter, %(#{__dir__}/converter/template) unless RUBY_ENGINE == 'opal' # Public: The String backend name that this converter is handling. attr_reader :backend # Public: Creates a new instance of this {Converter}. # # backend - The String backend name (aka format) to which this converter converts. # opts - An options Hash (optional, default: {}) # # Returns a new [Converter] instance. def initialize backend, opts = {} @backend = backend end # Public: Converts an {AbstractNode} using the given transform. # # This method must be implemented by a concrete converter class. # # node - The concrete instance of AbstractNode to convert. # transform - An optional String transform that hints at which transformation should be applied to this node. If a # transform is not given, the transform is often derived from the value of the {AbstractNode#node_name} # property. (optional, default: nil) # opts - An optional Hash of options hints about how to convert the node. (optional, default: nil) # # Returns the [String] result. 
def convert node, transform = nil, opts = nil raise ::NotImplementedError, %(#{self.class} (backend: #{@backend}) must implement the ##{__method__} method) end # Public: Reports whether the current converter is able to convert this node (by its transform name). Used by the # {CompositeConverter} to select which converter to use to handle a given node. Returns true by default. # # transform - the String name of the node transformation (typically the node name). # # Returns a [Boolean] indicating whether this converter can handle the specified transform. def handles? transform true end # Public: Derive backend traits (basebackend, filetype, outfilesuffix, htmlsyntax) from the given backend. # # backend - the String backend from which to derive the traits # basebackend - the String basebackend to use in favor of deriving one from the backend (optional, default: nil) # # Returns the backend traits for the given backend as a [Hash]. def self.derive_backend_traits backend, basebackend = nil return {} unless backend if (outfilesuffix = DEFAULT_EXTENSIONS[(basebackend ||= backend.sub TrailingDigitsRx, '')]) filetype = outfilesuffix.slice 1, outfilesuffix.length else outfilesuffix = %(.#{filetype = basebackend}) end filetype == 'html' ? { basebackend: basebackend, filetype: filetype, htmlsyntax: 'html', outfilesuffix: outfilesuffix } : { basebackend: basebackend, filetype: filetype, outfilesuffix: outfilesuffix } end module BackendTraits def basebackend value = nil value ? ((backend_traits value)[:basebackend] = value) : backend_traits[:basebackend] end def filetype value = nil value ? (backend_traits[:filetype] = value) : backend_traits[:filetype] end def htmlsyntax value = nil value ? (backend_traits[:htmlsyntax] = value) : backend_traits[:htmlsyntax] end def outfilesuffix value = nil value ? (backend_traits[:outfilesuffix] = value) : backend_traits[:outfilesuffix] end def supports_templates value = true backend_traits[:supports_templates] = value end def supports_templates? backend_traits[:supports_templates] end def init_backend_traits value = nil @backend_traits = value || {} end def backend_traits basebackend = nil @backend_traits ||= Converter.derive_backend_traits @backend, basebackend end alias backend_info backend_traits # Deprecated: Use {Converter.derive_backend_traits} instead. def self.derive_backend_traits backend, basebackend = nil Converter.derive_backend_traits backend, basebackend end end # A module that contributes the +register_for+ method for registering a converter with the default registry. module Config # Public: Registers this {Converter} class with the default registry to handle the specified backend name(s). # # backends - One or more String backend names with which to associate this {Converter} class. # # Returns nothing. def register_for *backends Converter.register self, *(backends.map {|backend| backend.to_s }) end end # A reusable module for registering and instantiating {Converter Converter} classes used to convert an {AbstractNode} # to an output (aka backend) format such as HTML or DocBook. # # {Converter Converter} objects are instantiated by passing a String backend name and, optionally, an options Hash to # the {Factory#create} method. The backend can be thought of as an intent to convert a document to a specified format. # # Applications interact with the factory either through the global, static registry mixed into the {Converter # Converter} module or a concrete class that includes this module such as {CustomFactory}. 
For example: # # Examples # # converter = Asciidoctor::Converter.create 'html5', htmlsyntax: 'xml' module Factory # Public: Create an instance of DefaultProxyFactory or CustomFactory, depending on whether the proxy_default keyword # arg is set (true by default), and optionally seed it with the specified converters map. If proxy_default is set, # entries in the proxy registry are preferred over matching entries from the default registry. # # converters - An optional Hash of converters to use in place of ones in the default registry. The keys are # backend names and the values are converter classes or instances. # proxy_default - A Boolean keyword arg indicating whether to proxy the default registry (optional, default: true). # # Returns a Factory instance (DefaultFactoryProxy or CustomFactory) seeded with the optional converters map. def self.new converters = nil, proxy_default: true proxy_default ? (DefaultFactoryProxy.new converters) : (CustomFactory.new converters) end # Deprecated: Maps the old default factory instance holder to the Converter module. def self.default *args Converter end # Deprecated: Maps the create method on the old default factory instance holder to the Converter module. def self.create backend, opts = {} default.create backend, opts end # Public: Register a custom converter with this factory to handle conversion for the specified backends. If the # backend is an asterisk (i.e., +*+), the converter will handle any backend for which a converter is not registered. # # converter - The Converter class to register. # backends - One or more String backend names that this converter should be registered to handle. # # Returns nothing def register converter, *backends backends.each {|backend| backend == '*' ? (registry.default = converter) : (registry[backend] = converter) } end # Public: Lookup the custom converter registered with this factory to handle the specified backend. # # backend - The String backend name. # # Returns the [Converter] class registered to convert the specified backend or nil if no match is found. def for backend registry[backend] end # Public: Create a new Converter object that can be used to convert {AbstractNode}s to the format associated with # the backend. This method accepts an optional Hash of options that are passed to the converter's constructor. # # If a custom Converter is found to convert the specified backend, it's instantiated (if necessary) and returned # immediately. If a custom Converter is not found, an attempt is made to find a built-in converter. If the # +:template_dirs+ key is found in the Hash passed as the second argument, a {CompositeConverter} is created that # delegates to a {TemplateConverter} and, if found, the built-in converter. If the +:template_dirs+ key is not # found, the built-in converter is returned or nil if no converter is found. # # backend - the String backend name. # opts - a Hash of options to customize creation; also passed to the converter's constructor: # :template_dirs - a String Array of directories used to instantiate a {TemplateConverter} (optional). # :delegate_backend - a backend String of the last converter in the {CompositeConverter} chain (optional). # # Returns the [Converter] instance. def create backend, opts = {} if (converter = self.for backend) converter = converter.new backend, opts if ::Class === converter if (template_dirs = opts[:template_dirs]) && BackendTraits === converter && converter.supports_templates? 
CompositeConverter.new backend, (TemplateConverter.new backend, template_dirs, opts), converter, backend_traits_source: converter else converter end elsif (template_dirs = opts[:template_dirs]) if (delegate_backend = opts[:delegate_backend]) && (converter = self.for delegate_backend) converter = converter.new delegate_backend, opts if ::Class === converter CompositeConverter.new backend, (TemplateConverter.new backend, template_dirs, opts), converter, backend_traits_source: converter else TemplateConverter.new backend, template_dirs, opts end end end # Public: Get the Hash of Converter classes keyed by backend name. Intended for testing only. def converters registry.merge end private def registry raise ::NotImplementedError, %(#{Factory} subclass #{self.class} must implement the ##{__method__} method) end end class CustomFactory include Factory def initialize seed_registry = nil if seed_registry seed_registry.default = seed_registry.delete '*' @registry = seed_registry else @registry = {} end end # Public: Unregister all Converter classes that are registered with this factory. Intended for testing only. # # Returns nothing. def unregister_all registry.clear.default = nil end private attr_reader :registry end # Mixed into the {Converter} module to provide the global registry of converters that are registered statically. # # This registry includes built-in converters for {Html5Converter HTML 5}, {DocBook5Converter DocBook 5} and # {ManPageConverter man(ual) page}, as well as any custom converters that have been discovered or explicitly # registered. Converter registration is synchronized (where applicable) and is thus guaranteed to be thread safe. module DefaultFactory include Factory private @@registry = {} def registry @@registry end unless RUBY_ENGINE == 'opal' # the following block adds support for synchronization and lazy registration public def register converter, *backends if @@mutex.owned? backends.each {|backend| backend == '*' ? (@@catch_all = converter) : (@@registry = @@registry.merge backend => converter) } else @@mutex.synchronize { register converter, *backends } end end def unregister_all @@mutex.synchronize do @@registry = @@registry.select {|backend| PROVIDED[backend] } @@catch_all = nil end end def for backend @@registry.fetch backend do PROVIDED[backend] ? (@@mutex.synchronize do # require is thread-safe, so no reason to refetch require PROVIDED[backend] @@registry[backend] end) : catch_all end end PROVIDED = { 'docbook5' => %(#{__dir__}/converter/docbook5), 'html5' => %(#{__dir__}/converter/html5), 'manpage' => %(#{__dir__}/converter/manpage), } private def catch_all @@catch_all end @@catch_all = nil @@mutex = ::Mutex.new end end class DefaultFactoryProxy < CustomFactory include DefaultFactory # inserts module into ancestors immediately after superclass unless RUBY_ENGINE == 'opal' def unregister_all super @registry.clear.default = nil end def for backend @registry.fetch(backend) { super } end private def catch_all @registry.default || super end end end # Internal: Mixes the {Config} module into any class that includes the {Converter} module. Additionally, mixes the # {BackendTraits} method into instances of this class. # # into - The Class into which the {Converter} module is being included. # # Returns nothing. 
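  # Example (a hedged sketch, not part of the original source; MyTextConverter is a
  # hypothetical class used only to illustrate the registry and factory methods above):
  #
  #   class MyTextConverter < Asciidoctor::Converter::Base
  #     register_for 'text' # adds this class to the default registry
  #   end
  #
  #   Asciidoctor::Converter.for 'text'    # => MyTextConverter
  #   Asciidoctor::Converter.create 'text' # => a new MyTextConverter instance
  #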
  def self.included into
    into.send :include, BackendTraits
    into.extend Config
  end

  private_class_method :included # use separate declaration for Ruby 2.0.x

  # An abstract base class for defining converters that can be used to convert {AbstractNode} objects in a parsed
  # AsciiDoc document to a backend format such as HTML or DocBook.
  class Base
    include Logging
    include Converter

    # Public: Converts an {AbstractNode} by delegating to a method that matches the transform value.
    #
    # This method looks for a method whose name matches the transform prefixed with "convert_" to dispatch to. If the
    # +opts+ argument is non-nil, this method assumes the dispatch method accepts two arguments, the node and an options
    # Hash. The options Hash may be used by converters to delegate back to the top-level converter. Currently, this
    # feature is used for the outline transform. If the +opts+ argument is nil, this method assumes the dispatch method
    # accepts the node as its only argument.
    #
    # See {Converter#convert} for details about the arguments and return value.
    def convert node, transform = node.node_name, opts = nil
      opts ? (send 'convert_' + transform, node, opts) : (send 'convert_' + transform, node)
    rescue
      raise unless ::NoMethodError === (ex = $!) && ex.receiver == self && ex.name.to_s == transform
      logger.warn %(missing convert handler for #{ex.name} node in #{@backend} backend (#{self.class}))
      nil
    end

    def handles? transform
      respond_to? %(convert_#{transform})
    end

    # Public: Converts the {AbstractNode} using only its converted content.
    #
    # Returns the converted [String] content.
    def content_only node
      node.content
    end

    # Public: Skips conversion of the {AbstractNode}.
    #
    # Returns nothing.
    def skip node; end
  end

  extend DefaultFactory # exports static methods
end
end

asciidoctor-2.0.20/lib/asciidoctor/converter/composite.rb

# frozen_string_literal: true
module Asciidoctor
# A {Converter} implementation that delegates to the chain of {Converter}
# objects passed to the constructor. Selects the first {Converter} that
# identifies itself as the handler for a given transform.
class Converter::CompositeConverter < Converter::Base
  # Get the Array of Converter objects in the chain
  attr_reader :converters

  def initialize backend, *converters, backend_traits_source: nil
    @backend = backend
    (@converters = converters).each {|converter| converter.composed self if converter.respond_to? :composed }
    init_backend_traits backend_traits_source.backend_traits if backend_traits_source
    @converter_cache = ::Hash.new {|hash, key| hash[key] = find_converter key }
  end
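
  # Example (a hedged sketch, not part of the original source): a composite converter is
  # normally assembled by the factory when :template_dirs is given; the directory name
  # below is illustrative only.
  #
  #   composite = Asciidoctor::Converter.create 'html5', template_dirs: ['my_templates']
  #   composite.converters # => [template converter, built-in Html5Converter], consulted in that order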

  # Public: Delegates to the first converter that identifies itself as the
  # handler for the given transform. The optional Hash is passed as the last
  # option to the delegate's convert method.
  #
  # node - the AbstractNode to convert
  # transform - the optional String transform, or the name of the node if no
  #             transform is specified. (default: nil)
  # opts - an optional Hash that is passed to the delegate's convert method. (default: nil)
  #
  # Returns the String result returned from the delegate's convert method
  def convert node, transform = nil, opts = nil
    (converter_for transform ||= node.node_name).convert node, transform, opts
  end

  # Public: Retrieve the converter for the specified transform.
  #
  # Returns the matching [Converter] object
  def converter_for transform
    @converter_cache[transform]
  end

  # Public: Find the converter for the specified transform.
  # Raise an exception if no converter is found.
  #
  # Returns the matching [Converter] object
  def find_converter transform
    @converters.each {|candidate| return candidate if candidate.handles? transform }
    raise %(Could not find a converter to handle transform: #{transform})
  end
end
end

asciidoctor-2.0.20/lib/asciidoctor/converter/docbook5.rb

# frozen_string_literal: true
module Asciidoctor
# A built-in {Converter} implementation that generates DocBook 5 output. The output is inspired by the output produced
# by the docbook45 backend from AsciiDoc.py, except it has been migrated to the DocBook 5 specification.
class Converter::DocBook5Converter < Converter::Base
  register_for 'docbook5'

  # default represents variablelist
  (DLIST_TAGS = {
    'qanda' => { list: 'qandaset', entry: 'qandaentry', label: 'question', term: 'simpara', item: 'answer' },
    'glossary' => { list: nil, entry: 'glossentry', term: 'glossterm', item: 'glossdef' },
  }).default = { list: 'variablelist', entry: 'varlistentry', term: 'term', item: 'listitem' }

  (QUOTE_TAGS = {
    monospaced: ['<literal>', '</literal>'],
    emphasis: ['<emphasis>', '</emphasis>', true],
    strong: ['<emphasis role="strong">', '</emphasis>', true],
    double: ['<quote>', '</quote>', true],
    single: ['<quote>', '</quote>', true],
    mark: ['<emphasis role="marked">', '</emphasis>'],
    superscript: ['<superscript>', '</superscript>'],
    subscript: ['<subscript>', '</subscript>'],
  }).default = ['', '', true]

  MANPAGE_SECTION_TAGS = { 'section' => 'refsection', 'synopsis' => 'refsynopsisdiv' }

  TABLE_PI_NAMES = ['dbhtml', 'dbfo', 'dblatex']

  CopyrightRx = /^(#{CC_ANY}+?)(?: ((?:\d{4}-)?\d{4}))?$/
  ImageMacroRx = /^image::?(\S|\S#{CC_ANY}*?\S)\[(#{CC_ANY}+)?\]$/

  def initialize backend, opts = {}
    @backend = backend
    init_backend_traits basebackend: 'docbook', filetype: 'xml', outfilesuffix: '.xml', supports_templates: true
  end

  def convert_document node
    result = ['<?xml version="1.0" encoding="UTF-8"?>']
    result << ((node.attr? 'toclevels') ?
%(<?asciidoc-toc maxdepth="#{node.attr 'toclevels'}"?>) : '<?asciidoc-toc?>') if node.attr? 'toc' result << ((node.attr? 'sectnumlevels') ? %(<?asciidoc-numbered maxdepth="#{node.attr 'sectnumlevels'}"?>) : '<?asciidoc-numbered?>') if node.attr? 'sectnums' lang_attribute = (node.attr? 'nolang') ? '' : %( xml:lang="#{node.attr 'lang', 'en'}") if (root_tag_name = node.doctype) == 'manpage' manpage = true root_tag_name = 'article' end root_tag_idx = result.size id = node.id result << (document_info_tag node) unless node.noheader if manpage result << '<refentry>' result << '<refmeta>' result << %(<refentrytitle>#{node.apply_reftext_subs node.attr 'mantitle'}</refentrytitle>) if node.attr? 'mantitle' result << %(<manvolnum>#{node.attr 'manvolnum'}</manvolnum>) if node.attr? 'manvolnum' result << %(<refmiscinfo class="source">#{node.attr 'mansource', ' '}</refmiscinfo>) result << %(<refmiscinfo class="manual">#{node.attr 'manmanual', ' '}</refmiscinfo>) result << '</refmeta>' result << '<refnamediv>' result += (node.attr 'mannames').map {|n| %(<refname>#{n}</refname>) } if node.attr? 'mannames' result << %(<refpurpose>#{node.attr 'manpurpose'}</refpurpose>) if node.attr? 'manpurpose' result << '</refnamediv>' end unless (docinfo_content = node.docinfo :header).empty? result << docinfo_content end result << node.content if node.blocks? unless (docinfo_content = node.docinfo :footer).empty? result << docinfo_content end result << '</refentry>' if manpage id, node.id = node.id, nil unless id # defer adding root tag in case document ID is auto-generated on demand result.insert root_tag_idx, %(<#{root_tag_name} xmlns="http://docbook.org/ns/docbook" xmlns:xl="http://www.w3.org/1999/xlink" version="5.0"#{lang_attribute}#{common_attributes id}>) result << %(</#{root_tag_name}>) result.join LF end alias convert_embedded content_only def convert_section node if node.document.doctype == 'manpage' tag_name = MANPAGE_SECTION_TAGS[tag_name = node.sectname] || tag_name else tag_name = node.sectname end title_el = node.special && ((node.option? 'notitle') || (node.option? 'untitled')) ? '' : %(<title>#{node.title}\n) %(<#{tag_name}#{common_attributes node.id, node.role, node.reftext}> #{title_el}#{node.content} ) end def convert_admonition node %(<#{tag_name = node.attr 'name'}#{common_attributes node.id, node.role, node.reftext}> #{title_tag node}#{enclose_content node} ) end alias convert_audio skip def convert_colist node result = [] result << %() result << %(#{node.title}) if node.title? node.items.each do |item| result << %() result << %(#{item.text}) result << item.content if item.blocks? result << '' end result << %() result.join LF end def convert_dlist node result = [] if node.style == 'horizontal' result << %(<#{tag_name = node.title? ? 'table' : 'informaltable'}#{common_attributes node.id, node.role, node.reftext} tabstyle="horizontal" frame="none" colsep="0" rowsep="0"> #{title_tag node} ) node.items.each do |terms, dd| result << %( ) terms.each {|dt| result << %(#{dt.text}) } result << %( ) if dd result << %(#{dd.text}) if dd.text? result << dd.content if dd.blocks? end result << %( ) end result << %( ) else tags = DLIST_TAGS[node.style] list_tag = tags[:list] entry_tag = tags[:entry] label_tag = tags[:label] term_tag = tags[:term] item_tag = tags[:item] if list_tag result << %(<#{list_tag}#{common_attributes node.id, node.role, node.reftext}>) result << %(#{node.title}) if node.title? 
end node.items.each do |terms, dd| result << %(<#{entry_tag}>) result << %(<#{label_tag}>) if label_tag terms.each {|dt| result << %(<#{term_tag}>#{dt.text}) } result << %() if label_tag result << %(<#{item_tag}>) if dd result << %(#{dd.text}) if dd.text? result << dd.content if dd.blocks? end result << %() result << %() end result << %() if list_tag end result.join LF end def convert_example node if node.title? %( #{node.title} #{enclose_content node} ) else %( #{enclose_content node} ) end end def convert_floating_title node %(#{node.title}) end def convert_image node # NOTE according to the DocBook spec, content area, scaling, and scaling to fit are mutually exclusive # See http://tdg.docbook.org/tdg/4.5/imagedata-x.html#d0e79635 if node.attr? 'scaledwidth' width_attribute = %( width="#{node.attr 'scaledwidth'}") depth_attribute = '' scale_attribute = '' elsif node.attr? 'scale' # QUESTION should we set the viewport using width and depth? (the scaled image would be contained within this box) #width_attribute = (node.attr? 'width') ? %( width="#{node.attr 'width'}") : '' #depth_attribute = (node.attr? 'height') ? %( depth="#{node.attr 'height'}") : '' scale_attribute = %( scale="#{node.attr 'scale'}") else width_attribute = (node.attr? 'width') ? %( contentwidth="#{node.attr 'width'}") : '' depth_attribute = (node.attr? 'height') ? %( contentdepth="#{node.attr 'height'}") : '' scale_attribute = '' end align_attribute = (node.attr? 'align') ? %( align="#{node.attr 'align'}") : '' mediaobject = %( #{node.alt} ) if node.title? %( #{node.title} #{mediaobject} ) else %( #{mediaobject} ) end end def convert_listing node informal = !node.title? common_attrs = common_attributes node.id, node.role, node.reftext if node.style == 'source' if (attrs = node.attributes).key? 'linenums' numbering_attrs = (attrs.key? 'start') ? %( linenumbering="numbered" startinglinenumber="#{attrs['start'].to_i}") : ' linenumbering="numbered"' else numbering_attrs = ' linenumbering="unnumbered"' end if attrs.key? 'language' wrapped_content = %(#{node.content}) else wrapped_content = %(#{node.content}) end else wrapped_content = %(#{node.content}) end informal ? wrapped_content : %( #{node.title} #{wrapped_content} ) end def convert_literal node if node.title? %( #{node.title} #{node.content} ) else %(#{node.content}) end end alias convert_pass content_only def convert_stem node if (idx = node.subs.index :specialcharacters) node.subs.delete_at idx equation = node.content idx > 0 ? (node.subs.insert idx, :specialcharacters) : (node.subs.unshift :specialcharacters) else equation = node.content end if node.style == 'asciimath' # NOTE fop requires jeuclid to process mathml markup equation_data = asciimath_available? ? ((::AsciiMath.parse equation).to_mathml 'mml:', 'xmlns:mml' => 'http://www.w3.org/1998/Math/MathML') : %() else # unhandled math; pass source to alt and required mathphrase element; dblatex will process alt as LaTeX math equation_data = %( ) end if node.title? %( #{node.title} #{equation_data} ) else # WARNING dblatex displays the element inline instead of block as documented (except w/ mathml) %( #{equation_data} ) end end def convert_olist node result = [] num_attribute = node.style ? %( numeration="#{node.style}") : '' start_attribute = (node.attr? 'start') ? %( startingnumber="#{node.attr 'start'}") : '' result << %() result << %(#{node.title}) if node.title? node.items.each do |item| result << %() result << %(#{item.text}) result << item.content if item.blocks? 
result << '' end result << %() result.join LF end def convert_open node case node.style when 'abstract' if node.parent == node.document && node.document.doctype == 'book' logger.warn 'abstract block cannot be used in a document without a title when doctype is book. Excluding block content.' '' else %( #{title_tag node}#{enclose_content node} ) end when 'partintro' if node.level == 0 && node.parent.context == :section && node.document.doctype == 'book' %( #{title_tag node}#{enclose_content node} ) else logger.error 'partintro block can only be used when doctype is book and must be a child of a book part. Excluding block content.' '' end else reftext = node.reftext if (id = node.id) role = node.role if node.title? %( #{node.title} #{content_spacer = node.content_model == :compound ? LF : ''}#{node.content}#{content_spacer} ) elsif id || role if node.content_model == :compound %( #{node.content} ) else %(#{node.content}) end else enclose_content node end end end def convert_page_break node '' end def convert_paragraph node if node.title? %( #{node.title} #{node.content} ) else %(#{node.content}) end end def convert_preamble node if node.document.doctype == 'book' %( #{title_tag node, false}#{node.content} ) else node.content end end def convert_quote node blockquote_tag(node, (node.has_role? 'epigraph') && 'epigraph') { enclose_content node } end def convert_thematic_break node '' end def convert_sidebar node %( #{title_tag node}#{enclose_content node} ) end def convert_table node has_body = false result = [] pgwide_attribute = (node.option? 'pgwide') ? ' pgwide="1"' : '' frame = 'topbot' if (frame = node.attr 'frame', 'all', 'table-frame') == 'ends' grid = node.attr 'grid', nil, 'table-grid' result << %(<#{tag_name = node.title? ? 'table' : 'informaltable'}#{common_attributes node.id, node.role, node.reftext}#{pgwide_attribute} frame="#{frame}" rowsep="#{['none', 'cols'].include?(grid) ? 0 : 1}" colsep="#{['none', 'rows'].include?(grid) ? 0 : 1}"#{(node.attr? 'orientation', 'landscape', 'table-orientation') ? ' orient="land"' : ''}>) if node.option? 'unbreakable' result << '' elsif node.option? 'breakable' result << '' end result << %(#{node.title}) if tag_name == 'table' if (width = (node.attr? 'width') ? (node.attr 'width') : nil) TABLE_PI_NAMES.each do |pi_name| result << %() end col_width_key = 'colabswidth' else col_width_key = 'colpcwidth' end result << %() node.columns.each do |col| result << %() end node.rows.to_h.each do |tsec, rows| next if rows.empty? has_body = true if tsec == :body result << %() rows.each do |row| result << '' row.each do |cell| colspan_attribute = cell.colspan ? %( namest="col_#{colnum = cell.column.attr 'colnumber'}" nameend="col_#{colnum + cell.colspan - 1}") : '' rowspan_attribute = cell.rowspan ? %( morerows="#{cell.rowspan - 1}") : '' # NOTE may not have whitespace (e.g., line breaks) as a direct descendant according to DocBook rules entry_start = %() if tsec == :head cell_content = cell.text else case cell.style when :asciidoc cell_content = cell.content when :literal cell_content = %(#{cell.text}) when :header cell_content = (cell_content = cell.content).empty? ? '' : %(#{cell_content.join ''}) else cell_content = (cell_content = cell.content).empty? ? '' : %(#{cell_content.join ''}) end end entry_end = (node.document.attr? 'cellbgcolor') ? 
%() : '' result << %(#{entry_start}#{cell_content}#{entry_end}) end result << '' end result << %() end result << '' result << %() logger.warn 'tables must have at least one body row' unless has_body result.join LF end alias convert_toc skip def convert_ulist node result = [] if node.style == 'bibliography' result << %() result << %(#{node.title}) if node.title? node.items.each do |item| result << '' result << %(#{item.text}) result << item.content if item.blocks? result << '' end result << '' else mark_type = (checklist = node.option? 'checklist') ? 'none' : node.style mark_attribute = mark_type ? %( mark="#{mark_type}") : '' result << %() result << %(#{node.title}) if node.title? node.items.each do |item| text_marker = (item.attr? 'checked') ? '✓ ' : '❏ ' if checklist && (item.attr? 'checkbox') result << %() result << %(#{text_marker || ''}#{item.text}) result << item.content if item.blocks? result << '' end result << '' end result.join LF end def convert_verse node blockquote_tag(node, (node.has_role? 'epigraph') && 'epigraph') { %(#{node.content}) } end alias convert_video skip def convert_inline_anchor node case node.type when :ref %() when :xref if (path = node.attributes['path']) %(#{node.text || path}) else if (linkend = node.attributes['refid']).nil_or_empty? root_doc = get_root_document node # Q: should we warn instead of generating a document ID on demand? linkend = (root_doc.id ||= generate_document_id root_doc) end # NOTE the xref tag in DocBook does not support explicit link text, so the link tag must be used instead # The section at http://www.sagehill.net/docbookxsl/CrossRefs.html#IdrefLinks gives an explanation for this choice # "link - a cross reference where you supply the text of the reference as the content of the link element." (text = node.text) ? %(#{text}) : %() end when :link %(#{node.text}) when :bibref %(#{text}) else logger.warn %(unknown anchor type: #{node.type.inspect}) nil end end def convert_inline_break node %(#{node.text}) end def convert_inline_button node %(#{node.text}) end def convert_inline_callout node %() end def convert_inline_footnote node if node.type == :xref %() else %(#{node.text}) end end def convert_inline_image node width_attribute = (node.attr? 'width') ? %( contentwidth="#{node.attr 'width'}") : '' depth_attribute = (node.attr? 'height') ? %( contentdepth="#{node.attr 'height'}") : '' %( #{node.alt} ) end def convert_inline_indexterm node if (see = node.attr 'see') rel = %(\n#{see}) elsif (see_also_list = node.attr 'see-also') rel = see_also_list.map {|see_also| %(\n#{see_also}) }.join else rel = '' end if node.type == :visible %( #{node.text}#{rel} #{node.text}) elsif (numterms = (terms = node.attr 'terms').size) > 2 %( #{terms[0]}#{terms[1]}#{terms[2]}#{rel} #{(node.document.option? 'indexterm-promotion') ? %[ #{terms[1]}#{terms[2]} #{terms[2]} ] : ''}) elsif numterms > 1 %( #{terms[0]}#{terms[1]}#{rel} #{(node.document.option? 'indexterm-promotion') ? %[ #{terms[1]} ] : ''}) else %( #{terms[0]}#{rel} ) end end def convert_inline_kbd node if (keys = node.attr 'keys').size == 1 %(#{keys[0]}) else %(#{keys.join ''}) end end def convert_inline_menu node menu = node.attr 'menu' if (submenus = node.attr 'submenus').empty? if (menuitem = node.attr 'menuitem') %(#{menu} #{menuitem}) else %(#{menu}) end else %(#{menu} #{submenus.join ' '} #{node.attr 'menuitem'}) end end def convert_inline_quoted node if (type = node.type) == :asciimath # NOTE fop requires jeuclid to process mathml markup asciimath_available? ? 
%(#{(::AsciiMath.parse node.text).to_mathml 'mml:', 'xmlns:mml' => 'http://www.w3.org/1998/Math/MathML'}) : %() elsif type == :latexmath # unhandled math; pass source to alt and required mathphrase element; dblatex will process alt as LaTeX math %() else open, close, supports_phrase = QUOTE_TAGS[type] text = node.text if node.role if supports_phrase quoted_text = %(#{open}#{text}#{close}) else quoted_text = %(#{open.chop} role="#{node.role}">#{text}#{close}) end else quoted_text = %(#{open}#{text}#{close}) end node.id ? %(#{quoted_text}) : quoted_text end end private def common_attributes id, role = nil, reftext = nil if id attrs = %( xml:id="#{id}"#{role ? %[ role="#{role}"] : ''}) elsif role attrs = %( role="#{role}") else attrs = '' end if reftext if (reftext.include? '<') && ((reftext = reftext.gsub XmlSanitizeRx, '').include? ' ') reftext = (reftext.squeeze ' ').strip end reftext = reftext.gsub '"', '"' if reftext.include? '"' %(#{attrs} xreflabel="#{reftext}") else attrs end end def author_tag doc, author result = [] result << '' result << '' result << %(#{doc.sub_replacements author.firstname}) if author.firstname result << %(#{doc.sub_replacements author.middlename}) if author.middlename result << %(#{doc.sub_replacements author.lastname}) if author.lastname result << '' result << %(#{author.email}) if author.email result << '' result.join LF end def document_info_tag doc result = [''] unless doc.notitle if (title = doc.doctitle partition: true, use_fallback: true).subtitle? result << %(#{title.main} #{title.subtitle}) else result << %(#{title}) end end if (date = (doc.attr? 'revdate') ? (doc.attr 'revdate') : ((doc.attr? 'reproducible') ? nil : (doc.attr 'docdate'))) result << %(#{date}) end if doc.attr? 'copyright' CopyrightRx =~ (doc.attr 'copyright') result << '' result << %(#{$1}) result << %(#{$2}) if $2 result << '' end if doc.header? unless (authors = doc.authors).empty? if authors.size > 1 result << '' authors.each {|author| result << (author_tag doc, author) } result << '' else result << (author_tag doc, (author = authors[0])) result << %(#{author.initials}) if author.initials end end if (doc.attr? 'revdate') && ((doc.attr? 'revnumber') || (doc.attr? 'revremark')) result << %( ) result << %(#{doc.attr 'revnumber'}) if doc.attr? 'revnumber' result << %(#{doc.attr 'revdate'}) if doc.attr? 'revdate' result << %(#{doc.attr 'authorinitials'}) if doc.attr? 'authorinitials' result << %(#{doc.attr 'revremark'}) if doc.attr? 'revremark' result << %( ) end if (doc.attr? 'front-cover-image') || (doc.attr? 'back-cover-image') if (back_cover_tag = cover_tag doc, 'back') result << (cover_tag doc, 'front', true) result << back_cover_tag elsif (front_cover_tag = cover_tag doc, 'front') result << front_cover_tag end end result << %(#{doc.attr 'orgname'}) if doc.attr? 'orgname' unless (docinfo_content = doc.docinfo).empty? result << docinfo_content end end result << '' result.join LF end def get_root_document node while (node = node.document).nested? node = node.parent_document end node end def generate_document_id doc %(__#{doc.doctype}-root__) end # FIXME this should be handled through a template mechanism def enclose_content node node.content_model == :compound ? node.content : %(#{node.content}) end def title_tag node, optional = true !optional || node.title? ? %(#{node.title}\n) : '' end def cover_tag doc, face, use_placeholder = false if (cover_image = doc.attr %(#{face}-cover-image)) width_attr = '' depth_attr = '' if (cover_image.include? 
':') && ImageMacroRx =~ cover_image attrlist = $2 cover_image = doc.image_uri $1 if attrlist attrs = (AttributeList.new attrlist).parse ['alt', 'width', 'height'] if attrs.key? 'scaledwidth' # NOTE scalefit="1" is the default in this case width_attr = %( width="#{attrs['scaledwidth']}") else width_attr = %( contentwidth="#{attrs['width']}") if attrs.key? 'width' depth_attr = %( contentdepth="#{attrs['height']}") if attrs.key? 'height' end end end %( ) elsif use_placeholder %() end end def blockquote_tag node, tag_name = nil if tag_name start_tag, end_tag = %(<#{tag_name}), %() else start_tag, end_tag = '' end result = [%(#{start_tag}#{common_attributes node.id, node.role, node.reftext}>)] result << %(#{node.title}) if node.title? if (node.attr? 'attribution') || (node.attr? 'citetitle') result << '' result << (node.attr 'attribution') if node.attr? 'attribution' result << %(#{node.attr 'citetitle'}) if node.attr? 'citetitle' result << '' end result << yield result << end_tag result.join LF end def asciimath_available? (@asciimath_status ||= load_asciimath) == :loaded end def load_asciimath (defined? ::AsciiMath.parse) ? :loaded : (Helpers.require_library 'asciimath', true, :warn).nil? ? :unavailable : :loaded end end end asciidoctor-2.0.20/lib/asciidoctor/converter/html5.rb000066400000000000000000001522051443135032600225420ustar00rootroot00000000000000# frozen_string_literal: true module Asciidoctor # A built-in {Converter} implementation that generates HTML 5 output # consistent with the html5 backend from AsciiDoc.py. class Converter::Html5Converter < Converter::Base register_for 'html5' (QUOTE_TAGS = { monospaced: ['', '', true], emphasis: ['', '', true], strong: ['', '', true], double: ['“', '”'], single: ['‘', '’'], mark: ['', '', true], superscript: ['', '', true], subscript: ['', '', true], asciimath: ['\$', '\$'], latexmath: ['\(', '\)'], # Opal can't resolve these constants when referenced here #asciimath: INLINE_MATH_DELIMITERS[:asciimath] + [false], #latexmath: INLINE_MATH_DELIMITERS[:latexmath] + [false], }).default = ['', ''] DropAnchorRx = %r(<(?:a\b[^>]*|/a)>) StemBreakRx = / *\\\n(?:\\?\n)*|\n\n+/ if RUBY_ENGINE == 'opal' # NOTE In JavaScript, ^ matches the start of the string when the m flag is not set SvgPreambleRx = /^#{CC_ALL}*?(?=])/ SvgStartTagRx = /^]*)?>/ else SvgPreambleRx = /\A.*?(?=])/m SvgStartTagRx = /\A]*)?>/ end DimensionAttributeRx = /\s(?:width|height|style)=(["'])#{CC_ANY}*?\1/ def initialize backend, opts = {} @backend = backend if opts[:htmlsyntax] == 'xml' syntax = 'xml' @xml_mode = true @void_element_slash = '/' else syntax = 'html' @xml_mode = nil @void_element_slash = '' end init_backend_traits basebackend: 'html', filetype: 'html', htmlsyntax: syntax, outfilesuffix: '.html', supports_templates: true end def convert node, transform = node.node_name, opts = nil case transform when 'inline_quoted' then convert_inline_quoted node when 'paragraph' then convert_paragraph node when 'inline_anchor' then convert_inline_anchor node when 'section' then convert_section node when 'listing' then convert_listing node when 'literal' then convert_literal node when 'ulist' then convert_ulist node when 'olist' then convert_olist node when 'dlist' then convert_dlist node when 'admonition' then convert_admonition node when 'colist' then convert_colist node when 'embedded' then convert_embedded node when 'example' then convert_example node when 'floating_title' then convert_floating_title node when 'image' then convert_image node when 'inline_break' then 
convert_inline_break node when 'inline_button' then convert_inline_button node when 'inline_callout' then convert_inline_callout node when 'inline_footnote' then convert_inline_footnote node when 'inline_image' then convert_inline_image node when 'inline_indexterm' then convert_inline_indexterm node when 'inline_kbd' then convert_inline_kbd node when 'inline_menu' then convert_inline_menu node when 'open' then convert_open node when 'page_break' then convert_page_break node when 'preamble' then convert_preamble node when 'quote' then convert_quote node when 'sidebar' then convert_sidebar node when 'stem' then convert_stem node when 'table' then convert_table node when 'thematic_break' then convert_thematic_break node when 'verse' then convert_verse node when 'video' then convert_video node when 'document' then convert_document node when 'toc' then convert_toc node when 'pass' then convert_pass node when 'audio' then convert_audio node else; super end end def convert_document node br = %() unless (asset_uri_scheme = (node.attr 'asset-uri-scheme', 'https')).empty? asset_uri_scheme = %(#{asset_uri_scheme}:) end cdn_base_url = %(#{asset_uri_scheme}//cdnjs.cloudflare.com/ajax/libs) linkcss = node.attr? 'linkcss' max_width_attr = (node.attr? 'max-width') ? %( style="max-width: #{node.attr 'max-width'};") : '' result = [''] lang_attribute = (node.attr? 'nolang') ? '' : %( lang="#{node.attr 'lang', 'en'}") result << %() result << %( ) result << %() if node.attr? 'app-name' result << %() if node.attr? 'description' result << %() if node.attr? 'keywords' result << %() if node.attr? 'authors' result << %() if node.attr? 'copyright' if node.attr? 'favicon' if (icon_href = node.attr 'favicon').empty? icon_href = 'favicon.ico' icon_type = 'image/x-icon' elsif (icon_ext = Helpers.extname icon_href, nil) icon_type = icon_ext == '.ico' ? 'image/x-icon' : %(image/#{icon_ext.slice 1, icon_ext.length}) else icon_type = 'image/x-icon' end result << %() end result << %(#{node.doctitle sanitize: true, use_fallback: true}) if DEFAULT_STYLESHEET_KEYS.include?(node.attr 'stylesheet') if (webfonts = node.attr 'webfonts') result << %() end if linkcss result << %() else result << %() end elsif node.attr? 'stylesheet' if linkcss result << %() else result << %() end end if node.attr? 'icons', 'font' if node.attr? 'iconfont-remote' result << %() else iconfont_stylesheet = %(#{node.attr 'iconfont-name', 'font-awesome'}.css) result << %() end end if (syntax_hl = node.syntax_highlighter) result << (syntax_hl_docinfo_head_idx = result.size) end unless (docinfo_content = node.docinfo).empty? result << docinfo_content end result << '' id_attr = node.id ? %( id="#{node.id}") : '' if (sectioned = node.sections?) && (node.attr? 'toc-class') && (node.attr? 'toc') && (node.attr? 'toc-placement', 'auto') classes = [node.doctype, (node.attr 'toc-class'), %(toc-#{node.attr 'toc-position', 'header'})] else classes = [node.doctype] end classes << node.role if node.role? result << %() unless (docinfo_content = node.docinfo :header).empty? result << docinfo_content end unless node.noheader result << %(' end result << %(
#{node.content}
) if node.footnotes? && !(node.attr? 'nofootnotes') result << %(
) node.footnotes.each do |footnote| result << %(
#{footnote.index}. #{footnote.text}
) end result << '
' end unless node.nofooter result << %(' end # JavaScript (and auxiliary stylesheets) loaded at the end of body for performance reasons # See http://www.html5rocks.com/en/tutorials/speed/script-loading/ if syntax_hl if syntax_hl.docinfo? :head result[syntax_hl_docinfo_head_idx] = syntax_hl.docinfo :head, node, cdn_base_url: cdn_base_url, linkcss: linkcss, self_closing_tag_slash: slash else result.delete_at syntax_hl_docinfo_head_idx end if syntax_hl.docinfo? :footer result << (syntax_hl.docinfo :footer, node, cdn_base_url: cdn_base_url, linkcss: linkcss, self_closing_tag_slash: slash) end end if node.attr? 'stem' eqnums_val = node.attr 'eqnums', 'none' eqnums_val = 'AMS' if eqnums_val.empty? eqnums_opt = %( equationNumbers: { autoNumber: "#{eqnums_val}" } ) # IMPORTANT inspect calls on delimiter arrays are intentional for JavaScript compat (emulates JSON.stringify) result << %( ) end unless (docinfo_content = node.docinfo :footer).empty? result << docinfo_content end result << '' result << '' result.join LF end def convert_embedded node result = [] if node.doctype == 'manpage' # QUESTION should notitle control the manual page title? unless node.notitle id_attr = node.id ? %( id="#{node.id}") : '' result << %(#{node.doctitle} Manual Page) end result << (generate_manname_section node) if node.attr? 'manpurpose' elsif node.header? && !node.notitle id_attr = node.id ? %( id="#{node.id}") : '' result << %(#{node.header.title}) end if node.sections? && (node.attr? 'toc') && (toc_p = node.attr 'toc-placement') != 'macro' && toc_p != 'preamble' result << %(
#{node.attr 'toc-title'}
#{node.converter.convert node, 'outline'}
) end result << node.content if node.footnotes? && !(node.attr? 'nofootnotes') result << %(
) node.footnotes.each do |footnote| result << %(
#{footnote.index}. #{footnote.text}
) end result << '
' end result.join LF end def convert_outline node, opts = {} return unless node.sections? sectnumlevels = opts[:sectnumlevels] || (node.document.attributes['sectnumlevels'] || 3).to_i toclevels = opts[:toclevels] || (node.document.attributes['toclevels'] || 2).to_i sections = node.sections # FIXME top level is incorrect if a multipart book starts with a special section defined at level 0 result = [%(
    )] sections.each do |section| slevel = section.level if section.caption stitle = section.captioned_title elsif section.numbered && slevel <= sectnumlevels if slevel < 2 && node.document.doctype == 'book' case section.sectname when 'chapter' stitle = %(#{(signifier = node.document.attributes['chapter-signifier']) ? "#{signifier} " : ''}#{section.sectnum} #{section.title}) when 'part' stitle = %(#{(signifier = node.document.attributes['part-signifier']) ? "#{signifier} " : ''}#{section.sectnum nil, ':'} #{section.title}) else stitle = %(#{section.sectnum} #{section.title}) end else stitle = %(#{section.sectnum} #{section.title}) end else stitle = section.title end stitle = stitle.gsub DropAnchorRx, '' if stitle.include? '#{stitle}) result << child_toc_level result << '' else result << %(
  • #{stitle}
  • ) end end result << '
' result.join LF end def convert_section node doc_attrs = node.document.attributes level = node.level if node.caption title = node.captioned_title elsif node.numbered && level <= (doc_attrs['sectnumlevels'] || 3).to_i if level < 2 && node.document.doctype == 'book' case node.sectname when 'chapter' title = %(#{(signifier = doc_attrs['chapter-signifier']) ? "#{signifier} " : ''}#{node.sectnum} #{node.title}) when 'part' title = %(#{(signifier = doc_attrs['part-signifier']) ? "#{signifier} " : ''}#{node.sectnum nil, ':'} #{node.title}) else title = %(#{node.sectnum} #{node.title}) end else title = %(#{node.sectnum} #{node.title}) end else title = node.title end if node.id id_attr = %( id="#{id = node.id}") if doc_attrs['sectlinks'] title = %(#{title}) end if doc_attrs['sectanchors'] # QUESTION should we add a font-based icon in anchor if icons=font? if doc_attrs['sectanchors'] == 'after' title = %(#{title}) else title = %(#{title}) end end else id_attr = '' end if level == 0 %(#{title} #{node.content}) else %(
#{title} #{level == 1 ? %[
#{node.content}
] : node.content}
) end end def convert_admonition node id_attr = node.id ? %( id="#{node.id}") : '' name = node.attr 'name' title_element = node.title? ? %(
#{node.title}
\n) : '' if node.document.attr? 'icons' if (node.document.attr? 'icons', 'font') && !(node.attr? 'icon') label = %() else label = %(#{node.attr 'textlabel'}) end else label = %(
#{node.attr 'textlabel'}
) end %(
#{label} #{title_element}#{node.content}
) end def convert_audio node xml = @xml_mode id_attribute = node.id ? %( id="#{node.id}") : '' classes = ['audioblock', node.role].compact class_attribute = %( class="#{classes.join ' '}") title_element = node.title? ? %(
#{node.title}
\n) : '' start_t = node.attr 'start' end_t = node.attr 'end' time_anchor = (start_t || end_t) ? %(#t=#{start_t || ''}#{end_t ? ",#{end_t}" : ''}) : '' %( #{title_element}
) end def convert_colist node result = [] id_attribute = node.id ? %( id="#{node.id}") : '' classes = ['colist', node.style, node.role].compact class_attribute = %( class="#{classes.join ' '}") result << %() result << %(
#{node.title}
) if node.title? if node.document.attr? 'icons' result << '' font_icons, num = (node.document.attr? 'icons', 'font'), 0 node.items.each do |item| num += 1 if font_icons num_label = %(#{num}) else num_label = %(#{num}) end result << %() end result << '
#{num_label} #{item.text}#{item.blocks? ? LF + item.content : ''}
' else result << '
    ' node.items.each do |item| result << %(
  1. #{item.text}

    #{item.blocks? ? LF + item.content : ''}
  2. ) end result << '
' end result << '' result.join LF end def convert_dlist node result = [] id_attribute = node.id ? %( id="#{node.id}") : '' case node.style when 'qanda' classes = ['qlist', 'qanda', node.role] when 'horizontal' classes = ['hdlist', node.role] else classes = ['dlist', node.style, node.role] end class_attribute = %( class="#{classes.compact.join ' '}") result << %() result << %(
#{node.title}
) if node.title? case node.style when 'qanda' result << '
    ' node.items.each do |terms, dd| result << '
  1. ' terms.each do |dt| result << %(

    #{dt.text}

    ) end if dd result << %(

    #{dd.text}

    ) if dd.text? result << dd.content if dd.blocks? end result << '
  2. ' end result << '
' when 'horizontal' slash = @void_element_slash result << '' if (node.attr? 'labelwidth') || (node.attr? 'itemwidth') result << '' col_style_attribute = (node.attr? 'labelwidth') ? %( style="width: #{(node.attr 'labelwidth').chomp '%'}%;") : '' result << %() col_style_attribute = (node.attr? 'itemwidth') ? %( style="width: #{(node.attr 'itemwidth').chomp '%'}%;") : '' result << %() result << '' end node.items.each do |terms, dd| result << '' result << %(' result << '' result << '' end result << '
) first_term = true terms.each do |dt| result << %() unless first_term result << dt.text first_term = nil end result << '' if dd result << %(

#{dd.text}

) if dd.text? result << dd.content if dd.blocks? end result << '
' else result << '
' dt_style_attribute = node.style ? '' : ' class="hdlist1"' node.items.each do |terms, dd| terms.each do |dt| result << %(#{dt.text}) end next unless dd result << '
' result << %(

#{dd.text}

) if dd.text? result << dd.content if dd.blocks? result << '
' end result << '
' end result << '' result.join LF end def convert_example node id_attribute = node.id ? %( id="#{node.id}") : '' if node.option? 'collapsible' class_attribute = node.role ? %( class="#{node.role}") : '' summary_element = node.title? ? %(#{node.title}) : 'Details' %( #{summary_element}
#{node.content}
) else title_element = node.title? ? %(
#{node.captioned_title}
\n) : '' %( #{title_element}
#{node.content}
) end end def convert_floating_title node tag_name = %(h#{node.level + 1}) id_attribute = node.id ? %( id="#{node.id}") : '' classes = [node.style, node.role].compact %(<#{tag_name}#{id_attribute} class="#{classes.join ' '}">#{node.title}) end def convert_image node target = node.attr 'target' width_attr = (node.attr? 'width') ? %( width="#{node.attr 'width'}") : '' height_attr = (node.attr? 'height') ? %( height="#{node.attr 'height'}") : '' if ((node.attr? 'format', 'svg') || (target.include? '.svg')) && node.document.safe < SafeMode::SECURE if node.option? 'inline' img = (read_svg_contents node, target) || %(#{node.alt}) elsif node.option? 'interactive' fallback = (node.attr? 'fallback') ? %(#{encode_attribute_value node.alt}) : %(#{node.alt}) img = %(#{fallback}) else img = %(#{encode_attribute_value node.alt}) end else img = %(#{encode_attribute_value node.alt}) end img = %(#{img}) if node.attr? 'link' id_attr = node.id ? %( id="#{node.id}") : '' classes = ['imageblock'] classes << (node.attr 'float') if node.attr? 'float' classes << %(text-#{node.attr 'align'}) if node.attr? 'align' classes << node.role if node.role class_attr = %( class="#{classes.join ' '}") title_el = node.title? ? %(\n
#{node.captioned_title}
) : '' %(
#{img}
#{title_el} ) end def convert_listing node nowrap = (node.option? 'nowrap') || !(node.document.attr? 'prewrap') if node.style == 'source' lang = node.attr 'language' if (syntax_hl = node.document.syntax_highlighter) opts = syntax_hl.highlight? ? { css_mode: ((doc_attrs = node.document.attributes)[%(#{syntax_hl.name}-css)] || :class).to_sym, style: doc_attrs[%(#{syntax_hl.name}-style)], } : {} opts[:nowrap] = nowrap else pre_open = %(
)
        pre_close = '
' end else pre_open = %() pre_close = '' end id_attribute = node.id ? %( id="#{node.id}") : '' title_element = node.title? ? %(
#{node.captioned_title}
\n) : '' %( #{title_element}
#{syntax_hl ? (syntax_hl.format node, lang, opts) : pre_open + node.content + pre_close}
) end def convert_literal node id_attribute = node.id ? %( id="#{node.id}") : '' title_element = node.title? ? %(
#{node.title}
\n) : '' nowrap = !(node.document.attr? 'prewrap') || (node.option? 'nowrap') %( #{title_element}
#{node.content}
) end def convert_stem node id_attribute = node.id ? %( id="#{node.id}") : '' title_element = node.title? ? %(
#{node.title}
\n) : '' open, close = BLOCK_MATH_DELIMITERS[style = node.style.to_sym] if (equation = node.content) if style == :asciimath && (equation.include? LF) br = %(#{LF}) equation = equation.gsub(StemBreakRx) { %(#{close}#{br * (($&.count LF) - 1)}#{LF}#{open}) } end unless (equation.start_with? open) && (equation.end_with? close) equation = %(#{open}#{equation}#{close}) end else equation = '' end %( #{title_element}
#{equation}
) end def convert_olist node result = [] id_attribute = node.id ? %( id="#{node.id}") : '' classes = ['olist', node.style, node.role].compact class_attribute = %( class="#{classes.join ' '}") result << %() result << %(
#{node.title}
) if node.title? type_attribute = (keyword = node.list_marker_keyword) ? %( type="#{keyword}") : '' start_attribute = (node.attr? 'start') ? %( start="#{node.attr 'start'}") : '' reversed_attribute = (node.option? 'reversed') ? (append_boolean_attribute 'reversed', @xml_mode) : '' result << %(
    ) node.items.each do |item| if item.id result << %(
  1. ) elsif item.role result << %(
  2. ) else result << '
  3. ' end result << %(

    #{item.text}

    ) result << item.content if item.blocks? result << '
  4. ' end result << '
' result << '' result.join LF end def convert_open node if (style = node.style) == 'abstract' if node.parent == node.document && node.document.doctype == 'book' logger.warn 'abstract block cannot be used in a document without a title when doctype is book. Excluding block content.' '' else id_attr = node.id ? %( id="#{node.id}") : '' title_el = node.title? ? %(
#{node.title}
\n) : '' %( #{title_el}
#{node.content}
) end elsif style == 'partintro' && (node.level > 0 || node.parent.context != :section || node.document.doctype != 'book') logger.error 'partintro block can only be used when doctype is book and must be a child of a book part. Excluding block content.' '' else id_attr = node.id ? %( id="#{node.id}") : '' title_el = node.title? ? %(
#{node.title}
\n) : '' %( #{title_el}
#{node.content}
) end end def convert_page_break node '
' end def convert_paragraph node if node.role attributes = %(#{node.id ? %[ id="#{node.id}"] : ''} class="paragraph #{node.role}") elsif node.id attributes = %( id="#{node.id}" class="paragraph") else attributes = ' class="paragraph"' end if node.title? %(
#{node.title}

#{node.content}

) else %(

#{node.content}

) end end alias convert_pass content_only def convert_preamble node if (doc = node.document).attr?('toc-placement', 'preamble') && doc.sections? && (doc.attr? 'toc') toc = %(
#{doc.attr 'toc-title'}
#{doc.converter.convert doc, 'outline'}
) else toc = '' end %(
#{node.content}
#{toc}
) end def convert_quote node id_attribute = node.id ? %( id="#{node.id}") : '' classes = ['quoteblock', node.role].compact class_attribute = %( class="#{classes.join ' '}") title_element = node.title? ? %(\n
#{node.title}
) : '' attribution = (node.attr? 'attribution') ? (node.attr 'attribution') : nil citetitle = (node.attr? 'citetitle') ? (node.attr 'citetitle') : nil if attribution || citetitle cite_element = citetitle ? %(#{citetitle}) : '' attribution_text = attribution ? %(— #{attribution}#{citetitle ? "\n" : ''}) : '' attribution_element = %(\n
\n#{attribution_text}#{cite_element}\n
) else attribution_element = '' end %(#{title_element}
#{node.content}
#{attribution_element} ) end def convert_thematic_break node %() end def convert_sidebar node id_attribute = node.id ? %( id="#{node.id}") : '' title_element = node.title? ? %(
#{node.title}
\n) : '' %(
#{title_element}#{node.content}
) end def convert_table node result = [] id_attribute = node.id ? %( id="#{node.id}") : '' frame = 'ends' if (frame = node.attr 'frame', 'all', 'table-frame') == 'topbot' classes = ['tableblock', %(frame-#{frame}), %(grid-#{node.attr 'grid', 'all', 'table-grid'})] if (stripes = node.attr 'stripes', nil, 'table-stripes') classes << %(stripes-#{stripes}) end style_attribute = '' if (autowidth = node.option? 'autowidth') && !(node.attr? 'width') classes << 'fit-content' elsif (tablewidth = node.attr 'tablepcwidth') == 100 classes << 'stretch' else style_attribute = %( style="width: #{tablewidth}%;") end classes << (node.attr 'float') if node.attr? 'float' if (role = node.role) classes << role end class_attribute = %( class="#{classes.join ' '}") result << %() result << %(#{node.captioned_title}) if node.title? if (node.attr 'rowcount') > 0 slash = @void_element_slash result << '' if autowidth result += (Array.new node.columns.size, %()) else node.columns.each do |col| result << ((col.option? 'autowidth') ? %() : %()) end end result << '' node.rows.to_h.each do |tsec, rows| next if rows.empty? result << %() rows.each do |row| result << '' row.each do |cell| if tsec == :head cell_content = cell.text else case cell.style when :asciidoc cell_content = %(
#{cell.content}
) when :literal cell_content = %(
#{cell.text}
) else cell_content = (cell_content = cell.content).empty? ? '' : %(

#{cell_content.join '

'}

) end end cell_tag_name = (tsec == :head || cell.style == :header ? 'th' : 'td') cell_class_attribute = %( class="tableblock halign-#{cell.attr 'halign'} valign-#{cell.attr 'valign'}") cell_colspan_attribute = cell.colspan ? %( colspan="#{cell.colspan}") : '' cell_rowspan_attribute = cell.rowspan ? %( rowspan="#{cell.rowspan}") : '' cell_style_attribute = (node.document.attr? 'cellbgcolor') ? %( style="background-color: #{node.document.attr 'cellbgcolor'};") : '' result << %(<#{cell_tag_name}#{cell_class_attribute}#{cell_colspan_attribute}#{cell_rowspan_attribute}#{cell_style_attribute}>#{cell_content}) end result << '' end result << %(
) end end result << '' result.join LF end def convert_toc node unless (doc = node.document).attr?('toc-placement', 'macro') && doc.sections? && (doc.attr? 'toc') return '' end if node.id id_attr = %( id="#{node.id}") title_id_attr = %( id="#{node.id}title") else id_attr = ' id="toc"' title_id_attr = ' id="toctitle"' end title = node.title? ? node.title : (doc.attr 'toc-title') levels = (node.attr? 'levels') ? (node.attr 'levels').to_i : nil role = node.role? ? node.role : (doc.attr 'toc-class', 'toc') %( #{title} #{doc.converter.convert doc, 'outline', toclevels: levels} ) end def convert_ulist node result = [] id_attribute = node.id ? %( id="#{node.id}") : '' div_classes = ['ulist', node.style, node.role].compact marker_checked = marker_unchecked = '' if (checklist = node.option? 'checklist') div_classes.unshift div_classes.shift, 'checklist' ul_class_attribute = ' class="checklist"' if node.option? 'interactive' if @xml_mode marker_checked = ' ' marker_unchecked = ' ' else marker_checked = ' ' marker_unchecked = ' ' end elsif node.document.attr? 'icons', 'font' marker_checked = ' ' marker_unchecked = ' ' else marker_checked = '✓ ' marker_unchecked = '❏ ' end else ul_class_attribute = node.style ? %( class="#{node.style}") : '' end result << %() result << %(
#{node.title}
) if node.title? result << %() node.items.each do |item| if item.id result << %(
  • ) elsif item.role result << %(
  • ) else result << '
  • ' end if checklist && (item.attr? 'checkbox') result << %(

    #{(item.attr? 'checked') ? marker_checked : marker_unchecked}#{item.text}

    ) else result << %(

    #{item.text}

    ) end result << item.content if item.blocks? result << '
  • ' end result << '' result << '' result.join LF end def convert_verse node id_attribute = node.id ? %( id="#{node.id}") : '' classes = ['verseblock', node.role].compact class_attribute = %( class="#{classes.join ' '}") title_element = node.title? ? %(\n
    #{node.title}
    ) : '' attribution = (node.attr? 'attribution') ? (node.attr 'attribution') : nil citetitle = (node.attr? 'citetitle') ? (node.attr 'citetitle') : nil if attribution || citetitle cite_element = citetitle ? %(#{citetitle}) : '' attribution_text = attribution ? %(— #{attribution}#{citetitle ? "\n" : ''}) : '' attribution_element = %(\n
    \n#{attribution_text}#{cite_element}\n
    ) else attribution_element = '' end %(#{title_element}
    #{node.content}
    #{attribution_element} ) end def convert_video node xml = @xml_mode id_attribute = node.id ? %( id="#{node.id}") : '' classes = ['videoblock'] classes << (node.attr 'float') if node.attr? 'float' classes << %(text-#{node.attr 'align'}) if node.attr? 'align' classes << node.role if node.role class_attribute = %( class="#{classes.join ' '}") title_element = node.title? ? %(\n
    #{node.title}
    ) : '' width_attribute = (node.attr? 'width') ? %( width="#{node.attr 'width'}") : '' height_attribute = (node.attr? 'height') ? %( height="#{node.attr 'height'}") : '' case node.attr 'poster' when 'vimeo' unless (asset_uri_scheme = (node.document.attr 'asset-uri-scheme', 'https')).empty? asset_uri_scheme = %(#{asset_uri_scheme}:) end start_anchor = (node.attr? 'start') ? %(#at=#{node.attr 'start'}) : '' delimiter = ['?'] target, hash = (node.attr 'target').split '/', 2 hash_param = (hash ||= node.attr 'hash') ? %(#{delimiter.pop || '&'}h=#{hash}) : '' autoplay_param = (node.option? 'autoplay') ? %(#{delimiter.pop || '&'}autoplay=1) : '' loop_param = (node.option? 'loop') ? %(#{delimiter.pop || '&'}loop=1) : '' muted_param = (node.option? 'muted') ? %(#{delimiter.pop || '&'}muted=1) : '' %(#{title_element}
    ) when 'youtube' unless (asset_uri_scheme = (node.document.attr 'asset-uri-scheme', 'https')).empty? asset_uri_scheme = %(#{asset_uri_scheme}:) end rel_param_val = (node.option? 'related') ? 1 : 0 # NOTE start and end must be seconds (t parameter allows XmYs where X is minutes and Y is seconds) start_param = (node.attr? 'start') ? %(&start=#{node.attr 'start'}) : '' end_param = (node.attr? 'end') ? %(&end=#{node.attr 'end'}) : '' autoplay_param = (node.option? 'autoplay') ? '&autoplay=1' : '' loop_param = (has_loop_param = node.option? 'loop') ? '&loop=1' : '' mute_param = (node.option? 'muted') ? '&mute=1' : '' controls_param = (node.option? 'nocontrols') ? '&controls=0' : '' # cover both ways of controlling fullscreen option if node.option? 'nofullscreen' fs_param = '&fs=0' fs_attribute = '' else fs_param = '' fs_attribute = append_boolean_attribute 'allowfullscreen', xml end modest_param = (node.option? 'modest') ? '&modestbranding=1' : '' theme_param = (node.attr? 'theme') ? %(&theme=#{node.attr 'theme'}) : '' hl_param = (node.attr? 'lang') ? %(&hl=#{node.attr 'lang'}) : '' # parse video_id/list_id syntax where list_id (i.e., playlist) is optional target, list = (node.attr 'target').split '/', 2 if (list ||= (node.attr 'list')) list_param = %(&list=#{list}) else # parse dynamic playlist syntax: video_id1,video_id2,... target, playlist = target.split ',', 2 if (playlist ||= (node.attr 'playlist')) # INFO playlist bar doesn't appear in Firefox unless showinfo=1 and modestbranding=1 list_param = %(&playlist=#{target},#{playlist}) else # NOTE for loop to work, playlist must be specified; use VIDEO_ID if there's no explicit playlist list_param = has_loop_param ? %(&playlist=#{target}) : '' end end %(#{title_element}
    ) else poster_attribute = (val = node.attr 'poster').nil_or_empty? ? '' : %( poster="#{node.media_uri val}") preload_attribute = (val = node.attr 'preload').nil_or_empty? ? '' : %( preload="#{val}") start_t = node.attr 'start' end_t = node.attr 'end' time_anchor = (start_t || end_t) ? %(#t=#{start_t || ''}#{end_t ? ",#{end_t}" : ''}) : '' %(#{title_element}
    ) end end def convert_inline_anchor node case node.type when :xref if (path = node.attributes['path']) attrs = (append_link_constraint_attrs node, node.role ? [%( class="#{node.role}")] : []).join text = node.text || path else attrs = node.role ? %( class="#{node.role}") : '' unless (text = node.text) if AbstractNode === (ref = (@refs ||= node.document.catalog[:refs])[refid = node.attributes['refid']] || (refid.nil_or_empty? ? (top = get_root_document node) : nil)) if (@resolving_xref ||= (outer = true)) && outer if (text = ref.xreftext node.attr 'xrefstyle', nil, true) text = text.gsub DropAnchorRx, '' if text.include? '#{text}) when :ref %() when :link attrs = node.id ? [%( id="#{node.id}")] : [] attrs << %( class="#{node.role}") if node.role attrs << %( title="#{node.attr 'title'}") if node.attr? 'title' %(#{node.text}) when :bibref %([#{node.reftext || node.id}]) else logger.warn %(unknown anchor type: #{node.type.inspect}) nil end end def convert_inline_break node %(#{node.text}) end def convert_inline_button node %(#{node.text}) end def convert_inline_callout node if node.document.attr? 'icons', 'font' %((#{node.text})) elsif node.document.attr? 'icons' src = node.icon_uri("callouts/#{node.text}") %(#{node.text}) elsif ::Array === (guard = node.attributes['guard']) %(<!--(#{node.text})-->) else %(#{guard}(#{node.text})) end end def convert_inline_footnote node if (index = node.attr 'index') if node.type == :xref %([#{index}]) else id_attr = node.id ? %( id="_footnote_#{node.id}") : '' %([#{index}]) end elsif node.type == :xref %([#{node.text}]) end end def convert_inline_image node target = node.target if (type = node.type || 'image') == 'icon' if (icons = node.document.attr 'icons') == 'font' i_class_attr_val = %(fa fa-#{target}) i_class_attr_val = %(#{i_class_attr_val} fa-#{node.attr 'size'}) if node.attr? 'size' if node.attr? 'flip' i_class_attr_val = %(#{i_class_attr_val} fa-flip-#{node.attr 'flip'}) elsif node.attr? 'rotate' i_class_attr_val = %(#{i_class_attr_val} fa-rotate-#{node.attr 'rotate'}) end attrs = (node.attr? 'title') ? %( title="#{node.attr 'title'}") : '' img = %() elsif icons attrs = (node.attr? 'width') ? %( width="#{node.attr 'width'}") : '' attrs = %(#{attrs} height="#{node.attr 'height'}") if node.attr? 'height' attrs = %(#{attrs} title="#{node.attr 'title'}") if node.attr? 'title' img = %(#{encode_attribute_value node.alt}) else img = %([#{node.alt}]) end else attrs = (node.attr? 'width') ? %( width="#{node.attr 'width'}") : '' attrs = %(#{attrs} height="#{node.attr 'height'}") if node.attr? 'height' attrs = %(#{attrs} title="#{node.attr 'title'}") if node.attr? 'title' if ((node.attr? 'format', 'svg') || (target.include? '.svg')) && node.document.safe < SafeMode::SECURE if node.option? 'inline' img = (read_svg_contents node, target) || %(#{node.alt}) elsif node.option? 'interactive' fallback = (node.attr? 'fallback') ? %(#{encode_attribute_value node.alt}) : %(#{node.alt}) img = %(#{fallback}) else img = %(#{encode_attribute_value node.alt}) end else img = %(#{encode_attribute_value node.alt}) end end img = %(#{img}) if node.attr? 'link' class_attr_val = type if (role = node.role) class_attr_val = (node.attr? 'float') ? %(#{class_attr_val} #{node.attr 'float'} #{role}) : %(#{class_attr_val} #{role}) elsif node.attr? 'float' class_attr_val = %(#{class_attr_val} #{node.attr 'float'}) end %(#{img}) end def convert_inline_indexterm node node.type == :visible ? 
node.text : '' end def convert_inline_kbd node if (keys = node.attr 'keys').size == 1 %(#{keys[0]}) else %(#{keys.join '+'}) end end def convert_inline_menu node caret = (node.document.attr? 'icons', 'font') ? '  ' : '  ' submenu_joiner = %(#{caret}) menu = node.attr 'menu' if (submenus = node.attr 'submenus').empty? if (menuitem = node.attr 'menuitem') %(#{menu}#{caret}#{menuitem}) else %(#{menu}) end else %(#{menu}#{caret}#{submenus.join submenu_joiner}#{caret}#{node.attr 'menuitem'}) end end def convert_inline_quoted node open, close, tag = QUOTE_TAGS[node.type] if node.id class_attr = node.role ? %( class="#{node.role}") : '' if tag %(#{open.chop} id="#{node.id}"#{class_attr}>#{node.text}#{close}) else %(#{open}#{node.text}#{close}) end elsif node.role if tag %(#{open.chop} class="#{node.role}">#{node.text}#{close}) else %(#{open}#{node.text}#{close}) end else %(#{open}#{node.text}#{close}) end end # NOTE expose read_svg_contents for Bespoke converter def read_svg_contents node, target if (svg = node.read_contents target, start: (node.document.attr 'imagesdir'), normalize: true, label: 'SVG', warn_if_empty: true) return if svg.empty? svg = svg.sub SvgPreambleRx, '' unless svg.start_with? ') end svg = %(#{new_start_tag}#{svg[old_start_tag.length..-1]}) if new_start_tag end svg end private def append_boolean_attribute name, xml xml ? %( #{name}="#{name}") : %( #{name}) end def append_link_constraint_attrs node, attrs = [] rel = 'nofollow' if node.option? 'nofollow' if (window = node.attributes['window']) attrs << %( target="#{window}") attrs << (rel ? %( rel="#{rel} noopener") : ' rel="noopener"') if window == '_blank' || (node.option? 'noopener') elsif rel attrs << %( rel="#{rel}") end attrs end def encode_attribute_value val (val.include? '"') ? (val.gsub '"', '"') : val end def generate_manname_section node manname_title = node.attr 'manname-title', 'Name' if (next_section = node.sections[0]) && (next_section_title = next_section.title) == next_section_title.upcase manname_title = manname_title.upcase end manname_id_attr = (manname_id = node.attr 'manname-id') ? %( id="#{manname_id}") : '' %(#{manname_title}

<div class="sectionbody">
<p>#{(node.attr 'mannames').join ', '} - #{node.attr 'manpurpose'}</p>
</div>
    ) end def get_root_document node while (node = node.document).nested? node = node.parent_document end node end # NOTE adapt to older converters that relied on unprefixed method names def method_missing id, *args !((name = id.to_s).start_with? 'convert_') && (handles? name) ? (send %(convert_#{name}), *args) : super end def respond_to_missing? id, *options !((name = id.to_s).start_with? 'convert_') && (handles? name) end end end asciidoctor-2.0.20/lib/asciidoctor/converter/manpage.rb000066400000000000000000000556601443135032600231300ustar00rootroot00000000000000# frozen_string_literal: true module Asciidoctor # A built-in {Converter} implementation that generates the man page (troff) format. # # The output of this converter adheres to the man definition as defined by # groff and uses the manpage output of the DocBook toolchain as a foundation. # That means if you've previously been generating man pages using the a2x tool # from AsciiDoc.py, you should be able to achieve a very similar result # using this converter. Though you'll also get to enjoy some notable # enhancements that have been added since, such as the customizable linkstyle. # # See http://www.gnu.org/software/groff/manual/html_node/Man-usage.html#Man-usage class Converter::ManPageConverter < Converter::Base register_for 'manpage' WHITESPACE = %(#{LF}#{TAB} ) ET = ' ' * 8 ESC = ?\u001b # troff leader marker ESC_BS = %(#{ESC}\\) # escaped backslash (indicates troff formatting sequence) ESC_FS = %(#{ESC}.) # escaped full stop (indicates troff macro) LiteralBackslashRx = /\A\\|(#{ESC})?\\/ LeadingPeriodRx = /^\./ EscapedMacroRx = /^(?:#{ESC}\\c\n)?#{ESC}\.((?:URL|MTO) "#{CC_ANY}*?" "#{CC_ANY}*?" )( |[^\s]*)(#{CC_ANY}*?)(?: *#{ESC}\\c)?$/ MalformedEscapedMacroRx = /(#{ESC}\\c) (#{ESC}\.(?:URL|MTO) )/ MockMacroRx = %r(]+)>) EmDashCharRefRx = /—(?:​)?/ EllipsisCharRefRx = /…(?:​)?/ WrappedIndentRx = /#{CG_BLANK}*#{LF}#{CG_BLANK}*/ XMLMarkupRx = /&#?[a-z\d]+;||<[^>]+>)|([^&<]+)) def initialize backend, opts = {} @backend = backend init_backend_traits basebackend: 'manpage', filetype: 'man', outfilesuffix: '.man', supports_templates: true end def convert_document node unless node.attr? 'mantitle' raise 'asciidoctor: ERROR: doctype must be set to manpage when using manpage backend' end mantitle = (node.attr 'mantitle').gsub InvalidSectionIdCharsRx, '' manvolnum = node.attr 'manvolnum', '1' manname = node.attr 'manname', mantitle manmanual = node.attr 'manmanual' mansource = node.attr 'mansource' docdate = (node.attr? 'reproducible') ? nil : (node.attr 'docdate') # NOTE the first line enables the table (tbl) preprocessor, necessary for non-Linux systems result = [%('\\" t .\\" Title: #{mantitle} .\\" Author: #{(node.attr? 'authors') ? (node.attr 'authors') : '[see the "AUTHOR(S)" section]'} .\\" Generator: Asciidoctor #{node.attr 'asciidoctor-version'})] result << %(.\\" Date: #{docdate}) if docdate result << %(.\\" Manual: #{manmanual ? (manmanual.tr_s WHITESPACE, ' ') : '\ \&'} .\\" Source: #{mansource ? (mansource.tr_s WHITESPACE, ' ') : '\ \&'} .\\" Language: English .\\") # TODO add document-level setting to disable capitalization of manname result << %(.TH "#{manify manname.upcase}" "#{manvolnum}" "#{docdate}" "#{mansource ? (manify mansource) : '\ \&'}" "#{manmanual ? 
(manify manmanual) : '\ \&'}") # define portability settings # see http://bugs.debian.org/507673 # see http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html result << '.ie \n(.g .ds Aq \(aq' result << '.el .ds Aq \'' # set sentence_space_size to 0 to prevent extra space between sentences separated by a newline # the alternative is to add \& at the end of the line result << '.ss \n[.ss] 0' # disable hyphenation result << '.nh' # disable justification (adjust text to left margin only) result << '.ad l' # define URL macro for portability # see http://web.archive.org/web/20060102165607/http://people.debian.org/~branden/talks/wtfm/wtfm.pdf # # Usage # # .URL "http://www.debian.org" "Debian" "." # # * First argument: the URL # * Second argument: text to be hyperlinked # * Third (optional) argument: text that needs to immediately trail the hyperlink without intervening whitespace result << '.de URL \\fI\\\\$2\\fP <\\\\$1>\\\\$3 .. .als MTO URL .if \n[.g] \{\ . mso www.tmac . am URL . ad l . . . am MTO . ad l . .' result << %(. LINKSTYLE #{node.attr 'man-linkstyle', 'blue R < >'}) result << '.\}' unless node.noheader if node.attr? 'manpurpose' mannames = node.attr 'mannames', [manname] result << %(.SH "#{(node.attr 'manname-title', 'NAME').upcase}" #{mannames.map {|n| (manify n).gsub '\-', '-' }.join ', '} \\- #{manify node.attr('manpurpose'), whitespace: :normalize}) end end result << node.content # QUESTION should NOTES come after AUTHOR(S)? append_footnotes result, node unless (authors = node.authors).empty? if authors.size > 1 result << '.SH "AUTHORS"' authors.each do |author| result << %(.sp #{author.name}) end else result << %(.SH "AUTHOR" .sp #{authors[0].name}) end end result.join LF end # NOTE embedded doesn't really make sense in the manpage backend def convert_embedded node result = [node.content] append_footnotes result, node # QUESTION should we add an AUTHOR(S) section? result.join LF end def convert_section node result = [] if node.level > 1 macro = 'SS' # QUESTION why captioned title? why not when level == 1? stitle = node.captioned_title else macro = 'SH' stitle = uppercase_pcdata node.title end result << %(.#{macro} "#{manify stitle}" #{node.content}) result.join LF end def convert_admonition node result = [] result << %(.if n .sp .RS 4 .it 1 an-trap .nr an-no-space-flag 1 .nr an-break-flag 1 .br .ps +1 .B #{node.attr 'textlabel'}#{node.title? ? "\\fP: #{manify node.title}" : ''} .ps -1 .br #{enclose_content node} .sp .5v .RE) result.join LF end def convert_colist node result = [] result << %(.sp .B #{manify node.title} .br) if node.title? result << '.TS tab(:); r lw(\n(.lu*75u/100u).' num = 0 node.items.each do |item| result << %(\\fB(#{num += 1})\\fP\\h'-2n':T{) result << (manify item.text, whitespace: :normalize) result << item.content if item.blocks? result << 'T}' end result << '.TE' result.join LF end # TODO implement horizontal (if it makes sense) def convert_dlist node result = [] result << %(.sp .B #{manify node.title} .br) if node.title? counter = 0 node.items.each do |terms, dd| counter += 1 case node.style when 'qanda' result << %(.sp #{counter}. #{manify terms.map {|dt| dt.text }.join ' '} .RS 4) else result << %(.sp #{manify terms.map {|dt| dt.text }.join(', '), whitespace: :normalize} .RS 4) end if dd result << (manify dd.text, whitespace: :normalize) if dd.text? result << dd.content if dd.blocks? end result << '.RE' end result.join LF end def convert_example node result = [] result << (node.title? ? 
%(.sp .B #{manify node.captioned_title} .br) : '.sp') result << %(.RS 4 #{enclose_content node} .RE) result.join LF end def convert_floating_title node %(.SS "#{manify node.title}") end def convert_image node result = [] result << (node.title? ? %(.sp .B #{manify node.captioned_title} .br) : '.sp') result << %([#{manify node.alt}]) result.join LF end def convert_listing node result = [] result << %(.sp .B #{manify node.captioned_title} .br) if node.title? result << %(.sp .if n .RS 4 .nf .fam C #{manify node.content, whitespace: :preserve} .fam .fi .if n .RE) result.join LF end def convert_literal node result = [] result << %(.sp .B #{manify node.title} .br) if node.title? result << %(.sp .if n .RS 4 .nf .fam C #{manify node.content, whitespace: :preserve} .fam .fi .if n .RE) result.join LF end def convert_sidebar node result = [] result << (node.title? ? %(.sp .B #{manify node.title} .br) : '.sp') result << %(.RS 4 #{enclose_content node} .RE) result.join LF end def convert_olist node result = [] result << %(.sp .B #{manify node.title} .br) if node.title? start = (node.attr 'start', 1).to_i node.items.each_with_index do |item, idx| result << %(.sp .RS 4 .ie n \\{\\ \\h'-04' #{numeral = idx + start}.\\h'+01'\\c .\\} .el \\{\\ . sp -1 . IP " #{numeral}." 4.2 .\\} #{manify item.text, whitespace: :normalize}) result << item.content if item.blocks? result << '.RE' end result.join LF end def convert_open node case node.style when 'abstract', 'partintro' enclose_content node else node.content end end def convert_page_break node '.bp' end def convert_paragraph node if node.title? %(.sp .B #{manify node.title} .br #{manify node.content, whitespace: :normalize}) else %(.sp #{manify node.content, whitespace: :normalize}) end end alias convert_pass content_only alias convert_preamble content_only def convert_quote node result = [] if node.title? result << %(.sp .RS 3 .B #{manify node.title} .br .RE) end attribution_line = (node.attr? 'citetitle') ? %(#{node.attr 'citetitle'} ) : nil attribution_line = (node.attr? 'attribution') ? %[#{attribution_line}\\(em #{node.attr 'attribution'}] : nil result << %(.RS 3 .ll -.6i #{enclose_content node} .br .RE .ll) if attribution_line result << %(.RS 5 .ll -.10i #{attribution_line} .RE .ll) end result.join LF end def convert_stem node result = [] result << (node.title? ? %(.sp .B #{manify node.title} .br) : '.sp') open, close = BLOCK_MATH_DELIMITERS[node.style.to_sym] if ((equation = node.content).start_with? open) && (equation.end_with? close) equation = equation.slice open.length, equation.length - open.length - close.length end result << %(#{manify equation, whitespace: :preserve} (#{node.style})) result.join LF end # FIXME: The reason this method is so complicated is because we are not # receiving empty(marked) cells when there are colspans or rowspans. This # method has to create a map of all cells and in the case of rowspans # create empty cells as placeholders of the span. # To fix this, asciidoctor needs to provide an API to tell the user if a # given cell is being used as a colspan or rowspan. def convert_table node result = [] if node.title? 
result << %(.sp .it 1 an-trap .nr an-no-space-flag 1 .nr an-break-flag 1 .br .B #{manify node.captioned_title} ) end result << '.TS allbox tab(:);' row_header = [] row_text = [] row_index = 0 node.rows.to_h.each do |tsec, rows| rows.each do |row| row_header[row_index] ||= [] row_text[row_index] ||= [] # result << LF # l left-adjusted # r right-adjusted # c centered-adjusted # n numerical align # a alphabetic align # s spanned # ^ vertically spanned remaining_cells = row.size row.each_with_index do |cell, cell_index| remaining_cells -= 1 row_header[row_index][cell_index] ||= [] # Add an empty cell if this is a rowspan cell if row_header[row_index][cell_index] == ['^t'] row_text[row_index] << %(T{#{LF}.sp#{LF}T}:) end row_text[row_index] << %(T{#{LF}.sp#{LF}) cell_halign = (cell.attr 'halign', 'left').chr if tsec == :body if row_header[row_index].empty? || row_header[row_index][cell_index].empty? row_header[row_index][cell_index] << %(#{cell_halign}t) else row_header[row_index][cell_index + 1] ||= [] row_header[row_index][cell_index + 1] << %(#{cell_halign}t) end case cell.style when :asciidoc cell_content = cell.content when :literal cell_content = %(.nf#{LF}#{manify cell.text, whitespace: :preserve}#{LF}.fi) else cell_content = manify cell.content.join, whitespace: :normalize end row_text[row_index] << %(#{cell_content}#{LF}) else # tsec == :head || tsec == :foot if row_header[row_index].empty? || row_header[row_index][cell_index].empty? row_header[row_index][cell_index] << %(#{cell_halign}tB) else row_header[row_index][cell_index + 1] ||= [] row_header[row_index][cell_index + 1] << %(#{cell_halign}tB) end row_text[row_index] << %(#{manify cell.text, whitespace: :normalize}#{LF}) end if cell.colspan && cell.colspan > 1 (cell.colspan - 1).times do |i| if row_header[row_index].empty? || row_header[row_index][cell_index].empty? row_header[row_index][cell_index + i] << 'st' else row_header[row_index][cell_index + 1 + i] ||= [] row_header[row_index][cell_index + 1 + i] << 'st' end end end if cell.rowspan && cell.rowspan > 1 (cell.rowspan - 1).times do |i| row_header[row_index + 1 + i] ||= [] if row_header[row_index + 1 + i].empty? || row_header[row_index + 1 + i][cell_index].empty? row_header[row_index + 1 + i][cell_index] ||= [] row_header[row_index + 1 + i][cell_index] << '^t' else row_header[row_index + 1 + i][cell_index + 1] ||= [] row_header[row_index + 1 + i][cell_index + 1] << '^t' end end end if remaining_cells >= 1 row_text[row_index] << 'T}:' else row_text[row_index] << %(T}#{LF}) end end row_index += 1 end unless rows.empty? end #row_header.each do |row| # result << LF # row.each_with_index do |cell, i| # result << (cell.join ' ') # result << ' ' if row.size > i + 1 # end #end # FIXME temporary fix to get basic table to display result << LF result << ('lt ' * row_header[0].size).chop result << %(.#{LF}) row_text.each do |row| result << row.join end result << %(.TE#{LF}.sp) result.join end def convert_thematic_break node '.sp .ce \l\'\n(.lu*25u/100u\(ap\'' end alias convert_toc skip def convert_ulist node result = [] result << %(.sp .B #{manify node.title} .br) if node.title? node.items.map do |item| result << %[.sp .RS 4 .ie n \\{\\ \\h'-04'\\(bu\\h'+03'\\c .\\} .el \\{\\ . sp -1 . IP \\(bu 2.3 .\\} #{manify item.text, whitespace: :normalize}] result << item.content if item.blocks? result << '.RE' end result.join LF end def convert_verse node result = [] if node.title? result << %(.sp .B #{manify node.title} .br) end attribution_line = (node.attr? 'citetitle') ? 
%(#{node.attr 'citetitle'} ) : nil attribution_line = (node.attr? 'attribution') ? %[#{attribution_line}\\(em #{node.attr 'attribution'}] : nil result << %(.sp .nf #{manify node.content, whitespace: :preserve} .fi .br) if attribution_line result << %(.in +.5i .ll -.5i #{attribution_line} .in .ll) end result.join LF end def convert_video node start_param = (node.attr? 'start') ? %(&start=#{node.attr 'start'}) : '' end_param = (node.attr? 'end') ? %(&end=#{node.attr 'end'}) : '' result = [] result << (node.title? ? %(.sp .B #{manify node.title} .br) : '.sp') result << %(<#{node.media_uri(node.attr 'target')}#{start_param}#{end_param}> (video)) result.join LF end def convert_inline_anchor node target = node.target case node.type when :link if target.start_with? 'mailto:' macro = 'MTO' target = target.slice 7, target.length else macro = 'URL' end if (text = node.text) == target text = '' else text = text.gsub '"', %[#{ESC_BS}(dq] end target = target.sub '@', %[#{ESC_BS}(at] if macro == 'MTO' %(#{ESC_BS}c#{LF}#{ESC_FS}#{macro} "#{target}" "#{text}" ) when :xref unless (text = node.text) if AbstractNode === (ref = (@refs ||= node.document.catalog[:refs])[refid = node.attributes['refid']] || (refid.nil_or_empty? ? (top = get_root_document node) : nil)) if (@resolving_xref ||= (outer = true)) && outer && (text = ref.xreftext node.attr 'xrefstyle', nil, true) text = uppercase_pcdata text if ref.context === :section && ref.level < 2 && text == ref.title else text = top ? '[^top]' : %([#{refid}]) end @resolving_xref = nil if outer else text = %([#{refid}]) end end text when :ref, :bibref # These are anchor points, which shouldn't be visible '' else logger.warn %(unknown anchor type: #{node.type.inspect}) nil end end def convert_inline_break node %(#{node.text}#{LF}#{ESC_FS}br) end def convert_inline_button node %(<#{ESC_BS}fB>[#{ESC_BS}0#{node.text}#{ESC_BS}0]) end def convert_inline_callout node %(<#{ESC_BS}fB>(#{node.text})<#{ESC_BS}fP>) end def convert_inline_footnote node if (index = node.attr 'index') %([#{index}]) elsif node.type == :xref %([#{node.text}]) end end def convert_inline_image node (node.attr? 'link') ? %([#{node.alt}] <#{node.attr 'link'}>) : %([#{node.alt}]) end def convert_inline_indexterm node node.type == :visible ? node.text : '' end def convert_inline_kbd node %[<#{ESC_BS}f(CR>#{(keys = node.attr 'keys').size == 1 ? keys[0] : (keys.join "#{ESC_BS}0+#{ESC_BS}0")}] end def convert_inline_menu node caret = %[#{ESC_BS}0#{ESC_BS}(fc#{ESC_BS}0] menu = node.attr 'menu' if !(submenus = node.attr 'submenus').empty? 
submenu_path = submenus.map {|item| %(<#{ESC_BS}fI>#{item}) }.join caret %(<#{ESC_BS}fI>#{menu}#{caret}#{submenu_path}#{caret}<#{ESC_BS}fI>#{node.attr 'menuitem'}) elsif (menuitem = node.attr 'menuitem') %(<#{ESC_BS}fI>#{menu}#{caret}#{menuitem}) else %(<#{ESC_BS}fI>#{menu}) end end # NOTE use fake XML elements to prevent creating artificial word boundaries def convert_inline_quoted node case node.type when :emphasis %(<#{ESC_BS}fI>#{node.text}) when :strong %(<#{ESC_BS}fB>#{node.text}) when :monospaced %[<#{ESC_BS}f(CR>#{node.text}] when :single %[<#{ESC_BS}(oq>#{node.text}] when :double %[<#{ESC_BS}(lq>#{node.text}] else node.text end end def self.write_alternate_pages mannames, manvolnum, target return unless mannames && mannames.size > 1 mannames.shift manvolext = %(.#{manvolnum}) dir, basename = ::File.split target mannames.each do |manname| ::File.write ::File.join(dir, %(#{manname}#{manvolext})), %(.so #{basename}), mode: FILE_WRITE_MODE end end private def append_footnotes result, node if node.footnotes? && !(node.attr? 'nofootnotes') result << '.SH "NOTES"' node.footnotes.each do |fn| result << %(.IP [#{fn.index}]) # NOTE restore newline in escaped macro that gets removed by normalize_text in substitutor if (text = fn.text).include? %(#{ESC}\\c #{ESC}.) text = (manify %(#{text.gsub MalformedEscapedMacroRx, %(\\1#{LF}\\2)} ), whitespace: :normalize).chomp ' ' else text = manify text, whitespace: :normalize end result << text end end end # Converts HTML entity references back to their original form, escapes # special man characters and strips trailing whitespace. # # It's crucial that text only ever pass through manify once. # # str - the String to convert # opts - an Hash of options to control processing (default: {}) # * :whitespace an enum that indicates how to handle whitespace; supported options are: # :preserve - preserve spaces (only expanding tabs); :normalize - normalize whitespace # (remove spaces around newlines); :collapse - collapse adjacent whitespace to a single # space (default: :collapse) # * :append_newline a Boolean that indicates whether to append a newline to the result (default: false) def manify str, opts = {} case opts.fetch :whitespace, :collapse when :preserve str = str.gsub TAB, ET when :normalize str = str.gsub WrappedIndentRx, LF else str = str.tr_s WHITESPACE, ' ' end str = str .gsub(LiteralBackslashRx) { $1 ? $& : '\\(rs' } # literal backslash (not a troff escape sequence) .gsub(EllipsisCharRefRx, '...') # horizontal ellipsis .gsub(LeadingPeriodRx, '\\\&.') # leading . is used in troff for macro call or other formatting; replace with \&. .gsub(EscapedMacroRx) do # drop orphaned \c escape lines, unescape troff macro, quote adjacent character, isolate macro line (rest = $3.lstrip).empty? ? 
%(.#{$1}"#{$2}") : %(.#{$1}"#{$2.rstrip}"#{LF}#{rest}) end .gsub('-', '\-') .gsub('<', '<') .gsub('>', '>') .gsub('+', '+') # plus sign; alternately could use \c(pl .gsub(' ', '\~') # non-breaking space .gsub('©', '\(co') # copyright sign .gsub('®', '\(rg') # registered sign .gsub('™', '\(tm') # trademark sign .gsub('°', '\(de') # degree sign .gsub(' ', ' ') # thin space .gsub('–', '\(en') # en dash .gsub(EmDashCharRefRx, '\(em') # em dash .gsub('‘', '\(oq') # left single quotation mark .gsub('’', '\(cq') # right single quotation mark .gsub('“', '\(lq') # left double quotation mark .gsub('”', '\(rq') # right double quotation mark .gsub('←', '\(<-') # leftwards arrow .gsub('→', '\(->') # rightwards arrow .gsub('⇐', '\(lA') # leftwards double arrow .gsub('⇒', '\(rA') # rightwards double arrow .gsub('​', '\:') # zero width space .gsub('&', '&') # literal ampersand (NOTE must take place after any other replacement that includes &) .gsub('\'', '\*(Aq') # apostrophe / neutral single quote .gsub(MockMacroRx, '\1') # mock boundary .gsub(ESC_BS, '\\') # unescape troff backslash (NOTE update if more escapes are added) .gsub(ESC_FS, '.') # unescape full stop in troff commands (NOTE must take place after gsub(LeadingPeriodRx)) .rstrip # strip trailing space opts[:append_newline] ? %(#{str}#{LF}) : str end def uppercase_pcdata string (XMLMarkupRx.match? string) ? string.gsub(PCDATAFilterRx) { $2 ? $2.upcase : $1 } : string.upcase end def enclose_content node node.content_model == :compound ? node.content : %(.sp#{LF}#{manify node.content, whitespace: :normalize}) end def get_root_document node while (node = node.document).nested? node = node.parent_document end node end end end asciidoctor-2.0.20/lib/asciidoctor/converter/template.rb000066400000000000000000000252241443135032600233240ustar00rootroot00000000000000# frozen_string_literal: true module Asciidoctor # A {Converter} implementation that uses templates composed in template # languages supported by {https://github.com/rtomayko/tilt Tilt} to convert # {AbstractNode} objects from a parsed AsciiDoc document tree to the backend # format. # # The converter scans the specified directories for template files that are # supported by Tilt. If an engine name (e.g., "slim") is specified in the # options Hash passed to the constructor, the scan is restricted to template # files that have a matching extension (e.g., ".slim"). The scanner trims any # extensions from the basename of the file and uses the resulting name as the # key under which to store the template. When the {Converter#convert} method # is invoked, the transform argument is used to select the template from this # table and use it to convert the node. # # For example, the template file "path/to/templates/paragraph.html.slim" will # be registered as the "paragraph" transform. The template is then used to # convert a paragraph {Block} object from the parsed AsciiDoc tree to an HTML # backend format (e.g., "html5"). # # As an optimization, scan results and templates are cached for the lifetime # of the Ruby process. If the {https://rubygems.org/gems/concurrent-ruby # concurrent-ruby} gem is installed, these caches are guaranteed to be thread # safe. If this gem is not present, there is no such guarantee and a warning # will be issued. 
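  # A minimal usage sketch of the scanning behavior described above (for illustration
  # only; the directory layout and template file name are assumptions, not part of
  # this gem). A file such as templates/paragraph.html.slim is registered under the
  # "paragraph" transform and selected automatically when a paragraph node is converted.
  #
  #   require 'asciidoctor'
  #
  #   html = Asciidoctor.convert '*bold* paragraph',
  #     safe: :safe,
  #     template_dirs: ['templates'],
  #     template_engine: 'slim'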
class Converter::TemplateConverter < Converter::Base DEFAULT_ENGINE_OPTIONS = { erb: { trim: 0 }, # TODO line 466 of haml/compiler.rb sorts the attributes; file an issue to make this configurable # NOTE AsciiDoc syntax expects HTML/XML output to use double quotes around attribute values haml: { format: :xhtml, attr_wrapper: '"', escape_html: false, escape_attrs: false, ugly: true }, slim: { disable_escape: true, sort_attrs: false, pretty: false }, } begin require 'concurrent/map' unless defined? ::Concurrent::Map @caches = { scans: ::Concurrent::Map.new, templates: ::Concurrent::Map.new } rescue ::LoadError @caches = { scans: {}, templates: {} } end class << self attr_reader :caches def clear_caches @caches[:scans].clear @caches[:templates].clear end end def initialize backend, template_dirs, opts = {} Helpers.require_library 'tilt' unless defined? ::Tilt.new @backend = backend @templates = {} @template_dirs = template_dirs @eruby = opts[:eruby] @safe = opts[:safe] @active_engines = {} @engine = opts[:template_engine] @engine_options = {}.tap {|accum| DEFAULT_ENGINE_OPTIONS.each {|engine, engine_opts| accum[engine] = engine_opts.merge } } if opts[:htmlsyntax] == 'html' # if not set, assume xml since this converter is also used for DocBook (which doesn't specify htmlsyntax) @engine_options[:haml][:format] = :html5 @engine_options[:slim][:format] = :html end @engine_options[:slim][:include_dirs] = template_dirs.reverse.map {|dir| ::File.expand_path dir } if (overrides = opts[:template_engine_options]) overrides.each do |engine, override_opts| (@engine_options[engine] ||= {}).update override_opts end end case opts[:template_cache] when true logger.warn 'optional gem \'concurrent-ruby\' is not available. This gem is recommended when using the default template cache.' unless defined? ::Concurrent::Map @caches = self.class.caches when ::Hash @caches = opts[:template_cache] else @caches = {} # the empty Hash effectively disables caching end scan end # Public: Convert an {AbstractNode} to the backend format using the named template. # # Looks for a template that matches the value of the template name or, if the template name is not specified, the # value of the {AbstractNode#node_name} property. # # node - the AbstractNode to convert # template_name - the String name of the template to use, or the value of # the node_name property on the node if a template name is # not specified. (optional, default: nil) # opts - an optional Hash that is passed as local variables to the # template. (optional, default: nil) # # Returns the [String] result from rendering the template def convert node, template_name = nil, opts = nil unless (template = @templates[template_name ||= node.node_name]) raise %(Could not find a custom template to handle transform: #{template_name}) end # Slim doesn't include helpers in the template's execution scope (like HAML), so do it ourselves node.extend ::Slim::Helpers if (defined? ::Slim::Helpers) && (::Slim::Template === template) # NOTE opts become locals in the template if template_name == 'document' (template.render node, opts).strip else (template.render node, opts).rstrip end end # Public: Checks whether there is a Tilt template registered with the specified name. # # name - the String template name # # Returns a [Boolean] that indicates whether a Tilt template is registered for the # specified template name. def handles? name @templates.key? name end # Public: Retrieves the templates that this converter manages. 
# # Returns a [Hash] of Tilt template objects keyed by template name. def templates @templates.merge end # Public: Registers a Tilt template with this converter. # # name - the String template name # template - the Tilt template object to register # # Returns the Tilt template object def register name, template if (template_cache = @caches[:templates]) template_cache[template.file] = template end @templates[name] = template end private # Internal: Scans the template directories specified in the constructor for Tilt-supported # templates, loads the templates and stores the in a Hash that is accessible via the # {TemplateConverter#templates} method. # # Returns nothing def scan path_resolver = PathResolver.new backend = @backend engine = @engine @template_dirs.each do |template_dir| # FIXME need to think about safe mode restrictions here # Ruby 2.3 requires the extra brackets around the path_resolver.system_path method call next unless ::File.directory?(template_dir = (path_resolver.system_path template_dir)) if engine file_pattern = %(*.#{engine}) # example: templates/haml if ::File.directory?(engine_dir = %(#{template_dir}/#{engine})) template_dir = engine_dir end else # NOTE last matching template wins for template name if no engine is given file_pattern = '*' end # example: templates/html5 (engine not set) or templates/haml/html5 (engine set) if ::File.directory?(backend_dir = %(#{template_dir}/#{backend})) template_dir = backend_dir end pattern = %(#{template_dir}/#{file_pattern}) if (scan_cache = @caches[:scans]) template_cache = @caches[:templates] unless (templates = scan_cache[pattern]) templates = scan_cache[pattern] = scan_dir template_dir, pattern, template_cache end templates.each do |name, template| @templates[name] = template_cache[template.file] = template end else @templates.update scan_dir(template_dir, pattern, @caches[:templates]) end end nil end # Internal: Scan the specified directory for template files matching pattern and instantiate # a Tilt template for each matched file. # # Returns the scan result as a [Hash] def scan_dir template_dir, pattern, template_cache = nil result, helpers = {}, nil # Grab the files in the top level of the directory (do not recurse) ::Dir.glob(pattern).keep_if {|match| ::File.file? match }.each do |file| if (basename = ::File.basename file) == 'helpers.rb' helpers = file next elsif (path_segments = basename.split '.').size < 2 next end if (name = path_segments[0]) == 'block_ruler' name = 'thematic_break' elsif name.start_with? 'block_' name = name.slice 6, name.length end unless template_cache && (template = template_cache[file]) template_class, extra_engine_options, extsym = ::Tilt, {}, path_segments[-1].to_sym case extsym when :slim unless @active_engines[extsym] # NOTE slim doesn't get automatically loaded by Tilt Helpers.require_library 'slim' unless defined? ::Slim::Engine require 'slim/include' unless defined? ::Slim::Include ::Slim::Engine.define_options asciidoc: {} # align safe mode of AsciiDoc embedded in Slim template with safe mode of current document # NOTE safe mode won't get updated if using template cache and changing safe mode (@engine_options[extsym][:asciidoc] ||= {})[:safe] ||= @safe if @safe @active_engines[extsym] = true end when :haml unless @active_engines[extsym] Helpers.require_library 'haml' unless defined? ::Haml::Engine # NOTE Haml 5 dropped support for pretty printing @engine_options[extsym].delete :ugly if defined? 
::Haml::TempleEngine @engine_options[extsym][:attr_quote] = @engine_options[extsym].delete :attr_wrapper unless defined? ::Haml::Options @active_engines[extsym] = true end when :erb template_class, extra_engine_options = (@active_engines[extsym] ||= (load_eruby @eruby)) when :rb next else next unless ::Tilt.registered? extsym.to_s end template = template_class.new file, 1, (@engine_options[extsym] ||= {}).merge(extra_engine_options) end result[name] = template end if helpers || ::File.file?(helpers = %(#{template_dir}/helpers.rb)) require helpers end result end # Internal: Load the eRuby implementation # # name - the String name of the eRuby implementation # # Returns an [Array] containing the Tilt template Class for the eRuby implementation # and a Hash of additional options to pass to the initializer def load_eruby name if !name || name == 'erb' require 'erb' unless defined? ::ERB.version [::Tilt::ERBTemplate, {}] elsif name == 'erubi' Helpers.require_library 'erubi' unless defined? ::Erubis::Engine [::Tilt::ErubiTemplate, {}] elsif name == 'erubis' Helpers.require_library 'erubis' unless defined? ::Erubis::FastEruby [::Tilt::ErubisTemplate, engine_class: ::Erubis::FastEruby] else raise ::ArgumentError, %(Unknown ERB implementation: #{name}) end end end end asciidoctor-2.0.20/lib/asciidoctor/core_ext.rb000066400000000000000000000004321443135032600213040ustar00rootroot00000000000000# frozen_string_literal: true require_relative 'core_ext/nil_or_empty' require_relative 'core_ext/hash/merge' if RUBY_ENGINE == 'opal' require_relative 'core_ext/match_data/names' else require_relative 'core_ext/float/truncate' require_relative 'core_ext/regexp/is_match' end asciidoctor-2.0.20/lib/asciidoctor/core_ext/000077500000000000000000000000001443135032600207605ustar00rootroot00000000000000asciidoctor-2.0.20/lib/asciidoctor/core_ext/float/000077500000000000000000000000001443135032600220655ustar00rootroot00000000000000asciidoctor-2.0.20/lib/asciidoctor/core_ext/float/truncate.rb000066400000000000000000000011661443135032600242430ustar00rootroot00000000000000# frozen_string_literal: true # NOTE remove once minimum required Ruby version is at least 2.4 # NOTE use `send :prepend` to be nice to Ruby 2.0 Float.send :prepend, (Module.new do def truncate *args if args.length == 1 if (precision = Integer args.shift) == 0 super elsif precision > 0 precision_factor = 10.0 ** precision (self * precision_factor).to_i / precision_factor else precision_factor = 10 ** precision.abs (self / precision_factor).to_i * precision_factor end else super end end end) if (Float.instance_method :truncate).arity == 0 asciidoctor-2.0.20/lib/asciidoctor/core_ext/hash/000077500000000000000000000000001443135032600217035ustar00rootroot00000000000000asciidoctor-2.0.20/lib/asciidoctor/core_ext/hash/merge.rb000066400000000000000000000005551443135032600233340ustar00rootroot00000000000000# frozen_string_literal: true # NOTE remove once minimum required Ruby version is at least 2.6 # NOTE use `send :prepend` to be nice to Ruby 2.0 Hash.send :prepend, (Module.new do def merge *args (len = args.length) < 1 ? dup : (len > 1 ? 
args.inject(self) {|acc, arg| acc.merge arg } : (super args[0])) end end) if (Hash.instance_method :merge).arity == 1 asciidoctor-2.0.20/lib/asciidoctor/core_ext/match_data/000077500000000000000000000000001443135032600230455ustar00rootroot00000000000000asciidoctor-2.0.20/lib/asciidoctor/core_ext/match_data/names.rb000066400000000000000000000003101443135032600244670ustar00rootroot00000000000000# frozen_string_literal: true # NOTE remove once implemented in Opal; see https://github.com/opal/opal/issues/1964 class MatchData def names [] end end unless MatchData.method_defined? :names asciidoctor-2.0.20/lib/asciidoctor/core_ext/nil_or_empty.rb000066400000000000000000000012051443135032600240030ustar00rootroot00000000000000# frozen_string_literal: true # A core library extension that defines the method nil_or_empty? as an alias to # optimize checks for nil? or empty? on common object types such as NilClass, # String, Array, Hash, and Numeric. class NilClass alias nil_or_empty? nil? unless method_defined? :nil_or_empty? end class String alias nil_or_empty? empty? unless method_defined? :nil_or_empty? end class Array alias nil_or_empty? empty? unless method_defined? :nil_or_empty? end class Hash alias nil_or_empty? empty? unless method_defined? :nil_or_empty? end class Numeric alias nil_or_empty? nil? unless method_defined? :nil_or_empty? end asciidoctor-2.0.20/lib/asciidoctor/core_ext/regexp/000077500000000000000000000000001443135032600222525ustar00rootroot00000000000000asciidoctor-2.0.20/lib/asciidoctor/core_ext/regexp/is_match.rb000066400000000000000000000002511443135032600243640ustar00rootroot00000000000000# frozen_string_literal: true # NOTE remove once minimum required Ruby version is at least 2.4 class Regexp alias match? === end unless Regexp.method_defined? :match? asciidoctor-2.0.20/lib/asciidoctor/document.rb000066400000000000000000001510361443135032600213210ustar00rootroot00000000000000# frozen_string_literal: true module Asciidoctor # Public: The Document class represents a parsed AsciiDoc document. # # Document is the root node of a parsed AsciiDoc document. It provides an # abstract syntax tree (AST) that represents the structure of the AsciiDoc # document from which the Document object was parsed. # # Although the constructor can be used to create an empty document object, more # commonly, you'll load the document object from AsciiDoc source using the # primary API methods, {Asciidoctor.load} or {Asciidoctor.load_file}. When # using one of these APIs, you almost always want to set the safe mode to # :safe (or :unsafe) to enable all of Asciidoctor's features. # # Asciidoctor.load '= Hello, AsciiDoc!', safe: :safe # # => Asciidoctor::Document { doctype: "article", doctitle: "Hello, AsciiDoc!", blocks: 0 } # # Instances of this class can be used to extract information from the document # or alter its structure. As such, the Document object is most often used in # extensions and by integrations. # # The most basic usage of the Document object is to retrieve the document's # title. # # source = '= Document Title' # document = Asciidoctor.load source, safe: :safe # document.doctitle # # => 'Document Title' # # If the document has no title, the {Document#doctitle} method returns the # title of the first section. If that check falls through, you can have the # method return a fallback value (the value of the untitled-label attribute). 
# # Asciidoctor.load('no doctitle', safe: :safe).doctitle use_fallback: true # # => "Untitled" # # You can also use the Document object to access document attributes defined in # the header, such as the author and doctype. # # source = '= Document Title # Author Name # :doctype: book' # document = Asciidoctor.load source, safe: :safe # document.author # # => 'Author Name' # document.doctype # # => 'book' # # You can retrieve arbitrary document attributes defined in the header using # {Document#attr} or check for the existence of one using {Document#attr?}: # # source = '= Asciidoctor # :uri-project: https://asciidoctor.org' # document = Asciidoctor.load source, safe: :safe # document.attr 'uri-project' # # => 'https://asciidoctor.org' # document.attr? 'icons' # # => false # # Starting at the Document object, you can begin walking the document tree using # the {Document#blocks} method: # # source = 'paragraph contents # # [sidebar] # sidebar contents' # doc = Asciidoctor.load source, safe: :safe # doc.blocks.map {|block| block.context } # # => [:paragraph, :sidebar] # # You can discover block nodes at any depth in the tree using the # {AbstractBlock#find_by} method. # # source = '**** # paragraph in sidebar # ****' # doc = Asciidoctor.load source, safe: :safe # doc.find_by(context: :paragraph).map {|block| block.context } # # => [:paragraph] # # Loading a document object is the first step in the conversion process. You # can take the process to completion by calling the {Document#convert} method. class Document < AbstractBlock ImageReference = ::Struct.new :target, :imagesdir do alias to_s target end Footnote = ::Struct.new :index, :id, :text class AttributeEntry attr_reader :name, :value, :negate def initialize name, value, negate = nil @name = name @value = value @negate = negate.nil? ? value.nil? : negate end def save_to block_attributes (block_attributes[:attribute_entries] ||= []) << self self end end # Public Parsed and stores a partitioned title (i.e., title & subtitle). class Title attr_reader :main alias title main attr_reader :subtitle attr_reader :combined def initialize val, opts = {} # TODO separate sanitization by type (:cdata for HTML/XML, :plain_text for non-SGML, false for none) if (@sanitized = opts[:sanitize]) && val.include?('<') val = val.gsub(XmlSanitizeRx, '').squeeze(' ').strip end if (sep = opts[:separator] || ':').empty? || !val.include?(sep = %(#{sep} )) @main = val @subtitle = nil else @main, _, @subtitle = val.rpartition sep end @combined = val end def sanitized? @sanitized end def subtitle? @subtitle ? true : false end def to_s @combined end end # Public: The Author class represents information about an author extracted from document attributes Author = ::Struct.new :name, :firstname, :middlename, :lastname, :initials, :email # Public A read-only integer value indicating the level of security that # should be enforced while processing this document. The value must be # set in the Document constructor using the :safe option. # # A value of 0 (UNSAFE) disables any of the security features enforced # by Asciidoctor (Ruby is still subject to its own restrictions). # # A value of 1 (SAFE) closely parallels safe mode in AsciiDoc. In particular, # it prevents access to files which reside outside of the parent directory # of the source file and disables any macro other than the include directive. 
# # A value of 10 (SERVER) disallows the document from setting attributes that # would affect the conversion of the document, in addition to all the security # features of SafeMode::SAFE. For instance, this level forbids changing the # backend or source-highlighter using an attribute defined in the source # document header. This is the most fundamental level of security for server # deployments (hence the name). # # A value of 20 (SECURE) disallows the document from attempting to read files # from the file system and including the contents of them into the document, # in addition to all the security features of SafeMode::SECURE. In # particular, it disallows use of the include::[] directive and the embedding of # binary content (data uri), stylesheets and JavaScripts referenced by the # document. (Asciidoctor and trusted extensions may still be allowed to embed # trusted content into the document). # # Since Asciidoctor is aiming for wide adoption, 20 (SECURE) is the default # value and is recommended for server deployments. # # A value of 100 (PARANOID) is planned to disallow the use of passthrough # macros and prevents the document from setting any known attributes in # addition to all the security features of SafeMode::SECURE. Please note that # this level is not currently implemented (and therefore not enforced)! attr_reader :safe # Public: Get the Boolean AsciiDoc compatibility mode # # enabling this attribute activates the following syntax changes: # # * single quotes as constrained emphasis formatting marks # * single backticks parsed as inline literal, formatted as monospace # * single plus parsed as constrained, monospaced inline formatting # * double plus parsed as constrained, monospaced inline formatting # attr_reader :compat_mode # Public: Get the cached value of the backend attribute for this document attr_reader :backend # Public: Get the cached value of the doctype attribute for this document attr_reader :doctype # Public: Get or set the Boolean flag that indicates whether source map information should be tracked by the parser attr_accessor :sourcemap # Public: Get the document catalog Hash attr_reader :catalog # Public: Alias catalog property as references for backwards compatibility alias references catalog # Public: Get the Hash of document counters attr_reader :counters # Public: Get the level-0 Section (i.e., doctitle). (Only stores the title, not the header attributes). attr_reader :header # Public: Get the String base directory for converting this document. # # Defaults to directory of the source file. # If the source is a string, defaults to the current directory. attr_reader :base_dir # Public: Get the Hash of resolved options used to initialize this Document attr_reader :options # Public: Get the outfilesuffix defined at the end of the header. attr_reader :outfilesuffix # Public: Get a reference to the parent Document of this nested document. attr_reader :parent_document # Public: Get the Reader associated with this document attr_reader :reader # Public: Get/Set the PathResolver instance used to resolve paths in this Document. attr_reader :path_resolver # Public: Get the Converter associated with this document attr_reader :converter # Public: Get the SyntaxHighlighter associated with this document attr_reader :syntax_highlighter # Public: Get the activated Extensions::Registry associated with this document. attr_reader :extensions # Public: Initialize a {Document} object. # # data - The AsciiDoc source data as a String or String Array. 
(default: nil) # options - A Hash of options to control processing (e.g., safe mode value (:safe), backend (:backend), # standalone enclosure (:standalone), custom attributes (:attributes)). (default: {}) # # Duplication of the options Hash is handled in the enclosing API. # # Examples # # data = File.read filename # doc = Asciidoctor::Document.new data # puts doc.convert def initialize data = nil, options = {} super self, :document if (parent_doc = options.delete :parent) @parent_document = parent_doc options[:base_dir] ||= parent_doc.base_dir options[:catalog_assets] = true if parent_doc.options[:catalog_assets] options[:to_dir] = parent_doc.options[:to_dir] if parent_doc.options[:to_dir] @catalog = parent_doc.catalog.merge footnotes: [] # QUESTION should we support setting attribute in parent document from nested document? @attribute_overrides = attr_overrides = (parent_doc.instance_variable_get :@attribute_overrides).merge parent_doc.attributes attr_overrides.delete 'compat-mode' parent_doctype = attr_overrides.delete 'doctype' attr_overrides.delete 'notitle' attr_overrides.delete 'showtitle' # QUESTION if toc is hard unset in parent document, should it be hard unset in nested document? attr_overrides.delete 'toc' @attributes['toc-placement'] = (attr_overrides.delete 'toc-placement') || 'auto' attr_overrides.delete 'toc-position' @safe = parent_doc.safe @attributes['compat-mode'] = '' if (@compat_mode = parent_doc.compat_mode) @outfilesuffix = parent_doc.outfilesuffix @sourcemap = parent_doc.sourcemap @timings = nil @path_resolver = parent_doc.path_resolver @converter = parent_doc.converter initialize_extensions = nil @extensions = parent_doc.extensions @syntax_highlighter = parent_doc.syntax_highlighter else @parent_document = nil @catalog = { ids: {}, # deprecated; kept for backwards compatibility with converters refs: {}, footnotes: [], links: [], images: [], callouts: Callouts.new, includes: {}, } # copy attributes map and normalize keys # attribute overrides are attributes that can only be set from the commandline # a direct assignment effectively makes the attribute a constant # a nil value or name with leading or trailing ! will result in the attribute being unassigned @attribute_overrides = attr_overrides = {} (options[:attributes] || {}).each do |key, val| if key.end_with? '@' if key.start_with? '!' key, val = (key.slice 1, key.length - 2), false elsif key.end_with? '!@' key, val = (key.slice 0, key.length - 2), false else key, val = key.chop, %(#{val}@) end elsif key.start_with? '!' key, val = (key.slice 1, key.length), val == '@' ? false : nil elsif key.end_with? '!' key, val = key.chop, val == '@' ? false : nil end attr_overrides[key.downcase] = val end if ::String === (to_file = options[:to_file]) attr_overrides['outfilesuffix'] = Helpers.extname to_file end # safely resolve the safe mode from const, int or string if !(safe_mode = options[:safe]) @safe = SafeMode::SECURE elsif ::Integer === safe_mode # be permissive in case API user wants to define new levels @safe = safe_mode else @safe = (SafeMode.value_for_name safe_mode) rescue SafeMode::SECURE end input_mtime = options.delete :input_mtime @compat_mode = attr_overrides.key? 'compat-mode' @sourcemap = options[:sourcemap] @timings = options.delete :timings @path_resolver = PathResolver.new initialize_extensions = (defined? ::Asciidoctor::Extensions) || (options.key? :extensions) ? 
::Asciidoctor::Extensions : nil @extensions = nil # initialize further down if initialize_extensions is true options[:standalone] = options[:header_footer] if (options.key? :header_footer) && !(options.key? :standalone) end @parsed = @reftexts = @header = @header_attributes = nil @counters = {} @attributes_modified = ::Set.new @docinfo_processor_extensions = {} standalone = options[:standalone] (@options = options).freeze attrs = @attributes unless parent_doc attrs['attribute-undefined'] = Compliance.attribute_undefined attrs['attribute-missing'] = Compliance.attribute_missing attrs.update DEFAULT_ATTRIBUTES # TODO if lang attribute is set, @safe mode < SafeMode::SERVER, and !parent_doc, # load attributes from data/locale/attributes-.adoc end if standalone # sync embedded attribute with :standalone option value attr_overrides['embedded'] = nil attrs['copycss'] = '' attrs['iconfont-remote'] = '' attrs['stylesheet'] = '' attrs['webfonts'] = '' else # sync embedded attribute with :standalone option value attr_overrides['embedded'] = '' if (attr_overrides.key? 'showtitle') && (attr_overrides.keys & %w(notitle showtitle))[-1] == 'showtitle' attr_overrides['notitle'] = { nil => '', false => '@', '@' => false }[attr_overrides['showtitle']] elsif attr_overrides.key? 'notitle' attr_overrides['showtitle'] = { nil => '', false => '@', '@' => false }[attr_overrides['notitle']] else attrs['notitle'] = '' end end attr_overrides['asciidoctor'] = '' attr_overrides['asciidoctor-version'] = ::Asciidoctor::VERSION attr_overrides['safe-mode-name'] = (safe_mode_name = SafeMode.name_for_value @safe) attr_overrides[%(safe-mode-#{safe_mode_name})] = '' attr_overrides['safe-mode-level'] = @safe # the only way to set the max-include-depth attribute is via the API; default to 64 like AsciiDoc.py attr_overrides['max-include-depth'] ||= 64 # the only way to set the allow-uri-read attribute is via the API; disabled by default attr_overrides['allow-uri-read'] ||= nil # remap legacy attribute names attr_overrides['sectnums'] = attr_overrides.delete 'numbered' if attr_overrides.key? 'numbered' attr_overrides['hardbreaks-option'] = attr_overrides.delete 'hardbreaks' if attr_overrides.key? 'hardbreaks' # If the base_dir option is specified, it overrides docdir and is used as the root for relative # paths. Otherwise, the base_dir is the directory of the source file (docdir), if set, otherwise # the current directory. if (base_dir_val = options[:base_dir]) @base_dir = (attr_overrides['docdir'] = ::File.expand_path base_dir_val) elsif attr_overrides['docdir'] @base_dir = attr_overrides['docdir'] else #logger.warn 'setting base_dir is recommended when working with string documents' unless nested? @base_dir = attr_overrides['docdir'] = ::Dir.pwd end # allow common attributes backend and doctype to be set using options hash, coerce values to string if (backend_val = options[:backend]) attr_overrides['backend'] = backend_val.to_s end if (doctype_val = options[:doctype]) attr_overrides['doctype'] = doctype_val.to_s end if @safe >= SafeMode::SERVER # restrict document from setting copycss, source-highlighter and backend attr_overrides['copycss'] ||= nil attr_overrides['source-highlighter'] ||= nil attr_overrides['backend'] ||= DEFAULT_BACKEND # restrict document from seeing the docdir and trim docfile to relative path if !parent_doc && attr_overrides.key?('docfile') attr_overrides['docfile'] = attr_overrides['docfile'][(attr_overrides['docdir'].length + 1)..-1] end attr_overrides['docdir'] = '' attr_overrides['user-home'] ||= '.' 
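        # In SECURE mode (and above), additionally cap max-attribute-value-size, default
        # linkcss so the stylesheet is linked rather than embedded, and keep the document
        # from enabling icons.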
if @safe >= SafeMode::SECURE attr_overrides['max-attribute-value-size'] = 4096 unless attr_overrides.key? 'max-attribute-value-size' # assign linkcss (preventing css embedding) unless explicitly disabled from the commandline or API #attr_overrides['linkcss'] = (attr_overrides.fetch 'linkcss', '') || nil attr_overrides['linkcss'] = '' unless attr_overrides.key? 'linkcss' # restrict document from enabling icons attr_overrides['icons'] ||= nil end else attr_overrides['user-home'] ||= USER_HOME end # the only way to set the max-attribute-value-size attribute is via the API; disabled by default @max_attribute_value_size = (size = (attr_overrides['max-attribute-value-size'] ||= nil)) ? size.to_i.abs : nil attr_overrides.delete_if do |key, val| if val # a value ending in @ allows document to override value if ::String === val && (val.end_with? '@') val, verdict = val.chop, true end attrs[key] = val else # a nil or false value both unset the attribute; only a nil value locks it attrs.delete key verdict = val == false end verdict end if parent_doc @backend = attrs['backend'] # reset doctype unless it matches the default value unless (@doctype = attrs['doctype'] = parent_doctype) == DEFAULT_DOCTYPE update_doctype_attributes DEFAULT_DOCTYPE end # don't need to do the extra processing within our own document # FIXME line info isn't reported correctly within include files in nested document @reader = Reader.new data, options[:cursor] @source_location = @reader.cursor if @sourcemap # Now parse the lines in the reader into blocks # Eagerly parse (for now) since a subdocument is not a publicly accessible object Parser.parse @reader, self # should we call some sort of post-parse function? restore_attributes @parsed = true else # setup default backend and doctype @backend = nil if (initial_backend = attrs['backend'] || DEFAULT_BACKEND) == 'manpage' @doctype = attrs['doctype'] = attr_overrides['doctype'] = 'manpage' else @doctype = (attrs['doctype'] ||= DEFAULT_DOCTYPE) end update_backend_attributes initial_backend, true # dynamic intrinstic attribute values #attrs['indir'] = attrs['docdir'] #attrs['infile'] = attrs['docfile'] # fallback directories attrs['stylesdir'] ||= '.' attrs['iconsdir'] ||= %(#{attrs.fetch 'imagesdir', './images'}/icons) fill_datetime_attributes attrs, input_mtime if initialize_extensions if (ext_registry = options[:extension_registry]) # QUESTION should we warn if the value type of this option is not a registry if Extensions::Registry === ext_registry || ((defined? ::AsciidoctorJ::Extensions::ExtensionRegistry) && ::AsciidoctorJ::Extensions::ExtensionRegistry === ext_registry) @extensions = ext_registry.activate self end elsif (ext_block = options[:extensions]).nil? @extensions = Extensions::Registry.new.activate self unless Extensions.groups.empty? elsif ::Proc === ext_block @extensions = Extensions.create(&ext_block).activate self end end @reader = PreprocessorReader.new self, data, (Reader::Cursor.new attrs['docfile'], @base_dir), normalize: true @source_location = @reader.cursor if @sourcemap end end # Public: Parse the AsciiDoc source stored in the {Reader} into an abstract syntax tree. # # If the data parameter is not nil, create a new {PreprocessorReader} and assigned it to the reader # property of this object. Otherwise, continue with the reader that was created in {#initialize}. # Pass the reader to {Parser.parse} to parse the source data into an abstract syntax tree. # # If parsing has already been performed, this method returns without performing any processing. 
# # data - The optional replacement AsciiDoc source data as a String or String Array. (default: nil) # # Returns this [Document] def parse data = nil if @parsed self else doc = self # create reader if data is provided (used when data is not known at the time the Document object is created) if data @reader = PreprocessorReader.new doc, data, (Reader::Cursor.new @attributes['docfile'], @base_dir), normalize: true @source_location = @reader.cursor if @sourcemap end if (exts = @parent_document ? nil : @extensions) && exts.preprocessors? exts.preprocessors.each do |ext| @reader = ext.process_method[doc, @reader] || @reader end end # Now parse the lines in the reader into blocks Parser.parse @reader, doc, header_only: @options[:parse_header_only] # should we call sort of post-parse function? restore_attributes if exts && exts.tree_processors? exts.tree_processors.each do |ext| if (result = ext.process_method[doc]) && Document === result && result != doc doc = result end end end @parsed = true doc end end # Public: Returns whether the source lines of the document have been parsed. def parsed? @parsed end # Public: Get the named counter and take the next number in the sequence. # # name - the String name of the counter # seed - the initial value as a String or Integer # # returns the next number in the sequence for the specified counter def counter name, seed = nil return @parent_document.counter name, seed if @parent_document if ((locked = attribute_locked? name) && (curr_val = @counters[name])) || !(curr_val = @attributes[name]).nil_or_empty? next_val = @counters[name] = Helpers.nextval curr_val elsif seed next_val = @counters[name] = seed == seed.to_i.to_s ? seed.to_i : seed else next_val = @counters[name] = 1 end @attributes[name] = next_val unless locked next_val end # Public: Increment the specified counter and store it in the block's attributes # # counter_name - the String name of the counter attribute # block - the Block on which to save the counter # # returns the next number in the sequence for the specified counter def increment_and_store_counter counter_name, block ((AttributeEntry.new counter_name, (counter counter_name)).save_to block.attributes).value end # Deprecated: Map old counter_increment method to increment_counter for backwards compatibility alias counter_increment increment_and_store_counter # Public: Register a reference in the document catalog def register type, value case type when :ids # deprecated register :refs, [(id = value[0]), (Inline.new self, :anchor, value[1], type: :ref, id: id)] when :refs @catalog[:refs][value[0]] ||= (ref = value[1]) ref when :footnotes @catalog[type] << value else @catalog[type] << (type == :images ? (ImageReference.new value, @attributes['imagesdir']) : value) if @options[:catalog_assets] end end # Public: Scan registered references and return the ID of the first reference that matches the specified reference text. # # text - The String reference text to compare to the converted reference text of each registered reference. # # Returns the String ID of the first reference with matching reference text or nothing if no reference is found. 
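# Example (an illustrative sketch; the anchor ID and reference text below are
# hypothetical and must exist in the parsed document for a match):
#
#   doc = Asciidoctor.load %([#first]\n== First Section)
#   doc.resolve_id 'First Section' # => "first"
#   doc.resolve_id 'Missing Text'  # => nil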
def resolve_id text if @reftexts @reftexts[text] elsif @parsed # @reftexts is set eagerly to prevent nested lazy init (@reftexts = {}).tap {|accum| @catalog[:refs].each {|id, ref| accum[ref.xreftext] ||= id } }[text] else resolved_id = nil # @reftexts is set eagerly to prevent nested lazy init @reftexts = accum = {} @catalog[:refs].each do |id, ref| # NOTE short-circuit early since we're throwing away this table anyway if (xreftext = ref.xreftext) == text resolved_id = id break end accum[xreftext] ||= id end @reftexts = nil resolved_id end end # Public: Check whether this Document has any child Section objects. # # Returns A [Boolean] to indicate whether this Document has child Section objects def sections? @next_section_index > 0 end def footnotes? @catalog[:footnotes].empty? ? false : true end def footnotes @catalog[:footnotes] end def callouts @catalog[:callouts] end def nested? @parent_document ? true : false end def embedded? @attributes.key? 'embedded' end def extensions? @extensions ? true : false end # Make the raw source for the Document available. def source @reader.source if @reader end # Make the raw source lines for the Document available. def source_lines @reader.source_lines if @reader end def basebackend? base @attributes['basebackend'] == base end # Public: Return the doctitle as a String # # Returns the resolved doctitle as a [String] or nil if a doctitle cannot be resolved def title doctitle end # Public: Set the title on the document header # # Set the title of the document header to the specified value. If the header # does not exist, it is first created. # # title - the String title to assign as the title of the document header # # Returns the specified [String] title def title= title unless (sect = @header) (sect = (@header = Section.new self, 0)).sectname = 'header' end sect.title = title end # Public: Resolves the primary title for the document # # Searches the locations to find the first non-empty # value: # # * document-level attribute named title # * header title (known as the document title) # * title of the first section # * document-level attribute named untitled-label (if :use_fallback option is set) # # If no value can be resolved, nil is returned. # # If the :partition attribute is specified, the value is parsed into an Document::Title object. # If the :sanitize attribute is specified, XML elements are removed from the value. # # TODO separate sanitization by type (:cdata for HTML/XML, :plain_text for non-SGML, false for none) # # Returns the resolved title as a [Title] if the :partition option is passed or a [String] if not # or nil if no value can be resolved. def doctitle opts = {} unless (val = @attributes['title']) if (sect = first_section) val = sect.title elsif !(opts[:use_fallback] && (val = @attributes['untitled-label'])) return end end if (separator = opts[:partition]) Title.new val, opts.merge({ separator: (separator == true ? @attributes['title-separator'] : separator) }) elsif opts[:sanitize] && val.include?('<') val.gsub(XmlSanitizeRx, '').squeeze(' ').strip else val end end alias name doctitle def xreftext xrefstyle = nil (val = reftext) && !val.empty? ? val : title end # Public: Convenience method to retrieve the document attribute 'author' # # returns the full name of the author as a String def author @attributes['author'] end # Public: Convenience method to retrieve the authors of this document as an Array of Author objects. # # This method is backed by the author-related attributes on the document. 
# # returns the authors of this document as an Array def authors if (attrs = @attributes).key? 'author' authors = [(Author.new attrs['author'], attrs['firstname'], attrs['middlename'], attrs['lastname'], attrs['authorinitials'], attrs['email'])] if (num_authors = attrs['authorcount'] || 0) > 1 idx = 1 while idx < num_authors idx += 1 authors << (Author.new attrs[%(author_#{idx})], attrs[%(firstname_#{idx})], attrs[%(middlename_#{idx})], attrs[%(lastname_#{idx})], attrs[%(authorinitials_#{idx})], attrs[%(email_#{idx})]) end end authors else [] end end # Public: Convenience method to retrieve the document attribute 'revdate' # # returns the date of last revision for the document as a String def revdate @attributes['revdate'] end def notitle @attributes.key? 'notitle' end def noheader @attributes.key? 'noheader' end def nofooter @attributes.key? 'nofooter' end def first_section @header || @blocks.find {|e| e.context == :section } end def header? @header ? true : false end alias has_header? header? # Public: Append a content Block to this Document. # # If the child block is a Section, assign an index to it. # # block - The child Block to append to this parent Block # # Returns The parent Block def << block assign_numeral block if block.context == :section super end # Internal: Called by the parser after parsing the header and before parsing # the body, even if no header is found. #-- # QUESTION should we invoke the TreeProcessors here, passing in a phase? # QUESTION is finalize_header the right name? def finalize_header unrooted_attributes, header_valid = true clear_playback_attributes unrooted_attributes save_attributes unrooted_attributes['invalid-header'] = true unless header_valid unrooted_attributes end # Public: Replay attribute assignments at the block level def playback_attributes(block_attributes) if block_attributes.key? :attribute_entries block_attributes[:attribute_entries].each do |entry| name = entry.name if entry.negate @attributes.delete name @compat_mode = false if name == 'compat-mode' else @attributes[name] = entry.value @compat_mode = true if name == 'compat-mode' end end end end # Public: Restore the attributes to the previously saved state (attributes in header) def restore_attributes @catalog[:callouts].rewind unless @parent_document @attributes.replace @header_attributes end # Public: Set the specified attribute on the document if the name is not locked # # If the attribute is locked, false is returned. Otherwise, the value is # assigned to the attribute name after first performing attribute # substitutions on the value. If the attribute name is 'backend' or # 'doctype', then the value of backend-related attributes are updated. # # name - the String attribute name # value - the String attribute value; must not be nil (optional, default: '') # # Returns the substituted value if the attribute was set or nil if it was not because it's locked. def set_attribute name, value = '' unless attribute_locked? name value = apply_attribute_value_subs value unless value.empty? # NOTE if @header_attributes is set, we're beyond the document header if @header_attributes @attributes[name] = value else case name when 'backend' update_backend_attributes value, (@attributes_modified.delete? 'htmlsyntax') && value == @backend when 'doctype' update_doctype_attributes value else @attributes[name] = value end @attributes_modified << name end value end end # Public: Delete the specified attribute from the document if the name is not locked # # If the attribute is locked, false is returned. 
Otherwise, the attribute is deleted. # # name - the String attribute name # # returns true if the attribute was deleted, false if it was not because it's locked def delete_attribute(name) if attribute_locked?(name) false else @attributes.delete(name) @attributes_modified << name true end end # Public: Determine if the attribute has been locked by being assigned in document options # # key - The attribute key to check # # Returns true if the attribute is locked, false otherwise def attribute_locked?(name) @attribute_overrides.key?(name) end # Public: Assign a value to the specified attribute in the document header. # # The assignment will be visible when the header attributes are restored, # typically between processor phases (e.g., between parse and convert). # # name - The String attribute name to assign # value - The Object value to assign to the attribute (default: '') # overwrite - A Boolean indicating whether to assign the attribute # if already present in the attributes Hash (default: true) # # Returns a [Boolean] indicating whether the assignment was performed def set_header_attribute name, value = '', overwrite = true attrs = @header_attributes || @attributes if overwrite == false && (attrs.key? name) false else attrs[name] = value true end end # Public: Convert the AsciiDoc document using the templates # loaded by the Converter. If a :template_dir is not specified, # or a template is missing, the converter will fall back to # using the appropriate built-in template. def convert opts = {} @timings.start :convert if @timings parse unless @parsed unless @safe >= SafeMode::SERVER || opts.empty? # QUESTION should we store these on the Document object? @attributes.delete 'outfile' unless (@attributes['outfile'] = opts['outfile']) @attributes.delete 'outdir' unless (@attributes['outdir'] = opts['outdir']) end # QUESTION should we add extensions that execute before conversion begins? if doctype == 'inline' if (block = @blocks[0] || @header) if block.content_model == :compound || block.content_model == :empty logger.warn 'no inline candidate; use the inline doctype to convert a single paragraph, verbatim, or raw block' else output = block.content end end else if opts.key? :standalone transform = opts[:standalone] ? 'document' : 'embedded' elsif opts.key? :header_footer transform = opts[:header_footer] ? 'document' : 'embedded' else transform = @options[:standalone] ? 'document' : 'embedded' end output = @converter.convert self, transform end unless @parent_document if (exts = @extensions) && exts.postprocessors? exts.postprocessors.each do |ext| output = ext.process_method[self, output] end end end @timings.record :convert if @timings output end # Deprecated: Use {Document#convert} instead. alias render convert # Public: Write the output to the specified file # # If the converter responds to :write, delegate the work of writing the output # to that method. Otherwise, write the output to the specified file. In the # latter case, this method ensures the output has a trailing newline if the # target responds to write and the output is not empty. # # output - The output to write. Unless the converter responds to write, this # object is expected to be a String. # target - The file to write, either a File object or a String path. # # Returns nothing def write output, target @timings.start :write if @timings if Writer === @converter @converter.write output, target else if target.respond_to? :write # QUESTION should we set encoding using target.set_encoding? unless output.nil_or_empty?
target.write output.chomp # ensure there's a trailing endline target.write LF end else ::File.write target, output, mode: FILE_WRITE_MODE end if @backend == 'manpage' && ::String === target && (@converter.class.respond_to? :write_alternate_pages) @converter.class.write_alternate_pages @attributes['mannames'], @attributes['manvolnum'], target end end @timings.record :write if @timings nil end def content # NOTE per AsciiDoc-spec, remove the title before converting the body @attributes.delete('title') super end # Public: Read the docinfo file(s) for inclusion in the document template # # If the docinfo1 attribute is set, read the docinfo.ext file. If the docinfo # attribute is set, read the doc-name.docinfo.ext file. If the docinfo2 # attribute is set, read both files in that order. # # location - The Symbol location of the docinfo (e.g., :head, :footer, etc). (default: :head) # suffix - The suffix of the docinfo file(s). If not set, the extension # will be set to the outfilesuffix. (default: nil) # # returns The contents of the docinfo file(s) or empty string if no files are # found or the safe mode is secure or greater. def docinfo location = :head, suffix = nil if safe < SafeMode::SECURE qualifier = %(-#{location}) unless location == :head suffix ||= @outfilesuffix if (docinfo = @attributes['docinfo']).nil_or_empty? if @attributes.key? 'docinfo2' docinfo = ['private', 'shared'] elsif @attributes.key? 'docinfo1' docinfo = ['shared'] else docinfo = docinfo ? ['private'] : nil end else docinfo = docinfo.split(',').map {|it| it.strip } end if docinfo content = [] docinfo_file, docinfo_dir, docinfo_subs = %(docinfo#{qualifier}#{suffix}), @attributes['docinfodir'], resolve_docinfo_subs unless (docinfo & ['shared', %(shared-#{location})]).empty? docinfo_path = normalize_system_path docinfo_file, docinfo_dir # NOTE normalizing the lines is essential if we're performing substitutions if (shared_docinfo = read_asset docinfo_path, normalize: true) content << (apply_subs shared_docinfo, docinfo_subs) end end unless @attributes['docname'].nil_or_empty? || (docinfo & ['private', %(private-#{location})]).empty? docinfo_path = normalize_system_path %(#{@attributes['docname']}-#{docinfo_file}), docinfo_dir # NOTE normalizing the lines is essential if we're performing substitutions if (private_docinfo = read_asset docinfo_path, normalize: true) content << (apply_subs private_docinfo, docinfo_subs) end end end end # TODO allow document to control whether extension docinfo is contributed if @extensions && (docinfo_processors? 
location) ((content || []).concat @docinfo_processor_extensions[location].map {|ext| ext.process_method[self] }.compact).join LF elsif content content.join LF else '' end end def docinfo_processors?(location = :head) if @docinfo_processor_extensions.key?(location) # false means we already performed a lookup and didn't find any @docinfo_processor_extensions[location] != false elsif @extensions && @document.extensions.docinfo_processors?(location) !!(@docinfo_processor_extensions[location] = @document.extensions.docinfo_processors(location)) else @docinfo_processor_extensions[location] = false end end def to_s %(#<#{self.class}@#{object_id} {doctype: #{doctype.inspect}, doctitle: #{(@header && @header.title).inspect}, blocks: #{@blocks.size}}>) end private # Internal: Apply substitutions to the attribute value # # If the value is an inline passthrough macro (e.g., pass:<subs>[value]), # apply the substitutions defined in <subs> to the value, or leave the value # unmodified if no substitutions are specified. If the value is not an # inline passthrough macro, apply header substitutions to the value. # # value - The String attribute value on which to perform substitutions # # Returns The String value with substitutions performed def apply_attribute_value_subs value if AttributeEntryPassMacroRx =~ value value = $2 value = apply_subs value, (resolve_pass_subs $1) if $1 else value = apply_header_subs value end @max_attribute_value_size ? (limit_bytesize value, @max_attribute_value_size) : value end # Internal: Safely truncates a string to the specified number of bytes. # # If a multibyte char gets split, the dangling fragment is dropped. # # str - The String to truncate. # max - The maximum allowable size of the String, in bytes. # # Returns the String truncated to the specified bytesize. def limit_bytesize str, max if str.bytesize > max max -= 1 until (str = str.byteslice 0, max).valid_encoding? end str end # Internal: Resolve the list of comma-delimited subs to apply to docinfo files. # # Resolve the list of substitutions from the value of the docinfosubs # document attribute, if specified. Otherwise, return an Array containing # the Symbol :attributes. # # Returns an [Array] of substitution [Symbol]s def resolve_docinfo_subs (@attributes.key? 'docinfosubs') ? (resolve_subs @attributes['docinfosubs'], :block, nil, 'docinfo') : [:attributes] end # Internal: Create and initialize an instance of the converter for this document #-- # QUESTION is there any additional information we should be passing to the converter?
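# For reference, a rough sketch of how these options typically reach this method
# through the loading API (the file name, template directory, and MyCustomConverter
# class are hypothetical):
#
#   Asciidoctor.convert_file 'doc.adoc', template_dirs: ['./templates']
#   Asciidoctor.convert_file 'doc.adoc', converter: MyCustomConverter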
def create_converter backend, delegate_backend converter_opts = { document: self, htmlsyntax: @attributes['htmlsyntax'] } if (template_dirs = (opts = @options)[:template_dirs] || opts[:template_dir]) converter_opts[:template_dirs] = [*template_dirs] converter_opts[:template_cache] = opts.fetch :template_cache, true converter_opts[:template_engine] = opts[:template_engine] converter_opts[:template_engine_options] = opts[:template_engine_options] converter_opts[:eruby] = opts[:eruby] converter_opts[:safe] = @safe converter_opts[:delegate_backend] = delegate_backend if delegate_backend end if (converter = opts[:converter]) (Converter::CustomFactory.new backend => converter).create backend, converter_opts else (opts.fetch :converter_factory, Converter).create backend, converter_opts end end # Internal: Delete any attributes stored for playback def clear_playback_attributes(attributes) attributes.delete(:attribute_entries) end # Internal: Branch the attributes so that the original state can be restored # at a future time. # # Returns the duplicated attributes, which will later be restored def save_attributes unless ((attrs = @attributes).key? 'doctitle') || !(doctitle_val = doctitle) attrs['doctitle'] = doctitle_val end # css-signature cannot be updated after header attributes are processed @id ||= attrs['css-signature'] if (toc_val = (attrs.delete 'toc2') ? 'left' : attrs['toc']) # toc-placement allows us to separate position from using fitted slot vs macro toc_position_val = (toc_placement_val = attrs.fetch 'toc-placement', 'macro') && toc_placement_val != 'auto' ? toc_placement_val : attrs['toc-position'] unless toc_val.empty? && toc_position_val.nil_or_empty? default_toc_position = 'left' # TODO rename toc2 to aside-toc default_toc_class = 'toc2' position = toc_position_val.nil_or_empty? ? (toc_val.empty? ? default_toc_position : toc_val) : toc_position_val attrs['toc'] = '' attrs['toc-placement'] = 'auto' case position when 'left', '<', '<' attrs['toc-position'] = 'left' when 'right', '>', '>' attrs['toc-position'] = 'right' when 'top', '^' attrs['toc-position'] = 'top' when 'bottom', 'v' attrs['toc-position'] = 'bottom' when 'preamble', 'macro' attrs['toc-position'] = 'content' attrs['toc-placement'] = position default_toc_class = nil else attrs.delete 'toc-position' default_toc_class = nil end attrs['toc-class'] ||= default_toc_class if default_toc_class end end if (icons_val = attrs['icons']) && !(attrs.key? 'icontype') case icons_val when '', 'font' else attrs['icons'] = '' attrs['icontype'] = icons_val unless icons_val == 'image' end end if (@compat_mode = attrs.key? 'compat-mode') && (attrs.key? 'language') attrs['source-language'] = attrs['language'] end unless @parent_document if (basebackend = attrs['basebackend']) == 'html' # QUESTION should we allow source-highlighter to be disabled in AsciiDoc table cell? 
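# NOTE resolution order for the syntax highlighter below (a descriptive summary of
# the code that follows): an explicit :syntax_highlighter_factory option wins, then
# a :syntax_highlighters map wrapped in the default factory proxy, and finally the
# built-in SyntaxHighlighter registry.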
if (syntax_hl_name = attrs['source-highlighter']) && !attrs[%(#{syntax_hl_name}-unavailable)] if (syntax_hl_factory = @options[:syntax_highlighter_factory]) @syntax_highlighter = syntax_hl_factory.create syntax_hl_name, @backend, document: self elsif (syntax_hls = @options[:syntax_highlighters]) @syntax_highlighter = (SyntaxHighlighter::DefaultFactoryProxy.new syntax_hls).create syntax_hl_name, @backend, document: self else @syntax_highlighter = SyntaxHighlighter.create syntax_hl_name, @backend, document: self end end # enable toc and sectnums (i.e., numbered) by default in DocBook backend elsif basebackend == 'docbook' # NOTE the attributes_modified should go away once we have a proper attribute storage & tracking facility attrs['toc'] = '' unless (attribute_locked? 'toc') || (@attributes_modified.include? 'toc') attrs['sectnums'] = '' unless (attribute_locked? 'sectnums') || (@attributes_modified.include? 'sectnums') end # NOTE pin the outfilesuffix after the header is parsed @outfilesuffix = attrs['outfilesuffix'] # unfreeze "flexible" attributes FLEXIBLE_ATTRIBUTES.each do |name| # turning a flexible attribute off should be permanent # (we may need more config if that's not always the case) if @attribute_overrides.key?(name) && @attribute_overrides[name] @attribute_overrides.delete(name) end end end @header_attributes = attrs.merge end # Internal: Assign the local and document datetime attributes, which includes localdate, localyear, localtime, # localdatetime, docdate, docyear, doctime, and docdatetime. Honor the SOURCE_DATE_EPOCH environment variable, if set. def fill_datetime_attributes attrs, input_mtime # See https://reproducible-builds.org/specs/source-date-epoch/ now = (::ENV.key? 'SOURCE_DATE_EPOCH') ? (source_date_epoch = (::Time.at Integer ::ENV['SOURCE_DATE_EPOCH']).utc) : ::Time.now if (localdate = attrs['localdate']) attrs['localyear'] ||= (localdate.index '-') == 4 ? (localdate.slice 0, 4) : nil else localdate = attrs['localdate'] = now.strftime '%F' attrs['localyear'] ||= now.year.to_s end # %Z is OS dependent and may contain characters that aren't UTF-8 encoded (see asciidoctor#2770 and asciidoctor.js#23) localtime = (attrs['localtime'] ||= now.strftime %(%T #{now.utc_offset == 0 ? 'UTC' : '%z'})) attrs['localdatetime'] ||= %(#{localdate} #{localtime}) # docdate, doctime and docdatetime should default to localdate, localtime and localdatetime if not otherwise set input_mtime = source_date_epoch || input_mtime || now if (docdate = attrs['docdate']) attrs['docyear'] ||= ((docdate.index '-') == 4 ? (docdate.slice 0, 4) : nil) else docdate = attrs['docdate'] = input_mtime.strftime '%F' attrs['docyear'] ||= input_mtime.year.to_s end # %Z is OS dependent and may contain characters that aren't UTF-8 encoded (see asciidoctor#2770 and asciidoctor.js#23) doctime = (attrs['doctime'] ||= input_mtime.strftime %(%T #{input_mtime.utc_offset == 0 ? 'UTC' : '%z'})) attrs['docdatetime'] ||= %(#{docdate} #{doctime}) nil end # Internal: Update the backend attributes to reflect a change in the active backend. # # This method also handles updating the related doctype attributes if the # doctype attribute is assigned at the time this method is called. # # Returns the resolved String backend if updated, nothing otherwise. 
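# As a rough illustration (attribute names follow from the logic below), switching
# an article from the html5 backend to the docbook5 backend:
#
#   removes: backend-html5, backend-html5-doctype-article, basebackend-html
#   adds:    backend-docbook5, backend-docbook5-doctype-article, basebackend-docbook
#
# and refreshes the filetype, outfilesuffix, and pagewidth attributes to match the
# new converter.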
def update_backend_attributes new_backend, init = nil if init || new_backend != @backend current_backend = @backend current_basebackend = (attrs = @attributes)['basebackend'] current_doctype = @doctype actual_backend, _, new_backend = new_backend.partition ':' if new_backend.include? ':' if new_backend.start_with? 'xhtml' attrs['htmlsyntax'] = 'xml' new_backend = new_backend.slice 1, new_backend.length elsif new_backend.start_with? 'html' attrs['htmlsyntax'] ||= 'html' end new_backend = BACKEND_ALIASES[new_backend] || new_backend new_backend, delegate_backend = actual_backend, new_backend if actual_backend if current_doctype if current_backend attrs.delete %(backend-#{current_backend}) attrs.delete %(backend-#{current_backend}-doctype-#{current_doctype}) end attrs[%(backend-#{new_backend}-doctype-#{current_doctype})] = '' attrs[%(doctype-#{current_doctype})] = '' elsif current_backend attrs.delete %(backend-#{current_backend}) end attrs[%(backend-#{new_backend})] = '' # QUESTION should we defer the @backend assignment until after the converter is created? @backend = attrs['backend'] = new_backend # (re)initialize converter if Converter::BackendTraits === (converter = create_converter new_backend, delegate_backend) new_basebackend = converter.basebackend new_filetype = converter.filetype if (htmlsyntax = converter.htmlsyntax) attrs['htmlsyntax'] = htmlsyntax end if init attrs['outfilesuffix'] ||= converter.outfilesuffix else attrs['outfilesuffix'] = converter.outfilesuffix unless attribute_locked? 'outfilesuffix' end elsif converter backend_traits = Converter.derive_backend_traits new_backend new_basebackend = backend_traits[:basebackend] new_filetype = backend_traits[:filetype] if init attrs['outfilesuffix'] ||= backend_traits[:outfilesuffix] else attrs['outfilesuffix'] = backend_traits[:outfilesuffix] unless attribute_locked? 'outfilesuffix' end else # NOTE ideally we shouldn't need the converter before the converter phase, but we do raise ::NotImplementedError, %(asciidoctor: FAILED: missing converter for backend '#{new_backend}'. Processing aborted.) end @converter = converter if (current_filetype = attrs['filetype']) attrs.delete %(filetype-#{current_filetype}) end attrs['filetype'] = new_filetype attrs[%(filetype-#{new_filetype})] = '' if (page_width = DEFAULT_PAGE_WIDTHS[new_basebackend]) attrs['pagewidth'] = page_width else attrs.delete 'pagewidth' end if new_basebackend != current_basebackend if current_doctype if current_basebackend attrs.delete %(basebackend-#{current_basebackend}) attrs.delete %(basebackend-#{current_basebackend}-doctype-#{current_doctype}) end attrs[%(basebackend-#{new_basebackend}-doctype-#{current_doctype})] = '' elsif current_basebackend attrs.delete %(basebackend-#{current_basebackend}) end attrs[%(basebackend-#{new_basebackend})] = '' attrs['basebackend'] = new_basebackend end new_backend end end # Internal: Update the doctype and backend attributes to reflect a change in the active doctype. # # Returns the String doctype if updated, nothing otherwise. 
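# As a rough illustration (based on the logic below), changing the doctype of an
# html5 document from article to book replaces doctype-article,
# backend-html5-doctype-article, and basebackend-html-doctype-article with their
# doctype-book counterparts and reassigns the doctype attribute to 'book'.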
def update_doctype_attributes new_doctype if new_doctype && new_doctype != @doctype current_backend, current_basebackend, current_doctype = @backend, (attrs = @attributes)['basebackend'], @doctype if current_doctype attrs.delete %(doctype-#{current_doctype}) if current_backend attrs.delete %(backend-#{current_backend}-doctype-#{current_doctype}) attrs[%(backend-#{current_backend}-doctype-#{new_doctype})] = '' end if current_basebackend attrs.delete %(basebackend-#{current_basebackend}-doctype-#{current_doctype}) attrs[%(basebackend-#{current_basebackend}-doctype-#{new_doctype})] = '' end else attrs[%(backend-#{current_backend}-doctype-#{new_doctype})] = '' if current_backend attrs[%(basebackend-#{current_basebackend}-doctype-#{new_doctype})] = '' if current_basebackend end attrs[%(doctype-#{new_doctype})] = '' @doctype = attrs['doctype'] = new_doctype end end end end asciidoctor-2.0.20/lib/asciidoctor/extensions.rb000066400000000000000000001617431443135032600217100ustar00rootroot00000000000000# frozen_string_literal: true (require 'asciidoctor' unless defined? Asciidoctor.load) unless RUBY_ENGINE == 'opal' module Asciidoctor # Extensions provide a way to participate in the parsing and converting # phases of the AsciiDoc processor or extend the AsciiDoc syntax. # # The various extensions participate in AsciiDoc processing as follows: # # 1. After the source lines are normalized, {Preprocessor}s modify or replace # the source lines before parsing begins. {IncludeProcessor}s are used to # process include directives for targets which they claim to handle. # 2. The Parser parses the block-level content into an abstract syntax tree. # Custom blocks and block macros are processed by associated {BlockProcessor}s # and {BlockMacroProcessor}s, respectively. # 3. {TreeProcessor}s are run on the abstract syntax tree. # 4. Conversion of the document begins, at which point inline markup is processed # and converted. Custom inline macros are processed by associated {InlineMacroProcessor}s. # 5. {Postprocessor}s modify or replace the converted document. # 6. The output is written to the output stream. # # Extensions may be registered globally using the {Extensions.register} method # or added to a custom {Registry} instance and passed as an option to a single # Asciidoctor processor. module Extensions # Public: An abstract base class for document and syntax processors. # # This class provides access to a class-level Hash for holding default # configuration options defined using the {Processor.option} method. This # style of default configuration is specific to the native Ruby environment # and is only consulted inside the initializer. An overriding configuration # Hash can be passed to the initializer. Once the processor is initialized, # the configuration is accessed using the {Processor#config} instance variable. # # Instances of the Processor class provide convenience methods for creating # AST nodes, such as Block and Inline, and for parsing child content. class Processor class << self # Public: Get the static configuration for this processor class. # # Returns a configuration [Hash] def config @config ||= {} end # Public: Assigns a default value for the specified option that gets # applied to all instances of this processor. # # Examples # # option :contexts, [:open, :paragraph] # # Returns nothing def option key, default_value config[key] = default_value end # Mixes the DSL class for this processor into this processor class or instance. 
# # This method automatically detects whether to use the include or extend keyword to mix in the module. # # NOTE Inspiration for this DSL design comes from https://corcoran.io/2013/09/04/simple-pattern-ruby-dsl/ # # Returns self def enable_dsl if const_defined? :DSL if singleton_class? include const_get :DSL else extend const_get :DSL end end end alias use_dsl enable_dsl end # Public: Get the configuration Hash for this processor instance. attr_reader :config def initialize config = {} @config = self.class.config.merge config end def update_config config @config.update config end def process *args raise ::NotImplementedError, %(#{Processor} subclass #{self.class} must implement the ##{__method__} method) end # QUESTION should attributes be an option instead of a parameter? # Public: Creates a new Section node. # # Creates a Section node in the same manner as the parser. # # parent - The parent Section (or Document) of this new Section. # title - The String title of the new Section. # attrs - A Hash of attributes to control how the section is built. # Use the style attribute to set the name of a special section (ex. appendix). # Use the id attribute to assign an explicit ID or set the value to false to # disable automatic ID generation (when sectids document attribute is set). # opts - An optional Hash of options (default: {}): # :level - [Integer] The level to assign to this section; defaults to # one greater than the parent level (optional). # :numbered - [Boolean] A flag to force numbering, which falls back to the # state of the sectnums document attribute (optional). # # Returns a [Section] node with all properties properly initialized. def create_section parent, title, attrs, opts = {} doc = parent.document book = (doctype = doc.doctype) == 'book' level = opts[:level] || parent.level + 1 if (style = attrs.delete 'style') if book && style == 'abstract' sectname, level = 'chapter', 1 else sectname, special = style, true level = 1 if level == 0 end elsif book sectname = level == 0 ? 'part' : (level > 1 ? 'section' : 'chapter') elsif doctype == 'manpage' && (title.casecmp 'synopsis') == 0 sectname, special = 'synopsis', true else sectname = 'section' end sect = Section.new parent, level sect.title, sect.sectname = title, sectname if special sect.special = true if opts.fetch :numbered, (style == 'appendix') sect.numbered = true elsif !(opts.key? :numbered) && (doc.attr? 'sectnums', 'all') sect.numbered = (book && level == 1 ? :chapter : true) end elsif level > 0 if opts.fetch :numbered, (doc.attr? 'sectnums') sect.numbered = sect.special ? parent.numbered && true : true end elsif opts.fetch :numbered, (book && (doc.attr? 'partnums')) sect.numbered = true end if (id = attrs['id']) == false attrs.delete 'id' else sect.id = attrs['id'] = id || ((doc.attr? 'sectids') ? (Section.generate_id sect.title, doc) : nil) end sect.update_attributes attrs sect end def create_block parent, context, source, attrs, opts = {} Block.new parent, context, { source: source, attributes: attrs }.merge(opts) end # Public: Creates a list node and links it to the specified parent. # # parent - The parent Block (Block, Section, or Document) of this new list block. # context - The list context (e.g., :ulist, :olist, :colist, :dlist) # attrs - A Hash of attributes to set on this list block # # Returns a [List] node with all properties properly initialized. 
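# Examples
#
#   # an illustrative sketch from inside a processor's process method; the parent
#   # node and item text are assumptions
#   list = create_list parent, :ulist
#   list << (create_list_item list, 'first item')
#   list << (create_list_item list, 'second item')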
def create_list parent, context, attrs = nil list = List.new parent, context list.update_attributes attrs if attrs list end # Public: Creates a list item node and links it to the specified parent. # # parent - The parent List of this new list item block. # text - The text of the list item. # # Returns a [ListItem] node with all properties properly initialized. def create_list_item parent, text = nil ListItem.new parent, text end # Public: Creates an image block node and links it to the specified parent. # # parent - The parent Block (Block, Section, or Document) of this new image block. # attrs - A Hash of attributes to control how the image block is built. # Use the target attribute to set the source of the image. # Use the alt attribute to specify an alternative text for the image. # opts - An optional Hash of options (default: {}) # # Returns a [Block] node with all properties properly initialized. def create_image_block parent, attrs, opts = {} unless (target = attrs['target']) raise ::ArgumentError, 'Unable to create an image block, target attribute is required' end attrs['alt'] ||= (attrs['default-alt'] = Helpers.basename(target, true).tr('_-', ' ')) title = (attrs.key? 'title') ? (attrs.delete 'title') : nil block = create_block parent, :image, nil, attrs, opts if title block.title = title block.assign_caption (attrs.delete 'caption'), 'figure' end block end def create_inline parent, context, text, opts = {} Inline.new parent, context, text, context == :quoted ? ({ type: :unquoted }.merge opts) : opts end # Public: Parses blocks in the content and attaches the block to the parent. # # Returns The parent node into which the blocks are parsed. #-- # QUESTION is parse_content the right method name? should we wrap in open block automatically? def parse_content parent, content, attributes = nil reader = Reader === content ? content : (Reader.new content) Parser.parse_blocks reader, parent, attributes parent end # Public: Parses the attrlist String into a Hash of attributes # # block - the current AbstractBlock or the parent AbstractBlock if there is no current block (used for applying subs) # attrlist - the list of attributes as a String # opts - an optional Hash of options to control processing: # :positional_attributes - an Array of attribute names to map positional arguments to (optional, default: false) # :sub_attributes - enables attribute substitution on the attrlist argument (optional, default: false) # # Returns a Hash of parsed attributes def parse_attributes block, attrlist, opts = {} return {} if attrlist ? attrlist.empty? : true attrlist = block.sub_attributes attrlist if opts[:sub_attributes] && (attrlist.include? ATTR_REF_HEAD) (AttributeList.new attrlist).parse opts[:positional_attributes] || [] end # TODO fill out remaining methods [ [:create_paragraph, :create_block, :paragraph], [:create_open_block, :create_block, :open], [:create_example_block, :create_block, :example], [:create_pass_block, :create_block, :pass], [:create_listing_block, :create_block, :listing], [:create_literal_block, :create_block, :literal], [:create_anchor, :create_inline, :anchor], [:create_inline_pass, :create_inline, :quoted], ].each do |method_name, delegate_method_name, context| define_method method_name do |*args| args.unshift args.shift, context send delegate_method_name, *args end end end # Internal: Overlays a builder DSL for configuring the Processor instance. # Includes a method to define configuration options and another to define the # {Processor#process} method. 
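# A hypothetical sketch of the block-style registration this DSL enables (the :loud
# macro name and the paragraph output are illustrative only):
#
#   Asciidoctor::Extensions.register do
#     block_macro do
#       named :loud
#       process do |parent, target, attrs|
#         create_paragraph parent, target.upcase, attrs
#       end
#     end
#   end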
module ProcessorDsl def option key, value config[key] = value end def process *args, &block if block_given? raise ::ArgumentError, %(wrong number of arguments (given #{args.size}, expected 0)) unless args.empty? unless block.binding && self == block.binding.receiver # NOTE remap self in process method to processor instance context = self block.define_singleton_method(:call) {|*m_args| context.instance_exec(*m_args, &block) } end @process_block = block # TODO enable if we want to support passing proc or lambda as argument instead of block #elsif ::Proc === args[0] # raise ::ArgumentError, %(wrong number of arguments (given #{args.size - 1}, expected 0)) unless args.size == 1 # @process_block = args.shift elsif defined? @process_block @process_block.call(*args) else raise ::NotImplementedError, %(#{self.class} ##{__method__} method called before being registered) end end def process_block_given? defined? @process_block end end module DocumentProcessorDsl include ProcessorDsl def prefer option :position, :>> end end module SyntaxProcessorDsl include ProcessorDsl def named value # NOTE due to how processors get initialized, we must defer this assignment in some scenarios if Processor === self @name = value else option :name, value end end def content_model value option :content_model, value end alias parse_content_as content_model def positional_attributes *value option :positional_attrs, value.flatten end alias name_positional_attributes positional_attributes # NOTE positional_attrs alias is deprecated alias positional_attrs positional_attributes def default_attributes value option :default_attrs, value end # NOTE default_attrs alias is deprecated alias default_attrs default_attributes def resolve_attributes *args # NOTE assume true as default value; rewrap single-argument string or symbol if (args = args.fetch 0, true).respond_to? :to_sym args = [args] end unless args.size > 1 case args when true option :positional_attrs, [] option :default_attrs, {} when ::Array names, defaults = [], {} args.each do |arg| if (arg = arg.to_s).include? '=' name, _, value = arg.partition '=' if name.include? ':' idx, _, name = name.partition ':' idx = idx == '@' ? names.size : idx.to_i names[idx] = name end defaults[name] = value elsif arg.include? ':' idx, _, name = arg.partition ':' idx = idx == '@' ? names.size : idx.to_i names[idx] = name else names << arg end end option :positional_attrs, names.compact option :default_attrs, defaults when ::Hash names, defaults = [], {} args.each do |key, val| if (name = key.to_s).include? ':' idx, _, name = name.partition ':' idx = idx == '@' ? names.size : idx.to_i names[idx] = name end defaults[name] = val if val end option :positional_attrs, names.compact option :default_attrs, defaults else raise ::ArgumentError, %(unsupported attributes specification for macro: #{args.inspect}) end end # NOTE resolves_attributes alias is deprecated alias resolves_attributes resolve_attributes end # Public: Preprocessors are run after the source text is split into lines and # normalized, but before parsing begins. # # Prior to invoking the preprocessor, Asciidoctor splits the source text into # lines and normalizes them. The normalize process strips trailing whitespace # and the end of line character sequence from each line. # # Asciidoctor passes the document and the document's Reader to the # {Processor#process} method of the Preprocessor instance. 
The Preprocessor # can modify the Reader as necessary and either return the same Reader (or # falsy, which is equivalent) or a reference to a substitute Reader. # # Preprocessor implementations must extend the Preprocessor class. class Preprocessor < Processor def process document, reader raise ::NotImplementedError, %(#{Preprocessor} subclass #{self.class} must implement the ##{__method__} method) end end Preprocessor::DSL = DocumentProcessorDsl # Public: TreeProcessors are run on the Document after the source has been # parsed into an abstract syntax tree (AST), as represented by the Document # object and its child Node objects (e.g., Section, Block, List, ListItem). # # Asciidoctor invokes the {Processor#process} method on an instance of each # registered TreeProcessor. # # TreeProcessor implementations must extend TreeProcessor. #-- # QUESTION should the tree processor get invoked after parse header too? class TreeProcessor < Processor def process document raise ::NotImplementedError, %(#{TreeProcessor} subclass #{self.class} must implement the ##{__method__} method) end end TreeProcessor::DSL = DocumentProcessorDsl # Alias deprecated class name for backwards compatibility Treeprocessor = TreeProcessor # Public: Postprocessors are run after the document is converted, but before # it is written to the output stream. # # Asciidoctor passes a reference to the converted String to the {Processor#process} # method of each registered Postprocessor. The Postprocessor modifies the # String as necessary and returns the String replacement. # # The markup format in the String is determined by the backend used to convert # the Document. The backend can be looked up using the backend method on the # Document object, as well as various backend-related document attributes. # # TIP: Postprocessors can also be used to relocate assets needed by the published # document. # # Postprocessor implementations must extend Postprocessor. class Postprocessor < Processor def process document, output raise ::NotImplementedError, %(#{Postprocessor} subclass #{self.class} must implement the ##{__method__} method) end end Postprocessor::DSL = DocumentProcessorDsl # Public: IncludeProcessors are used to process `include::<target>[]` # directives in the source document. # # When Asciidoctor comes across a `include::<target>[]` directive in the # source document, it iterates through the IncludeProcessors and delegates # the work of reading the content to the first processor that identifies # itself as capable of handling that target. # # IncludeProcessor implementations must extend IncludeProcessor. #-- # TODO add file extension or regexp as shortcut for handles? method class IncludeProcessor < Processor def process document, reader, target, attributes raise ::NotImplementedError, %(#{IncludeProcessor} subclass #{self.class} must implement the ##{__method__} method) end def handles? target true end end module IncludeProcessorDsl include DocumentProcessorDsl def handles? *args, &block if block_given? raise ::ArgumentError, %(wrong number of arguments (given #{args.size}, expected 0)) unless args.empty? @handles_block = block # TODO enable if we want to support passing proc or lambda as argument instead of block #elsif ::Proc === args[0] # block = args.shift # raise ::ArgumentError, %(wrong number of arguments (given #{args.size}, expected 0)) unless args.empty? # @handles_block = block elsif defined?
@handles_block @handles_block.call args[0] else true end end end IncludeProcessor::DSL = IncludeProcessorDsl # Public: DocinfoProcessors are used to add additional content to # the header and/or footer of the generated document. # # The placement of docinfo content is controlled by the converter. # # DocinfoProcessors implementations must extend DocinfoProcessor. # If a location is not specified, the DocinfoProcessor is assumed # to add content to the header. class DocinfoProcessor < Processor def initialize config = {} super config @config[:location] ||= :head end def process document raise ::NotImplementedError, %(#{DocinfoProcessor} subclass #{self.class} must implement the ##{__method__} method) end end module DocinfoProcessorDsl include DocumentProcessorDsl def at_location value option :location, value end end DocinfoProcessor::DSL = DocinfoProcessorDsl # Public: BlockProcessors are used to handle delimited blocks and paragraphs # that have a custom name. # # When Asciidoctor encounters a delimited block or paragraph with an # unrecognized name while parsing the document, it looks for a BlockProcessor # registered to handle this name and, if found, invokes its {Processor#process} # method to build a corresponding node in the document tree. # # If the process method returns an instance of Block, the content model of that # Block is :compound, and the Block contains at least one line, the parser will # parse those lines into blocks and append them to the returned block. # # If your custom block can be applied to a paragraph or delimited block, and you # want to preserve the content model of the input, check whether the value of # the cloaked-context attribute is :paragraph. If it is, set the content model of # the returned block to :simple. Otherwise, set the content model to :compound. # # AsciiDoc example: # # [shout] # Get a move on. # # Recognized options: # # * :named - The name of the block (required: true) # * :contexts - The blocks contexts on which this style can be used (default: [:paragraph, :open] # * :content_model - The structure of the content supported in this block (default: :compound) # * :positional_attrs - A list of attribute names used to map positional attributes (default: nil) # * :default_attrs - A hash of attribute names and values used to seed the attributes hash (default: nil) # * ... # # BlockProcessor implementations must extend BlockProcessor. class BlockProcessor < Processor attr_accessor :name def initialize name = nil, config = {} super config @name = name || @config[:name] # assign fallbacks case @config[:contexts] when ::NilClass @config[:contexts] ||= [:open, :paragraph].to_set when ::Symbol @config[:contexts] = [@config[:contexts]].to_set else @config[:contexts] = @config[:contexts].to_set end # QUESTION should the default content model be raw?? 
@config[:content_model] ||= :compound end def process parent, reader, attributes raise ::NotImplementedError, %(#{BlockProcessor} subclass #{self.class} must implement the ##{__method__} method) end end module BlockProcessorDsl include SyntaxProcessorDsl def contexts *value option :contexts, value.flatten.to_set end alias on_contexts contexts alias on_context contexts alias bind_to contexts end BlockProcessor::DSL = BlockProcessorDsl class MacroProcessor < Processor attr_accessor :name def initialize name = nil, config = {} super config @name = name || @config[:name] @config[:content_model] ||= :attributes end def process parent, target, attributes raise ::NotImplementedError, %(#{MacroProcessor} subclass #{self.class} must implement the ##{__method__} method) end end module MacroProcessorDsl include SyntaxProcessorDsl def resolve_attributes *args if args.size == 1 && !args[0] option :content_model, :text else super option :content_model, :attributes end end # NOTE resolves_attributes alias is deprecated alias resolves_attributes resolve_attributes end # Public: BlockMacroProcessors are used to handle block macros that have a # custom name. # # If the process method returns an instance of Block, the content model of that # Block is :compound, and the Block contains at least one line, the parser will # parse those lines into blocks and append them to the returned block. # # BlockMacroProcessor implementations must extend BlockMacroProcessor. class BlockMacroProcessor < MacroProcessor def name raise ::ArgumentError, %(invalid name for block macro: #{@name}) unless MacroNameRx.match? @name.to_s @name end end BlockMacroProcessor::DSL = MacroProcessorDsl # Public: InlineMacroProcessors are used to handle inline macros that have a # custom name. # # InlineMacroProcessor implementations must extend InlineMacroProcessor. #-- # TODO break this out into different pattern types # for example, FullInlineMacro, ShortInlineMacro (no target) and other patterns # FIXME for inline macro, we need to have some way to specify the text as a passthrough class InlineMacroProcessor < MacroProcessor @@rx_cache = {} # Lookup the regexp option, resolving it first if necessary. # Once this method is called, the regexp is considered frozen. def regexp @config[:regexp] ||= resolve_regexp @name.to_s, @config[:format] end def resolve_regexp name, format raise ::ArgumentError, %(invalid name for inline macro: #{name}) unless MacroNameRx.match? name @@rx_cache[[name, format]] ||= /\\?#{name}:#{format == :short ? '(){0}' : '(\S+?)'}\[(|#{CC_ANY}*?[^\\])\]/ end end module InlineMacroProcessorDsl include MacroProcessorDsl def format value option :format, value end alias match_format format # NOTE using_format alias is deprecated alias using_format format def match value option :regexp, value end end InlineMacroProcessor::DSL = InlineMacroProcessorDsl # Public: Extension is a proxy object for an extension implementation such as # a processor. It allows the preparation of the extension instance to be # separated from its usage to provide consistency between different # interfaces and avoid tight coupling with the extension type. # # The proxy encapsulates the extension kind (e.g., :block), its config Hash # and the extension instance. This Proxy is what gets stored in the extension # registry when activated. #-- # QUESTION call this ExtensionInfo?
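# For example (a hypothetical sketch; the 'shout' name and the registry variable are
# assumptions), the proxy stored for a registered block extension could be inspected
# like this:
#
#   ext = registry.find_block_extension 'shout'
#   ext.kind     # => :block
#   ext.config   # => the config Hash of the processor
#   ext.instance # => the block processor instance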
class Extension attr_reader :kind attr_reader :config attr_reader :instance def initialize kind, instance, config @kind = kind @instance = instance @config = config end end # Public: A specialization of the Extension proxy that additionally stores a # reference to the {Processor#process} method. By storing this reference, its # possible to accommodate both concrete extension implementations and Procs. class ProcessorExtension < Extension attr_reader :process_method def initialize kind, instance, process_method = nil super kind, instance, instance.config @process_method = process_method || (instance.method :process) end end # Public: A Group is used to register one or more extensions with the Registry. # # The Group should be subclassed and registered with the Registry either by # invoking the {Group.register} method or passing the subclass to the # {Extensions.register} method. Extensions are registered with the Registry # inside the {Group#activate} method. class Group class << self def register name = nil Extensions.register name, self end end def activate registry raise ::NotImplementedError end end # Public: The primary entry point into the extension system. # # Registry holds the extensions which have been registered and activated, has # methods for registering or defining a processor and looks up extensions # stored in the registry during parsing. class Registry # Public: Returns the {Asciidoctor::Document} on which the extensions in this registry are being used. attr_reader :document # Public: Returns the Hash of {Group} classes, instances, and/or Procs that have been registered with this registry. attr_reader :groups def initialize groups = {} @groups = groups reset @preprocessor_extensions = @tree_processor_extensions = @postprocessor_extensions = @include_processor_extensions = @docinfo_processor_extensions = @block_extensions = @block_macro_extensions = @inline_macro_extensions = nil @document = nil end # Public: Activates all the global extension {Group}s and the extension {Group}s # associated with this registry. # # document - the {Asciidoctor::Document} on which the extensions are to be used. # # Returns the instance of this [Registry]. def activate document reset if @document @document = document unless (ext_groups = Extensions.groups.values + @groups.values).empty? ext_groups.each do |group| case group when ::Proc case group.arity when 0, -1 instance_exec(&group) when 1 group.call self end when ::Class group.new.activate self else group.activate self end end end self end # Public: Registers a {Preprocessor} with the extension registry to process # the AsciiDoc source before parsing begins. # # The Preprocessor may be one of four types: # # * A Preprocessor subclass # * An instance of a Preprocessor subclass # * The String name of a Preprocessor subclass # * A method block (i.e., Proc) that conforms to the Preprocessor contract # # Unless the Preprocessor is passed as the method block, it must be the # first argument to this method. # # Examples # # # as a Preprocessor subclass # preprocessor FrontMatterPreprocessor # # # as an instance of a Preprocessor subclass # preprocessor FrontMatterPreprocessor.new # # # as a name of a Preprocessor subclass # preprocessor 'FrontMatterPreprocessor' # # # as a method block # preprocessor do # process do |doc, reader| # ... # end # end # # Returns the [Extension] stored in the registry that proxies the # instance of this Preprocessor. 
def preprocessor *args, &block add_document_processor :preprocessor, args, &block end # Public: Checks whether any {Preprocessor} extensions have been registered. # # Returns a [Boolean] indicating whether any Preprocessor extensions are registered. def preprocessors? !!@preprocessor_extensions end # Public: Retrieves the {Extension} proxy objects for all # Preprocessor instances in this registry. # # Returns an [Array] of Extension proxy objects. def preprocessors @preprocessor_extensions end # Public: Registers a {TreeProcessor} with the extension registry to process # the AsciiDoc source after parsing is complete. # # The TreeProcessor may be one of four types: # # * A TreeProcessor subclass # * An instance of a TreeProcessor subclass # * The String name of a TreeProcessor subclass # * A method block (i.e., Proc) that conforms to the TreeProcessor contract # # Unless the TreeProcessor is passed as the method block, it must be the # first argument to this method. # # Examples # # # as a TreeProcessor subclass # tree_processor ShellTreeProcessor # # # as an instance of a TreeProcessor subclass # tree_processor ShellTreeProcessor.new # # # as a name of a TreeProcessor subclass # tree_processor 'ShellTreeProcessor' # # # as a method block # tree_processor do # process do |document| # ... # end # end # # Returns the [Extension] stored in the registry that proxies the # instance of this TreeProcessor. def tree_processor *args, &block add_document_processor :tree_processor, args, &block end # Public: Checks whether any {TreeProcessor} extensions have been registered. # # Returns a [Boolean] indicating whether any TreeProcessor extensions are registered. def tree_processors? !!@tree_processor_extensions end # Public: Retrieves the {Extension} proxy objects for all # TreeProcessor instances in this registry. # # Returns an [Array] of Extension proxy objects. def tree_processors @tree_processor_extensions end # Alias deprecated methods for backwards compatibility alias treeprocessor tree_processor alias treeprocessors? tree_processors? alias treeprocessors tree_processors # Public: Registers a {Postprocessor} with the extension registry to process # the output after conversion is complete. # # The Postprocessor may be one of four types: # # * A Postprocessor subclass # * An instance of a Postprocessor subclass # * The String name of a Postprocessor subclass # * A method block (i.e., Proc) that conforms to the Postprocessor contract # # Unless the Postprocessor is passed as the method block, it must be the # first argument to this method. # # Examples # # # as a Postprocessor subclass # postprocessor AnalyticsPostprocessor # # # as an instance of a Postprocessor subclass # postprocessor AnalyticsPostprocessor.new # # # as a name of a Postprocessor subclass # postprocessor 'AnalyticsPostprocessor' # # # as a method block # postprocessor do # process do |document, output| # ... # end # end # # Returns the [Extension] stored in the registry that proxies the # instance of this Postprocessor. def postprocessor *args, &block add_document_processor :postprocessor, args, &block end # Public: Checks whether any {Postprocessor} extensions have been registered. # # Returns a [Boolean] indicating whether any Postprocessor extensions are registered. def postprocessors? !!@postprocessor_extensions end # Public: Retrieves the {Extension} proxy objects for all # Postprocessor instances in this registry. # # Returns an [Array] of Extension proxy objects. 
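      #
      # Example (a rough sketch of how a converter might apply them; doc and output are
      # assumed to be the current Document and its converted output):
      #
      #   if registry.postprocessors?
      #     registry.postprocessors.each {|ext| output = ext.process_method[doc, output] }
      #   end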
      def postprocessors
        @postprocessor_extensions
      end

      # Public: Registers an {IncludeProcessor} with the extension registry to have
      # a shot at handling the include directive.
      #
      # The IncludeProcessor may be one of four types:
      #
      # * An IncludeProcessor subclass
      # * An instance of an IncludeProcessor subclass
      # * The String name of an IncludeProcessor subclass
      # * A method block (i.e., Proc) that conforms to the IncludeProcessor contract
      #
      # Unless the IncludeProcessor is passed as the method block, it must be the
      # first argument to this method.
      #
      # Examples
      #
      #   # as an IncludeProcessor subclass
      #   include_processor GitIncludeProcessor
      #
      #   # as an instance of an IncludeProcessor subclass
      #   include_processor GitIncludeProcessor.new
      #
      #   # as a name of an IncludeProcessor subclass
      #   include_processor 'GitIncludeProcessor'
      #
      #   # as a method block
      #   include_processor do
      #     process do |document, reader, target, attrs|
      #       ...
      #     end
      #   end
      #
      # Returns the [Extension] stored in the registry that proxies the
      # instance of this IncludeProcessor.
      def include_processor *args, &block
        add_document_processor :include_processor, args, &block
      end

      # Public: Checks whether any {IncludeProcessor} extensions have been registered.
      #
      # Returns a [Boolean] indicating whether any IncludeProcessor extensions are registered.
      def include_processors?
        !!@include_processor_extensions
      end

      # Public: Retrieves the {Extension} proxy objects for all the
      # IncludeProcessor instances stored in this registry.
      #
      # Returns an [Array] of Extension proxy objects.
      def include_processors
        @include_processor_extensions
      end

      # Public: Registers a {DocinfoProcessor} with the extension registry to
      # add additional docinfo to the document.
      #
      # The DocinfoProcessor may be one of four types:
      #
      # * A DocinfoProcessor subclass
      # * An instance of a DocinfoProcessor subclass
      # * The String name of a DocinfoProcessor subclass
      # * A method block (i.e., Proc) that conforms to the DocinfoProcessor contract
      #
      # Unless the DocinfoProcessor is passed as the method block, it must be the
      # first argument to this method.
      #
      # Examples
      #
      #   # as a DocinfoProcessor subclass
      #   docinfo_processor MetaRobotsDocinfoProcessor
      #
      #   # as an instance of a DocinfoProcessor subclass with an explicit location
      #   docinfo_processor JQueryDocinfoProcessor.new, location: :footer
      #
      #   # as a name of a DocinfoProcessor subclass
      #   docinfo_processor 'MetaRobotsDocinfoProcessor'
      #
      #   # as a method block
      #   docinfo_processor do
      #     at_location :footer
      #     process do |doc|
      #       'footer content'
      #     end
      #   end
      #
      # Returns the [Extension] stored in the registry that proxies the
      # instance of this DocinfoProcessor.
      def docinfo_processor *args, &block
        add_document_processor :docinfo_processor, args, &block
      end

      # Public: Checks whether any {DocinfoProcessor} extensions have been registered.
      #
      # location - A Symbol for selecting docinfo extensions at a given location (:head or :footer) (default: nil)
      #
      # Returns a [Boolean] indicating whether any DocinfoProcessor extensions are registered.
      def docinfo_processors? location = nil
        if @docinfo_processor_extensions
          if location
            @docinfo_processor_extensions.any? {|ext| ext.config[:location] == location }
          else
            true
          end
        else
          false
        end
      end

      # Public: Retrieves the {Extension} proxy objects for all the
      # DocinfoProcessor instances stored in this registry.
      #
      # location - A Symbol for selecting docinfo extensions at a given location (:head or :footer) (default: nil)
      #
      # Returns an [Array] of Extension proxy objects.
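      #
      # Example (illustrative; doc is the current Document):
      #
      #   if registry.docinfo_processors? :head
      #     head_docinfo = (registry.docinfo_processors :head).map {|ext| ext.process_method[doc] }.compact.join ?\n
      #   end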
def docinfo_processors location = nil if @docinfo_processor_extensions if location @docinfo_processor_extensions.select {|ext| ext.config[:location] == location } else @docinfo_processor_extensions end end end # Public: Registers a {BlockProcessor} with the extension registry to # process the block content (i.e., delimited block or paragraph) in the # AsciiDoc source annotated with the specified block name (i.e., style). # # The BlockProcessor may be one of four types: # # * A BlockProcessor subclass # * An instance of a BlockProcessor subclass # * The String name of a BlockProcessor subclass # * A method block (i.e., Proc) that conforms to the BlockProcessor contract # # Unless the BlockProcessor is passed as the method block, it must be the # first argument to this method. The second argument is the name (coersed # to a Symbol) of the AsciiDoc block content (i.e., delimited block or # paragraph) that this processor is registered to handle. If a block name # is not passed as an argument, it gets read from the name property of the # BlockProcessor instance. If a name still cannot be determined, an error # is raised. # # Examples # # # as a BlockProcessor subclass # block ShoutBlock # # # as a BlockProcessor subclass with an explicit block name # block ShoutBlock, :shout # # # as an instance of a BlockProcessor subclass # block ShoutBlock.new # # # as an instance of a BlockProcessor subclass with an explicit block name # block ShoutBlock.new, :shout # # # as a name of a BlockProcessor subclass # block 'ShoutBlock' # # # as a name of a BlockProcessor subclass with an explicit block name # block 'ShoutBlock', :shout # # # as a method block # block do # named :shout # process do |parent, reader, attrs| # ... # end # end # # # as a method block with an explicit block name # block :shout do # process do |parent, reader, attrs| # ... # end # end # # Returns an instance of the [Extension] proxy object that is stored in the # registry and manages the instance of this BlockProcessor. def block *args, &block add_syntax_processor :block, args, &block end # Public: Checks whether any {BlockProcessor} extensions have been registered. # # Returns a [Boolean] indicating whether any BlockProcessor extensions are registered. def blocks? !!@block_extensions end # Public: Checks whether any {BlockProcessor} extensions are registered to # handle the specified block name appearing on the specified context. # # This method assumes you've called blocks? first to check whether any # block extensions are registered. # # Returns the [Extension] proxy object for the BlockProcessor that matches # the block name and context or false if no match is found. def registered_for_block? name, context if (ext = @block_extensions[name.to_sym]) (ext.config[:contexts].include? context) ? ext : false else false end end # Public: Retrieves the {Extension} proxy object for the BlockProcessor registered # to handle block content with the name. # # name - the String or Symbol (coersed to a Symbol) macro name # # This method assumes you've called blocks? first to check whether any # block extensions are registered. # # Returns the [Extension] object stored in the registry that proxies the # corresponding BlockProcessor or nil if a match is not found. def find_block_extension name @block_extensions[name.to_sym] end # Public: Registers a {BlockMacroProcessor} with the extension registry to # process a block macro with the specified name. 
# # The BlockMacroProcessor may be one of four types: # # * A BlockMacroProcessor subclass # * An instance of a BlockMacroProcessor subclass # * The String name of a BlockMacroProcessor subclass # * A method block (i.e., Proc) that conforms to the BlockMacroProcessor contract # # Unless the BlockMacroProcessor is passed as the method block, it must be # the first argument to this method. The second argument is the name # (coersed to a Symbol) of the AsciiDoc block macro that this processor is # registered to handle. If a block macro name is not passed as an argument, # it gets read from the name property of the BlockMacroProcessor instance. # If a name still cannot be determined, an error is raised. # # Examples # # # as a BlockMacroProcessor subclass # block_macro GistBlockMacro # # # as a BlockMacroProcessor subclass with an explicit macro name # block_macro GistBlockMacro, :gist # # # as an instance of a BlockMacroProcessor subclass # block_macro GistBlockMacro.new # # # as an instance of a BlockMacroProcessor subclass with an explicit macro name # block_macro GistBlockMacro.new, :gist # # # as a name of a BlockMacroProcessor subclass # block_macro 'GistBlockMacro' # # # as a name of a BlockMacroProcessor subclass with an explicit macro name # block_macro 'GistBlockMacro', :gist # # # as a method block # block_macro do # named :gist # process do |parent, target, attrs| # ... # end # end # # # as a method block with an explicit macro name # block_macro :gist do # process do |parent, target, attrs| # ... # end # end # # Returns an instance of the [Extension] proxy object that is stored in the # registry and manages the instance of this BlockMacroProcessor. def block_macro *args, &block add_syntax_processor :block_macro, args, &block end # Public: Checks whether any {BlockMacroProcessor} extensions have been registered. # # Returns a [Boolean] indicating whether any BlockMacroProcessor extensions are registered. def block_macros? !!@block_macro_extensions end # Public: Checks whether any {BlockMacroProcessor} extensions are registered to # handle the block macro with the specified name. # # name - the String or Symbol (coersed to a Symbol) macro name # # This method assumes you've called block_macros? first to check whether any # block macro extensions are registered. # # Returns the [Extension] proxy object for the BlockMacroProcessor that matches # the macro name or false if no match is found. #-- # TODO only allow blank target if format is :short def registered_for_block_macro? name (ext = @block_macro_extensions[name.to_sym]) ? ext : false end # Public: Retrieves the {Extension} proxy object for the BlockMacroProcessor registered # to handle a block macro with the specified name. # # name - the String or Symbol (coersed to a Symbol) macro name # # This method assumes you've called block_macros? first to check whether any # block macro extensions are registered. # # Returns the [Extension] object stored in the registry that proxies the # corresponding BlockMacroProcessor or nil if a match is not found. def find_block_macro_extension name @block_macro_extensions[name.to_sym] end # Public: Registers a {InlineMacroProcessor} with the extension registry to # process an inline macro with the specified name. 
      #
      # The InlineMacroProcessor may be one of four types:
      #
      # * An InlineMacroProcessor subclass
      # * An instance of an InlineMacroProcessor subclass
      # * The String name of an InlineMacroProcessor subclass
      # * A method block (i.e., Proc) that conforms to the InlineMacroProcessor contract
      #
      # Unless the InlineMacroProcessor is passed as the method block, it must be
      # the first argument to this method. The second argument is the name
      # (coerced to a Symbol) of the AsciiDoc inline macro that this processor is
      # registered to handle. If an inline macro name is not passed as an argument,
      # it gets read from the name property of the InlineMacroProcessor instance.
      # If a name still cannot be determined, an error is raised.
      #
      # Examples
      #
      #   # as an InlineMacroProcessor subclass
      #   inline_macro ChromeInlineMacro
      #
      #   # as an InlineMacroProcessor subclass with an explicit macro name
      #   inline_macro ChromeInlineMacro, :chrome
      #
      #   # as an instance of an InlineMacroProcessor subclass
      #   inline_macro ChromeInlineMacro.new
      #
      #   # as an instance of an InlineMacroProcessor subclass with an explicit macro name
      #   inline_macro ChromeInlineMacro.new, :chrome
      #
      #   # as a name of an InlineMacroProcessor subclass
      #   inline_macro 'ChromeInlineMacro'
      #
      #   # as a name of an InlineMacroProcessor subclass with an explicit macro name
      #   inline_macro 'ChromeInlineMacro', :chrome
      #
      #   # as a method block
      #   inline_macro do
      #     named :chrome
      #     process do |parent, target, attrs|
      #       ...
      #     end
      #   end
      #
      #   # as a method block with an explicit macro name
      #   inline_macro :chrome do
      #     process do |parent, target, attrs|
      #       ...
      #     end
      #   end
      #
      # Returns an instance of the [Extension] proxy object that is stored in the
      # registry and manages the instance of this InlineMacroProcessor.
      def inline_macro *args, &block
        add_syntax_processor :inline_macro, args, &block
      end

      # Public: Checks whether any {InlineMacroProcessor} extensions have been registered.
      #
      # Returns a [Boolean] indicating whether any InlineMacroProcessor extensions are registered.
      def inline_macros?
        !!@inline_macro_extensions
      end

      # Public: Checks whether any {InlineMacroProcessor} extensions are registered to
      # handle the inline macro with the specified name.
      #
      # name - the String or Symbol (coerced to a Symbol) macro name
      #
      # This method assumes you've called inline_macros? first to check whether any
      # inline macro extensions are registered.
      #
      # Returns the [Extension] proxy object for the InlineMacroProcessor that matches
      # the macro name or false if no match is found.
      def registered_for_inline_macro? name
        (ext = @inline_macro_extensions[name.to_sym]) ? ext : false
      end

      # Public: Retrieves the {Extension} proxy object for the InlineMacroProcessor registered
      # to handle an inline macro with the specified name.
      #
      # name - the String or Symbol (coerced to a Symbol) macro name
      #
      # This method assumes you've called inline_macros? first to check whether any
      # inline macro extensions are registered.
      #
      # Returns the [Extension] object stored in the registry that proxies the
      # corresponding InlineMacroProcessor or nil if a match is not found.
      def find_inline_macro_extension name
        @inline_macro_extensions[name.to_sym]
      end

      # Public: Retrieves the {Extension} proxy objects for all
      # InlineMacroProcessor instances in this registry.
      #
      # This method assumes you've called inline_macros? first to check whether any
      # inline macro extensions are registered.
      #
      # Returns an [Array] of Extension proxy objects.
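      #
      # Example (illustrative):
      #
      #   registry.inline_macros.map {|ext| ext.config[:name] } if registry.inline_macros?
      #   # => [:chrome]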
def inline_macros @inline_macro_extensions.values end # Public: Inserts the document processor {Extension} instance as the first # processor of its kind in the extension registry. # # Examples # # prefer :include_processor do # process do |document, reader, target, attrs| # ... # end # end # # Returns the [Extension] stored in the registry that proxies the instance # of this processor. def prefer *args, &block extension = ProcessorExtension === (arg0 = args.shift) ? arg0 : (send arg0, *args, &block) extensions_store = instance_variable_get(%(@#{extension.kind}_extensions).to_sym) extensions_store.unshift extensions_store.delete extension extension end private def add_document_processor kind, args, &block kind_name = kind.to_s.tr '_', ' ' kind_class_symbol = kind_name.split.map {|it| it.capitalize }.join.to_sym kind_class = Extensions.const_get kind_class_symbol, false kind_java_class = (defined? ::AsciidoctorJ) ? (::AsciidoctorJ::Extensions.const_get kind_class_symbol, false) : nil kind_store = instance_variable_get(%(@#{kind}_extensions).to_sym) || instance_variable_set(%(@#{kind}_extensions).to_sym, []) # style 1: specified as block if block_given? config = resolve_args args, 1 (processor = kind_class.new config).singleton_class.enable_dsl if block.arity == 0 processor.instance_exec(&block) else yield processor end unless processor.process_block_given? raise ::NoMethodError, %(No block specified to process #{kind_name} extension at #{block.source_location.join ':'}) end processor.freeze extension = ProcessorExtension.new kind, processor else processor, config = resolve_args args, 2 # style 2: specified as Class or String class name if (processor_class = Helpers.resolve_class processor) unless processor_class < kind_class || (kind_java_class && processor_class < kind_java_class) raise ::ArgumentError, %(Invalid type for #{kind_name} extension: #{processor}) end processor_instance = processor_class.new config processor_instance.freeze extension = ProcessorExtension.new kind, processor_instance # style 3: specified as instance elsif kind_class === processor || (kind_java_class && kind_java_class === processor) processor.update_config config processor.freeze extension = ProcessorExtension.new kind, processor else raise ::ArgumentError, %(Invalid arguments specified for registering #{kind_name} extension: #{args}) end end extension.config[:position] == :>> ? (kind_store.unshift extension) : (kind_store << extension) extension end def add_syntax_processor kind, args, &block kind_name = kind.to_s.tr '_', ' ' kind_class_symbol = (kind_name.split.map {|it| it.capitalize } << 'Processor').join.to_sym kind_class = Extensions.const_get kind_class_symbol, false kind_java_class = (defined? ::AsciidoctorJ) ? (::AsciidoctorJ::Extensions.const_get kind_class_symbol, false) : nil kind_store = instance_variable_get(%(@#{kind}_extensions).to_sym) || instance_variable_set(%(@#{kind}_extensions).to_sym, {}) # style 1: specified as block if block_given? name, config = resolve_args args, 2 (processor = kind_class.new (as_symbol name), config).singleton_class.enable_dsl if block.arity == 0 processor.instance_exec(&block) else yield processor end unless (name = as_symbol processor.name) raise ::ArgumentError, %(No name specified for #{kind_name} extension at #{block.source_location.join ':'}) end unless processor.process_block_given? 
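          # a DSL-defined processor cannot do anything without a process block, so fail fast,
          # pointing at the source location of the registration block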
raise ::NoMethodError, %(No block specified to process #{kind_name} extension at #{block.source_location.join ':'}) end processor.freeze kind_store[name] = ProcessorExtension.new kind, processor else processor, name, config = resolve_args args, 3 # style 2: specified as Class or String class name if (processor_class = Helpers.resolve_class processor) unless processor_class < kind_class || (kind_java_class && processor_class < kind_java_class) raise ::ArgumentError, %(Class specified for #{kind_name} extension does not inherit from #{kind_class}: #{processor}) end processor_instance = processor_class.new as_symbol(name), config unless (name = as_symbol processor_instance.name) raise ::ArgumentError, %(No name specified for #{kind_name} extension: #{processor}) end processor_instance.freeze kind_store[name] = ProcessorExtension.new kind, processor_instance # style 3: specified as instance elsif kind_class === processor || (kind_java_class && kind_java_class === processor) processor.update_config config # TODO need a test for this override! unless (name = name ? (processor.name = as_symbol name) : (as_symbol processor.name)) raise ::ArgumentError, %(No name specified for #{kind_name} extension: #{processor}) end processor.freeze kind_store[name] = ProcessorExtension.new kind, processor else raise ::ArgumentError, %(Invalid arguments specified for registering #{kind_name} extension: #{args}) end end end def reset @preprocessor_extensions = @tree_processor_extensions = @postprocessor_extensions = @include_processor_extensions = @docinfo_processor_extensions = @block_extensions = @block_macro_extensions = @inline_macro_extensions = nil @document = nil end def resolve_args args, expect opts = ::Hash === args[-1] ? args.pop : {} return opts if expect == 1 if (missing = expect - 1 - args.size) > 0 args += (::Array.new missing) elsif missing < 0 args.pop(-missing) end args << opts args end def as_symbol name name ? name.to_sym : nil end end class << self def generate_name %(extgrp#{next_auto_id}) end def next_auto_id @auto_id ||= -1 @auto_id += 1 end def groups @groups ||= {} end def create name = nil, &block if block_given? Registry.new (name || generate_name) => block else Registry.new end end # Public: Registers an extension Group that subsequently registers a # collection of extensions. # # Registers the extension Group specified under the given name. If a name is # not given, one is calculated by appending the next value in a 0-based # index to the string "extgrp". For instance, the first unnamed extension # group to be registered is assigned the name "extgrp0" if a name is not # specified. # # The names are not yet used, but are intended for selectively activating # extensions in the future. # # If the extension group argument is a String or a Symbol, it gets resolved # to a Class before being registered. # # name - The name under which this extension group is registered (optional, default: nil) # group - A block (Proc), a Class, a String or Symbol name of a Class or # an Object instance of a Class. # # Examples # # Asciidoctor::Extensions.register UmlExtensions # # Asciidoctor::Extensions.register :uml, UmlExtensions # # Asciidoctor::Extensions.register do # block_processor :plantuml, PlantUmlBlock # end # # Asciidoctor::Extensions.register :uml do # block_processor :plantuml, PlantUmlBlock # end # # Returns the [Proc, Class or Object] instance, matching the type passed to this method. def register *args, &block argc = args.size if block_given? 
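        # style 1: the extension group is specified as a block (Proc)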
resolved_group = block elsif (group = args.pop) # QUESTION should we instantiate the group class here or defer until activation?? resolved_group = (Helpers.resolve_class group) || group else raise ::ArgumentError, %(Extension group to register not specified) end name = args.pop || generate_name unless args.empty? raise ::ArgumentError, %(Wrong number of arguments (#{argc} for 1..2)) end groups[name.to_sym] = resolved_group end # Public: Unregister all statically-registered extension groups. # # Returns nothing def unregister_all @groups = {} nil end # Public: Unregister statically-registered extension groups by name. # # names - one or more Symbol or String group names to unregister # # Returns nothing def unregister *names names.each_with_object(groups) {|group, catalog| catalog.delete group.to_sym } nil end end end end asciidoctor-2.0.20/lib/asciidoctor/helpers.rb000066400000000000000000000270301443135032600211410ustar00rootroot00000000000000# frozen_string_literal: true module Asciidoctor # Internal: Except where noted, a module that contains internal helper functions. module Helpers module_function # Public: Require the specified library using Kernel#require. # # Attempts to load the library specified in the first argument using the # Kernel#require. Rescues the LoadError if the library is not available and # passes a message to Kernel#raise if on_failure is :abort or Kernel#warn if # on_failure is :warn to communicate to the user that processing is being # aborted or functionality is disabled, respectively. If a gem_name is # specified, the message communicates that a required gem is not available. # # name - the String name of the library to require. # gem_name - a Boolean that indicates whether this library is provided by a RubyGem, # or the String name of the RubyGem if it differs from the library name # (default: true) # on_failure - a Symbol that indicates how to handle a load failure (:abort, :warn, :ignore) (default: :abort) # # Returns The [Boolean] return value of Kernel#require if the library can be loaded. # Otherwise, if on_failure is :abort, Kernel#raise is called with an appropriate message. # Otherwise, if on_failure is :warn, Kernel#warn is called with an appropriate message and nil returned. # Otherwise, nil is returned. def require_library name, gem_name = true, on_failure = :abort require name rescue ::LoadError include Logging unless include? Logging if gem_name gem_name = name if gem_name == true case on_failure when :abort details = $!.path == gem_name ? '' : %[ (reason: #{$!.path ? %(cannot load '#{$!.path}') : $!.message})] raise ::LoadError, %(asciidoctor: FAILED: required gem '#{gem_name}' is not available#{details}. Processing aborted.) when :warn details = $!.path == gem_name ? '' : %[ (reason: #{$!.path ? %(cannot load '#{$!.path}') : $!.message})] logger.warn %(optional gem '#{gem_name}' is not available#{details}. Functionality disabled.) end else case on_failure when :abort raise ::LoadError, %(asciidoctor: FAILED: #{$!.message.chomp '.'}. Processing aborted.) when :warn logger.warn %(#{$!.message.chomp '.'}. Functionality disabled.) end end nil end # Internal: Prepare the source data Array for parsing. # # Encodes the data to UTF-8, if necessary, and removes any trailing # whitespace from every line. # # If a BOM is found at the beginning of the data, a best attempt is made to # encode it to UTF-8 from the specified source encoding. 
# # data - the source data Array to prepare (no nil entries allowed) # trim_end - whether to trim whitespace from the end of each line; # (true cleans all whitespace; false only removes trailing newline) (default: true) # # returns a String Array of prepared lines def prepare_source_array data, trim_end = true return [] if data.empty? if (leading_2_bytes = (leading_bytes = (first = data[0]).unpack 'C3').slice 0, 2) == BOM_BYTES_UTF_16LE data[0] = first.byteslice 2, first.bytesize # NOTE you can't split a UTF-16LE string using .lines when encoding is UTF-8; doing so will cause this line to fail return trim_end ? data.map {|line| (line.encode UTF_8, ::Encoding::UTF_16LE).rstrip } : data.map {|line| (line.encode UTF_8, ::Encoding::UTF_16LE).chomp } elsif leading_2_bytes == BOM_BYTES_UTF_16BE data[0] = first.byteslice 2, first.bytesize return trim_end ? data.map {|line| (line.encode UTF_8, ::Encoding::UTF_16BE).rstrip } : data.map {|line| (line.encode UTF_8, ::Encoding::UTF_16BE).chomp } elsif leading_bytes == BOM_BYTES_UTF_8 data[0] = first.byteslice 3, first.bytesize end if first.encoding == UTF_8 trim_end ? data.map {|line| line.rstrip } : data.map {|line| line.chomp } else trim_end ? data.map {|line| (line.encode UTF_8).rstrip } : data.map {|line| (line.encode UTF_8).chomp } end end # Internal: Prepare the source data String for parsing. # # Encodes the data to UTF-8, if necessary, splits it into an array, and # removes any trailing whitespace from every line. # # If a BOM is found at the beginning of the data, a best attempt is made to # encode it to UTF-8 from the specified source encoding. # # data - the source data String to prepare # trim_end - whether to trim whitespace from the end of each line; # (true cleans all whitespace; false only removes trailing newline) (default: true) # # returns a String Array of prepared lines def prepare_source_string data, trim_end = true return [] if data.nil_or_empty? if (leading_2_bytes = (leading_bytes = data.unpack 'C3').slice 0, 2) == BOM_BYTES_UTF_16LE data = (data.byteslice 2, data.bytesize).encode UTF_8, ::Encoding::UTF_16LE elsif leading_2_bytes == BOM_BYTES_UTF_16BE data = (data.byteslice 2, data.bytesize).encode UTF_8, ::Encoding::UTF_16BE elsif leading_bytes == BOM_BYTES_UTF_8 data = data.byteslice 3, data.bytesize data = data.encode UTF_8 unless data.encoding == UTF_8 elsif data.encoding != UTF_8 data = data.encode UTF_8 end if trim_end [].tap {|lines| data.each_line {|line| lines << line.rstrip } } else [].tap {|lines| data.each_line {|line| lines << line.chomp } } end end # Internal: Efficiently checks whether the specified String resembles a URI # # Uses the Asciidoctor::UriSniffRx regex to check whether the String begins # with a URI prefix (e.g., http://). No validation of the URI is performed. # # str - the String to check # # returns true if the String is a URI, false if it is not if ::RUBY_ENGINE == 'jruby' def uriish? str (str.include? ':') && !(str.start_with? 'uri:classloader:') && (UriSniffRx.match? str) end else def uriish? str (str.include? ':') && (UriSniffRx.match? str) end end # Internal: Encode a URI component String for safe inclusion in a URI. # # str - the URI component String to encode # # Returns the String with all reserved URI characters encoded (e.g., /, &, =, space, etc). 
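    #
    # Example
    #
    #   Helpers.encode_uri_component 'a b&c'
    #   # => "a+b%26c"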
if RUBY_ENGINE == 'opal' def encode_uri_component str # patch necessary to adhere with RFC-3986 (and thus CGI.escape) # see https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/encodeURIComponent#Description %x( return encodeURIComponent(str).replace(/%20|[!'()*]/g, function (m) { return m === '%20' ? '+' : '%' + m.charCodeAt(0).toString(16) }) ) end else CGI = ::CGI def encode_uri_component str CGI.escape str end end # Internal: Apply URI path encoding to spaces in the specified string (i.e., convert spaces to %20). # # str - the String to encode # # Returns the specified String with all spaces replaced with %20. def encode_spaces_in_uri str (str.include? ' ') ? (str.gsub ' ', '%20') : str end # Public: Removes the file extension from filename and returns the result # # filename - The String file name to process; expected to be a posix path # # Examples # # Helpers.rootname 'part1/chapter1.adoc' # # => "part1/chapter1" # # Returns the String filename with the file extension removed def rootname filename if (last_dot_idx = filename.rindex '.') (filename.index '/', last_dot_idx) ? filename : (filename.slice 0, last_dot_idx) else filename end end # Public: Retrieves the basename of the filename, optionally removing the extension, if present # # filename - The String file name to process. # drop_ext - A Boolean flag indicating whether to drop the extension # or an explicit String extension to drop (default: nil). # # Examples # # Helpers.basename 'images/tiger.png', true # # => "tiger" # # Helpers.basename 'images/tiger.png', '.png' # # => "tiger" # # Returns the String filename with leading directories removed and, if specified, the extension removed def basename filename, drop_ext = nil if drop_ext ::File.basename filename, (drop_ext == true ? (extname filename) : drop_ext) else ::File.basename filename end end # Public: Returns whether this path has a file extension. # # path - The path String to check; expects a posix path # # Returns true if the path has a file extension, false otherwise def extname? path (last_dot_idx = path.rindex '.') && !(path.index '/', last_dot_idx) end # Public: Retrieves the file extension of the specified path. The file extension is the portion of the path in the # last path segment starting from the last period. # # This method differs from File.extname in that it gives us control over the fallback value and is more efficient. # # path - The path String in which to look for a file extension # fallback - The fallback String to return if no file extension is present (optional, default: '') # # Returns the String file extension (with the leading dot included) or the fallback value if the path has no file extension. if ::File::ALT_SEPARATOR def extname path, fallback = '' if (last_dot_idx = path.rindex '.') (path.index '/', last_dot_idx) || (path.index ::File::ALT_SEPARATOR, last_dot_idx) ? fallback : (path.slice last_dot_idx, path.length) else fallback end end else def extname path, fallback = '' if (last_dot_idx = path.rindex '.') (path.index '/', last_dot_idx) ? fallback : (path.slice last_dot_idx, path.length) else fallback end end end # Internal: Make a directory, ensuring all parent directories exist. def mkdir_p dir unless ::File.directory? dir unless (parent_dir = ::File.dirname dir) == '.' mkdir_p parent_dir end begin ::Dir.mkdir dir rescue ::SystemCallError raise unless ::File.directory? 
dir end end end ROMAN_NUMERALS = { 'M' => 1000, 'CM' => 900, 'D' => 500, 'CD' => 400, 'C' => 100, 'XC' => 90, 'L' => 50, 'XL' => 40, 'X' => 10, 'IX' => 9, 'V' => 5, 'IV' => 4, 'I' => 1 } private_constant :ROMAN_NUMERALS # Internal: Converts an integer to a Roman numeral. # # val - the [Integer] value to convert # # Returns the [String] roman numeral for this integer def int_to_roman val ROMAN_NUMERALS.map do |l, i| repeat, val = val.divmod i l * repeat end.join end # Internal: Get the next value in the sequence. # # Handles both integer and character sequences. # # current - the value to increment as a String or Integer # # returns the next value in the sequence according to the current value's type def nextval current if ::Integer === current current + 1 elsif (intval = current.to_i).to_s == current.to_s intval + 1 else current.succ end end # Internal: Resolve the specified object as a Class # # object - The Object to resolve as a Class # # Returns a Class if the specified object is a Class (but not a Module) or # a String that resolves to a Class; otherwise, nil def resolve_class object ::Class === object ? object : (::String === object ? (class_for_name object) : nil) end # Internal: Resolves a Class object (not a Module) for the qualified name. # # Returns Class def class_for_name qualified_name raise unless ::Class === (resolved = ::Object.const_get qualified_name, false) resolved rescue raise ::NameError, %(Could not resolve class for name: #{qualified_name}) end end end asciidoctor-2.0.20/lib/asciidoctor/inline.rb000066400000000000000000000034611443135032600207570ustar00rootroot00000000000000# frozen_string_literal: true module Asciidoctor # Public: Methods for managing inline elements in AsciiDoc block class Inline < AbstractNode # Public: Get the text of this inline element attr_accessor :text # Public: Get the type (qualifier) of this inline element attr_reader :type # Public: Get/Set the target (e.g., uri) of this inline element attr_accessor :target def initialize(parent, context, text = nil, opts = {}) super(parent, context, opts) @node_name = %(inline_#{context}) @text = text @id = opts[:id] @type = opts[:type] @target = opts[:target] end def block? false end def inline? true end def convert converter.convert self end # Deprecated: Use {Inline#convert} instead. alias render convert # Public: Returns the converted alt text for this inline image. # # Returns the [String] value of the alt attribute. def alt (attr 'alt') || '' end # For a reference node (:ref or :bibref), the text is the reftext (and the reftext attribute is not set). # # (see AbstractNode#reftext?) def reftext? @text && (@type == :ref || @type == :bibref) end # For a reference node (:ref or :bibref), the text is the reftext (and the reftext attribute is not set). # # (see AbstractNode#reftext) def reftext (val = @text) ? (apply_reftext_subs val) : nil end # Public: Generate cross reference text (xreftext) that can be used to refer # to this inline node. # # Use the explicit reftext for this inline node, if specified, retrieved by # calling the reftext method. Otherwise, returns nil. # # xrefstyle - Not currently used (default: nil). # # Returns the [String] reftext to refer to this inline node or nothing if no # reftext is defined. 
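    #
    # Example (an illustrative sketch; parent is assumed to be a node attached to a document):
    #
    #   ref = Asciidoctor::Inline.new parent, :anchor, 'Section A', type: :ref, id: 'sect-a'
    #   ref.xreftext
    #   # => "Section A"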
def xreftext xrefstyle = nil reftext end end end asciidoctor-2.0.20/lib/asciidoctor/list.rb000066400000000000000000000067761443135032600204700ustar00rootroot00000000000000# frozen_string_literal: true module Asciidoctor # Public: Methods for managing AsciiDoc lists (ordered, unordered and description lists) class List < AbstractBlock # Public: Create alias for blocks alias items blocks # Public: Get the items in this list as an Array alias content blocks # Public: Create alias to check if this list has blocks alias items? blocks? def initialize parent, context, opts = {} super end # Check whether this list is an outline list (unordered or ordered). # # Return true if this list is an outline list. Otherwise, return false. def outline? @context == :ulist || @context == :olist end def convert if @context == :colist result = super @document.callouts.next_list result else super end end # Deprecated: Use {List#convert} instead. alias render convert def to_s %(#<#{self.class}@#{object_id} {context: #{@context.inspect}, style: #{@style.inspect}, items: #{items.size}}>) end end # Public: Methods for managing items for AsciiDoc olists, ulist, and dlists. # # In a description list (dlist), each item is a tuple that consists of a 2-item Array of ListItem terms and a ListItem # description (i.e., [[term, term, ...], desc]. If a description is not set, then the second entry in the tuple is nil. class ListItem < AbstractBlock # A contextual alias for the list parent node; counterpart to the items alias on List alias list parent # Public: Get/Set the String used to mark this list item attr_accessor :marker # Public: Initialize an Asciidoctor::ListItem object. # # parent - The parent list block for this list item # text - the String text (default nil) def initialize parent, text = nil super parent, :list_item @text = text @level = parent.level @subs = NORMAL_SUBS.drop 0 end # Public: A convenience method that checks whether the text of this list item # is not blank (i.e., not nil or empty string). def text? @text.nil_or_empty? ? false : true end # Public: Get the String text of this ListItem with substitutions applied. # # By default, normal substitutions are applied to the text. The substitutions # can be modified by altering the subs property of this object. # # Returns the converted String text for this ListItem def text # NOTE @text can be nil if dd node only has block content @text && (apply_subs @text, @subs) end # Public: Set the String text assigned to this ListItem attr_writer :text # Check whether this list item has simple content (no nested blocks aside from a single outline list). # Primarily relevant for outline lists. # # Return true if the list item contains no blocks or it contains a single outline list. Otherwise, return false. def simple? @blocks.empty? || (@blocks.size == 1 && List === (blk = @blocks[0]) && blk.outline?) end # Check whether this list item has compound content (nested blocks aside from a single outline list). # Primarily relevant for outline lists. # # Return true if the list item contains blocks other than a single outline list. Otherwise, return false. def compound? !simple? end # Internal: Fold the adjacent paragraph block into the list item text # # Returns nothing def fold_first @text = @text.nil_or_empty? ? 
@blocks.shift.source : %(#{@text}#{LF}#{@blocks.shift.source}) nil end def to_s %(#<#{self.class}@#{object_id} {list_context: #{parent.context.inspect}, text: #{@text.inspect}, blocks: #{(@blocks || []).size}}>) end end end asciidoctor-2.0.20/lib/asciidoctor/load.rb000066400000000000000000000114161443135032600204170ustar00rootroot00000000000000# frozen_string_literal: true module Asciidoctor class << self # Public: Parse the AsciiDoc source input into a {Document} # # Accepts input as an IO (or StringIO), String or String Array object. If the # input is a File, the object is expected to be opened for reading and is not # closed afterwards by this method. Information about the file (filename, # directory name, etc) gets assigned to attributes on the Document object. # # input - the AsciiDoc source as a IO, String or Array. # options - a String, Array or Hash of options to control processing (default: {}) # String and Array values are converted into a Hash. # See {Document#initialize} for details about these options. # # Returns the Document def load input, options = {} options = options.merge if (timings = options[:timings]) timings.start :read end if (options.key? :logger) && (logger = options[:logger]) != LoggerManager.logger LoggerManager.logger = logger || NullLogger.new end if !(attrs = options[:attributes]) attrs = {} elsif ::Hash === attrs attrs = attrs.merge elsif (defined? ::Java::JavaUtil::Map) && ::Java::JavaUtil::Map === attrs attrs = attrs.dup elsif ::Array === attrs attrs = {}.tap do |accum| attrs.each do |entry| k, _, v = entry.partition '=' accum[k] = v end end elsif ::String === attrs # condense and convert non-escaped spaces to null, unescape escaped spaces, then split on null attrs = {}.tap do |accum| attrs.gsub(SpaceDelimiterRx, '\1' + NULL).gsub(EscapedSpaceRx, '\1').split(NULL).each do |entry| k, _, v = entry.partition '=' accum[k] = v end end elsif (attrs.respond_to? :keys) && (attrs.respond_to? :[]) # coerce attrs to a real Hash attrs = {}.tap {|accum| attrs.keys.each {|k| accum[k] = attrs[k] } } else raise ::ArgumentError, %(illegal type for attributes option: #{attrs.class.ancestors.join ' < '}) end if ::File === input # File#mtime on JRuby 9.1 for Windows doesn't honor TZ environment variable; see https://github.com/jruby/jruby/issues/6659 options[:input_mtime] = RUBY_ENGINE == 'jruby' ? (::Time.at input.mtime.to_i) : input.mtime # NOTE defer setting infile and indir until we get a better sense of their purpose # TODO cli checks if input path can be read and is file, but might want to add check to API too attrs['docfile'] = input_path = ::File.absolute_path input.path attrs['docdir'] = ::File.dirname input_path attrs['docname'] = Helpers.basename input_path, (attrs['docfilesuffix'] = Helpers.extname input_path) source = input.read elsif input.respond_to? :read # NOTE tty, pipes & sockets can't be rewound, but can't be sniffed easily either # just fail the rewind operation silently to handle all cases input.rewind rescue nil source = input.read elsif ::String === input source = input elsif ::Array === input source = input.drop 0 elsif input raise ::ArgumentError, %(unsupported input type: #{input.class}) end if timings timings.record :read timings.start :parse end options[:attributes] = attrs doc = options[:parse] == false ? (Document.new source, options) : (Document.new source, options).parse timings.record :parse if timings doc rescue => e begin context = %(asciidoctor: FAILED: #{attrs['docfile'] || ''}: Failed to load AsciiDoc document) if e.respond_to? 
:exception # The original message must be explicitly preserved when wrapping a Ruby exception wrapped_e = e.exception %(#{context} - #{e.message}) # JRuby automatically sets backtrace; MRI did not until 2.6 wrapped_e.set_backtrace e.backtrace else # Likely a Java exception class wrapped_e = e.class.new context, e wrapped_e.stack_trace = e.stack_trace end rescue wrapped_e = e end raise wrapped_e end # Public: Parse the contents of the AsciiDoc source file into an Asciidoctor::Document # # input - the String AsciiDoc source filename # options - a String, Array or Hash of options to control processing (default: {}) # String and Array values are converted into a Hash. # See Asciidoctor::Document#initialize for details about options. # # Returns the Asciidoctor::Document def load_file filename, options = {} ::File.open(filename, FILE_READ_MODE) {|file| load file, options } end end end asciidoctor-2.0.20/lib/asciidoctor/logging.rb000066400000000000000000000054451443135032600211330ustar00rootroot00000000000000# frozen_string_literal: true require 'logger' module Asciidoctor class Logger < ::Logger attr_reader :max_severity def initialize *args super self.progname = 'asciidoctor' self.formatter = BasicFormatter.new self.level = WARN end def add severity, message = nil, progname = nil if (severity ||= UNKNOWN) > (@max_severity ||= severity) @max_severity = severity end super end class BasicFormatter < Formatter SEVERITY_LABEL_SUBSTITUTES = { 'WARN' => 'WARNING', 'FATAL' => 'FAILED' } def call severity, _, progname, msg %(#{progname}: #{SEVERITY_LABEL_SUBSTITUTES[severity] || severity}: #{::String === msg ? msg : msg.inspect}#{LF}) end end module AutoFormattingMessage def inspect (sloc = self[:source_location]) ? %(#{sloc}: #{self[:text]}) : self[:text] end end end class MemoryLogger < ::Logger SEVERITY_SYMBOL_BY_VALUE = (Severity.constants false).map {|c| [(Severity.const_get c), c] }.to_h attr_reader :messages def initialize self.level = WARN @messages = [] end def add severity, message = nil, progname = nil message ||= block_given? ? yield : progname @messages << { severity: SEVERITY_SYMBOL_BY_VALUE[severity || UNKNOWN], message: message } true end def clear @messages.clear end def empty? @messages.empty? end def max_severity empty? ? nil : @messages.map {|m| Severity.const_get m[:severity] }.max end end class NullLogger < ::Logger attr_reader :max_severity def initialize self.level = WARN end def add severity, message = nil, progname = nil if (severity ||= UNKNOWN) > (@max_severity ||= severity) @max_severity = severity end true end end module LoggerManager @logger_class = Logger class << self attr_accessor :logger_class # NOTE subsequent calls to logger access the logger via the logger property directly def logger pipe = $stderr memoize_logger @logger ||= (@logger_class.new pipe) end # Returns the specified Logger def logger= new_logger @logger = new_logger || (@logger_class.new $stderr) end private def memoize_logger class << self alias logger logger # suppresses warning from CRuby attr_reader :logger end end end end module Logging # Private: Mixes the {Logging} module as static methods into any class that includes the {Logging} module. 
# # into - The Class that includes the {Logging} module # # Returns nothing def self.included into into.extend Logging end private_class_method :included # use separate declaration for Ruby 2.0.x def logger LoggerManager.logger end def message_with_context text, context = {} ({ text: text }.merge context).extend Logger::AutoFormattingMessage end end end asciidoctor-2.0.20/lib/asciidoctor/parser.rb000066400000000000000000003462021443135032600210000ustar00rootroot00000000000000# frozen_string_literal: true module Asciidoctor # Internal: Methods to parse lines of AsciiDoc into an object hierarchy # representing the structure of the document. All methods are class methods and # should be invoked from the Parser class. The main entry point is ::next_block. # No Parser instances shall be discovered running around. (Any attempt to # instantiate a Parser will be futile). # # The object hierarchy created by the Parser consists of zero or more Section # and Block objects. Section objects may be nested and a Section object # contains zero or more Block objects. Block objects may be nested, but may # only contain other Block objects. Block objects which represent lists may # contain zero or more ListItem objects. # # Examples # # # Create a Reader for the AsciiDoc lines and retrieve the next block from it. # # Parser.next_block requires a parent, so we begin by instantiating an empty Document. # # doc = Document.new # reader = Reader.new lines # block = Parser.next_block(reader, doc) # block.class # # => Asciidoctor::Block class Parser include Logging BlockMatchData = Struct.new :context, :masq, :tip, :terminator # String for matching tab character TAB = ?\t # Regexp for leading tab indentation TabIndentRx = /^\t+/ StartOfBlockProc = proc {|l| ((l.start_with? '[') && (BlockAttributeLineRx.match? l)) || (is_delimited_block? l) } StartOfListProc = proc {|l| AnyListRx.match? l } StartOfBlockOrListProc = proc {|l| (is_delimited_block? l) || ((l.start_with? '[') && (BlockAttributeLineRx.match? l)) || (AnyListRx.match? l) } NoOp = nil AuthorKeys = ['author', 'authorinitials', 'firstname', 'middlename', 'lastname', 'email'] # Internal: A Hash mapping horizontal alignment abbreviations to alignments # that can be applied to a table cell (or to all cells in a column) TableCellHorzAlignments = { '<' => 'left', '>' => 'right', '^' => 'center' } # Internal: A Hash mapping vertical alignment abbreviations to alignments # that can be applied to a table cell (or to all cells in a column) TableCellVertAlignments = { '<' => 'top', '>' => 'bottom', '^' => 'middle' } # Internal: A Hash mapping styles abbreviations to styles that can be applied # to a table cell (or to all cells in a column) TableCellStyles = { 'd' => :none, 's' => :strong, 'e' => :emphasis, 'm' => :monospaced, 'h' => :header, 'l' => :literal, 'a' => :asciidoc } # Hide the default constructor to make sure this class doesn't get instantiated. # # Raises NoMethodError if an attempt is made to invoke the constructor. private_class_method :new # Public: Parses AsciiDoc source read from the Reader into the Document # # This method is the main entry-point into the Parser when parsing a full document. # It first looks for and, if found, processes the document title. It then # proceeds to iterate through the lines in the Reader, parsing the document # into nested Sections and Blocks. 
# # reader - the Reader holding the source lines of the document # document - the empty Document into which the lines will be parsed # options - a Hash of options to control processing # # returns the Document object def self.parse(reader, document, options = {}) block_attributes = parse_document_header(reader, document, (header_only = options[:header_only])) # NOTE don't use a postfix conditional here as it's known to confuse JRuby in certain circumstances unless header_only while reader.has_more_lines? new_section, block_attributes = next_section(reader, document, block_attributes) if new_section document.assign_numeral new_section document.blocks << new_section end end end document end # Public: Parses the document header of the AsciiDoc source read from the Reader # # Reads the AsciiDoc source from the Reader until the end of the document # header is reached. The Document object is populated with information from # the header (document title, document attributes, etc). The document # attributes are then saved to establish a save point to which to rollback # after parsing is complete. # # This method assumes that there are no blank lines at the start of the document, # which are automatically removed by the reader. # # returns the Hash of orphan block attributes captured above the header def self.parse_document_header(reader, document, header_only = false) # capture lines of block-level metadata and plow away comment lines that precede first block block_attrs = reader.skip_blank_lines ? (parse_block_metadata_lines reader, document) : {} doc_attrs = document.attributes # special case, block title is not allowed above document title, # carry attributes over to the document body if (implicit_doctitle = is_next_line_doctitle? reader, block_attrs, doc_attrs['leveloffset']) && block_attrs['title'] doc_attrs['authorcount'] = 0 return document.finalize_header block_attrs, false end # yep, document title logic in AsciiDoc is just insanity # definitely an area for spec refinement unless (val = doc_attrs['doctitle']).nil_or_empty? document.title = doctitle_attr_val = val end # if the first line is the document title, add a header to the document and parse the header metadata if implicit_doctitle source_location = reader.cursor if document.sourcemap document.id, _, l0_section_title, _, atx = parse_section_title reader, document if doctitle_attr_val # NOTE doctitle attribute (set above or below implicit doctitle) overrides implicit doctitle l0_section_title = nil else document.title = l0_section_title if (doc_attrs['doctitle'] = doctitle_attr_val = document.sub_specialchars l0_section_title).include? ATTR_REF_HEAD # QUESTION should we defer substituting attributes until the end of the header? or should we substitute again if necessary? doc_attrs['doctitle'] = doctitle_attr_val = document.sub_attributes doctitle_attr_val, attribute_missing: 'skip' end end document.header.source_location = source_location if source_location # default to compat-mode if document has setext doctitle doc_attrs['compat-mode'] = '' unless atx || (document.attribute_locked? 'compat-mode') if (separator = block_attrs['separator']) doc_attrs['title-separator'] = separator unless document.attribute_locked? 
'title-separator' end if (doc_id = block_attrs['id']) document.id = doc_id else doc_id = document.id end if (role = block_attrs['role']) doc_attrs['role'] = role end if (reftext = block_attrs['reftext']) doc_attrs['reftext'] = reftext end block_attrs.clear (modified_attrs = document.instance_variable_get :@attributes_modified).delete 'doctitle' parse_header_metadata reader, document, nil if modified_attrs.include? 'doctitle' if (val = doc_attrs['doctitle']).nil_or_empty? || val == doctitle_attr_val doc_attrs['doctitle'] = doctitle_attr_val else document.title = val end elsif !l0_section_title modified_attrs << 'doctitle' end document.register :refs, [doc_id, document] if doc_id elsif (author = doc_attrs['author']) author_metadata = process_authors author, true, false author_metadata.delete 'authorinitials' if doc_attrs['authorinitials'] doc_attrs.update author_metadata elsif (author = doc_attrs['authors']) author_metadata = process_authors author, true doc_attrs.update author_metadata else doc_attrs['authorcount'] = 0 end # parse title and consume name section of manpage document parse_manpage_header reader, document, block_attrs, header_only if document.doctype == 'manpage' # NOTE block_attrs are the block-level attributes (not document attributes) that # precede the first line of content (document title, first section or first block) document.finalize_header block_attrs end # Public: Parses the manpage header of the AsciiDoc source read from the Reader # # returns Nothing def self.parse_manpage_header(reader, document, block_attributes, header_only = false) if ManpageTitleVolnumRx =~ (doc_attrs = document.attributes)['doctitle'] doc_attrs['manvolnum'] = manvolnum = $2 doc_attrs['mantitle'] = (((mantitle = $1).include? ATTR_REF_HEAD) ? (document.sub_attributes mantitle) : mantitle).downcase else logger.error message_with_context 'non-conforming manpage title', source_location: (reader.cursor_at_line 1) # provide sensible fallbacks doc_attrs['mantitle'] = doc_attrs['doctitle'] || doc_attrs['docname'] || 'command' doc_attrs['manvolnum'] = manvolnum = '1' end if (manname = doc_attrs['manname']) && doc_attrs['manpurpose'] doc_attrs['manname-title'] ||= 'Name' doc_attrs['mannames'] = [manname] if document.backend == 'manpage' doc_attrs['docname'] = manname doc_attrs['outfilesuffix'] = %(.#{manvolnum}) end elsif header_only # done else reader.skip_blank_lines reader.save block_attributes.update parse_block_metadata_lines reader, document if (name_section_level = is_next_line_section? reader, {}) if name_section_level == 1 name_section = initialize_section reader, document, {} name_section_buffer = (reader.read_lines_until break_on_blank_lines: true, skip_line_comments: true).map {|l| l.lstrip }.join ' ' if ManpageNamePurposeRx =~ name_section_buffer if (manname = $1).include? ATTR_REF_HEAD manname = document.sub_attributes manname end if manname.include? ',' manname = (mannames = (manname.split ',').map {|n| n.lstrip })[0] else mannames = [manname] end if (manpurpose = $2).include? 
ATTR_REF_HEAD manpurpose = document.sub_attributes manpurpose end doc_attrs['manname-title'] ||= name_section.title doc_attrs['manname-id'] = name_section.id if name_section.id doc_attrs['manname'] = manname doc_attrs['mannames'] = mannames doc_attrs['manpurpose'] = manpurpose if document.backend == 'manpage' doc_attrs['docname'] = manname doc_attrs['outfilesuffix'] = %(.#{manvolnum}) end else error_msg = 'non-conforming name section body' end else error_msg = 'name section must be at level 1' end else error_msg = 'name section expected' end if error_msg reader.restore_save logger.error message_with_context error_msg, source_location: reader.cursor doc_attrs['manname'] = manname = doc_attrs['docname'] || 'command' doc_attrs['mannames'] = [manname] if document.backend == 'manpage' doc_attrs['docname'] = manname doc_attrs['outfilesuffix'] = %(.#{manvolnum}) end else reader.discard_save end end nil end # Public: Return the next section from the Reader. # # This method process block metadata, content and subsections for this # section and returns the Section object and any orphaned attributes. # # If the parent is a Document and has a header (document title), then # this method will put any non-section blocks at the start of document # into a preamble Block. If there are no such blocks, the preamble is # dropped. # # Since we are reading line-by-line, there's a chance that metadata # that should be associated with the following block gets consumed. # To deal with this case, the method returns a running Hash of # "orphaned" attributes that get passed to the next Section or Block. # # reader - the source Reader # parent - the parent Section or Document of this new section # attributes - a Hash of metadata that was left orphaned from the # previous Section. # # Examples # # source # # => "= Greetings\n\nThis is my doc.\n\n== Salutations\n\nIt is awesome." # # reader = Reader.new source, nil, normalize: true # # create empty document to parent the section # # and hold attributes extracted from header # doc = Document.new # # Parser.next_section(reader, doc)[0].title # # => "Greetings" # # Parser.next_section(reader, doc)[0].title # # => "Salutations" # # returns a two-element Array containing the Section and Hash of orphaned attributes def self.next_section reader, parent, attributes = {} preamble = intro = part = false # check if we are at the start of processing the document # NOTE we could drop a hint in the attributes to indicate # that we are at a section title (so we don't have to check) if parent.context == :document && parent.blocks.empty? && ((has_header = parent.header?) || (attributes.delete 'invalid-header') || !(is_next_line_section? reader, attributes)) book = (document = parent).doctype == 'book' if has_header || (book && attributes[1] != 'abstract') preamble = intro = Block.new parent, :preamble, content_model: :compound preamble.title = parent.attr 'preface-title' if book && (parent.attr? 'preface-title') parent.blocks << preamble end section = parent current_level = 0 if parent.attributes.key? 'fragment' expected_next_level = -1 # small tweak to allow subsequent level-0 sections for book doctype elsif book expected_next_level, expected_next_level_alt = 1, 0 else expected_next_level = 1 end else book = (document = parent.document).doctype == 'book' section = initialize_section reader, parent, attributes # clear attributes except for title attribute, which must be carried over to next content block attributes = (title = attributes['title']) ? 
{ 'title' => title } : {} expected_next_level = (current_level = section.level) + 1 if current_level == 0 part = book elsif current_level == 1 && section.special # NOTE technically preface sections are only permitted in the book doctype unless (sectname = section.sectname) == 'appendix' || sectname == 'preface' || sectname == 'abstract' expected_next_level = nil end end end reader.skip_blank_lines # Parse lines belonging to this section and its subsections until we # reach the end of this section level # # 1. first look for metadata thingies (anchor, attribute list, block title line, etc) # 2. then look for a section, recurse if found # 3. then process blocks # # We have to parse all the metadata lines before continuing with the loop, # otherwise subsequent metadata lines get interpreted as block content while reader.has_more_lines? parse_block_metadata_lines reader, document, attributes if (next_level = is_next_line_section?(reader, attributes)) if document.attr? 'leveloffset' next_level += (document.attr 'leveloffset').to_i next_level = 0 if next_level < 0 end if next_level > current_level if expected_next_level unless next_level == expected_next_level || (expected_next_level_alt && next_level == expected_next_level_alt) || expected_next_level < 0 expected_condition = expected_next_level_alt ? %(expected levels #{expected_next_level_alt} or #{expected_next_level}) : %(expected level #{expected_next_level}) logger.warn message_with_context %(section title out of sequence: #{expected_condition}, got level #{next_level}), source_location: reader.cursor end else logger.error message_with_context %(#{sectname} sections do not support nested sections), source_location: reader.cursor end new_section, attributes = next_section reader, section, attributes section.assign_numeral new_section section.blocks << new_section elsif next_level == 0 && section == document logger.error message_with_context 'level 0 sections can only be used when doctype is book', source_location: reader.cursor unless book new_section, attributes = next_section reader, section, attributes section.assign_numeral new_section section.blocks << new_section else # close this section (and break out of the nesting) to begin a new one break end else # just take one block or else we run the risk of overrunning section boundaries block_cursor = reader.cursor if (new_block = next_block reader, intro || section, attributes, parse_metadata: false) # REVIEW this may be doing too much if part if !section.blocks? 
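              # NOTE the part has no blocks yet; the branches below make sure this first block either
              # becomes, or is wrapped in, the single [partintro] open block that holds the part intro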
              # if this is not a [partintro] open block, enclose it in a [partintro] open block
              if new_block.style != 'partintro'
                # if this is already a normal open block, simply add the partintro style
                if new_block.style == 'open' && new_block.context == :open
                  new_block.style = 'partintro'
                else
                  new_block.parent = (intro = Block.new section, :open, content_model: :compound)
                  intro.style = 'partintro'
                  section.blocks << intro
                end
              # if this is a [partintro] paragraph, convert it to a [partintro] open block w/ single paragraph
              elsif new_block.content_model == :simple
                new_block.content_model = :compound
                new_block << (Block.new new_block, :paragraph, source: new_block.lines, subs: new_block.subs)
                new_block.lines.clear
                new_block.subs.clear
              end
            elsif section.blocks.size == 1
              first_block = section.blocks[0]
              # open the [partintro] open block for appending
              if !intro && first_block.content_model == :compound
                logger.error message_with_context 'illegal block content outside of partintro block', source_location: block_cursor
              # rebuild [partintro] paragraph as an open block
              elsif first_block.content_model != :compound
                new_block.parent = (intro = Block.new section, :open, content_model: :compound)
                if first_block.style == (intro.style = 'partintro')
                  first_block.context = :paragraph
                  first_block.style = nil
                end
                section.blocks.shift
                intro << first_block
                section.blocks << intro
              end
            end
          end
          (intro || section).blocks << new_block
          attributes.clear
        end
      end

      reader.skip_blank_lines || break
    end

    if part
      unless section.blocks? && section.blocks[-1].context == :section
        logger.error message_with_context 'invalid part, must have at least one section (e.g., chapter, appendix, etc.)', source_location: reader.cursor
      end
    # NOTE we could try to avoid creating a preamble in the first place, though
    # that would require reworking assumptions in next_section since the preamble
    # is treated like an untitled section
    elsif preamble # implies parent == document
      if preamble.blocks?
        if book || document.blocks[1] || !Compliance.unwrap_standalone_preamble
          preamble.source_location = preamble.blocks[0].source_location if document.sourcemap
        # unwrap standalone preamble (i.e., document has no sections) except for books, if permissible
        else
          document.blocks.shift
          while (child_block = preamble.blocks.shift)
            document << child_block
          end
        end
      # drop the preamble if it has no content
      else
        document.blocks.shift
      end
    end

    # The attributes returned here are orphaned attributes that fall at the end
    # of a section that need to get transferred to the next section
    # see "trailing block attributes transfer to the following section" in
    # test/attributes_test.rb for an example
    [section == parent ? nil : section, attributes.merge]
  end

  # Public: Parse and return the next Block at the Reader's current location
  #
  # This method begins by skipping over blank lines to find the start of the
  # next block (paragraph, block macro, or delimited block). If a block is
  # found, that block is parsed, initialized as a Block object, and returned.
  # Otherwise, the method returns nothing.
  #
  # Regular expressions from the Asciidoctor module are used to match block
  # boundaries. The ensuing lines are then processed according to the content
  # model.
  #
  # reader - The Reader from which to retrieve the next Block.
  # parent - The Document, Section or Block to which the next Block belongs.
  # attributes - A Hash of attributes that will become the attributes
  #              associated with the parsed Block (default: {}).
# options - An options Hash to control parsing (default: {}): # * :text_only indicates that the parser is only looking for text content # * :list_type indicates this block will be attached to a list item in a list of the specified type # # Returns a Block object built from the parsed content of the processed # lines, or nothing if no block is found. def self.next_block(reader, parent, attributes = {}, options = {}) # skip ahead to the block content; bail if we've reached the end of the reader return unless (skipped = reader.skip_blank_lines) # check for option to find list item text only # if skipped a line, assume a list continuation was # used and block content is acceptable if (text_only = options[:text_only]) && skipped > 0 options.delete :text_only text_only = nil end document = parent.document if options.fetch :parse_metadata, true # read lines until there are no more metadata lines to read; note that :text_only option impacts parsing rules while parse_block_metadata_line reader, document, attributes, options # discard the line just processed reader.shift # QUESTION should we clear the attributes? no known cases when it's necessary reader.skip_blank_lines || return end end if (extensions = document.extensions) block_extensions, block_macro_extensions = extensions.blocks?, extensions.block_macros? end # QUESTION should we introduce a parsing context object? reader.mark this_line, doc_attrs, style = reader.read_line, document.attributes, attributes[1] block = block_context = cloaked_context = terminator = nil if (delimited_block = is_delimited_block? this_line, true) block_context = cloaked_context = delimited_block.context terminator = delimited_block.terminator if style unless style == block_context.to_s if delimited_block.masq.include? style block_context = style.to_sym elsif delimited_block.masq.include?('admonition') && ADMONITION_STYLES.include?(style) block_context = :admonition elsif block_extensions && extensions.registered_for_block?(style, block_context) block_context = style.to_sym else logger.debug message_with_context %(unknown style for #{block_context} block: #{style}), source_location: reader.cursor_at_mark if logger.debug? style = block_context.to_s end end else style = attributes['style'] = block_context.to_s end end # this loop is used for flow control; it only executes once, and only when delimited_block is not set # break once a block is found or at end of loop # returns nil if the line should be dropped while true # process lines verbatim if style && Compliance.strict_verbatim_paragraphs && (VERBATIM_STYLES.include? style) block_context = style.to_sym reader.unshift_line this_line # advance to block parsing => break end # process lines normally if text_only indented = this_line.start_with? ' ', TAB else # NOTE move this declaration up if we need it when text_only is false md_syntax = Compliance.markdown_syntax if this_line.start_with? ' ' indented, ch0 = true, ' ' # QUESTION should we test line length? if md_syntax && this_line.lstrip.start_with?(*MARKDOWN_THEMATIC_BREAK_CHARS.keys) && #!(this_line.start_with? ' ') && (MarkdownThematicBreakRx.match? this_line) # NOTE we're letting break lines (horizontal rule, page_break, etc) have attributes block = Block.new(parent, :thematic_break, content_model: :empty) break end elsif this_line.start_with? TAB indented, ch0 = true, TAB else indented, ch0 = false, this_line.chr layout_break_chars = md_syntax ? HYBRID_LAYOUT_BREAK_CHARS : LAYOUT_BREAK_CHARS if (layout_break_chars.key? ch0) && (md_syntax ? 
(ExtLayoutBreakRx.match? this_line) : (uniform? this_line, ch0, (ll = this_line.length)) && ll > 2) # NOTE we're letting break lines (horizontal rule, page_break, etc) have attributes block = Block.new(parent, layout_break_chars[ch0], content_model: :empty) break # NOTE very rare that a text-only line will end in ] (e.g., inline macro), so check that first elsif (this_line.end_with? ']') && (this_line.include? '::') #if (this_line.start_with? 'image', 'video', 'audio') && BlockMediaMacroRx =~ this_line if (ch0 == 'i' || (this_line.start_with? 'video:', 'audio:')) && BlockMediaMacroRx =~ this_line blk_ctx, target, blk_attrs = $1.to_sym, $2, $3 block = Block.new parent, blk_ctx, content_model: :empty if blk_attrs case blk_ctx when :video posattrs = ['poster', 'width', 'height'] when :audio posattrs = [] else # :image posattrs = ['alt', 'width', 'height'] end block.parse_attributes blk_attrs, posattrs, sub_input: true, into: attributes end # style doesn't have special meaning for media macros attributes.delete 'style' if attributes.key? 'style' if target.include? ATTR_REF_HEAD if (expanded_target = block.sub_attributes target).empty? && (doc_attrs['attribute-missing'] || Compliance.attribute_missing) == 'drop-line' && (block.sub_attributes target + ' ', attribute_missing: 'drop-line', drop_line_severity: :ignore).empty? attributes.clear return else target = expanded_target end end if blk_ctx == :image document.register :images, target attributes['imagesdir'] = doc_attrs['imagesdir'] # NOTE style is the value of the first positional attribute in the block attribute line attributes['alt'] ||= style || (attributes['default-alt'] = Helpers.basename(target, true).tr('_-', ' ')) unless (scaledwidth = attributes.delete 'scaledwidth').nil_or_empty? # NOTE assume % units if not specified attributes['scaledwidth'] = (TrailingDigitsRx.match? scaledwidth) ? %(#{scaledwidth}%) : scaledwidth end if attributes['title'] block.title = block_title = attributes.delete 'title' block.assign_caption (attributes.delete 'caption'), 'figure' end end attributes['target'] = target break elsif ch0 == 't' && (this_line.start_with? 'toc:') && BlockTocMacroRx =~ this_line block = Block.new parent, :toc, content_model: :empty block.parse_attributes $1, [], into: attributes if $1 break elsif block_macro_extensions ? (CustomBlockMacroRx =~ this_line && (extension = extensions.registered_for_block_macro? $1) || (report_unknown_block_macro = logger.debug?)) : (logger.debug? && (report_unknown_block_macro = CustomBlockMacroRx =~ this_line)) if report_unknown_block_macro logger.debug message_with_context %(unknown name for block macro: #{$1}), source_location: reader.cursor_at_mark else content = $3 if (target = $2).include? ATTR_REF_HEAD if (expanded_target = parent.sub_attributes target).empty? && (doc_attrs['attribute-missing'] || Compliance.attribute_missing) == 'drop-line' && (parent.sub_attributes target + ' ', attribute_missing: 'drop-line', drop_line_severity: :ignore).empty? 
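              # NOTE the macro target contains an attribute reference that resolves to empty and the
              # attribute-missing policy is drop-line, so drop the entire block macro line: discard
              # any pending block attributes and return without producing a block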
attributes.clear return else target = expanded_target end end if (ext_config = extension.config)[:content_model] == :attributes document.parse_attributes content, ext_config[:positional_attrs] || ext_config[:pos_attrs] || [], sub_input: true, into: attributes if content else attributes['text'] = content || '' end if (default_attrs = ext_config[:default_attrs]) attributes.update(default_attrs) {|_, old_v| old_v } end if (block = extension.process_method[parent, target, attributes]) && block != parent attributes.replace block.attributes break else attributes.clear return end end end end end end # haven't found anything yet, continue if !indented && (ch0 ||= this_line.chr) == '<' && CalloutListRx =~ this_line reader.unshift_line this_line block = parse_callout_list(reader, $~, parent, document.callouts) attributes['style'] = 'arabic' break elsif UnorderedListRx.match? this_line reader.unshift_line this_line attributes['style'] = style = 'bibliography' if !style && Section === parent && parent.sectname == 'bibliography' block = parse_list(reader, :ulist, parent, style) break elsif OrderedListRx.match? this_line reader.unshift_line this_line block = parse_list(reader, :olist, parent, style) attributes['style'] = block.style if block.style break elsif ((this_line.include? '::') || (this_line.include? ';;')) && DescriptionListRx =~ this_line reader.unshift_line this_line block = parse_description_list(reader, $~, parent) break elsif (style == 'float' || style == 'discrete') && (Compliance.underline_style_section_titles ? (is_section_title? this_line, reader.peek_line) : !indented && (atx_section_title? this_line)) reader.unshift_line this_line float_id, float_reftext, block_title, float_level = parse_section_title reader, document, attributes['id'] attributes['reftext'] = float_reftext if float_reftext block = Block.new(parent, :floating_title, content_model: :empty) block.title = block_title attributes.delete 'title' block.id = float_id || ((doc_attrs.key? 'sectids') ? (Section.generate_id block.title, document) : nil) block.level = float_level break # FIXME create another set for "passthrough" styles # FIXME make this more DRY! elsif style && style != 'normal' if PARAGRAPH_STYLES.include?(style) block_context = style.to_sym cloaked_context = :paragraph reader.unshift_line this_line # advance to block parsing => break elsif ADMONITION_STYLES.include?(style) block_context = :admonition cloaked_context = :paragraph reader.unshift_line this_line # advance to block parsing => break elsif block_extensions && extensions.registered_for_block?(style, :paragraph) block_context = style.to_sym cloaked_context = :paragraph reader.unshift_line this_line # advance to block parsing => break else logger.debug message_with_context %(unknown style for paragraph: #{style}), source_location: reader.cursor_at_mark if logger.debug? style = nil # continue to process paragraph end end reader.unshift_line this_line # a literal paragraph: contiguous lines starting with at least one whitespace character # NOTE style can only be nil or "normal" at this point if indented && !style lines = read_paragraph_lines reader, (content_adjacent = skipped == 0 ? options[:list_type] : nil), skip_line_comments: text_only adjust_indentation! 
lines if text_only || content_adjacent == :dlist # this block gets folded into the list item text block = Block.new(parent, :paragraph, content_model: :simple, source: lines, attributes: attributes) else block = Block.new(parent, :literal, content_model: :verbatim, source: lines, attributes: attributes) end # a normal paragraph: contiguous non-blank/non-continuation lines (left-indented or normal style) else lines = read_paragraph_lines reader, skipped == 0 && options[:list_type], skip_line_comments: true # NOTE don't check indented here since it's extremely rare #if text_only || indented if text_only # if [normal] is used over an indented paragraph, shift content to left margin # QUESTION do we even need to shift since whitespace is normalized by XML in this case? adjust_indentation! lines if indented && style == 'normal' block = Block.new(parent, :paragraph, content_model: :simple, source: lines, attributes: attributes) elsif (ADMONITION_STYLE_HEADS.include? ch0) && (this_line.include? ':') && (AdmonitionParagraphRx =~ this_line) lines[0] = $' # string after match attributes['name'] = admonition_name = (attributes['style'] = $1).downcase attributes['textlabel'] = (attributes.delete 'caption') || doc_attrs[%(#{admonition_name}-caption)] block = Block.new(parent, :admonition, content_model: :simple, source: lines, attributes: attributes) elsif md_syntax && ch0 == '>' && this_line.start_with?('> ') lines.map! {|line| line == '>' ? (line.slice 1, line.length) : ((line.start_with? '> ') ? (line.slice 2, line.length) : line) } if lines[-1].start_with? '-- ' credit_line = (credit_line = lines.pop).slice 3, credit_line.length unless lines.empty? lines.pop while lines[-1].empty? end end attributes['style'] = 'quote' # NOTE will only detect discrete (aka free-floating) headings # TODO could assume a discrete heading when inside a block context # FIXME Reader needs to be created w/ line info block = build_block(:quote, :compound, false, parent, Reader.new(lines), attributes) if credit_line attribution, citetitle = (block.apply_subs credit_line).split ', ', 2 attributes['attribution'] = attribution if attribution attributes['citetitle'] = citetitle if citetitle end elsif ch0 == '"' && lines.size > 1 && (lines[-1].start_with? '-- ') && (lines[-2].end_with? '"') lines[0] = this_line.slice 1, this_line.length # strip leading quote credit_line = (credit_line = lines.pop).slice 3, credit_line.length lines.pop while lines[-1].empty? lines << lines.pop.chop # strip trailing quote attributes['style'] = 'quote' block = Block.new(parent, :quote, content_model: :simple, source: lines, attributes: attributes) attribution, citetitle = (block.apply_subs credit_line).split ', ', 2 attributes['attribution'] = attribution if attribution attributes['citetitle'] = citetitle if citetitle else # if [normal] is used over an indented paragraph, shift content to left margin # QUESTION do we even need to shift since whitespace is normalized by XML in this case? adjust_indentation! 
lines if indented && style == 'normal' block = Block.new(parent, :paragraph, content_model: :simple, source: lines, attributes: attributes) end catalog_inline_anchors((lines.join LF), block, document, reader) end break # forbid loop from executing more than once end unless delimited_block # either delimited block or styled paragraph unless block case block_context when :listing, :source if block_context == :source || (!attributes[1] && (language = attributes[2] || doc_attrs['source-language'])) if language attributes['style'] = 'source' attributes['language'] = language AttributeList.rekey attributes, [nil, nil, 'linenums'] else AttributeList.rekey attributes, [nil, 'language', 'linenums'] if doc_attrs.key? 'source-language' attributes['language'] = doc_attrs['source-language'] end unless attributes.key? 'language' end if attributes['linenums-option'] || doc_attrs['source-linenums-option'] attributes['linenums'] = '' end unless attributes.key? 'linenums' if doc_attrs.key? 'source-indent' attributes['indent'] = doc_attrs['source-indent'] end unless attributes.key? 'indent' end block = build_block(:listing, :verbatim, terminator, parent, reader, attributes) when :fenced_code attributes['style'] = 'source' if (ll = this_line.length) > 3 if (comma_idx = (language = this_line.slice 3, ll).index ',') if comma_idx > 0 language = (language.slice 0, comma_idx).strip attributes['linenums'] = '' if comma_idx < ll - 4 elsif ll > 4 attributes['linenums'] = '' end else language = language.lstrip end end if language.nil_or_empty? attributes['language'] = doc_attrs['source-language'] if doc_attrs.key? 'source-language' else attributes['language'] = language end if attributes['linenums-option'] || doc_attrs['source-linenums-option'] attributes['linenums'] = '' end unless attributes.key? 'linenums' if doc_attrs.key? 'source-indent' attributes['indent'] = doc_attrs['source-indent'] end unless attributes.key? 'indent' terminator = terminator.slice 0, 3 block = build_block(:listing, :verbatim, terminator, parent, reader, attributes) when :table block_cursor = reader.cursor block_reader = Reader.new reader.read_lines_until(terminator: terminator, skip_line_comments: true, context: :table, cursor: :at_mark), block_cursor # NOTE it's very rare that format is set when using a format hint char, so short-circuit unless terminator.start_with? '|', '!' # NOTE infer dsv once all other format hint chars are ruled out attributes['format'] ||= (terminator.start_with? ',') ? 'csv' : 'dsv' end block = parse_table(block_reader, parent, attributes) when :sidebar block = build_block(block_context, :compound, terminator, parent, reader, attributes) when :admonition attributes['name'] = admonition_name = style.downcase attributes['textlabel'] = (attributes.delete 'caption') || doc_attrs[%(#{admonition_name}-caption)] block = build_block(block_context, :compound, terminator, parent, reader, attributes) when :open, :abstract, :partintro block = build_block(:open, :compound, terminator, parent, reader, attributes) when :literal block = build_block(block_context, :verbatim, terminator, parent, reader, attributes) when :example attributes['caption'] = '' if attributes['collapsible-option'] block = build_block(block_context, :compound, terminator, parent, reader, attributes) when :quote, :verse AttributeList.rekey(attributes, [nil, 'attribution', 'citetitle']) block = build_block(block_context, (block_context == :verse ? 
:verbatim : :compound), terminator, parent, reader, attributes) when :stem, :latexmath, :asciimath attributes['style'] = STEM_TYPE_ALIASES[attributes[2] || doc_attrs['stem']] if block_context == :stem block = build_block(:stem, :raw, terminator, parent, reader, attributes) when :pass block = build_block(block_context, :raw, terminator, parent, reader, attributes) when :comment build_block(block_context, :skip, terminator, parent, reader, attributes) attributes.clear return else if block_extensions && (extension = extensions.registered_for_block? block_context, cloaked_context) unless (content_model = (ext_config = extension.config)[:content_model]) == :skip unless (positional_attrs = ext_config[:positional_attrs] || ext_config[:pos_attrs]).nil_or_empty? AttributeList.rekey(attributes, [nil] + positional_attrs) end if (default_attrs = ext_config[:default_attrs]) default_attrs.each {|k, v| attributes[k] ||= v } end # QUESTION should we clone the extension for each cloaked context and set in config? attributes['cloaked-context'] = cloaked_context end unless (block = build_block block_context, content_model, terminator, parent, reader, attributes, extension: extension) attributes.clear return end else # this should only happen if there's a misconfiguration raise %(Unsupported block type #{block_context} at #{reader.cursor}) end end end # FIXME we've got to clean this up, it's horrible! block.source_location = reader.cursor_at_mark if document.sourcemap # FIXME title and caption should be assigned when block is constructed (though we need to handle all cases) if attributes['title'] block.title = block_title = attributes.delete 'title' block.assign_caption attributes.delete 'caption' if CAPTION_ATTRIBUTE_NAMES[block.context] end # TODO eventually remove the style attribute from the attributes hash #block.style = attributes.delete 'style' block.style = attributes['style'] if (block_id = block.id || (block.id = attributes['id'])) # convert title to resolve attributes while in scope block.title if block_title ? (block_title.include? ATTR_REF_HEAD) : block.title? unless document.register :refs, [block_id, block] logger.warn message_with_context %(id assigned to block already in use: #{block_id}), source_location: reader.cursor_at_mark end end # FIXME remove the need for this update! block.update_attributes attributes unless attributes.empty? block.commit_subs #if doc_attrs.key? :pending_attribute_entries # doc_attrs.delete(:pending_attribute_entries).each do |entry| # entry.save_to block.attributes # end #end if block.sub? :callouts # No need to sub callouts if none are found when cataloging block.remove_sub :callouts unless catalog_callouts block.source, document end block end def self.read_paragraph_lines reader, break_at_list, opts = {} opts[:break_on_blank_lines] = true opts[:break_on_list_continuation] = true opts[:preserve_last_line] = true break_condition = (break_at_list ? (Compliance.block_terminates_paragraph ? StartOfBlockOrListProc : StartOfListProc) : (Compliance.block_terminates_paragraph ? StartOfBlockProc : NoOp)) reader.read_lines_until opts, &break_condition end # Public: Determines whether this line is the start of a known delimited block. # # Returns the BlockMatchData (if return_match_data is true) or true (if return_match_data is false) if this line is # the start of a delimited block, otherwise nothing. def self.is_delimited_block? 
line, return_match_data = nil # highly optimized for best performance return unless (line_len = line.length) > 1 && DELIMITED_BLOCK_HEADS[line.slice 0, 2] # open block if line_len == 2 tip = line tip_len = 2 else # all other delimited blocks, including fenced code if line_len < 5 tip = line tip_len = line_len else tip = line.slice 0, (tip_len = 4) end # special case for fenced code blocks if Compliance.markdown_syntax && (tip.start_with? '`') if tip_len == 4 if tip == '````' || (tip = tip.chop) != '```' return end line = tip line_len = tip_len = 3 elsif tip != '```' return end elsif tip_len == 3 return end end # NOTE line matches the tip when delimiter is minimum length or fenced code context, masq = DELIMITED_BLOCKS[tip] if context && (line_len == tip_len || (uniform? (line.slice 1, line_len), DELIMITED_BLOCK_TAILS[tip], (line_len - 1))) return_match_data ? (BlockMatchData.new context, masq, tip, line) : true end end # whether a block supports compound content should be a config setting # if terminator is false, that means the all the lines in the reader should be parsed # NOTE could invoke filter in here, before and after parsing def self.build_block(block_context, content_model, terminator, parent, reader, attributes, options = {}) case content_model when :skip skip_processing, parse_as_content_model = true, :simple when :raw skip_processing, parse_as_content_model = false, :simple else skip_processing, parse_as_content_model = false, content_model end if terminator.nil? if parse_as_content_model == :verbatim lines = reader.read_lines_until break_on_blank_lines: true, break_on_list_continuation: true else content_model = :simple if content_model == :compound # TODO we could also skip processing if we're able to detect reader is a BlockReader lines = read_paragraph_lines reader, false, skip_line_comments: true, skip_processing: skip_processing # QUESTION check for empty lines after grabbing lines for simple content model? end block_reader = nil elsif parse_as_content_model != :compound lines = reader.read_lines_until terminator: terminator, skip_processing: skip_processing, context: block_context, cursor: :at_mark block_reader = nil # terminator is false when reader has already been prepared elsif terminator == false lines = nil block_reader = reader else lines = nil block_cursor = reader.cursor block_reader = Reader.new reader.read_lines_until(terminator: terminator, skip_processing: skip_processing, context: block_context, cursor: :at_mark), block_cursor end case content_model when :verbatim tab_size = (attributes['tabsize'] || parent.document.attributes['tabsize']).to_i if (indent = attributes['indent']) adjust_indentation! lines, indent.to_i, tab_size elsif tab_size > 0 adjust_indentation! lines, -1, tab_size end when :skip # QUESTION should we still invoke process method if extension is specified? return end if (extension = options[:extension]) # QUESTION do we want to delete the style? attributes.delete('style') if (block = extension.process_method[parent, block_reader || (Reader.new lines), attributes.merge]) && block != parent attributes.replace block.attributes # NOTE an extension can change the content model from :simple to :compound. It's up to the extension # to decide which one to use. The extension can consult the cloaked-context attribute to determine # if the input is a paragraph or delimited block. if block.content_model == :compound && Block === block && !(lines = block.lines).empty? 
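          # NOTE the extension returned a compound block that still has unparsed lines, so switch
          # to the compound content model and let parse_blocks below parse those lines into child blocks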
content_model = :compound block_reader = Reader.new lines end else return end else block = Block.new(parent, block_context, content_model: content_model, source: lines, attributes: attributes) end # reader is confined within boundaries of a delimited block, so look for # blocks until there are no more lines parse_blocks block_reader, block if content_model == :compound block end # Public: Parse blocks from this reader until there are no more lines. # # This method calls Parser#next_block until there are no more lines in the # Reader. It does not consider sections because it's assumed the Reader only # has lines which are within a delimited block region. # # reader - The Reader containing the lines to process # parent - The parent Block to which to attach the parsed blocks # # Returns nothing. def self.parse_blocks(reader, parent, attributes = nil) if attributes while ((block = next_block reader, parent, attributes.merge) && parent.blocks << block) || reader.has_more_lines?; end else while ((block = next_block reader, parent) && parent.blocks << block) || reader.has_more_lines?; end end nil end # Internal: Parse and construct an ordered or unordered list at the current position of the Reader # # reader - The Reader from which to retrieve the list # list_type - A Symbol representing the list type (:olist for ordered, :ulist for unordered) # parent - The parent Block to which this list belongs # style - The block style assigned to this list (optional, default: nil) # # Returns the Block encapsulating the parsed unordered or ordered list def self.parse_list reader, list_type, parent, style list_block = List.new parent, list_type list_rx = ListRxMap[list_type] while reader.has_more_lines? && list_rx =~ reader.peek_line # NOTE parse_list_item will stop at sibling item or end of list; never sees ancestor items if (list_item = parse_list_item reader, list_block, $~, $1, style) list_block.items << list_item end reader.skip_blank_lines || break end list_block end # Internal: Catalog any callouts found in the text, but don't process them # # text - The String of text in which to look for callouts # document - The current document in which the callouts are stored # # Returns A Boolean indicating whether callouts were found def self.catalog_callouts(text, document) found = false autonum = 0 text.scan CalloutScanRx do document.callouts.register $2 == '.' ? (autonum += 1).to_s : $2 unless $&.start_with? '\\' # we have to mark as found even if it's escaped so it can be unescaped found = true end if text.include? '<' found end # Internal: Catalog a matched inline anchor. # # id - The String id of the anchor # reftext - The optional String reference text of the anchor # node - The AbstractNode parent node of the anchor node # location - The source location (file and line) where the anchor was found # doc - The document to which the node belongs; computed from node if not specified # # Returns nothing def self.catalog_inline_anchor id, reftext, node, location, doc = node.document reftext = doc.sub_attributes reftext if reftext && (reftext.include? 
ATTR_REF_HEAD) unless doc.register :refs, [id, (Inline.new node, :anchor, reftext, type: :ref, id: id)] location = location.cursor if Reader === location logger.warn message_with_context %(id assigned to anchor already in use: #{id}), source_location: location end nil end # Internal: Catalog any inline anchors found in the text (but don't convert) # # text - The String text in which to look for inline anchors # block - The block in which the references should be searched # document - The current Document on which the references are stored # # Returns nothing def self.catalog_inline_anchors text, block, document, reader text.scan InlineAnchorScanRx do if (id = $1) next if (reftext = $2) && (reftext.include? ATTR_REF_HEAD) && (reftext = document.sub_attributes reftext).empty? else id = $3 if (reftext = $4) if reftext.include? ']' reftext = reftext.gsub '\]', ']' reftext = document.sub_attributes reftext if reftext.include? ATTR_REF_HEAD elsif reftext.include? ATTR_REF_HEAD reftext = nil if (reftext = document.sub_attributes reftext).empty? end end end unless document.register :refs, [id, (Inline.new block, :anchor, reftext, type: :ref, id: id)] location = reader.cursor_at_mark if (offset = ($`.count LF) + (($&.start_with? LF) ? 1 : 0)) > 0 (location = location.dup).advance offset end logger.warn message_with_context %(id assigned to anchor already in use: #{id}), source_location: location end end if (text.include? '[[') || (text.include? 'or:') nil end # Internal: Catalog the bibliography inline anchor found in the start of the list item (but don't convert) # # id - The String id of the anchor # reftext - The optional String reference text of the anchor # node - The AbstractNode parent node of the anchor node # reader - The source Reader for the current Document, positioned at the current list item # # Returns nothing def self.catalog_inline_biblio_anchor id, reftext, node, reader # QUESTION should we sub attributes in reftext (like with regular anchors)? unless node.document.register :refs, [id, (Inline.new node, :anchor, reftext && %([#{reftext}]), type: :bibref, id: id)] logger.warn message_with_context %(id assigned to bibliography anchor already in use: #{id}), source_location: reader.cursor end nil end # Internal: Parse and construct a description list Block from the current position of the Reader # # reader - The Reader from which to retrieve the description list # match - The Regexp match for the head of the list # parent - The parent Block to which this description list belongs # # Returns the Block encapsulating the parsed description list def self.parse_description_list reader, match, parent list_block = List.new parent, :dlist # detects a description list item that uses the same delimiter (::, :::, :::: or ;;) sibling_pattern = DescriptionListSiblingRx[match[2]] list_block.items << (current_pair = parse_list_item reader, list_block, match, sibling_pattern) while reader.has_more_lines? && sibling_pattern =~ reader.peek_line next_pair = parse_list_item reader, list_block, $~, sibling_pattern if current_pair[1] list_block.items << (current_pair = next_pair) else current_pair[0] << next_pair[0][0] current_pair[1] = next_pair[1] end end list_block end # Internal: Parse and construct a callout list Block from the current position of the Reader and # advance the document callouts catalog to the next list. # # reader - The Reader from which to retrieve the callout list. # match - The Regexp match containing the head of the list. 
  # parent - The parent Block to which this callout list belongs.
  # callouts - The document callouts catalog.
  #
  # Returns the Block that represents the parsed callout list.
  def self.parse_callout_list reader, match, parent, callouts
    list_block = List.new(parent, :colist)
    next_index = 1
    autonum = 0
    # NOTE skip the match on the first time through as we've already done it (emulates begin...while)
    while match || ((match = CalloutListRx.match reader.peek_line) && reader.mark)
      if (num = match[1]) == '.'
        num = (autonum += 1).to_s
      end
      # might want to move this check to a validate method
      unless num == next_index.to_s
        logger.warn message_with_context %(callout list item index: expected #{next_index}, got #{num}), source_location: reader.cursor_at_mark
      end
      if (list_item = parse_list_item reader, list_block, match, '<1>')
        list_block.items << list_item
        if (coids = callouts.callout_ids list_block.items.size).empty?
          logger.warn message_with_context %(no callout found for <#{list_block.items.size}>), source_location: reader.cursor_at_mark
        else
          list_item.attributes['coids'] = coids
        end
      end
      next_index += 1
      match = nil
    end

    callouts.next_list
    list_block
  end

  # Internal: Parse and construct the next ListItem (unordered, ordered, or callout list) or next
  # term ListItem and description ListItem pair (description list) for the specified list Block.
  #
  # First, collect and process all the lines that constitute the next list item for the specified
  # list (according to its type). Next, create a ListItem (in the case of a description list, a
  # description ListItem), parse the lines into blocks, and associate those blocks with that
  # ListItem. Finally, fold the first block into the item's text attribute according to rules
  # described in ListItem.
  #
  # reader - The Reader from which to retrieve the next list item
  # list_block - The parent list Block for this ListItem. Also provides access to the list type.
  # match - The MatchData that contains the list item marker and first line text of the ListItem
  # sibling_trait - The trait to match a sibling list item. For ordered and unordered lists, this is
  #                 a String marker (e.g., '**' or 'ii)'). For description lists, this is a Regexp
  #                 marker pattern.
  # style - The block style assigned to this list (optional, default: nil)
  #
  # Returns the next ListItem or [[ListItem], ListItem] pair (description list) for the parent list Block.
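  #
  # Example (a rough sketch for illustration only; the source lines, the Document parent, and the
  # '*' sibling trait are assumed values, not taken from the surrounding code)
  #
  #   reader = Reader.new ['* first', '* second'], nil, normalize: true
  #   list_block = List.new Document.new, :ulist
  #   match = ListRxMap[:ulist].match reader.peek_line
  #   Parser.parse_list_item(reader, list_block, match, '*').text
  #   # => "first"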
def self.parse_list_item(reader, list_block, match, sibling_trait, style = nil) if (list_type = list_block.context) == :dlist dlist = true list_term = ListItem.new(list_block, (term_text = match[1])) if term_text.start_with?('[[') && LeadingInlineAnchorRx =~ term_text catalog_inline_anchor $1, ($2 || $'.lstrip), list_term, reader end has_text = true if (item_text = match[3]) list_item = ListItem.new(list_block, item_text) if list_block.document.sourcemap list_term.source_location = reader.cursor if has_text list_item.source_location = list_term.source_location else sourcemap_assignment_deferred = true end end else has_text = true list_item = ListItem.new(list_block, (item_text = match[2])) list_item.source_location = reader.cursor if list_block.document.sourcemap case list_type when :ulist list_item.marker = sibling_trait if item_text.start_with?('[') if style && style == 'bibliography' if InlineBiblioAnchorRx =~ item_text catalog_inline_biblio_anchor $1, $2, list_item, reader end elsif item_text.start_with?('[[') if LeadingInlineAnchorRx =~ item_text catalog_inline_anchor $1, $2, list_item, reader end elsif item_text.start_with?('[ ] ', '[x] ', '[*] ') list_block.set_option 'checklist' list_item.attributes['checkbox'] = '' list_item.attributes['checked'] = '' unless item_text.start_with? '[ ' list_item.text = item_text.slice(4, item_text.length) end end when :olist sibling_trait, implicit_style = resolve_ordered_list_marker(sibling_trait, (ordinal = list_block.items.size), true, reader) list_item.marker = sibling_trait if ordinal == 0 && !style # using list level makes more sense, but we don't track it # basing style on marker level is compliant with AsciiDoc.py list_block.style = implicit_style || (ORDERED_LIST_STYLES[sibling_trait.length - 1] || 'arabic').to_s end if item_text.start_with?('[[') && LeadingInlineAnchorRx =~ item_text catalog_inline_anchor $1, $2, list_item, reader end else # :colist list_item.marker = sibling_trait if item_text.start_with?('[[') && LeadingInlineAnchorRx =~ item_text catalog_inline_anchor $1, $2, list_item, reader end end end # first skip the line with the marker / term (it gets put back onto the reader by next_block) reader.shift block_cursor = reader.cursor list_item_reader = Reader.new read_lines_for_list_item(reader, list_type, sibling_trait, has_text), block_cursor if list_item_reader.has_more_lines? list_item.source_location = block_cursor if sourcemap_assignment_deferred # NOTE peek on the other side of any comment lines comment_lines = list_item_reader.skip_line_comments if (subsequent_line = list_item_reader.peek_line) list_item_reader.unshift_lines comment_lines unless comment_lines.empty? unless subsequent_line.empty? content_adjacent = true # treat lines as paragraph text if continuation does not connect first block (i.e., has_text = nil) has_text = nil unless dlist end end # reader is confined to boundaries of list, which means only blocks will be found (no sections) if (block = next_block(list_item_reader, list_item, {}, text_only: has_text ? nil : true, list_type: list_type)) list_item.blocks << block end while list_item_reader.has_more_lines? if (block = next_block(list_item_reader, list_item, {}, list_type: list_type)) list_item.blocks << block end end list_item.fold_first if content_adjacent && (first_block = list_item.blocks[0]) && first_block.context == :paragraph end dlist ? [[list_term], (list_item.text? || list_item.blocks? ? 
list_item : nil)] : list_item end # Internal: Collect the lines belonging to the current list item, navigating # through all the rules that determine what comprises a list item. # # Grab lines until a sibling list item is found, or the block is broken by a # terminator (such as a line comment). Description lists are more greedy if # they don't have optional inline item text...they want that text # # reader - The Reader from which to retrieve the lines. # list_type - The Symbol context of the list (:ulist, :olist, :colist or :dlist) # sibling_trait - A Regexp that matches a sibling of this list item or String list marker # of the items in this list (default: nil) # has_text - Whether the list item has text defined inline (always true except for description lists) # # Returns an Array of lines belonging to the current list item. def self.read_lines_for_list_item(reader, list_type, sibling_trait = nil, has_text = true) buffer = [] # three states for continuation: :inactive, :active & :frozen # :frozen signifies we've detected sequential continuation lines & # continuation is not permitted until reset continuation = :inactive # if we are within a nested list, we don't throw away the list # continuation marks because they will be processed when grabbing # the lines for those nested lists within_nested_list = false # a detached continuation is a list continuation that follows a blank line # it gets associated with the outermost block detached_continuation = nil dlist = list_type == :dlist while reader.has_more_lines? this_line = reader.read_line # if we've arrived at a sibling item in this list, we've captured # the complete list item and can begin processing it # the remainder of the method determines whether we've reached # the termination of the list break if is_sibling_list_item?(this_line, list_type, sibling_trait) prev_line = buffer.empty? ? nil : buffer[-1] if prev_line == LIST_CONTINUATION if continuation == :inactive continuation = :active has_text = true buffer[-1] = '' unless within_nested_list end # dealing with adjacent list continuations (which is really a syntax error) if this_line == LIST_CONTINUATION if continuation != :frozen continuation = :frozen buffer << this_line end this_line = nil next end end # a delimited block immediately breaks the list unless preceded # by a list continuation (they are harsh like that ;0) if (match = is_delimited_block? this_line, true) break unless continuation == :active buffer << this_line # grab all the lines in the block, leaving the delimiters in place # we're being more strict here about the terminator, but I think that's a good thing buffer.concat reader.read_lines_until terminator: match.terminator, read_last_line: true, context: nil continuation = :inactive # BlockAttributeLineRx only breaks dlist if ensuing line is not a list item elsif dlist && continuation != :active && (this_line.start_with? '[') && (BlockAttributeLineRx.match? this_line) block_attribute_lines = [this_line] while (next_line = reader.peek_line) if is_delimited_block? next_line interrupt = true elsif next_line.empty? || ((next_line.start_with? '[') && (BlockAttributeLineRx.match? next_line)) block_attribute_lines << reader.read_line next elsif (AnyListRx.match? next_line) && !(is_sibling_list_item? next_line, list_type, sibling_trait) buffer.concat block_attribute_lines else # rubocop:disable Lint/DuplicateBranch interrupt = true end break end if interrupt reader.unshift_lines block_attribute_lines break end elsif continuation == :active && !this_line.empty? 
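        # NOTE the continuation is active, so a non-empty line (and any lines it pulls in below)
        # attaches to the current list item as block content rather than terminating the item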
# literal paragraphs have special considerations (and this is one of # two entry points into one) # if we don't process it as a whole, then a line in it that looks like a # list item will throw off the exit from it if LiteralParagraphRx.match? this_line reader.unshift_line this_line if dlist # we may be in an indented list disguised as a literal paragraph # so we need to make sure we don't slurp up a legitimate sibling buffer.concat reader.read_lines_until(preserve_last_line: true, break_on_blank_lines: true, break_on_list_continuation: true) {|line| is_sibling_list_item? line, list_type, sibling_trait } else buffer.concat reader.read_lines_until(preserve_last_line: true, break_on_blank_lines: true, break_on_list_continuation: true) end continuation = :inactive # let block metadata play out until we find the block elsif ((ch0 = this_line.chr) == '.' && (BlockTitleRx.match? this_line)) || (ch0 == '[' && (BlockAttributeLineRx.match? this_line)) || (ch0 == ':' && (AttributeEntryRx.match? this_line)) buffer << this_line else if (nested_list_type = (within_nested_list ? [:dlist] : NESTABLE_LIST_CONTEXTS).find {|ctx| ListRxMap[ctx].match? this_line }) within_nested_list = true if nested_list_type == :dlist && $3.nil_or_empty? # get greedy again has_text = false end end buffer << this_line continuation = :inactive end elsif prev_line && prev_line.empty? # advance to the next line of content if this_line.empty? # stop reading if we reach eof break unless (this_line = reader.skip_blank_lines && reader.read_line) # stop reading if we hit a sibling list item break if is_sibling_list_item? this_line, list_type, sibling_trait end if this_line == LIST_CONTINUATION detached_continuation = buffer.size buffer << this_line elsif has_text # has_text only relevant for dlist, which is more greedy until it has text for an item; has_text is always true for all other lists # in this block, we have to see whether we stay in the list # TODO any way to combine this with the check after skipping blank lines? if is_sibling_list_item?(this_line, list_type, sibling_trait) break elsif (nested_list_type = NESTABLE_LIST_CONTEXTS.find {|ctx| ListRxMap[ctx] =~ this_line }) buffer << this_line within_nested_list = true if nested_list_type == :dlist && $3.nil_or_empty? # get greedy again has_text = false end # slurp up any literal paragraph offset by blank lines # NOTE we have to check for indented list items first elsif LiteralParagraphRx.match? this_line reader.unshift_line this_line if dlist # we may be in an indented list disguised as a literal paragraph # so we need to make sure we don't slurp up a legitimate sibling buffer.concat reader.read_lines_until(preserve_last_line: true, break_on_blank_lines: true, break_on_list_continuation: true) {|line| is_sibling_list_item? line, list_type, sibling_trait } else buffer.concat reader.read_lines_until(preserve_last_line: true, break_on_blank_lines: true, break_on_list_continuation: true) end else break end else # only dlist in need of item text, so slurp it up! # pop the blank line so it's not interpreted as a list continuation buffer.pop unless within_nested_list buffer << this_line has_text = true end else has_text = true unless this_line.empty? if (nested_list_type = (within_nested_list ? [:dlist] : NESTABLE_LIST_CONTEXTS).find {|ctx| ListRxMap[ctx] =~ this_line }) within_nested_list = true if nested_list_type == :dlist && $3.nil_or_empty? 
# get greedy again has_text = false end end buffer << this_line end this_line = nil end reader.unshift_line this_line if this_line buffer[detached_continuation] = '' if detached_continuation until buffer.empty? # strip trailing blank lines to prevent empty blocks if (last_line = buffer[-1]).empty? buffer.pop else # drop optional trailing continuation # (a blank line would have served the same purpose in the document) buffer.pop if last_line == LIST_CONTINUATION break end end buffer end # Internal: Initialize a new Section object and assign any attributes provided # # The information for this section is retrieved by parsing the lines at the # current position of the reader. # # reader - the source reader # parent - the parent Section or Document of this Section # attributes - a Hash of attributes to assign to this section (default: {}) # # Returns the section [Block] def self.initialize_section reader, parent, attributes = {} document = parent.document book = (doctype = document.doctype) == 'book' source_location = reader.cursor if document.sourcemap sect_style = attributes[1] sect_id, sect_reftext, sect_title, sect_level, sect_atx = parse_section_title reader, document, attributes['id'] if sect_style if book && sect_style == 'abstract' sect_name, sect_level = 'chapter', 1 elsif (sect_style.start_with? 'sect') && (SectionLevelStyleRx.match? sect_style) sect_name = 'section' else sect_name, sect_special = sect_style, true sect_level = 1 if sect_level == 0 sect_numbered = sect_name == 'appendix' end elsif book sect_name = sect_level == 0 ? 'part' : (sect_level > 1 ? 'section' : 'chapter') elsif doctype == 'manpage' && (sect_title.casecmp 'synopsis') == 0 sect_name, sect_special = 'synopsis', true else sect_name = 'section' end attributes['reftext'] = sect_reftext if sect_reftext section = Section.new parent, sect_level section.id, section.title, section.sectname, section.source_location = sect_id, sect_title, sect_name, source_location if sect_special section.special = true if sect_numbered section.numbered = true elsif document.attributes['sectnums'] == 'all' section.numbered = (book && sect_level == 1 ? :chapter : true) end elsif document.attributes['sectnums'] && sect_level > 0 # NOTE a special section here is guaranteed to be nested in another section section.numbered = section.special ? parent.numbered && true : true elsif book && sect_level == 0 && document.attributes['partnums'] section.numbered = true end # generate an ID if one was not embedded or specified as anchor above section title if (id = section.id || (section.id = (document.attributes.key? 'sectids') ? (generated_id = Section.generate_id section.title, document) : nil)) # convert title to resolve attributes while in scope section.title unless generated_id || !(sect_title.include? ATTR_REF_HEAD) unless document.register :refs, [id, section] logger.warn message_with_context %(id assigned to section already in use: #{id}), source_location: (reader.cursor_at_line reader.lineno - (sect_atx ? 
1 : 2)) end end section.update_attributes(attributes) reader.skip_blank_lines section end # Internal: Checks if the next line on the Reader is a section title # # reader - the source Reader # attributes - a Hash of attributes collected above the current line # # Returns the Integer section level if the Reader is positioned at a section title or nil otherwise def self.is_next_line_section?(reader, attributes) return if (style = attributes[1]) && (style == 'discrete' || style == 'float') if Compliance.underline_style_section_titles next_lines = reader.peek_lines 2, style && style == 'comment' is_section_title?(next_lines[0] || '', next_lines[1]) else atx_section_title?(reader.peek_line || '') end end # Internal: Convenience API for checking if the next line on the Reader is the document title # # reader - the source Reader # attributes - a Hash of attributes collected above the current line # leveloffset - an Integer (or integer String value) the represents the current leveloffset # # returns true if the Reader is positioned at the document title, false otherwise def self.is_next_line_doctitle? reader, attributes, leveloffset if leveloffset (sect_level = is_next_line_section? reader, attributes) && (sect_level + leveloffset.to_i == 0) else (is_next_line_section? reader, attributes) == 0 end end # Public: Checks whether the lines given are an atx or setext section title. # # line1 - [String] candidate title. # line2 - [String] candidate underline (default: nil). # # Returns the [Integer] section level if these lines are a section title, otherwise nothing. def self.is_section_title?(line1, line2 = nil) atx_section_title?(line1) || (line2.nil_or_empty? ? nil : setext_section_title?(line1, line2)) end # Checks whether the line given is an atx section title. # # The level returned is 1 less than number of leading markers. # # line - [String] candidate title with leading atx marker. # # Returns the [Integer] section level if this line is an atx section title, otherwise nothing. def self.atx_section_title? line if Compliance.markdown_syntax ? ((line.start_with? '=', '#') && ExtAtxSectionTitleRx =~ line) : ((line.start_with? '=') && AtxSectionTitleRx =~ line) $1.length - 1 end end # Checks whether the lines given are an setext section title. # # line1 - [String] candidate title # line2 - [String] candidate underline # # Returns the [Integer] section level if these lines are an setext section title, otherwise nothing. def self.setext_section_title? line1, line2 if (level = SETEXT_SECTION_LEVELS[line2_ch0 = line2.chr]) && (uniform? line2, line2_ch0, (line2_len = line2.length)) && (SetextSectionTitleRx.match? line1) && (line1.length - line2_len).abs < 2 level end end # Internal: Parse the section title from the current position of the reader # # Parse an atx (single-line) or setext (underlined) section title. After this method is called, # the Reader will be positioned at the line after the section title. # # For efficiency, we don't reuse methods internally that check for a section title. # # reader - the source [Reader], positioned at a section title. # document - the current [Document]. 
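  #
  # NOTE if the document defines a leveloffset attribute, that offset is added to the parsed
  # level before the values are returned (and the result is clamped at 0). For instance, with
  # leveloffset set to 1, the line "== Foo" would be reported as level 2 rather than 1
  # (hypothetical values, shown only for illustration).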
# # Examples # # reader.lines # # => ["Foo", "~~~"] # # id, reftext, title, level, atx = parse_section_title(reader, document) # # title # # => "Foo" # level # # => 2 # id # # => nil # atx # # => false # # line1 # # => "==== Foo" # # id, reftext, title, level, atx = parse_section_title(reader, document) # # title # # => "Foo" # level # # => 3 # id # # => nil # atx # # => true # # Returns an 5-element [Array] containing the id (String), reftext (String), # title (String), level (Integer), and flag (Boolean) indicating whether an # atx section title was matched, or nothing. def self.parse_section_title(reader, document, sect_id = nil) sect_reftext = nil line1 = reader.read_line if Compliance.markdown_syntax ? ((line1.start_with? '=', '#') && ExtAtxSectionTitleRx =~ line1) : ((line1.start_with? '=') && AtxSectionTitleRx =~ line1) # NOTE level is 1 less than number of line markers sect_level, sect_title, atx = $1.length - 1, $2, true if sect_title.end_with?(']]') && InlineSectionAnchorRx =~ sect_title && !$1 # escaped sect_title, sect_id, sect_reftext = (sect_title.slice 0, sect_title.length - $&.length), $2, $3 end unless sect_id elsif Compliance.underline_style_section_titles && (line2 = reader.peek_line(true)) && (sect_level = SETEXT_SECTION_LEVELS[line2_ch0 = line2.chr]) && (uniform? line2, line2_ch0, (line2_len = line2.length)) && (sect_title = SetextSectionTitleRx =~ line1 && $1) && (line1.length - line2_len).abs < 2 atx = false if sect_title.end_with?(']]') && InlineSectionAnchorRx =~ sect_title && !$1 # escaped sect_title, sect_id, sect_reftext = (sect_title.slice 0, sect_title.length - $&.length), $2, $3 end unless sect_id reader.shift else raise %(Unrecognized section at #{reader.cursor_at_prev_line}) end if document.attr? 'leveloffset' sect_level += (document.attr 'leveloffset').to_i sect_level = 0 if sect_level < 0 end [sect_id, sect_reftext, sect_title, sect_level, atx] end # Public: Consume and parse the two header lines (line 1 = author info, line 2 = revision info). # # Returns the Hash of header metadata. If a Document object is supplied, the metadata # is applied directly to the attributes of the Document. # # reader - the Reader holding the source lines of the document # document - the Document we are building (default: nil) # # Examples # # data = ["Author Name \n", "v1.0, 2012-12-21: Coincide w/ end of world.\n"] # parse_header_metadata(Reader.new data, nil, normalize: true) # # => { 'author' => 'Author Name', 'firstname' => 'Author', 'lastname' => 'Name', 'email' => 'author@example.org', # # 'revnumber' => '1.0', 'revdate' => '2012-12-21', 'revremark' => 'Coincide w/ end of world.' } def self.parse_header_metadata reader, document = nil, retrieve = true doc_attrs = document && document.attributes # NOTE this will discard any comment lines, but not skip blank lines process_attribute_entries reader, document if reader.has_more_lines? && !reader.next_line_empty? authorcount = (implicit_author_metadata = process_authors reader.read_line).delete 'authorcount' if document && (doc_attrs['authorcount'] = authorcount) > 0 implicit_author_metadata.each do |key, val| # apply header subs and assign to document; attributes substitution only relevant for email doc_attrs[key] = document.apply_header_subs val unless doc_attrs.key? 
key end implicit_author = doc_attrs['author'] implicit_authorinitials = doc_attrs['authorinitials'] implicit_authors = doc_attrs['authors'] end implicit_author_metadata['authorcount'] = authorcount # NOTE this will discard any comment lines, but not skip blank lines process_attribute_entries reader, document if reader.has_more_lines? && !reader.next_line_empty? rev_line = reader.read_line if (match = RevisionInfoLineRx.match rev_line) rev_metadata = {} rev_metadata['revnumber'] = match[1].rstrip if match[1] unless (component = match[2].strip).empty? # version must begin with 'v' if date is absent if !match[1] && (component.start_with? 'v') rev_metadata['revnumber'] = component.slice 1, component.length else rev_metadata['revdate'] = component end end rev_metadata['revremark'] = match[3].rstrip if match[3] if document && !rev_metadata.empty? # apply header subs and assign to document rev_metadata.each do |key, val| doc_attrs[key] = document.apply_header_subs val unless doc_attrs.key? key end end else # throw it back reader.unshift_line rev_line end end # NOTE this will discard any comment lines, but not skip blank lines process_attribute_entries reader, document reader.skip_blank_lines else implicit_author_metadata = {} end # process author attribute entries that override (or stand in for) the implicit author line if document if doc_attrs.key?('author') && (author_line = doc_attrs['author']) != implicit_author # do not allow multiple, process as names only author_metadata = process_authors author_line, true, false author_metadata.delete 'authorinitials' if doc_attrs['authorinitials'] != implicit_authorinitials elsif doc_attrs.key?('authors') && (author_line = doc_attrs['authors']) != implicit_authors # allow multiple, process as names only author_metadata = process_authors author_line, true else authors, author_idx, author_key, explicit, sparse = [], 1, 'author_1', false, false while doc_attrs.key? 
author_key # only use indexed author attribute if value is different # leaves corner case if line matches with underscores converted to spaces; use double space to force if (author_override = doc_attrs[author_key]) == implicit_author_metadata[author_key] authors << nil sparse = true else authors << author_override explicit = true end author_key = %(author_#{author_idx += 1}) end if explicit # rebuild implicit author names to reparse authors.each_with_index do |author, idx| next if author authors[idx] = [ implicit_author_metadata[%(firstname_#{name_idx = idx + 1})], implicit_author_metadata[%(middlename_#{name_idx})], implicit_author_metadata[%(lastname_#{name_idx})] ].compact.map {|it| it.tr ' ', '_' }.join ' ' end if sparse # process as names only author_metadata = process_authors authors, true, false else author_metadata = { 'authorcount' => 0 } end end if author_metadata['authorcount'] == 0 if authorcount author_metadata = nil else doc_attrs['authorcount'] = 0 end else doc_attrs.update author_metadata # special case if !doc_attrs.key?('email') && doc_attrs.key?('email_1') doc_attrs['email'] = doc_attrs['email_1'] end end end implicit_author_metadata.merge rev_metadata.to_h, author_metadata.to_h if retrieve end # Internal: Parse the author line into a Hash of author metadata # # author_line - the String author line # names_only - a Boolean flag that indicates whether to process line as # names only or names with emails (default: false) # multiple - a Boolean flag that indicates whether to process multiple # semicolon-separated entries in the author line (default: true) # # returns a Hash of author metadata def self.process_authors author_line, names_only = false, multiple = true author_metadata = {} author_idx = 0 (multiple && (author_line.include? ';') ? (author_line.split AuthorDelimiterRx) : [*author_line]).each do |author_entry| next if author_entry.empty? key_map = {} if (author_idx += 1) == 1 AuthorKeys.each {|key| key_map[key.to_sym] = key } else AuthorKeys.each {|key| key_map[key.to_sym] = %(#{key}_#{author_idx}) } end if names_only # when parsing an attribute value # QUESTION should we rstrip author_entry? if author_entry.include? 
'<' author_metadata[key_map[:author]] = author_entry.tr('_', ' ') author_entry = author_entry.gsub XmlSanitizeRx, '' end # NOTE split names and collapse repeating whitespace (split drops any leading whitespace) if (segments = author_entry.split nil, 3).size == 3 segments << (segments.pop.squeeze ' ') end elsif (match = AuthorInfoLineRx.match(author_entry)) (segments = match.to_a).shift end if segments author = author_metadata[key_map[:firstname]] = fname = segments[0].tr('_', ' ') author_metadata[key_map[:authorinitials]] = fname.chr if segments[1] if segments[2] author_metadata[key_map[:middlename]] = mname = segments[1].tr('_', ' ') author_metadata[key_map[:lastname]] = lname = segments[2].tr('_', ' ') author = fname + ' ' + mname + ' ' + lname author_metadata[key_map[:authorinitials]] = %(#{fname.chr}#{mname.chr}#{lname.chr}) else author_metadata[key_map[:lastname]] = lname = segments[1].tr('_', ' ') author = fname + ' ' + lname author_metadata[key_map[:authorinitials]] = %(#{fname.chr}#{lname.chr}) end end author_metadata[key_map[:author]] ||= author author_metadata[key_map[:email]] = segments[3] unless names_only || !segments[3] else author_metadata[key_map[:author]] = author_metadata[key_map[:firstname]] = fname = author_entry.squeeze(' ').strip author_metadata[key_map[:authorinitials]] = fname.chr end if author_idx == 1 author_metadata['authors'] = author_metadata[key_map[:author]] else # only assign the _1 attributes once we see the second author if author_idx == 2 AuthorKeys.each {|key| author_metadata[%(#{key}_1)] = author_metadata[key] if author_metadata.key? key } end author_metadata['authors'] = %(#{author_metadata['authors']}, #{author_metadata[key_map[:author]]}) end end author_metadata['authorcount'] = author_idx author_metadata end # Internal: Parse lines of metadata until a line of metadata is not found. # # This method processes sequential lines containing block metadata, ignoring # blank lines and comments. # # reader - the source reader # document - the current Document # attributes - a Hash of attributes in which any metadata found will be stored (default: {}) # options - a Hash of options to control processing: (default: {}) # * :text_only indicates that parser is only looking for text content # and thus the block title should not be captured # # returns the Hash of attributes including any metadata found def self.parse_block_metadata_lines reader, document, attributes = {}, options = {} while parse_block_metadata_line reader, document, attributes, options # discard the line just processed reader.shift reader.skip_blank_lines || break end attributes end # Internal: Parse the next line if it contains metadata for the following block # # This method handles lines with the following content: # # * line or block comment # * anchor # * attribute list # * block title # # Any attributes found will be inserted into the attributes argument. # If the line contains block metadata, the method returns true, otherwise false. # # reader - the source reader # document - the current Document # attributes - a Hash of attributes in which any metadata found will be stored # options - a Hash of options to control processing: (default: {}) # * :text_only indicates the parser is only looking for text content, # thus neither a block title or attribute entry should be captured # # returns true if the line contains metadata, otherwise falsy def self.parse_block_metadata_line reader, document, attributes, options = {} if (next_line = reader.peek_line) && (options[:text_only] ? 
(next_line.start_with? '[', '/') : (normal = next_line.start_with? '[', '.', '/', ':')) if next_line.start_with? '[' if next_line.start_with? '[[' if (next_line.end_with? ']]') && BlockAnchorRx =~ next_line # NOTE registration of id and reftext is deferred until block is processed attributes['id'] = $1 if (reftext = $2) attributes['reftext'] = (reftext.include? ATTR_REF_HEAD) ? (document.sub_attributes reftext) : reftext end return true end elsif (next_line.end_with? ']') && BlockAttributeListRx =~ next_line current_style = attributes[1] # extract id, role, and options from first positional attribute and remove, if present if (document.parse_attributes $1, [], sub_input: true, sub_result: true, into: attributes)[1] attributes[1] = (parse_style_attribute attributes, reader) || current_style end return true end elsif normal && (next_line.start_with? '.') if BlockTitleRx =~ next_line # NOTE title doesn't apply to section, but we need to stash it for the first block # TODO should issue an error if this is found above the document title attributes['title'] = $1 return true end elsif !normal || (next_line.start_with? '/') if next_line == '//' return true elsif normal && (uniform? next_line, '/', (ll = next_line.length)) unless ll == 3 reader.read_lines_until terminator: next_line, skip_first_line: true, preserve_last_line: true, skip_processing: true, context: :comment return true end else return true unless next_line.start_with? '///' end if next_line.start_with? '//' # NOTE the final condition can be consolidated into single line elsif normal && (next_line.start_with? ':') && AttributeEntryRx =~ next_line process_attribute_entry reader, document, attributes, $~ return true end end nil end # Process consecutive attribute entry lines, ignoring adjacent line comments and comment blocks. # # Returns nothing def self.process_attribute_entries reader, document, attributes = nil reader.skip_comment_lines while process_attribute_entry reader, document, attributes # discard line just processed reader.shift reader.skip_comment_lines end end def self.process_attribute_entry reader, document, attributes = nil, match = nil if match || (match = reader.has_more_lines? ? (AttributeEntryRx.match reader.peek_line) : nil) if (value = match[2]).nil_or_empty? value = '' elsif value.end_with? LINE_CONTINUATION, LINE_CONTINUATION_LEGACY con, value = (value.slice value.length - 2, 2), (value.slice 0, value.length - 2).rstrip while reader.advance && !(next_line = reader.peek_line || '').empty? next_line = next_line.lstrip next_line = (next_line.slice 0, next_line.length - 2).rstrip if (keep_open = next_line.end_with? con) value = %(#{value}#{(value.end_with? HARD_LINE_BREAK) ? LF : ' '}#{next_line}) break unless keep_open end end store_attribute match[1], value, document, attributes true end end # Public: Store the attribute in the document and register attribute entry if accessible # # name - the String name of the attribute to store; # if name begins or ends with !, it signals to remove the attribute with that root name # value - the String value of the attribute to store # doc - the Document being parsed # attrs - the attributes for the current context # # returns a 2-element array containing the resolved attribute name (minus the ! indicator) and value def self.store_attribute name, value, doc = nil, attrs = nil # TODO move processing of attribute value to utility method if name.end_with? '!' # a nil value signals the attribute should be deleted (unset) name = name.chop value = nil elsif name.start_with? '!' 
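# NOTE a leading ! (e.g., !name) requests removal of the attribute, just like a trailing !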
# a nil value signals the attribute should be deleted (unset) name = (name.slice 1, name.length) value = nil end if (name = sanitize_attribute_name name) == 'numbered' name = 'sectnums' elsif name == 'hardbreaks' name = 'hardbreaks-option' elsif name == 'showtitle' store_attribute 'notitle', (value ? nil : ''), doc, attrs end if doc if value if name == 'leveloffset' # support relative leveloffset values if value.start_with? '+' value = ((doc.attr 'leveloffset', 0).to_i + (value.slice 1, value.length).to_i).to_s elsif value.start_with? '-' value = ((doc.attr 'leveloffset', 0).to_i - (value.slice 1, value.length).to_i).to_s end end # QUESTION should we set value to locked value if set_attribute returns false? if (resolved_value = doc.set_attribute name, value) value = resolved_value (Document::AttributeEntry.new name, value).save_to attrs if attrs end elsif (doc.delete_attribute name) && attrs (Document::AttributeEntry.new name, value).save_to attrs end elsif attrs (Document::AttributeEntry.new name, value).save_to attrs end [name, value] end # Internal: Resolve the 0-index marker for this list item # # For ordered lists, match the marker used for this list item against the # known list markers and determine which marker is the first (0-index) marker # in its number series. # # For callout lists, return <1>. # # For bulleted lists, return the marker as passed to this method. # # list_type - The Symbol context of the list # marker - The String marker for this list item # ordinal - The position of this list item in the list # validate - Whether to validate the value of the marker # # Returns the String 0-index marker for this list item def self.resolve_list_marker(list_type, marker, ordinal = 0, validate = false, reader = nil) case list_type when :ulist marker when :olist resolve_ordered_list_marker(marker, ordinal, validate, reader)[0] else # :colist '<1>' end end # Internal: Resolve the 0-index marker for this ordered list item # # Match the marker used for this ordered list item against the # known ordered list markers and determine which marker is # the first (0-index) marker in its number series. # # The purpose of this method is to normalize the implicit numbered markers # so that they can be compared against other list items. # # marker - The marker used for this list item # ordinal - The 0-based index of the list item (default: 0) # validate - Perform validation that the marker provided is the proper # marker in the sequence (default: false) # # Examples # # marker = 'B.' # Parser.resolve_ordered_list_marker(marker, 1, true, reader) # # => ['A.', :upperalpha] # # marker = '.' # Parser.resolve_ordered_list_marker(marker, 1, true, reader) # # => ['.'] # # Returns a tuple that contains the String of the first marker in this number # series and the implicit list style, if applicable def self.resolve_ordered_list_marker(marker, ordinal = 0, validate = false, reader = nil) return [marker] if marker.start_with? '.' # NOTE case statement is guaranteed to match one of the conditions case (style = ORDERED_LIST_STYLES.find {|s| OrderedListMarkerRxMap[s].match? marker }) when :arabic if validate expected = ordinal + 1 actual = marker.to_i # remove trailing . and coerce to int end marker = '1.' when :loweralpha if validate expected = ('a'[0].ord + ordinal).chr actual = marker.chop # remove trailing . end marker = 'a.' when :upperalpha if validate expected = ('A'[0].ord + ordinal).chr actual = marker.chop # remove trailing . end marker = 'A.' 
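# NOTE roman numeral markers end with a closing parenthesis rather than a trailing dot (e.g., i), ii) or I), II))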
when :lowerroman if validate expected = Helpers.int_to_roman(ordinal + 1).downcase actual = marker.chop # remove trailing ) end marker = 'i)' when :upperroman if validate expected = Helpers.int_to_roman(ordinal + 1) actual = marker.chop # remove trailing ) end marker = 'I)' end if validate && expected != actual logger.warn message_with_context %(list item index: expected #{expected}, got #{actual}), source_location: reader.cursor end [marker, style] end # Internal: Determine whether the this line is a sibling list item # according to the list type and trait (marker) provided. # # line - The String line to check # list_type - The context of the list (:olist, :ulist, :colist, :dlist) # sibling_trait - The String marker for the list or the Regexp to match a sibling # # Returns a Boolean indicating whether this line is a sibling list item given the criteria provided def self.is_sibling_list_item? line, list_type, sibling_trait if ::Regexp === sibling_trait sibling_trait.match? line else ListRxMap[list_type] =~ line && sibling_trait == (resolve_list_marker list_type, $1) end end # Internal: Parse the table contained in the provided Reader # # table_reader - a Reader containing the source lines of an AsciiDoc table # parent - the parent Block of this Asciidoctor::Table # attributes - attributes captured from above this Block # # returns an instance of Asciidoctor::Table parsed from the provided reader def self.parse_table(table_reader, parent, attributes) table = Table.new(parent, attributes) if (attributes.key? 'cols') && !(colspecs = parse_colspecs attributes['cols']).empty? table.create_columns colspecs explicit_colspecs = true end skipped = table_reader.skip_blank_lines || 0 if attributes['header-option'] table.has_header_option = true elsif skipped == 0 && !attributes['noheader-option'] # NOTE: assume table has header until we know otherwise; if it doesn't (nil), cells in first row get reprocessed table.has_header_option = :implicit implicit_header = true end parser_ctx = Table::ParserContext.new table_reader, table, attributes format, loop_idx, implicit_header_boundary = parser_ctx.format, -1, nil while (line = table_reader.read_line) if (beyond_first = (loop_idx += 1) > 0) && line.empty? line = nil implicit_header_boundary += 1 if implicit_header_boundary elsif format == 'psv' if parser_ctx.starts_with_delimiter? line line = line.slice 1, line.length # push empty cell spec if cell boundary appears at start of line parser_ctx.close_open_cell implicit_header_boundary = nil if implicit_header_boundary else next_cellspec, line = parse_cellspec line, :start, parser_ctx.delimiter # if cellspec is not nil, we're at a cell boundary if next_cellspec parser_ctx.close_open_cell next_cellspec implicit_header_boundary = nil if implicit_header_boundary # otherwise, the cell continues from previous line elsif implicit_header_boundary && implicit_header_boundary == loop_idx table.has_header_option = implicit_header = implicit_header_boundary = nil end end end unless beyond_first table_reader.mark # NOTE implicit header is offset by at least one blank line; implicit_header_boundary tracks size of gap if implicit_header if table_reader.has_more_lines? && table_reader.peek_line.empty? 
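# a blank line immediately after the first line keeps the implicit header assumption; 1 records the initial size of the gap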
implicit_header_boundary = 1 else table.has_header_option = implicit_header = nil end end end # this loop is used for flow control; internal logic controls how many times it executes while true if line && (m = parser_ctx.match_delimiter line) pre_match, post_match = m.pre_match, m.post_match case format when 'csv' if parser_ctx.buffer_has_unclosed_quotes? pre_match parser_ctx.skip_past_delimiter pre_match break if (line = post_match).empty? redo end parser_ctx.buffer = %(#{parser_ctx.buffer}#{pre_match}) when 'dsv' if pre_match.end_with? '\\' parser_ctx.skip_past_escaped_delimiter pre_match if (line = post_match).empty? parser_ctx.buffer = %(#{parser_ctx.buffer}#{LF}) parser_ctx.keep_cell_open break end redo end parser_ctx.buffer = %(#{parser_ctx.buffer}#{pre_match}) else # psv if pre_match.end_with? '\\' parser_ctx.skip_past_escaped_delimiter pre_match if (line = post_match).empty? parser_ctx.buffer = %(#{parser_ctx.buffer}#{LF}) parser_ctx.keep_cell_open break end redo end next_cellspec, cell_text = parse_cellspec pre_match parser_ctx.push_cellspec next_cellspec parser_ctx.buffer = %(#{parser_ctx.buffer}#{cell_text}) end # don't break if empty to preserve empty cell found at end of line (see issue #1106) line = nil if (line = post_match).empty? parser_ctx.close_cell else # no other delimiters to see here; suck up this line into the buffer and move on parser_ctx.buffer = %(#{parser_ctx.buffer}#{line}#{LF}) case format when 'csv' if parser_ctx.buffer_has_unclosed_quotes? table.has_header_option = implicit_header = implicit_header_boundary = nil if implicit_header_boundary && loop_idx == 0 parser_ctx.keep_cell_open else parser_ctx.close_cell true end when 'dsv' parser_ctx.close_cell true else # psv parser_ctx.keep_cell_open end break end end # NOTE cell may already be closed if table format is csv or dsv if parser_ctx.cell_open? parser_ctx.close_cell true unless table_reader.has_more_lines? else table_reader.skip_blank_lines || break end end table.assign_column_widths unless (table.attributes['colcount'] ||= table.columns.size) == 0 || explicit_colspecs table.has_header_option = true if implicit_header table.partition_header_footer attributes table end # Internal: Parse the column specs for this table. # # The column specs dictate the number of columns, relative # width of columns, default alignments for cells in each # column, and/or default styles or filters applied to the cells in # the column. # # Every column spec is guaranteed to have a width # # returns a Hash of attributes that specify how to format # and layout the cells in the table. def self.parse_colspecs records records = records.delete ' ' if records.include? ' ' # check for deprecated syntax: single number, equal column spread if records == records.to_i.to_s return ::Array.new(records.to_i) { { 'width' => 1 } } end specs = [] # NOTE -1 argument ensures we don't drop empty records ((records.include? ',') ? (records.split ',', -1) : (records.split ';', -1)).each do |record| if record.empty? specs << { 'width' => 1 } # TODO might want to use scan rather than this mega-regexp elsif (m = ColumnSpecRx.match(record)) spec = {} if m[2] # make this an operation colspec, rowspec = m[2].split '.' if !colspec.nil_or_empty? && TableCellHorzAlignments.key?(colspec) spec['halign'] = TableCellHorzAlignments[colspec] end if !rowspec.nil_or_empty? && TableCellVertAlignments.key?(rowspec) spec['valign'] = TableCellVertAlignments[rowspec] end end if (width = m[3]) # to_i will strip the optional % spec['width'] = width == '~' ? 
-1 : width.to_i else spec['width'] = 1 end # make this an operation if m[4] && TableCellStyles.key?(m[4]) spec['style'] = TableCellStyles[m[4]] end if m[1] 1.upto(m[1].to_i) { specs << spec.merge } else specs << spec end end end specs end # Internal: Parse the cell specs for the current cell. # # The cell specs dictate the cell's alignments, styles or filters, # colspan, rowspan and/or repeating content. # # The default spec when pos == :end is {} since we already know we're at a # delimiter. When pos == :start, we *may* be at a delimiter, nil indicates # we're not. # # returns the Hash of attributes that indicate how to layout # and style this cell in the table. def self.parse_cellspec(line, pos = :end, delimiter = nil) m, rest = nil, '' if pos == :start if line.include? delimiter spec_part, _, rest = line.partition delimiter if (m = CellSpecStartRx.match spec_part) return [{}, rest] if m[0].empty? else return [nil, line] end else return [nil, line] end elsif (m = CellSpecEndRx.match line) # when pos == :end # NOTE return the line stripped of trailing whitespace if no cellspec is found in this case return [{}, line.rstrip] if m[0].lstrip.empty? rest = m.pre_match else return [{}, line] end spec = {} if m[1] colspec, rowspec = m[1].split '.' colspec = colspec.nil_or_empty? ? 1 : colspec.to_i rowspec = rowspec.nil_or_empty? ? 1 : rowspec.to_i case m[2] when '+' spec['colspan'] = colspec unless colspec == 1 spec['rowspan'] = rowspec unless rowspec == 1 when '*' spec['repeatcol'] = colspec unless colspec == 1 end end if m[3] colspec, rowspec = m[3].split '.' if !colspec.nil_or_empty? && TableCellHorzAlignments.key?(colspec) spec['halign'] = TableCellHorzAlignments[colspec] end if !rowspec.nil_or_empty? && TableCellVertAlignments.key?(rowspec) spec['valign'] = TableCellVertAlignments[rowspec] end end if m[4] && TableCellStyles.key?(m[4]) spec['style'] = TableCellStyles[m[4]] end [spec, rest] end # Public: Parse the first positional attribute and assign named attributes # # Parse the first positional attribute to extract the style, role and id # parts, assign the values to their corresponding attribute keys and return # the parsed style from the first positional attribute. # # attributes - The Hash of attributes to process and update # # Examples # # puts attributes # => { 1 => "abstract#intro.lead%fragment", "style" => "preamble" } # # parse_style_attribute(attributes) # => "abstract" # # puts attributes # => { 1 => "abstract#intro.lead%fragment", "style" => "abstract", "id" => "intro", # "role" => "lead", "options" => "fragment", "fragment-option" => '' } # # Returns the String style parsed from the first positional attribute def self.parse_style_attribute attributes, reader = nil # NOTE spaces are not allowed in shorthand, so if we detect one, this ain't no shorthand if (raw_style = attributes[1]) && !raw_style.include?(' ') && Compliance.shorthand_property_syntax name = nil accum = '' parsed_attrs = {} raw_style.each_char do |c| case c when '.' yield_buffered_attribute parsed_attrs, name, accum, reader accum = '' name = :role when '#' yield_buffered_attribute parsed_attrs, name, accum, reader accum = '' name = :id when '%' yield_buffered_attribute parsed_attrs, name, accum, reader accum = '' name = :option else accum += c end end # small optimization if no shorthand is found if name yield_buffered_attribute parsed_attrs, name, accum, reader if (parsed_style = parsed_attrs[:style]) attributes['style'] = parsed_style end attributes['id'] = parsed_attrs[:id] if parsed_attrs.key? 
:id if parsed_attrs.key? :role attributes['role'] = (existing_role = attributes['role']).nil_or_empty? ? (parsed_attrs[:role].join ' ') : %(#{existing_role} #{parsed_attrs[:role].join ' '}) end parsed_attrs[:option].each {|opt| attributes[%(#{opt}-option)] = '' } if parsed_attrs.key? :option parsed_style else attributes['style'] = raw_style end else attributes['style'] = raw_style end end # Internal: Save the collected attribute (:id, :option, :role, or nil for :style) in the attribute Hash. def self.yield_buffered_attribute attrs, name, value, reader if name if value.empty? if reader logger.warn message_with_context %(invalid empty #{name} detected in style attribute), source_location: reader.cursor_at_prev_line else logger.warn %(invalid empty #{name} detected in style attribute) end elsif name == :id if attrs.key? :id if reader logger.warn message_with_context 'multiple ids detected in style attribute', source_location: reader.cursor_at_prev_line else logger.warn 'multiple ids detected in style attribute' end end attrs[name] = value else (attrs[name] ||= []) << value end else attrs[:style] = value unless value.empty? end nil end # Remove the block indentation (the amount of whitespace of the least indented line), replace tabs with spaces (using # proper tab expansion logic) and, finally, indent the lines by the margin width. Modifies the input Array directly. # # This method preserves the significant indentation (that exceeding the block indent) on each line. # # lines - The Array of String lines to process (no trailing newlines) # indent_size - The Integer number of spaces to readd to the start of non-empty lines after removing the indentation. # If this value is < 0, the existing indentation is preserved (optional, default: 0) # tab_size - the Integer number of spaces to use in place of a tab. A value of <= 0 disables the replacement # (optional, default: 0) # # Examples # # source = < [" def names", " @names.split", " end"] # # puts (Parser.adjust_indentation! source.split ?\n).join ?\n # # => def names # # => @names.split # # => end # # returns Nothing def self.adjust_indentation! lines, indent_size = 0, tab_size = 0 return if lines.empty? # expand tabs if a tab character is detected and tab_size > 0 if tab_size > 0 && lines.any? {|line| line.include? TAB } full_tab_space = ' ' * tab_size lines.map! do |line| if line.empty? || (tab_idx = line.index TAB).nil? line else if tab_idx == 0 leading_tabs = 0 line.each_byte do |b| break unless b == 9 leading_tabs += 1 end line = %(#{full_tab_space * leading_tabs}#{line.slice leading_tabs, line.length}) next line unless line.include? TAB end # keeps track of how many spaces were added to adjust offset in match data spaces_added = 0 idx = 0 result = '' line.each_char do |c| if c == TAB # calculate how many spaces this tab represents, then replace tab with spaces if (offset = idx + spaces_added) % tab_size == 0 spaces_added += tab_size - 1 result += full_tab_space else unless (spaces = tab_size - offset % tab_size) == 1 spaces_added += spaces - 1 end result += ' ' * spaces end else result += c end idx += 1 end result end end end # skip block indent adjustment if indent_size is < 0 return if indent_size < 0 # determine block indent (assumes no whitespace-only lines are present) block_indent = nil lines.each do |line| next if line.empty? 
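# a line that starts at column 0 means there is no common block indent to strip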
if (line_indent = line.length - line.lstrip.length) == 0 block_indent = nil break end block_indent = line_indent unless block_indent && block_indent < line_indent end # remove block indent then apply indent_size if specified # NOTE block_indent is > 0 if not nil if indent_size == 0 lines.map! {|line| line.empty? ? line : (line.slice block_indent, line.length) } if block_indent else new_block_indent = ' ' * indent_size if block_indent lines.map! {|line| line.empty? ? line : new_block_indent + (line.slice block_indent, line.length) } else lines.map! {|line| line.empty? ? line : new_block_indent + line } end end nil end def self.uniform? str, chr, len (str.count chr) == len end # Internal: Convert a string to a legal attribute name. # # name - the String name of the attribute # # Returns a String with the legal AsciiDoc attribute name. # # Examples # # sanitize_attribute_name('Foo Bar') # => 'foobar' # # sanitize_attribute_name('foo') # => 'foo' # # sanitize_attribute_name('Foo 3 #-Billy') # => 'foo3-billy' def self.sanitize_attribute_name(name) name.gsub(InvalidAttributeNameCharsRx, '').downcase end end end asciidoctor-2.0.20/lib/asciidoctor/path_resolver.rb000066400000000000000000000470021443135032600223550ustar00rootroot00000000000000# frozen_string_literal: true module Asciidoctor # Public: Handles all operations for resolving, cleaning and joining paths. # This class includes operations for handling both web paths (request URIs) and # system paths. # # The main emphasis of the class is on creating clean and secure paths. Clean # paths are void of duplicate parent and current directory references in the # path name. Secure paths are paths which are restricted from accessing # directories outside of a jail path, if specified. # # Since joining two paths can result in an insecure path, this class also # handles the task of joining a parent (start) and child (target) path. # # This class makes no use of path utilities from the Ruby libraries. Instead, # it handles all aspects of path manipulation. The main benefit of # internalizing these operations is that the class is able to handle both posix # and windows paths independent of the operating system on which it runs. This # makes the class both deterministic and easier to test. 
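#
# In addition to joining paths, the class can compute a relative path from an
# absolute path and an absolute base directory (a minimal sketch; the paths
# shown here are hypothetical):
#
#   PathResolver.new.relative_path '/path/to/docs/images/tiger.png', '/path/to/docs'
#   # => 'images/tiger.png'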
# # Examples # # resolver = PathResolver.new # # # Web Paths # # resolver.web_path('images') # => 'images' # # resolver.web_path('./images') # => './images' # # resolver.web_path('/images') # => '/images' # # resolver.web_path('./images/../assets/images') # => './assets/images' # # resolver.web_path('/../images') # => '/images' # # resolver.web_path('images', 'assets') # => 'assets/images' # # resolver.web_path('tiger.png', '../assets/images') # => '../assets/images/tiger.png' # # # System Paths # # resolver.working_dir # => '/path/to/docs' # # resolver.system_path('images') # => '/path/to/docs/images' # # resolver.system_path('../images') # => '/path/to/images' # # resolver.system_path('/etc/images') # => '/etc/images' # # resolver.system_path('images', '/etc') # => '/etc/images' # # resolver.system_path('', '/etc/images') # => '/etc/images' # # resolver.system_path(nil, nil, '/path/to/docs') # => '/path/to/docs' # # resolver.system_path('..', nil, '/path/to/docs') # => '/path/to/docs' # # resolver.system_path('../../../css', nil, '/path/to/docs') # => '/path/to/docs/css' # # resolver.system_path('../../../css', '../../..', '/path/to/docs') # => '/path/to/docs/css' # # resolver.system_path('..', 'C:\\data\\docs\\assets', 'C:\\data\\docs') # => 'C:/data/docs' # # resolver.system_path('..\\..\\css', 'C:\\data\\docs\\assets', 'C:\\data\\docs') # => 'C:/data/docs/css' # # begin # resolver.system_path('../../../css', '../../..', '/path/to/docs', recover: false) # rescue SecurityError => e # puts e.message # end # => 'path ../../../../../../css refers to location outside jail: /path/to/docs (disallowed in safe mode)' # # resolver.system_path('/path/to/docs/images', nil, '/path/to/docs') # => '/path/to/docs/images' # # begin # resolver.system_path('images', '/etc', '/path/to/docs', recover: false) # rescue SecurityError => e # puts e.message # end # => start path /etc is outside of jail: /path/to/docs' # class PathResolver include Logging DOT = '.' DOT_DOT = '..' DOT_SLASH = './' SLASH = '/' BACKSLASH = '\\' DOUBLE_SLASH = '//' URI_CLASSLOADER = 'uri:classloader:' WindowsRootRx = %r(^(?:[a-zA-Z]:)?[\\/]) attr_accessor :file_separator attr_accessor :working_dir # Public: Construct a new instance of PathResolver, optionally specifying the # file separator (to override the system default) and the working directory # (to override the present working directory). The working directory will be # expanded to an absolute path inside the constructor. # # file_separator - the String file separator to use for path operations # (optional, default: File::ALT_SEPARATOR or File::SEPARATOR) # working_dir - the String working directory (optional, default: Dir.pwd) # def initialize file_separator = nil, working_dir = nil @file_separator = file_separator || ::File::ALT_SEPARATOR || ::File::SEPARATOR @working_dir = working_dir ? ((root? working_dir) ? (posixify working_dir) : (::File.expand_path working_dir)) : ::Dir.pwd @_partition_path_sys = {} @_partition_path_web = {} end # Public: Check whether the specified path is an absolute path. # # This operation considers both posix paths and Windows paths. The path does # not have to be posixified beforehand. This operation does not handle URIs. # # Unix absolute paths start with a slash. UNC paths can start with a slash or # backslash. Windows roots can start with a drive letter. # # path - the String path to check # # returns a Boolean indicating whether the path is an absolute root path def absolute_path? path (path.start_with? 
SLASH) || (@file_separator == BACKSLASH && (WindowsRootRx.match? path)) end # Public: Check if the specified path is an absolute root path (or, in the # browser environment, an absolute URI as well) # # This operation considers both POSIX and Windows paths. If the JavaScript IO module # is xmlhttprequest, this operation also considers absolute URIs. If running on JRuby, # this operation also considers classloader URIs (starts with uri:classloader:). # # Unix absolute paths and UNC paths start with slash. Windows roots can # start with a drive letter. When the IO module is xmlhttprequest (Opal # runtime only), an absolute (qualified) URI (starts with file://, http://, # or https://) is also considered to be an absolute path. # # path - the String path to check # # returns a Boolean indicating whether the path is an absolute root path (or # an absolute URI when the JavaScript IO module is xmlhttprequest) if RUBY_ENGINE == 'opal' && ::JAVASCRIPT_IO_MODULE == 'xmlhttprequest' def root? path (absolute_path? path) || (path.start_with? 'file://', 'http://', 'https://') end elsif ::RUBY_ENGINE == 'jruby' def root? path (absolute_path? path) || (path.start_with? URI_CLASSLOADER) end else alias root? absolute_path? end # Public: Determine if the path is a UNC (root) path # # path - the String path to check # # returns a Boolean indicating whether the path is a UNC path def unc? path path.start_with? DOUBLE_SLASH end # Public: Determine if the path is an absolute (root) web path # # path - the String path to check # # returns a Boolean indicating whether the path is an absolute (root) web path def web_root? path path.start_with? SLASH end # Public: Determine whether path descends from base. # # If path equals base, or base is a parent of path, return true. # # path - The String path to check. Can be relative. # base - The String base path to check against. Can be relative. # # returns If path descends from base, return the offset, otherwise false. def descends_from? path, base if base == path 0 elsif base == SLASH (path.start_with? SLASH) && 1 else (path.start_with? base + SLASH) && (base.length + 1) end end # Public: Calculate the relative path to this absolute path from the specified base directory # # If neither path or base are absolute paths, the path is not contained # within the base directory, or the relative path cannot be computed, the # original path is returned work is done. # # path - [String] an absolute filename. # base - [String] an absolute base directory. # # Return the [String] relative path of the specified path calculated from the base directory. def relative_path path, base if root? path if (offset = descends_from? path, base) path.slice offset, path.length else begin (Pathname.new path).relative_path_from(Pathname.new base).to_s rescue path end end else path end end # Public: Normalize path by converting any backslashes to forward slashes # # path - the String path to normalize # # returns a String path with any backslashes replaced with forward slashes def posixify path if path @file_separator == BACKSLASH && (path.include? BACKSLASH) ? (path.tr BACKSLASH, SLASH) : path else '' end end alias posixfy posixify # Public: Expand the specified path by converting the path to a posix path, resolving parent # references (..), and removing self references (.). # # path - the String path to expand # # returns a String path as a posix path with parent references resolved and self references removed. 
# The result will be relative if the path is relative and absolute if the path is absolute. def expand_path path path_segments, path_root = partition_path path if path.include? DOT_DOT resolved_segments = [] path_segments.each do |segment| segment == DOT_DOT ? resolved_segments.pop : resolved_segments << segment end join_path resolved_segments, path_root else join_path path_segments, path_root end end # Public: Partition the path into path segments and remove self references (.) and the trailing # slash, if present. Prior to being partitioned, the path is converted to a posix path. # # Parent references are not resolved by this method since the consumer often needs to handle this # resolution in a certain context (checking for the breach of a jail, for instance). # # path - the String path to partition # web - a Boolean indicating whether the path should be handled # as a web path (optional, default: false) # # Returns a 2-item Array containing the Array of String path segments and the # path root (e.g., '/', './', 'c:/', or '//'), which is nil unless the path is absolute. def partition_path path, web = nil if (result = (cache = web ? @_partition_path_web : @_partition_path_sys)[path]) return result end posix_path = posixify path if web # ex. /sample/path if web_root? posix_path root = SLASH # ex. ./sample/path elsif posix_path.start_with? DOT_SLASH root = DOT_SLASH end # otherwise ex. sample/path elsif root? posix_path # ex. //sample/path if unc? posix_path root = DOUBLE_SLASH # ex. /sample/path elsif posix_path.start_with? SLASH root = SLASH # ex. uri:classloader:sample/path (or uri:classloader:/sample/path) elsif posix_path.start_with? URI_CLASSLOADER root = posix_path.slice 0, URI_CLASSLOADER.length # ex. C:/sample/path (or file:///sample/path in browser environment) else root = posix_path.slice 0, (posix_path.index SLASH) + 1 end # ex. ./sample/path elsif posix_path.start_with? DOT_SLASH root = DOT_SLASH end # otherwise ex. sample/path path_segments = (root ? (posix_path.slice root.length, posix_path.length) : posix_path).split SLASH # strip out all dot entries path_segments.delete DOT cache[path] = [path_segments, root] end # Public: Join the segments using the posix file separator (since Ruby knows # how to work with paths specified this way, regardless of OS). Use the root, # if specified, to construct an absolute path. Otherwise join the segments as # a relative path. # # segments - a String Array of path segments # root - a String path root (optional, default: nil) # # returns a String path formed by joining the segments using the posix file # separator and prepending the root, if specified def join_path segments, root = nil root ? %(#{root}#{segments.join SLASH}) : (segments.join SLASH) end # Public: Securely resolve a system path # # Resolves the target to an absolute path on the current filesystem. The target is assumed to be # relative to the start path, jail path, or working directory (specified in the constructor), in # that order. If a jail path is specified, the resolved path is forced to descend from the jail # path. If a jail path is not provided, the resolved path may be any location on the system. If # the target is an absolute path, use it as is (unless it breaches the jail path). Expands all # parent and self references in the resolved path. 
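#
# For instance (a minimal sketch; the paths shown are hypothetical):
#
#   resolver.system_path 'css/../images/tiger.png', 'assets', '/path/to/docs'
#   # => '/path/to/docs/assets/images/tiger.png'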
# # target - the String target path # start - the String start path from which to resolve a relative target; falls back to jail, if # specified, or the working directory specified in the constructor (default: nil) # jail - the String jail path to which to confine the resolved path, if specified; must be an # absolute path (default: nil) # opts - an optional Hash of options to control processing (default: {}): # * :recover is used to control whether the processor should # automatically recover when an illegal path is encountered # * :target_name is used in messages to refer to the path being resolved # # Returns an absolute String path relative to the start path, if specified, and confined to the # jail path, if specified. The path is posixified and all parent and self references in the path # are expanded. def system_path target, start = nil, jail = nil, opts = {} if jail raise ::SecurityError, %(Jail is not an absolute path: #{jail}) unless root? jail #raise ::SecurityError, %(Jail is not a canonical path: #{jail}) if jail.include? DOT_DOT jail = posixify jail end if target if root? target target_path = expand_path target if jail && !(descends_from? target_path, jail) if opts.fetch :recover, true logger.warn %(#{opts[:target_name] || 'path'} is outside of jail; recovering automatically) target_segments, = partition_path target_path jail_segments, jail_root = partition_path jail return join_path jail_segments + target_segments, jail_root else raise ::SecurityError, %(#{opts[:target_name] || 'path'} #{target} is outside of jail: #{jail} (disallowed in safe mode)) end end return target_path else target_segments, = partition_path target end else target_segments = [] end if target_segments.empty? if start.nil_or_empty? return jail || @working_dir elsif root? start if jail start = posixify start else return expand_path start end else target_segments, = partition_path start start = jail || @working_dir end elsif start.nil_or_empty? start = jail || @working_dir elsif root? start start = posixify start if jail else #start = system_path start, jail, jail, opts start = %(#{(jail || @working_dir).chomp '/'}/#{start}) end # both jail and start have been posixified at this point if jail is set if jail && (recheck = !(descends_from? start, jail)) && @file_separator == BACKSLASH start_segments, start_root = partition_path start jail_segments, jail_root = partition_path jail if start_root != jail_root if opts.fetch :recover, true logger.warn %(start path for #{opts[:target_name] || 'path'} is outside of jail root; recovering automatically) start_segments = jail_segments recheck = false else raise ::SecurityError, %(start path for #{opts[:target_name] || 'path'} #{start} refers to location outside jail root: #{jail} (disallowed in safe mode)) end end else start_segments, jail_root = partition_path start end if (resolved_segments = start_segments + target_segments).include? 
DOT_DOT unresolved_segments, resolved_segments = resolved_segments, [] if jail jail_segments, = partition_path jail unless jail_segments warned = false unresolved_segments.each do |segment| if segment == DOT_DOT if resolved_segments.size > jail_segments.size resolved_segments.pop elsif opts.fetch :recover, true unless warned logger.warn %(#{opts[:target_name] || 'path'} has illegal reference to ancestor of jail; recovering automatically) warned = true end else raise ::SecurityError, %(#{opts[:target_name] || 'path'} #{target} refers to location outside jail: #{jail} (disallowed in safe mode)) end else resolved_segments << segment end end else unresolved_segments.each do |segment| segment == DOT_DOT ? resolved_segments.pop : resolved_segments << segment end end end if recheck target_path = join_path resolved_segments, jail_root if descends_from? target_path, jail target_path elsif opts.fetch :recover, true logger.warn %(#{opts[:target_name] || 'path'} is outside of jail; recovering automatically) jail_segments, = partition_path jail unless jail_segments join_path jail_segments + target_segments, jail_root else raise ::SecurityError, %(#{opts[:target_name] || 'path'} #{target} is outside of jail: #{jail} (disallowed in safe mode)) end else join_path resolved_segments, jail_root end end # Public: Resolve a web path from the target and start paths. # The main function of this operation is to resolve any parent # references and remove any self references. # # The target is assumed to be a path, not a qualified URI. # That check should happen before this method is invoked. # # target - the String target path # start - the String start (i.e., parent) path # # returns a String path that joins the target path with the # start path with any parent references resolved and self # references removed def web_path target, start = nil target = posixify target start = posixify start unless start.nil_or_empty? || (web_root? target) target, uri_prefix = extract_uri_prefix %(#{start}#{(start.end_with? SLASH) ? '' : SLASH}#{target}) end # use this logic instead if we want to normalize target if it contains a URI #unless web_root? target # target, uri_prefix = extract_uri_prefix target if preserve_uri_target # target, uri_prefix = extract_uri_prefix %(#{start}#{SLASH}#{target}) unless uri_prefix || start.nil_or_empty? #end target_segments, target_root = partition_path target, true resolved_segments = [] target_segments.each do |segment| if segment == DOT_DOT if resolved_segments.empty? resolved_segments << segment unless target_root && target_root != DOT_SLASH elsif resolved_segments[-1] == DOT_DOT resolved_segments << segment else resolved_segments.pop end else resolved_segments << segment # checking for empty would eliminate repeating forward slashes #resolved_segments << segment unless segment.empty? end end if (resolved_path = join_path resolved_segments, target_root).include? ' ' resolved_path = resolved_path.gsub ' ', '%20' end uri_prefix ? %(#{uri_prefix}#{resolved_path}) : resolved_path end private # Internal: Efficiently extracts the URI prefix from the specified String if the String is a URI # # Uses the Asciidoctor::UriSniffRx regex to match the URI prefix in the specified String (e.g., http://). If present, # the prefix is removed. # # str - the String to check # # returns a tuple containing the specified string without the URI prefix, if present, and the extracted URI prefix. def extract_uri_prefix str if (str.include? 
':') && UriSniffRx =~ str [(str.slice $&.length, str.length), $&] else str end end end end asciidoctor-2.0.20/lib/asciidoctor/reader.rb000066400000000000000000001423221443135032600207430ustar00rootroot00000000000000# frozen_string_literal: true module Asciidoctor # Public: Methods for retrieving lines from AsciiDoc source files class Reader include Logging class Cursor attr_reader :file, :dir, :path, :lineno def initialize file, dir = nil, path = nil, lineno = 1 @file, @dir, @path, @lineno = file, dir, path, lineno end def advance num @lineno += num end def line_info %(#{@path}: line #{@lineno}) end alias to_s line_info end attr_reader :file attr_reader :dir attr_reader :path # Public: Get the 1-based offset of the current line. attr_reader :lineno # Public: Get the document source as a String Array of lines. attr_reader :source_lines # Public: Control whether lines are processed using Reader#process_line on first visit (default: true) attr_accessor :process_lines # Public: Indicates that the end of the reader was reached with a delimited block still open. attr_accessor :unterminated # Public: Initialize the Reader object def initialize data = nil, cursor = nil, opts = {} if !cursor @file = nil @dir = '.' @path = '' @lineno = 1 elsif ::String === cursor @file = cursor @dir, @path = ::File.split @file @lineno = 1 else if (@file = cursor.file) @dir = cursor.dir || (::File.dirname @file) @path = cursor.path || (::File.basename @file) else @dir = cursor.dir || '.' @path = cursor.path || '' end @lineno = cursor.lineno || 1 end @lines = (@source_lines = prepare_lines data, opts).reverse @mark = nil @look_ahead = 0 @process_lines = true @unescape_next_line = false @unterminated = nil @saved = nil end # Public: Check whether there are any lines left to read. # # If a previous call to this method resulted in a value of false, # immediately returned the cached value. Otherwise, delegate to # peek_line to determine if there is a next line available. # # Returns True if there are more lines, False if there are not. def has_more_lines? if @lines.empty? @look_ahead = 0 false else true end end # Public: Check whether this reader is empty (contains no lines) # # Returns true if there are no more lines to peek, otherwise false. def empty? if @lines.empty? @look_ahead = 0 true else false end end alias eof? empty? # Public: Peek at the next line and check if it's empty (i.e., whitespace only) # # This method Does not consume the line from the stack. # # Returns True if the there are no more lines or if the next line is empty def next_line_empty? peek_line.nil_or_empty? end # Public: Peek at the next line of source data. Processes the line if not # already marked as processed, but does not consume it. # # This method will probe the reader for more lines. If there is a next line # that has not previously been visited, the line is passed to the # Reader#process_line method to be initialized. This call gives # sub-classes the opportunity to do preprocessing. If the return value of # the Reader#process_line is nil, the data is assumed to be changed and # Reader#peek_line is invoked again to perform further processing. # # If has_more_lines? is called immediately before peek_line, the direct flag # is implicitly true (since the line is flagged as visited). # # direct - A Boolean flag to bypasses the check for more lines and immediately # returns the first element of the internal @lines Array. (default: false) # # Returns the next line of the source data as a String if there are lines remaining. 
# Returns nothing if there is no more data. def peek_line direct = false while true next_line = @lines[-1] if direct || @look_ahead > 0 return @unescape_next_line ? (next_line.slice 1, next_line.length) : next_line elsif next_line # FIXME the problem with this approach is that we aren't # retaining the modified line (hence the @unescape_next_line tweak) # perhaps we need a stack of proxied lines if (line = process_line next_line) return line end else @look_ahead = 0 return end end end # Public: Peek at the next multiple lines of source data. Processes the lines if not # already marked as processed, but does not consume them. # # This method delegates to Reader#read_line to process and collect the line, then # restores the lines to the stack before returning them. This allows the lines to # be processed and marked as such so that subsequent reads will not need to process # the lines again. # # num - The positive Integer number of lines to peek or nil to peek all lines (default: nil). # direct - A Boolean indicating whether processing should be disabled when reading lines (default: false). # # Returns A String Array of the next multiple lines of source data, or an empty Array # if there are no more lines in this Reader. def peek_lines num = nil, direct = false old_look_ahead = @look_ahead result = [] (num || MAX_INT).times do if (line = direct ? shift : read_line) result << line else @lineno -= 1 if direct break end end unless result.empty? unshift_all result @look_ahead = old_look_ahead if direct end result end # Public: Get the next line of source data. Consumes the line returned. # # Returns the String of the next line of the source data if data is present. # Returns nothing if there is no more data. def read_line # has_more_lines? triggers preprocessor shift if @look_ahead > 0 || has_more_lines? end # Public: Get the remaining lines of source data. # # This method calls Reader#read_line repeatedly until all lines are consumed # and returns the lines as a String Array. This method differs from # Reader#lines in that it processes each line in turn, hence triggering # any preprocessors implemented in sub-classes. # # Returns the lines read as a String Array def read_lines lines = [] # has_more_lines? triggers preprocessor lines << shift while has_more_lines? lines end alias readlines read_lines # Public: Get the remaining lines of source data joined as a String. # # Delegates to Reader#read_lines, then joins the result. # # Returns the lines read joined as a String def read read_lines.join LF end # Public: Advance to the next line by discarding the line at the front of the stack # # Returns a Boolean indicating whether there was a line to discard. def advance shift ? true : false end # Public: Push the String line onto the beginning of the Array of source data. # # A line pushed on the reader using this method is not processed again. The # method assumes the line was previously retrieved from the reader or does # not otherwise contain preprocessor directives. Therefore, it is marked as # processed immediately. # # line_to_restore - the line to restore onto the stack # # Returns nothing. def unshift_line line_to_restore unshift line_to_restore nil end alias restore_line unshift_line # Public: Push an Array of lines onto the front of the Array of source data. # # Lines pushed on the reader using this method are not processed again. The # method assumes the lines were previously retrieved from the reader or do # not otherwise contain preprocessor directives. 
Therefore, they are marked # as processed immediately. # # Returns nothing. def unshift_lines lines_to_restore unshift_all lines_to_restore end alias restore_lines unshift_lines # Public: Replace the next line with the specified line. # # Calls Reader#advance to consume the current line, then calls # Reader#unshift to push the replacement onto the top of the # line stack. # # replacement - The String line to put in place of the next line (i.e., the line at the cursor). # # Returns true. def replace_next_line replacement shift unshift replacement true end # deprecated alias replace_line replace_next_line # Public: Skip blank lines at the cursor. # # Examples # # reader.lines # => ["", "", "Foo", "Bar", ""] # reader.skip_blank_lines # => 2 # reader.lines # => ["Foo", "Bar", ""] # # Returns the [Integer] number of lines skipped or nothing if all lines have # been consumed (even if lines were skipped by this method). def skip_blank_lines return if empty? num_skipped = 0 # optimized code for shortest execution path while (next_line = peek_line) if next_line.empty? shift num_skipped += 1 else return num_skipped end end end # Public: Skip consecutive comment lines and block comments. # # Examples # @lines # => ["// foo", "bar"] # # comment_lines = skip_comment_lines # => nil # # @lines # => ["bar"] # # Returns nothing def skip_comment_lines return if empty? while (next_line = peek_line) && !next_line.empty? if next_line.start_with? '//' if next_line.start_with? '///' if (ll = next_line.length) > 3 && next_line == '/' * ll read_lines_until terminator: next_line, skip_first_line: true, read_last_line: true, skip_processing: true, context: :comment else break end else shift end else break end end nil end # Public: Skip consecutive comment lines and return them. # # This method assumes the reader only contains simple lines (no blocks). def skip_line_comments return [] if empty? comment_lines = [] # optimized code for shortest execution path while (next_line = peek_line) && !next_line.empty? if next_line.start_with? '//' comment_lines << shift else break end end comment_lines end # Public: Advance to the end of the reader, consuming all remaining lines # # Returns nothing. def terminate @lineno += @lines.size @lines.clear @look_ahead = 0 nil end # Public: Return all the lines from `@lines` until we (1) run out them, # (2) find a blank line with `break_on_blank_lines: true`, or (3) find # a line for which the given block evals to true. # # options - an optional Hash of processing options: # * :terminator may be used to specify the contents of the line # at which the reader should stop # * :break_on_blank_lines may be used to specify to break on # blank lines # * :break_on_list_continuation may be used to specify to break # on a list continuation line # * :skip_first_line may be used to tell the reader to advance # beyond the first line before beginning the scan # * :preserve_last_line may be used to specify that the String # causing the method to stop processing lines should be # pushed back onto the `lines` Array. # * :read_last_line may be used to specify that the String # causing the method to stop processing lines should be # included in the lines being returned # * :skip_line_comments may be used to look for and skip # line comments # * :skip_processing is used to disable line (pre)processing # for the duration of this method # # Returns the Array of lines forming the next segment. 
# # Examples # # data = [ # "First line\n", # "Second line\n", # "\n", # "Third line\n", # ] # reader = Reader.new data, nil, normalize: true # # reader.read_lines_until # => ["First line", "Second line"] def read_lines_until options = {} result = [] if @process_lines && options[:skip_processing] @process_lines = false restore_process_lines = true end if (terminator = options[:terminator]) start_cursor = options[:cursor] || cursor break_on_blank_lines = false break_on_list_continuation = false else break_on_blank_lines = options[:break_on_blank_lines] break_on_list_continuation = options[:break_on_list_continuation] end skip_comments = options[:skip_line_comments] line_read = line_restored = nil shift if options[:skip_first_line] while (line = read_line) if terminator ? line == terminator : ((break_on_blank_lines && line.empty?) || (break_on_list_continuation && line_read && line == LIST_CONTINUATION && (options[:preserve_last_line] = true)) || (block_given? && (yield line))) result << line if options[:read_last_line] if options[:preserve_last_line] unshift line line_restored = true end break end unless skip_comments && (line.start_with? '//') && !(line.start_with? '///') result << line line_read = true end end if restore_process_lines @process_lines = true @look_ahead -= 1 if line_restored && !terminator end if terminator && terminator != line && (context = options.fetch :context, terminator) start_cursor = cursor_at_mark if start_cursor == :at_mark logger.warn message_with_context %(unterminated #{context} block), source_location: start_cursor @unterminated = true end result end # Internal: Shift the line off the stack and increment the lineno # # This method can be used directly when you've already called peek_line # and determined that you do, in fact, want to pluck that line off the stack. # Use read_line if the line hasn't (or many not have been) visited yet. # # Returns The String line at the top of the stack def shift @lineno += 1 @look_ahead -= 1 unless @look_ahead == 0 @lines.pop end # Internal: Restore the line to the stack and decrement the lineno def unshift line @lineno -= 1 @look_ahead += 1 @lines.push line nil end if ::RUBY_ENGINE == 'jruby' # Internal: Restore the lines to the stack and decrement the lineno def unshift_all lines_to_restore @lineno -= lines_to_restore.size @look_ahead += lines_to_restore.size if lines_to_restore.respond_to? :reverse @lines.push(*lines_to_restore.reverse) else lines_to_restore.reverse_each {|it| @lines.push it } end nil end else # Internal: Restore the lines to the stack and decrement the lineno def unshift_all lines_to_restore @lineno -= lines_to_restore.size @look_ahead += lines_to_restore.size @lines.push(*lines_to_restore.reverse) nil end end def cursor Cursor.new @file, @dir, @path, @lineno end def cursor_at_line lineno Cursor.new @file, @dir, @path, lineno end def cursor_at_mark @mark ? Cursor.new(*@mark) : cursor end def cursor_before_mark if @mark m_file, m_dir, m_path, m_lineno = @mark Cursor.new m_file, m_dir, m_path, m_lineno - 1 else Cursor.new @file, @dir, @path, @lineno - 1 end end def cursor_at_prev_line Cursor.new @file, @dir, @path, @lineno - 1 end def mark @mark = @file, @dir, @path, @lineno end # Public: Get information about the last line read, including file name and line number. 
# # Returns A String summary of the last line read def line_info %(#{@path}: line #{@lineno}) end # Public: Get a copy of the remaining Array of String lines managed by this Reader # # Returns A copy of the String Array of lines remaining in this Reader def lines @lines.reverse end # Public: Get a copy of the remaining lines managed by this Reader joined as a String def string @lines.reverse.join LF end # Public: Get the source lines for this Reader joined as a String def source @source_lines.join LF end # Internal: Save the state of the reader at cursor def save @saved = {}.tap do |accum| instance_variables.each do |name| unless name == :@saved || name == :@source_lines accum[name] = ::Array === (val = instance_variable_get name) ? (val.drop 0) : val end end end nil end # Internal: Restore the state of the reader at cursor def restore_save if @saved @saved.each do |name, val| instance_variable_set name, val end @saved = nil end end # Internal: Discard a previous saved state def discard_save @saved = nil end def to_s %(#<#{self.class}@#{object_id} {path: #{@path.inspect}, line: #{@lineno}}>) end private # Internal: Prepare the source data for parsing. # # Converts the source data into an Array of lines ready for parsing. If the +:normalize+ option is set, this method # coerces the encoding of each line to UTF-8 and strips trailing whitespace, including the newline. (This whitespace # cleaning is very important to how Asciidoctor works). Subclasses may choose to perform additional preparation. # # data - A String Array or String of source data to be normalized. # opts - A Hash of options to control how lines are prepared. # :normalize - Enables line normalization, which coerces the encoding to UTF-8 and removes trailing whitespace; # :rstrip removes all trailing whitespace; :chomp removes trailing newline only (optional, not set). # # Returns A String Array of source lines. If the source data is an Array, this method returns a copy. def prepare_lines data, opts = {} if (normalize = opts[:normalize]) ::Array === data ? (Helpers.prepare_source_array data, normalize != :chomp) : (Helpers.prepare_source_string data, normalize != :chomp) elsif ::Array === data data.drop 0 elsif data data.chomp.split LF, -1 else [] end rescue if (::Array === data ? data.join : data.to_s).valid_encoding? raise else raise ::ArgumentError, 'source is either binary or contains invalid Unicode data' end end # Internal: Processes a previously unvisited line # # By default, this method marks the line as processed # by incrementing the look_ahead counter and returns # the line unmodified. # # Returns The String line the Reader should make available to the next # invocation of Reader#read_line or nil if the Reader should drop the line, # advance to the next line and process it. def process_line line @look_ahead += 1 if @process_lines line end end # Public: Methods for retrieving lines from AsciiDoc source files, evaluating preprocessor # directives as each line is read off the Array of lines. 
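#
# Example usage (a minimal, hypothetical sketch rather than code taken from this library; it
# assumes `doc` is an already-loaded Asciidoctor::Document and that a file named 'sample.adoc'
# exists — the constructor signature used here matches the initialize method defined below):
#
#   data = File.read 'sample.adoc'
#   reader = Asciidoctor::PreprocessorReader.new doc, data, 'sample.adoc', normalize: true
#   reader.read_lines
#   # => all source lines, with include::[] targets read in and ifdef/ifndef/ifeval blocks applied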
class PreprocessorReader < Reader attr_reader :include_stack # Public: Initialize the PreprocessorReader object def initialize document, data = nil, cursor = nil, opts = {} @document = document super data, cursor, opts if (default_include_depth = (document.attributes['max-include-depth'] || 64).to_i) > 0 # track absolute max depth, current max depth for comparing to include stack size, and relative max depth for reporting @maxdepth = { abs: default_include_depth, curr: default_include_depth, rel: default_include_depth } else # if @maxdepth is not set, built-in include functionality is disabled @maxdepth = nil end @include_stack = [] @includes = document.catalog[:includes] @skipping = false @conditional_stack = [] @include_processor_extensions = nil end # (see Reader#has_more_lines?) def has_more_lines? peek_line ? true : false end # (see Reader#empty?) def empty? peek_line ? false : true end alias eof? empty? # Public: Override the Reader#peek_line method to pop the include # stack if the last line has been reached and there's at least # one include on the stack. # # Returns the next line of the source data as a String if there are lines remaining # in the current include context or a parent include context. # Returns nothing if there are no more lines remaining and the include stack is empty. def peek_line direct = false if (line = super) line elsif @include_stack.empty? nil else pop_include peek_line direct end end # Public: Push source onto the front of the reader and switch the context # based on the file, document-relative path and line information given. # # This method is typically used in an IncludeProcessor to add source # read from the target specified. # # Examples # # path = 'partial.adoc' # file = File.expand_path path # data = File.read file # reader.push_include data, file, path # # Returns this Reader object. def push_include data, file = nil, path = nil, lineno = 1, attributes = {} @include_stack << [@lines, @file, @dir, @path, @lineno, @maxdepth, @process_lines] if (@file = file) # NOTE if file is not a string, assume it's a URI if ::String === file @dir = ::File.dirname file elsif RUBY_ENGINE_OPAL @dir = ::URI.parse ::File.dirname(file = file.to_s) else # NOTE this intentionally throws an error if URI has no path (@dir = file.dup).path = (dir = ::File.dirname file.path) == '/' ? '' : dir file = file.to_s end @path = (path ||= ::File.basename file) # only process lines in AsciiDoc files if (@process_lines = file.end_with?(*ASCIIDOC_EXTENSIONS.keys)) # NOTE registering the include with a nil value tracks it while not making it visible to interdocument xrefs @includes[path.slice 0, (path.rindex '.')] ||= attributes['partial-option'] ? nil : true end else @dir = '.' # we don't know what file type we have, so assume AsciiDoc @process_lines = true if (@path = path) # NOTE registering the include with a nil value tracks it while not making it visible to interdocument xrefs @includes[Helpers.rootname path] ||= attributes['partial-option'] ? nil : true else @path = '' end end @lineno = lineno if @maxdepth && (attributes.key? 
'depth') if (rel_maxdepth = attributes['depth'].to_i) > 0 if (curr_maxdepth = @include_stack.size + rel_maxdepth) > (abs_maxdepth = @maxdepth[:abs]) # if relative depth exceeds absolute max depth, effectively ignore relative depth request curr_maxdepth = rel_maxdepth = abs_maxdepth end @maxdepth = { abs: abs_maxdepth, curr: curr_maxdepth, rel: rel_maxdepth } else @maxdepth = { abs: @maxdepth[:abs], curr: @include_stack.size, rel: 0 } end end # effectively fill the buffer if (@lines = prepare_lines data, normalize: @process_lines || :chomp, condense: false, indent: attributes['indent']).empty? pop_include else # FIXME we eventually want to handle leveloffset without affecting the lines if attributes.key? 'leveloffset' @lines = [((leveloffset = @document.attr 'leveloffset') ? %(:leveloffset: #{leveloffset}) : ':leveloffset!:'), ''] + @lines.reverse + ['', %(:leveloffset: #{attributes['leveloffset']})] # compensate for these extra lines at the top @lineno -= 2 else @lines.reverse! end # FIXME kind of a hack #Document::AttributeEntry.new('infile', @file).save_to_next_block @document #Document::AttributeEntry.new('indir', @dir).save_to_next_block @document @look_ahead = 0 end self end def include_depth @include_stack.size end # Public: Reports whether pushing an include on the include stack exceeds the max include depth. # # Returns nil if no max depth is set and includes are disabled (max-include-depth=0), false if the current max depth # will not be exceeded, and the relative max include depth if the current max depth will be exceed. def exceeds_max_depth? @maxdepth && @include_stack.size >= @maxdepth[:curr] && @maxdepth[:rel] end alias exceeded_max_depth? exceeds_max_depth? # TODO Document this override # also, we now have the field in the super class, so perhaps # just implement the logic there? def shift if @unescape_next_line @unescape_next_line = false (line = super).slice 1, line.length else super end end def include_processors? if @include_processor_extensions.nil? if @document.extensions? && @document.extensions.include_processors? !!(@include_processor_extensions = @document.extensions.include_processors) else @include_processor_extensions = false end else @include_processor_extensions != false end end def create_include_cursor file, path, lineno if ::String === file dir = ::File.dirname file elsif RUBY_ENGINE_OPAL dir = ::File.dirname(file = file.to_s) else dir = (dir = ::File.dirname file.path) == '' ? '/' : dir file = file.to_s end Cursor.new file, dir, path, lineno end def to_s %(#<#{self.class}@#{object_id} {path: #{@path.inspect}, line: #{@lineno}, include depth: #{@include_stack.size}, include stack: [#{@include_stack.map {|inc| inc.to_s }.join ', '}]}>) end private def prepare_lines data, opts = {} result = super # QUESTION should this work for AsciiDoc table cell content? Currently it does not. if @document && @document.attributes['skip-front-matter'] && (front_matter = skip_front_matter! result) @document.attributes['front-matter'] = front_matter.join LF end if opts.fetch :condense, true result.pop while (last = result[-1]) && last.empty? end Parser.adjust_indentation! result, opts[:indent].to_i, (@document.attr 'tabsize').to_i if opts[:indent] result end def process_line line return line unless @process_lines if line.empty? @look_ahead += 1 return line end # NOTE highly optimized if line.end_with?(']') && !line.start_with?('[') && line.include?('::') if (line.include? 
'if') && ConditionalDirectiveRx =~ line # if escaped, mark as processed and return line unescaped if $1 == '\\' @unescape_next_line = true @look_ahead += 1 line.slice 1, line.length elsif preprocess_conditional_directive $2, $3, $4, $5 # move the pointer past the conditional line shift # treat next line as uncharted territory nil else # the line was not a valid conditional line # mark it as visited and return it @look_ahead += 1 line end elsif @skipping shift nil elsif (line.start_with? 'inc', '\\inc') && IncludeDirectiveRx =~ line # if escaped, mark as processed and return line unescaped if $1 == '\\' @unescape_next_line = true @look_ahead += 1 line.slice 1, line.length # QUESTION should we strip whitespace from raw attributes in Substitutors#parse_attributes? (check perf) elsif preprocess_include_directive $2, $3 # peek again since the content has changed nil else # the line was not a valid include line and is unchanged # mark it as visited and return it @look_ahead += 1 line end else # NOTE optimization to inline super @look_ahead += 1 line end elsif @skipping shift nil else # NOTE optimization to inline super @look_ahead += 1 line end end # Internal: Preprocess the directive to conditionally include or exclude content. # # Preprocess the conditional directive (ifdef, ifndef, ifeval, endif) under # the cursor. If Reader is currently skipping content, then simply track the # open and close delimiters of any nested conditional blocks. If Reader is # not skipping, mark whether the condition is satisfied and continue # preprocessing recursively until the next line of available content is # found. # # keyword - The conditional inclusion directive (ifdef, ifndef, ifeval, endif) # target - The target, which is the name of one or more attributes that are # used in the condition (blank in the case of the ifeval directive) # delimiter - The conditional delimiter for multiple attributes ('+' means all # attributes must be defined or undefined, ',' means any of the attributes # can be defined or undefined. # text - The text associated with this directive (occurring between the square brackets) # Used for a single-line conditional block in the case of the ifdef or # ifndef directives, and for the conditional expression for the ifeval directive. # # Returns a Boolean indicating whether the cursor should be advanced def preprocess_conditional_directive keyword, target, delimiter, text # attributes are case insensitive target = target.downcase unless (no_target = target.empty?) if keyword == 'endif' if text logger.error message_with_context %(malformed preprocessor directive - text not permitted: endif::#{target}[#{text}]), source_location: cursor elsif @conditional_stack.empty? logger.error message_with_context %(unmatched preprocessor directive: endif::#{target}[]), source_location: cursor elsif no_target || target == (pair = @conditional_stack[-1])[:target] @conditional_stack.pop @skipping = @conditional_stack.empty? ? false : @conditional_stack[-1][:skipping] else logger.error message_with_context %(mismatched preprocessor directive: endif::#{target}[], expected endif::#{pair[:target]}[]), source_location: cursor end return true elsif @skipping skip = false else # QUESTION any way to wrap ifdef & ifndef logic up together? case keyword when 'ifdef' if no_target logger.error message_with_context %(malformed preprocessor directive - missing target: ifdef::[#{text}]), source_location: cursor return true end case delimiter when ',' # skip if no attribute is defined skip = target.split(',', -1).none? 
{|name| @document.attributes.key? name } when '+' # skip if any attribute is undefined skip = target.split('+', -1).any? {|name| !@document.attributes.key? name } else # if the attribute is undefined, then skip skip = !@document.attributes.key?(target) end when 'ifndef' if no_target logger.error message_with_context %(malformed preprocessor directive - missing target: ifndef::[#{text}]), source_location: cursor return true end case delimiter when ',' # skip if any attribute is defined skip = target.split(',', -1).any? {|name| @document.attributes.key? name } when '+' # skip if all attributes are defined skip = target.split('+', -1).all? {|name| @document.attributes.key? name } else # if the attribute is defined, then skip skip = @document.attributes.key?(target) end when 'ifeval' if no_target # the text in brackets must match a conditional expression if text && EvalExpressionRx =~ text.strip # NOTE assignments must happen before call to resolve_expr_val for compatibility with Opal lhs = $1 # regex enforces a restricted set of math-related operations (==, !=, <=, >=, <, >) op = $2 rhs = $3 skip = ((resolve_expr_val lhs).send op, (resolve_expr_val rhs)) ? false : true rescue true else logger.error message_with_context %(malformed preprocessor directive - #{text ? 'invalid expression' : 'missing expression'}: ifeval::[#{text}]), source_location: cursor return true end else logger.error message_with_context %(malformed preprocessor directive - target not permitted: ifeval::#{target}[#{text}]), source_location: cursor return true end end end # conditional inclusion block if keyword == 'ifeval' || !text @skipping = true if skip @conditional_stack << { target: target, skip: skip, skipping: @skipping } # single line conditional inclusion else unless @skipping || skip replace_next_line text.rstrip # HACK push dummy line to stand in for the opening conditional directive that's subsequently dropped unshift '' # NOTE force line to be processed again if it looks like an include directive # QUESTION should we just call preprocess_include_directive here? @look_ahead -= 1 if text.start_with? 'include::' end end true end # Internal: Preprocess the directive to include lines from another document. # # Preprocess the directive to include the target document. The scenarios # are as follows: # # If SafeMode is SECURE or greater, the directive is ignore and the include # directive line is emitted verbatim. # # Otherwise, if an include processor is specified pass the target and # attributes to that processor and expect an Array of String lines in return. # # Otherwise, if the max depth is greater than 0, and is not exceeded by the # stack size, normalize the target path and read the lines onto the beginning # of the Array of source data. # # If none of the above apply, emit the include directive line verbatim. # # target - The unsubstituted String name of the target document to include as specified in the # target slot of the include directive. # attrlist - An attribute list String, which is the text between the square brackets of the # include directive. # # Returns a [Boolean] indicating whether the line under the cursor was changed. To skip over the # directive, call shift and return true. def preprocess_include_directive target, attrlist doc = @document if ((expanded_target = target).include? ATTR_REF_HEAD) && (expanded_target = doc.sub_attributes target, attribute_missing: ((attr_missing = doc.attributes['attribute-missing'] || Compliance.attribute_missing) == 'warn' ? 'drop-line' : attr_missing)).empty? 
if attr_missing == 'drop-line' && (doc.sub_attributes target + ' ', attribute_missing: 'drop-line', drop_line_severity: :ignore).empty? logger.info { message_with_context %(include dropped due to missing attribute: include::#{target}[#{attrlist}]), source_location: cursor } shift true elsif (doc.parse_attributes attrlist, [], sub_input: true)['optional-option'] logger.info { message_with_context %(optional include dropped #{attr_missing == 'warn' && (doc.sub_attributes target + ' ', attribute_missing: 'drop-line', drop_line_severity: :ignore).empty? ? 'due to missing attribute' : 'because resolved target is blank'}: include::#{target}[#{attrlist}]), source_location: cursor } shift true else logger.warn message_with_context %(include dropped #{attr_missing == 'warn' && (doc.sub_attributes target + ' ', attribute_missing: 'drop-line', drop_line_severity: :ignore).empty? ? 'due to missing attribute' : 'because resolved target is blank'}: include::#{target}[#{attrlist}]), source_location: cursor # QUESTION should this line include target or expanded_target (or escaped target?) replace_next_line %(Unresolved directive in #{@path} - include::#{target}[#{attrlist}]) end elsif include_processors? && (ext = @include_processor_extensions.find {|candidate| candidate.instance.handles? expanded_target }) shift # FIXME parse attributes only if requested by extension ext.process_method[doc, self, expanded_target, (doc.parse_attributes attrlist, [], sub_input: true)] true # if running in SafeMode::SECURE or greater, don't process this directive # however, be friendly and at least make it a link to the source document elsif doc.safe >= SafeMode::SECURE # FIXME we don't want to use a link macro if we are in a verbatim context replace_next_line %(link:#{expanded_target}[role=include]) elsif @maxdepth if @include_stack.size >= @maxdepth[:curr] logger.error message_with_context %(maximum include depth of #{@maxdepth[:rel]} exceeded), source_location: cursor return end parsed_attrs = doc.parse_attributes attrlist, [], sub_input: true inc_path, target_type, relpath = resolve_include_path expanded_target, attrlist, parsed_attrs case target_type when :file reader = ::File.method :open read_mode = FILE_READ_MODE when :uri reader = ::OpenURI.method :open_uri read_mode = URI_READ_MODE else # NOTE if target_type is not set, inc_path is a boolean to skip over (false) or reevaluate (true) the current line return inc_path end if (enc = parsed_attrs['encoding']) && (::Encoding.find enc rescue nil) (read_mode_params = read_mode.split ':')[1] = enc read_mode = read_mode_params.join ':' end unless RUBY_ENGINE_OPAL inc_linenos = inc_tags = nil # NOTE attrlist is nil if missing from include directive if attrlist if parsed_attrs.key? 'lines' inc_linenos = [] (split_delimited_value parsed_attrs['lines']).each do |linedef| if linedef.include? '..' from, _, to = linedef.partition '..' inc_linenos += (to.empty? || (to = to.to_i) < 0) ? [from.to_i, ::Float::INFINITY] : (from.to_i..to).to_a else inc_linenos << linedef.to_i end end inc_linenos = inc_linenos.empty? ? nil : inc_linenos.sort.uniq elsif parsed_attrs.key? 'tag' unless (tag = parsed_attrs['tag']).empty? || tag == '!' inc_tags = (tag.start_with? '!') ? { (tag.slice 1, tag.length) => false } : { tag => true } end elsif parsed_attrs.key? 'tags' inc_tags = {} (split_delimited_value parsed_attrs['tags']).each do |tagdef| if tagdef.start_with? '!' inc_tags[tagdef.slice 1, tagdef.length] = false else inc_tags[tagdef] = true end unless tagdef.empty? || tagdef == '!' 
end inc_tags = nil if inc_tags.empty? end end if inc_linenos inc_lines, inc_offset, inc_lineno = [], nil, 0 begin reader.call inc_path, read_mode do |f| select_remaining = nil f.each_line do |l| inc_lineno += 1 if select_remaining || (::Float === (select = inc_linenos[0]) && (select_remaining = select.infinite?)) # NOTE record line where we started selecting inc_offset ||= inc_lineno inc_lines << l else if select == inc_lineno # NOTE record line where we started selecting inc_offset ||= inc_lineno inc_lines << l inc_linenos.shift end break if inc_linenos.empty? end end end rescue logger.error message_with_context %(include #{target_type} not readable: #{inc_path}), source_location: cursor return replace_next_line %(Unresolved directive in #{@path} - include::#{expanded_target}[#{attrlist}]) end shift # FIXME not accounting for skipped lines in reader line numbering if inc_offset parsed_attrs['partial-option'] = '' push_include inc_lines, inc_path, relpath, inc_offset, parsed_attrs end elsif inc_tags inc_lines, inc_offset, inc_lineno, tag_stack, tags_selected, active_tag = [], nil, 0, [], ::Set.new, nil if inc_tags.key? '**' select = base_select = inc_tags.delete '**' if inc_tags.key? '*' wildcard = inc_tags.delete '*' elsif !select && inc_tags.values.first == false wildcard = true end elsif inc_tags.key? '*' if inc_tags.keys.first == '*' select = base_select = !(wildcard = inc_tags.delete '*') else select = base_select = false wildcard = inc_tags.delete '*' end else select = base_select = !(inc_tags.value? true) end begin reader.call inc_path, read_mode do |f| dbl_co, dbl_sb = '::', '[]' f.each_line do |l| inc_lineno += 1 if (l.include? dbl_co) && (l.include? dbl_sb) && TagDirectiveRx =~ l this_tag = $2 if $1 # end tag if this_tag == active_tag tag_stack.pop active_tag, select = tag_stack.empty? ? [nil, base_select] : tag_stack[-1] elsif inc_tags.key? this_tag include_cursor = create_include_cursor inc_path, expanded_target, inc_lineno if (idx = tag_stack.rindex {|key,| key == this_tag }) idx == 0 ? tag_stack.shift : (tag_stack.delete_at idx) logger.warn message_with_context %(mismatched end tag (expected '#{active_tag}' but found '#{this_tag}') at line #{inc_lineno} of include #{target_type}: #{inc_path}), source_location: cursor, include_location: include_cursor else logger.warn message_with_context %(unexpected end tag '#{this_tag}' at line #{inc_lineno} of include #{target_type}: #{inc_path}), source_location: cursor, include_location: include_cursor end end elsif inc_tags.key? this_tag tags_selected << this_tag if (select = inc_tags[this_tag]) # QUESTION should we prevent tag from being selected when enclosing tag is excluded? tag_stack << [(active_tag = this_tag), select, inc_lineno] elsif !wildcard.nil? select = active_tag && !select ? false : wildcard tag_stack << [(active_tag = this_tag), select, inc_lineno] end elsif select # NOTE record the line where we started selecting inc_offset ||= inc_lineno inc_lines << l end end end rescue logger.error message_with_context %(include #{target_type} not readable: #{inc_path}), source_location: cursor return replace_next_line %(Unresolved directive in #{@path} - include::#{expanded_target}[#{attrlist}]) end unless tag_stack.empty? 
tag_stack.each do |tag_name, _, tag_lineno| logger.warn message_with_context %(detected unclosed tag '#{tag_name}' starting at line #{tag_lineno} of include #{target_type}: #{inc_path}), source_location: cursor, include_location: (create_include_cursor inc_path, expanded_target, tag_lineno) end end unless (missing_tags = inc_tags.keep_if {|_, v| v }.keys - tags_selected.to_a).empty? logger.warn message_with_context %(tag#{missing_tags.size > 1 ? 's' : ''} '#{missing_tags.join ', '}' not found in include #{target_type}: #{inc_path}), source_location: cursor end shift if inc_offset parsed_attrs['partial-option'] = '' unless base_select && wildcard != false && inc_tags.empty? # FIXME not accounting for skipped lines in reader line numbering push_include inc_lines, inc_path, relpath, inc_offset, parsed_attrs end else inc_content = nil begin # NOTE read content before shift so cursor is only advanced if IO operation succeeds inc_content = reader.call(inc_path, read_mode) {|f| f.read } shift rescue logger.error message_with_context %(include #{target_type} not readable: #{inc_path}), source_location: cursor return replace_next_line %(Unresolved directive in #{@path} - include::#{expanded_target}[#{attrlist}]) end push_include inc_content, inc_path, relpath, 1, parsed_attrs end true end end # Internal: Resolve the target of an include directive. # # An internal method to resolve the target of an include directive. This method must return an # Array containing the resolved (absolute) path of the target, the target type (:file or :uri), # and the path of the target relative to the outermost document. Alternately, the method may # return a boolean to halt processing of the include directive line and to indicate whether the # cursor should be advanced beyond this line (true) or the line should be reprocessed (false). # # This method is overridden in Asciidoctor.js to resolve the target of an include in the browser # environment. # # target - A String containing the unresolved include target. # (Attribute references in target value have already been resolved). # attrlist - An attribute list String (i.e., the text between the square brackets). # attributes - A Hash of attributes parsed from attrlist. # # Returns An Array containing the resolved (absolute) include path, the target type, and the path # relative to the outermost document. May also return a boolean to halt processing of the include. def resolve_include_path target, attrlist, attributes doc = @document if (Helpers.uriish? target) || (::String === @dir ? nil : (target = %(#{@dir}/#{target}))) return replace_next_line %(link:#{target}[role=include]) unless doc.attr? 'allow-uri-read' if doc.attr? 'cache-uri' # caching requires the open-uri-cached gem to be installed # processing will be automatically aborted if these libraries can't be opened Helpers.require_library 'open-uri/cached', 'open-uri-cached' unless defined? ::OpenURI::Cache elsif !RUBY_ENGINE_OPAL # autoload open-uri ::OpenURI end [(::URI.parse target), :uri, target] else # include file is resolved relative to dir of current include, or base_dir if within original docfile inc_path = doc.normalize_system_path target, @dir, nil, target_name: 'include file' unless ::File.file? 
inc_path if attributes['optional-option'] logger.info { message_with_context %(optional include dropped because include file not found: #{inc_path}), source_location: cursor } shift return true else logger.error message_with_context %(include file not found: #{inc_path}), source_location: cursor return replace_next_line %(Unresolved directive in #{@path} - include::#{target}[#{attrlist}]) end end # NOTE relpath is the path relative to the root document (or base_dir, if set) # QUESTION should we move relative_path method to Document relpath = doc.path_resolver.relative_path inc_path, doc.base_dir [inc_path, :file, relpath] end end def pop_include unless @include_stack.empty? @lines, @file, @dir, @path, @lineno, @maxdepth, @process_lines = @include_stack.pop # FIXME kind of a hack #Document::AttributeEntry.new('infile', @file).save_to_next_block @document #Document::AttributeEntry.new('indir', ::File.dirname(@file)).save_to_next_block @document @look_ahead = 0 nil end end # Private: Split delimited value on comma (if found), otherwise semi-colon def split_delimited_value val (val.include? ',') ? (val.split ',') : (val.split ';') end # Private: Ignore front-matter, commonly used in static site generators def skip_front_matter! data, increment_linenos = true return unless (delim = data[0]) == '---' original_data = data.drop 0 data.shift front_matter = [] @lineno += 1 if increment_linenos until (eof = data.empty?) || data[0] == delim front_matter << data.shift @lineno += 1 if increment_linenos end if eof data.unshift(*original_data) @lineno -= original_data.size if increment_linenos return end data.shift @lineno += 1 if increment_linenos front_matter end # Private: Resolve the value of one side of the expression # # Examples # # expr = '"value"' # resolve_expr_val expr # # => "value" # # expr = '"value' # resolve_expr_val expr # # => "\"value" # # expr = '"{undefined}"' # resolve_expr_val expr # # => "" # # expr = '{undefined}' # resolve_expr_val expr # # => nil # # expr = '2' # resolve_expr_val expr # # => 2 # # @document.attributes['name'] = 'value' # expr = '"{name}"' # resolve_expr_val expr # # => "value" # # Returns The value of the expression, coerced to the appropriate type def resolve_expr_val val if ((val.start_with? '"') && (val.end_with? '"')) || ((val.start_with? '\'') && (val.end_with? '\'')) quoted = true val = val.slice 1, (val.length - 1) else quoted = false end # QUESTION should we substitute first? # QUESTION should we also require string to be single quoted (like block attribute values?) val = @document.sub_attributes val, attribute_missing: 'drop' if val.include? ATTR_REF_HEAD if quoted val elsif val.empty? nil elsif val == 'true' true elsif val == 'false' false elsif val.rstrip.empty? ' ' elsif val.include? '.' val.to_f else # fallback to coercing to integer, since we # require string values to be explicitly quoted val.to_i end end end end asciidoctor-2.0.20/lib/asciidoctor/rouge_ext.rb000066400000000000000000000027161443135032600215040ustar00rootroot00000000000000# frozen_string_literal: true require 'rouge' unless defined? Rouge.version module Asciidoctor; module RougeExt; module Formatters class HTMLTable < ::Rouge::Formatter def initialize delegate, opts @delegate = delegate @start_line = opts[:start_line] || 1 end def stream tokens formatted_code = @delegate.format tokens formatted_code += LF unless formatted_code.end_with? 
LF, HangingEndSpanTagCs last_lineno = (first_lineno = @start_line) + (formatted_code.count LF) - 1 # assume number of newlines is constant lineno_format = %(%#{(::Math.log10 last_lineno).floor + 1}i) formatted_linenos = ((first_lineno..last_lineno).map {|lineno| sprintf lineno_format, lineno } << '').join LF yield %(
    #{formatted_linenos}
    #{formatted_code}
    ) end end class HTMLLineHighlighter < ::Rouge::Formatter def initialize delegate, opts @delegate = delegate @lines = opts[:lines] || [] end def stream tokens lineno = 0 token_lines tokens do |tokens_in_line| yield (@lines.include? lineno += 1) ? %(#{@delegate.format tokens_in_line}#{LF}) : %(#{@delegate.format tokens_in_line}#{LF}) end end end LF = ?\n HangingEndSpanTagCs = %(#{LF}) private_constant :HangingEndSpanTagCs, :LF end; end; end asciidoctor-2.0.20/lib/asciidoctor/rx.rb000066400000000000000000000526471443135032600201440ustar00rootroot00000000000000# frozen_string_literal: true module Asciidoctor # A collection of regular expression constants used by the parser. (For speed, these are not defined in the Rx module, # but rather directly in the Asciidoctor module). # # NOTE The following pattern, which appears frequently, captures the contents between square brackets, ignoring # escaped closing brackets (closing brackets prefixed with a backslash '\' character) # # Pattern: \[(|#{CC_ALL}*?[^\\])\] # Matches: [enclosed text] and [enclosed [text\]], not [enclosed text \\] or [\\] (as these require a trailing space) module Rx; end ## Document header # Matches the author info line immediately following the document title. # # Examples # # Doc Writer # Mary_Sue Brontë # AuthorInfoLineRx = /^(#{CG_WORD}[#{CC_WORD}\-'.]*)(?: +(#{CG_WORD}[#{CC_WORD}\-'.]*))?(?: +(#{CG_WORD}[#{CC_WORD}\-'.]*))?(?: +<([^>]+)>)?$/ # Matches the delimiter that separates multiple authors. # # Examples # # Doc Writer; Junior Writer # AuthorDelimiterRx = /;(?: |$)/ # Matches the revision info line, which appears immediately following # the author info line beneath the document title. # # Examples # # v1.0 # 2013-01-01 # v1.0, 2013-01-01: Ring in the new year release # 1.0, Jan 01, 2013 # RevisionInfoLineRx = /^(?:[^\d{]*(#{CC_ANY}*?),)? *(?!:)(#{CC_ANY}*?)(?: *(?!^),?: *(#{CC_ANY}*))?$/ # Matches the title and volnum in the manpage doctype. # # Examples # # = asciidoctor(1) # = asciidoctor ( 1 ) # ManpageTitleVolnumRx = /^(#{CC_ANY}+?) *\( *(#{CC_ANY}+?) *\)$/ # Matches the name and purpose in the manpage doctype. # # Examples # # asciidoctor - converts AsciiDoc source files to HTML, DocBook and other formats # ManpageNamePurposeRx = /^(#{CC_ANY}+?) +- +(#{CC_ANY}+)$/ ## Preprocessor directives # Matches a conditional preprocessor directive (e.g., ifdef, ifndef, ifeval and endif). # # Examples # # ifdef::basebackend-html[] # ifndef::theme[] # ifeval::["{asciidoctor-version}" >= "0.1.0"] # ifdef::asciidoctor[Asciidoctor!] # endif::theme[] # endif::basebackend-html[] # endif::[] # ConditionalDirectiveRx = /^(\\)?(ifdef|ifndef|ifeval|endif)::(\S*?(?:([,+])\S*?)?)\[(#{CC_ANY}+)?\]$/ # Matches a restricted (read as safe) eval expression. # # Examples # # "{asciidoctor-version}" >= "0.1.0" # EvalExpressionRx = /^(#{CC_ANY}+?) *([=!><]=|[><]) *(#{CC_ANY}+)$/ # Matches an include preprocessor directive. # # Examples # # include::chapter1.ad[] # include::example.txt[lines=1;2;5..10] # IncludeDirectiveRx = /^(\\)?include::([^\s\[](?:[^\[]*[^\s\[])?)\[(#{CC_ANY}+)?\]$/ # Matches a trailing tag directive in an include file. # # Examples # # // tag::try-catch[] # try { # someMethod(); # catch (Exception e) { # log(e); # } # // end::try-catch[] # NOTE m flag is required for Asciidoctor.js TagDirectiveRx = /\b(?:tag|(e)nd)::(\S+?)\[\](?=$|[ \r])/m ## Attribute entries and references # Matches a document attribute entry. 
# # Examples # # :foo: bar # :First Name: Dan # :sectnums!: # :!toc: # :long-entry: Attribute value lines ending in ' \' \ # are joined together as a single value, \ # collapsing the line breaks and indentation to \ # a single space. # AttributeEntryRx = /^:(!?#{CG_WORD}[^:]*):(?:[ \t]+(#{CC_ANY}*))?$/ # Matches invalid characters in an attribute name. InvalidAttributeNameCharsRx = /[^#{CC_WORD}-]/ # Matches a pass inline macro that surrounds the value of an attribute # entry once it has been parsed. # # Examples # # pass:[text] # pass:a[{a} {b} {c}] # if RUBY_ENGINE == 'opal' # NOTE In JavaScript, ^ and $ match the boundaries of the string when the m flag is not set AttributeEntryPassMacroRx = /^pass:([a-z]+(?:,[a-z-]+)*)?\[(#{CC_ALL}*)\]$/ else AttributeEntryPassMacroRx = /\Apass:([a-z]+(?:,[a-z-]+)*)?\[(.*)\]\Z/m end # Matches an inline attribute reference. # # Examples # # {foobar} or {app_name} or {product-version} # {counter:sequence-name:1} # {set:foo:bar} # {set:name!} # AttributeReferenceRx = /(\\)?\{(#{CG_WORD}[#{CC_WORD}-]*|(set|counter2?):#{CC_ANY}+?)(\\)?\}/ ## Paragraphs and delimited blocks # Matches an anchor (i.e., id + optional reference text) on a line above a block. # # Examples # # [[idname]] # [[idname,Reference Text]] # BlockAnchorRx = /^\[\[(?:|([#{CC_ALPHA}_:][#{CC_WORD}\-:.]*)(?:, *(#{CC_ANY}+))?)\]\]$/ # Matches an attribute list above a block element. # # Examples # # # strictly positional # [quote, Adam Smith, Wealth of Nations] # # # name/value pairs # [NOTE, caption="Good to know"] # # # as attribute reference # [{lead}] # BlockAttributeListRx = /^\[(|[#{CC_WORD}.#%{,"']#{CC_ANY}*)\]$/ # A combined pattern that matches either a block anchor or a block attribute list. # # TODO this one gets hit a lot, should be optimized as much as possible BlockAttributeLineRx = /^\[(?:|[#{CC_WORD}.#%{,"']#{CC_ANY}*|\[(?:|[#{CC_ALPHA}_:][#{CC_WORD}\-:.]*(?:, *#{CC_ANY}+)?)\])\]$/ # Matches a title above a block. # # Examples # # .Title goes here # BlockTitleRx = /^\.(\.?[^ \t.]#{CC_ANY}*)$/ # Matches an admonition label at the start of a paragraph. # # Examples # # NOTE: Just a little note. # TIP: Don't forget! # AdmonitionParagraphRx = /^(#{ADMONITION_STYLES.to_a.join '|'}):[ \t]+/ # Matches a literal paragraph, which is a line of text preceded by at least one space. # # Examples # # Foo # Foo LiteralParagraphRx = /^([ \t]+#{CC_ANY}*)$/ # Matches a comment block. # # Examples # # //// # This is a block comment. # It can span one or more lines. # //// #CommentBlockRx = %r(^/{4,}$) # Matches a comment line. # # Examples # # // note to author # #CommentLineRx = %r(^//(?=[^/]|$)) ## Section titles # Matches an Atx (single-line) section title. # # Examples # # == Foo # // ^ a level 1 (h2) section title # # == Foo == # // ^ also a level 1 (h2) section title # AtxSectionTitleRx = /^(=={0,5})[ \t]+(#{CC_ANY}+?)(?:[ \t]+\1)?$/ # Matches an extended Atx section title that includes support for the Markdown variant. ExtAtxSectionTitleRx = /^(=={0,5}|#\#{0,5})[ \t]+(#{CC_ANY}+?)(?:[ \t]+\1)?$/ # Matches the title only (first line) of an Setext (two-line) section title. # The title cannot begin with a dot and must have at least one alphanumeric character. SetextSectionTitleRx = /^((?!\.)#{CC_ANY}*?#{CG_ALNUM}#{CC_ANY}*)$/ # Matches an anchor (i.e., id + optional reference text) inside a section title. 
# # Examples # # Section Title [[idname]] # Section Title [[idname,Reference Text]] # InlineSectionAnchorRx = / (\\)?\[\[([#{CC_ALPHA}_:][#{CC_WORD}\-:.]*)(?:, *(#{CC_ANY}+))?\]\]$/ # Matches invalid ID characters in a section title. # # NOTE uppercase chars not included since expression is only run on a lowercase string InvalidSectionIdCharsRx = /<[^>]+>|&(?:[a-z][a-z]+\d{0,2}|#\d\d\d{0,4}|#x[\da-f][\da-f][\da-f]{0,3});|[^ #{CC_WORD}\-.]+?/ # Matches an explicit section level style like sect1 # SectionLevelStyleRx = /^sect\d$/ ## Lists # Detects the start of any list item. # # NOTE we only have to check as far as the blank character because we know it means non-whitespace follows. # IMPORTANT if this regexp does not agree with the regexp for each list type, the parser will hang. AnyListRx = %r(^(?:[ \t]*(?:-|\*\**|\.\.*|\u2022|\d+\.|[a-zA-Z]\.|[IVXivx]+\))[ \t]|(?!//[^/])[ \t]*[^ \t]#{CC_ANY}*?(?::::{0,2}|;;)(?:$|[ \t])|<(?:\d+|\.)>[ \t])) # Matches an unordered list item (one level for hyphens, up to 5 levels for asterisks). # # Examples # # * Foo # - Foo # # NOTE we know trailing (.*) will match at least one character because we strip trailing spaces UnorderedListRx = /^[ \t]*(-|\*\**|\u2022)[ \t]+(#{CC_ANY}*)$/ # Matches an ordered list item (explicit numbering or up to 5 consecutive dots). # # Examples # # . Foo # .. Foo # 1. Foo (arabic, default) # a. Foo (loweralpha) # A. Foo (upperalpha) # i. Foo (lowerroman) # I. Foo (upperroman) # # NOTE leading space match is not always necessary, but is used for list reader # NOTE we know trailing (.*) will match at least one character because we strip trailing spaces OrderedListRx = /^[ \t]*(\.\.*|\d+\.|[a-zA-Z]\.|[IVXivx]+\))[ \t]+(#{CC_ANY}*)$/ # Matches the ordinals for each type of ordered list. OrderedListMarkerRxMap = { arabic: /\d+\./, loweralpha: /[a-z]\./, lowerroman: /[ivx]+\)/, upperalpha: /[A-Z]\./, upperroman: /[IVX]+\)/, #lowergreek: /[a-z]\]/, } # Matches a description list entry. # # Examples # # foo:: # bar::: # baz:::: # blah;; # # # the term may be followed by a description on the same line... # # foo:: The metasyntactic variable that commonly accompanies 'bar' (see also, <>). # # # ...or on a separate line, which may optionally be indented # # foo:: # The metasyntactic variable that commonly accompanies 'bar' (see also, <>). # # # attribute references may be used in both the term and the description # # {foo-term}:: {foo-desc} # # NOTE we know trailing (.*) will match at least one character because we strip trailing spaces # NOTE must skip line comment when looking for next list item inside list DescriptionListRx = %r(^(?!//[^/])[ \t]*([^ \t]#{CC_ANY}*?)(:::{0,2}|;;)(?:$|[ \t]+(#{CC_ANY}*)$)) # Matches a sibling description list item (excluding the delimiter specified by the key). # NOTE must skip line comment when looking for sibling list item DescriptionListSiblingRx = { '::' => %r(^(?!//[^/])[ \t]*([^ \t]#{CC_ANY}*?[^:]|[^ \t:])(::)(?:$|[ \t]+(#{CC_ANY}*)$)), ':::' => %r(^(?!//[^/])[ \t]*([^ \t]#{CC_ANY}*?[^:]|[^ \t:])(:::)(?:$|[ \t]+(#{CC_ANY}*)$)), '::::' => %r(^(?!//[^/])[ \t]*([^ \t]#{CC_ANY}*?[^:]|[^ \t:])(::::)(?:$|[ \t]+(#{CC_ANY}*)$)), ';;' => %r(^(?!//[^/])[ \t]*([^ \t]#{CC_ANY}*?)(;;)(?:$|[ \t]+(#{CC_ANY}*)$)) } # Matches a callout list item. 
# # Examples # # <1> Explanation # # or # # <.> Explanation with automatic number # # NOTE we know trailing (.*) will match at least one character because we strip trailing spaces CalloutListRx = /^<(\d+|\.)>[ \t]+(#{CC_ANY}*)$/ # Matches a callout reference inside literal text. # # Examples # <1> (optionally prefixed by //, #, -- or ;; line comment chars) # <1> <2> (multiple callouts on one line) # (for XML-based languages) # <.> (auto-numbered) # # NOTE extract regexps are applied line-by-line, so we can use $ as end-of-line char CalloutExtractRx = %r(((?://|#|--|;;) ?)?(\\)?(?=(?: ?\\?)*$)) CalloutExtractRxt = '(\\\\)?<()(\\d+|\\.)>(?=(?: ?\\\\?<(?:\\d+|\\.)>)*$)' CalloutExtractRxMap = ::Hash.new {|h, k| h[k] = /(#{k.empty? ? '' : "#{::Regexp.escape k} ?"})?#{CalloutExtractRxt}/ } # NOTE special characters have not been replaced when scanning CalloutScanRx = /\\?(?=(?: ?\\?)*#{CC_EOL})/ # NOTE special characters have already been replaced when converting to an SGML format CalloutSourceRx = %r(((?://|#|--|;;) ?)?(\\)?<!?(|--)(\d+|\.)\3>(?=(?: ?\\?<!?\3(?:\d+|\.)\3>)*#{CC_EOL})) CalloutSourceRxt = "(\\\\)?<()(\\d+|\\.)>(?=(?: ?\\\\?<(?:\\d+|\\.)>)*#{CC_EOL})" CalloutSourceRxMap = ::Hash.new {|h, k| h[k] = /(#{k.empty? ? '' : "#{::Regexp.escape k} ?"})?#{CalloutSourceRxt}/ } # A Hash of regexps for lists used for dynamic access. ListRxMap = { ulist: UnorderedListRx, olist: OrderedListRx, dlist: DescriptionListRx, colist: CalloutListRx } ## Tables # Parses the column spec (i.e., colspec) for a table. # # Examples # # 1*h,2*,^3e # ColumnSpecRx = /^(?:(\d+)\*)?([<^>](?:\.[<^>]?)?|(?:[<^>]?\.)?[<^>])?(\d+%?|~)?([a-z])?$/ # Parses the start and end of a cell spec (i.e., cellspec) for a table. # # Examples # # 2.3+<.>m # # FIXME use step-wise scan (or treetop) rather than this mega-regexp CellSpecStartRx = /^[ \t]*(?:(\d+(?:\.\d*)?|(?:\d*\.)?\d+)([*+]))?([<^>](?:\.[<^>]?)?|(?:[<^>]?\.)?[<^>])?([a-z])?$/ CellSpecEndRx = /[ \t]+(?:(\d+(?:\.\d*)?|(?:\d*\.)?\d+)([*+]))?([<^>](?:\.[<^>]?)?|(?:[<^>]?\.)?[<^>])?([a-z])?$/ # Block macros # Matches the custom block macro pattern. # # Examples # # gist::123456[] # #-- # NOTE we've relaxed the match for target to accommodate the short format (e.g., name::[attrlist]) CustomBlockMacroRx = /^(#{CG_WORD}[#{CC_WORD}-]*)::(|\S|\S#{CC_ANY}*?\S)\[(#{CC_ANY}+)?\]$/ # Matches an image, video or audio block macro. # # Examples # # image::filename.png[Caption] # video::http://youtube.com/12345[Cats vs Dogs] # BlockMediaMacroRx = /^(image|video|audio)::(\S|\S#{CC_ANY}*?\S)\[(#{CC_ANY}+)?\]$/ # Matches the TOC block macro. # # Examples # # toc::[] # toc::[levels=2] # BlockTocMacroRx = /^toc::\[(#{CC_ANY}+)?\]$/ ## Inline macros # Matches an anchor (i.e., id + optional reference text) in the flow of text. # # Examples # # [[idname]] # [[idname,Reference Text]] # anchor:idname[] # anchor:idname[Reference Text] # InlineAnchorRx = /(\\)?(?:\[\[([#{CC_ALPHA}_:][#{CC_WORD}\-:.]*)(?:, *(#{CC_ANY}+?))?\]\]|anchor:([#{CC_ALPHA}_:][#{CC_WORD}\-:.]*)\[(?:\]|(#{CC_ANY}*?[^\\])\]))/ # Scans for a non-escaped anchor (i.e., id + optional reference text) in the flow of text. InlineAnchorScanRx = /(?:^|[^\\\[])\[\[([#{CC_ALPHA}_:][#{CC_WORD}\-:.]*)(?:, *(#{CC_ANY}+?))?\]\]|(?:^|[^\\])anchor:([#{CC_ALPHA}_:][#{CC_WORD}\-:.]*)\[(?:\]|(#{CC_ANY}*?[^\\])\])/ # Scans for a leading, non-escaped anchor (i.e., id + optional reference text). 
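#
# Examples (hypothetical source lines, shown only for illustration; the original source defines
# this constant without an example block)
#
#   [[bookmark-a]]The paragraph that follows this leading inline anchor can be referenced by id.
#   [[bookmark-b,Reference Text]]This anchor also supplies explicit reference text.
#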
LeadingInlineAnchorRx = /^\[\[([#{CC_ALPHA}_:][#{CC_WORD}\-:.]*)(?:, *(#{CC_ANY}+?))?\]\]/ # Matches a bibliography anchor at the start of the list item text (in a bibliography list). # # Examples # # [[[Fowler_1997]]] Fowler M. ... # InlineBiblioAnchorRx = /^\[\[\[([#{CC_ALPHA}_:][#{CC_WORD}\-:.]*)(?:, *(#{CC_ANY}+?))?\]\]\]/ # Matches an inline e-mail address. # # doc.writer@example.com # InlineEmailRx = %r(([\\>:/])?#{CG_WORD}(?:&|[#{CC_WORD}\-.%+])*@#{CG_ALNUM}[#{CC_ALNUM}_\-.]*\.[a-zA-Z]{2,5}\b) # Matches an inline footnote macro, which is allowed to span multiple lines. # # Examples # footnote:[text] (not referenceable) # footnote:id[text] (referenceable) # footnote:id[] (reference) # footnoteref:[id,text] (legacy) # footnoteref:[id] (legacy) # InlineFootnoteMacroRx = %r(\\?footnote(?:(ref):|:([#{CC_WORD}-]+)?)\[(?:|(#{CC_ALL}*?[^\\]))\](?!))m # Matches an image or icon inline macro. # # Examples # # image:filename.png[Alt Text] # image:http://example.com/images/filename.png[Alt Text] # image:filename.png[More [Alt\] Text] (alt text becomes "More [Alt] Text") # icon:github[large] # # NOTE be as non-greedy as possible by not allowing newline or left square bracket in target InlineImageMacroRx = /\\?i(?:mage|con):([^:\s\[](?:[^\n\[]*[^\s\[])?)\[(|#{CC_ALL}*?[^\\])\]/m # Matches an indexterm inline macro, which may span multiple lines. # # Examples # # indexterm:[Tigers,Big cats] # (((Tigers,Big cats))) # indexterm2:[Tigers] # ((Tigers)) # InlineIndextermMacroRx = /\\?(?:(indexterm2?):\[(#{CC_ALL}*?[^\\])\]|\(\((#{CC_ALL}+?)\)\)(?!\)))/m # Matches either the kbd or btn inline macro. # # Examples # # kbd:[F3] # kbd:[Ctrl+Shift+T] # kbd:[Ctrl+\]] # kbd:[Ctrl,T] # btn:[Save] # InlineKbdBtnMacroRx = /(\\)?(kbd|btn):\[(#{CC_ALL}*?[^\\])\]/m # Matches an implicit link and some of the link inline macro. # # Examples # # https://github.com # https://github.com[GitHub] # # link:https://github.com[] # "https://github.com[]" # (https://github.com) <= parenthesis not included in autolink # InlineLinkRx = %r((^|link:|#{CG_BLANK}|<|[>\(\)\[\];"'])(\\?(?:https?|file|ftp|irc)://)(?:([^\s\[\]]+)\[(|#{CC_ALL}*?[^\\])\]|([^\s\[\]<]*([^\s,.?!\[\]<\)]))))m # Match a link or e-mail inline macro. # # Examples # # link:path[label] # mailto:doc.writer@example.com[] # # NOTE be as non-greedy as possible by not allowing space or left square bracket in target InlineLinkMacroRx = /\\?(?:link|(mailto)):(|[^:\s\[][^\s\[]*)\[(|#{CC_ALL}*?[^\\])\]/m # Matches the name of a macro. # MacroNameRx = /^#{CG_WORD}[#{CC_WORD}-]*$/ # Matches a stem (and alternatives, asciimath and latexmath) inline macro, which may span multiple lines. # # Examples # # stem:[x != 0] # asciimath:[x != 0] # latexmath:[\sqrt{4} = 2] # InlineStemMacroRx = /\\?(stem|(?:latex|ascii)math):([a-z]+(?:,[a-z-]+)*)?\[(#{CC_ALL}*?[^\\])\]/m # Matches a menu inline macro. # # Examples # # menu:File[Save As...] # menu:Edit[] # menu:View[Page Style > No Style] # menu:View[Page Style, No Style] # InlineMenuMacroRx = /\\?menu:(#{CG_WORD}|[#{CC_WORD}&][^\n\[]*[^\s\[])\[ *(?:|(#{CC_ALL}*?[^\\]))\]/m # Matches an implicit menu inline macro. # # Examples # # "File > New..." # InlineMenuRx = /\\?"([#{CC_WORD}&][^"]*?[ \n]+>[ \n]+[^"]*)"/ # Matches an inline passthrough, which may span multiple lines. 
# # Examples # # +text+ # [x-]+text+ # [x-]`text` # `text` (compat only) # [role]`text` (compat only) # # NOTE we always capture the attributes so we know when to use compatible (i.e., legacy) behavior InlinePassRx = { false => ['+', '-]', /((?:^|[^#{CC_WORD};:\\])(?=(\[)|\+)|\\(?=\[)|(?=\\\+))(?:\2(x-|[^\]]+ x-)\]|(?:\[([^\]]+)\])?(?=(\\)?\+))(\5?(\+|`)(\S|\S#{CC_ALL}*?\S)\7)(?!#{CG_WORD})/m], true => ['`', nil, /(^|[^`#{CC_WORD}])(?:(\Z)()|\[([^\]]+)\](?=(\\))?)?(\5?(`)([^`\s]|[^`\s]#{CC_ALL}*?\S)\7)(?![`#{CC_WORD}])/m], } # Matches several variants of the passthrough inline macro, which may span multiple lines. # # Examples # # +++text+++ # $$text$$ # pass:quotes[text] # # NOTE we have to support an empty pass:[] for compatibility with AsciiDoc.py InlinePassMacroRx = /(?:(?:(\\?)\[([^\]]+)\])?(\\{0,2})(\+\+\+?|\$\$)(#{CC_ALL}*?)\4|(\\?)pass:([a-z]+(?:,[a-z-]+)*)?\[(|#{CC_ALL}*?[^\\])\])/m # Matches an xref (i.e., cross-reference) inline macro, which may span multiple lines. # # Examples # # <> # xref:id[reftext] # # NOTE special characters have already been escaped, hence the entity references # NOTE { is included in start characters to support target that begins with attribute reference in title content InlineXrefMacroRx = %r(\\?(?:<<([#{CC_WORD}#/.:{]#{CC_ALL}*?)>>|xref:([#{CC_WORD}#/.:{]#{CC_ALL}*?)\[(?:\]|(#{CC_ALL}*?[^\\])\])))m ## Layout # Matches a trailing + preceded by at least one space character, # which forces a hard line break (
    tag in HTML output). # # NOTE AsciiDoc.py allows + to be preceded by TAB; Asciidoctor does not # # Examples # # Humpty Dumpty sat on a wall, + # Humpty Dumpty had a great fall. # if RUBY_ENGINE == 'opal' # NOTE In JavaScript, ^ and $ only match the start and end of line if the multiline flag is present HardLineBreakRx = /^(#{CC_ANY}*) \+$/m else # NOTE In Ruby, ^ and $ always match start and end of line HardLineBreakRx = /^(.*) \+$/ end # Matches a Markdown horizontal rule. # # Examples # # --- or - - - # *** or * * * # ___ or _ _ _ # MarkdownThematicBreakRx = /^ {0,3}([-*_])( *)\1\2\1$/ # Matches an AsciiDoc or Markdown horizontal rule or AsciiDoc page break. # # Examples # # ''' (horizontal rule) # <<< (page break) # --- or - - - (horizontal rule, Markdown) # *** or * * * (horizontal rule, Markdown) # ___ or _ _ _ (horizontal rule, Markdown) # ExtLayoutBreakRx = /^(?:'{3,}|<{3,}|([-*_])( *)\1\2\1)$/ ## General # Matches consecutive blank lines. # # Examples # # one # # two # BlankLineRx = /\n{2,}/ # Matches a comma or semi-colon delimiter. # # Examples # # one,two # three;four # #DataDelimiterRx = /[,;]/ # Matches whitespace (space, tab, newline) escaped by a backslash. # # Examples # # three\ blind\ mice # EscapedSpaceRx = /\\([ \t\n])/ # Detects if text is a possible candidate for the replacements substitution. # ReplaceableTextRx = /[&']|--|\.\.\.|\([CRT]M?\)/ # Matches a whitespace delimiter, a sequence of spaces, tabs, and/or newlines. # Matches the parsing rules of %w strings in Ruby. # # Examples # # one two three four # five six # # TODO change to /(?]+>/ end asciidoctor-2.0.20/lib/asciidoctor/section.rb000066400000000000000000000165601443135032600211510ustar00rootroot00000000000000# frozen_string_literal: true module Asciidoctor # Public: Methods for managing sections of AsciiDoc content in a document. # The section responds as an Array of content blocks by delegating # block-related methods to its @blocks Array. # # Examples # # section = Asciidoctor::Section.new # section.title = 'Section 1' # section.id = 'sect1' # # section.size # => 0 # # section.id # => "sect1" # # section << new_block # section.size # => 1 class Section < AbstractBlock # Public: Get/Set the 0-based index order of this section within the parent block attr_accessor :index # Public: Get/Set the section name of this section attr_accessor :sectname # Public: Get/Set the flag to indicate whether this is a special section or a child of one attr_accessor :special # Public: Get/Set the flag to indicate whether this section should be numbered. # The sectnum method should only be called if this flag is true. attr_accessor :numbered # Public: Get the caption for this section (only relevant for appendices) attr_reader :caption # Public: Initialize an Asciidoctor::Section object. # # parent - The parent AbstractBlock. 
If set, must be a Document or Section object (default: nil) # level - The Integer level of this section (default: 1 more than parent level or 1 if parent not defined) # numbered - A Boolean indicating whether numbering is enabled for this Section (default: false) # opts - An optional Hash of options (default: {}) def initialize parent = nil, level = nil, numbered = false, opts = {} super parent, :section, opts if Section === parent @level, @special = level || (parent.level + 1), parent.special else @level, @special = level || 1, false end @numbered = numbered @index = 0 end # Public: The name of this section, an alias of the section title alias name title # Public: Generate a String ID from the title of this section. # # See Section.generate_id for details. def generate_id Section.generate_id title, @document end # Public: Check whether this Section has any child Section objects. # # Returns A [Boolean] to indicate whether this Section has child Section objects def sections? @next_section_index > 0 end # Public: Get the section number for the current Section # # The section number is a dot-separated String that uniquely describes the position of this # Section in the document. Each entry represents a level of nesting. The value of each entry is # the 1-based outline number of the Section amongst its numbered sibling Sections. # # This method assumes that both the @level and @parent instance variables have been assigned. # The method also assumes that the value of @parent is either a Document or Section. # # delimiter - the delimiter to separate the number for each level # append - the String to append at the end of the section number # or Boolean to indicate the delimiter should not be # appended to the final level # (default: nil) # # Examples # # sect1 = Section.new(document) # sect1.level = 1 # sect1_1 = Section.new(sect1) # sect1_1.level = 2 # sect1_2 = Section.new(sect1) # sect1_2.level = 2 # sect1 << sect1_1 # sect1 << sect1_2 # sect1_1_1 = Section.new(sect1_1) # sect1_1_1.level = 3 # sect1_1 << sect1_1_1 # # sect1.sectnum # # => 1. # # sect1_1.sectnum # # => 1.1. # # sect1_2.sectnum # # => 1.2. # # sect1_1_1.sectnum # # => 1.1.1. # # sect1_1_1.sectnum(',', false) # # => 1,1,1 # # Returns the section number as a String def sectnum(delimiter = '.', append = nil) append ||= (append == false ? '' : delimiter) @level > 1 && Section === @parent ? %(#{@parent.sectnum(delimiter, delimiter)}#{@numeral}#{append}) : %(#{@numeral}#{append}) end # (see AbstractBlock#xreftext) def xreftext xrefstyle = nil if (val = reftext) && !val.empty? val elsif xrefstyle if @numbered case xrefstyle when 'full' if (type = @sectname) == 'chapter' || type == 'appendix' quoted_title = sub_placeholder (sub_quotes '_%s_'), title else quoted_title = sub_placeholder (sub_quotes @document.compat_mode ? %q(``%s'') : '"`%s`"'), title end if (signifier = @document.attributes[%(#{type}-refsig)]) %(#{signifier} #{sectnum '.', ','} #{quoted_title}) else %(#{sectnum '.', ','} #{quoted_title}) end when 'short' if (signifier = @document.attributes[%(#{@sectname}-refsig)]) %(#{signifier} #{sectnum '.', ''}) else sectnum '.', '' end else # 'basic' (type = @sectname) == 'chapter' || type == 'appendix' ? (sub_placeholder (sub_quotes '_%s_'), title) : title end else # apply basic styling (type = @sectname) == 'chapter' || type == 'appendix' ? (sub_placeholder (sub_quotes '_%s_'), title) : title end else title end end # Public: Append a content block to this block's list of blocks. 
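# NOTE illustrative usage sketch, not part of the upstream source; assumes the
# Section constructor shown above and a document variable holding an
# Asciidoctor::Document:
#
#   chapter = Asciidoctor::Section.new document, 1, true
#   chapter << Asciidoctor::Section.new(chapter, 2, true)
#   chapter.sections? # => true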
# # If the child block is a Section, assign an index to it. # # block - The child Block to append to this parent Block # # Returns The parent Block def << block assign_numeral block if block.context == :section super end def to_s if @title formal_title = @numbered ? %(#{sectnum} #{@title}) : @title %(#<#{self.class}@#{object_id} {level: #{@level}, title: #{formal_title.inspect}, blocks: #{@blocks.size}}>) else super end end # Public: Generate a String ID from the given section title. # # The generated ID is prefixed with value of the 'idprefix' attribute, which # is an underscore (_) by default. Invalid characters are then removed and # spaces are replaced with the value of the 'idseparator' attribute, which is # an underscore (_) by default. # # If the generated ID is already in use in the document, a count is appended, # offset by the separator, until a unique ID is found. # # Section ID generation can be disabled by unsetting the 'sectids' document attribute. # # Examples # # Section.generate_id 'Foo', document # => "_foo" # # Returns the generated [String] ID. def self.generate_id title, document attrs = document.attributes pre = attrs['idprefix'] || '_' if (sep = attrs['idseparator']) if sep.length == 1 || (!(no_sep = sep.empty?) && (sep = attrs['idseparator'] = sep.chr)) sep_sub = sep == '-' || sep == '.' ? ' .-' : %( #{sep}.-) end else sep, sep_sub = '_', ' _.-' end gen_id = %(#{pre}#{title.downcase.gsub InvalidSectionIdCharsRx, ''}) if no_sep gen_id = gen_id.delete ' ' else # replace space with separator and remove repeating and trailing separator characters gen_id = gen_id.tr_s sep_sub, sep gen_id = gen_id.chop if gen_id.end_with? sep # ensure id doesn't begin with idseparator if idprefix is empty (assuming idseparator is not empty) gen_id = gen_id.slice 1, gen_id.length if pre.empty? && (gen_id.start_with? sep) end if document.catalog[:refs].key? gen_id ids = document.catalog[:refs] cnt = Compliance.unique_id_start_index cnt += 1 while ids[candidate_id = %(#{gen_id}#{sep}#{cnt})] candidate_id else gen_id end end end end asciidoctor-2.0.20/lib/asciidoctor/stylesheets.rb000066400000000000000000000052061443135032600220540ustar00rootroot00000000000000# frozen_string_literal: true module Asciidoctor # A utility class for working with the built-in stylesheets. #-- # QUESTION create methods for link_*_stylesheet? # QUESTION create method for user stylesheet? class Stylesheets DEFAULT_STYLESHEET_NAME = 'asciidoctor.css' STYLESHEETS_DIR = ::File.join DATA_DIR, 'stylesheets' @__instance__ = new def self.instance @__instance__ end def primary_stylesheet_name DEFAULT_STYLESHEET_NAME end # Public: Read the contents of the default Asciidoctor stylesheet # # returns the [String] Asciidoctor stylesheet data def primary_stylesheet_data @primary_stylesheet_data ||= (::File.read (::File.join STYLESHEETS_DIR, 'asciidoctor-default.css'), mode: FILE_READ_MODE).rstrip end # Deprecated: Generate code to embed the primary stylesheet # # Returns the [String] primary stylesheet data wrapped in a ) end def write_primary_stylesheet target_dir = '.' 
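# NOTE illustrative usage, not part of the upstream source; the target directory
# below is hypothetical:
#
#   Asciidoctor::Stylesheets.instance.write_primary_stylesheet 'build/css'
#   # copies the default stylesheet to build/css/asciidoctor.css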
::File.write (::File.join target_dir, primary_stylesheet_name), primary_stylesheet_data, mode: FILE_WRITE_MODE end def coderay_stylesheet_name (SyntaxHighlighter.for 'coderay').stylesheet_basename end # Public: Read the contents of the default CodeRay stylesheet # # returns the [String] CodeRay stylesheet data def coderay_stylesheet_data (SyntaxHighlighter.for 'coderay').read_stylesheet end # Deprecated: Generate code to embed the CodeRay stylesheet # # Returns the [String] CodeRay stylesheet data wrapped in a ) end def write_coderay_stylesheet target_dir = '.' ::File.write (::File.join target_dir, coderay_stylesheet_name), coderay_stylesheet_data, mode: FILE_WRITE_MODE end def pygments_stylesheet_name style = nil (SyntaxHighlighter.for 'pygments').stylesheet_basename style end # Public: Generate the Pygments stylesheet with the specified style. # # returns the [String] Pygments stylesheet data def pygments_stylesheet_data style = nil (SyntaxHighlighter.for 'pygments').read_stylesheet style end # Deprecated: Generate code to embed the Pygments stylesheet # # Returns the [String] Pygments stylesheet data for the specified style wrapped in a ) end def write_pygments_stylesheet target_dir = '.', style = nil ::File.write (::File.join target_dir, (pygments_stylesheet_name style)), (pygments_stylesheet_data style), mode: FILE_WRITE_MODE end end end asciidoctor-2.0.20/lib/asciidoctor/substitutors.rb000066400000000000000000001634061443135032600223010ustar00rootroot00000000000000# frozen_string_literal: true module Asciidoctor # Public: Methods to perform substitutions on lines of AsciiDoc text. This module # is intended to be mixed-in to Section and Block to provide operations for performing # the necessary substitutions. module Substitutors SpecialCharsRx = /[<&>]/ SpecialCharsTr = { '>' => '>', '<' => '<', '&' => '&' } # Detects if text is a possible candidate for the quotes substitution. QuotedTextSniffRx = { false => /[*_`#^~]/, true => /[*'_+#^~]/ } (BASIC_SUBS = [:specialcharacters]).freeze (HEADER_SUBS = [:specialcharacters, :attributes]).freeze (NO_SUBS = []).freeze (NORMAL_SUBS = [:specialcharacters, :quotes, :attributes, :replacements, :macros, :post_replacements]).freeze (REFTEXT_SUBS = [:specialcharacters, :quotes, :replacements]).freeze (VERBATIM_SUBS = [:specialcharacters, :callouts]).freeze SUB_GROUPS = { none: NO_SUBS, normal: NORMAL_SUBS, verbatim: VERBATIM_SUBS, specialchars: BASIC_SUBS, } SUB_HINTS = { a: :attributes, m: :macros, n: :normal, p: :post_replacements, q: :quotes, r: :replacements, c: :specialcharacters, v: :verbatim, } SUB_OPTIONS = { block: SUB_GROUPS.keys + NORMAL_SUBS + [:callouts], inline: SUB_GROUPS.keys + NORMAL_SUBS, } CAN = ?\u0018 DEL = ?\u007f # Delimiters and matchers for the passthrough placeholder # See http://www.aivosto.com/vbtips/control-characters.html#listabout for characters to use # SPA, start of guarded protected area (\u0096) PASS_START = ?\u0096 # EPA, end of guarded protected area (\u0097) PASS_END = ?\u0097 # match passthrough slot PassSlotRx = /#{PASS_START}(\d+)#{PASS_END}/ # fix passthrough slot after syntax highlighting HighlightedPassSlotRx = %r(]*>#{PASS_START}[^\d]*(\d+)[^\d]*]*>#{PASS_END}) RS = '\\' R_SB = ']' ESC_R_SB = '\]' PLUS = '+' # Public: Apply the specified substitutions to the text. # # text - The String or String Array of text to process; must not be nil. # subs - The substitutions to perform; must be a Symbol Array or nil (default: NORMAL_SUBS). 
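# NOTE illustrative sketch, not part of the upstream source; block stands for any
# node that mixes in Substitutors, and the output assumes the built-in HTML
# converter:
#
#   block.apply_subs 'A *bold* claim about <tags>', [:specialcharacters, :quotes]
#   # => "A <strong>bold</strong> claim about &lt;tags&gt;"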
# # Returns a String or String Array to match the type of the text argument with substitutions applied. def apply_subs text, subs = NORMAL_SUBS return text if text.empty? || !subs if (is_multiline = ::Array === text) text = text[1] ? (text.join LF) : text[0] end if subs.include? :macros text = extract_passthroughs text unless @passthroughs.empty? passthrus = @passthroughs # NOTE placeholders can move around, so we can only clear in the outermost substitution call @passthroughs_locked ||= (clear_passthrus = true) end end subs.each do |type| case type when :specialcharacters text = sub_specialchars text when :quotes text = sub_quotes text when :attributes text = sub_attributes text if text.include? ATTR_REF_HEAD when :replacements text = sub_replacements text when :macros text = sub_macros text when :highlight text = highlight_source text, (subs.include? :callouts) when :callouts text = sub_callouts text unless subs.include? :highlight when :post_replacements text = sub_post_replacements text else logger.warn %(unknown substitution type #{type}) end end if passthrus text = restore_passthroughs text if clear_passthrus passthrus.clear @passthroughs_locked = nil end end is_multiline ? (text.split LF, -1) : text end # Public: Apply normal substitutions. # # An alias for apply_subs with default remaining arguments. # # text - The String text to which to apply normal substitutions # # Returns the String with normal substitutions applied. def apply_normal_subs text apply_subs text, NORMAL_SUBS end # Public: Apply substitutions for header metadata and attribute assignments # # text - String containing the text process # # Returns A String with header substitutions performed def apply_header_subs text apply_subs text, HEADER_SUBS end # Public: Apply substitutions for titles. # # title - The String title to process # # Returns A String with title substitutions performed alias apply_title_subs apply_subs # Public: Apply substitutions for reftext. # # text - The String to process # # Returns a String with all substitutions from the reftext substitution group applied def apply_reftext_subs text apply_subs text, REFTEXT_SUBS end # Public: Substitute special characters (i.e., encode XML) # # The special characters <, &, and > get replaced with <, &, and >, respectively. # # text - The String text to process. # # Returns The String text with special characters replaced. if RUBY_ENGINE == 'opal' def sub_specialchars text (text.include? ?>) || (text.include? ?&) || (text.include? ?<) ? (text.gsub SpecialCharsRx, SpecialCharsTr) : text end else CGI = ::CGI def sub_specialchars text if (text.include? ?>) || (text.include? ?&) || (text.include? ?<) (text.include? ?') || (text.include? ?") ? (text.gsub SpecialCharsRx, SpecialCharsTr) : (CGI.escape_html text) else text end end end alias sub_specialcharacters sub_specialchars # Public: Substitute quoted text (includes emphasis, strong, monospaced, etc.) # # text - The String text to process # # returns The converted [String] text def sub_quotes text if QuotedTextSniffRx[compat = @document.compat_mode].match? text QUOTE_SUBS[compat].each do |type, scope, pattern| text = text.gsub(pattern) { convert_quoted_text $~, type, scope } end end text end # Public: Substitutes attribute references in the specified text # # Attribute references are in the format +{name}+. # # If an attribute referenced in the line is missing or undefined, the line may be dropped # based on the attribute-missing or attribute-undefined setting, respectively. 
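# NOTE illustrative sketch, not part of the upstream source; assumes the document
# defines version as 2.0.20 but leaves undefined-flag unset, with attribute-missing
# at its default of skip:
#
#   block.sub_attributes 'v{version} ({undefined-flag})'
#   # => "v2.0.20 ({undefined-flag})"
#
# With attribute-missing=drop-line, the whole line is dropped instead (an empty
# string is returned for single-line input).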
# # text - The String text to process # opts - A Hash of options to control processing: (default: {}) # * :attribute_missing controls how to handle a missing attribute (see Compliance.attribute_missing for values) # * :drop_line_severity the severity level at which to log a dropped line (:info or :ignore) # # Returns the [String] text with the attribute references replaced with resolved values def sub_attributes text, opts = {} doc_attrs = @document.attributes drop = drop_line = drop_line_severity = drop_empty_line = attribute_undefined = attribute_missing = nil text = text.gsub AttributeReferenceRx do # escaped attribute, return unescaped if $1 == RS || $4 == RS %({#{$2}}) elsif $3 case (args = $2.split ':', 3).shift when 'set' _, value = Parser.store_attribute args[0], args[1] || '', @document # NOTE since this is an assignment, only drop-line applies here (skip and drop imply the same result) if value || (attribute_undefined ||= (doc_attrs['attribute-undefined'] || Compliance.attribute_undefined)) != 'drop-line' drop = drop_empty_line = DEL else drop = drop_line = CAN end when 'counter2' @document.counter(*args) drop = drop_empty_line = DEL else # 'counter' @document.counter(*args) end elsif doc_attrs.key?(key = $2.downcase) doc_attrs[key] elsif (value = INTRINSIC_ATTRIBUTES[key]) value else case (attribute_missing ||= (opts[:attribute_missing] || doc_attrs['attribute-missing'] || Compliance.attribute_missing)) when 'drop' drop = drop_empty_line = DEL when 'drop-line' if (drop_line_severity ||= (opts[:drop_line_severity] || :info)) == :info logger.info { %(dropping line containing reference to missing attribute: #{key}) } #elsif drop_line_severity == :warn # logger.warn %(dropping line containing reference to missing attribute: #{key}) end drop = drop_line = CAN when 'warn' logger.warn %(skipping reference to missing attribute: #{key}) $& else # 'skip' $& end end end if drop # drop lines from text if drop_empty_line lines = (text.squeeze DEL).split LF, -1 if drop_line (lines.reject {|line| line == DEL || line == CAN || (line.start_with? CAN) || (line.include? CAN) }.join LF).delete DEL else (lines.reject {|line| line == DEL }.join LF).delete DEL end elsif text.include? LF (text.split LF, -1).reject {|line| line == CAN || (line.start_with? CAN) || (line.include? CAN) }.join LF else '' end else text end end # Public: Substitute replacement characters (e.g., copyright, trademark, etc.) # # text - The String text to process # # returns The [String] text with the replacement characters substituted def sub_replacements text REPLACEMENTS.each do |pattern, replacement, restore| text = text.gsub(pattern) { do_replacement $~, replacement, restore } end if ReplaceableTextRx.match? text text end # Public: Substitute inline macros (e.g., links, images, etc) # # Replace inline macros, which may span multiple lines, in the provided text # # source - The String text to process # # returns The converted String text def sub_macros text #return text if text.nil_or_empty? # some look ahead assertions to cut unnecessary regex calls found_square_bracket = text.include? '[' found_colon = text.include? ':' found_macroish = found_square_bracket && found_colon found_macroish_short = found_macroish && (text.include? ':[') doc_attrs = (doc = @document).attributes # TODO allow position of substitution to be controlled (before or after other macros) # TODO this handling needs some cleanup if (extensions = doc.extensions) && extensions.inline_macros? 
# && found_macroish extensions.inline_macros.each do |extension| text = text.gsub extension.instance.regexp do # honor the escape next $&.slice 1, $&.length if (match = $&).start_with? RS if $~.names.empty? target, content = $1, $2 else target, content = ($~[:target] rescue nil), ($~[:content] rescue nil) end attributes = (default_attrs = (ext_config = extension.config)[:default_attrs]) ? default_attrs.merge : {} if content if content.empty? attributes['text'] = content unless ext_config[:content_model] == :attributes else content = normalize_text content, true, true # QUESTION should we store the unparsed attrlist in the attrlist key? if ext_config[:content_model] == :attributes parse_attributes content, ext_config[:positional_attrs] || ext_config[:pos_attrs] || [], into: attributes else attributes['text'] = content end end # NOTE for convenience, map content (unparsed attrlist) to target when format is short target ||= ext_config[:format] == :short ? content : target end if Inline === (replacement = extension.process_method[self, target, attributes]) if (inline_subs = replacement.attributes.delete 'subs') && (inline_subs = expand_subs inline_subs, 'custom inline macro') replacement.text = apply_subs replacement.text, inline_subs end replacement.convert elsif replacement logger.info { %(expected substitution value for custom inline macro to be of type Inline; got #{replacement.class}: #{match}) } replacement else '' end end end end if doc_attrs.key? 'experimental' if found_macroish_short && ((text.include? 'kbd:') || (text.include? 'btn:')) text = text.gsub InlineKbdBtnMacroRx do # honor the escape if $1 $&.slice 1, $&.length elsif $2 == 'kbd' if (keys = $3.strip).include? R_SB keys = keys.gsub ESC_R_SB, R_SB end if keys.length > 1 && (delim_idx = (delim_idx = keys.index ',', 1) ? [delim_idx, (keys.index '+', 1)].compact.min : (keys.index '+', 1)) delim = keys.slice delim_idx, 1 # NOTE handle special case where keys ends with delimiter (e.g., Ctrl++ or Ctrl,,) if keys.end_with? delim keys = (keys.chop.split delim, -1).map {|key| key.strip } keys[-1] += delim else keys = keys.split(delim).map {|key| key.strip } end else keys = [keys] end (Inline.new self, :kbd, nil, attributes: { 'keys' => keys }).convert else # $2 == 'btn' (Inline.new self, :button, (normalize_text $3, true, true)).convert end end end if found_macroish && (text.include? 'menu:') text = text.gsub InlineMenuMacroRx do # honor the escape next $&.slice 1, $&.length if $&.start_with? RS menu = $1 if (items = $2) items = items.gsub ESC_R_SB, R_SB if items.include? R_SB if (delim = items.include?('>') ? '>' : (items.include?(',') ? ',' : nil)) submenus = items.split(delim).map {|it| it.strip } menuitem = submenus.pop else submenus, menuitem = [], items.rstrip end else submenus, menuitem = [], nil end Inline.new(self, :menu, nil, attributes: { 'menu' => menu, 'submenus' => submenus, 'menuitem' => menuitem }).convert end end if (text.include? '"') && (text.include? '>') text = text.gsub InlineMenuRx do # honor the escape next $&.slice 1, $&.length if $&.start_with? RS menu, *submenus = $1.split('>').map {|it| it.strip } menuitem = submenus.pop Inline.new(self, :menu, nil, attributes: { 'menu' => menu, 'submenus' => submenus, 'menuitem' => menuitem }).convert end end end if found_macroish && ((text.include? 'image:') || (text.include? 'icon:')) # image:filename.png[Alt Text] text = text.gsub InlineImageMacroRx do # honor the escape if $&.start_with? RS next $&.slice 1, $&.length elsif $&.start_with? 
'icon:' type, posattrs = 'icon', ['size'] else type, posattrs = 'image', ['alt', 'width', 'height'] end target = $1 attrs = parse_attributes $2, posattrs, unescape_input: true unless type == 'icon' doc.register :images, target attrs['imagesdir'] = doc_attrs['imagesdir'] end attrs['alt'] ||= (attrs['default-alt'] = Helpers.basename(target, true).tr('_-', ' ')) Inline.new(self, :image, nil, type: type, target: target, attributes: attrs).convert end end if ((text.include? '((') && (text.include? '))')) || (found_macroish_short && (text.include? 'dexterm')) # (((Tigers,Big cats))) # indexterm:[Tigers,Big cats] # ((Tigers)) # indexterm2:[Tigers] text = text.gsub InlineIndextermMacroRx do case $1 when 'indexterm' # honor the escape next $&.slice 1, $&.length if $&.start_with? RS # indexterm:[Tigers,Big cats] if (attrlist = normalize_text $2, true, true).include? '=' if (primary = (attrs = (AttributeList.new attrlist, self).parse)[1]) attrs['terms'] = [primary] if (see_also = attrs['see-also']) attrs['see-also'] = (see_also.include? ',') ? (see_also.split ',').map {|it| it.lstrip } : [see_also] end else attrs = { 'terms' => attrlist } end else attrs = { 'terms' => (split_simple_csv attrlist) } end (Inline.new self, :indexterm, nil, attributes: attrs).convert when 'indexterm2' # honor the escape next $&.slice 1, $&.length if $&.start_with? RS # indexterm2:[Tigers] if (term = normalize_text $2, true, true).include? '=' term = (attrs = (AttributeList.new term, self).parse)[1] || (attrs = nil) || term if attrs && (see_also = attrs['see-also']) attrs['see-also'] = (see_also.include? ',') ? (see_also.split ',').map {|it| it.lstrip } : [see_also] end end (Inline.new self, :indexterm, term, attributes: attrs, type: :visible).convert else encl_text = $3 # honor the escape if $&.start_with? RS # escape concealed index term, but process nested flow index term if (encl_text.start_with? '(') && (encl_text.end_with? ')') encl_text = encl_text.slice 1, encl_text.length - 2 visible, before, after = true, '(', ')' else next $&.slice 1, $&.length end else visible = true if encl_text.start_with? '(' if encl_text.end_with? ')' encl_text, visible = (encl_text.slice 1, encl_text.length - 2), false else encl_text, before, after = (encl_text.slice 1, encl_text.length), '(', '' end elsif encl_text.end_with? ')' encl_text, before, after = encl_text.chop, '', ')' end end if visible # ((Tigers)) if (term = normalize_text encl_text, true).include? ';&' if term.include? ' >> ' term, _, see = term.partition ' >> ' attrs = { 'see' => see } elsif term.include? ' &> ' term, *see_also = term.split ' &> ' attrs = { 'see-also' => see_also } end end subbed_term = (Inline.new self, :indexterm, term, attributes: attrs, type: :visible).convert else # (((Tigers,Big cats))) attrs = {} if (terms = normalize_text encl_text, true).include? ';&' if terms.include? ' >> ' terms, _, see = terms.partition ' >> ' attrs['see'] = see elsif terms.include? ' &> ' terms, *see_also = terms.split ' &> ' attrs['see-also'] = see_also end end attrs['terms'] = split_simple_csv terms subbed_term = (Inline.new self, :indexterm, nil, attributes: attrs).convert end before ? %(#{before}#{subbed_term}#{after}) : subbed_term end end end if found_colon && (text.include? '://') # inline urls, target[text] (optionally prefixed with link: and optionally surrounded by <>) text = text.gsub InlineLinkRx do if (target = $2 + ($3 || $5)).start_with? 
RS # honor the escape next ($&.slice 0, (rs_idx = $1.length)) + ($&.slice rs_idx + 1, $&.length) end prefix, suffix = $1, '' # NOTE if $4 is set, we're looking at a formal macro (e.g., https://example.org[]) if $4 prefix = '' if prefix == 'link:' link_text = nil if (link_text = $4).empty? else # invalid macro syntax (link: prefix w/o trailing square brackets or enclosed in double quotes) # FIXME we probably shouldn't even get here when the link: prefix is present; the regex is doing too much case prefix when 'link:', ?", ?' next $& end case $6 when ';' if (prefix.start_with? '<') && (target.end_with? '>') # move surrounding <> out of URL prefix = prefix.slice 4, prefix.length target = target.slice 0, target.length - 4 elsif (target = target.chop).end_with? ')' # move trailing ); out of URL target = target.chop suffix = ');' else # move trailing ; out of URL suffix = ';' end # NOTE handle case when modified target is a URI scheme (e.g., http://) next $& if target.end_with? '://' when ':' if (target = target.chop).end_with? ')' # move trailing ): out of URL target = target.chop suffix = '):' else # move trailing : out of URL suffix = ':' end # NOTE handle case when modified target is a URI scheme (e.g., http://) next $& if target.end_with? '://' end end attrs, link_opts = nil, { type: :link } if link_text new_link_text = link_text = link_text.gsub ESC_R_SB, R_SB if link_text.include? R_SB if !doc.compat_mode && (link_text.include? '=') # NOTE if an equals sign (=) is present, extract attributes from link text link_text, attrs = extract_attributes_from_text link_text, '' new_link_text = link_text link_opts[:id] = attrs['id'] end if link_text.end_with? '^' new_link_text = link_text = link_text.chop if attrs attrs['window'] ||= '_blank' else attrs = { 'window' => '_blank' } end end if new_link_text && new_link_text.empty? # NOTE it's not possible for the URI scheme to be bare in this case link_text = (doc_attrs.key? 'hide-uri-scheme') ? (target.sub UriSniffRx, '') : target bare = true end else # NOTE it's not possible for the URI scheme to be bare in this case link_text = (doc_attrs.key? 'hide-uri-scheme') ? (target.sub UriSniffRx, '') : target bare = true end if bare if attrs attrs['role'] = (attrs.key? 'role') ? %(bare #{attrs['role']}) : 'bare' else attrs = { 'role' => 'bare' } end end doc.register :links, (link_opts[:target] = target) link_opts[:attributes] = attrs if attrs %(#{prefix}#{(Inline.new self, :anchor, link_text, link_opts).convert}#{suffix}) end end if found_macroish && ((text.include? 'link:') || (text.include? 'ilto:')) # inline link macros, link:target[text] text = text.gsub InlineLinkMacroRx do # honor the escape if $&.start_with? RS next $&.slice 1, $&.length elsif (mailto = $1) target = 'mailto:' + (mailto_text = $2) else target = $2 end attrs, link_opts = nil, { type: :link } unless (link_text = $3).empty? link_text = link_text.gsub ESC_R_SB, R_SB if link_text.include? R_SB if mailto if !doc.compat_mode && (link_text.include? ',') # NOTE if a comma (,) is present, extract attributes from link text link_text, attrs = extract_attributes_from_text link_text, '' link_opts[:id] = attrs['id'] if attrs.key? 2 if attrs.key? 3 target = %(#{target}?subject=#{Helpers.encode_uri_component attrs[2]}&body=#{Helpers.encode_uri_component attrs[3]}) else target = %(#{target}?subject=#{Helpers.encode_uri_component attrs[2]}) end end end elsif !doc.compat_mode && (link_text.include? 
'=') # NOTE if an equals sign (=) is present, extract attributes from link text link_text, attrs = extract_attributes_from_text link_text, '' link_opts[:id] = attrs['id'] end if link_text.end_with? '^' link_text = link_text.chop if attrs attrs['window'] ||= '_blank' else attrs = { 'window' => '_blank' } end end end if link_text.empty? # mailto is a special case, already processed if mailto link_text = mailto_text else if doc_attrs.key? 'hide-uri-scheme' if (link_text = target.sub UriSniffRx, '').empty? link_text = target end else link_text = target end if attrs attrs['role'] = (attrs.key? 'role') ? %(bare #{attrs['role']}) : 'bare' else attrs = { 'role' => 'bare' } end end end # QUESTION should a mailto be registered as an e-mail address? doc.register :links, (link_opts[:target] = target) link_opts[:attributes] = attrs if attrs Inline.new(self, :anchor, link_text, link_opts).convert end end if text.include? '@' text = text.gsub InlineEmailRx do # honor the escape next $1 == RS ? ($&.slice 1, $&.length) : $& if $1 target = 'mailto:' + (address = $&) # QUESTION should this be registered as an e-mail address? doc.register(:links, target) Inline.new(self, :anchor, address, type: :link, target: target).convert end end if found_square_bracket && @context == :list_item && @parent.style == 'bibliography' text = text.sub(InlineBiblioAnchorRx) { (Inline.new self, :anchor, $2, type: :bibref, id: $1).convert } end if (found_square_bracket && text.include?('[[')) || (found_macroish && text.include?('or:')) text = text.gsub InlineAnchorRx do # honor the escape next $&.slice 1, $&.length if $1 # NOTE reftext is only relevant for DocBook output; used as value of xreflabel attribute if (id = $2) reftext = $3 else id = $4 if (reftext = $5) && (reftext.include? R_SB) reftext = reftext.gsub ESC_R_SB, R_SB end end Inline.new(self, :anchor, reftext, type: :ref, id: id).convert end end #if (text.include? ';&l') || (found_macroish && (text.include? 'xref:')) if ((text.include? '&') && (text.include? ';&l')) || (found_macroish && (text.include? 'xref:')) text = text.gsub InlineXrefMacroRx do # honor the escape next $&.slice 1, $&.length if $&.start_with? RS attrs = {} if (refid = $1) if refid.include? ',' refid, _, link_text = refid.partition ',' link_text = nil if (link_text = link_text.lstrip).empty? end else macro = true refid = $2 if (link_text = $3) link_text = link_text.gsub ESC_R_SB, R_SB if link_text.include? R_SB # NOTE if an equals sign (=) is present, extract attributes from link text link_text, attrs = extract_attributes_from_text link_text if !doc.compat_mode && (link_text.include? '=') end end if doc.compat_mode fragment = refid elsif (hash_idx = refid.index '#') && refid[hash_idx - 1] != '&' if hash_idx > 0 if (fragment_len = refid.length - 1 - hash_idx) > 0 path, fragment = (refid.slice 0, hash_idx), (refid.slice hash_idx + 1, fragment_len) else path = refid.chop end if macro if path.end_with? '.adoc' src2src = path = path.slice 0, path.length - 5 elsif !(Helpers.extname? path) src2src = path end elsif path.end_with?(*ASCIIDOC_EXTENSIONS.keys) src2src = path = path.slice 0, (path.rindex '.') else src2src = path end else target, fragment = refid, (refid.slice 1, refid.length) end elsif macro if refid.end_with? '.adoc' src2src = path = refid.slice 0, refid.length - 5 elsif Helpers.extname? refid path = refid else fragment = refid end else fragment = refid end # handles: #id if target refid = fragment logger.info %(possible invalid reference: #{refid}) if logger.info? 
&& !doc.catalog[:refs][refid] elsif path # handles: path#, path#id, path.adoc#, path.adoc#id, or path.adoc (xref macro only) # the referenced path is the current document, or its contents have been included in the current document if src2src && (doc.attributes['docname'] == path || doc.catalog[:includes][path]) if fragment refid, path, target = fragment, nil, %(##{fragment}) logger.info %(possible invalid reference: #{refid}) if logger.info? && !doc.catalog[:refs][refid] else refid, path, target = nil, nil, '#' end else refid, path = path, %(#{doc.attributes['relfileprefix'] || ''}#{path}#{src2src ? (doc.attributes.fetch 'relfilesuffix', doc.outfilesuffix) : ''}) if fragment refid, target = %(#{refid}##{fragment}), %(#{path}##{fragment}) else target = path end end # handles: id (in compat mode or when natural xrefs are disabled) elsif doc.compat_mode || !Compliance.natural_xrefs refid, target = fragment, %(##{fragment}) logger.info %(possible invalid reference: #{refid}) if logger.info? && !doc.catalog[:refs][refid] # handles: id elsif doc.catalog[:refs][fragment] refid, target = fragment, %(##{fragment}) # handles: Node Title or Reference Text # do reverse lookup on fragment if not a known ID and resembles reftext (contains a space or uppercase char) elsif ((fragment.include? ' ') || fragment.downcase != fragment) && (refid = doc.resolve_id fragment) fragment, target = refid, %(##{refid}) else refid, target = fragment, %(##{fragment}) logger.info %(possible invalid reference: #{refid}) if logger.info? end attrs['path'] = path attrs['fragment'] = fragment attrs['refid'] = refid Inline.new(self, :anchor, link_text, type: :xref, target: target, attributes: attrs).convert end end if found_macroish && (text.include? 'tnote') text = text.gsub InlineFootnoteMacroRx do # honor the escape next $&.slice 1, $&.length if $&.start_with? RS # footnoteref if $1 if $3 id, content = $3.split ',', 2 logger.warn %(found deprecated footnoteref macro: #{$&}; use footnote macro with target instead) unless doc.compat_mode else next $& end # footnote else id = $2 content = $3 end if id if (footnote = doc.footnotes.find {|candidate| candidate.id == id }) index, content = footnote.index, footnote.text type, target, id = :xref, id, nil elsif content content = restore_passthroughs(normalize_text content, true, true) index = doc.counter('footnote-number') doc.register(:footnotes, Document::Footnote.new(index, id, content)) type, target = :ref, nil else logger.warn %(invalid footnote reference: #{id}) type, target, content, id = :xref, id, id, nil end elsif content content = restore_passthroughs(normalize_text content, true, true) index = doc.counter('footnote-number') doc.register(:footnotes, Document::Footnote.new(index, id, content)) type = target = nil else next $& end Inline.new(self, :footnote, content, attributes: { 'index' => index }, id: id, target: target, type: type).convert end end text end # Public: Substitute post replacements # # text - The String text to process # # Returns the converted String text def sub_post_replacements text #if attr? 'hardbreaks-option', nil, true if @attributes['hardbreaks-option'] || @document.attributes['hardbreaks-option'] lines = text.split LF, -1 return text if lines.size < 2 last = lines.pop (lines.map do |line| Inline.new(self, :break, (line.end_with? HARD_LINE_BREAK) ? (line.slice 0, line.length - 2) : line, type: :line).convert end << last).join LF elsif (text.include? PLUS) && (text.include? 
HARD_LINE_BREAK) text.gsub(HardLineBreakRx) { Inline.new(self, :break, $1, type: :line).convert } else text end end # Public: Apply verbatim substitutions on source (for use when highlighting is disabled). # # source - the source code String on which to apply verbatim substitutions # process_callouts - a Boolean flag indicating whether callout marks should be substituted # # Returns the substituted source def sub_source source, process_callouts process_callouts ? sub_callouts(sub_specialchars source) : (sub_specialchars source) end # Public: Substitute callout source references # # text - The String text to process # # Returns the converted String text def sub_callouts text callout_rx = (attr? 'line-comment') ? CalloutSourceRxMap[attr 'line-comment'] : CalloutSourceRx autonum = 0 text.gsub callout_rx do # honor the escape if $2 # use sub since it might be behind a line comment $&.sub RS, '' else Inline.new(self, :callout, $4 == '.' ? (autonum += 1).to_s : $4, id: @document.callouts.read_next_id, attributes: { 'guard' => $1 || ($3 == '--' ? [''] : nil) }).convert end end end # Public: Highlight (i.e., colorize) the source code during conversion using a syntax highlighter, if activated by the # source-highlighter document attribute. Otherwise return the text with verbatim substitutions applied. # # If the process_callouts argument is true, this method will extract the callout marks from the source before passing # it to the syntax highlighter, then subsequently restore those callout marks to the highlighted source so the callout # marks don't confuse the syntax highlighter. # # source - the source code String to syntax highlight # process_callouts - a Boolean flag indicating whether callout marks should be located and substituted # # Returns the highlighted source code, if a syntax highlighter is defined on the document, otherwise the source with # verbatim substitutions applied def highlight_source source, process_callouts # NOTE the call to highlight? is a defensive check since, normally, we wouldn't arrive here unless it returns true return sub_source source, process_callouts unless (syntax_hl = @document.syntax_highlighter) && syntax_hl.highlight? source, callout_marks = extract_callouts source if process_callouts doc_attrs = @document.attributes syntax_hl_name = syntax_hl.name if (linenums_mode = (attr? 'linenums') ? (doc_attrs[%(#{syntax_hl_name}-linenums-mode)] || :table).to_sym : nil) && (start_line_number = (attr 'start', 1).to_i) < 1 start_line_number = 1 end highlight_lines = resolve_lines_to_highlight source, (attr 'highlight'), start_line_number if attr? 'highlight' highlighted, source_offset = syntax_hl.highlight self, source, (attr 'language'), callouts: callout_marks, css_mode: (doc_attrs[%(#{syntax_hl_name}-css)] || :class).to_sym, highlight_lines: highlight_lines, number_lines: linenums_mode, start_line_number: start_line_number, style: doc_attrs[%(#{syntax_hl_name}-style)] # fix passthrough placeholders that got caught up in syntax highlighting highlighted = highlighted.gsub HighlightedPassSlotRx, %(#{PASS_START}\\1#{PASS_END}) unless @passthroughs.empty? # NOTE highlight method may have depleted callouts callout_marks.nil_or_empty? ? highlighted : (restore_callouts highlighted, callout_marks, source_offset) end # Public: Resolve the line numbers in the specified source to highlight from the provided spec. # # e.g., highlight="1-5, !2, 10" or highlight=1-5;!2,10 # # source - The String source. 
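# NOTE illustrative sketch, not part of the upstream source; negated entries are
# removed from the expanded ranges and the result is sorted:
#
#   resolve_lines_to_highlight source, '1-5,!2,10'
#   # => [1, 3, 4, 5, 10]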
# spec - The lines specifier (e.g., "1-5, !2, 10" or "1..5;!2;10") # start - The line number of the first line (optional, default: false) # # Returns an [Array] of unique, sorted line numbers. def resolve_lines_to_highlight source, spec, start = nil lines = [] spec = spec.delete ' ' if spec.include? ' ' ((spec.include? ',') ? (spec.split ',') : (spec.split ';')).map do |entry| if entry.start_with? '!' entry = entry.slice 1, entry.length negate = true end if (delim = (entry.include? '..') ? '..' : ((entry.include? '-') ? '-' : nil)) from, _, to = entry.partition delim to = (source.count LF) + 1 if to.empty? || (to = to.to_i) < 0 if negate lines -= (from.to_i..to).to_a else lines |= (from.to_i..to).to_a end elsif negate lines.delete entry.to_i elsif !lines.include?(line = entry.to_i) lines << line end end # If the start attribute is defined, then the lines to highlight specified by the provided spec should be relative to the start value. unless (shift = start ? start - 1 : 0) == 0 lines = lines.map {|it| it - shift } end lines.sort end # Public: Extract the passthrough text from the document for reinsertion after processing. # # text - The String from which to extract passthrough fragments # # Returns the String text with passthrough regions substituted with placeholders def extract_passthroughs text compat_mode = @document.compat_mode passthrus = @passthroughs text = text.gsub InlinePassMacroRx do if (boundary = $4) # $$, ++, or +++ # skip ++ in compat mode, handled as normal quoted text next %(#{$2 ? "#{$1}[#{$2}]#{$3}" : "#{$1}#{$3}"}++#{extract_passthroughs $5}++) if compat_mode && boundary == '++' if (attrlist = $2) if (escape_count = $3.length) > 0 # NOTE we don't look for nested unconstrained pass macros next %(#{$1}[#{attrlist}]#{RS * (escape_count - 1)}#{boundary}#{$5}#{boundary}) elsif $1 == RS preceding = %([#{attrlist}]) elsif boundary == '++' if attrlist == 'x-' old_behavior = true attributes = {} elsif attrlist.end_with? ' x-' old_behavior = true attributes = parse_quoted_text_attributes attrlist.slice 0, attrlist.length - 3 else attributes = parse_quoted_text_attributes attrlist end else attributes = parse_quoted_text_attributes attrlist end elsif (escape_count = $3.length) > 0 # NOTE we don't look for nested unconstrained pass macros next %(#{RS * (escape_count - 1)}#{boundary}#{$5}#{boundary}) end subs = (boundary == '+++' ? [] : BASIC_SUBS) if attributes if old_behavior passthrus[passthru_key = passthrus.size] = { text: $5, subs: NORMAL_SUBS, type: :monospaced, attributes: attributes } else passthrus[passthru_key = passthrus.size] = { text: $5, subs: subs, type: :unquoted, attributes: attributes } end else passthrus[passthru_key = passthrus.size] = { text: $5, subs: subs } end else # pass:[] # NOTE we don't look for nested pass:[] macros # honor the escape next $&.slice 1, $&.length if $6 == RS if (subs = $7) passthrus[passthru_key = passthrus.size] = { text: (normalize_text $8, nil, true), subs: (resolve_pass_subs subs) } else passthrus[passthru_key = passthrus.size] = { text: (normalize_text $8, nil, true) } end end %(#{preceding || ''}#{PASS_START}#{passthru_key}#{PASS_END}) end if (text.include? '++') || (text.include? '$$') || (text.include? 'ss:') pass_inline_char1, pass_inline_char2, pass_inline_rx = InlinePassRx[compat_mode] text = text.gsub pass_inline_rx do preceding = $1 attrlist = $4 || $3 escaped = true if $5 quoted_text = $6 format_mark = $7 content = $8 if compat_mode old_behavior = true elsif attrlist && (attrlist == 'x-' || (attrlist.end_with? 
' x-')) old_behavior = old_behavior_forced = true end if attrlist if escaped # honor the escape of the formatting mark next %(#{preceding}[#{attrlist}]#{quoted_text.slice 1, quoted_text.length}) elsif preceding == RS # honor the escape of the attributes next %(#{preceding}[#{attrlist}]#{quoted_text}) if old_behavior_forced && format_mark == '`' preceding = %([#{attrlist}]) elsif old_behavior_forced attributes = attrlist == 'x-' ? {} : (parse_quoted_text_attributes attrlist.slice 0, attrlist.length - 3) else attributes = parse_quoted_text_attributes attrlist end elsif escaped # honor the escape of the formatting mark next %(#{preceding}#{quoted_text.slice 1, quoted_text.length}) elsif compat_mode && preceding == RS next quoted_text end if compat_mode passthrus[passthru_key = passthrus.size] = { text: content, subs: BASIC_SUBS, attributes: attributes, type: :monospaced } elsif attributes if old_behavior subs = format_mark == '`' ? BASIC_SUBS : NORMAL_SUBS passthrus[passthru_key = passthrus.size] = { text: content, subs: subs, attributes: attributes, type: :monospaced } else passthrus[passthru_key = passthrus.size] = { text: content, subs: BASIC_SUBS, attributes: attributes, type: :unquoted } end else passthrus[passthru_key = passthrus.size] = { text: content, subs: BASIC_SUBS } end %(#{preceding}#{PASS_START}#{passthru_key}#{PASS_END}) end if (text.include? pass_inline_char1) || (pass_inline_char2 && (text.include? pass_inline_char2)) # NOTE we need to do the stem in a subsequent step to allow it to be escaped by the former text = text.gsub InlineStemMacroRx do # honor the escape next $&.slice 1, $&.length if $&.start_with? RS if (type = $1.to_sym) == :stem type = STEM_TYPE_ALIASES[@document.attributes['stem']].to_sym end subs = $2 content = normalize_text $3, nil, true # NOTE drop enclosing $ signs around latexmath for backwards compatibility with AsciiDoc.py content = content.slice 1, content.length - 2 if type == :latexmath && (content.start_with? '$') && (content.end_with? '$') subs = subs ? (resolve_pass_subs subs) : ((@document.basebackend? 'html') ? BASIC_SUBS : nil) passthrus[passthru_key = passthrus.size] = { text: content, subs: subs, type: type } %(#{PASS_START}#{passthru_key}#{PASS_END}) end if (text.include? ':') && ((text.include? 'stem:') || (text.include? 'math:')) text end # Public: Restore the passthrough text by reinserting into the placeholder positions # # text - The String text into which to restore the passthrough text # # returns The String text with the passthrough text restored def restore_passthroughs text passthrus = @passthroughs text.gsub PassSlotRx do if (pass = passthrus[$1.to_i]) subbed_text = apply_subs(pass[:text], pass[:subs]) if (type = pass[:type]) if (attributes = pass[:attributes]) id = attributes['id'] end subbed_text = Inline.new(self, :quoted, subbed_text, type: type, id: id, attributes: attributes).convert end subbed_text.include?(PASS_START) ? restore_passthroughs(subbed_text) : subbed_text else logger.error %(unresolved passthrough detected: #{text}) '??pass??' end end end # Public: Resolve the list of comma-delimited subs against the possible options. # # subs - The comma-delimited String of substitution names or aliases. # type - A Symbol representing the context for which the subs are being resolved (default: :block). # defaults - An Array of substitutions to start with when computing incremental substitutions (default: nil). 
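# NOTE illustrative sketch, not part of the upstream source; resolving an
# incremental spec against the verbatim defaults removes callouts and appends
# quotes:
#
#   resolve_subs '-callouts,+quotes', :block, [:specialcharacters, :callouts]
#   # => [:specialcharacters, :quotes]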
# subject - The String to use in log messages to communicate the subject for which subs are being resolved (default: nil) # # Returns An Array of Symbols representing the substitution operation or nothing if no subs are found. def resolve_subs subs, type = :block, defaults = nil, subject = nil return if subs.nil_or_empty? # QUESTION should we store candidates as a Set instead of an Array? candidates = nil subs = subs.delete ' ' if subs.include? ' ' modifiers_present = SubModifierSniffRx.match? subs subs.split(',').each do |key| modifier_operation = nil if modifiers_present if (first = key.chr) == '+' modifier_operation = :append key = key.slice 1, key.length elsif first == '-' modifier_operation = :remove key = key.slice 1, key.length elsif key.end_with? '+' modifier_operation = :prepend key = key.chop end end key = key.to_sym # special case to disable callouts for inline subs if type == :inline && (key == :verbatim || key == :v) resolved_keys = BASIC_SUBS elsif SUB_GROUPS.key? key resolved_keys = SUB_GROUPS[key] elsif type == :inline && key.length == 1 && (SUB_HINTS.key? key) resolved_key = SUB_HINTS[key] if (candidate = SUB_GROUPS[resolved_key]) resolved_keys = candidate else resolved_keys = [resolved_key] end else resolved_keys = [key] end if modifier_operation candidates ||= (defaults ? (defaults.drop 0) : []) case modifier_operation when :append candidates += resolved_keys when :prepend candidates = resolved_keys + candidates when :remove candidates -= resolved_keys end else candidates ||= [] candidates += resolved_keys end end return unless candidates # weed out invalid options and remove duplicates (order is preserved; first occurrence wins) resolved = candidates & SUB_OPTIONS[type] unless (candidates - resolved).empty? invalid = candidates - resolved logger.warn %(invalid substitution type#{invalid.size > 1 ? 's' : ''}#{subject ? ' for ' : ''}#{subject}: #{invalid.join ', '}) end resolved end # Public: Call resolve_subs for the :block type. def resolve_block_subs subs, defaults, subject resolve_subs subs, :block, defaults, subject end # Public: Call resolve_subs for the :inline type with the subject set as passthrough macro. def resolve_pass_subs subs resolve_subs subs, :inline, nil, 'passthrough macro' end # Public: Expand all groups in the subs list and return. If no subs are resolved, return nil. # # subs - The substitutions to expand; can be a Symbol, Symbol Array, or String # subject - The String to use in log messages to communicate the subject for which subs are being resolved (default: nil) # # Returns a Symbol Array of substitutions to pass to apply_subs or nil if no substitutions were resolved. def expand_subs subs, subject = nil case subs when ::Symbol subs == :none ? nil : SUB_GROUPS[subs] || [subs] when ::Array expanded_subs = [] subs.each do |key| unless key == :none if (sub_group = SUB_GROUPS[key]) expanded_subs += sub_group else expanded_subs << key end end end expanded_subs.empty? ? nil : expanded_subs else resolve_subs subs, :inline, nil, subject end end # Internal: Commit the requested substitutions to this block. # # Looks for an attribute named "subs". If present, resolves substitutions # from the value of that attribute and assigns them to the subs property on # this block. Otherwise, uses the substitutions assigned to the default_subs # property, if specified, or selects a default set of substitutions based on # the content model of the block. 
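# NOTE illustrative sketch, not part of the upstream source. A source listing
# written in AsciiDoc as
#
#   [source,ruby,subs="+attributes"]
#   ----
#   puts '{product-name}'
#   ----
#
# starts from the verbatim defaults and appends the attributes substitution, so
# @subs becomes [:specialcharacters, :callouts, :attributes] (with
# :specialcharacters later swapped for :highlight when a syntax highlighter is
# active for source blocks).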
# # Returns nothing def commit_subs unless (default_subs = @default_subs) case @content_model when :simple default_subs = NORMAL_SUBS when :verbatim # NOTE :literal with listparagraph-option gets folded into text of list item later default_subs = @context == :verse ? NORMAL_SUBS : VERBATIM_SUBS when :raw # TODO make pass subs a compliance setting; AsciiDoc.py performs :attributes and :macros on a pass block default_subs = @context == :stem ? BASIC_SUBS : NO_SUBS else return @subs end end if (custom_subs = @attributes['subs']) @subs = (resolve_block_subs custom_subs, default_subs, @context) || [] else @subs = default_subs.drop 0 end # QUESION delegate this logic to a method? if @context == :listing && @style == 'source' && (syntax_hl = @document.syntax_highlighter) && syntax_hl.highlight? && (idx = @subs.index :specialcharacters) @subs[idx] = :highlight end nil end # Internal: Parse attributes in name or name=value format from a comma-separated String # # attrlist - A comma-separated String list of attributes in name or name=value format. # posattrs - An Array of positional attribute names (default: []). # opts - A Hash of options to control how the string is parsed (default: {}): # :into - The Hash to parse the attributes into (optional, default: false). # :sub_input - A Boolean that indicates whether to substitute attributes prior to # parsing (optional, default: false). # :sub_result - A Boolean that indicates whether to apply substitutions # single-quoted attribute values (optional, default: true). # :unescape_input - A Boolean that indicates whether to unescape square brackets prior # to parsing (optional, default: false). # # Returns an empty Hash if attrlist is nil or empty, otherwise a Hash of parsed attributes. def parse_attributes attrlist, posattrs = [], opts = {} return {} if attrlist ? attrlist.empty? : true attrlist = normalize_text attrlist, true, true if opts[:unescape_input] attrlist = @document.sub_attributes attrlist if opts[:sub_input] && (attrlist.include? ATTR_REF_HEAD) # substitutions are only performed on attribute values if block is not nil block = self if opts[:sub_result] if (into = opts[:into]) AttributeList.new(attrlist, block).parse_into(into, posattrs) else AttributeList.new(attrlist, block).parse(posattrs) end end private # This method is used in cases when the attrlist can be mixed with the text of a macro. # If no attributes are detected aside from the first positional attribute, and the first positional # attribute matches the attrlist, then the original text is returned. def extract_attributes_from_text text, default_text = nil attrlist = (text.include? LF) ? (text.tr LF, ' ') : text if (resolved_text = (attrs = (AttributeList.new attrlist, self).parse)[1]) # NOTE if resolved text remains unchanged, clear attributes and return unparsed text resolved_text == attrlist ? [text, attrs.clear] : [resolved_text, attrs] else [default_text, attrs] end end # Internal: Extract the callout numbers from the source to prepare it for syntax highlighting. def extract_callouts source callout_marks = {} autonum = lineno = 0 last_lineno = nil callout_rx = (attr? 'line-comment') ? CalloutExtractRxMap[attr 'line-comment'] : CalloutExtractRx # extract callout marks, indexed by line number source = (source.split LF, -1).map do |line| lineno += 1 line.gsub callout_rx do # honor the escape if $2 # use sub since it might be behind a line comment $&.sub RS, '' else (callout_marks[lineno] ||= []) << [$1 || ($3 == '--' ? [''] : nil), $4 == '.' ? 
(autonum += 1).to_s : $4] last_lineno = lineno '' end end end.join LF if last_lineno source = %(#{source}#{LF}) if last_lineno == lineno else callout_marks = nil end [source, callout_marks] end # Internal: Restore the callout numbers to the highlighted source. def restore_callouts source, callout_marks, source_offset = nil if source_offset preamble = source.slice 0, source_offset source = source.slice source_offset, source.length else preamble = '' end lineno = 0 preamble + ((source.split LF, -1).map do |line| if (conums = callout_marks.delete lineno += 1) if conums.size == 1 guard, numeral = conums[0] %(#{line}#{Inline.new(self, :callout, numeral, id: @document.callouts.read_next_id, attributes: { 'guard' => guard }).convert}) else %(#{line}#{conums.map do |guard_it, numeral_it| Inline.new(self, :callout, numeral_it, id: @document.callouts.read_next_id, attributes: { 'guard' => guard_it }).convert end.join ' '}) end else line end end.join LF) end # Internal: Convert a quoted text region # # match - The MatchData for the quoted text region # type - The quoting type (single, double, strong, emphasis, monospaced, etc) # scope - The scope of the quoting (constrained or unconstrained) # # Returns The converted String text for the quoted text region def convert_quoted_text match, type, scope if match[0].start_with? RS if scope == :constrained && (attrs = match[2]) unescaped_attrs = %([#{attrs}]) else return match[0].slice 1, match[0].length end end if scope == :constrained if unescaped_attrs %(#{unescaped_attrs}#{Inline.new(self, :quoted, match[3], type: type).convert}) else if (attrlist = match[2]) id = (attributes = parse_quoted_text_attributes attrlist)['id'] type = :unquoted if type == :mark end %(#{match[1]}#{Inline.new(self, :quoted, match[3], type: type, id: id, attributes: attributes).convert}) end else if (attrlist = match[1]) id = (attributes = parse_quoted_text_attributes attrlist)['id'] type = :unquoted if type == :mark end Inline.new(self, :quoted, match[2], type: type, id: id, attributes: attributes).convert end end # Internal: Substitute replacement text for matched location # # returns The String text with the replacement characters substituted def do_replacement m, replacement, restore if (captured = m[0]).include? RS # we have to use sub since we aren't sure it's the first char captured.sub RS, '' else case restore when :none replacement when :bounding m[1] + replacement + m[2] else # :leading m[1] + replacement end end end # Internal: Inserts text into a formatted text enclosure; used by xreftext alias sub_placeholder sprintf unless RUBY_ENGINE == 'opal' # Internal: Parse the attributes that are defined on quoted (aka formatted) text # # str - A non-nil String of unprocessed attributes; # space-separated roles (e.g., role1 role2) or the id/role shorthand syntax (e.g., #idname.role) # # Returns a Hash of attributes (role and id only) def parse_quoted_text_attributes str # NOTE attributes are typically resolved after quoted text, so substitute eagerly str = sub_attributes str if str.include? ATTR_REF_HEAD # for compliance, only consider first positional attribute (very unlikely) str = str.slice 0, (str.index ',') if str.include? ',' if (str = str.strip).empty? {} elsif (str.start_with? '.', '#') && Compliance.shorthand_property_syntax before, _, after = str.partition '#' attrs = {} if after.empty? attrs['role'] = (before.tr '.', ' ').lstrip if before.length > 1 else id, _, roles = after.partition '.' attrs['id'] = id unless id.empty? if roles.empty? 
attrs['role'] = (before.tr '.', ' ').lstrip if before.length > 1 elsif before.length > 1 attrs['role'] = ((before + '.' + roles).tr '.', ' ').lstrip else attrs['role'] = roles.tr '.', ' ' end end attrs else { 'role' => str } end end # Internal: Normalize text to prepare it for parsing. # # If normalize_whitespace is true, strip surrounding whitespace and fold newlines. If unescape_closing_square_bracket # is set, unescape any escaped closing square brackets. # # Returns the normalized text String def normalize_text text, normalize_whitespace = nil, unescape_closing_square_brackets = nil unless text.empty? text = text.strip.tr LF, ' ' if normalize_whitespace text = text.gsub ESC_R_SB, R_SB if unescape_closing_square_brackets && (text.include? R_SB) end text end # Internal: Split text formatted as CSV with support # for double-quoted values (in which commas are ignored) def split_simple_csv str if str.empty? [] elsif str.include? '"' values = [] accum = '' quote_open = nil str.each_char do |c| case c when ',' if quote_open accum += c else values << accum.strip accum = '' end when '"' quote_open = !quote_open else accum += c end end values << accum.strip else str.split(',').map {|it| it.strip } end end end end asciidoctor-2.0.20/lib/asciidoctor/syntax_highlighter.rb000066400000000000000000000255701443135032600234120ustar00rootroot00000000000000# frozen_string_literal: true module Asciidoctor # Public: A pluggable adapter for integrating a syntax (aka code) highlighter into AsciiDoc processing. # # There are two types of syntax highlighter adapters. The first performs syntax highlighting during the convert phase. # This adapter type must define a highlight? method that returns true. The companion highlight method will then be # called to handle the :specialcharacters substitution for source blocks. The second assumes syntax highlighting is # performed on the client (e.g., when the HTML document is loaded). This adapter type must define a docinfo? method # that returns true. The companion docinfo method will then be called to insert markup into the output document. The # docinfo functionality is available to both adapter types. # # Asciidoctor provides several built-in adapters, including coderay, pygments, rouge, highlight.js, html-pipeline, and # prettify. Additional adapters can be registered using SyntaxHighlighter.register or by supplying a custom factory. module SyntaxHighlighter # Public: Returns the String name of this syntax highlighter for referencing it in messages and option names. attr_reader :name def initialize name, backend = 'html5', opts = {} @name = @pre_class = name end # Public: Indicates whether this syntax highlighter has docinfo (i.e., markup) to insert into the output document at # the specified location. Should be called by converter after main content has been converted. # # location - The Symbol representing the location slot (:head or :footer). # # Returns a [Boolean] indicating whether the docinfo method should be called for this location. def docinfo? location; end # Public: Generates docinfo markup for this syntax highlighter to insert at the specified location in the output document. # Should be called by converter after main content has been converted. # # location - The Symbol representing the location slot (:head or :footer). # doc - The Document in which this syntax highlighter is being used. 
# opts - A Hash of options that configure the syntax highlighting: # :linkcss - A Boolean indicating whether the stylesheet should be linked instead of embedded (optional). # :cdn_base_url - The String base URL for assets loaded from the CDN. # :self_closing_tag_slash - The String '/' if the converter calling this method emits self-closing tags. # # Return the [String] markup to insert. def docinfo location, doc, opts raise ::NotImplementedError, %(#{SyntaxHighlighter} subclass #{self.class} must implement the ##{__method__} method since #docinfo? returns true) end # Public: Indicates whether highlighting is handled by this syntax highlighter or by the client. # # Returns a [Boolean] indicating whether the highlight method should be used to handle the :specialchars substitution. def highlight?; end # Public: Highlights the specified source when this source block is being converted. # # If the source contains callout marks, the caller assumes the source remains on the same lines and no closing tags # are added to the end of each line. If the source gets shifted by one or more lines, this method must return a # tuple containing the highlighted source and the number of lines by which the source was shifted. # # node - The source Block to syntax highlight. # source - The raw source text String of this source block (after preprocessing). # lang - The source language String specified on this block (e.g., ruby). # opts - A Hash of options that configure the syntax highlighting: # :callouts - A Hash of callouts extracted from the source, indexed by line number (1-based) (optional). # :css_mode - The Symbol CSS mode (:class or :inline). # :highlight_lines - A 1-based Array of Integer line numbers to highlight (aka emphasize) (optional). # :number_lines - A Symbol indicating whether lines should be numbered (:table or :inline) (optional). # :start_line_number - The starting Integer (1-based) line number (optional, default: 1). # :style - The String style (aka theme) to use for colorizing the code (optional). # # Returns the highlighted source String or a tuple of the highlighted source String and an Integer line offset. def highlight node, source, lang, opts raise ::NotImplementedError, %(#{SyntaxHighlighter} subclass #{self.class} must implement the ##{__method__} method since #highlight? returns true) end # Public: Format the highlighted source for inclusion in an HTML document. # # node - The source Block being processed. # lang - The source language String for this Block (e.g., ruby). # opts - A Hash of options that control syntax highlighting: # :nowrap - A Boolean that indicates whether wrapping should be disabled (optional). # # Returns the highlighted source [String] wrapped in preformatted tags (e.g., pre and code) def format node, lang, opts raise ::NotImplementedError, %(#{SyntaxHighlighter} subclass #{self.class} must implement the ##{__method__} method) end # Public: Indicates whether this syntax highlighter wants to write a stylesheet to disk. Only called if both the # linkcss and copycss attributes are set on the document. # # doc - The Document in which this syntax highlighter is being used. # # Returns a [Boolean] indicating whether the write_stylesheet method should be called. def write_stylesheet? doc; end # Public: Writes the stylesheet to support the highlighted source(s) to disk. # # doc - The Document in which this syntax highlighter is being used. # to_dir - The absolute String path of the stylesheet output directory. # # Returns nothing. 
def write_stylesheet doc, to_dir raise ::NotImplementedError, %(#{SyntaxHighlighter} subclass #{self.class} must implement the ##{__method__} method since #write_stylesheet? returns true) end def self.included into into.extend Config end private_class_method :included # use separate declaration for Ruby 2.0.x module Config # Public: Statically register the current class in the registry for the specified names. # # names - one or more String or Symbol names with which to register the current class as a syntax highlighter # implementation. Symbol arguments are coerced to Strings. # # Returns nothing. def register_for *names SyntaxHighlighter.register self, *(names.map {|name| name.to_s }) end end module Factory # Public: Associates the syntax highlighter class or object with the specified names. # # syntax_highlighter - the syntax highlighter implementation to register # names - one or more String names with which to register this syntax highlighter implementation. # # Returns nothing. def register syntax_highlighter, *names names.each {|name| registry[name] = syntax_highlighter } end # Public: Retrieves the syntax highlighter class or object registered for the specified name. # # name - The String name of the syntax highlighter to retrieve. # # Returns the SyntaxHighlighter Class or Object instance registered for this name. def for name registry[name] end # Public: Resolves the name to a syntax highlighter instance, if found in the registry. # # name - The String name of the syntax highlighter to create. # backend - The String name of the backend for which this syntax highlighter is being used (default: 'html5'). # opts - A Hash of options providing information about the context in which this syntax highlighter is used: # :document - The Document for which this syntax highlighter was created. # # Returns a [SyntaxHighlighter] instance for the specified name. def create name, backend = 'html5', opts = {} if (syntax_hl = self.for name) syntax_hl = syntax_hl.new name, backend, opts if ::Class === syntax_hl raise ::NameError, %(#{syntax_hl.class} must specify a value for `name') unless syntax_hl.name syntax_hl end end private def registry raise ::NotImplementedError, %(#{Factory} subclass #{self.class} must implement the ##{__method__} method) end end class CustomFactory include Factory def initialize seed_registry = nil @registry = seed_registry || {} end private attr_reader :registry end module DefaultFactory include Factory @@registry = {} private def registry @@registry end unless RUBY_ENGINE == 'opal' public def register syntax_highlighter, *names @@mutex.owned? ? names.each {|name| @@registry = @@registry.merge name => syntax_highlighter } : @@mutex.synchronize { register syntax_highlighter, *names } end # This method will lazy require and register additional built-in implementations, which include coderay, # pygments, rouge, and prettify. Refer to {Factory#for} for parameters and return value. 
def for name @@registry.fetch name do @@mutex.synchronize do @@registry.fetch name do if (require_path = PROVIDED[name]) require require_path @@registry[name] else @@registry = @@registry.merge name => nil nil end end end end end PROVIDED = { 'coderay' => %(#{__dir__}/syntax_highlighter/coderay), 'prettify' => %(#{__dir__}/syntax_highlighter/prettify), 'pygments' => %(#{__dir__}/syntax_highlighter/pygments), 'rouge' => %(#{__dir__}/syntax_highlighter/rouge), } @@mutex = ::Mutex.new end end class DefaultFactoryProxy < CustomFactory include DefaultFactory # inserts module into ancestors immediately after superclass def for name @registry.fetch(name) { super } end unless RUBY_ENGINE == 'opal' end class Base include SyntaxHighlighter def format node, lang, opts class_attr_val = opts[:nowrap] ? %(#{@pre_class} highlight nowrap) : %(#{@pre_class} highlight) if (transform = opts[:transform]) transform[(pre = { 'class' => class_attr_val }), (code = lang ? { 'data-lang' => lang } : {})] # NOTE: make sure data-lang is the last attribute on the code tag to remain consistent with 1.5.x if (lang = code.delete 'data-lang') code['data-lang'] = lang end %(#{node.content}) else %(
    #{node.content}
    ) end end end extend DefaultFactory # exports static methods end end require_relative 'syntax_highlighter/highlightjs' require_relative 'syntax_highlighter/html_pipeline' unless RUBY_ENGINE == 'opal' asciidoctor-2.0.20/lib/asciidoctor/syntax_highlighter/000077500000000000000000000000001443135032600230545ustar00rootroot00000000000000asciidoctor-2.0.20/lib/asciidoctor/syntax_highlighter/coderay.rb000066400000000000000000000044501443135032600250320ustar00rootroot00000000000000# frozen_string_literal: true module Asciidoctor class SyntaxHighlighter::CodeRayAdapter < SyntaxHighlighter::Base register_for 'coderay' def initialize *args super @pre_class = 'CodeRay' @requires_stylesheet = nil end def highlight? library_available? end def highlight node, source, lang, opts @requires_stylesheet = true if (css_mode = opts[:css_mode]) == :class lang = lang ? (::CodeRay::Scanners[lang = lang.to_sym] && lang rescue :text) : :text highlighted = ::CodeRay::Duo[lang, :html, css: css_mode, line_numbers: (line_numbers = opts[:number_lines]), line_number_start: opts[:start_line_number], line_number_anchors: false, highlight_lines: opts[:highlight_lines], bold_every: false, ].highlight source if line_numbers == :table && opts[:callouts] [highlighted, (idx = highlighted.index CodeCellStartTagCs) ? idx + CodeCellStartTagCs.length : nil] else highlighted end end def docinfo? location @requires_stylesheet && location == :head end def docinfo location, doc, opts if opts[:linkcss] %() else %() end end def write_stylesheet? doc @requires_stylesheet end def write_stylesheet doc, to_dir ::File.write (::File.join to_dir, stylesheet_basename), read_stylesheet, mode: FILE_WRITE_MODE end module Loader private def library_available? (@@library_status ||= load_library) == :loaded ? true : nil end def load_library (defined? ::CodeRay::Duo) ? :loaded : (Helpers.require_library 'coderay', true, :warn).nil? ? :unavailable : :loaded end end module Styles include Loader def read_stylesheet @@stylesheet_cache ||= (::File.read (::File.join Stylesheets::STYLESHEETS_DIR, stylesheet_basename), mode: FILE_READ_MODE).rstrip end def stylesheet_basename 'coderay-asciidoctor.css' end end extend Styles # exports static methods include Styles # adds methods to instance include Loader # adds methods to instance CodeCellStartTagCs = '
    '
    
      private_constant :CodeCellStartTagCs
    end
    end
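# ----------------------------------------------------------------------------
# Illustrative sketch (NOT part of the Asciidoctor sources): a minimal
# client-side syntax highlighter adapter, added to show the pluggable API
# documented in lib/asciidoctor/syntax_highlighter.rb above (register_for,
# format, docinfo?, docinfo). The adapter name 'myhl' and the script URL are
# made-up placeholders; a real adapter would point at its own assets.
# ----------------------------------------------------------------------------
require 'asciidoctor'

class MyHighlighterAdapter < Asciidoctor::SyntaxHighlighter::Base
  register_for 'myhl'

  # Rely on the default highlight? (falsy), i.e. highlighting happens in the
  # browser; only format and docinfo hooks are needed.
  def format node, lang, opts
    # reuse Base#format, but add a language class to the code element,
    # mirroring the pattern used by the built-in highlight.js adapter
    super node, lang, (opts.merge transform: proc {|_, code| code['class'] = %(language-#{lang || 'none'}) })
  end

  def docinfo? location
    location == :footer
  end

  def docinfo location, doc, opts
    # placeholder URL for the client-side highlighter script
    %(<script src="https://example.org/myhl.js"></script>)
  end
end

# Once this file is required (e.g., via the -r CLI option), the adapter would
# be selected by setting the source-highlighter document attribute to 'myhl'.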
    asciidoctor-2.0.20/lib/asciidoctor/syntax_highlighter/highlightjs.rb000066400000000000000000000023461443135032600257120ustar00rootroot00000000000000# frozen_string_literal: true
    module Asciidoctor
    class SyntaxHighlighter::HighlightJsAdapter < SyntaxHighlighter::Base
      register_for 'highlightjs', 'highlight.js'
    
      def initialize *args
        super
        @name = @pre_class = 'highlightjs'
      end
    
      def format node, lang, opts
        super node, lang, (opts.merge transform: proc {|_, code| code['class'] = %(language-#{lang || 'none'} hljs) })
      end
    
      def docinfo? location
        true
      end
    
      def docinfo location, doc, opts
        base_url = doc.attr 'highlightjsdir', %(#{opts[:cdn_base_url]}/highlight.js/#{HIGHLIGHT_JS_VERSION})
        if location == :head
          %()
        else # :footer
          %(
    #{(doc.attr? 'highlightjs-languages') ? ((doc.attr 'highlightjs-languages').split ',').map {|lang| %[\n] }.join : ''})
        end
      end
    end
    end
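# ----------------------------------------------------------------------------
# Illustrative usage sketch (NOT part of the Asciidoctor sources): selecting
# the highlight.js adapter above when converting via the Ruby API. The input
# and output file names are placeholders.
# ----------------------------------------------------------------------------
require 'asciidoctor'

Asciidoctor.convert_file 'sample.adoc',
  safe: :safe,
  to_file: 'sample.html',
  attributes: {
    'source-highlighter' => 'highlightjs',  # routes source blocks through HighlightJsAdapter
    'highlightjs-languages' => 'ruby,yaml'  # additional language scripts referenced by the footer docinfo above
  }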
    asciidoctor-2.0.20/lib/asciidoctor/syntax_highlighter/html_pipeline.rb000066400000000000000000000004241443135032600262320ustar00rootroot00000000000000# frozen_string_literal: true
    module Asciidoctor
    class SyntaxHighlighter::HtmlPipelineAdapter < SyntaxHighlighter::Base
      register_for 'html-pipeline'
    
      def format node, lang, opts
        %(#{node.content}
    ) end end end asciidoctor-2.0.20/lib/asciidoctor/syntax_highlighter/prettify.rb000066400000000000000000000017101443135032600252460ustar00rootroot00000000000000# frozen_string_literal: true module Asciidoctor class SyntaxHighlighter::PrettifyAdapter < SyntaxHighlighter::Base register_for 'prettify' def initialize *args super @pre_class = 'prettyprint' end def format node, lang, opts opts[:transform] = proc {|pre| pre['class'] += %( #{(start = node.attr 'start') ? %[linenums:#{start}] : 'linenums'}) } if node.attr? 'linenums' super end def docinfo? location true end def docinfo location, doc, opts base_url = doc.attr 'prettifydir', %(#{opts[:cdn_base_url]}/prettify/r298) if location == :head prettify_theme_url = ((prettify_theme = doc.attr 'prettify-theme', 'prettify').start_with? 'http://', 'https://') ? prettify_theme : %(#{base_url}/#{prettify_theme}.min.css) %() else # :footer %() end end end end asciidoctor-2.0.20/lib/asciidoctor/syntax_highlighter/pygments.rb000066400000000000000000000135721443135032600252570ustar00rootroot00000000000000# frozen_string_literal: true module Asciidoctor class SyntaxHighlighter::PygmentsAdapter < SyntaxHighlighter::Base register_for 'pygments' def initialize *args super @requires_stylesheet = @style = nil end def highlight? library_available? end def highlight node, source, lang, opts lexer = (::Pygments::Lexer.find_by_alias lang) || (::Pygments::Lexer.find_by_mimetype 'text/plain') @requires_stylesheet = true unless (noclasses = opts[:css_mode] != :class) highlight_opts = { classprefix: TOKEN_CLASS_PREFIX, cssclass: WRAPPER_CLASS, nobackground: true, noclasses: noclasses, startinline: lexer.name == 'PHP' && !(node.option? 'mixed'), stripnl: false, style: (@style ||= (style = opts[:style]) && (style_available? style) || DEFAULT_STYLE), } if (highlight_lines = opts[:highlight_lines]) highlight_opts[:hl_lines] = highlight_lines.join ' ' end if (linenos = opts[:number_lines]) && (highlight_opts[:linenostart] = opts[:start_line_number]) && (highlight_opts[:linenos] = linenos) == :table if (highlighted = lexer.highlight source, options: highlight_opts) highlighted = highlighted.sub StyledLinenoColumnStartTagsRx, LinenoColumnStartTagsCs if noclasses highlighted = highlighted.sub WrapperTagRx, PreTagCs opts[:callouts] ? [highlighted, (idx = highlighted.index CodeCellStartTagCs) ? idx + CodeCellStartTagCs.length : nil] : highlighted else node.sub_source source, false # handles nil response from ::Pygments::Lexer#highlight end elsif (highlighted = lexer.highlight source, options: highlight_opts) if linenos if noclasses highlighted = highlighted.gsub StyledLinenoSpanTagRx, LinenoSpanTagCs elsif highlighted.include? LegacyLinenoSpanStartTagCs highlighted = highlighted.gsub LegacyLinenoSpanTagRx, LinenoSpanTagCs end end highlighted.sub WrapperTagRx, '\1' else node.sub_source source, false # handles nil response from ::Pygments::Lexer#highlight end end def format node, lang, opts if opts[:css_mode] != :class && (@style = (style = opts[:style]) && (style_available? style) || DEFAULT_STYLE) && (pre_style_attr_val = base_style @style) opts[:transform] = proc {|pre| pre['style'] = pre_style_attr_val } end super end def docinfo? location @requires_stylesheet && location == :head end def docinfo location, doc, opts if opts[:linkcss] %() else %() end end def write_stylesheet? 
doc @requires_stylesheet end def write_stylesheet doc, to_dir ::File.write (::File.join to_dir, (stylesheet_basename @style)), (read_stylesheet @style), mode: FILE_WRITE_MODE end module Loader private def library_available? (@@library_status ||= load_library) == :loaded ? true : nil end def load_library (defined? ::Pygments::Lexer) ? :loaded : (Helpers.require_library 'pygments', 'pygments.rb', :warn).nil? ? :unavailable : :loaded end end module Styles include Loader def read_stylesheet style library_available? ? @@stylesheet_cache[style || DEFAULT_STYLE] || '/* Failed to load Pygments CSS. */' : '/* Pygments CSS disabled because Pygments is not available. */' end def stylesheet_basename style %(pygments-#{style || DEFAULT_STYLE}.css) end private def base_style style library_available? ? @@base_style_cache[style || DEFAULT_STYLE] : nil end def style_available? style (((@@available_styles ||= ::Pygments.styles.to_set).include? style) rescue nil) && style end @@base_style_cache = ::Hash.new do |cache, key| if BaseStyleRx =~ @@stylesheet_cache[key] @@base_style_cache = cache.merge key => (style = $1.strip) style end end @@stylesheet_cache = ::Hash.new do |cache, key| if (stylesheet = ::Pygments.css BASE_SELECTOR, classprefix: TOKEN_CLASS_PREFIX, style: key) stylesheet = stylesheet.slice (stylesheet.index BASE_SELECTOR), stylesheet.length unless stylesheet.start_with? BASE_SELECTOR @@stylesheet_cache = cache.merge key => stylesheet stylesheet end end DEFAULT_STYLE = 'default' BASE_SELECTOR = 'pre.pygments' TOKEN_CLASS_PREFIX = 'tok-' BaseStyleRx = /^#{BASE_SELECTOR.gsub '.', '\\.'} +\{([^}]+?)\}/ private_constant :BASE_SELECTOR, :TOKEN_CLASS_PREFIX, :BaseStyleRx end extend Styles # exports static methods include Styles # adds methods to instance include Loader # adds methods to instance CodeCellStartTagCs = '' LegacyLinenoSpanStartTagCs = '' LegacyLinenoSpanTagRx = %r(#{LegacyLinenoSpanStartTagCs}( *\d+) ?) LinenoColumnStartTagsCs = '
    '
      LinenoSpanTagCs = '\1'
      PreTagCs = '
    \1
    ' StyledLinenoColumnStartTagsRx = /
    /
      StyledLinenoSpanTagRx = %r((?<=^|)( *\d+) ?)
      WRAPPER_CLASS = 'lineno' # doesn't appear in output; Pygments appends "table" to this value to make nested table class
  # NOTE <pre> has style attribute when pygments-css=style
  # NOTE <div> has trailing newline when pygments-linenums-mode=table
  # NOTE initial <span></span> preserves leading blank lines
  WrapperTagRx = %r(
    ]*?>(.*)
    \n*)m private_constant :CodeCellStartTagCs, :LegacyLinenoSpanStartTagCs, :LegacyLinenoSpanTagRx, :LinenoColumnStartTagsCs, :LinenoSpanTagCs, :PreTagCs, :StyledLinenoColumnStartTagsRx, :StyledLinenoSpanTagRx, :WrapperTagRx, :WRAPPER_CLASS end end asciidoctor-2.0.20/lib/asciidoctor/syntax_highlighter/rouge.rb000066400000000000000000000110731443135032600245240ustar00rootroot00000000000000# frozen_string_literal: true module Asciidoctor class SyntaxHighlighter::RougeAdapter < SyntaxHighlighter::Base register_for 'rouge' def initialize *args super @requires_stylesheet = @style = nil end def highlight? library_available? end def highlight node, source, lang, opts @style ||= (style = opts[:style]) && (style_available? style) || DEFAULT_STYLE @requires_stylesheet = true if opts[:css_mode] == :class lexer = create_lexer node, source, lang, opts formatter = create_formatter node, source, lang, opts highlighted = formatter.format lexer.lex source if opts[:number_lines] && opts[:callouts] [highlighted, (idx = highlighted.index CodeCellStartTagCs) ? idx + CodeCellStartTagCs.length : nil] else highlighted end end def format node, lang, opts if (query_idx = lang && (lang.index '?')) lang = lang.slice 0, query_idx end if opts[:css_mode] != :class && (@style = (style = opts[:style]) && (style_available? style) || DEFAULT_STYLE) && (pre_style_attr_val = base_style @style) opts[:transform] = proc {|pre| pre['style'] = pre_style_attr_val } end super end def docinfo? location @requires_stylesheet && location == :head end def docinfo location, doc, opts if opts[:linkcss] %() else %() end end def write_stylesheet? doc @requires_stylesheet end def write_stylesheet doc, to_dir ::File.write (::File.join to_dir, (stylesheet_basename @style)), (read_stylesheet @style), mode: FILE_WRITE_MODE end def create_lexer node, source, lang, opts if lang.include? '?' # NOTE cgi-style options only properly supported in Rouge >= 2.1 if (lexer = ::Rouge::Lexer.find_fancy lang) unless lexer.tag != 'php' || (node.option? 'mixed') || ((lexer_opts = lexer.options).key? 'start_inline') lexer = lexer.class.new lexer_opts.merge 'start_inline' => true end end elsif (lexer = ::Rouge::Lexer.find lang) lexer = lexer.tag == 'php' && !(node.option? 'mixed') ? (lexer.new start_inline: true) : lexer.new end if lang lexer || ::Rouge::Lexers::PlainText.new end def create_formatter node, source, lang, opts formatter = opts[:css_mode] == :class ? (::Rouge::Formatters::HTML.new inline_theme: @style) : (::Rouge::Formatters::HTMLInline.new (::Rouge::Theme.find @style).new) if (highlight_lines = opts[:highlight_lines]) formatter = RougeExt::Formatters::HTMLLineHighlighter.new formatter, lines: highlight_lines end opts[:number_lines] ? (RougeExt::Formatters::HTMLTable.new formatter, start_line: opts[:start_line_number]) : formatter end module Loader private def library_available? (@@library_status ||= load_library) == :loaded ? true : nil end def load_library (defined? RougeExt) ? :loaded : (Helpers.require_library %(#{::File.dirname __dir__}/rouge_ext), 'rouge', :warn).nil? ? :unavailable : :loaded end end module Styles include Loader def read_stylesheet style library_available? ? @@stylesheet_cache[style || DEFAULT_STYLE] : '/* Rouge CSS disabled because Rouge is not available. */' end def stylesheet_basename style %(rouge-#{style || DEFAULT_STYLE}.css) end private def base_style style library_available? ? @@base_style_cache[style || DEFAULT_STYLE] : nil end def style_available? 
style (::Rouge::Theme.find style) && style end @@base_style_cache = ::Hash.new do |cache, key| base_style = (theme = ::Rouge::Theme.find key).base_style (val = base_style[:fg]) && ((style ||= []) << %(color: #{theme.palette val})) (val = base_style[:bg]) && ((style ||= []) << %(background-color: #{theme.palette val})) @@base_style_cache = cache.merge key => (resolved_base_style = style && (style.join ';')) resolved_base_style end @@stylesheet_cache = ::Hash.new do |cache, key| @@stylesheet_cache = cache.merge key => (stylesheet = ((::Rouge::Theme.find key).render scope: BASE_SELECTOR)) stylesheet end DEFAULT_STYLE = 'github' BASE_SELECTOR = 'pre.rouge' private_constant :BASE_SELECTOR end extend Styles # exports static methods include Styles # adds methods to instance include Loader # adds methods to instance CodeCellStartTagCs = '' private_constant :CodeCellStartTagCs end end asciidoctor-2.0.20/lib/asciidoctor/table.rb000066400000000000000000000602061443135032600205700ustar00rootroot00000000000000# frozen_string_literal: true module Asciidoctor # Public: Methods and constants for managing AsciiDoc table content in a document. # It supports all three of AsciiDoc's table formats: psv, dsv and csv. class Table < AbstractBlock # precision of column widths DEFAULT_PRECISION = 4 # Public: A data object that encapsulates the collection of rows (head, foot, body) for a table class Rows attr_accessor :head, :foot, :body def initialize head = [], foot = [], body = [] @head = head @foot = foot @body = body end alias [] send # Public: Retrieve the rows grouped by section as a nested Array. # # Creates a 2-dimensional array of two element entries. The first element # is the section name as a symbol. The second element is the Array of rows # in that section. The entries are in document order (head, foot, body). # # Returns a 2-dimentional Array of rows grouped by section. def by_section [[:head, @head], [:body, @body], [:foot, @foot]] end # Public: Retrieve the rows as a Hash. # # The keys are the names of the section groups and the values are the Array of rows in that section. # The keys are in document order (head, foot, body). # # Returns a Hash of rows grouped by section. def to_h { head: @head, body: @body, foot: @foot } end end # Public: Get/Set the columns for this table attr_accessor :columns # Public: Get/Set the Rows struct for this table (encapsulates head, foot # and body rows) attr_accessor :rows # Public: Boolean specifies whether this table has a header row attr_accessor :has_header_option # Public: Get the caption for this table attr_reader :caption def initialize parent, attributes super parent, :table @rows = Rows.new @columns = [] @has_header_option = false # smells like we need a utility method here # to resolve an integer width from potential bogus input if (pcwidth = attributes['width']) if (pcwidth_intval = pcwidth.to_i) > 100 || pcwidth_intval < 1 pcwidth_intval = 100 unless pcwidth_intval == 0 && (pcwidth == '0' || pcwidth == '0%') end else pcwidth_intval = 100 end @attributes['tablepcwidth'] = pcwidth_intval if @document.attributes['pagewidth'] @attributes['tableabswidth'] = (abswidth_val = (((pcwidth_intval / 100.0) * @document.attributes['pagewidth'].to_f).truncate DEFAULT_PRECISION)) == abswidth_val.to_i ? 
abswidth_val.to_i : abswidth_val end @attributes['orientation'] = 'landscape' if attributes['rotate-option'] end # Internal: Returns the current state of the header option (true or :implicit) if # the row being processed is (or is assumed to be) the header row, otherwise nil def header_row? (val = @has_header_option) && @rows.body.empty? ? val : nil end # Internal: Creates the Column objects from the column spec # # returns nothing def create_columns colspecs cols = [] autowidth_cols = nil width_base = 0 colspecs.each do |colspec| colwidth = colspec['width'] cols << (Column.new self, cols.size, colspec) if colwidth < 0 (autowidth_cols ||= []) << cols[-1] else width_base += colwidth end end if (num_cols = (@columns = cols).size) > 0 @attributes['colcount'] = num_cols width_base = nil unless width_base > 0 || autowidth_cols assign_column_widths width_base, autowidth_cols end nil end # Internal: Assign column widths to columns # # This method rounds the percentage width values to 4 decimal places and # donates the balance to the final column. # # This method assumes there's at least one column in the columns array. # # width_base - the total of the relative column values used for calculating percentage widths (default: nil) # # returns nothing def assign_column_widths width_base = nil, autowidth_cols = nil precision = DEFAULT_PRECISION total_width = col_pcwidth = 0 if width_base if autowidth_cols if width_base > 100 autowidth = 0 logger.warn %(total column width must not exceed 100% when using autowidth columns; got #{width_base}%) else autowidth = ((100.0 - width_base) / autowidth_cols.size).truncate precision autowidth = autowidth.to_i if autowidth.to_i == autowidth width_base = 100 end autowidth_attrs = { 'width' => autowidth, 'autowidth-option' => '' } autowidth_cols.each {|col| col.update_attributes autowidth_attrs } end @columns.each {|col| total_width += (col_pcwidth = col.assign_width nil, width_base, precision) } else col_pcwidth = (100.0 / @columns.size).truncate precision col_pcwidth = col_pcwidth.to_i if col_pcwidth.to_i == col_pcwidth @columns.each {|col| total_width += col.assign_width col_pcwidth, nil, precision } end # donate balance, if any, to final column (using half up rounding) @columns[-1].assign_width(((100 - total_width + col_pcwidth).round precision), nil, precision) unless total_width == 100 nil end # Internal: Partition the rows into header, footer and body as determined # by the options on the table # # returns nothing def partition_header_footer(attrs) # set rowcount before splitting up body rows num_body_rows = @attributes['rowcount'] = (body = @rows.body).size if num_body_rows > 0 if @has_header_option @rows.head = [body.shift.map {|cell| cell.reinitialize true }] num_body_rows -= 1 elsif @has_header_option.nil? @has_header_option = false body.unshift(body.shift.map {|cell| cell.reinitialize false }) end end @rows.foot = [body.pop] if num_body_rows > 0 && attrs['footer-option'] nil end end # Public: Methods to manage the columns of an AsciiDoc table. In particular, it # keeps track of the column specs class Table::Column < AbstractNode # Public: Get/Set the style Symbol for this column. 
attr_accessor :style def initialize table, index, attributes = {} super table, :table_column @style = attributes['style'] attributes['colnumber'] = index + 1 attributes['width'] ||= 1 attributes['halign'] ||= 'left' attributes['valign'] ||= 'top' update_attributes(attributes) end # Public: An alias to the parent block (which is always a Table) alias table parent # Internal: Calculate and assign the widths (percentage and absolute) for this column # # This method assigns the colpcwidth and colabswidth attributes. # # returns the resolved colpcwidth value def assign_width col_pcwidth, width_base, precision if width_base col_pcwidth = (@attributes['width'].to_f * 100.0 / width_base).truncate precision col_pcwidth = col_pcwidth.to_i if col_pcwidth.to_i == col_pcwidth end if parent.attributes['tableabswidth'] @attributes['colabswidth'] = (col_abswidth = ((col_pcwidth / 100.0) * parent.attributes['tableabswidth']).truncate precision) == col_abswidth.to_i ? col_abswidth.to_i : col_abswidth end @attributes['colpcwidth'] = col_pcwidth end def block? false end def inline? false end end # Public: Methods for managing the a cell in an AsciiDoc table. class Table::Cell < AbstractBlock DOUBLE_LF = LF * 2 # Public: An Integer of the number of columns this cell will span (default: nil) attr_accessor :colspan # Public: An Integer of the number of rows this cell will span (default: nil) attr_accessor :rowspan # Public: An alias to the parent block (which is always a Column) alias column parent # Public: Returns the nested Document in an AsciiDoc table cell (only set when style is :asciidoc) attr_reader :inner_document def initialize column, cell_text, attributes = {}, opts = {} super column, :table_cell @cursor = @reinitialize_args = nil @source_location = opts[:cursor].dup if @document.sourcemap # NOTE: column is always set when parsing; may not be set when building table from the API if column if (in_header_row = column.table.header_row?) if in_header_row == :implicit && (cell_style = column.style || (attributes && attributes['style'])) @reinitialize_args = [column, cell_text, attributes && attributes.merge, opts] if cell_style == :asciidoc || cell_style == :literal cell_style = nil end else cell_style = column.style end # REVIEW feels hacky to inherit all attributes from column update_attributes column.attributes end # NOTE if attributes is defined, we know this is a psv cell; implies text needs to be stripped if attributes if attributes.empty? @colspan = @rowspan = nil else @colspan, @rowspan = (attributes.delete 'colspan'), (attributes.delete 'rowspan') # TODO delete style attribute from @attributes if set cell_style = attributes['style'] || cell_style unless in_header_row update_attributes attributes end case cell_style when :asciidoc asciidoc = true inner_document_cursor = opts[:cursor] if (cell_text = cell_text.rstrip).start_with? LF lines_advanced = 1 lines_advanced += 1 while (cell_text = cell_text.slice 1, cell_text.length).start_with? LF # NOTE this only works if we remain in the same file inner_document_cursor.advance lines_advanced else cell_text = cell_text.lstrip end when :literal literal = true cell_text = cell_text.rstrip # QUESTION should we use same logic as :asciidoc cell? strip leading space if text doesn't start with newline? cell_text = cell_text.slice 1, cell_text.length while cell_text.start_with? LF else normal_psv = true # NOTE AsciidoctorJ uses nil cell_text to create an empty cell cell_text = cell_text ? 
cell_text.strip : '' end else @colspan = @rowspan = nil if cell_style == :asciidoc asciidoc = true inner_document_cursor = opts[:cursor] end end # NOTE only true for non-header rows if asciidoc # FIXME hide doctitle from nested document; temporary workaround to fix # nested document seeing doctitle and assuming it has its own document title parent_doctitle = @document.attributes.delete('doctitle') # NOTE we need to process the first line of content as it may not have been processed # the included content cannot expect to match conditional terminators in the remaining # lines of table cell content, it must be self-contained logic # QUESTION should we reset cell_text to nil? # QUESTION is is faster to check for :: before splitting? inner_document_lines = cell_text.split LF, -1 if (unprocessed_line1 = inner_document_lines[0]).include? '::' preprocessed_lines = (PreprocessorReader.new @document, [unprocessed_line1]).readlines unless unprocessed_line1 == preprocessed_lines[0] && preprocessed_lines.size < 2 inner_document_lines.shift inner_document_lines.unshift(*preprocessed_lines) unless preprocessed_lines.empty? end end unless inner_document_lines.empty? @inner_document = Document.new inner_document_lines, standalone: false, parent: @document, cursor: inner_document_cursor @document.attributes['doctitle'] = parent_doctitle unless parent_doctitle.nil? @subs = nil elsif literal @content_model = :verbatim @subs = BASIC_SUBS else if normal_psv if in_header_row @cursor = opts[:cursor] # used in deferred catalog_inline_anchor call else catalog_inline_anchor cell_text, opts[:cursor] end end @content_model = :simple @subs = NORMAL_SUBS end @text = cell_text @style = cell_style end def reinitialize has_header if has_header @reinitialize_args = nil elsif @reinitialize_args return Table::Cell.new(*@reinitialize_args) else @style = @attributes['style'] end catalog_inline_anchor if @cursor self end def catalog_inline_anchor cell_text = @text, cursor = nil cursor, @cursor = @cursor, nil unless cursor if (cell_text.start_with? '[[') && LeadingInlineAnchorRx =~ cell_text Parser.catalog_inline_anchor $1, $2, self, cursor, @document end end # Public: Get the String text of this cell with substitutions applied. # # Used for cells in the head row as well as text-only (non-AsciiDoc) cells in # the foot row and body. # # This method shouldn't be used for cells that have the AsciiDoc style. # # Returns the converted String text for this Cell def text apply_subs @text, @subs end # Public: Set the String text for this cell. # # This method shouldn't be used for cells that have the AsciiDoc style. attr_writer :text # Public: Handles the body data (tbody, tfoot), applying styles and partitioning into paragraphs # # This method should not be used for cells in the head row or that have the literal style. # # Returns the converted String for this Cell def content if (cell_style = @style) == :asciidoc @inner_document.convert elsif @text.include? DOUBLE_LF (text.split BlankLineRx).map do |para| cell_style && cell_style != :header ? (Inline.new parent, :quoted, para, type: cell_style).convert : para end elsif (subbed_text = text).empty? 
[] elsif cell_style && cell_style != :header [(Inline.new parent, :quoted, subbed_text, type: cell_style).convert] else [subbed_text] end end def lines @text.split LF end def source @text end # Public: Get the source file where this block started def file @source_location && @source_location.file end # Public: Get the source line number where this block started def lineno @source_location && @source_location.lineno end def to_s %(#{super} - [text: #{@text}, colspan: #{@colspan || 1}, rowspan: #{@rowspan || 1}, attributes: #{@attributes}]) end end # Public: Methods for managing the parsing of an AsciiDoc table. Instances of this # class are primarily responsible for tracking the buffer of a cell as the parser # moves through the lines of the table using tail recursion. When a cell boundary # is located, the previous cell is closed, an instance of Table::Cell is # instantiated, the row is closed if the cell satisfies the column count and, # finally, a new buffer is allocated to track the next cell. class Table::ParserContext include Logging # Public: An Array of String keys that represent the table formats in AsciiDoc #-- # QUESTION should we recognize !sv as a valid format value? FORMATS = ['psv', 'csv', 'dsv', 'tsv'].to_set # Public: A Hash mapping the AsciiDoc table formats to default delimiters DELIMITERS = { 'psv' => ['|', /\|/], 'csv' => [',', /,/], 'dsv' => [':', /:/], 'tsv' => [?\t, /\t/], '!sv' => ['!', /!/], } # Public: The Table currently being parsed attr_accessor :table # Public: The AsciiDoc table format (psv, dsv, or csv) attr_accessor :format # Public: Get the expected column count for a row # # colcount is the number of columns to pull into a row # A value of -1 means we use the number of columns found # in the first line as the colcount attr_reader :colcount # Public: The String buffer of the currently open cell attr_accessor :buffer # Public: The cell delimiter for this table. attr_reader :delimiter # Public: The cell delimiter compiled Regexp for this table. attr_reader :delimiter_re def initialize reader, table, attributes = {} @start_cursor_data = (@reader = reader).mark @table = table if attributes.key? 'format' if FORMATS.include?(xsv = attributes['format']) if xsv == 'tsv' # NOTE tsv is just an alias for csv with a tab separator @format = 'csv' elsif (@format = xsv) == 'psv' && table.document.nested? xsv = '!sv' end else logger.error message_with_context %(illegal table format: #{xsv}), source_location: reader.cursor_at_prev_line @format, xsv = 'psv', (table.document.nested? ? '!sv' : 'psv') end else @format, xsv = 'psv', (table.document.nested? ? '!sv' : 'psv') end if attributes.key? 'separator' if (sep = attributes['separator']).nil_or_empty? @delimiter, @delimiter_rx = DELIMITERS[xsv] # QUESTION should we support any other escape codes or multiple tabs? elsif sep == '\t' @delimiter, @delimiter_rx = DELIMITERS['tsv'] else @delimiter, @delimiter_rx = sep, /#{::Regexp.escape sep}/ end else @delimiter, @delimiter_rx = DELIMITERS[xsv] end @colcount = table.columns.empty? ? -1 : table.columns.size @buffer = '' @cellspecs = [] @cell_open = false @active_rowspans = [0] @column_visits = 0 @current_row = [] @linenum = -1 end # Public: Checks whether the line provided starts with the cell delimiter # used by this table. # # returns true if the line starts with the delimiter, false otherwise def starts_with_delimiter?(line) line.start_with? @delimiter end # Public: Checks whether the line provided contains the cell delimiter # used by this table. 
# # returns Regexp MatchData if the line contains the delimiter, false otherwise def match_delimiter(line) @delimiter_rx.match(line) end # Public: Skip past the matched delimiter because it's inside quoted text. # # Returns nothing def skip_past_delimiter(pre) @buffer = %(#{@buffer}#{pre}#{@delimiter}) nil end # Public: Skip past the matched delimiter because it's escaped. # # Returns nothing def skip_past_escaped_delimiter(pre) @buffer = %(#{@buffer}#{pre.chop}#{@delimiter}) nil end # Public: Determines whether the buffer has unclosed quotes. Used for CSV data. # # returns true if the buffer has unclosed quotes, false if it doesn't or it # isn't quoted data def buffer_has_unclosed_quotes? append = nil, q = '"' if (record = append ? (@buffer + append).strip : @buffer.strip) == q true elsif record.start_with? q qq = q + q if ((trailing_quote = record.end_with? q) && (record.end_with? qq)) || (record.start_with? qq) ((record = record.gsub qq, '').start_with? q) && !(record.end_with? q) else !trailing_quote end else false end end # Public: Takes a cell spec from the stack. Cell specs precede the delimiter, so a # stack is used to carry over the spec from the previous cell to the current cell # when the cell is being closed. # # returns The cell spec Hash captured from parsing the previous cell def take_cellspec @cellspecs.shift end # Public: Puts a cell spec onto the stack. Cell specs precede the delimiter, so a # stack is used to carry over the spec to the next cell. # # returns nothing def push_cellspec(cellspec = {}) # this shouldn't be nil, but we check anyway @cellspecs << (cellspec || {}) nil end # Public: Marks that the cell should be kept open. Used when the end of the line is # reached and the cell may contain additional text. # # returns nothing def keep_cell_open @cell_open = true nil end # Public: Marks the cell as closed so that the parser knows to instantiate a new cell # instance and add it to the current row. # # returns nothing def mark_cell_closed @cell_open = false nil end # Public: Checks whether the current cell is still open # # returns true if the cell is marked as open, false otherwise def cell_open? @cell_open end # Public: Checks whether the current cell has been marked as closed # # returns true if the cell is marked as closed, false otherwise def cell_closed? !@cell_open end # Public: If the current cell is open, close it. In additional, push the # cell spec captured from the end of this cell onto the stack for use # by the next cell. # # returns nothing def close_open_cell(next_cellspec = {}) push_cellspec next_cellspec close_cell(true) if cell_open? advance nil end # Public: Close the current cell, instantiate a new Table::Cell, add it to # the current row and, if the number of expected columns for the current # row has been met, close the row and begin a new one. # # returns nothing def close_cell(eol = false) if @format == 'psv' cell_text = @buffer @buffer = '' if (cellspec = take_cellspec) repeat = cellspec.delete('repeatcol') || 1 else logger.error message_with_context 'table missing leading separator; recovering automatically', source_location: Reader::Cursor.new(*@start_cursor_data) cellspec = {} repeat = 1 end else cell_text = @buffer.strip @buffer = '' cellspec = nil repeat = 1 if @format == 'csv' && !cell_text.empty? && (cell_text.include? (q = '"')) # this may not be perfect logic, but it hits the 99% if (cell_text.start_with? q) && (cell_text.end_with? 
q) # unquote if (cell_text = cell_text.slice(1, cell_text.length - 2)) # trim whitespace and collapse escaped quotes cell_text = cell_text.strip.squeeze q else logger.error message_with_context 'unclosed quote in CSV data; setting cell to empty', source_location: @reader.cursor_at_prev_line cell_text = '' end else # collapse escaped quotes cell_text = cell_text.squeeze q end end end 1.upto(repeat) do |i| # TODO make column resolving an operation if @colcount == -1 @table.columns << (column = Table::Column.new(@table, @table.columns.size + i - 1)) if cellspec && (cellspec.key? 'colspan') && (extra_cols = cellspec['colspan'].to_i - 1) > 0 offset = @table.columns.size extra_cols.times do |j| @table.columns << Table::Column.new(@table, offset + j) end end else # QUESTION is this right for cells that span columns? unless (column = @table.columns[@current_row.size]) logger.error message_with_context 'dropping cell because it exceeds specified number of columns', source_location: @reader.cursor_before_mark return nil end end cell = Table::Cell.new(column, cell_text, cellspec, cursor: @reader.cursor_before_mark) @reader.mark unless !cell.rowspan || cell.rowspan == 1 activate_rowspan(cell.rowspan, (cell.colspan || 1)) end @column_visits += (cell.colspan || 1) @current_row << cell # don't close the row if we're on the first line and the column count has not been set explicitly # TODO perhaps the colcount/linenum logic should be in end_of_row? (or a should_end_row? method) close_row if end_of_row? && (@colcount != -1 || @linenum > 0 || (eol && i == repeat)) end @cell_open = false nil end private # Internal: Close the row by adding it to the Table and resetting the row # Array and counter variables. # # returns nothing def close_row @table.rows.body << @current_row # don't have to account for active rowspans here # since we know this is first row @colcount = @column_visits if @colcount == -1 @column_visits = 0 @current_row = [] @active_rowspans.shift @active_rowspans[0] ||= 0 nil end # Internal: Activate a rowspan. The rowspan Array is consulted when # determining the effective number of cells in the current row. # # returns nothing def activate_rowspan(rowspan, colspan) 1.upto(rowspan - 1) {|i| @active_rowspans[i] = (@active_rowspans[i] || 0) + colspan } nil end # Internal: Check whether we've met the number of effective columns for the current row. def end_of_row? @colcount == -1 || effective_column_visits == @colcount end # Internal: Calculate the effective column visits, which consists of the number of # cells plus any active rowspans. def effective_column_visits @column_visits + @active_rowspans[0] end # Internal: Advance to the next line (which may come after the parser begins processing # the next line if the last cell had wrapped content). def advance @linenum += 1 end end end asciidoctor-2.0.20/lib/asciidoctor/timings.rb000066400000000000000000000026011443135032600211460ustar00rootroot00000000000000# frozen_string_literal: true module Asciidoctor class Timings def initialize @log = {} @timers = {} end def start key @timers[key] = now end def record key @log[key] = (now - (@timers.delete key)) end def time *keys time = keys.reduce(0) {|sum, key| sum + (@log[key] || 0) } time > 0 ? 
time : nil end def read time :read end def parse time :parse end def read_parse time :read, :parse end def convert time :convert end def read_parse_convert time :read, :parse, :convert end def write time :write end def total time :read, :parse, :convert, :write end def print_report to = $stdout, subject = nil to.puts %(Input file: #{subject}) if subject to.puts %( Time to read and parse source: #{sprintf '%05.5f', read_parse.to_f}) to.puts %( Time to convert document: #{sprintf '%05.5f', convert.to_f}) to.puts %( Total time (read, parse and convert): #{sprintf '%05.5f', read_parse_convert.to_f}) end private if (::Process.const_defined? :CLOCK_MONOTONIC, false) && (defined? ::Process.clock_gettime) == 'method' CLOCK_ID = ::Process::CLOCK_MONOTONIC def now ::Process.clock_gettime CLOCK_ID end else def now ::Time.now end end end end asciidoctor-2.0.20/lib/asciidoctor/version.rb000066400000000000000000000001121443135032600211540ustar00rootroot00000000000000# frozen_string_literal: true module Asciidoctor VERSION = '2.0.20' end asciidoctor-2.0.20/lib/asciidoctor/writer.rb000066400000000000000000000015701443135032600210140ustar00rootroot00000000000000# frozen_string_literal: true module Asciidoctor # A module that can be used to mix the {#write} method into a {Converter} implementation to allow the converter to # control how the output is written to disk. module Writer # Public: Writes the output to the specified target file name or stream. # # output - The output String to write # target - The String file name or stream object to which the output should be written. # # Returns nothing def write output, target if target.respond_to? :write # ensure there's a trailing newline to be nice to terminals target.write output.chomp + LF else # QUESTION shouldn't we ensure a trailing newline here too? ::File.write target, output, mode: FILE_WRITE_MODE end nil end end module VoidWriter include Writer # Public: Does not write output def write output, target; end end end asciidoctor-2.0.20/man/000077500000000000000000000000001443135032600146525ustar00rootroot00000000000000asciidoctor-2.0.20/man/asciidoctor.1000066400000000000000000000233271443135032600172460ustar00rootroot00000000000000'\" t .\" Title: asciidoctor .\" Author: Dan Allen, Sarah White .\" Generator: Asciidoctor 2.0.19 .\" Date: 2018-03-20 .\" Manual: Asciidoctor Manual .\" Source: Asciidoctor 2.0.20 .\" Language: English .\" .TH "ASCIIDOCTOR" "1" "2018-03-20" "Asciidoctor 2.0.20" "Asciidoctor Manual" .ie \n(.g .ds Aq \(aq .el .ds Aq ' .ss \n[.ss] 0 .nh .ad l .de URL \fI\\$2\fP <\\$1>\\$3 .. .als MTO URL .if \n[.g] \{\ . mso www.tmac . am URL . ad l . . . am MTO . ad l . . . LINKSTYLE blue R < > .\} .SH "NAME" asciidoctor \- converts AsciiDoc source files to HTML, DocBook, and other formats .SH "SYNOPSIS" .sp \fBasciidoctor\fP [\fIOPTION\fP]... \fIFILE\fP... .SH "DESCRIPTION" .sp The asciidoctor(1) command converts the AsciiDoc source file(s) \fIFILE\fP to HTML5, DocBook 5, man(ual) page, and other custom output formats. .sp If \fIFILE\fP is \fI\-\fP then the AsciiDoc source is read from standard input. .SH "OPTIONS" .SS "Security Settings" .sp \fB\-B, \-\-base\-dir\fP=\fIDIR\fP .RS 4 Base directory containing the document and resources. Defaults to the directory containing the source file or, if the source is read from a stream, the working directory. When combined with the safe mode setting, can be used to chroot the execution of the program. 
.RE .sp \fB\-S, \-\-safe\-mode\fP=\fISAFE_MODE\fP .RS 4 Set safe mode level: \fIunsafe\fP, \fIsafe\fP, \fIserver\fP, or \fIsecure\fP. Disables potentially dangerous macros in source files, such as \f(CRinclude::[]\fP. If not set, the safe mode level defaults to \fIunsafe\fP when Asciidoctor is invoked using this script. .RE .sp \fB\-\-safe\fP .RS 4 Set safe mode level to \fIsafe\fP. Enables include directives, but prevents access to ancestor paths of source file. Provided for compatibility with the asciidoc command. If not set, the safe mode level defaults to \fIunsafe\fP when Asciidoctor is invoked using this script. .RE .SS "Document Settings" .sp \fB\-a, \-\-attribute\fP=\fIATTRIBUTE\fP .RS 4 Define, override, or unset a document attribute. Command\-line attributes take precedence over attributes defined in the source file unless either the name or value ends in \fI@\fP. No substitutions are applied to the value. .sp \fIATTRIBUTE\fP is normally formatted as a key\-value pair, in the form \fINAME=VALUE\fP. Alternate forms are \fINAME\fP (where the \fIVALUE\fP defaults to an empty string), \fINAME!\fP (unsets the \fINAME\fP attribute), and \fINAME=VALUE@\fP (or \fINAME@=VALUE\fP) (where \fIVALUE\fP does not override the \fINAME\fP attribute if it\(cqs already defined in the source document). A value containing spaces must be enclosed in quotes, in the form \fINAME="VALUE WITH SPACES"\fP. .sp This option may be specified more than once. .RE .sp \fB\-b, \-\-backend\fP=\fIBACKEND\fP .RS 4 Backend output file format: \fIhtml5\fP, \fIdocbook5\fP, and \fImanpage\fP are supported out of the box. You can also use the backend alias names \fIhtml\fP (aliased to \fIhtml5\fP) or \fIdocbook\fP (aliased to \fIdocbook5\fP). Other values can be passed, but if Asciidoctor cannot resolve the backend to a converter, it will fail. Defaults to \fIhtml5\fP. .RE .sp \fB\-d, \-\-doctype\fP=\fIDOCTYPE\fP .RS 4 Document type: \fIarticle\fP, \fIbook\fP, \fImanpage\fP, or \fIinline\fP. Sets the root element when using the \fIdocbook\fP backend and the style class on the HTML body element when using the \fIhtml\fP backend. The \fIbook\fP document type allows multiple level\-0 section titles in a single document. The \fImanpage\fP document type enables parsing of metadata necessary to produce a man page. The \fIinline\fP document type allows the content of a single paragraph to be formatted and returned without wrapping it in a containing element. Defaults to \fIarticle\fP. .RE .SS "Document Conversion" .sp \fB\-D, \-\-destination\-dir\fP=\fIDIR\fP .RS 4 Destination output directory. Defaults to the directory containing the source file or, if the source is read from a stream, the working directory. If specified, the directory is resolved relative to the working directory. .RE .sp \fB\-E, \-\-template\-engine\fP=\fINAME\fP .RS 4 Template engine to use for the custom converter templates. The gem with the same name as the engine will be loaded automatically. This name is also used to build the full path to the custom converter templates. If a template engine is not specified, it will be auto\-detected based on the file extension of the custom converter templates found. .RE .sp \fB\-e, \-\-embedded\fP .RS 4 Output an embeddable document, which excludes the header, the footer, and everything outside the body of the document. This option is useful for producing documents that can be inserted into an external template. 
.RE .sp \fB\-I, \-\-load\-path\fP=\fIDIRECTORY\fP .RS 4 Add the specified directory to the load path, so that \fI\-r\fP can load extensions from outside the default Ruby load path. This option may be specified more than once. .RE .sp \fB\-n, \-\-section\-numbers\fP .RS 4 Auto\-number section titles. Synonym for \fB\-\-attribute sectnums\fP. .RE .sp \fB\-o, \-\-out\-file\fP=\fIOUT_FILE\fP .RS 4 Write output to file \fIOUT_FILE\fP. Defaults to the base name of the input file suffixed with \fIbackend\fP extension. The file is resolved relative to the working directory. If the input is read from standard input or a named pipe (fifo), then the output file defaults to stdout. If \fIOUT_FILE\fP is \fI\-\fP, then the output file is written to standard output. .RE .sp \fB\-R, \-\-source\-dir\fP=\fIDIR\fP .RS 4 Source directory. Currently only used if the destination directory is also specified. Used to preserve the directory structure of files converted within this directory in the destination directory. If specified, the directory is resolved relative to the working directory. .RE .sp \fB\-r, \-\-require\fP=\fILIBRARY\fP .RS 4 Require the specified library before executing the processor, using the standard Ruby require. This option may be specified more than once. .RE .sp \fB\-s, \-\-no\-header\-footer\fP .RS 4 Output an embeddable document, which excludes the header, the footer, and everything outside the body of the document. This option is useful for producing documents that can be inserted into an external template. .RE .sp \fB\-T, \-\-template\-dir\fP=\fIDIR\fP .RS 4 A directory containing custom converter templates that override one or more templates from the built\-in set. (requires \fItilt\fP gem) .sp If there is a subfolder that matches the engine name (if specified), that folder is appended to the template directory path. Similarly, if there is a subfolder in the resulting template directory that matches the name of the backend, that folder is appended to the template directory path. .sp This option may be specified more than once. Matching templates found in subsequent directories override ones previously discovered. .RE .SS "Processing Information" .sp \fB\-\-failure\-level\fP=\fILEVEL\fP .RS 4 Set the minimum logging level (default: FATAL) that yields a non\-zero exit code (i.e., failure). If this option is not set, the program exits with a zero exit code even if warnings or errors have been logged. .RE .sp \fB\-q, \-\-quiet\fP .RS 4 Silence application log messages and script warnings. .RE .sp \fB\-\-trace\fP .RS 4 Include backtrace information when reporting errors. .RE .sp \fB\-v, \-\-verbose\fP .RS 4 Sets log level to DEBUG so application messages logged at INFO or DEBUG level are printed to stderr. .RE .sp \fB\-w, \-\-warnings\fP .RS 4 Turn on script warnings (applies to executed code). .RE .sp \fB\-t, \-\-timings\fP .RS 4 Print timings report to stderr (time to read, parse, and convert). .RE .SS "Program Information" .sp \fB\-h, \-\-help\fP [\fITOPIC\fP] .RS 4 Print a help message. Show the command usage if \fITOPIC\fP is not specified or recognized. Dump the Asciidoctor man page (in troff/groff format) if \fITOPIC\fP is \fImanpage\fP. Print an AsciiDoc syntax crib sheet (in AsciiDoc) if \fITOPIC\fP is \fIsyntax\fP. .RE .sp \fB\-V, \-\-version\fP .RS 4 Print program version number. .sp \fB\-v\fP can also be used if no source files are specified. .RE .SH "ENVIRONMENT" .sp \fBAsciidoctor\fP honors the \fBSOURCE_DATE_EPOCH\fP environment variable. 
If this variable is assigned an integer value, that value is used as the epoch of all input documents and as the local date and time. See \c .URL "https://reproducible\-builds.org/specs/source\-date\-epoch/" "" "" for more information about this environment variable. .SH "EXIT STATUS" .sp \fB0\fP .RS 4 Success. .RE .sp \fB1\fP .RS 4 Failure (syntax or usage error; configuration error; document processing failure; unexpected error). .RE .SH "BUGS" .sp Refer to the \fBAsciidoctor\fP issue tracker at \c .URL "https://github.com/asciidoctor/asciidoctor/issues?q=is%3Aopen" "" "." .SH "AUTHORS" .sp \fBAsciidoctor\fP is led and maintained by Dan Allen and Sarah White and has received contributions from many individuals in the Asciidoctor community. The project was started in 2012 by Ryan Waldron based on a prototype written by Nick Hengeveld for the Git website. Jason Porter wrote the first implementation of the CLI interface provided by this command. .sp \fBAsciiDoc.py\fP was created by Stuart Rackham and has received contributions from many individuals in the AsciiDoc.py community. .SH "RESOURCES" .sp \fBProject website:\fP \c .URL "https://asciidoctor.org" "" "" .sp \fBProject documentation:\fP \c .URL "https://docs.asciidoctor.org" "" "" .sp \fBCommunity chat:\fP \c .URL "https://chat.asciidoctor.org" "" "" .sp \fBSource repository:\fP \c .URL "https://github.com/asciidoctor/asciidoctor" "" "" .sp \fBMailing list archive:\fP \c .URL "https://discuss.asciidoctor.org" "" "" .SH "COPYING" .sp Copyright (C) 2012\-present Dan Allen, Sarah White, Ryan Waldron, and the individual contributors to Asciidoctor. Use of this software is granted under the terms of the MIT License.asciidoctor-2.0.20/man/asciidoctor.adoc000066400000000000000000000211111443135032600200010ustar00rootroot00000000000000= asciidoctor(1) Dan Allen; Sarah White :doctype: manpage :release-version: 2.0.20 :man manual: Asciidoctor Manual :man source: Asciidoctor {release-version} ifdef::backend-manpage[:!author:] == Name asciidoctor - converts AsciiDoc source files to HTML, DocBook, and other formats == Synopsis *asciidoctor* [_OPTION_]... _FILE_... == Description The asciidoctor(1) command converts the AsciiDoc source file(s) _FILE_ to HTML5, DocBook 5, man(ual) page, and other custom output formats. If _FILE_ is _-_ then the AsciiDoc source is read from standard input. == Options // tag::options[] === Security Settings *-B, --base-dir*=_DIR_:: Base directory containing the document and resources. Defaults to the directory containing the source file or, if the source is read from a stream, the working directory. When combined with the safe mode setting, can be used to chroot the execution of the program. *-S, --safe-mode*=_SAFE_MODE_:: Set safe mode level: _unsafe_, _safe_, _server_, or _secure_. Disables potentially dangerous macros in source files, such as `include::[]`. If not set, the safe mode level defaults to _unsafe_ when Asciidoctor is invoked using this script. *--safe*:: Set safe mode level to _safe_. Enables include directives, but prevents access to ancestor paths of source file. Provided for compatibility with the asciidoc command. If not set, the safe mode level defaults to _unsafe_ when Asciidoctor is invoked using this script. === Document Settings *-a, --attribute*=_ATTRIBUTE_:: Define, override, or unset a document attribute. Command-line attributes take precedence over attributes defined in the source file unless either the name or value ends in _@_. No substitutions are applied to the value. 
+ _ATTRIBUTE_ is normally formatted as a key-value pair, in the form _NAME=VALUE_. Alternate forms are _NAME_ (where the _VALUE_ defaults to an empty string), _NAME!_ (unsets the _NAME_ attribute), and _NAME=VALUE@_ (or _NAME@=VALUE_) (where _VALUE_ does not override the _NAME_ attribute if it's already defined in the source document). A value containing spaces must be enclosed in quotes, in the form _NAME="VALUE WITH SPACES"_. + This option may be specified more than once. *-b, --backend*=_BACKEND_:: Backend output file format: _html5_, _docbook5_, and _manpage_ are supported out of the box. You can also use the backend alias names _html_ (aliased to _html5_) or _docbook_ (aliased to _docbook5_). Other values can be passed, but if Asciidoctor cannot resolve the backend to a converter, it will fail. Defaults to _html5_. *-d, --doctype*=_DOCTYPE_:: Document type: _article_, _book_, _manpage_, or _inline_. Sets the root element when using the _docbook_ backend and the style class on the HTML body element when using the _html_ backend. The _book_ document type allows multiple level-0 section titles in a single document. The _manpage_ document type enables parsing of metadata necessary to produce a man page. The _inline_ document type allows the content of a single paragraph to be formatted and returned without wrapping it in a containing element. Defaults to _article_. === Document Conversion *-D, --destination-dir*=_DIR_:: Destination output directory. Defaults to the directory containing the source file or, if the source is read from a stream, the working directory. If specified, the directory is resolved relative to the working directory. *-E, --template-engine*=_NAME_:: Template engine to use for the custom converter templates. The gem with the same name as the engine will be loaded automatically. This name is also used to build the full path to the custom converter templates. If a template engine is not specified, it will be auto-detected based on the file extension of the custom converter templates found. *-e, --embedded*:: Output an embeddable document, which excludes the header, the footer, and everything outside the body of the document. This option is useful for producing documents that can be inserted into an external template. *-I, --load-path*=_DIRECTORY_:: Add the specified directory to the load path, so that _-r_ can load extensions from outside the default Ruby load path. This option may be specified more than once. *-n, --section-numbers*:: Auto-number section titles. Synonym for *--attribute sectnums*. *-o, --out-file*=_OUT_FILE_:: Write output to file _OUT_FILE_. Defaults to the base name of the input file suffixed with _backend_ extension. The file is resolved relative to the working directory. If the input is read from standard input or a named pipe (fifo), then the output file defaults to stdout. If _OUT_FILE_ is _-_, then the output file is written to standard output. *-R, --source-dir*=_DIR_:: Source directory. Currently only used if the destination directory is also specified. Used to preserve the directory structure of files converted within this directory in the destination directory. If specified, the directory is resolved relative to the working directory. *-r, --require*=_LIBRARY_:: Require the specified library before executing the processor, using the standard Ruby require. This option may be specified more than once. *-s, --no-header-footer*:: Output an embeddable document, which excludes the header, the footer, and everything outside the body of the document. 
This option is useful for producing documents that can be inserted into an external template. *-T, --template-dir*=_DIR_:: A directory containing custom converter templates that override one or more templates from the built-in set. (requires _tilt_ gem) + If there is a subfolder that matches the engine name (if specified), that folder is appended to the template directory path. Similarly, if there is a subfolder in the resulting template directory that matches the name of the backend, that folder is appended to the template directory path. + This option may be specified more than once. Matching templates found in subsequent directories override ones previously discovered. === Processing Information *--failure-level*=_LEVEL_:: Set the minimum logging level (default: FATAL) that yields a non-zero exit code (i.e., failure). If this option is not set, the program exits with a zero exit code even if warnings or errors have been logged. *-q, --quiet*:: Silence application log messages and script warnings. *--trace*:: Include backtrace information when reporting errors. *-v, --verbose*:: Sets log level to DEBUG so application messages logged at INFO or DEBUG level are printed to stderr. *-w, --warnings*:: Turn on script warnings (applies to executed code). *-t, --timings*:: Print timings report to stderr (time to read, parse, and convert). === Program Information *-h, --help* [_TOPIC_]:: Print a help message. Show the command usage if _TOPIC_ is not specified or recognized. Dump the Asciidoctor man page (in troff/groff format) if _TOPIC_ is _manpage_. Print an AsciiDoc syntax crib sheet (in AsciiDoc) if _TOPIC_ is _syntax_. *-V, --version*:: Print program version number. + *-v* can also be used if no source files are specified. // end::options[] == Environment *Asciidoctor* honors the *SOURCE_DATE_EPOCH* environment variable. If this variable is assigned an integer value, that value is used as the epoch of all input documents and as the local date and time. See https://reproducible-builds.org/specs/source-date-epoch/ for more information about this environment variable. == Exit Status *0*:: Success. *1*:: Failure (syntax or usage error; configuration error; document processing failure; unexpected error). == Bugs Refer to the *Asciidoctor* issue tracker at https://github.com/asciidoctor/asciidoctor/issues?q=is%3Aopen. == Authors *Asciidoctor* is led and maintained by Dan Allen and Sarah White and has received contributions from many individuals in the Asciidoctor community. The project was started in 2012 by Ryan Waldron based on a prototype written by Nick Hengeveld for the Git website. Jason Porter wrote the first implementation of the CLI interface provided by this command. *AsciiDoc.py* was created by Stuart Rackham and has received contributions from many individuals in the AsciiDoc.py community. == Resources *Project website:* https://asciidoctor.org *Project documentation:* https://docs.asciidoctor.org *Community chat:* https://chat.asciidoctor.org *Source repository:* https://github.com/asciidoctor/asciidoctor *Mailing list archive:* https://discuss.asciidoctor.org == Copying Copyright \(C) 2012-present Dan Allen, Sarah White, Ryan Waldron, and the individual contributors to Asciidoctor. Use of this software is granted under the terms of the MIT License. 
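To make the options documented above concrete, here is a brief, hedged sketch of typical invocations; document.adoc, the out/ and public/ paths, the attribute name version, and the epoch value are illustrative placeholders rather than anything shipped with this repository:

 $ asciidoctor document.adoc
 $ asciidoctor -b docbook5 -o out/document.xml document.adoc
 $ asciidoctor -a sectnums -a version=2.0.20 -D public document.adoc
 $ SOURCE_DATE_EPOCH=1686000000 asciidoctor -e -o - document.adoc

The second command selects the docbook5 backend and an explicit output file, the third defines attributes on the command line and writes into a destination directory, and the last one emits an embeddable document to standard output with a pinned epoch for reproducible timestamps.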
asciidoctor-2.0.20/release.sh000077500000000000000000000036131443135032600160610ustar00rootroot00000000000000#!/bin/bash # required packages (for ubuntu:kinetic): curl git jq ruby if [ -z "$RELEASE_RUBYGEMS_API_KEY" ]; then echo No API key specified for publishing to rubygems.org. Stopping release. exit 1 fi export RELEASE_BRANCH=${GITHUB_REF_NAME:-main} if [ ! -v RELEASE_USER ]; then export RELEASE_USER=$GITHUB_ACTOR fi RELEASE_GIT_NAME=$(curl -s https://api.github.com/users/$RELEASE_USER | jq -r .name) RELEASE_GIT_EMAIL=$RELEASE_USER@users.noreply.github.com GEMSPEC=$(ls -1 *.gemspec | head -1) RELEASE_GEM_NAME=$(ruby -e "print (Gem::Specification.load '$GEMSPEC').name") # RELEASE_VERSION must be an exact version number; if not set, defaults to next patch release if [ -z "$RELEASE_VERSION" ]; then export RELEASE_VERSION=$(ruby -e "print (Gem::Specification.load '$GEMSPEC').version.then { _1.prerelease? ? _1.release.to_s : (_1.segments.tap {|s| s[-1] += 1 }.join ?.) }") fi export RELEASE_GEM_VERSION=${RELEASE_VERSION/-/.} # configure git to push changes git config --local user.name "$RELEASE_GIT_NAME" git config --local user.email "$RELEASE_GIT_EMAIL" # configure gem command for publishing mkdir -p $HOME/.gem echo -e "---\n:rubygems_api_key: $RELEASE_RUBYGEMS_API_KEY" > $HOME/.gem/credentials chmod 600 $HOME/.gem/credentials # release! ( set -e ruby tasks/version.rb git commit -a -m "release $RELEASE_VERSION" git tag -m "version $RELEASE_VERSION" v$RELEASE_VERSION mkdir -p pkg gem build $GEMSPEC -o pkg/$RELEASE_GEM_NAME-$RELEASE_GEM_VERSION.gem git push origin $(git describe --tags --exact-match) gem push pkg/$RELEASE_GEM_NAME-$RELEASE_GEM_VERSION.gem ruby tasks/release-notes.rb gh release create v$RELEASE_VERSION -t v$RELEASE_VERSION -F pkg/release-notes.md -d ruby tasks/postversion.rb git commit -a -m 'prepare branch for development [no ci]' git push origin $RELEASE_BRANCH ) exit_code=$? # nuke gem credentials rm -rf $HOME/.gem git status -s -b exit $exit_code asciidoctor-2.0.20/run-tests.sh000077500000000000000000000006531443135032600164060ustar00rootroot00000000000000#!/bin/sh # A convenience script to run tests without delays caused by incrementally writing to the terminal buffer. # This script will execute against all supported Ruby versions if "all" is the first argument to the script. 
if [ "$1" = "all" ]; then rvm 2.3,2.6,jruby-9.2 "do" ./run-tests.sh else GEM_PATH=$(bundle exec ruby -e "puts ENV['GEM_HOME']") CONSOLE_OUTPUT=$(rake test:all 2>&1) echo "$CONSOLE_OUTPUT" fi asciidoctor-2.0.20/screenshot.png000066400000000000000000004535301443135032600167740ustar00rootroot00000000000000PNG  IHDR@X:*BbKGD pHYs  tIME Mh IDATxwXT:Ei!6v#Xb,MrSb{WTDT콠˖CΆP@U~vwNs8;MRRB!B!B!D+Y B!B!B'^]rC!B$''w!B!_Cz!B!B!#!B!B!B 0w֌2ϳƍ/r_/ 3rKrB!cKQqtt>V̼st :мh40kL*Vs=wu`0Pj*c6h4YDYdee1s իW ʂ`u:K_fd4 ...ؔj_222rʕy9,Y3g0Wptt,rPc0=k0@N@E\rI !H@>d .uTy3{N ];wݷߒ'BlJRSSұ#Zm|'=p#׷o㓧NY/U^zTH!BcFY&籺ׯ3i$N:E`` OaL&S{7ؾ};&LˋMPZ9ɕ+Wҹ3RBEAbkkKRR_NN޽KTDŽB<@a21s6}od?bwiݤsqϚի9< {+KI,ԪQ-۶a2}cJP@#{luTj ǜ"[l6ӺeKR !Ⱶdbٴy3W\rO\ãpx9h4}<==$CBXQ NLLd͚5qE6n?q jc_ ,OSW\ILL^E5ؐȑ#4h`d2N8ݻٺmC/;2?߽R˓xTTFca0,U+Wr &NB^CQQK#&&g''*Vq:dޤI{q˖SF n3̫oI8tx/χ:jY;~3klf9O?+BF}~ʕ\xUhb^͛7\r֭[xyywYKIѪukU ;w"55MTf=4 ȑ#GL2iyRؖmpvvzj2}l~[WFJŌlشzt낍-W^ey _~:kG͚5HNN_c֭tj޴@/xyyzr8>``(B d7d'ٻ?ƌrlIIIl߹ۖ&i:|ʶ;BQ6$$$iRm|о1T bɲ>OVV{C,b~a06E?z*Λok0>[[[^7Hǝ<{c^8_o`dԈ\rK1r8vol5x1tҙoW_+Ƃ~^@0p5ڶnEhܸy+WKشv5nV9?r 7,[3o\˾}hҤ5ڐlkki ,̝߮;.2h x7ٰ~=1zoTÃ,(^\xCѶmb8r0W^婧EQغe%R 66[[[6jd<z獺MFéS9s,5jTڵk TB|7www?ϛ֟wҹX֫gtaztZEY^`w>sի0 6ndݞ]AJj*o6|}^eY4oy_M|8S><=,gkgr|Op*rB!DS*Ʋgn޽;Gٶm˗-I&l6 mSx_uCLݯGb6FS {m?`2X"_5FdŊL:uU HQySI2(v1%ɋY('5"..={h۶}M-[Zj4jOOOOVV ?#Gih ͧʰQz͠rQK;~6w:YTY~u$y#11={/z=c~&Ju _)VЭsg mې|.Xܡ=`W᧟9qdufdfNg ~3>hj7lh7F̒RMZTŶ|*w] 0lmm9x< HՒƁjժʑ#G8{չzc\rZjg-n_6 +nYyn۟ӧO[ n;O\1@DqdQ4u݅gYM޽{ ❉-Y|=)Wz^rBQ2 0o7 !Yn=hI?l`=wIJ'aP̜ah4Qƍɂ0VO_I `kWUyl@6 f6i?=1/¥ cص'&1Kjp6mي ;v?aܾO*=y=C|-fӟ7 @V5l?F(| \ >{ytulw( !Aի]///tr瞴iS4mʖ͛ٸaw^X( .]jB lllpvv___J(˷o$%%N oooU{WP [ 2ÇlRp2;oRSS9{,III䠷ݝ*AAxxzZUIQ+srrpׯ_'3#4׻~})YS/_Çs%llV3..gҬY3󾗵lՊMu諮{Νi59{QdggsInܸhN]R|#ܫ 7^'%%eZmswA\KȰC``n7:dz*(3~~~<uk̛7NǫSzb}UصV !88+N~ޫkɍ̴̕C`@@M!Dɕ*r-yt+^uFKCj. se^}xOo233uFˁqolRN:͛7h4b與}}iРO?4MqwwQvO?޽{IHH 5%\N#ʨ_&$$Īƍ 6 NQ֙3g5r$̝77駉dʕ,[CpBv˗III`0qqqrʄ3p 6mZ`u;Νf Begg߹'͍J>>TQ~ѡCR .sqFMPPs-t=݋w?@́HMM%22Y q.W[2tذ{j1\r52BiӰl6?0w\N:Err2& ;;;Xt)OEDXl63jHN:Ō3UA֭[,ZݻvORRwquu<``ԩԩ[תŋ1|8^^̟?{{BÇQ 4#Fjr ?#Wŋ$''&/(]<5kբ<۫WzC}ѢE|w?~[BX"UVLnNe̬,U3;YY\۵Z-{ܺy=z0x`b{;v_֭[-5HVˋZkӿzc vlj=a1 >jt?|=k׮ҥKkZnU6GϞa$"eT$P(kqqdggӯoYǟpqn߾]`~5\~KT\dkg^ЭsGʖmI}[Z ;IOϧS~] -98:tl??,WƂ_ѽWnܸQHXt{hӪ%&ljH_~||CUBgǮ]dee7%xi|B"5iܘ7SBXN!$e%(3={>9ܾC={KXX}7T7;w`oO5h~G\t .p1v͗_~/J= }BY]י?App0͚Q%('gg|2ϟԩSl߾]bih4>z=i322޳*V?Hwޝs1N8A5 -u N¬YqDDD9sxϟϒ%K2don6̞5iӦq*UD͚5Z*fn\ιsr .7:t(52i޽"{8f3vvv<mصs'KġCHHHeqN8AP*4hj-66rrr_gƌTXM  7oիCq06ͻ&FAc4~:7o݂|@` {DGGSRBRCjJ w&$$#Fbr&L3gR #88W77 Wg˖-lݺ-[2u7n˗-rt֍&M` lٺm[b6bŊ=Io2>}tك@LXkWstL2k׮QJڶmKpHʕ#11fY> O{n쉎nݺ wYV- OFw&UQ$5k6x0^y^؟voľ1,]=xL&F 5f3_F#'Obz'իVB!'hCiCҪukϛʕ+ <m IDATZ7n0|p>ׯӮ]+rgn/_7ߴl?y#899Q$k`\zlƁ9kQۓ7M~yCu[ʕٙ{5mkCZZW_RJ̞=:QbEK>ߘ2e /OO&?ZRSS1|8k׮yZFQ^3[0aΝ;ǨQ6|xh?9st{UjԨQZ-'O)S>Xb%7of͢Ed7P\"N3X!yС-55_~iS2o<ʗ/I$"#5HFFG\ ޴)vvv,Zid4Aԯ[ܸyv[ j,i۱ղϜ%,0{lBBB>cV+Q7nǎԩS9CՄZhAyool٪ ;vоCBK殝;Q-[Z@>Lu:/Y´ik|'*꼱N@`ٌ'n+WRqMٌ-NbرT^~2Xyq`+T`ȑ,\^{S^ Z7端Xv-=z`FVDwkպ5_NN޽/б#~~~L{N*_keff&<7oҽ{w R졯OOzvJT ch4̝9C_9<~Lz{rwwǯ QӹR5$:Z~~!vHZymX ͛gqA~w.@@`|{B ފRŊ>6/~ŋzB?Ny<ӽOmS:N`?xz=O7ʕ`H|N\/~~Ktl BtJ _%W럟_fz~];[;FJyO/llmZ|KCLO}-qcccC֭|BuWPDy$BQj#sNݺ:xӧO"V8^O߾8;;[(JmV'-)lFټi͛7gYa4tN❿qT=åK Q7w.{d"##1h4ҲeK&Oѣoh׮-[,Р޹s'3fˋi}FDDe;E5A_IRfMVZEll,ysmv߾}M&"5*wSח;wb2֑LMy ;EQ$%%1I&L<W؃}sD Qrw>o6gaț :4;uDǎYp!VI灇;F!Hƍݻ79K/fqer]vϻ%]Z-ϝc꧟bkg_j` (( &0rH~G>=:t4 9~~~*=z|2~VZUd8! 
Asciidoctor default stylesheet | MIT License | https://asciidoctor.org */ /* Uncomment the following line when using as a custom stylesheet */ /* @import "https://fonts.googleapis.com/css?family=Open+Sans:300,300italic,400,400italic,600,600italic%7CNoto+Serif:400,400italic,700,700italic%7CDroid+Sans+Mono:400,700"; */ html { font-family: sans-serif; -webkit-text-size-adjust: 100%; } a { background: none; } a:focus { outline: thin dotted; } a:active, a:hover { outline: 0; } h1 { font-size: 2em; margin: 0.67em 0; } b, strong { font-weight: bold; } abbr { font-size: 0.9em; } abbr[title] { cursor: help; border-bottom: 1px dotted #dddddf; text-decoration: none; } dfn { font-style: italic; } hr { height: 0; } mark { background: #ff0; color: #000; } code, kbd, pre, samp { font-family: monospace; font-size: 1em; } pre { white-space: pre-wrap; } q { quotes: "\201C" "\201D" "\2018" "\2019"; } small { font-size: 80%; } sub, sup { font-size: 75%; line-height: 0; position: relative; vertical-align: baseline; } sup { top: -0.5em; } sub { bottom: -0.25em; } img { border: 0; } svg:not(:root) { overflow: hidden; } figure { margin: 0; } audio, video { display: inline-block; } audio:not([controls]) { display: none; height: 0; } fieldset { border: 1px solid silver; margin: 0 2px; padding: 0.35em 0.625em 0.75em; } legend { border: 0; padding: 0; } button, input, select, textarea { font-family: inherit; font-size: 100%; margin: 0; } button, input { line-height: normal; } button, select { text-transform: none; } button, html input[type=button], input[type=reset], input[type=submit] { -webkit-appearance: button; cursor: pointer; } button[disabled], html input[disabled] { cursor: default; } input[type=checkbox], input[type=radio] { padding: 0; } button::-moz-focus-inner, input::-moz-focus-inner { border: 0; padding: 0; } textarea { overflow: auto; vertical-align: top; } table { border-collapse: collapse; border-spacing: 0; } *, ::before, ::after { box-sizing: border-box; } html, body { font-size: 100%; } body { background: #fff; color: rgba(0, 0, 0, 0.8); padding: 0; margin: 0; font-family: "Noto Serif", "DejaVu Serif", serif; line-height: 1; position: relative; cursor: auto; tab-size: 4; word-wrap: anywhere; -moz-osx-font-smoothing: grayscale; -webkit-font-smoothing: antialiased; } a:hover { cursor: pointer; } img, object, embed { max-width: 100%; height: auto; } object, embed { height: 100%; } img { -ms-interpolation-mode: bicubic; } .left { float: left !important; } .right { float: right !important; } .text-left { text-align: left !important; } .text-right { text-align: right !important; } .text-center { text-align: center !important; } .text-justify { text-align: justify !important; } .hide { display: none; } img, object, svg { display: inline-block; vertical-align: middle; } textarea { height: auto; min-height: 50px; } select { width: 100%; } .subheader, .admonitionblock td.content > .title, .audioblock > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, .stemblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, table.tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title { line-height: 1.45; color: #7a2518; font-weight: 400; margin-top: 0; margin-bottom: 0.25em; } div, dl, dt, dd, ul, ol, li, h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6, pre, form, p, blockquote, th, td { margin: 0; padding: 0; } a { color: #2156a5; 
text-decoration: underline; line-height: inherit; } a:hover, a:focus { color: #1d4b8f; } a img { border: 0; } p { line-height: 1.6; margin-bottom: 1.25em; text-rendering: optimizeLegibility; } p aside { font-size: 0.875em; line-height: 1.35; font-style: italic; } h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { font-family: "Open Sans", "DejaVu Sans", sans-serif; font-weight: 300; font-style: normal; color: #ba3925; text-rendering: optimizeLegibility; margin-top: 1em; margin-bottom: 0.5em; line-height: 1.0125em; } h1 small, h2 small, h3 small, #toctitle small, .sidebarblock > .content > .title small, h4 small, h5 small, h6 small { font-size: 60%; color: #e99b8f; line-height: 0; } h1 { font-size: 2.125em; } h2 { font-size: 1.6875em; } h3, #toctitle, .sidebarblock > .content > .title { font-size: 1.375em; } h4, h5 { font-size: 1.125em; } h6 { font-size: 1em; } hr { border: solid #dddddf; border-width: 1px 0 0; clear: both; margin: 1.25em 0 1.1875em; } em, i { font-style: italic; line-height: inherit; } strong, b { font-weight: bold; line-height: inherit; } small { font-size: 60%; line-height: inherit; } code { font-family: "Droid Sans Mono", "DejaVu Sans Mono", monospace; font-weight: 400; color: rgba(0, 0, 0, 0.9); } ul, ol, dl { line-height: 1.6; margin-bottom: 1.25em; list-style-position: outside; font-family: inherit; } ul, ol { margin-left: 1.5em; } ul li ul, ul li ol { margin-left: 1.25em; margin-bottom: 0; } ul.circle { list-style-type: circle; } ul.disc { list-style-type: disc; } ul.square { list-style-type: square; } ul.circle ul:not([class]), ul.disc ul:not([class]), ul.square ul:not([class]) { list-style: inherit; } ol li ul, ol li ol { margin-left: 1.25em; margin-bottom: 0; } dl dt { margin-bottom: 0.3125em; font-weight: bold; } dl dd { margin-bottom: 1.25em; } blockquote { margin: 0 0 1.25em; padding: 0.5625em 1.25em 0 1.1875em; border-left: 1px solid #ddd; } blockquote, blockquote p { line-height: 1.6; color: rgba(0, 0, 0, 0.85); } @media screen and (min-width: 768px) { h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { line-height: 1.2; } h1 { font-size: 2.75em; } h2 { font-size: 2.3125em; } h3, #toctitle, .sidebarblock > .content > .title { font-size: 1.6875em; } h4 { font-size: 1.4375em; } } table { background: #fff; margin-bottom: 1.25em; border: 1px solid #dedede; word-wrap: normal; } table thead, table tfoot { background: #f7f8f7; } table thead tr th, table thead tr td, table tfoot tr th, table tfoot tr td { padding: 0.5em 0.625em 0.625em; font-size: inherit; color: rgba(0, 0, 0, 0.8); text-align: left; } table tr th, table tr td { padding: 0.5625em 0.625em; font-size: inherit; color: rgba(0, 0, 0, 0.8); } table tr.even, table tr.alt { background: #f8f8f7; } table thead tr th, table tfoot tr th, table tbody tr td, table tr td, table tfoot tr td { line-height: 1.6; } h1, h2, h3, #toctitle, .sidebarblock > .content > .title, h4, h5, h6 { line-height: 1.2; word-spacing: -0.05em; } h1 strong, h2 strong, h3 strong, #toctitle strong, .sidebarblock > .content > .title strong, h4 strong, h5 strong, h6 strong { font-weight: 400; } .center { margin-left: auto; margin-right: auto; } .stretch { width: 100%; } .clearfix::before, .clearfix::after, .float-group::before, .float-group::after { content: " "; display: table; } .clearfix::after, .float-group::after { clear: both; } :not(pre).nobreak { word-wrap: normal; } :not(pre).nowrap { white-space: nowrap; } :not(pre).pre-wrap { white-space: pre-wrap; } :not(pre):not([class^=L]) > code { 
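/* inline code spans: slightly smaller monospace text on a light gray, rounded background */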
font-size: 0.9375em; font-style: normal !important; letter-spacing: 0; padding: 0.1em 0.5ex; word-spacing: -0.15em; background: #f7f7f8; border-radius: 4px; line-height: 1.45; text-rendering: optimizeSpeed; } pre { color: rgba(0, 0, 0, 0.9); font-family: "Droid Sans Mono", "DejaVu Sans Mono", monospace; line-height: 1.45; text-rendering: optimizeSpeed; } pre code, pre pre { color: inherit; font-size: inherit; line-height: inherit; } pre > code { display: block; } pre.nowrap, pre.nowrap pre { white-space: pre; word-wrap: normal; } em em { font-style: normal; } strong strong { font-weight: 400; } .keyseq { color: rgba(51, 51, 51, 0.8); } kbd { font-family: "Droid Sans Mono", "DejaVu Sans Mono", monospace; display: inline-block; color: rgba(0, 0, 0, 0.8); font-size: 0.65em; line-height: 1.45; background: #f7f7f7; border: 1px solid #ccc; border-radius: 3px; box-shadow: 0 1px 0 rgba(0, 0, 0, 0.2), 0 0 0 0.1em #fff inset; margin: 0 0.15em; padding: 0.2em 0.5em; vertical-align: middle; position: relative; top: -0.1em; white-space: nowrap; } .keyseq kbd:first-child { margin-left: 0; } .keyseq kbd:last-child { margin-right: 0; } .menuseq, .menuref { color: #000; } .menuseq b:not(.caret), .menuref { font-weight: inherit; } .menuseq { word-spacing: -0.02em; } .menuseq b.caret { font-size: 1.25em; line-height: 0.8; } .menuseq i.caret { font-weight: bold; text-align: center; width: 0.45em; } b.button::before, b.button::after { position: relative; top: -1px; font-weight: 400; } b.button::before { content: "["; padding: 0 3px 0 2px; } b.button::after { content: "]"; padding: 0 2px 0 3px; } p a > code:hover { color: rgba(0, 0, 0, 0.9); } #header, #content, #footnotes, #footer { width: 100%; margin: 0 auto; max-width: 62.5em; *zoom: 1; position: relative; padding-left: 0.9375em; padding-right: 0.9375em; } #header::before, #header::after, #content::before, #content::after, #footnotes::before, #footnotes::after, #footer::before, #footer::after { content: " "; display: table; } #header::after, #content::after, #footnotes::after, #footer::after { clear: both; } #content { margin-top: 1.25em; } #content::before { content: none; } #header > h1:first-child { color: rgba(0, 0, 0, 0.85); margin-top: 2.25rem; margin-bottom: 0; } #header > h1:first-child + #toc { margin-top: 8px; border-top: 1px solid #dddddf; } #header > h1:only-child, body.toc2 #header > h1:nth-last-child(2) { border-bottom: 1px solid #dddddf; padding-bottom: 8px; } #header .details { border-bottom: 1px solid #dddddf; line-height: 1.45; padding-top: 0.25em; padding-bottom: 0.25em; padding-left: 0.25em; color: rgba(0, 0, 0, 0.6); display: flex; flex-flow: row wrap; } #header .details span:first-child { margin-left: -0.125em; } #header .details span.email a { color: rgba(0, 0, 0, 0.85); } #header .details br { display: none; } #header .details br + span::before { content: "\00a0\2013\00a0"; } #header .details br + span.author::before { content: "\00a0\22c5\00a0"; color: rgba(0, 0, 0, 0.85); } #header .details br + span#revremark::before { content: "\00a0|\00a0"; } #header #revnumber { text-transform: capitalize; } #header #revnumber::after { content: "\00a0"; } #content > h1:first-child:not([class]) { color: rgba(0, 0, 0, 0.85); border-bottom: 1px solid #dddddf; padding-bottom: 8px; margin-top: 0; padding-top: 1rem; margin-bottom: 1.25rem; } #toc { border-bottom: 1px solid #e7e7e9; padding-bottom: 0.5em; } #toc > ul { margin-left: 0.125em; } #toc ul.sectlevel0 > li > a { font-style: italic; } #toc ul.sectlevel0 ul.sectlevel1 { margin: 0.5em 0; } #toc 
ul { font-family: "Open Sans", "DejaVu Sans", sans-serif; list-style-type: none; } #toc li { line-height: 1.3334; margin-top: 0.3334em; } #toc a { text-decoration: none; } #toc a:active { text-decoration: underline; } #toctitle { color: #7a2518; font-size: 1.2em; } @media screen and (min-width: 768px) { #toctitle { font-size: 1.375em; } body.toc2 { padding-left: 15em; padding-right: 0; } #toc.toc2 { margin-top: 0 !important; background: #f8f8f7; position: fixed; width: 15em; left: 0; top: 0; border-right: 1px solid #e7e7e9; border-top-width: 0 !important; border-bottom-width: 0 !important; z-index: 1000; padding: 1.25em 1em; height: 100%; overflow: auto; } #toc.toc2 #toctitle { margin-top: 0; margin-bottom: 0.8rem; font-size: 1.2em; } #toc.toc2 > ul { font-size: 0.9em; margin-bottom: 0; } #toc.toc2 ul ul { margin-left: 0; padding-left: 1em; } #toc.toc2 ul.sectlevel0 ul.sectlevel1 { padding-left: 0; margin-top: 0.5em; margin-bottom: 0.5em; } body.toc2.toc-right { padding-left: 0; padding-right: 15em; } body.toc2.toc-right #toc.toc2 { border-right-width: 0; border-left: 1px solid #e7e7e9; left: auto; right: 0; } } @media screen and (min-width: 1280px) { body.toc2 { padding-left: 20em; padding-right: 0; } #toc.toc2 { width: 20em; } #toc.toc2 #toctitle { font-size: 1.375em; } #toc.toc2 > ul { font-size: 0.95em; } #toc.toc2 ul ul { padding-left: 1.25em; } body.toc2.toc-right { padding-left: 0; padding-right: 20em; } } #content #toc { border: 1px solid #e0e0dc; margin-bottom: 1.25em; padding: 1.25em; background: #f8f8f7; border-radius: 4px; } #content #toc > :first-child { margin-top: 0; } #content #toc > :last-child { margin-bottom: 0; } #footer { max-width: none; background: rgba(0, 0, 0, 0.8); padding: 1.25em; } #footer-text { color: rgba(255, 255, 255, 0.8); line-height: 1.44; } #content { margin-bottom: 0.625em; } .sect1 { padding-bottom: 0.625em; } @media screen and (min-width: 768px) { #content { margin-bottom: 1.25em; } .sect1 { padding-bottom: 1.25em; } } .sect1:last-child { padding-bottom: 0; } .sect1 + .sect1 { border-top: 1px solid #e7e7e9; } #content h1 > a.anchor, h2 > a.anchor, h3 > a.anchor, #toctitle > a.anchor, .sidebarblock > .content > .title > a.anchor, h4 > a.anchor, h5 > a.anchor, h6 > a.anchor { position: absolute; z-index: 1001; width: 1.5ex; margin-left: -1.5ex; display: block; text-decoration: none !important; visibility: hidden; text-align: center; font-weight: 400; } #content h1 > a.anchor::before, h2 > a.anchor::before, h3 > a.anchor::before, #toctitle > a.anchor::before, .sidebarblock > .content > .title > a.anchor::before, h4 > a.anchor::before, h5 > a.anchor::before, h6 > a.anchor::before { content: "\00A7"; font-size: 0.85em; display: block; padding-top: 0.1em; } #content h1:hover > a.anchor, #content h1 > a.anchor:hover, h2:hover > a.anchor, h2 > a.anchor:hover, h3:hover > a.anchor, #toctitle:hover > a.anchor, .sidebarblock > .content > .title:hover > a.anchor, h3 > a.anchor:hover, #toctitle > a.anchor:hover, .sidebarblock > .content > .title > a.anchor:hover, h4:hover > a.anchor, h4 > a.anchor:hover, h5:hover > a.anchor, h5 > a.anchor:hover, h6:hover > a.anchor, h6 > a.anchor:hover { visibility: visible; } #content h1 > a.link, h2 > a.link, h3 > a.link, #toctitle > a.link, .sidebarblock > .content > .title > a.link, h4 > a.link, h5 > a.link, h6 > a.link { color: #ba3925; text-decoration: none; } #content h1 > a.link:hover, h2 > a.link:hover, h3 > a.link:hover, #toctitle > a.link:hover, .sidebarblock > .content > .title > a.link:hover, h4 > a.link:hover, h5 > 
a.link:hover, h6 > a.link:hover { color: #a53221; } details, .audioblock, .imageblock, .literalblock, .listingblock, .stemblock, .videoblock { margin-bottom: 1.25em; } details { margin-left: 1.25rem; } details > summary { cursor: pointer; display: block; position: relative; line-height: 1.6; margin-bottom: 0.625rem; outline: none; -webkit-tap-highlight-color: transparent; } details > summary::-webkit-details-marker { display: none; } details > summary::before { content: ""; border: solid transparent; border-left-color: currentColor; border-width: 0.3em 0 0.3em 0.5em; position: absolute; top: 0.5em; left: -1.25rem; transform: translateX(15%); } details[open] > summary::before { border: solid transparent; border-top-color: currentColor; border-width: 0.5em 0.3em 0; transform: translateY(15%); } details > summary::after { content: ""; width: 1.25rem; height: 1em; position: absolute; top: 0.3em; left: -1.25rem; } .admonitionblock td.content > .title, .audioblock > .title, .exampleblock > .title, .imageblock > .title, .listingblock > .title, .literalblock > .title, .stemblock > .title, .openblock > .title, .paragraph > .title, .quoteblock > .title, table.tableblock > .title, .verseblock > .title, .videoblock > .title, .dlist > .title, .olist > .title, .ulist > .title, .qlist > .title, .hdlist > .title { text-rendering: optimizeLegibility; text-align: left; font-family: "Noto Serif", "DejaVu Serif", serif; font-size: 1rem; font-style: italic; } table.tableblock.fit-content > caption.title { white-space: nowrap; width: 0; } .paragraph.lead > p, #preamble > .sectionbody > [class=paragraph]:first-of-type p { font-size: 1.21875em; line-height: 1.6; color: rgba(0, 0, 0, 0.85); } .admonitionblock > table { border-collapse: separate; border: 0; background: none; width: 100%; } .admonitionblock > table td.icon { text-align: center; width: 80px; } .admonitionblock > table td.icon img { max-width: none; } .admonitionblock > table td.icon .title { font-weight: bold; font-family: "Open Sans", "DejaVu Sans", sans-serif; text-transform: uppercase; } .admonitionblock > table td.content { padding-left: 1.125em; padding-right: 1.25em; border-left: 1px solid #dddddf; color: rgba(0, 0, 0, 0.6); word-wrap: anywhere; } .admonitionblock > table td.content > :last-child > :last-child { margin-bottom: 0; } .exampleblock > .content { border: 1px solid #e6e6e6; margin-bottom: 1.25em; padding: 1.25em; background: #fff; border-radius: 4px; } .sidebarblock { border: 1px solid #dbdbd6; margin-bottom: 1.25em; padding: 1.25em; background: #f3f3f2; border-radius: 4px; } .sidebarblock > .content > .title { color: #7a2518; margin-top: 0; text-align: center; } .exampleblock > .content > :first-child, .sidebarblock > .content > :first-child { margin-top: 0; } .exampleblock > .content > :last-child, .exampleblock > .content > :last-child > :last-child, .exampleblock > .content .olist > ol > li:last-child > :last-child, .exampleblock > .content .ulist > ul > li:last-child > :last-child, .exampleblock > .content .qlist > ol > li:last-child > :last-child, .sidebarblock > .content > :last-child, .sidebarblock > .content > :last-child > :last-child, .sidebarblock > .content .olist > ol > li:last-child > :last-child, .sidebarblock > .content .ulist > ul > li:last-child > :last-child, .sidebarblock > .content .qlist > ol > li:last-child > :last-child { margin-bottom: 0; } .literalblock pre, .listingblock > .content > pre { border-radius: 4px; overflow-x: auto; padding: 1em; font-size: 0.8125em; } @media screen and (min-width: 768px) { 
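/* scale listing and literal block text back up on wider screens; the 1280px query below restores it to 1em */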
.literalblock pre, .listingblock > .content > pre { font-size: 0.90625em; } } @media screen and (min-width: 1280px) { .literalblock pre, .listingblock > .content > pre { font-size: 1em; } } .literalblock pre, .listingblock > .content > pre:not(.highlight), .listingblock > .content > pre[class=highlight], .listingblock > .content > pre[class^="highlight "] { background: #f7f7f8; } .literalblock.output pre { color: #f7f7f8; background: rgba(0, 0, 0, 0.9); } .listingblock > .content { position: relative; } .listingblock code[data-lang]::before { display: none; content: attr(data-lang); position: absolute; font-size: 0.75em; top: 0.425rem; right: 0.5rem; line-height: 1; text-transform: uppercase; color: inherit; opacity: 0.5; } .listingblock:hover code[data-lang]::before { display: block; } .listingblock.terminal pre .command::before { content: attr(data-prompt); padding-right: 0.5em; color: inherit; opacity: 0.5; } .listingblock.terminal pre .command:not([data-prompt])::before { content: "$"; } .listingblock pre.highlightjs { padding: 0; } .listingblock pre.highlightjs > code { padding: 1em; border-radius: 4px; } .listingblock pre.prettyprint { border-width: 0; } .prettyprint { background: #f7f7f8; } pre.prettyprint .linenums { line-height: 1.45; margin-left: 2em; } pre.prettyprint li { background: none; list-style-type: inherit; padding-left: 0; } pre.prettyprint li code[data-lang]::before { opacity: 1; } pre.prettyprint li:not(:first-child) code[data-lang]::before { display: none; } table.linenotable { border-collapse: separate; border: 0; margin-bottom: 0; background: none; } table.linenotable td[class] { color: inherit; vertical-align: top; padding: 0; line-height: inherit; white-space: normal; } table.linenotable td.code { padding-left: 0.75em; } table.linenotable td.linenos, pre.pygments .linenos { border-right: 1px solid; opacity: 0.35; padding-right: 0.5em; user-select: none; } pre.pygments span.linenos { display: inline-block; margin-right: 0.75em; } .quoteblock { margin: 0 1em 1.25em 1.5em; display: table; } .quoteblock:not(.excerpt) > .title { margin-left: -1.5em; margin-bottom: 0.75em; } .quoteblock blockquote, .quoteblock p { color: rgba(0, 0, 0, 0.85); font-size: 1.15rem; line-height: 1.75; word-spacing: 0.1em; letter-spacing: 0; font-style: italic; text-align: justify; } .quoteblock blockquote { margin: 0; padding: 0; border: 0; } .quoteblock blockquote::before { content: "\201c"; float: left; font-size: 2.75em; font-weight: bold; line-height: 0.6em; margin-left: -0.6em; color: #7a2518; text-shadow: 0 1px 2px rgba(0, 0, 0, 0.1); } .quoteblock blockquote > .paragraph:last-child p { margin-bottom: 0; } .quoteblock .attribution { margin-top: 0.75em; margin-right: 0.5ex; text-align: right; } .verseblock { margin: 0 1em 1.25em; } .verseblock pre { font-family: "Open Sans", "DejaVu Sans", sans-serif; font-size: 1.15rem; color: rgba(0, 0, 0, 0.85); font-weight: 300; text-rendering: optimizeLegibility; } .verseblock pre strong { font-weight: 400; } .verseblock .attribution { margin-top: 1.25rem; margin-left: 0.5ex; } .quoteblock .attribution, .verseblock .attribution { font-size: 0.9375em; line-height: 1.45; font-style: italic; } .quoteblock .attribution br, .verseblock .attribution br { display: none; } .quoteblock .attribution cite, .verseblock .attribution cite { display: block; letter-spacing: -0.025em; color: rgba(0, 0, 0, 0.6); } .quoteblock.abstract blockquote::before, .quoteblock.excerpt blockquote::before, .quoteblock .quoteblock blockquote::before { display: none; } 
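/* abstract, excerpt, and nested quote blocks drop the oversized opening quote and use tighter text spacing */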
.quoteblock.abstract blockquote, .quoteblock.abstract p, .quoteblock.excerpt blockquote, .quoteblock.excerpt p, .quoteblock .quoteblock blockquote, .quoteblock .quoteblock p { line-height: 1.6; word-spacing: 0; } .quoteblock.abstract { margin: 0 1em 1.25em; display: block; } .quoteblock.abstract > .title { margin: 0 0 0.375em; font-size: 1.15em; text-align: center; } .quoteblock.excerpt > blockquote, .quoteblock .quoteblock { padding: 0 0 0.25em 1em; border-left: 0.25em solid #dddddf; } .quoteblock.excerpt, .quoteblock .quoteblock { margin-left: 0; } .quoteblock.excerpt blockquote, .quoteblock.excerpt p, .quoteblock .quoteblock blockquote, .quoteblock .quoteblock p { color: inherit; font-size: 1.0625rem; } .quoteblock.excerpt .attribution, .quoteblock .quoteblock .attribution { color: inherit; font-size: 0.85rem; text-align: left; margin-right: 0; } p.tableblock:last-child { margin-bottom: 0; } td.tableblock > .content { margin-bottom: 1.25em; word-wrap: anywhere; } td.tableblock > .content > :last-child { margin-bottom: -1.25em; } table.tableblock, th.tableblock, td.tableblock { border: 0 solid #dedede; } table.grid-all > * > tr > * { border-width: 1px; } table.grid-cols > * > tr > * { border-width: 0 1px; } table.grid-rows > * > tr > * { border-width: 1px 0; } table.frame-all { border-width: 1px; } table.frame-ends { border-width: 1px 0; } table.frame-sides { border-width: 0 1px; } table.frame-none > colgroup + * > :first-child > *, table.frame-sides > colgroup + * > :first-child > * { border-top-width: 0; } table.frame-none > :last-child > :last-child > *, table.frame-sides > :last-child > :last-child > * { border-bottom-width: 0; } table.frame-none > * > tr > :first-child, table.frame-ends > * > tr > :first-child { border-left-width: 0; } table.frame-none > * > tr > :last-child, table.frame-ends > * > tr > :last-child { border-right-width: 0; } table.stripes-all > * > tr, table.stripes-odd > * > tr:nth-of-type(odd), table.stripes-even > * > tr:nth-of-type(even), table.stripes-hover > * > tr:hover { background: #f8f8f7; } th.halign-left, td.halign-left { text-align: left; } th.halign-right, td.halign-right { text-align: right; } th.halign-center, td.halign-center { text-align: center; } th.valign-top, td.valign-top { vertical-align: top; } th.valign-bottom, td.valign-bottom { vertical-align: bottom; } th.valign-middle, td.valign-middle { vertical-align: middle; } table thead th, table tfoot th { font-weight: bold; } tbody tr th { background: #f7f8f7; } tbody tr th, tbody tr th p, tfoot tr th, tfoot tr th p { color: rgba(0, 0, 0, 0.8); font-weight: bold; } p.tableblock > code:only-child { background: none; padding: 0; } p.tableblock { font-size: 1em; } ol { margin-left: 1.75em; } ul li ol { margin-left: 1.5em; } dl dd { margin-left: 1.125em; } dl dd:last-child, dl dd:last-child > :last-child { margin-bottom: 0; } li p, ul dd, ol dd, .olist .olist, .ulist .ulist, .ulist .olist, .olist .ulist { margin-bottom: 0.625em; } ul.checklist, ul.none, ol.none, ul.no-bullet, ol.no-bullet, ol.unnumbered, ul.unstyled, ol.unstyled { list-style-type: none; } ul.no-bullet, ol.no-bullet, ol.unnumbered { margin-left: 0.625em; } ul.unstyled, ol.unstyled { margin-left: 0; } li > p:empty:only-child::before { content: ""; display: inline-block; } ul.checklist > li > p:first-child { margin-left: -1em; } ul.checklist > li > p:first-child > .fa-square-o:first-child, ul.checklist > li > p:first-child > .fa-check-square-o:first-child { width: 1.25em; font-size: 0.8em; position: relative; bottom: 0.125em; } 
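/* interactive checklists use real checkbox inputs in place of the Font Awesome square icons styled above */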
ul.checklist > li > p:first-child > input[type=checkbox]:first-child { margin-right: 0.25em; } ul.inline { display: flex; flex-flow: row wrap; list-style: none; margin: 0 0 0.625em -1.25em; } ul.inline > li { margin-left: 1.25em; } .unstyled dl dt { font-weight: 400; font-style: normal; } ol.arabic { list-style-type: decimal; } ol.decimal { list-style-type: decimal-leading-zero; } ol.loweralpha { list-style-type: lower-alpha; } ol.upperalpha { list-style-type: upper-alpha; } ol.lowerroman { list-style-type: lower-roman; } ol.upperroman { list-style-type: upper-roman; } ol.lowergreek { list-style-type: lower-greek; } .hdlist > table, .colist > table { border: 0; background: none; } .hdlist > table > tbody > tr, .colist > table > tbody > tr { background: none; } td.hdlist1, td.hdlist2 { vertical-align: top; padding: 0 0.625em; } td.hdlist1 { font-weight: bold; padding-bottom: 1.25em; } td.hdlist2 { word-wrap: anywhere; } .literalblock + .colist, .listingblock + .colist { margin-top: -0.5em; } .colist td:not([class]):first-child { padding: 0.4em 0.75em 0; line-height: 1; vertical-align: top; } .colist td:not([class]):first-child img { max-width: none; } .colist td:not([class]):last-child { padding: 0.25em 0; } .thumb, .th { line-height: 0; display: inline-block; border: 4px solid #fff; box-shadow: 0 0 0 1px #ddd; } .imageblock.left { margin: 0.25em 0.625em 1.25em 0; } .imageblock.right { margin: 0.25em 0 1.25em 0.625em; } .imageblock > .title { margin-bottom: 0; } .imageblock.thumb, .imageblock.th { border-width: 6px; } .imageblock.thumb > .title, .imageblock.th > .title { padding: 0 0.125em; } .image.left, .image.right { margin-top: 0.25em; margin-bottom: 0.25em; display: inline-block; line-height: 0; } .image.left { margin-right: 0.625em; } .image.right { margin-left: 0.625em; } a.image { text-decoration: none; display: inline-block; } a.image object { pointer-events: none; } sup.footnote, sup.footnoteref { font-size: 0.875em; position: static; vertical-align: super; } sup.footnote a, sup.footnoteref a { text-decoration: none; } sup.footnote a:active, sup.footnoteref a:active { text-decoration: underline; } #footnotes { padding-top: 0.75em; padding-bottom: 0.75em; margin-bottom: 0.625em; } #footnotes hr { width: 20%; min-width: 6.25em; margin: -0.25em 0 0.75em; border-width: 1px 0 0; } #footnotes .footnote { padding: 0 0.375em 0 0.225em; line-height: 1.3334; font-size: 0.875em; margin-left: 1.2em; margin-bottom: 0.2em; } #footnotes .footnote a:first-of-type { font-weight: bold; text-decoration: none; margin-left: -1.05em; } #footnotes .footnote:last-of-type { margin-bottom: 0; } #content #footnotes { margin-top: -0.625em; margin-bottom: 0; padding: 0.75em 0; } div.unbreakable { page-break-inside: avoid; } .big { font-size: larger; } .small { font-size: smaller; } .underline { text-decoration: underline; } .overline { text-decoration: overline; } .line-through { text-decoration: line-through; } .aqua { color: #00bfbf; } .aqua-background { background: #00fafa; } .black { color: #000; } .black-background { background: #000; } .blue { color: #0000bf; } .blue-background { background: #0000fa; } .fuchsia { color: #bf00bf; } .fuchsia-background { background: #fa00fa; } .gray { color: #606060; } .gray-background { background: #7d7d7d; } .green { color: #006000; } .green-background { background: #007d00; } .lime { color: #00bf00; } .lime-background { background: #00fa00; } .maroon { color: #600000; } .maroon-background { background: #7d0000; } .navy { color: #000060; } .navy-background { background: 
#00007d; } .olive { color: #606000; } .olive-background { background: #7d7d00; } .purple { color: #600060; } .purple-background { background: #7d007d; } .red { color: #bf0000; } .red-background { background: #fa0000; } .silver { color: #909090; } .silver-background { background: #bcbcbc; } .teal { color: #006060; } .teal-background { background: #007d7d; } .white { color: #bfbfbf; } .white-background { background: #fafafa; } .yellow { color: #bfbf00; } .yellow-background { background: #fafa00; } span.icon > .fa { cursor: default; } a span.icon > .fa { cursor: inherit; } .admonitionblock td.icon [class^="fa icon-"] { font-size: 2.5em; text-shadow: 1px 1px 2px rgba(0, 0, 0, 0.5); cursor: default; } .admonitionblock td.icon .icon-note::before { content: "\f05a"; color: #19407c; } .admonitionblock td.icon .icon-tip::before { content: "\f0eb"; text-shadow: 1px 1px 2px rgba(155, 155, 0, 0.8); color: #111; } .admonitionblock td.icon .icon-warning::before { content: "\f071"; color: #bf6900; } .admonitionblock td.icon .icon-caution::before { content: "\f06d"; color: #bf3400; } .admonitionblock td.icon .icon-important::before { content: "\f06a"; color: #bf0000; } .conum[data-value] { display: inline-block; color: #fff !important; background: rgba(0, 0, 0, 0.8); border-radius: 50%; text-align: center; font-size: 0.75em; width: 1.67em; height: 1.67em; line-height: 1.67em; font-family: "Open Sans", "DejaVu Sans", sans-serif; font-style: normal; font-weight: bold; } .conum[data-value] * { color: #fff !important; } .conum[data-value] + b { display: none; } .conum[data-value]::after { content: attr(data-value); } pre .conum[data-value] { position: relative; top: -0.125em; } b.conum * { color: inherit !important; } .conum:not([data-value]):empty { display: none; } dt, th.tableblock, td.content, div.footnote { text-rendering: optimizeLegibility; } h1, h2, p, td.content, span.alt, summary { letter-spacing: -0.01em; } p strong, td.content strong, div.footnote strong { letter-spacing: -0.005em; } p, blockquote, dt, td.content, td.hdlist1, span.alt, summary { font-size: 1.0625rem; } p { margin-bottom: 1.25rem; } .sidebarblock p, .sidebarblock dt, .sidebarblock td.content, p.tableblock { font-size: 1em; } .exampleblock > .content { background: #fffef7; border-color: #e0e0dc; box-shadow: 0 1px 4px #e0e0dc; } .print-only { display: none !important; } @page { margin: 1.25cm 0.75cm; } @media print { * { box-shadow: none !important; text-shadow: none !important; } html { font-size: 80%; } a { color: inherit !important; text-decoration: underline !important; } a.bare, a[href^="#"], a[href^="mailto:"] { text-decoration: none !important; } a[href^="http:"]:not(.bare)::after, a[href^="https:"]:not(.bare)::after { content: "(" attr(href) ")"; display: inline-block; font-size: 0.875em; padding-left: 0.25em; } abbr[title] { border-bottom: 1px dotted; } abbr[title]::after { content: " (" attr(title) ")"; } pre, blockquote, tr, img, object, svg { page-break-inside: avoid; } thead { display: table-header-group; } svg { max-width: 100%; } p, blockquote, dt, td.content { font-size: 1em; orphans: 3; widows: 3; } h2, h3, #toctitle, .sidebarblock > .content > .title { page-break-after: avoid; } #header, #content, #footnotes, #footer { max-width: none; } #toc, .sidebarblock, .exampleblock > .content { background: none !important; } #toc { border-bottom: 1px solid #dddddf !important; padding-bottom: 0 !important; } body.book #header { text-align: center; } body.book #header > h1:first-child { border: 0 !important; margin: 2.5em 0 
1em; } body.book #header .details { border: 0 !important; display: block; padding: 0 !important; } body.book #header .details span:first-child { margin-left: 0 !important; } body.book #header .details br { display: block; } body.book #header .details br + span::before { content: none !important; } body.book #toc { border: 0 !important; text-align: left !important; padding: 0 !important; margin: 0 !important; } body.book #toc, body.book #preamble, body.book h1.sect0, body.book .sect1 > h2 { page-break-before: always; } .listingblock code[data-lang]::before { display: block; } #footer { padding: 0 0.9375em; } .hide-on-print { display: none !important; } .print-only { display: block !important; } .hide-for-print { display: none !important; } .show-for-print { display: inherit !important; } } @media amzn-kf8, print { #header > h1:first-child { margin-top: 1.25rem; } .sect1 { padding: 0 !important; } .sect1 + .sect1 { border: 0; } #footer { background: none; } #footer-text { color: rgba(0, 0, 0, 0.6); font-size: 0.9em; } } @media amzn-kf8 { #header, #content, #footnotes, #footer { padding: 0; } } asciidoctor-2.0.20/src/stylesheets/coderay-asciidoctor.css000066400000000000000000000101461443135032600237050ustar00rootroot00000000000000/*! Stylesheet for CodeRay to loosely match GitHub themes | MIT License */ pre.CodeRay { background: #f7f7f8; } .CodeRay .line-numbers { border-right: 1px solid; opacity: 0.35; padding: 0 0.5em 0 0; user-select: none; } .CodeRay span.line-numbers { display: inline-block; margin-right: 0.75em; } .CodeRay .line-numbers strong { color: #000; } table.CodeRay { border-collapse: separate; border: 0; margin-bottom: 0; background: none; } table.CodeRay td { vertical-align: top; line-height: inherit; } table.CodeRay td.line-numbers { text-align: right; } table.CodeRay td.code { padding: 0 0 0 0.75em; } .CodeRay .debug { color: #fff !important; background: #000080 !important; } .CodeRay .annotation { color: #007; } .CodeRay .attribute-name { color: #000080; } .CodeRay .attribute-value { color: #700; } .CodeRay .binary { color: #509; } .CodeRay .comment { color: #998; font-style: italic; } .CodeRay .char { color: #04d; } .CodeRay .char .content { color: #04d; } .CodeRay .char .delimiter { color: #039; } .CodeRay .class { color: #458; font-weight: bold; } .CodeRay .complex { color: #a08; } .CodeRay .constant, .CodeRay .predefined-constant { color: #008080; } .CodeRay .color { color: #099; } .CodeRay .class-variable { color: #369; } .CodeRay .decorator { color: #b0b; } .CodeRay .definition { color: #099; } .CodeRay .delimiter { color: #000; } .CodeRay .doc { color: #970; } .CodeRay .doctype { color: #34b; } .CodeRay .doc-string { color: #d42; } .CodeRay .escape { color: #666; } .CodeRay .entity { color: #800; } .CodeRay .error { color: #808; } .CodeRay .exception { color: inherit; } .CodeRay .filename { color: #099; } .CodeRay .function { color: #900; font-weight: bold; } .CodeRay .global-variable { color: #008080; } .CodeRay .hex { color: #058; } .CodeRay .integer, .CodeRay .float { color: #099; } .CodeRay .include { color: #555; } .CodeRay .inline { color: #000; } .CodeRay .inline .inline { background: #ccc; } .CodeRay .inline .inline .inline { background: #bbb; } .CodeRay .inline .inline-delimiter { color: #d14; } .CodeRay .inline-delimiter { color: #d14; } .CodeRay .important { color: #555; font-weight: bold; } .CodeRay .interpreted { color: #b2b; } .CodeRay .instance-variable { color: #008080; } .CodeRay .label { color: #970; } .CodeRay .local-variable { color: #963; } 
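/* remaining CodeRay token classes: numeric literals, keywords, strings, regexps, symbols, tags, and diff markers */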
.CodeRay .octal { color: #40e; } .CodeRay .predefined { color: #369; } .CodeRay .preprocessor { color: #579; } .CodeRay .pseudo-class { color: #555; } .CodeRay .directive { font-weight: bold; } .CodeRay .type { font-weight: bold; } .CodeRay .predefined-type { color: inherit; } .CodeRay .reserved, .CodeRay .keyword { color: #000; font-weight: bold; } .CodeRay .key { color: #808; } .CodeRay .key .delimiter { color: #606; } .CodeRay .key .char { color: #80f; } .CodeRay .value { color: #088; } .CodeRay .regexp .delimiter { color: #808; } .CodeRay .regexp .content { color: #808; } .CodeRay .regexp .modifier { color: #808; } .CodeRay .regexp .char { color: #d14; } .CodeRay .regexp .function { color: #404; font-weight: bold; } .CodeRay .string { color: #d20; } .CodeRay .string .string .string { background: #ffd0d0; } .CodeRay .string .content { color: #d14; } .CodeRay .string .char { color: #d14; } .CodeRay .string .delimiter { color: #d14; } .CodeRay .shell { color: #d14; } .CodeRay .shell .delimiter { color: #d14; } .CodeRay .symbol { color: #990073; } .CodeRay .symbol .content { color: #a60; } .CodeRay .symbol .delimiter { color: #630; } .CodeRay .tag { color: #008080; } .CodeRay .tag-special { color: #d70; } .CodeRay .variable { color: #036; } .CodeRay .insert { background: #afa; } .CodeRay .delete { background: #faa; } .CodeRay .change { color: #aaf; background: #007; } .CodeRay .head { color: #f8f; background: #505; } .CodeRay .insert .insert { color: #080; } .CodeRay .delete .delete { color: #800; } .CodeRay .change .change { color: #66f; } .CodeRay .head .head { color: #f4f; } asciidoctor-2.0.20/src/stylesheets/lib/000077500000000000000000000000001443135032600200105ustar00rootroot00000000000000asciidoctor-2.0.20/src/stylesheets/lib/postcss-minify-selectors.js000066400000000000000000000013161443135032600253370ustar00rootroot00000000000000'use strict' const selectorParser = require('postcss-selector-parser') /** * Replaces the official postcss-minify-selectors plugin with a simpler implementation. * * The official plugin sorts the selectors and mangles pseudo-elements. * This simpler plugin only removes space characters and unnecessary quotes. */ module.exports = (opts) => { return { postcssPlugin: 'postcss-minify-selectors', Rule (rule) { rule.selector = selectorParser((selectors) => { selectors.walkAttributes((attr) => { if (attr.value) attr.raws.value = attr.getQuotedValue({ smart: true }) }) }).processSync(rule.selector, { lossless: false }) }, } } module.exports.postcss = true asciidoctor-2.0.20/src/stylesheets/lib/postcss-rule-per-line.js000066400000000000000000000006601443135032600245240ustar00rootroot00000000000000'use strict' /** * Makes the minified stylesheet more readable by putting each rule on its own line and adding a trailing newline. 
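 *
 * On OnceExit it walks the tree and resets `raws.before` to a single newline on
 * every rule and at-rule that has a preceding node, then sets a trailing newline
 * on the root, so cssnano's single-line output ends up with one rule per line.
 * It is enabled in ./postcss.config.js alongside ./lib/postcss-minify-selectors.js.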
*/ module.exports = (opts) => { return { postcssPlugin: 'postcss-rule-per-line', OnceExit (root) { root.walk((node) => { if (node.type.endsWith('rule') && node.prev()) node.raws.before = '\n' }) root.raws.after = '\n' }, } } module.exports.postcss = true asciidoctor-2.0.20/src/stylesheets/package.json000066400000000000000000000011451443135032600215310ustar00rootroot00000000000000{ "private": true, "description": "Compiles the default stylesheet for the built-in HTML converter.", "devDependencies": { "autoprefixer": "~10.3", "cssnano": "~5.0", "package.json": "^2.0.1", "postcss": "~8.3", "postcss-cli": "~8.3", "stylelint": "~13.13", "stylelint-config-standard": "~22.0" }, "scripts": { "build": "postcss asciidoctor.css -o ../../data/stylesheets/asciidoctor-default.css && postcss coderay-asciidoctor.css -o ../../data/stylesheets/coderay-asciidoctor.css", "lint": "stylelint *.css" }, "browserslist": [ "defaults", "IE 11" ] } asciidoctor-2.0.20/src/stylesheets/postcss.config.js000066400000000000000000000017301443135032600225430ustar00rootroot00000000000000module.exports = (ctx) => ({ plugins: { autoprefixer: true, cssnano: { // refer to https://cssnano.co/docs/optimisations to understand this preset preset: [ 'default', { discardComments: { exclude: true }, // comments are currently aimed at the user, so keep them minifySelectors: { exclude: true }, // replaced by ./lib/postcss-minify-selectors.js minifyFontValues: { exclude: true }, // switches to numeric font weights, which make the stylesheet less extensible mergeRules: { exclude: true }, // TODO reenable; currently causes non-functional differences in output uniqueSelectors: { exclude: true }, // reorders selectors, which doesn't improve minification cssDeclarationSorter: { exclude: true }, // reorders properties, which doesn't improve minification }, ] }, './lib/postcss-minify-selectors.js': true, './lib/postcss-rule-per-line.js': true, } }) asciidoctor-2.0.20/tasks/000077500000000000000000000000001443135032600152245ustar00rootroot00000000000000asciidoctor-2.0.20/tasks/bundler.rake000066400000000000000000000001511443135032600175200ustar00rootroot00000000000000# frozen_string_literal: true begin require 'bundler/gem_tasks' rescue LoadError warn $!.message end asciidoctor-2.0.20/tasks/console.rake000066400000000000000000000002201443135032600175240ustar00rootroot00000000000000# frozen_string_literal: true desc 'Open an irb session preloaded with this library' task :console do sh 'bundle console', verbose: false end asciidoctor-2.0.20/tasks/coverage.rake000066400000000000000000000001511443135032600176600ustar00rootroot00000000000000# frozen_string_literal: true desc 'Activates coverage' task :coverage do ENV['COVERAGE'] = 'true' end asciidoctor-2.0.20/tasks/cucumber.rake000066400000000000000000000003631443135032600176770ustar00rootroot00000000000000# frozen_string_literal: true begin require 'cucumber/rake/task' Cucumber::Rake::Task.new :features do |t| t.cucumber_opts = %w(-f progress) t.cucumber_opts << '--no-color' if ENV['CI'] end rescue LoadError warn $!.message end asciidoctor-2.0.20/tasks/dependents.rake000066400000000000000000000042551443135032600202270ustar00rootroot00000000000000# frozen_string_literal: true def trigger_build project, header, payload, host, path require 'net/http' (http = Net::HTTP.new host, 443).use_ssl = true request = Net::HTTP::Post.new path, header request.body = payload response = http.request request if /^20\d$/.match? 
response.code puts %(Successfully triggered build on #{project} repository) else warn %(Unable to trigger build on #{project} repository: #{response.code} - #{response.message}) end end def parse_project project org, name, branch = project.split '/', 3 branch ||= 'master' [org, name, branch] end namespace :build do desc 'Trigger builds for dependent projects' task :dependents do next unless ENV['GITHUB_ACTIONS'].to_s == 'true' && ENV['GITHUB_EVENT_NAME'].to_s != 'pull_request' && !(ENV['GITHUB_REF'].to_s.start_with? 'refs/tags/') if (commit_hash = ENV['GITHUB_SHA']) commit_memo = %( (#{commit_hash.slice 0, 8})\n\nhttps://github.com/#{ENV['GITHUB_REPOSITORY'] || 'asciidoctor/asciidoctor'}/commit/#{commit_hash}) end # NOTE The GITHUB_TOKEN env var must be defined in the CI interface. # Retrieve this token using the settings of the account/org -> Developer Settings -> Personal Access Tokens # and generate a new "Personal Access Token" with the "repo" scope github_token = ENV['GITHUB_API_TOKEN'] require 'json' %w( asciidoctor/asciidoctor.js asciidoctor/asciidoctorj/main asciidoctor/asciidoctor-pdf/main asciidoctor/asciidoctor-reveal.js ).each do |project| org, name, branch = parse_project project project = [org, name, branch].join '/' header = { 'Content-Type' => 'application/json', 'Accept' => 'application/vnd.github.everest-preview+json', 'Authorization' => %(token #{github_token}) } payload = { 'event_type' => 'test_upstream', 'client_payload' => { 'branch' => (ENV['GITHUB_REF'].sub 'refs/heads/', ''), 'message' => %(Build triggered by Asciidoctor#{commit_memo}), }, }.to_json trigger_build project, header, payload, 'api.github.com', %(/repos/#{org}/#{name}/dispatches) end if github_token end end asciidoctor-2.0.20/tasks/postversion.rb000066400000000000000000000017421443135032600201500ustar00rootroot00000000000000# frozen_string_literal: true release_version = ENV['RELEASE_VERSION'] major_minor_version = ((release_version.split '.').slice 0, 2).join '.' prerelease = (release_version.count '[a-z]') > 0 ? %(.#{(release_version.split '.', 3)[-1]}) : nil changelog_file = 'CHANGELOG.adoc' antora_file = 'docs/antora.yml' changelog_contents = File.readlines changelog_file, mode: 'r:UTF-8' last_release_idx = changelog_contents.index {|l| (l.start_with? '== ') && (%r/^== \d/.match? l) } changelog_contents.insert last_release_idx, <<~END == Unreleased _No changes since previous release._ END antora_contents = (File.readlines antora_file, mode: 'r:UTF-8').map do |l| if l.start_with? 'prerelease: ' %(prerelease: #{prerelease ? ?' + prerelease + ?' : 'false'}\n) elsif l.start_with? 'version: ' %(version: '#{major_minor_version}'\n) else l end end File.write changelog_file, changelog_contents.join, mode: 'w:UTF-8' File.write antora_file, antora_contents.join, mode: 'w:UTF-8' asciidoctor-2.0.20/tasks/release-notes.rb000066400000000000000000000052071443135032600203230ustar00rootroot00000000000000# frozen_string_literal: true require 'time' old_tz, ENV['TZ'] = ENV['TZ'], 'US/Mountain' release_date = Time.now.strftime '%Y-%m-%d' ENV['TZ'] = old_tz spec = Gem::Specification.load Dir['*.gemspec'].first gem_name = spec.name gem_version = spec.version gem_dist_url = %(https://rubygems.org/gems/#{gem_name}) release_notes_file = 'pkg/release-notes.md' release_user = ENV['RELEASE_USER'] || 'mojavelinux' release_beer = ENV['RELEASE_BEER'] || 'TBD' release_tag = %(v#{gem_version}) previous_tag = (`git -c versionsort.suffix=. 
-c versionsort.suffix=- ls-remote --tags --refs --sort -v:refname origin`.each_line chomp: true) .map {|it| (it.rpartition '/')[-1] } .drop_while {|it| it != release_tag } .reject {|it| it == release_tag } .find {|it| (Gem::Version.new it.slice 1, it.length) < gem_version } issues_url = spec.metadata['bug_tracker_uri'] repo_url = spec.metadata['source_code_uri'] changelog = (File.readlines 'CHANGELOG.adoc', chomp: true, mode: 'r:UTF-8').reduce nil do |accum, line| if line == '=== Details' accum.pop break accum.join ?\n elsif accum if line.end_with? '::' line = %(### #{line.slice 0, line.length - 2}) elsif line.start_with? ' * ' line = line.lstrip end accum << line unless accum.empty? && line.empty? elsif line.start_with? %(== #{gem_version} ) accum = [] end accum end release_notes = <<~EOS.chomp Write summary... ## Distribution - [RubyGem (#{gem_name})](#{gem_dist_url}) Asciidoctor is also packaged for [Fedora](https://apps.fedoraproject.org/packages/rubygem-asciidoctor), [Debian](https://packages.debian.org/sid/asciidoctor), [Ubuntu](https://packages.ubuntu.com/search?keywords=asciidoctor), [Alpine Linux](https://pkgs.alpinelinux.org/packages?name=asciidoctor), [OpenSUSE](https://software.opensuse.org/package/rubygem-asciidoctor), and [Homebrew](https://formulae.brew.sh/formula/asciidoctor). You can use the system's package manager to install the package named **asciidoctor**. ## Changelog #{changelog} ## Release meta Released on: #{release_date} Released by: @#{release_user} Release beer: #{release_beer} Logs: [resolved issues](#{issues_url}?q=is%3Aissue+label%3A#{release_tag}+is%3Aclosed)#{previous_tag ? %( | [source diff](#{repo_url}/compare/#{previous_tag}...#{release_tag}) | [gem diff](https://my.diffend.io/gems/asciidoctor/#{previous_tag}/#{release_tag})) : ''} ## Credits A very special thanks to all the **awesome** [supporters of the Asciidoctor OpenCollective campaign](https://opencollective.com/asciidoctor), who provide critical funding for the ongoing development of this project. EOS File.write release_notes_file, release_notes, mode: 'w:UTF-8' asciidoctor-2.0.20/tasks/test.rake000066400000000000000000000014711443135032600170520ustar00rootroot00000000000000# frozen_string_literal: true def prepare_test_env # rather than hardcoding gc settings in test task, # could use https://gist.github.com/benders/788695 ENV['RUBY_GC_MALLOC_LIMIT'] = 128_000_000.to_s ENV['RUBY_GC_OLDMALLOC_LIMIT'] = 128_000_000.to_s ENV['RUBY_GC_HEAP_INIT_SLOTS'] = 750_000.to_s ENV['RUBY_GC_HEAP_FREE_SLOTS'] = 750_000.to_s ENV['RUBY_GC_HEAP_GROWTH_MAX_SLOTS'] = 50_000.to_s ENV['RUBY_GC_HEAP_GROWTH_FACTOR'] = 2.to_s end begin require 'rake/testtask' Rake::TestTask.new :test do |t| prepare_test_env puts %(LANG: #{ENV['LANG']}) if ENV['CI'] t.libs << 'test' t.pattern = 'test/**/*_test.rb' t.verbose = true t.warning = true end rescue LoadError warn $!.message end namespace :test do desc 'Run unit and feature tests' task all: [:test, :features] end asciidoctor-2.0.20/tasks/version.rb000066400000000000000000000057721443135032600172510ustar00rootroot00000000000000# frozen_string_literal: true require 'time' require_relative '../lib/asciidoctor' release_version = ENV['RELEASE_VERSION'] release_gem_version = ENV['RELEASE_GEM_VERSION'] prerelease = (release_version.count '[a-z]') > 0 ? 
%(-#{(release_version.split '.', 3)[-1]}) : nil release_date = Time.now.strftime '%Y-%m-%d' release_user = ENV['RELEASE_USER'] version_file = Dir['lib/**/version.rb'].first readme_files = Dir['README*.adoc', 'man/asciidoctor.adoc'] changelog_file = 'CHANGELOG.adoc' antora_file = 'docs/antora.yml' version_contents = (File.readlines version_file, mode: 'r:UTF-8').map do |l| (l.include? 'VERSION') ? (l.sub %r/'[^']+'/, %('#{release_gem_version}')) : l end readme_files = readme_files.map do |readme_file| readme_contents = (File.readlines readme_file, mode: 'r:UTF-8').map do |l| (l.start_with? ':release-version: ') ? %(:release-version: #{release_gem_version}\n) : l end if readme_file.include? 'README' if readme_contents[2].start_with? 'v' readme_contents[2] = %(v#{release_version}, #{release_date}\n) else readme_contents.insert 2, %(v#{release_version}, #{release_date}\n) end end [readme_file, readme_contents] end changelog_contents = (File.readlines changelog_file, mode: 'r:UTF-8').reject do |line| line == %(// tag::compact[]\n) || line == %(// end::compact[]\n) end if (last_release_idx = changelog_contents.index {|l| (l.start_with? '== ') && (%r/^== \d/.match? l) }) previous_release_version = (changelog_contents[last_release_idx].match %r/\d\S+/)[0] else changelog_contents << ?\n last_release_idx = changelog_contents.length end changelog_contents.insert last_release_idx, <<~END === Details {url-repo}/releases/tag/v#{release_version}[git tag]#{previous_release_version ? %( | {url-repo}/compare/v#{previous_release_version}\\...v#{release_version}[full diff]) : ''} // end::compact[] END if (unreleased_idx = changelog_contents.index {|l| (l.start_with? '== Unreleased') && l.rstrip == '== Unreleased' }) changelog_contents[unreleased_idx] = %(// tag::compact[]\n== #{release_version} (#{release_date}) - @#{release_user}\n) else changelog_contents.insert last_release_idx, <<~END // tag::compact[] == #{release_version} (#{release_date}) - @#{release_user} _No changes since previous release._ END end antora_contents = (File.readlines antora_file, mode: 'r:UTF-8').map do |l| if l.start_with? 'prerelease: ' %(prerelease: #{prerelease ? 'true' : 'false'}\n) elsif l.start_with? 'version: ' %(version: '#{release_version}'\n) elsif l.start_with? 
' release-version: ' %( release-version: '#{release_version}'\n) else l end end File.write version_file, version_contents.join, mode: 'w:UTF-8' readme_files.each {|readme_file, readme_contents| File.write readme_file, readme_contents.join, mode: 'w:UTF-8' } File.write changelog_file, changelog_contents.join, mode: 'w:UTF-8' File.write antora_file, antora_contents.join, mode: 'w:UTF-8' Asciidoctor.convert_file 'man/asciidoctor.adoc', backend: 'manpage', safe: :safe asciidoctor-2.0.20/test/000077500000000000000000000000001443135032600150565ustar00rootroot00000000000000asciidoctor-2.0.20/test/api_test.rb000066400000000000000000002151511443135032600172200ustar00rootroot00000000000000# frozen_string_literal: true require_relative 'test_helper' context 'API' do context 'Load' do test 'should load input file' do sample_input_path = fixture_path('sample.adoc') doc = File.open(sample_input_path, Asciidoctor::FILE_READ_MODE) {|file| Asciidoctor.load file, safe: Asciidoctor::SafeMode::SAFE } assert_equal 'Document Title', doc.doctitle assert_equal File.expand_path(sample_input_path), doc.attr('docfile') assert_equal File.expand_path(File.dirname(sample_input_path)), doc.attr('docdir') assert_equal '.adoc', doc.attr('docfilesuffix') end test 'should load input file from filename' do sample_input_path = fixture_path('sample.adoc') doc = Asciidoctor.load_file(sample_input_path, safe: Asciidoctor::SafeMode::SAFE) assert_equal 'Document Title', doc.doctitle assert_equal File.expand_path(sample_input_path), doc.attr('docfile') assert_equal File.expand_path(File.dirname(sample_input_path)), doc.attr('docdir') assert_equal '.adoc', doc.attr('docfilesuffix') end test 'should load input file from pathname' do sample_input_path = Pathname fixture_path 'sample.adoc' doc = Asciidoctor.load_file sample_input_path, safe: :safe assert_equal 'Document Title', doc.doctitle assert_equal sample_input_path.expand_path.to_s, (doc.attr 'docfile') assert_equal sample_input_path.expand_path.dirname.to_s, (doc.attr 'docdir') assert_equal '.adoc', (doc.attr 'docfilesuffix') end test 'should load input file with alternate file extension' do sample_input_path = fixture_path 'sample-alt-extension.asciidoc' doc = Asciidoctor.load_file sample_input_path, safe: :safe assert_equal 'Document Title', doc.doctitle assert_equal File.expand_path(sample_input_path), doc.attr('docfile') assert_equal File.expand_path(File.dirname(sample_input_path)), doc.attr('docdir') assert_equal '.asciidoc', doc.attr('docfilesuffix') end test 'should coerce encoding of file to UTF-8' do old_external = Encoding.default_external old_internal = Encoding.default_internal old_verbose = $VERBOSE begin $VERBOSE = nil # disable warnings since we have to modify constants input_path = fixture_path 'encoding.adoc' Encoding.default_external = Encoding.default_internal = Encoding::IBM437 output = Asciidoctor.convert_file input_path, to_file: false, safe: :safe assert_equal Encoding::UTF_8, output.encoding assert_include 'Romé', output ensure Encoding.default_external = old_external Encoding.default_internal = old_internal $VERBOSE = old_verbose end end test 'should not load file with unrecognized encoding' do begin tmp_input = Tempfile.new %w(test- .adoc), encoding: Encoding::IBM437 # NOTE using a character whose code differs between UTF-8 and IBM437 tmp_input.write %(ƒ\n) tmp_input.close exception = assert_raises ArgumentError do Asciidoctor.load_file tmp_input.path, safe: :safe end expected_message = 'Failed to load AsciiDoc document - source is either binary or 
contains invalid Unicode data' assert_include expected_message, exception.message ensure tmp_input.close! end end test 'should not load invalid file' do sample_input_path = fixture_path('hello-asciidoctor.pdf') exception = assert_raises ArgumentError do Asciidoctor.load_file(sample_input_path, safe: Asciidoctor::SafeMode::SAFE) end expected_message = 'Failed to load AsciiDoc document - source is either binary or contains invalid Unicode data' assert_include expected_message, exception.message # verify we have the correct backtrace (should be at least in the first 5 lines) assert_match(/reader\.rb.*prepare_lines/, exception.backtrace[0..4].join(?\n)) end # NOTE JRuby for Windows does not permit creating a file with non-Windows-1252 characters in the filename test 'should convert filename that contains non-ASCII characters independent of default encodings', unless: (jruby? && windows?) do old_external = Encoding.default_external old_internal = Encoding.default_internal old_verbose = $VERBOSE begin $VERBOSE = nil # disable warnings since we have to modify constants tmp_input = Tempfile.new %w(test-UTF8- .adoc) tmp_input.write %(UTF8\n) tmp_input.close Encoding.default_external = Encoding.default_internal = Encoding::IBM437 tmp_output = tmp_input.path.sub '.adoc', '.html' Asciidoctor.convert_file tmp_input.path, safe: :safe, attributes: 'linkcss !copycss' assert File.exist? tmp_output output = File.binread tmp_output refute_empty output # force encoding to UTF-8 and we should see that the string is in fact UTF-8 encoded output = String.new output, encoding: Encoding::UTF_8 assert_equal Encoding::UTF_8, output.encoding assert_include 'UTF8', output ensure tmp_input.close! FileUtils.rm_f tmp_output Encoding.default_external = old_external Encoding.default_internal = old_internal $VERBOSE = old_verbose end end test 'should load input IO' do input = StringIO.new <<~'EOS' Document Title ============== preamble EOS doc = Asciidoctor.load(input, safe: Asciidoctor::SafeMode::SAFE) assert_equal 'Document Title', doc.doctitle refute doc.attr?('docfile') assert_equal doc.base_dir, doc.attr('docdir') end test 'should load input string' do input = <<~'EOS' Document Title ============== preamble EOS doc = Asciidoctor.load(input, safe: Asciidoctor::SafeMode::SAFE) assert_equal 'Document Title', doc.doctitle refute doc.attr?('docfile') assert_equal doc.base_dir, doc.attr('docdir') end test 'should load input string array' do input = <<~'EOS' Document Title ============== preamble EOS doc = Asciidoctor.load(input.lines, safe: Asciidoctor::SafeMode::SAFE) assert_equal 'Document Title', doc.doctitle refute doc.attr?('docfile') assert_equal doc.base_dir, doc.attr('docdir') end test 'should load nil input' do doc = Asciidoctor.load nil, safe: :safe refute_nil doc assert_empty doc.blocks end test 'should ignore :to_file option if value is truthy but not a string' do sample_input_path = fixture_path 'sample.adoc' doc = Asciidoctor.load_file sample_input_path, safe: :safe, to_file: true refute_nil doc assert_equal 'Document Title', doc.doctitle assert_equal '.html', (doc.attr 'outfilesuffix') assert_equal doc.convert, (Asciidoctor.convert_file sample_input_path, safe: :safe, to_file: false) end test 'should set outfilesuffix attribute to file extension of value of :to_file option if value is a string' do sample_input_path = fixture_path 'sample.adoc' doc = Asciidoctor.load_file sample_input_path, safe: :safe, to_file: 'out.htm' refute_nil doc assert_equal 'Document Title', doc.doctitle assert_equal '.htm', (doc.attr 
'outfilesuffix') end test 'should accept attributes as array' do # NOTE there's a tab character before idseparator doc = Asciidoctor.load('text', attributes: %w(toc sectnums source-highlighter=coderay idprefix idseparator=-)) assert_kind_of Hash, doc.attributes assert doc.attr?('toc') assert_equal '', doc.attr('toc') assert doc.attr?('sectnums') assert_equal '', doc.attr('sectnums') assert doc.attr?('source-highlighter') assert_equal 'coderay', doc.attr('source-highlighter') assert doc.attr?('idprefix') assert_equal '', doc.attr('idprefix') assert doc.attr?('idseparator') assert_equal '-', doc.attr('idseparator') end test 'should accept attributes as empty array' do doc = Asciidoctor.load('text', attributes: []) assert_kind_of Hash, doc.attributes end test 'should accept attributes as string' do doc = Asciidoctor.load 'text', attributes: %(toc sectnums\nsource-highlighter=coderay\nidprefix\nidseparator=-) assert_kind_of Hash, doc.attributes assert doc.attr?('toc') assert_equal '', doc.attr('toc') assert doc.attr?('sectnums') assert_equal '', doc.attr('sectnums') assert doc.attr?('source-highlighter') assert_equal 'coderay', doc.attr('source-highlighter') assert doc.attr?('idprefix') assert_equal '', doc.attr('idprefix') assert doc.attr?('idseparator') assert_equal '-', doc.attr('idseparator') end test 'should accept values containing spaces in attributes string' do doc = Asciidoctor.load('text', attributes: %(idprefix idseparator=- note-caption=Note\\ to\\\tself toc)) assert_kind_of Hash, doc.attributes assert doc.attr?('idprefix') assert_equal '', doc.attr('idprefix') assert doc.attr?('idseparator') assert_equal '-', doc.attr('idseparator') assert doc.attr?('note-caption') assert_equal "Note to\tself", doc.attr('note-caption') end test 'should accept attributes as empty string' do doc = Asciidoctor.load('text', attributes: '') assert_kind_of Hash, doc.attributes end test 'should accept attributes as nil' do doc = Asciidoctor.load('text', attributes: nil) assert_kind_of Hash, doc.attributes end test 'should accept attributes if hash like' do class Hashlike def initialize @table = { 'toc' => '' } end def keys @table.keys end def [](key) @table[key] end end doc = Asciidoctor.load 'text', attributes: Hashlike.new assert_kind_of Hash, doc.attributes assert doc.attributes.key?('toc') end test 'should not expand value of docdir attribute if specified via API' do docdir = 'virtual/directory' doc = document_from_string '', safe: :safe, attributes: { 'docdir' => docdir } assert_equal docdir, (doc.attr 'docdir') assert_equal docdir, doc.base_dir end test 'converts block to output format when convert is called' do doc = Asciidoctor.load 'paragraph text' expected = <<~'EOS'.chop
    <div class="paragraph">
    <p>paragraph text</p>
    </div>
    EOS assert_equal 1, doc.blocks.length assert_equal :paragraph, doc.blocks[0].context assert_equal expected, doc.blocks[0].convert end test 'render method on node is aliased to convert method' do input = <<~'EOS' paragraph text * list item EOS doc = Asciidoctor.load input assert_equal 2, doc.blocks.length ([doc] + doc.blocks).each do |block| assert_equal block.method(:convert), block.method(:render) end inline = Asciidoctor::Inline.new doc.blocks[0], :image, nil, type: 'image', target: 'tiger.png' assert_equal inline.method(:convert), inline.method(:render) end test 'should output timestamps by default' do doc = document_from_string 'text', backend: :html5, attributes: nil result = doc.convert assert doc.attr?('docdate') refute doc.attr? 'reproducible' assert_xpath '//div[@id="footer-text" and contains(string(.//text()), "Last updated")]', result, 1 end test 'should not output timestamps if reproducible attribute is set in HTML 5' do doc = document_from_string 'text', backend: :html5, attributes: { 'reproducible' => '' } result = doc.convert assert doc.attr?('docdate') assert doc.attr?('reproducible') assert_xpath '//div[@id="footer-text" and contains(string(.//text()), "Last updated")]', result, 0 end test 'should not output timestamps if reproducible attribute is set in DocBook' do doc = document_from_string 'text', backend: :docbook, attributes: { 'reproducible' => '' } result = doc.convert assert doc.attr?('docdate') assert doc.attr?('reproducible') assert_xpath '/article/info/date', result, 0 end test 'should not modify options argument' do options = { safe: Asciidoctor::SafeMode::SAFE } options.freeze sample_input_path = fixture_path('sample.adoc') begin Asciidoctor.load_file sample_input_path, options rescue flunk %(options argument should not be modified) end end test 'should not modify attributes Hash argument' do attributes = {} attributes.freeze options = { safe: Asciidoctor::SafeMode::SAFE, attributes: attributes, } sample_input_path = fixture_path('sample.adoc') begin Asciidoctor.load_file sample_input_path, options rescue flunk %(attributes argument should not be modified) end end test 'should be able to restore header attributes after call to convert' do input = <<~'EOS' = Document Title :foo: bar content :foo: baz content EOS doc = Asciidoctor.load input assert_equal 'bar', (doc.attr 'foo') doc.convert assert_equal 'baz', (doc.attr 'foo') doc.restore_attributes assert_equal 'bar', (doc.attr 'foo') end test 'should track file and line information with blocks if sourcemap option is set' do doc = Asciidoctor.load_file fixture_path('sample.adoc'), sourcemap: true refute_nil doc.source_location assert_equal 'sample.adoc', doc.file assert_equal 1, doc.lineno preamble = doc.blocks[0] refute_nil preamble.source_location assert_equal 'sample.adoc', preamble.file assert_equal 6, preamble.lineno section_1 = doc.sections[0] assert_equal 'Section A', section_1.title refute_nil section_1.source_location assert_equal 'sample.adoc', section_1.file assert_equal 10, section_1.lineno section_2 = doc.sections[1] assert_equal 'Section B', section_2.title refute_nil section_2.source_location assert_equal 'sample.adoc', section_2.file assert_equal 18, section_2.lineno table_block = section_2.blocks[1] assert_equal :table, table_block.context refute_nil table_block.source_location assert_equal 'sample.adoc', table_block.file assert_equal 22, table_block.lineno first_cell = table_block.rows.body[0][0] refute_nil first_cell.source_location assert_equal 'sample.adoc', first_cell.file assert_equal 
23, first_cell.lineno second_cell = table_block.rows.body[0][1] refute_nil second_cell.source_location assert_equal 'sample.adoc', second_cell.file assert_equal 23, second_cell.lineno last_cell = table_block.rows.body[-1][-1] refute_nil last_cell.source_location assert_equal 'sample.adoc', last_cell.file assert_equal 24, last_cell.lineno last_block = section_2.blocks[-1] assert_equal :ulist, last_block.context refute_nil last_block.source_location assert_equal 'sample.adoc', last_block.file assert_equal 28, last_block.lineno list_items = last_block.blocks refute_nil list_items[0].source_location assert_equal 'sample.adoc', list_items[0].file assert_equal 28, list_items[0].lineno refute_nil list_items[1].source_location assert_equal 'sample.adoc', list_items[1].file assert_equal 29, list_items[1].lineno refute_nil list_items[2].source_location assert_equal 'sample.adoc', list_items[2].file assert_equal 30, list_items[2].lineno doc = Asciidoctor.load_file fixture_path('main.adoc'), sourcemap: true, safe: :safe section_1 = doc.sections[0] assert_equal 'Chapter A', section_1.title refute_nil section_1.source_location assert_equal fixture_path('chapter-a.adoc'), section_1.file assert_equal 1, section_1.lineno end test 'should track file and line information on list items if sourcemap option is set' do doc = Asciidoctor.load_file fixture_path('lists.adoc'), sourcemap: true first_section = doc.blocks[1] unordered_basic_list = first_section.blocks[0] assert_equal 11, unordered_basic_list.lineno unordered_basic_list_items = unordered_basic_list.find_by context: :list_item assert_equal 11, unordered_basic_list_items[0].lineno assert_equal 12, unordered_basic_list_items[1].lineno assert_equal 13, unordered_basic_list_items[2].lineno unordered_max_nesting = first_section.blocks[1] assert_equal 16, unordered_max_nesting.lineno unordered_max_nesting_items = unordered_max_nesting.find_by context: :list_item assert_equal 16, unordered_max_nesting_items[0].lineno assert_equal 17, unordered_max_nesting_items[1].lineno assert_equal 18, unordered_max_nesting_items[2].lineno assert_equal 19, unordered_max_nesting_items[3].lineno assert_equal 20, unordered_max_nesting_items[4].lineno assert_equal 21, unordered_max_nesting_items[5].lineno checklist = first_section.blocks[2] assert_equal 24, checklist.lineno checklist_list_items = checklist.find_by context: :list_item assert_equal 24, checklist_list_items[0].lineno assert_equal 25, checklist_list_items[1].lineno assert_equal 26, checklist_list_items[2].lineno assert_equal 27, checklist_list_items[3].lineno ordered_basic = first_section.blocks[3] assert_equal 30, ordered_basic.lineno ordered_basic_list_items = ordered_basic.find_by context: :list_item assert_equal 30, ordered_basic_list_items[0].lineno assert_equal 31, ordered_basic_list_items[1].lineno assert_equal 32, ordered_basic_list_items[2].lineno ordered_nested = first_section.blocks[4] assert_equal 35, ordered_nested.lineno ordered_nested_list_items = ordered_nested.find_by context: :list_item assert_equal 35, ordered_nested_list_items[0].lineno assert_equal 36, ordered_nested_list_items[1].lineno assert_equal 37, ordered_nested_list_items[2].lineno assert_equal 38, ordered_nested_list_items[3].lineno assert_equal 39, ordered_nested_list_items[4].lineno ordered_max_nesting = first_section.blocks[5] assert_equal 42, ordered_max_nesting.lineno ordered_max_nesting_items = ordered_max_nesting.find_by context: :list_item assert_equal 42, ordered_max_nesting_items[0].lineno assert_equal 43, 
ordered_max_nesting_items[1].lineno assert_equal 44, ordered_max_nesting_items[2].lineno assert_equal 45, ordered_max_nesting_items[3].lineno assert_equal 46, ordered_max_nesting_items[4].lineno assert_equal 47, ordered_max_nesting_items[5].lineno labeled_singleline = first_section.blocks[6] assert_equal 50, labeled_singleline.lineno labeled_singleline_items = labeled_singleline.find_by context: :list_item assert_equal 50, labeled_singleline_items[0].lineno assert_equal 50, labeled_singleline_items[1].lineno assert_equal 51, labeled_singleline_items[2].lineno assert_equal 51, labeled_singleline_items[3].lineno labeled_multiline = first_section.blocks[7] assert_equal 54, labeled_multiline.lineno labeled_multiline_items = labeled_multiline.find_by context: :list_item assert_equal 54, labeled_multiline_items[0].lineno assert_equal 55, labeled_multiline_items[1].lineno assert_equal 56, labeled_multiline_items[2].lineno assert_equal 57, labeled_multiline_items[3].lineno qanda = first_section.blocks[8] assert_equal 61, qanda.lineno qanda_items = qanda.find_by context: :list_item assert_equal 61, qanda_items[0].lineno assert_equal 62, qanda_items[1].lineno assert_equal 63, qanda_items[2].lineno assert_equal 63, qanda_items[3].lineno mixed = first_section.blocks[9] assert_equal 66, mixed.lineno mixed_items = mixed.find_by(context: :list_item) {|block| block.text? } assert_equal 66, mixed_items[0].lineno assert_equal 67, mixed_items[1].lineno assert_equal 68, mixed_items[2].lineno assert_equal 69, mixed_items[3].lineno assert_equal 70, mixed_items[4].lineno assert_equal 71, mixed_items[5].lineno assert_equal 72, mixed_items[6].lineno assert_equal 73, mixed_items[7].lineno assert_equal 74, mixed_items[8].lineno assert_equal 75, mixed_items[9].lineno assert_equal 77, mixed_items[10].lineno assert_equal 78, mixed_items[11].lineno assert_equal 79, mixed_items[12].lineno assert_equal 80, mixed_items[13].lineno assert_equal 81, mixed_items[14].lineno assert_equal 82, mixed_items[15].lineno assert_equal 83, mixed_items[16].lineno unordered_complex_list = first_section.blocks[10] assert_equal 86, unordered_complex_list.lineno unordered_complex_items = unordered_complex_list.find_by context: :list_item assert_equal 86, unordered_complex_items[0].lineno assert_equal 87, unordered_complex_items[1].lineno assert_equal 88, unordered_complex_items[2].lineno assert_equal 92, unordered_complex_items[3].lineno assert_equal 96, unordered_complex_items[4].lineno end # FIXME see #3966 test 'should assign incorrect lineno for single-line paragraph inside a conditional preprocessor directive' do input = <<~'EOS' :conditional-attribute: before ifdef::conditional-attribute[] subject endif::[] after EOS doc = document_from_string input, sourcemap: true # FIXME the second line number should be 6 instead of 7 assert_equal [3, 7, 9], (doc.find_by context: :paragraph).map(&:lineno) end test 'should assign correct lineno for multi-line paragraph inside a conditional preprocessor directive' do input = <<~'EOS' :conditional-attribute: before ifdef::conditional-attribute[] subject subject endif::[] after EOS doc = document_from_string input, sourcemap: true assert_equal [3, 6, 10], (doc.find_by context: :paragraph).map(&:lineno) end # NOTE this does not work for a list continuation that attached to a grandparent test 'should assign correct source location to blocks that follow a detached list continuation' do input = <<~'EOS' * parent ** child + paragraph attached to parent **** sidebar outside list **** EOS doc = 
document_from_string input, sourcemap: true assert_equal [5, 8], (doc.find_by context: :paragraph).map(&:lineno) end test 'should assign correct source location if section occurs on last line of input' do input = <<~'EOS' = Document Title == Section A content == Section B EOS doc = document_from_string input, sourcemap: true assert_equal [1, 3, 7], (doc.find_by context: :section).map(&:lineno) end test 'should allow sourcemap option on document to be modified before document is parsed' do doc = Asciidoctor.load_file fixture_path('sample.adoc'), parse: false doc.sourcemap = true refute doc.parsed? doc = doc.parse assert doc.parsed? section_1 = doc.sections[0] assert_equal 'Section A', section_1.title refute_nil section_1.source_location assert_equal 'sample.adoc', section_1.file assert_equal 10, section_1.lineno end test 'find_by should return Array of blocks anywhere in document tree that match criteria' do input = <<~'EOS' = Document Title preamble == Section A paragraph -- Exhibit A:: + [#tiger.animal] image::tiger.png[Tiger] -- image::shoe.png[Shoe] == Section B paragraph EOS doc = Asciidoctor.load input result = doc.find_by context: :image assert_equal 2, result.size assert_equal :image, result[0].context assert_equal 'tiger.png', result[0].attr('target') assert_equal :image, result[1].context assert_equal 'shoe.png', result[1].attr('target') end test 'find_by should return an empty Array if no matches are found' do input = 'paragraph' doc = Asciidoctor.load input result = doc.find_by context: :section refute_nil result assert_equal 0, result.size end test 'should only return matched node when return value of block argument is :prune' do input = <<~'EOS' * foo ** yin *** zen ** yang * bar * baz EOS doc = Asciidoctor.load input result = doc.find_by context: :list_item do |it| it.text == 'yin' ? :prune : false end assert_equal 1, result.size assert_equal 'yin', result[0].text end test 'find_by should discover blocks inside AsciiDoc table cells if traverse_documents selector option is true' do input = <<~'EOS' paragraph in parent document (before) [%footer,cols=2*] |=== a| paragraph in nested document (body) |normal table cell a| paragraph in nested document (foot) |normal table cell |=== paragraph in parent document (after) EOS doc = Asciidoctor.load input result = doc.find_by context: :paragraph assert_equal 2, result.size result = doc.find_by context: :paragraph, traverse_documents: true assert_equal 4, result.size end test 'find_by should return inner document of AsciiDoc table cell if traverse_documents selector option is true' do input = <<~'EOS' |=== a|paragraph in nested document |=== EOS doc = Asciidoctor.load input inner_doc = doc.blocks[0].rows.body[0][0].inner_document result = doc.find_by traverse_documents: true assert_include inner_doc, result result = doc.find_by context: :inner_document, traverse_documents: true assert_equal 1, result.size assert_equal inner_doc, result[0] end test 'find_by should match table cells' do input = <<~'EOS' |=== |a |b |c |1 one a|NOTE: 2, as it goes. 
l| 3 you me |=== EOS doc = document_from_string input table = doc.blocks[0] first_head_cell = table.rows.head[0][0] first_body_cell = table.rows.body[0][0] result = doc.find_by assert_include first_head_cell, result assert_include first_body_cell, result assert_equal 'a', first_head_cell.source assert_equal ['a'], first_head_cell.lines assert_equal %(1\none), first_body_cell.source assert_equal ['1', 'one'], first_body_cell.lines result = doc.find_by context: :table_cell, style: :asciidoc assert_equal 1, result.size assert_kind_of Asciidoctor::Table::Cell, result[0] assert_equal :asciidoc, result[0].style assert_equal 'NOTE: 2, as it goes.', result[0].source end test 'find_by should return Array of blocks that match style criteria' do input = <<~'EOS' [square] * one * two * three --- * apples * bananas * pears EOS doc = Asciidoctor.load input result = doc.find_by context: :ulist, style: 'square' assert_equal 1, result.size assert_equal :ulist, result[0].context end test 'find_by should return Array of blocks that match role criteria' do input = <<~'EOS' [#tiger.animal] image::tiger.png[Tiger] image::shoe.png[Shoe] EOS doc = Asciidoctor.load input result = doc.find_by context: :image, role: 'animal' assert_equal 1, result.size assert_equal :image, result[0].context assert_equal 'tiger.png', result[0].attr('target') end test 'find_by should return the document title section if context selector is :section' do input = <<~'EOS' = Document Title preamble == Section One content EOS doc = Asciidoctor.load input result = doc.find_by context: :section refute_nil result assert_equal 2, result.size assert_equal :section, result[0].context assert_equal 'Document Title', result[0].title end test 'find_by should only return results for which the block argument yields true' do input = <<~'EOS' == Section content === Subsection content EOS doc = Asciidoctor.load input result = doc.find_by(context: :section) {|sect| sect.level == 1 } refute_nil result assert_equal 1, result.size assert_equal :section, result[0].context assert_equal 'Section', result[0].title end test 'find_by should reject node and its children if block returns :reject' do input = <<~'EOS' paragraph 1 ==== paragraph 2 term:: + paragraph 3 ==== paragraph 4 EOS doc = Asciidoctor.load input result = doc.find_by do |candidate| case candidate.context when :example :reject when :paragraph true end end refute_nil result assert_equal 2, result.size assert_equal :paragraph, result[0].context assert_equal :paragraph, result[1].context end test 'find_by should reject node matched by ID selector if block returns :reject' do input = <<~'EOS' [.rolename] paragraph 1 [.rolename#idname] paragraph 2 EOS doc = Asciidoctor.load input result = doc.find_by id: 'idname', role: 'rolename' refute_nil result assert_equal 1, result.size assert_equal doc.blocks[1], result[0] result = doc.find_by(id: 'idname', role: 'rolename') { :reject } refute_nil result assert_equal 0, result.size end test 'find_by should accept node matched by ID selector if block returns :prune' do input = <<~'EOS' [.rolename] paragraph 1 [.rolename#idname] ==== paragraph 2 ==== EOS doc = Asciidoctor.load input result = doc.find_by id: 'idname', role: 'rolename' refute_nil result assert_equal 1, result.size assert_equal doc.blocks[1], result[0] result = doc.find_by(id: 'idname', role: 'rolename') { :prune } refute_nil result assert_equal 1, result.size assert_equal doc.blocks[1], result[0] end test 'find_by should accept node but reject its children if block returns :prune' do input = <<~'EOS' 
==== paragraph 2 term:: + paragraph 3 ==== EOS doc = Asciidoctor.load input result = doc.find_by do |candidate| if candidate.context == :example :prune end end refute_nil result assert_equal 1, result.size assert_equal :example, result[0].context end test 'find_by should stop looking for blocks when StopIteration is raised' do input = <<~'EOS' paragraph 1 ==== paragraph 2 **** paragraph 3 **** ==== paragraph 4 * item + paragraph 5 EOS doc = Asciidoctor.load input stop_at_next = false result = doc.find_by do |candidate| raise StopIteration if stop_at_next if candidate.context == :paragraph candidate.parent.context == :sidebar ? (stop_at_next = true) : true end end refute_nil result assert_equal 3, result.size assert_equal 'paragraph 1', result[0].content assert_equal 'paragraph 2', result[1].content assert_equal 'paragraph 3', result[2].content end test 'find_by should stop looking for blocks when filter block returns :stop directive' do input = <<~'EOS' paragraph 1 ==== paragraph 2 **** paragraph 3 **** ==== paragraph 4 * item + paragraph 5 EOS doc = Asciidoctor.load input stop_at_next = false result = doc.find_by do |candidate| next :stop if stop_at_next if candidate.context == :paragraph candidate.parent.context == :sidebar ? (stop_at_next = true) : true end end refute_nil result assert_equal 3, result.size assert_equal 'paragraph 1', result[0].content assert_equal 'paragraph 2', result[1].content assert_equal 'paragraph 3', result[2].content end test 'find_by should only return one result when matching by id' do input = <<~'EOS' == Section content [#subsection] === Subsection content EOS doc = Asciidoctor.load input result = doc.find_by(context: :section, id: 'subsection') refute_nil result assert_equal 1, result.size assert_equal :section, result[0].context assert_equal 'Subsection', result[0].title end test 'find_by should stop seeking once match is found' do input = <<~'EOS' == Section content [#subsection] === Subsection [#last] content EOS doc = Asciidoctor.load input visited_last = false result = doc.find_by(id: 'subsection') do |candidate| visited_last = true if candidate.id == 'last' true end refute_nil result assert_equal 1, result.size refute visited_last end test 'find_by should return an empty Array if the id criteria matches but the block argument yields false' do input = <<~'EOS' == Section content [#subsection] === Subsection content EOS doc = Asciidoctor.load input result = doc.find_by(context: :section, id: 'subsection') {|sect| false } refute_nil result assert_equal 0, result.size end test 'find_by should not crash if dlist entry does not have description' do input = 'term without description::' doc = Asciidoctor.load input result = doc.find_by refute_nil result assert_equal 3, result.size assert_kind_of Asciidoctor::Document, result[0] assert_kind_of Asciidoctor::List, result[1] assert_kind_of Asciidoctor::ListItem, result[2] end test 'dlist item should always have two entries for terms and desc' do [ 'term w/o desc::', %(term::\nalias::), %(primary:: 1\nsecondary:: 2), ].each do |input| dlist = (Asciidoctor.load input).blocks[0] dlist.items.each do |item| assert_equal 2, item.size assert_kind_of ::Array, item[0] assert_kind_of Asciidoctor::ListItem, item[1] if item[1] end end end test 'timings are recorded for each step when load and convert are called separately' do sample_input_path = fixture_path 'asciidoc_index.txt' (Asciidoctor.load_file sample_input_path, timings: (timings = Asciidoctor::Timings.new)).convert refute_equal '0.00000', '%05.5f' % 
timings.read_parse.to_f
          refute_equal '0.00000', '%05.5f' % timings.convert.to_f
          refute_equal timings.read_parse, timings.total
        end

        test 'can disable syntax highlighter by setting value to nil in :syntax_highlighters option' do
          doc = Asciidoctor.load '', safe: :safe, syntax_highlighters: { 'coderay' => nil }, attributes: { 'source-highlighter' => 'coderay' }
          assert_nil doc.syntax_highlighter
        end

        test 'can substitute a custom syntax highlighter factory instance using the :syntax_highlighter_factory option' do
          input = <<~'EOS'
          [source,ruby]
          ----
          puts 'Hello, World!'
          ----
          EOS
          # NOTE this tests both the lazy loading and the custom factory
          syntax_hl_factory = Asciidoctor::SyntaxHighlighter::CustomFactory.new 'github' => (Asciidoctor::SyntaxHighlighter.for 'html-pipeline')
          doc = Asciidoctor.load input, safe: :safe, syntax_highlighter_factory: syntax_hl_factory, attributes: { 'source-highlighter' => 'github' }
          refute_nil doc.syntax_highlighter
          assert_kind_of Asciidoctor::SyntaxHighlighter::HtmlPipelineAdapter, doc.syntax_highlighter
          assert_include '
    ', doc.convert
        end
    
        test 'can substitute an extended syntax highlighter factory implementation using the :syntax_highlighters option' do
          input = <<~'EOS'
          [source,ruby]
          ----
          puts 'Hello, World!'
          ----
          EOS
          syntax_hl_factory_class = Class.new do
            include Asciidoctor::SyntaxHighlighter::DefaultFactory
    
            def for name
              super 'highlight.js'
            end
          end
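          # NOTE this factory ignores the requested name and always resolves highlight.js, so the
          # coderay request below is expected to produce hljs output rather than CodeRay output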
          doc = Asciidoctor.load input, safe: :safe, syntax_highlighter_factory: syntax_hl_factory_class.new, attributes: { 'source-highlighter' => 'coderay' }
          refute_nil doc.syntax_highlighter
          output = doc.convert
          refute_include 'CodeRay', output
          assert_include 'hljs', output
        end
      end
    
      context 'Convert' do
        test 'render_file is aliased to convert_file' do
          assert_equal Asciidoctor.method(:convert_file), Asciidoctor.method(:render_file)
        end
    
        test 'render is aliased to convert' do
          assert_equal Asciidoctor.method(:convert), Asciidoctor.method(:render)
        end
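        # A rough usage sketch for the file-based API exercised below (the 'doc.adoc' path is
        # hypothetical): Asciidoctor.convert_file 'doc.adoc' writes doc.html next to the input,
        # while passing to_file: false returns the converted document as a String instead.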
    
        test 'should convert source document to embedded document when header_footer is false' do
          sample_input_path = fixture_path('sample.adoc')
          sample_output_path = fixture_path('sample.html')
    
          [{ header_footer: false }, { header_footer: false, to_file: sample_output_path }].each do |opts|
            begin
              Asciidoctor.convert_file sample_input_path, opts
              assert File.exist?(sample_output_path)
              output = File.read(sample_output_path, mode: Asciidoctor::FILE_READ_MODE)
              refute_empty output
              assert_xpath '/html', output, 0
              assert_css '#preamble', output, 1
            ensure
              FileUtils.rm(sample_output_path)
            end
          end
        end
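        # NOTE header_footer: false requests an embedded (body-only) document; the :standalone
        # option used in the following tests is the newer name for the same toggle.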
    
        test 'should convert source document to standalone document string when to_file is false and standalone is true' do
          sample_input_path = fixture_path('sample.adoc')
    
          output = Asciidoctor.convert_file sample_input_path, standalone: true, to_file: false
          refute_empty output
          assert_xpath '/html', output, 1
          assert_xpath '/html/head', output, 1
          assert_xpath '/html/body', output, 1
          assert_xpath '/html/head/title[text() = "Document Title"]', output, 1
          assert_xpath '/html/body/*[@id="header"]/h1[text() = "Document Title"]', output, 1
        end
    
        test 'should convert source document to standalone document string when to_file is false and header_footer is true' do
          sample_input_path = fixture_path('sample.adoc')
    
          output = Asciidoctor.convert_file sample_input_path, header_footer: true, to_file: false
          refute_empty output
          assert_xpath '/html', output, 1
          assert_xpath '/html/head', output, 1
          assert_xpath '/html/body', output, 1
          assert_xpath '/html/head/title[text() = "Document Title"]', output, 1
          assert_xpath '/html/body/*[@id="header"]/h1[text() = "Document Title"]', output, 1
        end
    
        test 'lines in output should be separated by line feed' do
          sample_input_path = fixture_path('sample.adoc')
    
          output = Asciidoctor.convert_file sample_input_path, standalone: true, to_file: false
          refute_empty output
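          # NOTE splitting on "\n" must yield the same line count as splitting on any line ending,
          # and rstrip must not change line lengths, which together show the output is LF-only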
          lines = output.split("\n")
          assert_equal lines.size, output.split(/\r\n|\r|\n/).size
          assert_equal lines.map(&:length), lines.map(&:rstrip).map(&:length)
        end
    
        test 'should accept attributes as array' do
          sample_input_path = fixture_path('sample.adoc')
          output = Asciidoctor.convert_file sample_input_path, attributes: %w(sectnums idprefix idseparator=-), to_file: false
          assert_css '#section-a', output, 1
        end
    
        test 'should accept attributes as string' do
          sample_input_path = fixture_path('sample.adoc')
          output = Asciidoctor.convert_file sample_input_path, attributes: 'sectnums idprefix idseparator=-', to_file: false
          assert_css '#section-a', output, 1
        end
    
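        # NOTE SECURE is the default safe mode for the API, so linkcss is implied unless the safe
        # mode is lowered or the stylesheet handling is changed explicitly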
        test 'should link to default stylesheet by default when safe mode is SECURE or greater' do
          sample_input_path = fixture_path('basic.adoc')
          output = Asciidoctor.convert_file sample_input_path, standalone: true, to_file: false
          assert_css 'html:root > head > link[rel="stylesheet"][href^="https://fonts.googleapis.com"]', output, 1
          assert_css 'html:root > head > link[rel="stylesheet"][href="./asciidoctor.css"]', output, 1
        end
    
        test 'should embed default stylesheet by default if SafeMode is less than SECURE' do
          input = <<~'EOS'
          = Document Title
    
          text
          EOS
    
          output = Asciidoctor.convert input, safe: Asciidoctor::SafeMode::SERVER, standalone: true
          assert_css 'html:root > head > link[rel="stylesheet"][href^="https://fonts.googleapis.com"]', output, 1
          assert_css 'html:root > head > link[rel="stylesheet"][href="./asciidoctor.css"]', output, 0
          stylenode = xmlnodes_at_css 'html:root > head > style', output, 1
          styles = stylenode.content
          refute_nil styles
          refute_empty styles.strip
        end
    
        test 'should embed remote stylesheet by default if SafeMode is less than SECURE and allow-uri-read is set' do
          input = <<~'EOS'
          = Document Title
    
          text
          EOS
    
          output = using_test_webserver do
            Asciidoctor.convert input, safe: Asciidoctor::SafeMode::SERVER, standalone: true, attributes: { 'allow-uri-read' => '', 'stylesheet' => %(http://#{resolve_localhost}:9876/fixtures/custom.css) }
          end
          stylenode = xmlnodes_at_css 'html:root > head > style', output, 1
          styles = stylenode.content
          refute_nil styles
          refute_empty styles.strip
          assert_include 'color: green', styles
        end
    
        test 'should not allow linkcss be unset from document if SafeMode is SECURE or greater' do
          input = <<~'EOS'
          = Document Title
          :linkcss!:
    
          text
          EOS
    
          output = Asciidoctor.convert input, standalone: true
          assert_css 'html:root > head > link[rel="stylesheet"][href^="https://fonts.googleapis.com"]', output, 1
          assert_css 'html:root > head > link[rel="stylesheet"][href="./asciidoctor.css"]', output, 1
        end
    
        test 'should embed default stylesheet if linkcss is unset from API and SafeMode is SECURE or greater' do
          input = <<~'EOS'
          = Document Title
    
          text
          EOS
    
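          # each form below unsets linkcss via the API: a bang-suffixed name, a nil value, or false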
          [{ 'linkcss!' => '' }, { 'linkcss' => nil }, { 'linkcss' => false }].each do |attrs|
            output = Asciidoctor.convert input, standalone: true, attributes: attrs
            assert_css 'html:root > head > link[rel="stylesheet"][href^="https://fonts.googleapis.com"]', output, 1
            assert_css 'html:root > head > link[rel="stylesheet"][href="./asciidoctor.css"]', output, 0
            stylenode = xmlnodes_at_css 'html:root > head > style', output, 1
            styles = stylenode.content
            refute_nil styles
            refute_empty styles.strip
          end
        end
    
        test 'should embed default stylesheet if safe mode is less than SECURE and linkcss is unset from API' do
          sample_input_path = fixture_path('basic.adoc')
          output = Asciidoctor.convert_file sample_input_path, standalone: true, to_file: false,
              safe: Asciidoctor::SafeMode::SAFE, attributes: { 'linkcss!' => '' }
          assert_css 'html:root > head > style', output, 1
          stylenode = xmlnodes_at_css 'html:root > head > style', output, 1
          styles = stylenode.content
          refute_nil styles
          refute_empty styles.strip
        end
    
        test 'should not link to stylesheet if stylesheet is unset' do
          input = <<~'EOS'
          = Document Title
    
          text
          EOS
    
          output = Asciidoctor.convert input, standalone: true, attributes: { 'stylesheet!' => '' }
          assert_css 'html:root > head > link[rel="stylesheet"][href^="https://fonts.googleapis.com"]', output, 0
          assert_css 'html:root > head > link[rel="stylesheet"]', output, 0
        end
    
        test 'should link to custom stylesheet if specified in stylesheet attribute' do
          input = <<~'EOS'
          = Document Title
    
          text
          EOS
    
          output = Asciidoctor.convert input, standalone: true, attributes: { 'stylesheet' => './custom.css' }
          assert_css 'html:root > head > link[rel="stylesheet"][href^="https://fonts.googleapis.com"]', output, 0
          assert_css 'html:root > head > link[rel="stylesheet"][href="./custom.css"]', output, 1
    
          output = Asciidoctor.convert input, standalone: true, attributes: { 'stylesheet' => 'file:///home/username/custom.css' }
          assert_css 'html:root > head > link[rel="stylesheet"][href="file:///home/username/custom.css"]', output, 1
        end
    
        test 'should resolve custom stylesheet relative to stylesdir' do
          input = <<~'EOS'
          = Document Title
    
          text
          EOS
    
          output = Asciidoctor.convert input, standalone: true, attributes: { 'stylesheet' => 'custom.css', 'stylesdir' => './stylesheets' }
          assert_css 'html:root > head > link[rel="stylesheet"][href="./stylesheets/custom.css"]', output, 1
        end
    
        test 'should resolve custom stylesheet to embed relative to stylesdir' do
          sample_input_path = fixture_path('basic.adoc')
          output = Asciidoctor.convert_file sample_input_path, standalone: true, safe: Asciidoctor::SafeMode::SAFE, to_file: false,
              attributes: { 'stylesheet' => 'custom.css', 'stylesdir' => './stylesheets', 'linkcss!' => '' }
          stylenode = xmlnodes_at_css 'html:root > head > style', output, 1
          styles = stylenode.content
          refute_nil styles
          refute_empty styles.strip
        end
    
        test 'should embed custom remote stylesheet if SafeMode is less than SECURE and allow-uri-read is set' do
          input = <<~'EOS'
          = Document Title
    
          text
          EOS
    
          output = using_test_webserver do
            Asciidoctor.convert input, safe: Asciidoctor::SafeMode::SERVER, standalone: true, attributes: { 'allow-uri-read' => '', 'stylesheet' => %(http://#{resolve_localhost}:9876/fixtures/custom.css) }
          end
          stylenode = xmlnodes_at_css 'html:root > head > style', output, 1
          styles = stylenode.content
          refute_nil styles
          refute_empty styles.strip
          assert_include 'color: green', styles
        end
    
        test 'should embed custom stylesheet read from classloader URI', if: jruby? do
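          # NOTE on JRuby, requiring a JAR adds it to the classpath, which is what allows the
          # uri:classloader: stylesdir below to resolve the stylesheet packaged in assets.jar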
          require fixture_path 'assets.jar'
          input = <<~'EOS'
          = Document Title
    
          text
          EOS
    
          output = Asciidoctor.convert input, safe: :unsafe, standalone: true, attributes: { 'stylesdir' => 'uri:classloader:/styles-in-jar', 'stylesheet' => 'custom.css' }
          stylenode = xmlnodes_at_css 'html:root > head > style', output, 1
          styles = stylenode.content
          refute_nil styles
          refute_empty styles.strip
          assert_include 'color: green', styles
        end
    
        test 'should embed custom stylesheet in remote stylesdir if SafeMode is less than SECURE and allow-uri-read is set' do
          input = <<~'EOS'
          = Document Title
    
          text
          EOS
    
          output = using_test_webserver do
            Asciidoctor.convert input, safe: Asciidoctor::SafeMode::SERVER, standalone: true, attributes: { 'allow-uri-read' => '', 'stylesdir' => %(http://#{resolve_localhost}:9876/fixtures), 'stylesheet' => 'custom.css' }
          end
          stylenode = xmlnodes_at_css 'html:root > head > style', output, 1
          styles = stylenode.content
          refute_nil styles
          refute_empty styles.strip
          assert_include 'color: green', styles
        end
    
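        # The next several tests cover the value types accepted by copycss: an empty string or
        # true copies the stylesheet named by the stylesheet attribute, while a path String, a
        # Pathname, or (on JRuby) a classloader URI names an alternate source file to copy.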
        test 'should copy custom stylesheet in folder to same folder in destination dir if copycss is set' do
          begin
            output_dir = fixture_path 'output'
            sample_input_path = fixture_path 'sample.adoc'
            sample_output_path = File.join output_dir, 'sample.html'
            custom_stylesheet_output_path = File.join output_dir, 'stylesheets', 'custom.css'
            Asciidoctor.convert_file sample_input_path, safe: :safe, to_dir: output_dir, mkdirs: true,
                attributes: { 'stylesheet' => 'stylesheets/custom.css', 'linkcss' => '', 'copycss' => '' }
            assert File.exist? sample_output_path
            assert File.exist? custom_stylesheet_output_path
            output = File.read sample_output_path, mode: Asciidoctor::FILE_READ_MODE
            assert_xpath '/html/head/link[@rel="stylesheet"][@href="./stylesheets/custom.css"]', output, 1
            assert_xpath 'style', output, 0
          ensure
            FileUtils.rm_r output_dir, force: true, secure: true
          end
        end
    
        test 'should copy custom stylesheet to destination dir if copycss is true' do
          begin
            output_dir = fixture_path 'output'
            sample_input_path = fixture_path 'sample.adoc'
            sample_output_path = File.join output_dir, 'sample.html'
            custom_stylesheet_output_path = File.join output_dir, 'custom.css'
            Asciidoctor.convert_file sample_input_path, safe: :safe, to_dir: output_dir, mkdirs: true,
                attributes: { 'stylesheet' => 'custom.css', 'linkcss' => true, 'copycss' => true }
            assert File.exist? sample_output_path
            assert File.exist? custom_stylesheet_output_path
            output = File.read sample_output_path, mode: Asciidoctor::FILE_READ_MODE
            assert_xpath '/html/head/link[@rel="stylesheet"][@href="./custom.css"]', output, 1
            assert_xpath 'style', output, 0
          ensure
            FileUtils.rm_r output_dir, force: true, secure: true
          end
        end
    
        test 'should copy custom stylesheet to destination dir if copycss is a path string' do
          begin
            output_dir = fixture_path 'output'
            sample_input_path = fixture_path 'sample.adoc'
            sample_output_path = File.join output_dir, 'sample.html'
            custom_stylesheet_output_path = File.join output_dir, 'styles.css'
            Asciidoctor.convert_file sample_input_path,
              safe: :safe, to_dir: output_dir, mkdirs: true, attributes: { 'stylesheet' => 'styles.css', 'linkcss' => true, 'copycss' => 'custom.css' }
            assert_path_exists sample_output_path
            assert_path_exists custom_stylesheet_output_path
            output = File.read sample_output_path, mode: Asciidoctor::FILE_READ_MODE
            assert_xpath '/html/head/link[@rel="stylesheet"][@href="./styles.css"]', output, 1
            assert_xpath 'style', output, 0
          ensure
            FileUtils.rm_r output_dir, force: true, secure: true
          end
        end
    
        test 'should copy custom stylesheet to destination dir if copycss is a Pathname object' do
          begin
            output_dir = fixture_path 'output'
            sample_input_path = fixture_path 'sample.adoc'
            sample_output_path = File.join output_dir, 'sample.html'
            custom_stylesheet_src_path = Pathname.new fixture_path 'custom.css'
            custom_stylesheet_output_path = File.join output_dir, 'styles.css'
            Asciidoctor.convert_file sample_input_path,
              safe: :safe, to_dir: output_dir, mkdirs: true, attributes: { 'stylesheet' => 'styles.css', 'linkcss' => true, 'copycss' => custom_stylesheet_src_path }
            assert_path_exists sample_output_path
            assert_path_exists custom_stylesheet_output_path
            output = File.read sample_output_path, mode: Asciidoctor::FILE_READ_MODE
            assert_xpath '/html/head/link[@rel="stylesheet"][@href="./styles.css"]', output, 1
            assert_xpath 'style', output, 0
          ensure
            FileUtils.rm_r output_dir, force: true, secure: true
          end
        end
    
        test 'should copy custom stylesheet to destination dir if copycss is a classloader URI', if: jruby? do
          require fixture_path 'assets.jar'
          begin
            output_dir = fixture_path 'output'
            sample_input_path = fixture_path 'sample.adoc'
            sample_output_path = File.join output_dir, 'sample.html'
            custom_stylesheet_src_path = 'uri:classloader:/styles-in-jar/custom.css'
            custom_stylesheet_output_path = File.join output_dir, 'styles.css'
            Asciidoctor.convert_file sample_input_path,
              safe: :unsafe, to_dir: output_dir, mkdirs: true, attributes: { 'stylesheet' => 'styles.css', 'linkcss' => true, 'copycss' => custom_stylesheet_src_path }
            assert_path_exists sample_output_path
            assert_path_exists custom_stylesheet_output_path
            output = File.read sample_output_path, mode: Asciidoctor::FILE_READ_MODE
            assert_xpath '/html/head/link[@rel="stylesheet"][@href="./styles.css"]', output, 1
            assert_xpath 'style', output, 0
          ensure
            FileUtils.rm_r output_dir, force: true, secure: true
          end
        end
    
        test 'should convert source file and write result to adjacent file by default' do
          sample_input_path = fixture_path('sample.adoc')
          sample_output_path = fixture_path('sample.html')
          begin
            Asciidoctor.convert_file sample_input_path
            assert File.exist?(sample_output_path)
            output = File.read(sample_output_path, mode: Asciidoctor::FILE_READ_MODE)
            refute_empty output
            assert_xpath '/html', output, 1
            assert_xpath '/html/head', output, 1
            assert_xpath '/html/body', output, 1
            assert_xpath '/html/head/title[text() = "Document Title"]', output, 1
            assert_xpath '/html/body/*[@id="header"]/h1[text() = "Document Title"]', output, 1
          ensure
            FileUtils.rm(sample_output_path)
          end
        end
    
        test 'should convert source file specified by pathname and write result to adjacent file by default' do
          sample_input_path = Pathname fixture_path 'sample.adoc'
          sample_output_path = Pathname fixture_path 'sample.html'
          begin
            doc = Asciidoctor.convert_file sample_input_path, safe: :safe
            assert_equal sample_output_path.expand_path.to_s, (doc.attr 'outfile')
            assert sample_output_path.file?
            output = sample_output_path.read mode: Asciidoctor::FILE_READ_MODE
            refute_empty output
            assert_xpath '/html', output, 1
            assert_xpath '/html/head', output, 1
            assert_xpath '/html/body', output, 1
            assert_xpath '/html/head/title[text() = "Document Title"]', output, 1
            assert_xpath '/html/body/*[@id="header"]/h1[text() = "Document Title"]', output, 1
          ensure
            sample_output_path.delete
          end
        end
    
        test 'should convert source file and write to specified file' do
          sample_input_path = fixture_path('sample.adoc')
          sample_output_path = fixture_path('result.html')
          begin
            Asciidoctor.convert_file sample_input_path, to_file: sample_output_path
            assert File.exist?(sample_output_path)
            output = File.read(sample_output_path, mode: Asciidoctor::FILE_READ_MODE)
            refute_empty output
            assert_xpath '/html', output, 1
            assert_xpath '/html/head', output, 1
            assert_xpath '/html/body', output, 1
            assert_xpath '/html/head/title[text() = "Document Title"]', output, 1
            assert_xpath '/html/body/*[@id="header"]/h1[text() = "Document Title"]', output, 1
          ensure
            FileUtils.rm(sample_output_path)
          end
        end
    
        test 'should convert source file and write to specified file in base_dir' do
          sample_input_path = fixture_path('sample.adoc')
          sample_output_path = fixture_path('result.html')
          fixture_dir = fixture_path('')
          begin
            Asciidoctor.convert_file sample_input_path, to_file: 'result.html', base_dir: fixture_dir
            assert File.exist?(sample_output_path)
            output = File.read(sample_output_path, mode: Asciidoctor::FILE_READ_MODE)
            refute_empty output
            assert_xpath '/html', output, 1
            assert_xpath '/html/head', output, 1
            assert_xpath '/html/body', output, 1
            assert_xpath '/html/head/title[text() = "Document Title"]', output, 1
            assert_xpath '/html/body/*[@id="header"]/h1[text() = "Document Title"]', output, 1
          rescue => e
            flunk e.message
          ensure
            FileUtils.rm(sample_output_path, force: true)
          end
        end
    
        test 'should resolve :to_dir option correctly when both :to_dir and :to_file options are set to an absolute path' do
          begin
            sample_input_path = fixture_path 'sample.adoc'
            sample_output_file = Tempfile.new %w(out- .html)
            sample_output_path = sample_output_file.path
            sample_output_dir = File.dirname sample_output_path
            sample_output_file.close
            doc = Asciidoctor.convert_file sample_input_path, to_file: sample_output_path, to_dir: sample_output_dir, safe: :unsafe
            assert File.exist? sample_output_path
            assert_equal sample_output_path, doc.options[:to_file]
            assert_equal sample_output_dir, doc.options[:to_dir]
          ensure
            sample_output_file.close!
          end
        end
    
        test 'in_place option is ignored when to_file is specified' do
          sample_input_path = fixture_path('sample.adoc')
          sample_output_path = fixture_path('result.html')
          begin
            Asciidoctor.convert_file sample_input_path, to_file: sample_output_path, in_place: true
            assert File.exist?(sample_output_path)
          ensure
            FileUtils.rm(sample_output_path) if File.exist? sample_output_path
          end
        end
    
        test 'in_place option is ignored when to_dir is specified' do
          sample_input_path = fixture_path('sample.adoc')
          sample_output_path = fixture_path('sample.html')
          begin
            Asciidoctor.convert_file sample_input_path, to_dir: File.dirname(sample_output_path), in_place: true
            assert File.exist?(sample_output_path)
          ensure
            FileUtils.rm(sample_output_path) if File.exist? sample_output_path
          end
        end
    
        test 'should set outfilesuffix to match file extension of target file' do
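          # the document body is just an attribute reference, so the converted output reveals the
          # value that outfilesuffix resolves to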
          sample_input = '{outfilesuffix}'
          sample_output_path = fixture_path('result.htm')
          begin
            Asciidoctor.convert sample_input, to_file: sample_output_path
            assert File.exist?(sample_output_path)
            output = File.read(sample_output_path, mode: Asciidoctor::FILE_READ_MODE)
            refute_empty output
            assert_include '<p>.htm</p>', output
          ensure
            FileUtils.rm(sample_output_path)
          end
        end

        test 'should respect outfilesuffix soft set from API' do
          sample_input_path = fixture_path('sample.adoc')
          sample_output_path = fixture_path('sample.htm')
          begin
            Asciidoctor.convert_file sample_input_path, to_dir: (File.dirname sample_input_path), attributes: { 'outfilesuffix' => '.htm@' }
            assert File.exist?(sample_output_path)
          ensure
            FileUtils.rm(sample_output_path)
          end
        end

        test 'output should be relative to to_dir option' do
          sample_input_path = fixture_path('sample.adoc')
          output_dir = File.join(File.dirname(sample_input_path), 'test_output')
          Dir.mkdir output_dir unless File.exist? output_dir
          sample_output_path = File.join(output_dir, 'sample.html')
          begin
            Asciidoctor.convert_file sample_input_path, to_dir: output_dir
            assert File.exist? sample_output_path
          ensure
            FileUtils.rm(sample_output_path) if File.exist? sample_output_path
            FileUtils.rmdir output_dir
          end
        end

        test 'missing directories should be created if mkdirs is enabled' do
          sample_input_path = fixture_path('sample.adoc')
          output_dir = File.join(File.join(File.dirname(sample_input_path), 'test_output'), 'subdir')
          sample_output_path = File.join(output_dir, 'sample.html')
          begin
            Asciidoctor.convert_file sample_input_path, to_dir: output_dir, mkdirs: true
            assert File.exist? sample_output_path
          ensure
            FileUtils.rm(sample_output_path) if File.exist? sample_output_path
            FileUtils.rmdir output_dir
            FileUtils.rmdir File.dirname(output_dir)
          end
        end

        # TODO need similar test for when to_dir is specified
        test 'should raise exception if an attempt is made to overwrite input file' do
          sample_input_path = fixture_path('sample.adoc')
          assert_raises IOError do
            Asciidoctor.convert_file sample_input_path, attributes: { 'outfilesuffix' => '.adoc' }
          end
        end

        test 'to_file should be relative to to_dir when both given' do
          sample_input_path = fixture_path('sample.adoc')
          base_dir = File.dirname(sample_input_path)
          sample_rel_output_path = File.join('test_output', 'result.html')
          output_dir = File.dirname(File.join(base_dir, sample_rel_output_path))
          Dir.mkdir output_dir unless File.exist? output_dir
          sample_output_path = File.join(base_dir, sample_rel_output_path)
          begin
            Asciidoctor.convert_file sample_input_path, to_dir: base_dir, to_file: sample_rel_output_path
            assert File.exist? sample_output_path
          ensure
            FileUtils.rm(sample_output_path) if File.exist?
sample_output_path FileUtils.rmdir output_dir end end test 'should not modify options argument' do options = { safe: Asciidoctor::SafeMode::SAFE, to_file: false, } options.freeze sample_input_path = fixture_path('sample.adoc') begin Asciidoctor.convert_file sample_input_path, options rescue flunk %(options argument should not be modified) end end test 'should set to_dir option to parent directory of specified output file' do sample_input_path = fixture_path 'basic.adoc' sample_output_path = fixture_path 'basic.html' begin doc = Asciidoctor.convert_file sample_input_path, to_file: sample_output_path assert_equal File.dirname(sample_output_path), doc.options[:to_dir] ensure FileUtils.rm(sample_output_path) end end test 'should set to_dir option to parent directory of specified output directory and file' do sample_input_path = fixture_path 'basic.adoc' sample_output_path = fixture_path 'basic.html' fixture_base_path = File.dirname sample_output_path fixture_parent_path = File.dirname fixture_base_path sample_output_relpath = File.join 'fixtures', 'basic.html' begin doc = Asciidoctor.convert_file sample_input_path, to_dir: fixture_parent_path, to_file: sample_output_relpath assert_equal fixture_base_path, doc.options[:to_dir] ensure FileUtils.rm(sample_output_path) end end test 'timings are recorded for each step' do sample_input_path = fixture_path 'asciidoc_index.txt' Asciidoctor.convert_file sample_input_path, timings: (timings = Asciidoctor::Timings.new), to_file: false refute_equal '0.00000', '%05.5f' % timings.read_parse.to_f refute_equal '0.00000', '%05.5f' % timings.convert.to_f refute_equal timings.read_parse, timings.total end test 'can override syntax highlighter using syntax_highlighters option' do syntax_hl = Class.new Asciidoctor::SyntaxHighlighter::Base do def highlight? true end def highlight node, source, lang, opts 'highlighted' end end input = <<~'EOS' [source,ruby] ---- puts 'Hello, World!' ---- EOS output = Asciidoctor.convert input, safe: :safe, syntax_highlighters: { 'coderay' => syntax_hl }, attributes: { 'source-highlighter' => 'coderay' } assert_css 'pre.highlight > code[data-lang="ruby"]', output, 1 assert_xpath '//pre[@class="coderay highlight"]/code[text()="highlighted"]', output, 1 end end context 'AST' do test 'with no author' do input = <<~'EOS' = Getting Real: The Smarter, Faster, Easier Way to Build a Successful Web Application Getting Real details the business, design, programming, and marketing principles of 37signals. EOS doc = document_from_string input assert_equal 0, doc.authors.size end test 'with one author' do input = <<~'EOS' = Getting Real: The Smarter, Faster, Easier Way to Build a Successful Web Application David Heinemeier Hansson Getting Real details the business, design, programming, and marketing principles of 37signals. EOS doc = document_from_string input authors = doc.authors assert_equal 1, authors.size author_1 = authors[0] assert_equal 'david@37signals.com', author_1.email assert_equal 'David Heinemeier Hansson', author_1.name assert_equal 'David', author_1.firstname assert_equal 'Heinemeier', author_1.middlename assert_equal 'Hansson', author_1.lastname assert_equal 'DHH', author_1.initials end test 'with two authors' do input = <<~'EOS' = Getting Real: The Smarter, Faster, Easier Way to Build a Successful Web Application David Heinemeier Hansson ; Jason Fried Getting Real details the business, design, programming, and marketing principles of 37signals. 
EOS doc = document_from_string input authors = doc.authors assert_equal 2, authors.size author_1 = authors[0] assert_equal 'david@37signals.com', author_1.email assert_equal 'David Heinemeier Hansson', author_1.name assert_equal 'David', author_1.firstname assert_equal 'Heinemeier', author_1.middlename assert_equal 'Hansson', author_1.lastname assert_equal 'DHH', author_1.initials author_2 = authors[1] assert_equal 'jason@37signals.com', author_2.email assert_equal 'Jason Fried', author_2.name assert_equal 'Jason', author_2.firstname assert_nil author_2.middlename assert_equal 'Fried', author_2.lastname assert_equal 'JF', author_2.initials end test 'with authors as attributes' do input = <<~'EOS' = Getting Real: The Smarter, Faster, Easier Way to Build a Successful Web Application :author_1: David Heinemeier Hansson :email_1: david@37signals.com :author_2: Jason Fried :email_2: jason@37signals.com Getting Real details the business, design, programming, and marketing principles of 37signals. EOS doc = document_from_string input authors = doc.authors assert_equal 2, authors.size author_1 = authors[0] assert_equal 'david@37signals.com', author_1.email assert_equal 'David Heinemeier Hansson', author_1.name assert_equal 'David', author_1.firstname assert_equal 'Heinemeier', author_1.middlename assert_equal 'Hansson', author_1.lastname assert_equal 'DHH', author_1.initials author_2 = authors[1] assert_equal 'jason@37signals.com', author_2.email assert_equal 'Jason Fried', author_2.name assert_equal 'Jason', author_2.firstname assert_nil author_2.middlename assert_equal 'Fried', author_2.lastname assert_equal 'JF', author_2.initials end test 'should not crash if nil cell text is passed to Cell constructor' do input = <<~'EOS' |=== |a |=== EOS table = (document_from_string input).blocks[0] cell = Asciidoctor::Table::Cell.new table.rows.body[0][0].column, nil refute cell.style assert_same Asciidoctor::AbstractNode::NORMAL_SUBS, cell.subs assert_equal '', cell.text end test 'should set option on node when set_option is called' do input = <<~'EOS' . three . two . one EOS block = (document_from_string input).blocks[0] block.set_option('reversed') assert block.option? 'reversed' assert_equal '', block.attributes['reversed-option'] end test 'enabled_options should return all options which are set' do input = <<~'EOS' [%interactive] * [x] code * [ ] test * [ ] profit EOS block = (document_from_string input).blocks[0] assert_equal %w(checklist interactive).to_set, block.enabled_options end test 'should append option to existing options' do input = <<~'EOS' [%fancy] . three . two . one EOS block = (document_from_string input).blocks[0] block.set_option('reversed') assert block.option? 'fancy' assert block.option? 'reversed' end test 'should not append option if option is already set' do input = <<~'EOS' [%reversed] . three . two . one EOS block = (document_from_string input).blocks[0] refute block.set_option('reversed') assert_equal '', block.attributes['reversed-option'] end test 'should return set of option names' do input = <<~'EOS' [%compact%reversed] . three . two . one EOS block = (document_from_string input).blocks[0] assert_equal %w(compact reversed).to_set, block.enabled_options end test 'table column should not be a block or inline' do input = <<~'EOS' |=== |a |=== EOS col = (document_from_string input).blocks[0].columns[0] refute col.block? refute col.inline? 
end test 'table cell should be a block' do input = <<~'EOS' |=== |a |=== EOS cell = (document_from_string input).blocks[0].rows.body[0][0] assert_kind_of ::Asciidoctor::AbstractBlock, cell assert cell.block? refute cell.inline? end test 'next_adjacent_block should return next block' do input = <<~'EOS' first second EOS doc = document_from_string input assert_equal doc.blocks[1], doc.blocks[0].next_adjacent_block end test 'next_adjacent_block should return next sibling of parent if called on last sibling' do input = <<~'EOS' -- first -- second EOS doc = document_from_string input assert_equal doc.blocks[1], doc.blocks[0].blocks[0].next_adjacent_block end test 'next_adjacent_block should return next sibling of list if called on last item' do input = <<~'EOS' * first second EOS doc = document_from_string input assert_equal doc.blocks[1], doc.blocks[0].blocks[0].next_adjacent_block end test 'next_adjacent_block should return next item in dlist if called on last block of list item' do input = <<~'EOS' first:: desc + more desc second:: desc EOS doc = document_from_string input assert_equal doc.blocks[0].items[1], doc.blocks[0].items[0][1].blocks[0].next_adjacent_block end test 'should return true when sections? is called on a document or section that has sections' do input = <<~'EOS' = Document Title == First Section === First subsection content EOS doc = document_from_string input assert doc.sections? assert doc.blocks[0].sections? end test 'should return false when sections? is called on a document with no sections' do input = <<~'EOS' = Document Title content EOS doc = document_from_string input refute doc.sections? end test 'should return false when sections? is called on a section with no sections' do input = <<~'EOS' = Document Title == First Section EOS doc = document_from_string input refute doc.blocks[0].sections? end test 'should return false when sections? is called on anything that is not a section' do input = <<~'EOS' .Title ==== I'm not section! ==== [NOTE] I'm not a section either! EOS doc = document_from_string input refute doc.blocks[0].sections? refute doc.blocks[1].sections? 
end end end asciidoctor-2.0.20/test/attribute_list_test.rb000066400000000000000000000227541443135032600215120ustar00rootroot00000000000000# frozen_string_literal: true require_relative 'test_helper' context 'AttributeList' do test 'collect unnamed attribute' do attributes = {} line = 'quote' expected = { 1 => 'quote' } Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect unnamed attribute double-quoted' do attributes = {} line = '"quote"' expected = { 1 => 'quote' } Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect empty unnamed attribute double-quoted' do attributes = {} line = '""' expected = { 1 => '' } Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect unnamed attribute double-quoted containing escaped quote' do attributes = {} line = '"ba\"zaar"' expected = { 1 => 'ba"zaar' } Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect unnamed attribute single-quoted' do attributes = {} line = '\'quote\'' expected = { 1 => 'quote' } Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect empty unnamed attribute single-quoted' do attributes = {} line = '\'\'' expected = { 1 => '' } Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect isolated single quote positional attribute' do attributes = {} line = '\'' expected = { 1 => '\'' } doc = empty_document def doc.apply_subs *args raise 'apply_subs should not be called' end Asciidoctor::AttributeList.new(line, doc).parse_into(attributes) assert_equal expected, attributes end test 'collect isolated single quote attribute value' do attributes = {} line = 'name=\'' expected = { 'name' => '\'' } doc = empty_document def doc.apply_subs *args raise 'apply_subs should not be called' end Asciidoctor::AttributeList.new(line, doc).parse_into(attributes) assert_equal expected, attributes end test 'collect attribute value as is if it has only leading single quote' do attributes = {} line = 'name=\'{val}' expected = { 'name' => '\'{val}' } doc = empty_document attributes: { 'val' => 'val' } def doc.apply_subs *args raise 'apply_subs should not be called' end Asciidoctor::AttributeList.new(line, doc).parse_into(attributes) assert_equal expected, attributes end test 'collect unnamed attribute single-quoted containing escaped quote' do attributes = {} line = '\'ba\\\'zaar\'' expected = { 1 => 'ba\'zaar' } Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect unnamed attribute with dangling delimiter' do attributes = {} line = 'quote , ' expected = { 1 => 'quote', 2 => nil } Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect unnamed attribute in second position after empty attribute' do attributes = {} line = ', John Smith' expected = { 1 => nil, 2 => 'John Smith' } Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect unnamed attributes' do attributes = {} line = 'first, second one, third' expected = { 1 => 'first', 2 => 'second one', 3 => 'third' } Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect blank unnamed attributes' do attributes = {} line = 'first,,third,' expected = { 1 
=> 'first', 2 => nil, 3 => 'third', 4 => nil } Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect unnamed attribute enclosed in equal signs' do attributes = {} line = '=foo=' expected = { 1 => '=foo=' } Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect named attribute' do attributes = {} line = 'foo=bar' expected = { 'foo' => 'bar' } Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect named attribute double-quoted' do attributes = {} line = 'foo="bar"' expected = { 'foo' => 'bar' } Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect named attribute with double-quoted empty value' do attributes = {} line = 'height=100,caption="",link="images/octocat.png"' expected = { 'height' => '100', 'caption' => '', 'link' => 'images/octocat.png' } Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect named attribute single-quoted' do attributes = {} line = 'foo=\'bar\'' expected = { 'foo' => 'bar' } Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect named attribute with single-quoted empty value' do attributes = {} line = %(height=100,caption='',link='images/octocat.png') expected = { 'height' => '100', 'caption' => '', 'link' => 'images/octocat.png' } Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect single named attribute with empty value' do attributes = {} line = 'foo=' expected = { 'foo' => '' } Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect single named attribute with empty value when followed by other attributes' do attributes = {} line = 'foo=,bar=baz' expected = { 'foo' => '', 'bar' => 'baz' } Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect named attributes unquoted' do attributes = {} line = 'first=value, second=two, third=3' expected = { 'first' => 'value', 'second' => 'two', 'third' => '3' } Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect named attributes quoted' do attributes = {} line = %(first='value', second="value two", third=three) expected = { 'first' => 'value', 'second' => 'value two', 'third' => 'three' } Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect named attributes quoted containing non-semantic spaces' do attributes = {} line = %( first = 'value', second ="value two" , third= three ) expected = { 'first' => 'value', 'second' => 'value two', 'third' => 'three' } Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect mixed named and unnamed attributes' do attributes = {} line = %(first, second="value two", third=three, Sherlock Holmes) expected = { 1 => 'first', 'second' => 'value two', 'third' => 'three', 4 => 'Sherlock Holmes' } Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect mixed empty named and blank unnamed attributes' do attributes = {} line = 'first,,third=,,fifth=five' expected = { 1 => 'first', 2 => nil, 'third' => '', 4 => nil, 'fifth' => 'five' } 
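# NOTE positional (unnamed) attributes are keyed by their 1-based position, while named
# attributes are keyed by name; a skipped position is recorded as nil, as seen in the expected hash above.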
Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect options attribute' do attributes = {} line = %(quote, options='opt1,,opt2 , opt3') expected = { 1 => 'quote', 'opt1-option' => '', 'opt2-option' => '', 'opt3-option' => '' } Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect opts attribute as options' do attributes = {} line = %(quote, opts='opt1,,opt2 , opt3') expected = { 1 => 'quote', 'opt1-option' => '', 'opt2-option' => '', 'opt3-option' => '' } Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'should ignore options attribute if empty' do attributes = {} line = %(quote, opts=) expected = { 1 => 'quote' } Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect and rekey unnamed attributes' do attributes = {} line = 'first, second one, third, fourth' expected = { 1 => 'first', 2 => 'second one', 3 => 'third', 4 => 'fourth', 'a' => 'first', 'b' => 'second one', 'c' => 'third' } Asciidoctor::AttributeList.new(line).parse_into(attributes, ['a', 'b', 'c']) assert_equal expected, attributes end test 'should not assign nil to attribute mapped to missing positional attribute' do attributes = {} line = 'alt text,,100' expected = { 1 => 'alt text', 2 => nil, 3 => '100', 'alt' => 'alt text', 'height' => '100' } Asciidoctor::AttributeList.new(line).parse_into(attributes, %w(alt width height)) assert_equal expected, attributes end test 'rekey positional attributes' do attributes = { 1 => 'source', 2 => 'java' } expected = { 1 => 'source', 2 => 'java', 'style' => 'source', 'language' => 'java' } Asciidoctor::AttributeList.rekey(attributes, ['style', 'language', 'linenums']) assert_equal expected, attributes end end asciidoctor-2.0.20/test/attributes_test.rb000066400000000000000000001577311443135032600206460ustar00rootroot00000000000000# frozen_string_literal: true require_relative 'test_helper' context 'Attributes' do default_logger = Asciidoctor::LoggerManager.logger setup do Asciidoctor::LoggerManager.logger = (@logger = Asciidoctor::MemoryLogger.new) end teardown do Asciidoctor::LoggerManager.logger = default_logger end context 'Assignment' do test 'creates an attribute' do doc = document_from_string(':frog: Tanglefoot') assert_equal 'Tanglefoot', doc.attributes['frog'] end test 'requires a space after colon following attribute name' do doc = document_from_string 'foo:bar' assert_nil doc.attributes['foo'] end # NOTE AsciiDoc.py recognizes this entry test 'does not recognize attribute entry if name contains colon' do input = ':foo:bar: baz' doc = document_from_string input refute doc.attr?('foo:bar') assert_equal 1, doc.blocks.size assert_equal :paragraph, doc.blocks[0].context end # NOTE AsciiDoc.py recognizes this entry test 'does not recognize attribute entry if name ends with colon' do input = ':foo:: bar' doc = document_from_string input refute doc.attr?('foo:') assert_equal 1, doc.blocks.size assert_equal :dlist, doc.blocks[0].context end # NOTE AsciiDoc.py does not recognize this entry test 'allows any word character defined by Unicode in an attribute name' do [['café', 'a coffee shop'], ['سمن', %(سازمان مردمنهاد)]].each do |(name, value)| str = <<~EOS :#{name}: #{value} {#{name}} EOS result = convert_string_to_embedded str assert_includes result, %(
<div class="paragraph">
<p>#{value}</p>
    ) end end test 'creates an attribute by fusing a legacy multi-line value' do str = <<~'EOS' :description: This is the first + Ruby implementation of + AsciiDoc. EOS doc = document_from_string(str) assert_equal 'This is the first Ruby implementation of AsciiDoc.', doc.attributes['description'] end test 'creates an attribute by fusing a multi-line value' do str = <<~'EOS' :description: This is the first \ Ruby implementation of \ AsciiDoc. EOS doc = document_from_string(str) assert_equal 'This is the first Ruby implementation of AsciiDoc.', doc.attributes['description'] end test 'honors line break characters in multi-line values' do str = <<~'EOS' :signature: Linus Torvalds + \ Linux Hacker + \ linus.torvalds@example.com EOS doc = document_from_string(str) assert_equal %(Linus Torvalds +\nLinux Hacker +\nlinus.torvalds@example.com), doc.attributes['signature'] end test 'should allow pass macro to surround a multi-line value that contains line breaks' do str = <<~'EOS' :signature: pass:a[{author} + \ {title} + \ {email}] EOS doc = document_from_string str, attributes: { 'author' => 'Linus Torvalds', 'title' => 'Linux Hacker', 'email' => 'linus.torvalds@example.com' } assert_equal %(Linus Torvalds +\nLinux Hacker +\nlinus.torvalds@example.com), (doc.attr 'signature') end test 'should delete an attribute that ends with !' do doc = document_from_string(":frog: Tanglefoot\n:frog!:") assert_nil doc.attributes['frog'] end test 'should delete an attribute that ends with ! set via API' do doc = document_from_string(":frog: Tanglefoot", attributes: { 'frog!' => '' }) assert_nil doc.attributes['frog'] end test 'should delete an attribute that begins with !' do doc = document_from_string(":frog: Tanglefoot\n:!frog:") assert_nil doc.attributes['frog'] end test 'should delete an attribute that begins with ! 
set via API' do doc = document_from_string(":frog: Tanglefoot", attributes: { '!frog' => '' }) assert_nil doc.attributes['frog'] end test 'should delete an attribute set via API to nil value' do doc = document_from_string(":frog: Tanglefoot", attributes: { 'frog' => nil }) assert_nil doc.attributes['frog'] end test "doesn't choke when deleting a non-existing attribute" do doc = document_from_string(':frog!:') assert_nil doc.attributes['frog'] end test "replaces special characters in attribute value" do doc = document_from_string(":xml-busters: <>&") assert_equal '<>&', doc.attributes['xml-busters'] end test "performs attribute substitution on attribute value" do doc = document_from_string(":version: 1.0\n:release: Asciidoctor {version}") assert_equal 'Asciidoctor 1.0', doc.attributes['release'] end test 'assigns attribute to empty string if substitution fails to resolve attribute' do input = ':release: Asciidoctor {version}' document_from_string input, attributes: { 'attribute-missing' => 'drop-line' } assert_message @logger, :INFO, 'dropping line containing reference to missing attribute: version' end test 'assigns multi-line attribute to empty string if substitution fails to resolve attribute' do input = <<~'EOS' :release: Asciidoctor + {version} EOS doc = document_from_string input, attributes: { 'attribute-missing' => 'drop-line' } assert_equal '', doc.attributes['release'] assert_message @logger, :INFO, 'dropping line containing reference to missing attribute: version' end test 'resolves attributes inside attribute value within header' do input = <<~'EOS' = Document Title :big: big :bigfoot: {big}foot {bigfoot} EOS result = convert_string_to_embedded input assert_includes result, 'bigfoot' end test 'resolves attributes and pass macro inside attribute value outside header' do input = <<~'EOS' = Document Title content :big: pass:a,q[_big_] :bigfoot: {big}foot {bigfoot} EOS result = convert_string_to_embedded input assert_includes result, 'bigfoot' end test 'should limit maximum size of attribute value if safe mode is SECURE' do expected = 'a' * 4096 input = <<~EOS :name: #{'a' * 5000} {name} EOS result = convert_inline_string input assert_equal expected, result assert_equal 4096, result.bytesize end test 'should handle multibyte characters when limiting attribute value size' do expected = '日本' input = <<~'EOS' :name: 日本語 {name} EOS result = convert_inline_string input, attributes: { 'max-attribute-value-size' => 6 } assert_equal expected, result assert_equal 6, result.bytesize end test 'should not mangle multibyte characters when limiting attribute value size' do expected = '日本' input = <<~'EOS' :name: 日本語 {name} EOS result = convert_inline_string input, attributes: { 'max-attribute-value-size' => 8 } assert_equal expected, result assert_equal 6, result.bytesize end test 'should allow maximize size of attribute value to be disabled' do expected = 'a' * 5000 input = <<~EOS :name: #{'a' * 5000} {name} EOS result = convert_inline_string input, attributes: { 'max-attribute-value-size' => nil } assert_equal expected, result assert_equal 5000, result.bytesize end test 'resolves user-home attribute if safe mode is less than SERVER' do input = <<~'EOS' :imagesdir: {user-home}/etc/images {imagesdir} EOS output = convert_inline_string input, safe: :safe assert_equal %(#{Asciidoctor::USER_HOME}/etc/images), output end test 'user-home attribute resolves to . 
if safe mode is SERVER or greater' do input = <<~'EOS' :imagesdir: {user-home}/etc/images {imagesdir} EOS output = convert_inline_string input, safe: :server assert_equal './etc/images', output end test 'user-home attribute can be overridden by API if safe mode is less than SERVER' do input = <<~'EOS' Go {user-home}! EOS output = convert_inline_string input, attributes: { 'user-home' => '/home' } assert_equal 'Go /home!', output end test 'user-home attribute can be overridden by API if safe mode is SERVER or greater' do input = <<~'EOS' Go {user-home}! EOS output = convert_inline_string input, safe: :server, attributes: { 'user-home' => '/home' } assert_equal 'Go /home!', output end test "apply custom substitutions to text in passthrough macro and assign to attribute" do doc = document_from_string(":xml-busters: pass:[<>&]") assert_equal '<>&', doc.attributes['xml-busters'] doc = document_from_string(":xml-busters: pass:none[<>&]") assert_equal '<>&', doc.attributes['xml-busters'] doc = document_from_string(":xml-busters: pass:specialcharacters[<>&]") assert_equal '<>&', doc.attributes['xml-busters'] doc = document_from_string(":xml-busters: pass:n,-c[<(C)>]") assert_equal '<©>', doc.attributes['xml-busters'] end test 'should not recognize pass macro with invalid substitution list in attribute value' do [',', '42', 'a,'].each do |subs| doc = document_from_string %(:pass-fail: pass:#{subs}[whale]) assert_equal %(pass:#{subs}[whale]), doc.attributes['pass-fail'] end end test "attribute is treated as defined until it's not" do input = <<~'EOS' :holygrail: ifdef::holygrail[] The holy grail has been found! endif::holygrail[] :holygrail!: ifndef::holygrail[] Buggers! What happened to the grail? endif::holygrail[] EOS output = convert_string input assert_xpath '//p', output, 2 assert_xpath '(//p)[1][text() = "The holy grail has been found!"]', output, 1 assert_xpath '(//p)[2][text() = "Buggers! What happened to the grail?"]', output, 1 end test 'attribute set via API overrides attribute set in document' do doc = document_from_string(':cash: money', attributes: { 'cash' => 'heroes' }) assert_equal 'heroes', doc.attributes['cash'] end test 'attribute set via API cannot be unset by document' do doc = document_from_string(':cash!:', attributes: { 'cash' => 'heroes' }) assert_equal 'heroes', doc.attributes['cash'] end test 'attribute soft set via API using modifier on name can be overridden by document' do doc = document_from_string(':cash: money', attributes: { 'cash@' => 'heroes' }) assert_equal 'money', doc.attributes['cash'] end test 'attribute soft set via API using modifier on value can be overridden by document' do doc = document_from_string(':cash: money', attributes: { 'cash' => 'heroes@' }) assert_equal 'money', doc.attributes['cash'] end test 'attribute soft set via API using modifier on name can be unset by document' do doc = document_from_string(':cash!:', attributes: { 'cash@' => 'heroes' }) assert_nil doc.attributes['cash'] doc = document_from_string(':cash!:', attributes: { 'cash@' => true }) assert_nil doc.attributes['cash'] end test 'attribute soft set via API using modifier on value can be unset by document' do doc = document_from_string(':cash!:', attributes: { 'cash' => 'heroes@' }) assert_nil doc.attributes['cash'] end test 'attribute unset via API cannot be set by document' do [ { 'cash!' 
=> '' }, { '!cash' => '' }, { 'cash' => nil }, ].each do |attributes| doc = document_from_string(':cash: money', attributes: attributes) assert_nil doc.attributes['cash'] end end test 'attribute soft unset via API can be set by document' do [ { 'cash!@' => '' }, { '!cash@' => '' }, { 'cash!' => '@' }, { '!cash' => '@' }, { 'cash' => false }, ].each do |attributes| doc = document_from_string(':cash: money', attributes: attributes) assert_equal 'money', doc.attributes['cash'] end end test 'can soft unset built-in attribute from API and still override in document' do [ { 'sectids!@' => '' }, { '!sectids@' => '' }, { 'sectids!' => '@' }, { '!sectids' => '@' }, { 'sectids' => false }, ].each do |attributes| doc = document_from_string '== Heading', attributes: attributes refute doc.attr?('sectids') assert_css '#_heading', (doc.convert standalone: false), 0 doc = document_from_string %(:sectids:\n\n== Heading), attributes: attributes assert doc.attr?('sectids') assert_css '#_heading', (doc.convert standalone: false), 1 end end test 'backend and doctype attributes are set by default in default configuration' do input = <<~'EOS' = Document Title Author Name content EOS doc = document_from_string input expect = { 'backend' => 'html5', 'backend-html5' => '', 'backend-html5-doctype-article' => '', 'outfilesuffix' => '.html', 'basebackend' => 'html', 'basebackend-html' => '', 'basebackend-html-doctype-article' => '', 'doctype' => 'article', 'doctype-article' => '', 'filetype' => 'html', 'filetype-html' => '', } expect.each do |key, val| assert doc.attributes.key? key assert_equal val, doc.attributes[key] end end test 'backend and doctype attributes are set by default in custom configuration' do input = <<~'EOS' = Document Title Author Name content EOS doc = document_from_string input, doctype: 'book', backend: 'docbook' expect = { 'backend' => 'docbook5', 'backend-docbook5' => '', 'backend-docbook5-doctype-book' => '', 'outfilesuffix' => '.xml', 'basebackend' => 'docbook', 'basebackend-docbook' => '', 'basebackend-docbook-doctype-book' => '', 'doctype' => 'book', 'doctype-book' => '', 'filetype' => 'xml', 'filetype-xml' => '', } expect.each do |key, val| assert doc.attributes.key? key assert_equal val, doc.attributes[key] end end test 'backend attributes are updated if backend attribute is defined in document and safe mode is less than SERVER' do input = <<~'EOS' = Document Title Author Name :backend: docbook :doctype: book content EOS doc = document_from_string input, safe: Asciidoctor::SafeMode::SAFE expect = { 'backend' => 'docbook5', 'backend-docbook5' => '', 'backend-docbook5-doctype-book' => '', 'outfilesuffix' => '.xml', 'basebackend' => 'docbook', 'basebackend-docbook' => '', 'basebackend-docbook-doctype-book' => '', 'doctype' => 'book', 'doctype-book' => '', 'filetype' => 'xml', 'filetype-xml' => '', } expect.each do |key, val| assert doc.attributes.key?(key) assert_equal val, doc.attributes[key] end refute doc.attributes.key?('backend-html5') refute doc.attributes.key?('backend-html5-doctype-article') refute doc.attributes.key?('basebackend-html') refute doc.attributes.key?('basebackend-html-doctype-article') refute doc.attributes.key?('doctype-article') refute doc.attributes.key?('filetype-html') end test 'backend attributes defined in document options overrides backend attribute in document' do doc = document_from_string(':backend: docbook5', safe: Asciidoctor::SafeMode::SAFE, attributes: { 'backend' => 'html5' }) assert_equal 'html5', doc.attributes['backend'] assert doc.attributes.key? 
'backend-html5' assert_equal 'html', doc.attributes['basebackend'] assert doc.attributes.key? 'basebackend-html' end test 'can only access a positional attribute from the attributes hash' do node = Asciidoctor::Block.new nil, :paragraph, attributes: { 1 => 'position 1' } assert_nil node.attr(1) refute node.attr?(1) assert_equal 'position 1', node.attributes[1] end test 'attr should not retrieve attribute from document if not set on block' do doc = document_from_string 'paragraph', attributes: { 'name' => 'value' } para = doc.blocks[0] assert_nil para.attr 'name' end test 'attr looks for attribute on document if fallback name is true' do doc = document_from_string 'paragraph', attributes: { 'name' => 'value' } para = doc.blocks[0] assert_equal 'value', (para.attr 'name', nil, true) end test 'attr uses fallback name when looking for attribute on document' do doc = document_from_string 'paragraph', attributes: { 'alt-name' => 'value' } para = doc.blocks[0] assert_equal 'value', (para.attr 'name', nil, 'alt-name') end test 'attr? should not check for attribute on document if not set on block' do doc = document_from_string 'paragraph', attributes: { 'name' => 'value' } para = doc.blocks[0] refute para.attr? 'name' end test 'attr? checks for attribute on document if fallback name is true' do doc = document_from_string 'paragraph', attributes: { 'name' => 'value' } para = doc.blocks[0] assert para.attr? 'name', nil, true end test 'attr? checks for fallback name when looking for attribute on document' do doc = document_from_string 'paragraph', attributes: { 'alt-name' => 'value' } para = doc.blocks[0] assert para.attr? 'name', nil, 'alt-name' end test 'set_attr should set value to empty string if no value is specified' do node = Asciidoctor::Block.new nil, :paragraph, attributes: {} node.set_attr 'foo' assert_equal '', (node.attr 'foo') end test 'remove_attr should remove attribute and return previous value' do doc = empty_document node = Asciidoctor::Block.new doc, :paragraph, attributes: { 'foo' => 'bar' } assert_equal 'bar', (node.remove_attr 'foo') assert_nil node.attr('foo') end test 'set_attr should not overwrite existing key if overwrite is false' do node = Asciidoctor::Block.new nil, :paragraph, attributes: { 'foo' => 'bar' } assert_equal 'bar', (node.attr 'foo') node.set_attr 'foo', 'baz', false assert_equal 'bar', (node.attr 'foo') end test 'set_attr should overwrite existing key by default' do node = Asciidoctor::Block.new nil, :paragraph, attributes: { 'foo' => 'bar' } assert_equal 'bar', (node.attr 'foo') node.set_attr 'foo', 'baz' assert_equal 'baz', (node.attr 'foo') end test 'set_attr should set header attribute in loaded document' do input = <<~'EOS' :uri: http://example.org {uri} EOS doc = Asciidoctor.load input, attributes: { 'uri' => 'https://github.com' } doc.set_attr 'uri', 'https://google.com' output = doc.convert assert_xpath '//a[@href="https://google.com"]', output, 1 end test 'set_attribute should set attribute if key is not locked' do doc = empty_document refute doc.attr? 
'foo' res = doc.set_attribute 'foo', 'baz' assert res assert_equal 'baz', (doc.attr 'foo') end test 'set_attribute should not set key if key is locked' do doc = empty_document attributes: { 'foo' => 'bar' } assert_equal 'bar', (doc.attr 'foo') res = doc.set_attribute 'foo', 'baz' refute res assert_equal 'bar', (doc.attr 'foo') end test 'set_attribute should update backend attributes' do doc = empty_document attributes: { 'backend' => 'html5@' } assert_equal '', (doc.attr 'backend-html5') res = doc.set_attribute 'backend', 'docbook5' assert res refute doc.attr? 'backend-html5' assert_equal '', (doc.attr 'backend-docbook5') end test 'verify toc attribute matrix' do expected_data = <<~'EOS' #attributes |toc|toc-position|toc-placement|toc-class toc | |nil |auto |nil toc=header | |nil |auto |nil toc=beeboo | |nil |auto |nil toc=left | |left |auto |toc2 toc2 | |left |auto |toc2 toc=right | |right |auto |toc2 toc=preamble | |content |preamble |nil toc=macro | |content |macro |nil toc toc-placement=macro toc-position=left | |content |macro |nil toc toc-placement! | |content |macro |nil EOS expected = expected_data.lines.map do |l| next if l.start_with? '#' l.split('|').map {|e| (e = e.strip) == 'nil' ? nil : e } end.compact expected.each do |expect| raw_attrs, toc, toc_position, toc_placement, toc_class = expect attrs = Hash[*raw_attrs.split.map {|e| e.include?('=') ? e.split('=', 2) : [e, ''] }.flatten] doc = document_from_string '', attributes: attrs toc ? (assert doc.attr?('toc', toc)) : (refute doc.attr?('toc')) toc_position ? (assert doc.attr?('toc-position', toc_position)) : (refute doc.attr?('toc-position')) toc_placement ? (assert doc.attr?('toc-placement', toc_placement)) : (refute doc.attr?('toc-placement')) toc_class ? (assert doc.attr?('toc-class', toc_class)) : (refute doc.attr?('toc-class')) end end end context 'Interpolation' do test "convert properly with simple names" do html = convert_string(":frog: Tanglefoot\n:my_super-hero: Spiderman\n\nYo, {frog}!\nBeat {my_super-hero}!") assert_xpath %(//p[text()="Yo, Tanglefoot!\nBeat Spiderman!"]), html, 1 end test 'attribute lookup is not case sensitive' do input = <<~'EOS' :He-Man: The most powerful man in the universe He-Man: {He-Man} She-Ra: {She-Ra} EOS result = convert_string_to_embedded input, attributes: { 'She-Ra' => 'The Princess of Power' } assert_xpath '//p[text()="He-Man: The most powerful man in the universe"]', result, 1 assert_xpath '//p[text()="She-Ra: The Princess of Power"]', result, 1 end test "convert properly with single character name" do html = convert_string(":r: Ruby\n\nR is for {r}!") assert_xpath %(//p[text()="R is for Ruby!"]), html, 1 end test "collapses spaces in attribute names" do input = <<~'EOS' Main Header =========== :My frog: Tanglefoot Yo, {myfrog}! EOS output = convert_string input assert_xpath '(//p)[1][text()="Yo, Tanglefoot!"]', output, 1 end test 'ignores lines with bad attributes if attribute-missing is drop-line' do input = <<~'EOS' :attribute-missing: drop-line This is blah blah {foobarbaz} all there is. 
EOS output = convert_string_to_embedded input para = xmlnodes_at_css 'p', output, 1 refute_includes 'blah blah', para.content assert_message @logger, :INFO, 'dropping line containing reference to missing attribute: foobarbaz' end test 'attribute value gets interpreted when converting' do doc = document_from_string(":google: http://google.com[Google]\n\n{google}") assert_equal 'http://google.com[Google]', doc.attributes['google'] output = doc.convert assert_xpath '//a[@href="http://google.com"][text() = "Google"]', output, 1 end test 'should drop line with reference to missing attribute if attribute-missing attribute is drop-line' do input = <<~'EOS' :attribute-missing: drop-line Line 1: This line should appear in the output. Line 2: Oh no, a {bogus-attribute}! This line should not appear in the output. EOS output = convert_string_to_embedded input assert_match(/Line 1/, output) refute_match(/Line 2/, output) assert_message @logger, :INFO, 'dropping line containing reference to missing attribute: bogus-attribute' end test 'should not drop line with reference to missing attribute by default' do input = <<~'EOS' Line 1: This line should appear in the output. Line 2: A {bogus-attribute}! This time, this line should appear in the output. EOS output = convert_string_to_embedded input assert_match(/Line 1/, output) assert_match(/Line 2/, output) assert_match(/\{bogus-attribute\}/, output) end test 'should drop line with attribute unassignment by default' do input = <<~'EOS' :a: Line 1: This line should appear in the output. Line 2: {set:a!}This line should not appear in the output. EOS output = convert_string_to_embedded input assert_match(/Line 1/, output) refute_match(/Line 2/, output) end test 'should not drop line with attribute unassignment if attribute-undefined is drop' do input = <<~'EOS' :attribute-undefined: drop :a: Line 1: This line should appear in the output. Line 2: {set:a!}This line should appear in the output. 
EOS output = convert_string_to_embedded input assert_match(/Line 1/, output) assert_match(/Line 2/, output) refute_match(/\{set:a!\}/, output) end test 'should drop line that only contains attribute assignment' do input = <<~'EOS' Line 1 {set:a} Line 2 EOS output = convert_string_to_embedded input assert_xpath %(//p[text()="Line 1\nLine 2"]), output, 1 end test 'should drop line that only contains unresolved attribute when attribute-missing is drop' do input = <<~'EOS' Line 1 {unresolved} Line 2 EOS output = convert_string_to_embedded input, attributes: { 'attribute-missing' => 'drop' } assert_xpath %(//p[text()="Line 1\nLine 2"]), output, 1 end test "substitutes inside unordered list items" do html = convert_string(":foo: bar\n* snort at the {foo}\n* yawn") assert_xpath %(//li/p[text()="snort at the bar"]), html, 1 end test 'substitutes inside section title' do output = convert_string(":prefix: Cool\n\n== {prefix} Title\n\ncontent") assert_xpath '//h2[text()="Cool Title"]', output, 1 assert_css 'h2#_cool_title', output, 1 end test 'interpolates attribute defined in header inside attribute entry in header' do input = <<~'EOS' = Title Author Name :attribute-a: value :attribute-b: {attribute-a} preamble EOS doc = document_from_string(input, parse_header_only: true) assert_equal 'value', doc.attributes['attribute-b'] end test 'interpolates author attribute inside attribute entry in header' do input = <<~'EOS' = Title Author Name :name: {author} preamble EOS doc = document_from_string(input, parse_header_only: true) assert_equal 'Author Name', doc.attributes['name'] end test 'interpolates revinfo attribute inside attribute entry in header' do input = <<~'EOS' = Title Author Name 2013-01-01 :date: {revdate} preamble EOS doc = document_from_string(input, parse_header_only: true) assert_equal '2013-01-01', doc.attributes['date'] end test 'attribute entries can resolve previously defined attributes' do input = <<~'EOS' = Title Author Name v1.0, 2010-01-01: First release! :a: value :a2: {a} :revdate2: {revdate} {a} == {a2} {revdate} == {revdate2} EOS doc = document_from_string input assert_equal '2010-01-01', doc.attr('revdate') assert_equal '2010-01-01', doc.attr('revdate2') assert_equal 'value', doc.attr('a') assert_equal 'value', doc.attr('a2') output = doc.convert assert_includes output, 'value == value' assert_includes output, '2010-01-01 == 2010-01-01' end test 'should warn if unterminated block comment is detected in document header' do input = <<~'EOS' = Document Title :foo: bar //// :hey: there content EOS doc = document_from_string input assert_nil doc.attr('hey') assert_message @logger, :WARN, ': line 3: unterminated comment block', Hash end test 'substitutes inside block title' do input = <<~'EOS' :gem_name: asciidoctor .Require the +{gem_name}+ gem To use {gem_name}, the first thing to do is to import it in your Ruby source file. EOS output = convert_string_to_embedded input, attributes: { 'compat-mode' => '' } assert_xpath '//*[@class="title"]/code[text()="asciidoctor"]', output, 1 input = <<~'EOS' :gem_name: asciidoctor .Require the `{gem_name}` gem To use {gem_name}, the first thing to do is to import it in your Ruby source file. EOS output = convert_string_to_embedded input assert_xpath '//*[@class="title"]/code[text()="asciidoctor"]', output, 1 end test 'sets attribute until it is deleted' do input = <<~'EOS' :foo: bar Crossing the {foo}. :foo!: Belly up to the {foo}. 
EOS output = convert_string_to_embedded input assert_xpath '//p[text()="Crossing the bar."]', output, 1 assert_xpath '//p[text()="Belly up to the bar."]', output, 0 end test 'should allow compat-mode to be set and unset in middle of document' do input = <<~'EOS' :foo: bar [[paragraph-a]] `{foo}` :compat-mode!: [[paragraph-b]] `{foo}` :compat-mode: [[paragraph-c]] `{foo}` EOS result = convert_string_to_embedded input, attributes: { 'compat-mode' => '@' } assert_xpath '/*[@id="paragraph-a"]//code[text()="{foo}"]', result, 1 assert_xpath '/*[@id="paragraph-b"]//code[text()="bar"]', result, 1 assert_xpath '/*[@id="paragraph-c"]//code[text()="{foo}"]', result, 1 end test 'does not disturb attribute-looking things escaped with backslash' do html = convert_string(":foo: bar\nThis is a \\{foo} day.") assert_xpath '//p[text()="This is a {foo} day."]', html, 1 end test 'does not disturb attribute-looking things escaped with literals' do html = convert_string(":foo: bar\nThis is a +++{foo}+++ day.") assert_xpath '//p[text()="This is a {foo} day."]', html, 1 end test 'does not substitute attributes inside listing blocks' do input = <<~'EOS' :forecast: snow ---- puts 'The forecast for today is {forecast}' ---- EOS output = convert_string(input) assert_match(/\{forecast\}/, output) end test 'does not substitute attributes inside literal blocks' do input = <<~'EOS' :foo: bar .... You insert the text {foo} to expand the value of the attribute named foo in your document. .... EOS output = convert_string(input) assert_match(/\{foo\}/, output) end test 'does not show docdir and shows relative docfile if safe mode is SERVER or greater' do input = <<~'EOS' * docdir: {docdir} * docfile: {docfile} EOS docdir = Dir.pwd docfile = File.join(docdir, 'sample.adoc') output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docdir' => docdir, 'docfile' => docfile } assert_xpath '//li[1]/p[text()="docdir: "]', output, 1 assert_xpath '//li[2]/p[text()="docfile: sample.adoc"]', output, 1 end test 'shows absolute docdir and docfile paths if safe mode is less than SERVER' do input = <<~'EOS' * docdir: {docdir} * docfile: {docfile} EOS docdir = Dir.pwd docfile = File.join(docdir, 'sample.adoc') output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE, attributes: { 'docdir' => docdir, 'docfile' => docfile } assert_xpath %(//li[1]/p[text()="docdir: #{docdir}"]), output, 1 assert_xpath %(//li[2]/p[text()="docfile: #{docfile}"]), output, 1 end test 'assigns attribute defined in attribute reference with set prefix and value' do input = '{set:foo:bar}{foo}' output = convert_string_to_embedded input assert_xpath '//p', output, 1 assert_xpath '//p[text()="bar"]', output, 1 end test 'assigns attribute defined in attribute reference with set prefix and no value' do input = "{set:foo}\n{foo}yes" output = convert_string_to_embedded input assert_xpath '//p', output, 1 assert_xpath '//p[normalize-space(text())="yes"]', output, 1 end test 'assigns attribute defined in attribute reference with set prefix and empty value' do input = "{set:foo:}\n{foo}yes" output = convert_string_to_embedded input assert_xpath '//p', output, 1 assert_xpath '//p[normalize-space(text())="yes"]', output, 1 end test 'unassigns attribute defined in attribute reference with set prefix' do input = <<~'EOS' :attribute-missing: drop-line :foo: {set:foo!} {foo}yes EOS output = convert_string_to_embedded input assert_xpath '//p', output, 1 assert_xpath '//p/child::text()', output, 0 assert_message @logger, :INFO, 
'dropping line containing reference to missing attribute: foo' end end context "Intrinsic attributes" do test "substitute intrinsics" do Asciidoctor::INTRINSIC_ATTRIBUTES.each_pair do |key, value| html = convert_string("Look, a {#{key}} is here") # can't use Nokogiri because it interprets the HTML entities and we can't match them assert_match(/Look, a #{Regexp.escape(value)} is here/, html) end end test "don't escape intrinsic substitutions" do html = convert_string('happy{nbsp}together') assert_match(/happy together/, html) end test "escape special characters" do html = convert_string('&') assert_match(/<node>&<\/node>/, html) end test 'creates counter' do input = '{counter:mycounter}' doc = document_from_string input output = doc.convert assert_equal 1, doc.attributes['mycounter'] assert_xpath '//p[text()="1"]', output, 1 end test 'creates counter silently' do input = '{counter2:mycounter}' doc = document_from_string input output = doc.convert assert_equal 1, doc.attributes['mycounter'] assert_xpath '//p[text()="1"]', output, 0 end test 'creates counter with numeric seed value' do input = '{counter2:mycounter:10}' doc = document_from_string input doc.convert assert_equal 10, doc.attributes['mycounter'] end test 'creates counter with character seed value' do input = '{counter2:mycounter:A}' doc = document_from_string input doc.convert assert_equal 'A', doc.attributes['mycounter'] end test 'can seed counter to start at 1' do input = <<~'EOS' :mycounter: 0 {counter:mycounter} EOS output = convert_string_to_embedded input assert_xpath '//p[text()="1"]', output, 1 end test 'can seed counter to start at A' do input = <<~'EOS' :mycounter: @ {counter:mycounter} EOS output = convert_string_to_embedded input assert_xpath '//p[text()="A"]', output, 1 end test 'increments counter with positive numeric value' do input = <<~'EOS' [subs=attributes] ++++ {counter:mycounter:1} {counter:mycounter} {counter:mycounter} {mycounter} ++++ EOS doc = document_from_string input, standalone: false output = doc.convert assert_equal 3, doc.attributes['mycounter'] assert_equal %w(1 2 3 3), output.lines.map {|l| l.rstrip } end test 'increments counter with negative numeric value' do input = <<~'EOS' [subs=attributes] ++++ {counter:mycounter:-2} {counter:mycounter} {counter:mycounter} {mycounter} ++++ EOS doc = document_from_string input, standalone: false output = doc.convert assert_equal 0, doc.attributes['mycounter'] assert_equal %w(-2 -1 0 0), output.lines.map {|l| l.rstrip } end test 'increments counter with ASCII character value' do input = <<~'EOS' [subs=attributes] ++++ {counter:mycounter:A} {counter:mycounter} {counter:mycounter} {mycounter} ++++ EOS output = convert_string_to_embedded input assert_equal %w(A B C C), output.lines.map {|l| l.rstrip } end test 'increments counter with non-ASCII character value' do input = <<~'EOS' [subs=attributes] ++++ {counter:mycounter:é} {counter:mycounter} {counter:mycounter} {mycounter} ++++ EOS output = convert_string_to_embedded input assert_equal %w(é ê ë ë), output.lines.map {|l| l.rstrip } end test 'increments counter with emoji character value' do input = <<~'EOS' [subs=attributes] ++++ {counter:smiley:😋} {counter:smiley} {counter:smiley} {smiley} ++++ EOS output = convert_string_to_embedded input assert_equal %w(😋 😌 😍 😍), output.lines.map {|l| l.rstrip } end test 'increments counter with multi-character value' do input = <<~'EOS' [subs=attributes] ++++ {counter:math:1x} {counter:math} {counter:math} {math} ++++ EOS output = convert_string_to_embedded input 
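# NOTE assumption: a non-numeric counter seed appears to advance using Ruby String#succ
# semantics, which is why '1x' is expected to step through '1y' and '1z' below.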
assert_equal %w(1x 1y 1z 1z), output.lines.map {|l| l.rstrip } end test 'counter uses 0 as seed value if seed attribute is nil' do input = <<~'EOS' :mycounter: {counter:mycounter} {mycounter} EOS doc = document_from_string input output = doc.convert standalone: false assert_equal 1, doc.attributes['mycounter'] assert_xpath '//p[text()="1"]', output, 2 end test 'counter value can be reset by attribute entry' do input = <<~'EOS' :mycounter: before: {counter:mycounter} {counter:mycounter} {counter:mycounter} :mycounter!: after: {counter:mycounter} EOS doc = document_from_string input output = doc.convert standalone: false assert_equal 1, doc.attributes['mycounter'] assert_xpath '//p[text()="before: 1 2 3"]', output, 1 assert_xpath '//p[text()="after: 1"]', output, 1 end test 'counter value can be advanced by attribute entry' do input = <<~'EOS' before: {counter:mycounter} :mycounter: 10 after: {counter:mycounter} EOS doc = document_from_string input output = doc.convert standalone: false assert_equal 11, doc.attributes['mycounter'] assert_xpath '//p[text()="before: 1"]', output, 1 assert_xpath '//p[text()="after: 11"]', output, 1 end test 'nested document should use counter from parent document' do input = <<~'EOS' .Title for Foo image::foo.jpg[] [cols="2*a"] |=== | .Title for Bar image::bar.jpg[] | .Title for Baz image::baz.jpg[] |=== .Title for Qux image::qux.jpg[] EOS output = convert_string_to_embedded input assert_xpath '//div[@class="title"]', output, 4 assert_xpath '//div[@class="title"][text() = "Figure 1. Title for Foo"]', output, 1 assert_xpath '//div[@class="title"][text() = "Figure 2. Title for Bar"]', output, 1 assert_xpath '//div[@class="title"][text() = "Figure 3. Title for Baz"]', output, 1 assert_xpath '//div[@class="title"][text() = "Figure 4. Title for Qux"]', output, 1 end test 'should not allow counter to modify locked attribute' do input = <<~'EOS' {counter:foo:ignored} is not {foo} EOS output = convert_string_to_embedded input, attributes: { 'foo' => 'bar' } assert_xpath '//p[text()="bas is not bar"]', output, 1 end test 'should not allow counter2 to modify locked attribute' do input = <<~'EOS' {counter2:foo:ignored}{foo} EOS output = convert_string_to_embedded input, attributes: { 'foo' => 'bar' } assert_xpath '//p[text()="bar"]', output, 1 end test 'should not allow counter to modify built-in locked attribute' do input = <<~'EOS' {counter:max-include-depth:128} is one more than {max-include-depth} EOS doc = document_from_string input, standalone: false output = doc.convert assert_xpath '//p[text()="65 is one more than 64"]', output, 1 assert_equal 64, doc.attributes['max-include-depth'] end test 'should not allow counter2 to modify built-in locked attribute' do input = <<~'EOS' {counter2:max-include-depth:128}{max-include-depth} EOS doc = document_from_string input, standalone: false output = doc.convert assert_xpath '//p[text()="64"]', output, 1 assert_equal 64, doc.attributes['max-include-depth'] end end context 'Block attributes' do test 'parses attribute names as name token' do input = <<~'EOS' [normal,foo="bar",_foo="_bar",foo1="bar1",foo-foo="bar-bar",foo.foo="bar.bar"] content EOS block = block_from_string input assert_equal 'bar', block.attr('foo') assert_equal '_bar', block.attr('_foo') assert_equal 'bar1', block.attr('foo1') assert_equal 'bar-bar', block.attr('foo-foo') assert_equal 'bar.bar', block.attr('foo.foo') end test 'positional attributes assigned to block' do input = <<~'EOS' [quote, author, source] ____ A famous quote. 
____ EOS doc = document_from_string(input) qb = doc.blocks.first assert_equal 'quote', qb.style assert_equal 'author', qb.attr('attribution') assert_equal 'author', qb.attr(:attribution) assert_equal 'author', qb.attributes['attribution'] assert_equal 'source', qb.attributes['citetitle'] end test 'normal substitutions are performed on single-quoted positional attribute' do input = <<~'EOS' [quote, author, 'http://wikipedia.org[source]'] ____ A famous quote. ____ EOS doc = document_from_string(input) qb = doc.blocks.first assert_equal 'quote', qb.style assert_equal 'author', qb.attr('attribution') assert_equal 'author', qb.attr(:attribution) assert_equal 'author', qb.attributes['attribution'] assert_equal 'source', qb.attributes['citetitle'] end test 'normal substitutions are performed on single-quoted named attribute' do input = <<~'EOS' [quote, author, citetitle='http://wikipedia.org[source]'] ____ A famous quote. ____ EOS doc = document_from_string(input) qb = doc.blocks.first assert_equal 'quote', qb.style assert_equal 'author', qb.attr('attribution') assert_equal 'author', qb.attr(:attribution) assert_equal 'author', qb.attributes['attribution'] assert_equal 'source', qb.attributes['citetitle'] end test 'normal substitutions are performed once on single-quoted named title attribute' do input = <<~'EOS' [title='*title*'] content EOS output = convert_string_to_embedded input assert_xpath '//*[@class="title"]/strong[text()="title"]', output, 1 end test 'attribute list may not begin with space' do input = <<~'EOS' [ quote] ____ A famous quote. ____ EOS doc = document_from_string input b1 = doc.blocks.first assert_equal ['[ quote]'], b1.lines end test 'attribute list may begin with comma' do input = <<~'EOS' [, author, source] ____ A famous quote. ____ EOS doc = document_from_string input qb = doc.blocks.first assert_equal 'quote', qb.style assert_equal 'author', qb.attributes['attribution'] assert_equal 'source', qb.attributes['citetitle'] end test 'first attribute in list may be double quoted' do input = <<~'EOS' ["quote", "author", "source", role="famous"] ____ A famous quote. ____ EOS doc = document_from_string input qb = doc.blocks.first assert_equal 'quote', qb.style assert_equal 'author', qb.attributes['attribution'] assert_equal 'source', qb.attributes['citetitle'] assert_equal 'famous', qb.attributes['role'] end test 'first attribute in list may be single quoted' do input = <<~'EOS' ['quote', 'author', 'source', role='famous'] ____ A famous quote. ____ EOS doc = document_from_string input qb = doc.blocks.first assert_equal 'quote', qb.style assert_equal 'author', qb.attributes['attribution'] assert_equal 'source', qb.attributes['citetitle'] assert_equal 'famous', qb.attributes['role'] end test 'attribute with value None without quotes is ignored' do input = <<~'EOS' [id=None] paragraph EOS doc = document_from_string input para = doc.blocks.first refute para.attributes.key?('id') end test 'role? returns true if role is assigned' do input = <<~'EOS' [role="lead"] A paragraph EOS doc = document_from_string input p = doc.blocks.first assert p.role? end test 'role? does not return true if role attribute is set on document' do input = <<~'EOS' :role: lead A paragraph EOS doc = document_from_string input p = doc.blocks.first refute p.role? end test 'role? can check for exact role name match' do input = <<~'EOS' [role="lead"] A paragraph EOS doc = document_from_string input p = doc.blocks.first assert p.role?('lead') p2 = doc.blocks.last refute p2.role?('final') end test 'has_role? 
can check for presence of role name' do input = <<~'EOS' [role="lead abstract"] A paragraph EOS doc = document_from_string input p = doc.blocks.first refute p.role?('lead') assert p.has_role?('lead') end test 'has_role? does not look for role defined as document attribute' do input = <<~'EOS' :role: lead abstract A paragraph EOS doc = document_from_string input p = doc.blocks.first refute p.has_role?('lead') end test 'roles returns array of role names' do input = <<~'EOS' [role="story lead"] A paragraph EOS doc = document_from_string input p = doc.blocks.first assert_equal ['story', 'lead'], p.roles end test 'roles returns empty array if role attribute is not set' do input = 'a paragraph' doc = document_from_string input p = doc.blocks.first assert_equal [], p.roles end test 'roles does not return value of roles document attribute' do input = <<~'EOS' :role: story lead A paragraph EOS doc = document_from_string input p = doc.blocks.first assert_equal [], p.roles end test 'roles= sets the role attribute on the node' do doc = document_from_string 'a paragraph' p = doc.blocks.first p.role = 'foobar' assert_equal 'foobar', (p.attr 'role') end test 'roles= coerces array value to a space-separated string' do doc = document_from_string 'a paragraph' p = doc.blocks.first p.role = %w(foo bar) assert_equal 'foo bar', (p.attr 'role') end test "Attribute substitutions are performed on attribute list before parsing attributes" do input = <<~'EOS' :lead: role="lead" [{lead}] A paragraph EOS doc = document_from_string(input) para = doc.blocks.first assert_equal 'lead', para.attributes['role'] end test 'id, role and options attributes can be specified on block style using shorthand syntax' do input = <<~'EOS' [literal#first.lead%step] A literal paragraph. EOS doc = document_from_string(input) para = doc.blocks.first assert_equal :literal, para.context assert_equal 'first', para.attributes['id'] assert_equal 'lead', para.attributes['role'] assert para.attributes.key?('step-option') refute para.attributes.key?('options') end test 'id, role and options attributes can be specified using shorthand syntax on block style using multiple block attribute lines' do input = <<~'EOS' [literal] [#first] [.lead] [%step] A literal paragraph. 
EOS doc = document_from_string(input) para = doc.blocks.first assert_equal :literal, para.context assert_equal 'first', para.attributes['id'] assert_equal 'lead', para.attributes['role'] assert para.attributes.key?('step-option') refute para.attributes.key?('options') end test 'multiple roles and options can be specified in block style using shorthand syntax' do input = <<~'EOS' [.role1%option1.role2%option2] Text EOS doc = document_from_string input para = doc.blocks.first assert_equal 'role1 role2', para.attributes['role'] assert para.attributes.key?('option1-option') assert para.attributes.key?('option2-option') refute para.attributes.key?('options') end test 'options specified using shorthand syntax on block style across multiple lines should be additive' do input = <<~'EOS' [%option1] [%option2] Text EOS doc = document_from_string input para = doc.blocks.first assert para.attributes.key?('option1-option') assert para.attributes.key?('option2-option') refute para.attributes.key?('options') end test 'roles specified using shorthand syntax on block style across multiple lines should be additive' do input = <<~'EOS' [.role1] [.role2.role3] Text EOS doc = document_from_string input para = doc.blocks.first assert_equal 'role1 role2 role3', para.attributes['role'] end test 'setting a role using the role attribute replaces any existing roles' do input = <<~'EOS' [.role1] [role=role2] [.role3] Text EOS doc = document_from_string input para = doc.blocks.first assert_equal 'role2 role3', para.attributes['role'] end test 'setting a role using the shorthand syntax on block style should not clear the ID' do input = <<~'EOS' [#id] [.role] Text EOS doc = document_from_string input para = doc.blocks.first assert_equal 'id', para.id assert_equal 'role', para.role end test 'a role can be added using add_role when the node has no roles' do input = 'A normal paragraph' doc = document_from_string(input) para = doc.blocks.first res = para.add_role 'role1' assert res assert_equal 'role1', para.attributes['role'] assert para.has_role? 'role1' end test 'a role can be added using add_role when the node already has a role' do input = <<~'EOS' [.role1] A normal paragraph EOS doc = document_from_string(input) para = doc.blocks.first res = para.add_role 'role2' assert res assert_equal 'role1 role2', para.attributes['role'] assert para.has_role? 'role1' assert para.has_role? 'role2' end test 'a role is not added using add_role if the node already has that role' do input = <<~'EOS' [.role1] A normal paragraph EOS doc = document_from_string(input) para = doc.blocks.first res = para.add_role 'role1' refute res assert_equal 'role1', para.attributes['role'] assert para.has_role? 'role1' end test 'an existing role can be removed using remove_role' do input = <<~'EOS' [.role1.role2] A normal paragraph EOS doc = document_from_string(input) para = doc.blocks.first res = para.remove_role 'role1' assert res assert_equal 'role2', para.attributes['role'] assert para.has_role? 'role2' refute para.has_role?('role1') end test 'roles are removed when last role is removed using remove_role' do input = <<~'EOS' [.role1] A normal paragraph EOS doc = document_from_string(input) para = doc.blocks.first res = para.remove_role 'role1' assert res refute para.role? assert_nil para.attributes['role'] refute para.has_role? 
'role1' end test 'roles are not changed when a non-existent role is removed using remove_role' do input = <<~'EOS' [.role1] A normal paragraph EOS doc = document_from_string(input) para = doc.blocks.first res = para.remove_role 'role2' refute res assert_equal 'role1', para.attributes['role'] assert para.has_role? 'role1' refute para.has_role?('role2') end test 'roles are not changed when using remove_role if the node has no roles' do input = 'A normal paragraph' doc = document_from_string(input) para = doc.blocks.first res = para.remove_role 'role1' refute res assert_nil para.attributes['role'] refute para.has_role?('role1') end test 'option can be specified in first position of block style using shorthand syntax' do input = <<~'EOS' [%interactive] - [x] checked EOS doc = document_from_string input list = doc.blocks.first assert list.attributes.key? 'interactive-option' refute list.attributes.key? 'options' end test 'id and role attributes can be specified on section style using shorthand syntax' do input = <<~'EOS' [dedication#dedication.small] == Section Content. EOS output = convert_string_to_embedded input assert_xpath '/div[@class="sect1 small"]', output, 1 assert_xpath '/div[@class="sect1 small"]/h2[@id="dedication"]', output, 1 end test 'id attribute specified using shorthand syntax should not create a special section' do input = <<~'EOS' [#idname] == Section content EOS doc = document_from_string input, backend: 'docbook' section = doc.blocks[0] refute_nil section assert_equal :section, section.context refute section.special output = doc.convert assert_css 'article:root > section', output, 1 assert_css 'article:root > section[xml|id="idname"]', output, 1 end test "Block attributes are additive" do input = <<~'EOS' [id='foo'] [role='lead'] A paragraph. EOS doc = document_from_string(input) para = doc.blocks.first assert_equal 'foo', para.id assert_equal 'lead', para.attributes['role'] end test "Last wins for id attribute" do input = <<~'EOS' [[bar]] [[foo]] == Section paragraph [[baz]] [id='coolio'] === Section EOS doc = document_from_string(input) sec = doc.first_section assert_equal 'foo', sec.id subsec = sec.blocks.last assert_equal 'coolio', subsec.id end test "trailing block attributes transfer to the following section" do input = <<~'EOS' [[one]] == Section One paragraph [[sub]] // try to mess this up! === Sub-section paragraph [role='classy'] //// block comment //// == Section Two content EOS doc = document_from_string(input) section_one = doc.blocks.first assert_equal 'one', section_one.id subsection = section_one.blocks.last assert_equal 'sub', subsection.id section_two = doc.blocks.last assert_equal 'classy', section_two.attr(:role) end end end asciidoctor-2.0.20/test/blocks_test.rb000066400000000000000000004115341443135032600177270ustar00rootroot00000000000000# frozen_string_literal: true require_relative 'test_helper' context 'Blocks' do default_logger = Asciidoctor::LoggerManager.logger setup do Asciidoctor::LoggerManager.logger = (@logger = Asciidoctor::MemoryLogger.new) end teardown do Asciidoctor::LoggerManager.logger = default_logger end context 'Layout Breaks' do test 'horizontal rule' do %w(''' '''' '''''').each do |line| output = convert_string_to_embedded line assert_includes output, '
    ' end end test 'horizontal rule with markdown syntax disabled' do old_markdown_syntax = Asciidoctor::Compliance.markdown_syntax begin Asciidoctor::Compliance.markdown_syntax = false %w(''' '''' '''''').each do |line| output = convert_string_to_embedded line assert_includes output, '
<hr>' end %w(--- *** ___).each do |line| output = convert_string_to_embedded line refute_includes output, '
<hr>' end ensure Asciidoctor::Compliance.markdown_syntax = old_markdown_syntax end end test '< 3 chars does not make horizontal rule' do %w(' '').each do |line| output = convert_string_to_embedded line refute_includes output, '
<hr>' assert_includes output, %(<div class="paragraph">
<p>#{line}</p>
</div>
    ) end end test 'mixed chars does not make horizontal rule' do [%q(''<), %q('''<), %q(' ' ')].each do |line| output = convert_string_to_embedded line refute_includes output, '
<hr>' assert_includes output, %(<div class="paragraph">
<p>#{line.sub '<', '&lt;'}</p>
</div>
    ) end end test 'horizontal rule between blocks' do output = convert_string_to_embedded %(Block above\n\n'''\n\nBlock below) assert_xpath '/hr', output, 1 assert_xpath '/hr/preceding-sibling::*', output, 1 assert_xpath '/hr/following-sibling::*', output, 1 end test 'page break' do output = convert_string_to_embedded %(page 1\n\n<<<\n\npage 2) assert_xpath '/*[translate(@style, ";", "")="page-break-after: always"]', output, 1 assert_xpath '/*[translate(@style, ";", "")="page-break-after: always"]/preceding-sibling::div/p[text()="page 1"]', output, 1 assert_xpath '/*[translate(@style, ";", "")="page-break-after: always"]/following-sibling::div/p[text()="page 2"]', output, 1 end end context 'Comments' do test 'line comment between paragraphs offset by blank lines' do input = <<~'EOS' first paragraph // line comment second paragraph EOS output = convert_string_to_embedded input refute_match(/line comment/, output) assert_xpath '//p', output, 2 end test 'adjacent line comment between paragraphs' do input = <<~'EOS' first line // line comment second line EOS output = convert_string_to_embedded input refute_match(/line comment/, output) assert_xpath '//p', output, 1 assert_xpath "//p[1][text()='first line\nsecond line']", output, 1 end test 'comment block between paragraphs offset by blank lines' do input = <<~'EOS' first paragraph //// block comment //// second paragraph EOS output = convert_string_to_embedded input refute_match(/block comment/, output) assert_xpath '//p', output, 2 end test 'comment block between paragraphs offset by blank lines inside delimited block' do input = <<~'EOS' ==== first paragraph //// block comment //// second paragraph ==== EOS output = convert_string_to_embedded input refute_match(/block comment/, output) assert_xpath '//p', output, 2 end test 'adjacent comment block between paragraphs' do input = <<~'EOS' first paragraph //// block comment //// second paragraph EOS output = convert_string_to_embedded input refute_match(/block comment/, output) assert_xpath '//p', output, 2 end test "can convert with block comment at end of document with trailing newlines" do input = <<~'EOS' paragraph //// block comment //// EOS output = convert_string_to_embedded input refute_match(/block comment/, output) end test "trailing newlines after block comment at end of document does not create paragraph" do input = <<~'EOS' paragraph //// block comment //// EOS d = document_from_string input assert_equal 1, d.blocks.size assert_xpath '//p', d.convert, 1 end test 'line starting with three slashes should not be line comment' do input = '/// not a line comment' output = convert_string_to_embedded input refute_empty output.strip, "Line should be emitted => #{input.rstrip}" end test 'preprocessor directives should not be processed within comment block within block metadata' do input = <<~'EOS' .sample title //// ifdef::asciidoctor[////] //// line should be shown EOS output = convert_string_to_embedded input assert_xpath '//p[text()="line should be shown"]', output, 1 end test 'preprocessor directives should not be processed within comment block' do input = <<~'EOS' dummy line //// ifdef::asciidoctor[////] //// line should be shown EOS output = convert_string_to_embedded input assert_xpath '//p[text()="line should be shown"]', output, 1 end test 'should warn if unterminated comment block is detected in body' do input = <<~'EOS' before comment block //// content that has been disabled supposed to be after comment block, except it got swallowed by block comment EOS 
convert_string_to_embedded input assert_message @logger, :WARN, ': line 3: unterminated comment block', Hash end test 'should warn if unterminated comment block is detected inside another block' do input = <<~'EOS' before sidebar block **** //// content that has been disabled **** supposed to be after sidebar block, except it got swallowed by block comment EOS convert_string_to_embedded input assert_message @logger, :WARN, ': line 4: unterminated comment block', Hash end # WARNING if first line of content is a directive, it will get interpreted before we know it's a comment block # it happens because we always look a line ahead...not sure what we can do about it test 'preprocessor directives should not be processed within comment open block' do input = <<~'EOS' [comment] -- first line of comment ifdef::asciidoctor[--] line should not be shown -- EOS output = convert_string_to_embedded input assert_xpath '//p', output, 0 end # WARNING this assertion fails if the directive is the first line of the paragraph instead of the second # it happens because we always look a line ahead; not sure what we can do about it test 'preprocessor directives should not be processed on subsequent lines of a comment paragraph' do input = <<~'EOS' [comment] first line of content ifdef::asciidoctor[////] this line should be shown EOS output = convert_string_to_embedded input assert_xpath '//p[text()="this line should be shown"]', output, 1 end test 'comment style on open block should only skip block' do input = <<~'EOS' [comment] -- skip this block -- not this text EOS result = convert_string_to_embedded input assert_xpath '//p', result, 1 assert_xpath '//p[text()="not this text"]', result, 1 end test 'comment style on paragraph should only skip paragraph' do input = <<~'EOS' [comment] skip this paragraph not this text EOS result = convert_string_to_embedded input assert_xpath '//p', result, 1 assert_xpath '//p[text()="not this text"]', result, 1 end test 'comment style on paragraph should not cause adjacent block to be skipped' do input = <<~'EOS' [comment] skip this paragraph [example] not this text EOS result = convert_string_to_embedded input assert_xpath '/*[@class="exampleblock"]', result, 1 assert_xpath '/*[@class="exampleblock"]//*[normalize-space(text())="not this text"]', result, 1 end # NOTE this test verifies the nil return value of Parser#next_block test 'should not drop content that follows skipped content inside a delimited block' do input = <<~'EOS' ==== paragraph [comment#idname] skip paragraph ==== EOS result = convert_string_to_embedded input assert_xpath '/*[@class="exampleblock"]', result, 1 assert_xpath '/*[@class="exampleblock"]//*[@class="paragraph"]', result, 2 assert_xpath '//*[@class="paragraph"][@id="idname"]', result, 0 end end context 'Sidebar Blocks' do test 'should parse sidebar block' do input = <<~'EOS' == Section .Sidebar **** Content goes here **** EOS result = convert_string input assert_xpath "//*[@class='sidebarblock']//p", result, 1 end end context 'Quote and Verse Blocks' do test 'quote block with no attribution' do input = <<~'EOS' ____ A famous quote. 
____ EOS output = convert_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph > p', output, 1 assert_css '.quoteblock > .attribution', output, 0 assert_xpath '//*[@class="quoteblock"]//p[text()="A famous quote."]', output, 1 end test 'quote block with attribution' do input = <<~'EOS' [quote, Famous Person, Famous Book (1999)] ____ A famous quote. ____ EOS output = convert_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph > p', output, 1 assert_css '.quoteblock > .attribution', output, 1 assert_css '.quoteblock > .attribution > cite', output, 1 assert_css '.quoteblock > .attribution > br + cite', output, 1 assert_xpath '//*[@class="quoteblock"]/*[@class="attribution"]/cite[text()="Famous Book (1999)"]', output, 1 attribution = xmlnodes_at_xpath '//*[@class="quoteblock"]/*[@class="attribution"]', output, 1 author = attribution.children.first assert_equal "#{decode_char 8212} Famous Person", author.text.strip end test 'quote block with attribute and id and role shorthand' do input = <<~'EOS' [quote#justice-to-all.solidarity, Martin Luther King, Jr.] ____ Injustice anywhere is a threat to justice everywhere. ____ EOS output = convert_string_to_embedded input assert_css '.quoteblock', output, 1 assert_css '#justice-to-all.quoteblock.solidarity', output, 1 assert_css '.quoteblock > .attribution', output, 1 end test 'setting ID using style shorthand should not reset block style' do input = <<~'EOS' [quote] [#justice-to-all.solidarity, Martin Luther King, Jr.] ____ Injustice anywhere is a threat to justice everywhere. ____ EOS output = convert_string_to_embedded input assert_css '.quoteblock', output, 1 assert_css '#justice-to-all.quoteblock.solidarity', output, 1 assert_css '.quoteblock > .attribution', output, 1 end test 'quote block with complex content' do input = <<~'EOS' ____ A famous quote. NOTE: _That_ was inspiring. ____ EOS output = convert_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph', output, 1 assert_css '.quoteblock > blockquote > .paragraph + .admonitionblock', output, 1 end test 'quote block with attribution converted to DocBook' do input = <<~'EOS' [quote, Famous Person, Famous Book (1999)] ____ A famous quote. ____ EOS output = convert_string input, backend: :docbook assert_css 'blockquote', output, 1 assert_css 'blockquote > simpara', output, 1 assert_css 'blockquote > attribution', output, 1 assert_css 'blockquote > attribution > citetitle', output, 1 assert_xpath '//blockquote/attribution/citetitle[text()="Famous Book (1999)"]', output, 1 attribution = xmlnodes_at_xpath '//blockquote/attribution', output, 1 author = attribution.children.first assert_equal 'Famous Person', author.text.strip end test 'epigraph quote block with attribution converted to DocBook' do input = <<~'EOS' [.epigraph, Famous Person, Famous Book (1999)] ____ A famous quote. 
____ EOS output = convert_string input, backend: :docbook assert_css 'epigraph', output, 1 assert_css 'epigraph > simpara', output, 1 assert_css 'epigraph > attribution', output, 1 assert_css 'epigraph > attribution > citetitle', output, 1 assert_xpath '//epigraph/attribution/citetitle[text()="Famous Book (1999)"]', output, 1 attribution = xmlnodes_at_xpath '//epigraph/attribution', output, 1 author = attribution.children.first assert_equal 'Famous Person', author.text.strip end test 'markdown-style quote block with single paragraph and no attribution' do input = <<~'EOS' > A famous quote. > Some more inspiring words. EOS output = convert_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph > p', output, 1 assert_css '.quoteblock > .attribution', output, 0 assert_xpath %(//*[@class="quoteblock"]//p[text()="A famous quote.\nSome more inspiring words."]), output, 1 end test 'lazy markdown-style quote block with single paragraph and no attribution' do input = <<~'EOS' > A famous quote. Some more inspiring words. EOS output = convert_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph > p', output, 1 assert_css '.quoteblock > .attribution', output, 0 assert_xpath %(//*[@class="quoteblock"]//p[text()="A famous quote.\nSome more inspiring words."]), output, 1 end test 'markdown-style quote block with multiple paragraphs and no attribution' do input = <<~'EOS' > A famous quote. > > Some more inspiring words. EOS output = convert_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph > p', output, 2 assert_css '.quoteblock > .attribution', output, 0 assert_xpath %((//*[@class="quoteblock"]//p)[1][text()="A famous quote."]), output, 1 assert_xpath %((//*[@class="quoteblock"]//p)[2][text()="Some more inspiring words."]), output, 1 end test 'markdown-style quote block with multiple blocks and no attribution' do input = <<~'EOS' > A famous quote. > > NOTE: Some more inspiring words. EOS output = convert_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph > p', output, 1 assert_css '.quoteblock > blockquote > .admonitionblock', output, 1 assert_css '.quoteblock > .attribution', output, 0 assert_xpath %((//*[@class="quoteblock"]//p)[1][text()="A famous quote."]), output, 1 assert_xpath %((//*[@class="quoteblock"]//*[@class="admonitionblock note"]//*[@class="content"])[1][normalize-space(text())="Some more inspiring words."]), output, 1 end test 'markdown-style quote block with single paragraph and attribution' do input = <<~'EOS' > A famous quote. > Some more inspiring words. 
> -- Famous Person, Famous Source, Volume 1 (1999) EOS output = convert_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph > p', output, 1 assert_xpath %(//*[@class="quoteblock"]//p[text()="A famous quote.\nSome more inspiring words."]), output, 1 assert_css '.quoteblock > .attribution', output, 1 assert_css '.quoteblock > .attribution > cite', output, 1 assert_css '.quoteblock > .attribution > br + cite', output, 1 assert_xpath '//*[@class="quoteblock"]/*[@class="attribution"]/cite[text()="Famous Source, Volume 1 (1999)"]', output, 1 attribution = xmlnodes_at_xpath '//*[@class="quoteblock"]/*[@class="attribution"]', output, 1 author = attribution.children.first assert_equal "#{decode_char 8212} Famous Person", author.text.strip end test 'markdown-style quote block with only attribution' do input = '> -- Anonymous' output = convert_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > *', output, 0 assert_css '.quoteblock > .attribution', output, 1 assert_xpath %(//*[@class="quoteblock"]//*[@class="attribution"][contains(text(),"Anonymous")]), output, 1 end test 'should parse credit line in markdown-style quote block like positional block attributes' do input = <<~'EOS' > I hold it that a little rebellion now and then is a good thing, > and as necessary in the political world as storms in the physical. -- Thomas Jefferson, https://jeffersonpapers.princeton.edu/selected-documents/james-madison-1[The Papers of Thomas Jefferson, Volume 11] EOS output = convert_string_to_embedded input assert_css '.quoteblock', output, 1 assert_css '.quoteblock cite a[href="https://jeffersonpapers.princeton.edu/selected-documents/james-madison-1"]', output, 1 end test 'quoted paragraph-style quote block with attribution' do input = <<~'EOS' "A famous quote. Some more inspiring words." -- Famous Person, Famous Source, Volume 1 (1999) EOS output = convert_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_xpath %(//*[@class="quoteblock"]/blockquote[normalize-space(text())="A famous quote. Some more inspiring words."]), output, 1 assert_css '.quoteblock > .attribution', output, 1 assert_css '.quoteblock > .attribution > cite', output, 1 assert_css '.quoteblock > .attribution > br + cite', output, 1 assert_xpath '//*[@class="quoteblock"]/*[@class="attribution"]/cite[text()="Famous Source, Volume 1 (1999)"]', output, 1 attribution = xmlnodes_at_xpath '//*[@class="quoteblock"]/*[@class="attribution"]', output, 1 author = attribution.children.first assert_equal "#{decode_char 8212} Famous Person", author.text.strip end test 'should parse credit line in quoted paragraph-style quote block like positional block attributes' do input = <<~'EOS' "I hold it that a little rebellion now and then is a good thing, and as necessary in the political world as storms in the physical." -- Thomas Jefferson, https://jeffersonpapers.princeton.edu/selected-documents/james-madison-1[The Papers of Thomas Jefferson, Volume 11] EOS output = convert_string_to_embedded input assert_css '.quoteblock', output, 1 assert_css '.quoteblock cite a[href="https://jeffersonpapers.princeton.edu/selected-documents/james-madison-1"]', output, 1 end test 'single-line verse block without attribution' do input = <<~'EOS' [verse] ____ A famous verse. 
____ EOS output = convert_string input assert_css '.verseblock', output, 1 assert_css '.verseblock > pre', output, 1 assert_css '.verseblock > .attribution', output, 0 assert_css '.verseblock p', output, 0 assert_xpath '//*[@class="verseblock"]/pre[normalize-space(text())="A famous verse."]', output, 1 end test 'single-line verse block with attribution' do input = <<~'EOS' [verse, Famous Poet, Famous Poem] ____ A famous verse. ____ EOS output = convert_string input assert_css '.verseblock', output, 1 assert_css '.verseblock p', output, 0 assert_css '.verseblock > pre', output, 1 assert_css '.verseblock > .attribution', output, 1 assert_css '.verseblock > .attribution > cite', output, 1 assert_css '.verseblock > .attribution > br + cite', output, 1 assert_xpath '//*[@class="verseblock"]/*[@class="attribution"]/cite[text()="Famous Poem"]', output, 1 attribution = xmlnodes_at_xpath '//*[@class="verseblock"]/*[@class="attribution"]', output, 1 author = attribution.children.first assert_equal "#{decode_char 8212} Famous Poet", author.text.strip end test 'single-line verse block with attribution converted to DocBook' do input = <<~'EOS' [verse, Famous Poet, Famous Poem] ____ A famous verse. ____ EOS output = convert_string input, backend: :docbook assert_css 'blockquote', output, 1 assert_css 'blockquote simpara', output, 0 assert_css 'blockquote > literallayout', output, 1 assert_css 'blockquote > attribution', output, 1 assert_css 'blockquote > attribution > citetitle', output, 1 assert_xpath '//blockquote/attribution/citetitle[text()="Famous Poem"]', output, 1 attribution = xmlnodes_at_xpath '//blockquote/attribution', output, 1 author = attribution.children.first assert_equal 'Famous Poet', author.text.strip end test 'single-line epigraph verse block with attribution converted to DocBook' do input = <<~'EOS' [verse.epigraph, Famous Poet, Famous Poem] ____ A famous verse. ____ EOS output = convert_string input, backend: :docbook assert_css 'epigraph', output, 1 assert_css 'epigraph simpara', output, 0 assert_css 'epigraph > literallayout', output, 1 assert_css 'epigraph > attribution', output, 1 assert_css 'epigraph > attribution > citetitle', output, 1 assert_xpath '//epigraph/attribution/citetitle[text()="Famous Poem"]', output, 1 attribution = xmlnodes_at_xpath '//epigraph/attribution', output, 1 author = attribution.children.first assert_equal 'Famous Poet', author.text.strip end test 'multi-stanza verse block' do input = <<~'EOS' [verse] ____ A famous verse. Stanza two. ____ EOS output = convert_string input assert_xpath '//*[@class="verseblock"]', output, 1 assert_xpath '//*[@class="verseblock"]/pre', output, 1 assert_xpath '//*[@class="verseblock"]//p', output, 0 assert_xpath '//*[@class="verseblock"]/pre[contains(text(), "A famous verse.")]', output, 1 assert_xpath '//*[@class="verseblock"]/pre[contains(text(), "Stanza two.")]', output, 1 end test 'verse block does not contain block elements' do input = <<~'EOS' [verse] ____ A famous verse. .... not a literal .... 
____ EOS output = convert_string input assert_css '.verseblock', output, 1 assert_css '.verseblock > pre', output, 1 assert_css '.verseblock p', output, 0 assert_css '.verseblock .literalblock', output, 0 end test 'verse should have normal subs' do input = <<~'EOS' [verse] ____ A famous verse ____ EOS verse = block_from_string input assert_equal Asciidoctor::Substitutors::NORMAL_SUBS, verse.subs end test 'should not recognize callouts in a verse' do input = <<~'EOS' [verse] ____ La la la <1> ____ <1> Not pointing to a callout EOS output = convert_string_to_embedded input assert_xpath '//pre[text()="La la la <1>"]', output, 1 assert_message @logger, :WARN, ': line 5: no callout found for <1>', Hash end test 'should perform normal subs on a verse block' do input = <<~'EOS' [verse] ____ _GET /groups/link:#group-id[\{group-id\}]_ ____ EOS output = convert_string_to_embedded input assert_includes output, '
<pre class="content"><em>GET /groups/<a href="#group-id">{group-id}</a></em></pre>
    ' end end context "Example Blocks" do test "can convert example block" do input = <<~'EOS' ==== This is an example of an example block. How crazy is that? ==== EOS output = convert_string input assert_xpath '//*[@class="exampleblock"]//p', output, 2 end test 'assigns sequential numbered caption to example block with title' do input = <<~'EOS' .Writing Docs with AsciiDoc ==== Here's how you write AsciiDoc. You just write. ==== .Writing Docs with DocBook ==== Here's how you write DocBook. You futz with XML. ==== EOS doc = document_from_string input assert_equal 1, doc.blocks[0].numeral assert_equal 1, doc.blocks[0].number assert_equal 2, doc.blocks[1].numeral assert_equal 2, doc.blocks[1].number output = doc.convert assert_xpath '(//*[@class="exampleblock"])[1]/*[@class="title"][text()="Example 1. Writing Docs with AsciiDoc"]', output, 1 assert_xpath '(//*[@class="exampleblock"])[2]/*[@class="title"][text()="Example 2. Writing Docs with DocBook"]', output, 1 assert_equal 2, doc.attributes['example-number'] end test 'assigns sequential character caption to example block with title' do input = <<~'EOS' :example-number: @ .Writing Docs with AsciiDoc ==== Here's how you write AsciiDoc. You just write. ==== .Writing Docs with DocBook ==== Here's how you write DocBook. You futz with XML. ==== EOS doc = document_from_string input assert_equal 'A', doc.blocks[0].numeral assert_equal 'A', doc.blocks[0].number assert_equal 'B', doc.blocks[1].numeral assert_equal 'B', doc.blocks[1].number output = doc.convert assert_xpath '(//*[@class="exampleblock"])[1]/*[@class="title"][text()="Example A. Writing Docs with AsciiDoc"]', output, 1 assert_xpath '(//*[@class="exampleblock"])[2]/*[@class="title"][text()="Example B. Writing Docs with DocBook"]', output, 1 assert_equal 'B', doc.attributes['example-number'] end test 'should increment counter for example even when example-number is locked by the API' do input = <<~'EOS' .Writing Docs with AsciiDoc ==== Here's how you write AsciiDoc. You just write. ==== .Writing Docs with DocBook ==== Here's how you write DocBook. You futz with XML. ==== EOS doc = document_from_string input, attributes: { 'example-number' => '`' } output = doc.convert assert_xpath '(//*[@class="exampleblock"])[1]/*[@class="title"][text()="Example a. Writing Docs with AsciiDoc"]', output, 1 assert_xpath '(//*[@class="exampleblock"])[2]/*[@class="title"][text()="Example b. Writing Docs with DocBook"]', output, 1 assert_equal 'b', doc.attributes['example-number'] end test 'should use explicit caption if specified' do input = <<~'EOS' [caption="Look! "] .Writing Docs with AsciiDoc ==== Here's how you write AsciiDoc. You just write. ==== EOS doc = document_from_string input assert_nil doc.blocks[0].numeral output = doc.convert assert_xpath '(//*[@class="exampleblock"])[1]/*[@class="title"][text()="Look! Writing Docs with AsciiDoc"]', output, 1 refute doc.attributes.key? 
'example-number' end test 'automatic caption can be turned off and on and modified' do input = <<~'EOS' .first example ==== an example ==== :caption: .second example ==== another example ==== :caption!: :example-caption: Exhibit .third example ==== yet another example ==== EOS output = convert_string_to_embedded input assert_xpath '/*[@class="exampleblock"]', output, 3 assert_xpath '(/*[@class="exampleblock"])[1]/*[@class="title"][starts-with(text(), "Example ")]', output, 1 assert_xpath '(/*[@class="exampleblock"])[2]/*[@class="title"][text()="second example"]', output, 1 assert_xpath '(/*[@class="exampleblock"])[3]/*[@class="title"][starts-with(text(), "Exhibit ")]', output, 1 end test 'should use explicit caption if specified even if block-specific global caption is disabled' do input = <<~'EOS' :!example-caption: [caption="Look! "] .Writing Docs with AsciiDoc ==== Here's how you write AsciiDoc. You just write. ==== EOS doc = document_from_string input assert_nil doc.blocks[0].numeral output = doc.convert assert_xpath '(//*[@class="exampleblock"])[1]/*[@class="title"][text()="Look! Writing Docs with AsciiDoc"]', output, 1 refute doc.attributes.key? 'example-number' end test 'should use global caption if specified even if block-specific global caption is disabled' do input = <<~'EOS' :!example-caption: :caption: Look!{sp} .Writing Docs with AsciiDoc ==== Here's how you write AsciiDoc. You just write. ==== EOS doc = document_from_string input assert_nil doc.blocks[0].numeral output = doc.convert assert_xpath '(//*[@class="exampleblock"])[1]/*[@class="title"][text()="Look! Writing Docs with AsciiDoc"]', output, 1 refute doc.attributes.key? 'example-number' end test 'should not process caption attribute on block that does not support a caption' do input = <<~'EOS' [caption="Look! "] .No caption here -- content -- EOS doc = document_from_string input assert_nil doc.blocks[0].caption assert_equal 'Look! ', (doc.blocks[0].attr 'caption') output = doc.convert assert_xpath '(//*[@class="openblock"])[1]/*[@class="title"][text()="No caption here"]', output, 1 end test 'should create details/summary set if collapsible option is set' do input = <<~'EOS' .Toggle Me [%collapsible] ==== This content is revealed when the user clicks the words "Toggle Me". ==== EOS output = convert_string_to_embedded input assert_css 'details', output, 1 assert_css 'details[open]', output, 0 assert_css 'details > summary.title', output, 1 assert_xpath '//details/summary[text()="Toggle Me"]', output, 1 assert_css 'details > summary.title + .content', output, 1 assert_css 'details > summary.title + .content p', output, 1 end test 'should open details/summary set if collapsible and open options are set' do input = <<~'EOS' .Toggle Me [%collapsible%open] ==== This content is revealed when the user clicks the words "Toggle Me". ==== EOS output = convert_string_to_embedded input assert_css 'details', output, 1 assert_css 'details[open]', output, 1 assert_css 'details > summary.title', output, 1 assert_xpath '//details/summary[text()="Toggle Me"]', output, 1 end test 'should add default summary element if collapsible option is set and title is not specifed' do input = <<~'EOS' [%collapsible] ==== This content is revealed when the user clicks the words "Details". 
==== EOS output = convert_string_to_embedded input assert_css 'details', output, 1 assert_css 'details > summary.title', output, 1 assert_xpath '//details/summary[text()="Details"]', output, 1 end test 'should not allow collapsible block to increment example number' do input = <<~'EOS' .Before ==== before ==== .Show Me The Goods [%collapsible] ==== This content is revealed when the user clicks the words "Show Me The Goods". ==== .After ==== after ==== EOS output = convert_string_to_embedded input assert_xpath '//*[@class="title"][text()="Example 1. Before"]', output, 1 assert_xpath '//*[@class="title"][text()="Example 2. After"]', output, 1 assert_css 'details', output, 1 assert_css 'details > summary.title', output, 1 assert_xpath '//details/summary[text()="Show Me The Goods"]', output, 1 end test 'should warn if example block is not terminated' do input = <<~'EOS' outside ==== inside still inside eof EOS output = convert_string_to_embedded input assert_xpath '/*[@class="exampleblock"]', output, 1 assert_message @logger, :WARN, ': line 3: unterminated example block', Hash end end context 'Admonition Blocks' do test 'caption block-level attribute should be used as caption' do input = <<~'EOS' :tip-caption: Pro Tip [caption="Pro Tip"] TIP: Override the caption of an admonition block using an attribute entry EOS output = convert_string_to_embedded input assert_xpath '/*[@class="admonitionblock tip"]//*[@class="icon"]/*[@class="title"][text()="Pro Tip"]', output, 1 end test 'can override caption of admonition block using document attribute' do input = <<~'EOS' :tip-caption: Pro Tip TIP: Override the caption of an admonition block using an attribute entry EOS output = convert_string_to_embedded input assert_xpath '/*[@class="admonitionblock tip"]//*[@class="icon"]/*[@class="title"][text()="Pro Tip"]', output, 1 end test 'blank caption document attribute should not blank admonition block caption' do input = <<~'EOS' :caption: TIP: Override the caption of an admonition block using an attribute entry EOS output = convert_string_to_embedded input assert_xpath '/*[@class="admonitionblock tip"]//*[@class="icon"]/*[@class="title"][text()="Tip"]', output, 1 end end context "Preformatted Blocks" do test 'should separate adjacent paragraphs and listing into blocks' do input = <<~'EOS' paragraph 1 ---- listing content ---- paragraph 2 EOS output = convert_string_to_embedded input assert_xpath '/*[@class="paragraph"]/p', output, 2 assert_xpath '/*[@class="listingblock"]', output, 1 assert_xpath '(/*[@class="paragraph"]/following-sibling::*)[1][@class="listingblock"]', output, 1 end test 'should warn if listing block is not terminated' do input = <<~'EOS' outside ---- inside still inside eof EOS output = convert_string_to_embedded input assert_xpath '/*[@class="listingblock"]', output, 1 assert_message @logger, :WARN, ': line 3: unterminated listing block', Hash end test 'should not crash when converting verbatim block that has no lines' do [%(----\n----), %(....\n....)].each do |input| output = convert_string_to_embedded input assert_css 'pre', output, 1 assert_css 'pre:empty', output, 1 end end test 'should return content as empty string for verbatim or raw block that has no lines' do [%(----\n----), %(....\n....)].each do |input| doc = document_from_string input assert_equal '', doc.blocks[0].content end end test 'should preserve newlines in literal block' do input = <<~'EOS' .... line one line two line three .... 
EOS [true, false].each do |standalone| output = convert_string input, standalone: standalone assert_xpath '//pre', output, 1 assert_xpath '//pre/text()', output, 1 text = xmlnodes_at_xpath('//pre/text()', output, 1).text lines = text.lines assert_equal 5, lines.size expected = "line one\n\nline two\n\nline three".lines assert_equal expected, lines blank_lines = output.scan(/\n[ \t]*\n/).size assert blank_lines >= 2 end end test 'should preserve newlines in listing block' do input = <<~'EOS' ---- line one line two line three ---- EOS [true, false].each do |standalone| output = convert_string input, standalone: standalone assert_xpath '//pre', output, 1 assert_xpath '//pre/text()', output, 1 text = xmlnodes_at_xpath('//pre/text()', output, 1).text lines = text.lines assert_equal 5, lines.size expected = "line one\n\nline two\n\nline three".lines assert_equal expected, lines blank_lines = output.scan(/\n[ \t]*\n/).size assert blank_lines >= 2 end end test 'should preserve newlines in verse block' do input = <<~'EOS' -- [verse] ____ line one line two line three ____ -- EOS [true, false].each do |standalone| output = convert_string input, standalone: standalone assert_xpath '//*[@class="verseblock"]/pre', output, 1 assert_xpath '//*[@class="verseblock"]/pre/text()', output, 1 text = xmlnodes_at_xpath('//*[@class="verseblock"]/pre/text()', output, 1).text lines = text.lines assert_equal 5, lines.size expected = "line one\n\nline two\n\nline three".lines assert_equal expected, lines blank_lines = output.scan(/\n[ \t]*\n/).size assert blank_lines >= 2 end end test 'should strip leading and trailing blank lines when converting verbatim block' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS [subs="attributes"] .... first line last line {empty} .... 
EOS doc = document_from_string input, standalone: false block = doc.blocks.first assert_equal ['', '', ' first line', '', 'last line', '', '{empty}', ''], block.lines result = doc.convert assert_xpath %(//pre[text()=" first line\n\nlast line"]), result, 1 end test 'should process block with CRLF line endings' do input = <<~EOS ----\r source line 1\r source line 2\r ----\r EOS output = convert_string_to_embedded input assert_xpath '/*[@class="listingblock"]//pre', output, 1 assert_xpath %(/*[@class="listingblock"]//pre[text()="source line 1\nsource line 2"]), output, 1 end test 'should remove block indent if indent attribute is 0' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS [indent="0"] ---- def names @names.split end ---- EOS # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 expected = <<~EOS.chop def names @names.split end EOS output = convert_string_to_embedded input assert_css 'pre', output, 1 assert_css '.listingblock pre', output, 1 result = xmlnodes_at_xpath('//pre', output, 1).text assert_equal expected, result end test 'should not remove block indent if indent attribute is -1' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS [indent="-1"] ---- def names @names.split end ---- EOS expected = (input.lines.slice 2, 5).join.chop output = convert_string_to_embedded input assert_css 'pre', output, 1 assert_css '.listingblock pre', output, 1 result = xmlnodes_at_xpath('//pre', output, 1).text assert_equal expected, result end test 'should set block indent to value specified by indent attribute' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS [indent="1"] ---- def names @names.split end ---- EOS expected = (input.lines.slice 2, 5).map {|l| l.sub ' ', ' ' }.join.chop output = convert_string_to_embedded input assert_css 'pre', output, 1 assert_css '.listingblock pre', output, 1 result = xmlnodes_at_xpath('//pre', output, 1).text assert_equal expected, result end test 'should set block indent to value specified by indent document attribute' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS :source-indent: 1 [source,ruby] ---- def names @names.split end ---- EOS expected = (input.lines.slice 4, 5).map {|l| l.sub ' ', ' ' }.join.chop output = convert_string_to_embedded input assert_css 'pre', output, 1 assert_css '.listingblock pre', output, 1 result = xmlnodes_at_xpath('//pre', output, 1).text assert_equal expected, result end test 'should expand tabs if tabsize attribute is positive' do input = <<~EOS :tabsize: 4 [indent=0] ---- \tdef names \t\t@names.split \tend ---- EOS # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 expected = <<~EOS.chop def names @names.split end EOS output = convert_string_to_embedded input assert_css 'pre', output, 1 assert_css '.listingblock pre', output, 1 result = xmlnodes_at_xpath('//pre', output, 1).text assert_equal expected, result end test 'literal block should honor nowrap option' do input = <<~'EOS' [options="nowrap"] ---- Do not wrap me if I get too long. ---- EOS output = convert_string_to_embedded input assert_css 'pre.nowrap', output, 1 end test 'literal block should set nowrap class if prewrap document attribute is disabled' do input = <<~'EOS' :prewrap!: ---- Do not wrap me if I get too long. 
---- EOS output = convert_string_to_embedded input assert_css 'pre.nowrap', output, 1 end test 'should preserve guard in front of callout if icons are not enabled' do input = <<~'EOS' ---- puts 'Hello, World!' # <1> puts 'Goodbye, World ;(' # <2> ---- EOS result = convert_string_to_embedded input assert_include ' # (1)', result assert_include ' # (2)', result end test 'should preserve guard around callout if icons are not enabled' do input = <<~'EOS' ---- ---- EOS result = convert_string_to_embedded input assert_include ' <!--(1)-->', result assert_include ' <!--(2)-->', result end test 'literal block should honor explicit subs list' do input = <<~'EOS' [subs="verbatim,quotes"] ---- Map *attributes*; //<1> ---- EOS block = block_from_string input assert_equal [:specialcharacters, :callouts, :quotes], block.subs output = block.convert assert_includes output, 'Map<String, String> attributes;' assert_xpath '//pre/b[text()="(1)"]', output, 1 end test 'should be able to disable callouts for literal block' do input = <<~'EOS' [subs="specialcharacters"] ---- No callout here <1> ---- EOS block = block_from_string input assert_equal [:specialcharacters], block.subs output = block.convert assert_xpath '//pre/b[text()="(1)"]', output, 0 end test 'listing block should honor explicit subs list' do input = <<~'EOS' [subs="specialcharacters,quotes"] ---- $ *python functional_tests.py* Traceback (most recent call last): File "functional_tests.py", line 4, in assert 'Django' in browser.title AssertionError ---- EOS output = convert_string_to_embedded input assert_css '.listingblock pre', output, 1 assert_css '.listingblock pre strong', output, 1 assert_css '.listingblock pre em', output, 0 input2 = <<~'EOS' [subs="specialcharacters,macros"] ---- $ pass:quotes[*python functional_tests.py*] Traceback (most recent call last): File "functional_tests.py", line 4, in assert pass:quotes['Django'] in browser.title AssertionError ---- EOS output2 = convert_string_to_embedded input2 # FIXME JRuby is adding extra trailing newlines in the second document, # for now, rstrip is necessary assert_equal output.rstrip, output2.rstrip end test 'first character of block title may be a period if not followed by space' do input = <<~'EOS' ..gitignore ---- /.bundle/ /build/ /Gemfile.lock ---- EOS output = convert_string_to_embedded input assert_xpath '//*[@class="title"][text()=".gitignore"]', output end test 'listing block without title should generate screen element in docbook' do input = <<~'EOS' ---- listing block ---- EOS output = convert_string_to_embedded input, backend: 'docbook' assert_xpath '/screen[text()="listing block"]', output, 1 end test 'listing block with title should generate screen element inside formalpara element in docbook' do input = <<~'EOS' .title ---- listing block ---- EOS output = convert_string_to_embedded input, backend: 'docbook' assert_xpath '/formalpara', output, 1 assert_xpath '/formalpara/title[text()="title"]', output, 1 assert_xpath '/formalpara/para/screen[text()="listing block"]', output, 1 end test 'should not prepend caption to title of listing block with title if listing-caption attribute is not set' do input = <<~'EOS' .title ---- listing block content ---- EOS output = convert_string_to_embedded input assert_xpath '/*[@class="listingblock"][1]/*[@class="title"][text()="title"]', output, 1 end test 'should prepend caption specified by listing-caption attribute and number to title of listing block with title' do input = <<~'EOS' :listing-caption: Listing .title ---- listing block 
content ---- EOS output = convert_string_to_embedded input assert_xpath '/*[@class="listingblock"][1]/*[@class="title"][text()="Listing 1. title"]', output, 1 end test 'should prepend caption specified by caption attribute on listing block even if listing-caption attribute is not set' do input = <<~'EOS' [caption="Listing {counter:listing-number}. "] .Behold! ---- listing block content ---- EOS output = convert_string_to_embedded input assert_xpath '/*[@class="listingblock"][1]/*[@class="title"][text()="Listing 1. Behold!"]', output, 1 end test 'listing block without an explicit style and with a second positional argument should be promoted to a source block' do input = <<~'EOS' [,ruby] ---- puts 'Hello, Ruby!' ---- EOS matches = (document_from_string input).find_by context: :listing, style: 'source' assert_equal 1, matches.length assert_equal 'ruby', (matches[0].attr 'language') end test 'listing block without an explicit style should be promoted to a source block if source-language is set' do input = <<~'EOS' :source-language: ruby ---- puts 'Hello, Ruby!' ---- EOS matches = (document_from_string input).find_by context: :listing, style: 'source' assert_equal 1, matches.length assert_equal 'ruby', (matches[0].attr 'language') end test 'listing block with an explicit style and a second positional argument should not be promoted to a source block' do input = <<~'EOS' [listing,ruby] ---- puts 'Hello, Ruby!' ---- EOS matches = (document_from_string input).find_by context: :listing assert_equal 1, matches.length assert_equal 'listing', matches[0].style assert_nil matches[0].attr 'language' end test 'listing block with an explicit style should not be promoted to a source block if source-language is set' do input = <<~'EOS' :source-language: ruby [listing] ---- puts 'Hello, Ruby!' ---- EOS matches = (document_from_string input).find_by context: :listing assert_equal 1, matches.length assert_equal 'listing', matches[0].style assert_nil matches[0].attr 'language' end test 'source block with no title or language should generate screen element in docbook' do input = <<~'EOS' [source] ---- source block ---- EOS output = convert_string_to_embedded input, backend: 'docbook' assert_xpath '/screen[@linenumbering="unnumbered"][text()="source block"]', output, 1 end test 'source block with title and no language should generate screen element inside formalpara element for docbook' do input = <<~'EOS' [source] .title ---- source block ---- EOS output = convert_string_to_embedded input, backend: 'docbook' assert_xpath '/formalpara', output, 1 assert_xpath '/formalpara/title[text()="title"]', output, 1 assert_xpath '/formalpara/para/screen[@linenumbering="unnumbered"][text()="source block"]', output, 1 end end context "Open Blocks" do test "can convert open block" do input = <<~'EOS' -- This is an open block. It can span multiple lines. -- EOS output = convert_string input assert_xpath '//*[@class="openblock"]//p', output, 2 end test "open block can contain another block" do input = <<~'EOS' -- This is an open block. It can span multiple lines. ____ It can hold great quotes like this one. ____ -- EOS output = convert_string input assert_xpath '//*[@class="openblock"]//p', output, 3 assert_xpath '//*[@class="openblock"]//*[@class="quoteblock"]', output, 1 end test 'should transfer id and reftext on open block to DocBook output' do input = <<~'EOS' Check out that <>! [[open,Open Block]] -- This is an open block. TIP: An open block can have other blocks inside of it. 
-- Back to our regularly scheduled programming. EOS output = convert_string input, backend: :docbook, keep_namespaces: true assert_css 'article:root > para[xml|id="open"]', output, 1 assert_css 'article:root > para[xreflabel="Open Block"]', output, 1 assert_css 'article:root > simpara', output, 2 assert_css 'article:root > para', output, 1 assert_css 'article:root > para > simpara', output, 1 assert_css 'article:root > para > tip', output, 1 end test 'should transfer id and reftext on open paragraph to DocBook output' do input = <<~'EOS' [open#openpara,reftext="Open Paragraph"] This is an open paragraph. EOS output = convert_string input, backend: :docbook, keep_namespaces: true assert_css 'article:root > simpara', output, 1 assert_css 'article:root > simpara[xml|id="openpara"]', output, 1 assert_css 'article:root > simpara[xreflabel="Open Paragraph"]', output, 1 end test 'should transfer title on open block to DocBook output' do input = <<~'EOS' .Behold the open -- This is an open block with a title. -- EOS output = convert_string input, backend: :docbook assert_css 'article > formalpara', output, 1 assert_css 'article > formalpara > *', output, 2 assert_css 'article > formalpara > title', output, 1 assert_xpath '/article/formalpara/title[text()="Behold the open"]', output, 1 assert_css 'article > formalpara > para', output, 1 assert_css 'article > formalpara > para > simpara', output, 1 end test 'should transfer title on open paragraph to DocBook output' do input = <<~'EOS' .Behold the open This is an open paragraph with a title. EOS output = convert_string input, backend: :docbook assert_css 'article > formalpara', output, 1 assert_css 'article > formalpara > *', output, 2 assert_css 'article > formalpara > title', output, 1 assert_xpath '/article/formalpara/title[text()="Behold the open"]', output, 1 assert_css 'article > formalpara > para', output, 1 assert_css 'article > formalpara > para[text()="This is an open paragraph with a title."]', output, 1 end test 'should transfer role on open block to DocBook output' do input = <<~'EOS' [.container] -- This is an open block. It holds stuff. -- EOS output = convert_string input, backend: :docbook assert_css 'article > para[role=container]', output, 1 assert_css 'article > para[role=container] > simpara', output, 1 end test 'should transfer role on open paragraph to DocBook output' do input = <<~'EOS' [.container] This is an open block. It holds stuff. EOS output = convert_string input, backend: :docbook assert_css 'article > simpara[role=container]', output, 1 end end context 'Passthrough Blocks' do test 'can parse a passthrough block' do input = <<~'EOS' ++++ This is a passthrough block. ++++ EOS block = block_from_string input refute_nil block assert_equal 1, block.lines.size assert_equal 'This is a passthrough block.', block.source end test 'does not perform subs on a passthrough block by default' do input = <<~'EOS' :type: passthrough ++++ This is a '{type}' block. http://asciidoc.org image:tiger.png[] ++++ EOS expected = %(This is a '{type}' block.\nhttp://asciidoc.org\nimage:tiger.png[]) output = convert_string_to_embedded input assert_equal expected, output.strip end test 'does not perform subs on a passthrough block with pass style by default' do input = <<~'EOS' :type: passthrough [pass] ++++ This is a '{type}' block. 
http://asciidoc.org image:tiger.png[] ++++ EOS expected = %(This is a '{type}' block.\nhttp://asciidoc.org\nimage:tiger.png[]) output = convert_string_to_embedded input assert_equal expected, output.strip end test 'passthrough block honors explicit subs list' do input = <<~'EOS' :type: passthrough [subs="attributes,quotes,macros"] ++++ This is a _{type}_ block. http://asciidoc.org ++++ EOS expected = %(This is a passthrough block.\nhttp://asciidoc.org) output = convert_string_to_embedded input assert_equal expected, output.strip end test 'should strip leading and trailing blank lines when converting raw block' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS ++++ line above ++++ ++++ first line last line ++++ ++++ line below ++++ EOS doc = document_from_string input, standalone: false block = doc.blocks[1] assert_equal ['', '', ' first line', '', 'last line', '', ''], block.lines result = doc.convert assert_equal "line above\n first line\n\nlast line\nline below", result, 1 end end context 'Math blocks' do test 'should not crash when converting stem block that has no lines' do input = <<~'EOS' [stem] ++++ ++++ EOS output = convert_string_to_embedded input assert_css '.stemblock', output, 1 end test 'should return content as empty string for stem or pass block that has no lines' do [%(++++\n++++), %([stem]\n++++\n++++)].each do |input| doc = document_from_string input assert_equal '', doc.blocks[0].content end end test 'should add LaTeX math delimiters around latexmath block content' do input = <<~'EOS' [latexmath] ++++ \sqrt{3x-1}+(1+x)^2 < y ++++ EOS output = convert_string_to_embedded input assert_css '.stemblock', output, 1 nodes = xmlnodes_at_xpath '//*[@class="content"]/child::text()', output assert_equal '\[\sqrt{3x-1}+(1+x)^2 < y\]', nodes.first.to_s.strip end test 'should not add LaTeX math delimiters around latexmath block content if already present' do input = <<~'EOS' [latexmath] ++++ \[\sqrt{3x-1}+(1+x)^2 < y\] ++++ EOS output = convert_string_to_embedded input assert_css '.stemblock', output, 1 nodes = xmlnodes_at_xpath '//*[@class="content"]/child::text()', output assert_equal '\[\sqrt{3x-1}+(1+x)^2 < y\]', nodes.first.to_s.strip end test 'should display latexmath block in alt of equation in DocBook backend' do input = <<~'EOS' [latexmath] ++++ \sqrt{3x-1}+(1+x)^2 < y ++++ EOS expect = <<~'EOS' EOS output = convert_string_to_embedded input, backend: :docbook assert_equal expect.strip, output.strip end test 'should set autoNumber option for latexmath to none by default' do input = <<~'EOS' :stem: latexmath [stem] ++++ y = x^2 ++++ EOS output = convert_string input assert_includes output, 'TeX: { equationNumbers: { autoNumber: "none" } }' end test 'should set autoNumber option for latexmath to none if eqnums is set to none' do input = <<~'EOS' :stem: latexmath :eqnums: none [stem] ++++ y = x^2 ++++ EOS output = convert_string input assert_includes output, 'TeX: { equationNumbers: { autoNumber: "none" } }' end test 'should set autoNumber option for latexmath to AMS if eqnums is set' do input = <<~'EOS' :stem: latexmath :eqnums: [stem] ++++ \begin{equation} y = x^2 \end{equation} ++++ EOS output = convert_string input assert_includes output, 'TeX: { equationNumbers: { autoNumber: "AMS" } }' end test 'should set autoNumber option for latexmath to all if eqnums is set to all' do input = <<~'EOS' :stem: latexmath :eqnums: all [stem] ++++ y = x^2 ++++ EOS output = convert_string input assert_includes output, 'TeX: { 
equationNumbers: { autoNumber: "all" } }' end test 'should not split equation in AsciiMath block at single newline' do input = <<~'EOS' [asciimath] ++++ f: bbb"N" -> bbb"N" f: x |-> x + 1 ++++ EOS expected = <<~'EOS'.chop \$f: bbb"N" -> bbb"N" f: x |-> x + 1\$ EOS output = convert_string_to_embedded input assert_css '.stemblock', output, 1 nodes = xmlnodes_at_xpath '//*[@class="content"]', output assert_equal expected, nodes.first.inner_html.strip end test 'should split equation in AsciiMath block at escaped newline' do input = <<~'EOS' [asciimath] ++++ f: bbb"N" -> bbb"N" \ f: x |-> x + 1 ++++ EOS expected = <<~'EOS'.chop \$f: bbb"N" -> bbb"N"\$ \$f: x |-> x + 1\$ EOS output = convert_string_to_embedded input assert_css '.stemblock', output, 1 nodes = xmlnodes_at_xpath '//*[@class="content"]', output assert_equal expected, nodes.first.inner_html.strip end test 'should split equation in AsciiMath block at sequence of escaped newlines' do input = <<~'EOS' [asciimath] ++++ f: bbb"N" -> bbb"N" \ \ f: x |-> x + 1 ++++ EOS expected = <<~'EOS'.chop \$f: bbb"N" -> bbb"N"\$
    \$f: x |-> x + 1\$ EOS output = convert_string_to_embedded input assert_css '.stemblock', output, 1 nodes = xmlnodes_at_xpath '//*[@class="content"]', output assert_equal expected, nodes.first.inner_html.strip end test 'should split equation in AsciiMath block at newline sequence and preserve breaks' do input = <<~'EOS' [asciimath] ++++ f: bbb"N" -> bbb"N" f: x |-> x + 1 ++++ EOS expected = <<~'EOS'.chop \$f: bbb"N" -> bbb"N"\$

    \$f: x |-> x + 1\$ EOS output = convert_string_to_embedded input assert_css '.stemblock', output, 1 nodes = xmlnodes_at_xpath '//*[@class="content"]', output assert_equal expected, nodes.first.inner_html.strip end test 'should add AsciiMath delimiters around asciimath block content' do input = <<~'EOS' [asciimath] ++++ sqrt(3x-1)+(1+x)^2 < y ++++ EOS output = convert_string_to_embedded input assert_css '.stemblock', output, 1 nodes = xmlnodes_at_xpath '//*[@class="content"]/child::text()', output assert_equal '\$sqrt(3x-1)+(1+x)^2 < y\$', nodes.first.to_s.strip end test 'should not add AsciiMath delimiters around asciimath block content if already present' do input = <<~'EOS' [asciimath] ++++ \$sqrt(3x-1)+(1+x)^2 < y\$ ++++ EOS output = convert_string_to_embedded input assert_css '.stemblock', output, 1 nodes = xmlnodes_at_xpath '//*[@class="content"]/child::text()', output assert_equal '\$sqrt(3x-1)+(1+x)^2 < y\$', nodes.first.to_s.strip end test 'should convert contents of asciimath block to MathML in DocBook output if asciimath gem is available' do asciimath_available = !(Asciidoctor::Helpers.require_library 'asciimath', true, :ignore).nil? input = <<~'EOS' [asciimath] ++++ x+b/(2a)<+-sqrt((b^2)/(4a^2)-c/a) ++++ [asciimath] ++++ ++++ EOS expect = <<~'EOS'.chop x+b2a<±b24a2ca EOS using_memory_logger do |logger| doc = document_from_string input, backend: :docbook, standalone: false actual = doc.convert if asciimath_available assert_equal expect, actual.strip assert_equal :loaded, doc.converter.instance_variable_get(:@asciimath_status) else assert_message logger, :WARN, 'optional gem \'asciimath\' is not available. Functionality disabled.' assert_equal :unavailable, doc.converter.instance_variable_get(:@asciimath_status) end end end test 'should output title for latexmath block if defined' do input = <<~'EOS' .The Lorenz Equations [latexmath] ++++ \begin{aligned} \dot{x} & = \sigma(y-x) \\ \dot{y} & = \rho x - y - xz \\ \dot{z} & = -\beta z + xy \end{aligned} ++++ EOS output = convert_string_to_embedded input assert_css '.stemblock', output, 1 assert_css '.stemblock .title', output, 1 assert_xpath '//*[@class="title"][text()="The Lorenz Equations"]', output, 1 end test 'should output title for asciimath block if defined' do input = <<~'EOS' .Simple fraction [asciimath] ++++ a//b ++++ EOS output = convert_string_to_embedded input assert_css '.stemblock', output, 1 assert_css '.stemblock .title', output, 1 assert_xpath '//*[@class="title"][text()="Simple fraction"]', output, 1 end test 'should add AsciiMath delimiters around stem block content if stem attribute is asciimath, empty, or not set' do input = <<~'EOS' [stem] ++++ sqrt(3x-1)+(1+x)^2 < y ++++ EOS [ {}, { 'stem' => '' }, { 'stem' => 'asciimath' }, { 'stem' => 'bogus' }, ].each do |attributes| output = convert_string_to_embedded input, attributes: attributes assert_css '.stemblock', output, 1 nodes = xmlnodes_at_xpath '//*[@class="content"]/child::text()', output assert_equal '\$sqrt(3x-1)+(1+x)^2 < y\$', nodes.first.to_s.strip end end test 'should add LaTeX math delimiters around stem block content if stem attribute is latexmath, latex, or tex' do input = <<~'EOS' [stem] ++++ \sqrt{3x-1}+(1+x)^2 < y ++++ EOS [ { 'stem' => 'latexmath' }, { 'stem' => 'latex' }, { 'stem' => 'tex' }, ].each do |attributes| output = convert_string_to_embedded input, attributes: attributes assert_css '.stemblock', output, 1 nodes = xmlnodes_at_xpath '//*[@class="content"]/child::text()', output assert_equal '\[\sqrt{3x-1}+(1+x)^2 < y\]', 
nodes.first.to_s.strip end end test 'should allow stem style to be set using second positional argument of block attributes' do input = <<~'EOS' :stem: latexmath [stem,asciimath] ++++ sqrt(3x-1)+(1+x)^2 < y ++++ EOS doc = document_from_string input stemblock = doc.blocks[0] assert_equal :stem, stemblock.context assert_equal 'asciimath', stemblock.attributes['style'] output = doc.convert standalone: false assert_css '.stemblock', output, 1 nodes = xmlnodes_at_xpath '//*[@class="content"]/child::text()', output assert_equal '\$sqrt(3x-1)+(1+x)^2 < y\$', nodes.first.to_s.strip end end context 'Custom Blocks' do test 'should not warn if block style is unknown' do input = <<~'EOS' [foo] -- bar -- EOS convert_string_to_embedded input assert_empty @logger.messages end test 'should log debug message if block style is unknown and debug level is enabled' do input = <<~'EOS' [foo] -- bar -- EOS using_memory_logger Logger::Severity::DEBUG do |logger| convert_string_to_embedded input assert_message logger, :DEBUG, ': line 2: unknown style for open block: foo', Hash end end end context 'Metadata' do test 'block title above section gets carried over to first block in section' do input = <<~'EOS' .Title == Section paragraph EOS output = convert_string input assert_xpath '//*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="paragraph"]/*[@class="title"][text()="Title"]', output, 1 assert_xpath '//*[@class="paragraph"]/p[text()="paragraph"]', output, 1 end test 'block title above document title demotes document title to a section title' do input = <<~'EOS' .Block title = Section Title section paragraph EOS output = convert_string input assert_xpath '//*[@id="header"]/*', output, 0 assert_xpath '//*[@id="preamble"]/*', output, 0 assert_xpath '//*[@id="content"]/h1[text()="Section Title"]', output, 1 assert_xpath '//*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="paragraph"]/*[@class="title"][text()="Block title"]', output, 1 assert_message @logger, :ERROR, ': line 2: level 0 sections can only be used when doctype is book', Hash end test 'block title above document title gets carried over to first block in first section if no preamble' do input = <<~'EOS' :doctype: book .Block title = Document Title == First Section paragraph EOS doc = document_from_string input # NOTE block title demotes document title to level-0 section refute doc.header? output = doc.convert assert_xpath '//*[@class="sect1"]//*[@class="paragraph"]/*[@class="title"][text()="Block title"]', output, 1 end test 'should apply substitutions to a block title in normal order' do input = <<~'EOS' .{link-url}[{link-text}]{tm} The one and only! 
EOS output = convert_string_to_embedded input, attributes: { 'link-url' => 'https://acme.com', 'link-text' => 'ACME', 'tm' => '(TM)', } assert_css '.title', output, 1 assert_css '.title a[href="https://acme.com"]', output, 1 assert_xpath %(//*[@class="title"][contains(text(),"#{decode_char 8482}")]), output, 1 end test 'empty attribute list should not appear in output' do input = <<~'EOS' [] -- Block content -- EOS output = convert_string_to_embedded input assert_includes output, 'Block content' refute_includes output, '[]' end test 'empty block anchor should not appear in output' do input = <<~'EOS' [[]] -- Block content -- EOS output = convert_string_to_embedded input assert_includes output, 'Block content' refute_includes output, '[[]]' end end context 'Images' do test 'can convert block image with alt text defined in macro' do input = 'image::images/tiger.png[Tiger]' output = convert_string_to_embedded input assert_xpath '/*[@class="imageblock"]//img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 end test 'converts SVG image using img element by default' do input = 'image::tiger.svg[Tiger]' output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SERVER assert_xpath '/*[@class="imageblock"]//img[@src="tiger.svg"][@alt="Tiger"]', output, 1 end test 'converts interactive SVG image with alt text using object element' do input = <<~'EOS' :imagesdir: images [%interactive] image::tiger.svg[Tiger,100] EOS output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SERVER assert_xpath '/*[@class="imageblock"]//object[@type="image/svg+xml"][@data="images/tiger.svg"][@width="100"]/span[@class="alt"][text()="Tiger"]', output, 1 end test 'converts SVG image with alt text using img element when safe mode is secure' do input = <<~'EOS' [%interactive] image::images/tiger.svg[Tiger,100] EOS output = convert_string_to_embedded input assert_xpath '/*[@class="imageblock"]//img[@src="images/tiger.svg"][@alt="Tiger"]', output, 1 end test 'inserts fallback image for SVG inside object element using same dimensions' do input = <<~'EOS' :imagesdir: images [%interactive] image::tiger.svg[Tiger,100,fallback=tiger.png] EOS output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SERVER assert_xpath '/*[@class="imageblock"]//object[@type="image/svg+xml"][@data="images/tiger.svg"][@width="100"]/img[@src="images/tiger.png"][@width="100"]', output, 1 end test 'detects SVG image URI that contains a query string' do input = <<~'EOS' :imagesdir: images [%interactive] image::http://example.org/tiger.svg?foo=bar[Tiger,100] EOS output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SERVER assert_xpath '/*[@class="imageblock"]//object[@type="image/svg+xml"][@data="http://example.org/tiger.svg?foo=bar"][@width="100"]/span[@class="alt"][text()="Tiger"]', output, 1 end test 'detects SVG image when format attribute is svg' do input = <<~'EOS' :imagesdir: images [%interactive] image::http://example.org/tiger-svg[Tiger,100,format=svg] EOS output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SERVER assert_xpath '/*[@class="imageblock"]//object[@type="image/svg+xml"][@data="http://example.org/tiger-svg"][@width="100"]/span[@class="alt"][text()="Tiger"]', output, 1 end test 'converts to inline SVG image when inline option is set on block' do input = <<~'EOS' :imagesdir: fixtures [%inline] image::circle.svg[Tiger,100] EOS output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docdir' => testdir } 
assert_match(/]*width="100"[^>]*>/, output, 1) refute_match(/]*width="500"[^>]*>/, output) refute_match(/]*height="500"[^>]*>/, output) refute_match(/]*style="[^>]*>/, output) end test 'should honor percentage width for SVG image with inline option' do input = <<~'EOS' :imagesdir: fixtures image::circle.svg[Circle,50%,opts=inline] EOS output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docdir' => testdir } assert_match(/]*width="50%"[^>]*>/, output, 1) end test 'should not crash if explicit width on SVG image block is an integer' do input = <<~'EOS' :imagesdir: fixtures image::circle.svg[Circle,opts=inline] EOS doc = document_from_string input, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docdir' => testdir } doc.blocks[0].set_attr 'width', 50 output = doc.convert assert_match %r/]*width="50"[^>]*>/, output, 1 end test 'converts to inline SVG image when inline option is set on block and data-uri is set on document' do input = <<~'EOS' :imagesdir: fixtures :data-uri: [%inline] image::circle.svg[Tiger,100] EOS output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docdir' => testdir } assert_match(/]*width="100">/, output, 1) end test 'should not throw exception if SVG to inline is empty' do input = 'image::empty.svg[nada,opts=inline]' output = convert_string_to_embedded input, safe: :safe, attributes: { 'docdir' => testdir, 'imagesdir' => 'fixtures' } assert_xpath '//svg', output, 0 assert_xpath '//span[@class="alt"][text()="nada"]', output, 1 assert_message @logger, :WARN, '~contents of SVG is empty:' end test 'should not throw exception if SVG to inline contains an incomplete start tag and explicit width is specified' do input = 'image::incomplete.svg[,200,opts=inline]' output = convert_string_to_embedded input, safe: :safe, attributes: { 'docdir' => testdir, 'imagesdir' => 'fixtures' } assert_xpath '//svg', output, 1 assert_xpath '//span[@class="alt"]', output, 0 end test 'embeds remote SVG to inline when inline option is set on block and allow-uri-read is set on document' do input = %(image::http://#{resolve_localhost}:9876/fixtures/circle.svg[Circle,100,100,opts=inline]) output = using_test_webserver do convert_string_to_embedded input, safe: :safe, attributes: { 'allow-uri-read' => '' } end assert_css 'svg', output, 1 assert_css 'svg[style]', output, 0 assert_css 'svg[width="100"]', output, 1 assert_css 'svg[height="100"]', output, 1 assert_css 'svg circle', output, 1 end test 'should cache remote SVG when allow-uri-read, cache-uri, and inline option are set' do begin if OpenURI.respond_to? :cache_open_uri OpenURI.singleton_class.send :remove_method, :open_uri OpenURI.singleton_class.send :alias_method, :open_uri, :cache_open_uri end using_test_webserver do |base_url, thr| image_url = %(#{base_url}/fixtures/circle.svg) attributes = { 'allow-uri-read' => '', 'cache-uri' => '' } input = %(image::#{image_url}[Circle,100,100,opts=inline]) output = convert_string_to_embedded input, safe: :safe, attributes: attributes assert defined? 
OpenURI::Cache assert_css 'svg circle', output, 1 # NOTE we can't assert here since this is using the system-wide cache #assert_equal thr[:requests].size, 1 #assert_equal thr[:requests][0], image_url thr[:requests].clear Dir.mktmpdir do |cache_path| original_cache_path = OpenURI::Cache.cache_path begin OpenURI::Cache.cache_path = cache_path assert_nil OpenURI::Cache.get image_url 2.times do output = convert_string_to_embedded input, safe: :safe, attributes: attributes refute_nil OpenURI::Cache.get image_url assert_css 'svg circle', output, 1 end assert_equal 1, thr[:requests].size assert_match %r/ \/fixtures\/circle\.svg /, thr[:requests][0], 1 ensure OpenURI::Cache.cache_path = original_cache_path end end end ensure OpenURI.singleton_class.send :alias_method, :cache_open_uri, :open_uri OpenURI.singleton_class.send :remove_method, :open_uri OpenURI.singleton_class.send :alias_method, :open_uri, :original_open_uri end end test 'converts to alt text for SVG with inline option set if SVG cannot be read' do input = <<~'EOS' [%inline] image::no-such-image.svg[Alt Text] EOS output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SERVER assert_xpath '//span[@class="alt"][text()="Alt Text"]', output, 1 assert_message @logger, :WARN, '~SVG does not exist or cannot be read' end test 'can convert block image with alt text defined in macro containing square bracket' do input = 'image::images/tiger.png[A [Bengal] Tiger]' output = convert_string input img = xmlnodes_at_xpath '//img', output, 1 assert_equal 'A [Bengal] Tiger', img.attr('alt') end test 'can convert block image with target containing spaces' do input = 'image::images/big tiger.png[A Big Tiger]' output = convert_string input img = xmlnodes_at_xpath '//img', output, 1 assert_equal 'images/big%20tiger.png', img.attr('src') assert_equal 'A Big Tiger', img.attr('alt') end test 'should not recognize block image if target has leading or trailing spaces' do [' tiger.png', 'tiger.png '].each do |target| input = %(image::#{target}[Tiger]) output = convert_string_to_embedded input assert_xpath '//img', output, 0 end end test 'can convert block image with alt text defined in block attribute above macro' do input = <<~'EOS' [Tiger] image::images/tiger.png[] EOS output = convert_string_to_embedded input assert_xpath '/*[@class="imageblock"]//img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 end test 'alt text in macro overrides alt text above macro' do input = <<~'EOS' [Alt Text] image::images/tiger.png[Tiger] EOS output = convert_string_to_embedded input assert_xpath '/*[@class="imageblock"]//img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 end test 'should substitute attribute references in alt text defined in image block macro' do input = <<~'EOS' :alt-text: Tiger image::images/tiger.png[{alt-text}] EOS output = convert_string_to_embedded input assert_xpath '/*[@class="imageblock"]//img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 end test 'should set direction CSS class on image if float attribute is set' do input = <<~'EOS' [float=left] image::images/tiger.png[Tiger] EOS output = convert_string_to_embedded input assert_css '.imageblock.left', output, 1 assert_css '.imageblock[style]', output, 0 end test 'should set text alignment CSS class on image if align attribute is set' do input = <<~'EOS' [align=center] image::images/tiger.png[Tiger] EOS output = convert_string_to_embedded input assert_css '.imageblock.text-center', output, 1 assert_css '.imageblock[style]', output, 0 end test 'style attribute is dropped from 
image macro' do input = <<~'EOS' [style=value] image::images/tiger.png[Tiger] EOS doc = document_from_string input img = doc.blocks[0] refute(img.attributes.key? 'style') assert_nil img.style end test 'should apply specialcharacters and replacement substitutions to alt text' do input = 'A tiger\'s "roar" is < a bear\'s "growl"' expected = 'A tiger’s "roar" is < a bear’s "growl"' result = convert_string_to_embedded %(image::images/tiger-roar.png[#{input}]) assert_includes result, %(alt="#{expected}") end test 'should not encode double quotes in alt text when converting to DocBook' do input = 'Select "File > Open"' expected = 'Select "File > Open"' result = convert_string_to_embedded %(image::images/open.png[#{input}]), backend: :docbook assert_includes result, %(#{expected}) end test 'should auto-generate alt text for block image if alt text is not specified' do input = 'image::images/lions-and-tigers.png[]' image = block_from_string input assert_equal 'lions and tigers', (image.attr 'alt') assert_equal 'lions and tigers', (image.attr 'default-alt') output = image.convert assert_xpath '/*[@class="imageblock"]//img[@src="images/lions-and-tigers.png"][@alt="lions and tigers"]', output, 1 end test "can convert block image with alt text and height and width" do input = 'image::images/tiger.png[Tiger, 200, 300]' output = convert_string_to_embedded input assert_xpath '/*[@class="imageblock"]//img[@src="images/tiger.png"][@alt="Tiger"][@width="200"][@height="300"]', output, 1 end test 'should not output empty width attribute if positional width attribute is empty' do input = 'image::images/tiger.png[Tiger,]' output = convert_string_to_embedded input assert_xpath '/*[@class="imageblock"]//img[@src="images/tiger.png"]', output, 1 assert_xpath '/*[@class="imageblock"]//img[@src="images/tiger.png"][@width]', output, 0 end test "can convert block image with link" do input = <<~'EOS' image::images/tiger.png[Tiger, link='http://en.wikipedia.org/wiki/Tiger'] EOS output = convert_string_to_embedded input assert_xpath '/*[@class="imageblock"]//a[@class="image"][@href="http://en.wikipedia.org/wiki/Tiger"]/img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 end test 'adds rel=noopener attribute to block image with link that targets _blank window' do input = 'image::images/tiger.png[Tiger,link=http://en.wikipedia.org/wiki/Tiger,window=_blank]' output = convert_string_to_embedded input assert_xpath '/*[@class="imageblock"]//a[@class="image"][@href="http://en.wikipedia.org/wiki/Tiger"][@target="_blank"][@rel="noopener"]/img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 end test 'adds rel=noopener attribute to block image with link that targets name window when the noopener option is set' do input = 'image::images/tiger.png[Tiger,link=http://en.wikipedia.org/wiki/Tiger,window=name,opts=noopener]' output = convert_string_to_embedded input assert_xpath '/*[@class="imageblock"]//a[@class="image"][@href="http://en.wikipedia.org/wiki/Tiger"][@target="name"][@rel="noopener"]/img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 end test 'adds rel=nofollow attribute to block image with a link when the nofollow option is set' do input = 'image::images/tiger.png[Tiger,link=http://en.wikipedia.org/wiki/Tiger,opts=nofollow]' output = convert_string_to_embedded input assert_xpath '/*[@class="imageblock"]//a[@class="image"][@href="http://en.wikipedia.org/wiki/Tiger"][@rel="nofollow"]/img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 end test 'can convert block image with caption' do input = <<~'EOS' .The 
AsciiDoc Tiger image::images/tiger.png[Tiger] EOS doc = document_from_string input assert_equal 1, doc.blocks[0].numeral output = doc.convert assert_xpath '//*[@class="imageblock"]//img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 assert_xpath '//*[@class="imageblock"]/*[@class="title"][text()="Figure 1. The AsciiDoc Tiger"]', output, 1 assert_equal 1, doc.attributes['figure-number'] end test 'can convert block image with explicit caption' do input = <<~'EOS' [caption="Voila! "] .The AsciiDoc Tiger image::images/tiger.png[Tiger] EOS doc = document_from_string input assert_nil doc.blocks[0].numeral output = doc.convert assert_xpath '//*[@class="imageblock"]//img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 assert_xpath '//*[@class="imageblock"]/*[@class="title"][text()="Voila! The AsciiDoc Tiger"]', output, 1 refute doc.attributes.key?('figure-number') end test 'can align image in DocBook backend' do input = 'image::images/sunset.jpg[Sunset,align=right]' output = convert_string_to_embedded input, backend: :docbook assert_xpath '//imagedata', output, 1 assert_xpath '//imagedata[@align="right"]', output, 1 end test 'should set content width and depth in DocBook backend if no scaling' do input = 'image::images/sunset.jpg[Sunset,500,332]' output = convert_string_to_embedded input, backend: :docbook assert_xpath '//imagedata', output, 1 assert_xpath '//imagedata[@contentwidth="500"]', output, 1 assert_xpath '//imagedata[@contentdepth="332"]', output, 1 assert_xpath '//imagedata[@width]', output, 0 assert_xpath '//imagedata[@depth]', output, 0 end test 'can scale image in DocBook backend' do input = 'image::images/sunset.jpg[Sunset,500,332,scale=200]' output = convert_string_to_embedded input, backend: :docbook assert_xpath '//imagedata', output, 1 assert_xpath '//imagedata[@scale="200"]', output, 1 assert_xpath '//imagedata[@width]', output, 0 assert_xpath '//imagedata[@depth]', output, 0 assert_xpath '//imagedata[@contentwidth]', output, 0 assert_xpath '//imagedata[@contentdepth]', output, 0 end test 'scale image width in DocBook backend' do input = 'image::images/sunset.jpg[Sunset,500,332,scaledwidth=25%]' output = convert_string_to_embedded input, backend: :docbook assert_xpath '//imagedata', output, 1 assert_xpath '//imagedata[@width="25%"]', output, 1 assert_xpath '//imagedata[@depth]', output, 0 assert_xpath '//imagedata[@contentwidth]', output, 0 assert_xpath '//imagedata[@contentdepth]', output, 0 end test 'adds % to scaled width if no units given in DocBook backend ' do input = 'image::images/sunset.jpg[Sunset,scaledwidth=25]' output = convert_string_to_embedded input, backend: :docbook assert_xpath '//imagedata', output, 1 assert_xpath '//imagedata[@width="25%"]', output, 1 end test 'keeps attribute reference unprocessed if image target is missing attribute reference and attribute-missing is skip' do input = <<~'EOS' :attribute-missing: skip image::{bogus}[] EOS output = convert_string_to_embedded input assert_css 'img[src="{bogus}"]', output, 1 assert_empty @logger end test 'should not drop line if image target is missing attribute reference and attribute-missing is drop' do input = <<~'EOS' :attribute-missing: drop image::{bogus}/photo.jpg[] EOS output = convert_string_to_embedded input assert_css 'img[src="/photo.jpg"]', output, 1 assert_empty @logger end test 'drops line if image target is missing attribute reference and attribute-missing is drop-line' do input = <<~'EOS' :attribute-missing: drop-line image::{bogus}[] EOS output = convert_string_to_embedded input 
assert_empty output.strip assert_message @logger, :INFO, 'dropping line containing reference to missing attribute: bogus' end test 'should not drop line if image target resolves to blank and attribute-missing is drop-line' do input = <<~'EOS' :attribute-missing: drop-line image::{blank}[] EOS output = convert_string_to_embedded input assert_css 'img[src=""]', output, 1 assert_empty @logger end test 'dropped image does not break processing of following section and attribute-missing is drop-line' do input = <<~'EOS' :attribute-missing: drop-line image::{bogus}[] == Section Title EOS output = convert_string_to_embedded input assert_css 'img', output, 0 assert_css 'h2', output, 1 refute_includes output, '== Section Title' assert_message @logger, :INFO, 'dropping line containing reference to missing attribute: bogus' end test 'should pass through image that references uri' do input = <<~'EOS' :imagesdir: images image::http://asciidoc.org/images/tiger.png[Tiger] EOS output = convert_string_to_embedded input assert_xpath '/*[@class="imageblock"]//img[@src="http://asciidoc.org/images/tiger.png"][@alt="Tiger"]', output, 1 end test 'should encode spaces in image target if value is a URI' do input = 'image::http://example.org/svg?digraph=digraph G { a -> b; }[diagram]' output = convert_string_to_embedded input assert_xpath %(/*[@class="imageblock"]//img[@src="http://example.org/svg?digraph=digraph%20G%20{%20a%20-#{decode_char 62}%20b;%20}"]), output, 1 end test 'can resolve image relative to imagesdir' do input = <<~'EOS' :imagesdir: images image::tiger.png[Tiger] EOS output = convert_string_to_embedded input assert_xpath '/*[@class="imageblock"]//img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 end test 'embeds base64-encoded data uri for image when data-uri attribute is set' do input = <<~'EOS' :data-uri: :imagesdir: fixtures image::dot.gif[Dot] EOS doc = document_from_string input, safe: Asciidoctor::SafeMode::SAFE, attributes: { 'docdir' => testdir } assert_equal 'fixtures', doc.attributes['imagesdir'] output = doc.convert assert_xpath '//img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Dot"]', output, 1 end test 'embeds base64-encoded data uri for image in classloader when data-uri attribute is set', if: jruby? 
do require fixture_path 'assets.jar' input = <<~'EOS' :data-uri: :imagesdir: uri:classloader:/images-in-jar image::dot.gif[Dot] EOS doc = document_from_string input, safe: Asciidoctor::SafeMode::UNSAFE, attributes: { 'docdir' => testdir } assert_equal 'uri:classloader:/images-in-jar', doc.attributes['imagesdir'] output = doc.convert assert_xpath '//img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Dot"]', output, 1 end test 'embeds SVG image with image/svg+xml mimetype when file extension is .svg' do input = <<~'EOS' :imagesdir: fixtures :data-uri: image::circle.svg[Tiger,100] EOS output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docdir' => testdir } assert_xpath '//img[starts-with(@src,"data:image/svg+xml;base64,")]', output, 1 end test 'embeds empty base64-encoded data uri for unreadable image when data-uri attribute is set' do input = <<~'EOS' :data-uri: :imagesdir: fixtures image::unreadable.gif[Dot] EOS doc = document_from_string input, safe: Asciidoctor::SafeMode::SAFE, attributes: { 'docdir' => testdir } assert_equal 'fixtures', doc.attributes['imagesdir'] output = doc.convert assert_xpath '//img[@src="data:image/gif;base64,"]', output, 1 assert_message @logger, :WARN, '~image to embed not found or not readable' end test 'embeds base64-encoded data uri with application/octet-stream mimetype when file extension is missing' do input = <<~'EOS' :data-uri: :imagesdir: fixtures image::dot[Dot] EOS doc = document_from_string input, safe: Asciidoctor::SafeMode::SAFE, attributes: { 'docdir' => testdir } assert_equal 'fixtures', doc.attributes['imagesdir'] output = doc.convert assert_xpath '//img[starts-with(@src,"data:application/octet-stream;base64,")]', output, 1 end test 'embeds base64-encoded data uri for remote image when data-uri attribute is set' do input = <<~EOS :data-uri: image::http://#{resolve_localhost}:9876/fixtures/dot.gif[Dot] EOS output = using_test_webserver do convert_string_to_embedded input, safe: :safe, attributes: { 'allow-uri-read' => '' } end assert_xpath '//img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Dot"]', output, 1 end test 'embeds base64-encoded data uri for remote image when imagesdir is a URI and data-uri attribute is set' do input = <<~EOS :data-uri: :imagesdir: http://#{resolve_localhost}:9876/fixtures image::dot.gif[Dot] EOS output = using_test_webserver do convert_string_to_embedded input, safe: :safe, attributes: { 'allow-uri-read' => '' } end assert_xpath '//img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Dot"]', output, 1 end test 'should cache remote image when allow-uri-read, cache-uri, and data-uri are set' do begin if OpenURI.respond_to? :cache_open_uri OpenURI.singleton_class.send :remove_method, :open_uri OpenURI.singleton_class.send :alias_method, :open_uri, :cache_open_uri end using_test_webserver do |base_url, thr| image_url = %(#{base_url}/fixtures/dot.gif) image_data_uri = 'data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs=' attributes = { 'allow-uri-read' => '', 'cache-uri' => '', 'data-uri' => '' } input = %(image::#{image_url}[Dot]) output = convert_string_to_embedded input, safe: :safe, attributes: attributes assert defined? 
OpenURI::Cache assert_xpath %(//img[@src="#{image_data_uri}"][@alt="Dot"]), output, 1 # NOTE we can't assert here since this is using the system-wide cache #assert_equal thr[:requests].size, 1 #assert_equal thr[:requests][0], image_url thr[:requests].clear Dir.mktmpdir do |cache_path| original_cache_path = OpenURI::Cache.cache_path begin OpenURI::Cache.cache_path = cache_path assert_nil OpenURI::Cache.get image_url 2.times do output = convert_string_to_embedded input, safe: :safe, attributes: attributes refute_nil OpenURI::Cache.get image_url assert_xpath %(//img[@src="#{image_data_uri}"][@alt="Dot"]), output, 1 end assert_equal 1, thr[:requests].size assert_match %r/ \/fixtures\/dot\.gif /, thr[:requests][0], 1 ensure OpenURI::Cache.cache_path = original_cache_path end end end ensure OpenURI.singleton_class.send :alias_method, :cache_open_uri, :open_uri OpenURI.singleton_class.send :remove_method, :open_uri OpenURI.singleton_class.send :alias_method, :open_uri, :original_open_uri end end test 'uses remote image uri when data-uri attribute is set and image cannot be retrieved' do image_uri = "http://#{resolve_localhost}:9876/fixtures/missing-image.gif" input = <<~EOS :data-uri: image::#{image_uri}[Missing image] EOS output = using_test_webserver do convert_string_to_embedded input, safe: :safe, attributes: { 'allow-uri-read' => '' } end assert_xpath %(/*[@class="imageblock"]//img[@src="#{image_uri}"][@alt="Missing image"]), output, 1 assert_message @logger, :WARN, '~could not retrieve image data from URI' end test 'uses remote image uri when data-uri attribute is set and allow-uri-read is not set' do image_uri = "http://#{resolve_localhost}:9876/fixtures/dot.gif" input = <<~EOS :data-uri: image::#{image_uri}[Dot] EOS output = using_test_webserver do convert_string_to_embedded input, safe: :safe end assert_xpath %(/*[@class="imageblock"]//img[@src="#{image_uri}"][@alt="Dot"]), output, 1 end test 'can handle embedded data uri images' do input = 'image::data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs=[Dot]' output = convert_string_to_embedded input assert_xpath '//img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Dot"]', output, 1 end test 'can handle embedded data uri images when data-uri attribute is set' do input = <<~'EOS' :data-uri: image::data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs=[Dot] EOS output = convert_string_to_embedded input assert_xpath '//img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Dot"]', output, 1 end test 'cleans reference to ancestor directories in imagesdir before reading image if safe mode level is at least SAFE' do input = <<~'EOS' :data-uri: :imagesdir: ../..//fixtures/./../../fixtures image::dot.gif[Dot] EOS doc = document_from_string input, safe: Asciidoctor::SafeMode::SAFE, attributes: { 'docdir' => testdir } assert_equal '../..//fixtures/./../../fixtures', doc.attributes['imagesdir'] output = doc.convert # image target resolves to fixtures/dot.gif relative to docdir (which is explicitly set to the directory of this file) # the reference cannot fall outside of the document directory in safe mode assert_xpath '//img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Dot"]', output, 1 assert_message @logger, :WARN, 'image has illegal reference to ancestor of jail; recovering automatically' end test 'cleans reference to ancestor directories in target before reading image if safe mode level is at least 
SAFE' do input = <<~'EOS' :data-uri: :imagesdir: ./ image::../..//fixtures/./../../fixtures/dot.gif[Dot] EOS doc = document_from_string input, safe: Asciidoctor::SafeMode::SAFE, attributes: { 'docdir' => testdir } assert_equal './', doc.attributes['imagesdir'] output = doc.convert # image target resolves to fixtures/dot.gif relative to docdir (which is explicitly set to the directory of this file) # the reference cannot fall outside of the document directory in safe mode assert_xpath '//img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Dot"]', output, 1 assert_message @logger, :WARN, 'image has illegal reference to ancestor of jail; recovering automatically' end end context 'Media' do test 'should detect and convert video macro' do input = 'video::cats-vs-dogs.avi[]' output = convert_string_to_embedded input assert_css 'video', output, 1 assert_css 'video[src="cats-vs-dogs.avi"]', output, 1 end test 'should detect and convert video macro with positional attributes for poster and dimensions' do input = 'video::cats-vs-dogs.avi[cats-and-dogs.png, 200, 300]' output = convert_string_to_embedded input assert_css 'video', output, 1 assert_css 'video[src="cats-vs-dogs.avi"]', output, 1 assert_css 'video[poster="cats-and-dogs.png"]', output, 1 assert_css 'video[width="200"]', output, 1 assert_css 'video[height="300"]', output, 1 end test 'should set direction CSS class on video block if float attribute is set' do input = 'video::cats-vs-dogs.avi[cats-and-dogs.png,float=right]' output = convert_string_to_embedded input assert_css 'video', output, 1 assert_css 'video[src="cats-vs-dogs.avi"]', output, 1 assert_css '.videoblock.right', output, 1 end test 'should set text alignment CSS class on video block if align attribute is set' do input = 'video::cats-vs-dogs.avi[cats-and-dogs.png,align=center]' output = convert_string_to_embedded input assert_css 'video', output, 1 assert_css 'video[src="cats-vs-dogs.avi"]', output, 1 assert_css '.videoblock.text-center', output, 1 end test 'video macro should honor all options' do input = 'video::cats-vs-dogs.avi[options="autoplay,muted,nocontrols,loop",preload="metadata"]' output = convert_string_to_embedded input assert_css 'video', output, 1 assert_css 'video[autoplay]', output, 1 assert_css 'video[muted]', output, 1 assert_css 'video:not([controls])', output, 1 assert_css 'video[loop]', output, 1 assert_css 'video[preload=metadata]', output, 1 end test 'video macro should add time range anchor with start time if start attribute is set' do input = 'video::cats-vs-dogs.avi[start="30"]' output = convert_string_to_embedded input assert_css 'video', output, 1 assert_xpath '//video[@src="cats-vs-dogs.avi#t=30"]', output, 1 end test 'video macro should add time range anchor with end time if end attribute is set' do input = 'video::cats-vs-dogs.avi[end="30"]' output = convert_string_to_embedded input assert_css 'video', output, 1 assert_xpath '//video[@src="cats-vs-dogs.avi#t=,30"]', output, 1 end test 'video macro should add time range anchor with start and end time if start and end attributes are set' do input = 'video::cats-vs-dogs.avi[start="30",end="60"]' output = convert_string_to_embedded input assert_css 'video', output, 1 assert_xpath '//video[@src="cats-vs-dogs.avi#t=30,60"]', output, 1 end test 'video macro should use imagesdir attribute to resolve target and poster' do input = <<~'EOS' :imagesdir: assets video::cats-vs-dogs.avi[cats-and-dogs.png, 200, 300] EOS output = convert_string_to_embedded input assert_css 
'video', output, 1 assert_css 'video[src="assets/cats-vs-dogs.avi"]', output, 1 assert_css 'video[poster="assets/cats-and-dogs.png"]', output, 1 assert_css 'video[width="200"]', output, 1 assert_css 'video[height="300"]', output, 1 end test 'video macro should not use imagesdir attribute to resolve target if target is a URL' do input = <<~'EOS' :imagesdir: assets video::http://example.org/videos/cats-vs-dogs.avi[] EOS output = convert_string_to_embedded input assert_css 'video', output, 1 assert_css 'video[src="http://example.org/videos/cats-vs-dogs.avi"]', output, 1 end test 'video macro should output custom HTML with iframe for vimeo service' do input = 'video::67480300[vimeo, 400, 300, start=60, options="autoplay,muted"]' output = convert_string_to_embedded input assert_css 'video', output, 0 assert_css 'iframe', output, 1 assert_css 'iframe[src="https://player.vimeo.com/video/67480300?autoplay=1&muted=1#at=60"]', output, 1 assert_css 'iframe[width="400"]', output, 1 assert_css 'iframe[height="300"]', output, 1 end test 'video macro should allow hash for vimeo video to be specified in video ID' do input = 'video::67480300/123456789[vimeo, 400, 300, options=loop]' output = convert_string_to_embedded input assert_css 'video', output, 0 assert_css 'iframe', output, 1 assert_css 'iframe[src="https://player.vimeo.com/video/67480300?h=123456789&loop=1"]', output, 1 assert_css 'iframe[width="400"]', output, 1 assert_css 'iframe[height="300"]', output, 1 end test 'video macro should allow hash for vimeo video to be specified using hash attribute' do input = 'video::67480300[vimeo, 400, 300, options=loop, hash=123456789]' output = convert_string_to_embedded input assert_css 'video', output, 0 assert_css 'iframe', output, 1 assert_css 'iframe[src="https://player.vimeo.com/video/67480300?h=123456789&loop=1"]', output, 1 assert_css 'iframe[width="400"]', output, 1 assert_css 'iframe[height="300"]', output, 1 end test 'video macro should output custom HTML with iframe for youtube service' do input = 'video::U8GBXvdmHT4/PLg7s6cbtAD15Das5LK9mXt_g59DLWxKUe[youtube, 640, 360, start=60, options="autoplay,muted,modest", theme=light]' output = convert_string_to_embedded input assert_css 'video', output, 0 assert_css 'iframe', output, 1 assert_css 'iframe[src="https://www.youtube.com/embed/U8GBXvdmHT4?rel=0&start=60&autoplay=1&mute=1&list=PLg7s6cbtAD15Das5LK9mXt_g59DLWxKUe&modestbranding=1&theme=light"]', output, 1 assert_css 'iframe[width="640"]', output, 1 assert_css 'iframe[height="360"]', output, 1 end test 'video macro should output custom HTML with iframe for youtube service with dynamic playlist' do input = 'video::SCZF6I-Rc4I,AsKGOeonbIs,HwrPhOp6-aM[youtube, 640, 360, start=60, options=autoplay]' output = convert_string_to_embedded input assert_css 'video', output, 0 assert_css 'iframe', output, 1 assert_css 'iframe[src="https://www.youtube.com/embed/SCZF6I-Rc4I?rel=0&start=60&autoplay=1&playlist=SCZF6I-Rc4I,AsKGOeonbIs,HwrPhOp6-aM"]', output, 1 assert_css 'iframe[width="640"]', output, 1 assert_css 'iframe[height="360"]', output, 1 end test 'should detect and convert audio macro' do input = 'audio::podcast.mp3[]' output = convert_string_to_embedded input assert_css 'audio', output, 1 assert_css 'audio[src="podcast.mp3"]', output, 1 end test 'audio macro should use imagesdir attribute to resolve target' do input = <<~'EOS' :imagesdir: assets audio::podcast.mp3[] EOS output = convert_string_to_embedded input assert_css 'audio', output, 1 assert_css 'audio[src="assets/podcast.mp3"]', output, 1 end 
test 'audio macro should not use imagesdir attribute to resolve target if target is a URL' do input = <<~'EOS' :imagesdir: assets video::http://example.org/podcast.mp3[] EOS output = convert_string_to_embedded input assert_css 'video', output, 1 assert_css 'video[src="http://example.org/podcast.mp3"]', output, 1 end test 'audio macro should honor all options' do input = 'audio::podcast.mp3[options="autoplay,nocontrols,loop"]' output = convert_string_to_embedded input assert_css 'audio', output, 1 assert_css 'audio[autoplay]', output, 1 assert_css 'audio:not([controls])', output, 1 assert_css 'audio[loop]', output, 1 end test 'audio macro should support start and end time' do input = 'audio::podcast.mp3[start=1,end=2]' output = convert_string_to_embedded input assert_css 'audio', output, 1 assert_css 'audio[controls]', output, 1 assert_css 'audio[src="podcast.mp3#t=1,2"]', output, 1 end end context 'Admonition icons' do test 'can resolve icon relative to default iconsdir' do input = <<~'EOS' :icons: [TIP] You can use icons for admonitions by setting the 'icons' attribute. EOS output = convert_string input, safe: Asciidoctor::SafeMode::SERVER assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="./images/icons/tip.png"][@alt="Tip"]', output, 1 end test 'can resolve icon relative to custom iconsdir' do input = <<~'EOS' :icons: :iconsdir: icons [TIP] You can use icons for admonitions by setting the 'icons' attribute. EOS output = convert_string input, safe: Asciidoctor::SafeMode::SERVER assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="icons/tip.png"][@alt="Tip"]', output, 1 end test 'should add file extension to custom icon if not specified' do input = <<~'EOS' :icons: font :iconsdir: images/icons [TIP,icon=a] Override the icon of an admonition block using an attribute EOS output = convert_string input, safe: Asciidoctor::SafeMode::SERVER assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="images/icons/a.png"]', output, 1 end test 'should allow icontype to be specified when using built-in admonition icon' do input = 'TIP: Set the icontype using either the icontype attribute on the icons attribute.' [ { 'icons' => '', 'ext' => 'png' }, { 'icons' => '', 'icontype' => 'jpg', 'ext' => 'jpg' }, { 'icons' => 'jpg', 'ext' => 'jpg' }, { 'icons' => 'image', 'ext' => 'png' }, ].each do |attributes| expected_src = %(./images/icons/tip.#{attributes.delete 'ext'}) output = convert_string input, attributes: attributes assert_xpath %(//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="#{expected_src}"]), output, 1 end end test 'should allow icontype to be specified when using custom admonition icon' do input = <<~'EOS' [TIP,icon=hint] Set the icontype using either the icontype attribute on the icons attribute. EOS [ { 'icons' => '', 'ext' => 'png' }, { 'icons' => '', 'icontype' => 'jpg', 'ext' => 'jpg' }, { 'icons' => 'jpg', 'ext' => 'jpg' }, { 'icons' => 'image', 'ext' => 'png' }, ].each do |attributes| expected_src = %(./images/icons/hint.#{attributes.delete 'ext'}) output = convert_string input, attributes: attributes assert_xpath %(//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="#{expected_src}"]), output, 1 end end test 'embeds base64-encoded data uri of icon when data-uri attribute is set and safe mode level is less than SECURE' do input = <<~'EOS' :icons: :iconsdir: fixtures :icontype: gif :data-uri: [TIP] You can use icons for admonitions by setting the 'icons' attribute. 
EOS output = convert_string input, safe: Asciidoctor::SafeMode::SAFE, attributes: { 'docdir' => testdir } assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Tip"]', output, 1 end test 'should embed base64-encoded data uri of custom icon when data-uri attribute is set' do input = <<~'EOS' :icons: :iconsdir: fixtures :icontype: gif :data-uri: [TIP,icon=tip] You can set a custom icon using the icon attribute on the block. EOS output = convert_string input, safe: Asciidoctor::SafeMode::SAFE, attributes: { 'docdir' => testdir } assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Tip"]', output, 1 end test 'does not embed base64-encoded data uri of icon when safe mode level is SECURE or greater' do input = <<~'EOS' :icons: :iconsdir: fixtures :icontype: gif :data-uri: [TIP] You can use icons for admonitions by setting the 'icons' attribute. EOS output = convert_string input, attributes: { 'icons' => '' } assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="fixtures/tip.gif"][@alt="Tip"]', output, 1 end test 'cleans reference to ancestor directories before reading icon if safe mode level is at least SAFE' do input = <<~'EOS' :icons: :iconsdir: ../fixtures :icontype: gif :data-uri: [TIP] You can use icons for admonitions by setting the 'icons' attribute. EOS output = convert_string input, safe: Asciidoctor::SafeMode::SAFE, attributes: { 'docdir' => testdir } assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Tip"]', output, 1 assert_message @logger, :WARN, 'image has illegal reference to ancestor of jail; recovering automatically' end test 'should import Font Awesome and use font-based icons when value of icons attribute is font' do input = <<~'EOS' :icons: font [TIP] You can use icons for admonitions by setting the 'icons' attribute. EOS output = convert_string input, safe: Asciidoctor::SafeMode::SERVER assert_css %(html > head > link[rel="stylesheet"][href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/#{Asciidoctor::FONT_AWESOME_VERSION}/css/font-awesome.min.css"]), output, 1 assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/i[@class="fa icon-tip"]', output, 1 end test 'font-based icon should not override icon specified on admonition' do input = <<~'EOS' :icons: font :iconsdir: images/icons [TIP,icon=a.png] Override the icon of an admonition block using an attribute EOS output = convert_string input, safe: Asciidoctor::SafeMode::SERVER assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/i[@class="fa icon-tip"]', output, 0 assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="images/icons/a.png"]', output, 1 end test 'should use http uri scheme for assets when asset-uri-scheme is http' do input = <<~'EOS' :asset-uri-scheme: http :icons: font :source-highlighter: highlightjs TIP: You can control the URI scheme used for assets with the asset-uri-scheme attribute [source,ruby] puts "AsciiDoc, FTW!" 
EOS output = convert_string input, safe: Asciidoctor::SafeMode::SAFE assert_css %(html > head > link[rel="stylesheet"][href="http://cdnjs.cloudflare.com/ajax/libs/font-awesome/#{Asciidoctor::FONT_AWESOME_VERSION}/css/font-awesome.min.css"]), output, 1 assert_css %(html > body > script[src="http://cdnjs.cloudflare.com/ajax/libs/highlight.js/#{Asciidoctor::HIGHLIGHT_JS_VERSION}/highlight.min.js"]), output, 1 end test 'should use no uri scheme for assets when asset-uri-scheme is blank' do input = <<~'EOS' :asset-uri-scheme: :icons: font :source-highlighter: highlightjs TIP: You can control the URI scheme used for assets with the asset-uri-scheme attribute [source,ruby] puts "AsciiDoc, FTW!" EOS output = convert_string input, safe: Asciidoctor::SafeMode::SAFE assert_css %(html > head > link[rel="stylesheet"][href="//cdnjs.cloudflare.com/ajax/libs/font-awesome/#{Asciidoctor::FONT_AWESOME_VERSION}/css/font-awesome.min.css"]), output, 1 assert_css %(html > body > script[src="//cdnjs.cloudflare.com/ajax/libs/highlight.js/#{Asciidoctor::HIGHLIGHT_JS_VERSION}/highlight.min.js"]), output, 1 end end context 'Image paths' do test 'restricts access to ancestor directories when safe mode level is at least SAFE' do input = 'image::asciidoctor.png[Asciidoctor]' basedir = testdir block = block_from_string input, attributes: { 'docdir' => basedir } doc = block.document assert doc.safe >= Asciidoctor::SafeMode::SAFE assert_equal File.join(basedir, 'images'), block.normalize_asset_path('images') assert_equal File.join(basedir, 'etc/images'), block.normalize_asset_path("#{disk_root}etc/images") assert_equal File.join(basedir, 'images'), block.normalize_asset_path('../../images') end test 'does not restrict access to ancestor directories when safe mode is disabled' do input = 'image::asciidoctor.png[Asciidoctor]' basedir = testdir block = block_from_string input, safe: Asciidoctor::SafeMode::UNSAFE, attributes: { 'docdir' => basedir } doc = block.document assert doc.safe == Asciidoctor::SafeMode::UNSAFE assert_equal File.join(basedir, 'images'), block.normalize_asset_path('images') absolute_path = "#{disk_root}etc/images" assert_equal absolute_path, block.normalize_asset_path(absolute_path) assert_equal File.expand_path(File.join(basedir, '../../images')), block.normalize_asset_path('../../images') end end context 'Source code' do test 'should support fenced code block using backticks' do input = <<~'EOS' ``` puts "Hello, World!" ``` EOS output = convert_string_to_embedded input assert_css '.listingblock', output, 1 assert_css '.listingblock pre code', output, 1 assert_css '.listingblock pre code:not([class])', output, 1 end test 'should not recognize fenced code blocks with more than three delimiters' do input = <<~'EOS' ````ruby puts "Hello, World!" ```` ~~~~ javascript alert("Hello, World!") ~~~~ EOS output = convert_string_to_embedded input assert_css '.listingblock', output, 0 end test 'should support fenced code blocks with languages' do input = <<~'EOS' ```ruby puts "Hello, World!" ``` ``` javascript alert("Hello, World!") ``` EOS output = convert_string_to_embedded input assert_css '.listingblock', output, 2 assert_css '.listingblock pre code.language-ruby[data-lang=ruby]', output, 1 assert_css '.listingblock pre code.language-javascript[data-lang=javascript]', output, 1 end test 'should support fenced code blocks with languages and numbering' do input = <<~'EOS' ```ruby,numbered puts "Hello, World!" 
``` ``` javascript, numbered alert("Hello, World!") ``` EOS output = convert_string_to_embedded input assert_css '.listingblock', output, 2 assert_css '.listingblock pre code.language-ruby[data-lang=ruby]', output, 1 assert_css '.listingblock pre code.language-javascript[data-lang=javascript]', output, 1 end end context 'Abstract and Part Intro' do test 'should make abstract on open block without title a quote block for article' do input = <<~'EOS' = Article [abstract] -- This article is about stuff. And other stuff. -- == Section One content EOS output = convert_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock.abstract', output, 1 assert_css '#preamble .quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph', output, 2 end test 'should make abstract on open block with title a quote block with title for article' do input = <<~'EOS' = Article .My abstract [abstract] -- This article is about stuff. -- == Section One content EOS output = convert_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock.abstract', output, 1 assert_css '#preamble .quoteblock', output, 1 assert_css '.quoteblock > .title', output, 1 assert_css '.quoteblock > .title + blockquote', output, 1 assert_css '.quoteblock > .title + blockquote > .paragraph', output, 1 end test 'should allow abstract in document with title if doctype is book' do input = <<~'EOS' = Book :doctype: book [abstract] Abstract for book with title is valid EOS output = convert_string input assert_css '.abstract', output, 1 end test 'should not allow abstract as direct child of document if doctype is book' do input = <<~'EOS' :doctype: book [abstract] Abstract for book without title is invalid. EOS output = convert_string input assert_css '.abstract', output, 0 assert_message @logger, :WARN, 'abstract block cannot be used in a document without a title when doctype is book. Excluding block content.' end test 'should make abstract on open block without title converted to DocBook' do input = <<~'EOS' = Article [abstract] -- This article is about stuff. And other stuff. -- EOS output = convert_string input, backend: 'docbook' assert_css 'abstract', output, 1 assert_css 'abstract > simpara', output, 2 end test 'should make abstract on open block with title converted to DocBook' do input = <<~'EOS' = Article .My abstract [abstract] -- This article is about stuff. -- EOS output = convert_string input, backend: 'docbook' assert_css 'abstract', output, 1 assert_css 'abstract > title', output, 1 assert_css 'abstract > title + simpara', output, 1 end test 'should allow abstract in document with title if doctype is book converted to DocBook' do input = <<~'EOS' = Book :doctype: book [abstract] Abstract for book with title is valid EOS output = convert_string input, backend: 'docbook' assert_css 'abstract', output, 1 end test 'should not allow abstract as direct child of document if doctype is book converted to DocBook' do input = <<~'EOS' :doctype: book [abstract] Abstract for book is invalid. EOS output = convert_string input, backend: 'docbook' assert_css 'abstract', output, 0 assert_message @logger, :WARN, 'abstract block cannot be used in a document without a title when doctype is book. Excluding block content.' end # TODO partintro shouldn't be recognized if doctype is not book, should be in proper place test 'should accept partintro on open block without title' do input = <<~'EOS' = Book :doctype: book = Part 1 [partintro] -- This is a part intro. 
It can have multiple paragraphs. -- == Chapter 1 content EOS output = convert_string input assert_css '.openblock', output, 1 assert_css '.openblock.partintro', output, 1 assert_css '.openblock .title', output, 0 assert_css '.openblock .content', output, 1 assert_xpath %(//h1[@id="_part_1"]/following-sibling::*[#{contains_class(:openblock)}]), output, 1 assert_xpath %(//*[#{contains_class(:openblock)}]/*[@class="content"]/*[@class="paragraph"]), output, 2 end test 'should accept partintro on open block with title' do input = <<~'EOS' = Book :doctype: book = Part 1 .Intro title [partintro] -- This is a part intro with a title. -- == Chapter 1 content EOS output = convert_string input assert_css '.openblock', output, 1 assert_css '.openblock.partintro', output, 1 assert_css '.openblock .title', output, 1 assert_css '.openblock .content', output, 1 assert_xpath %(//h1[@id="_part_1"]/following-sibling::*[#{contains_class(:openblock)}]), output, 1 assert_xpath %(//*[#{contains_class(:openblock)}]/*[@class="title"][text()="Intro title"]), output, 1 assert_xpath %(//*[#{contains_class(:openblock)}]/*[@class="content"]/*[@class="paragraph"]), output, 1 end test 'should exclude partintro if not a child of part' do input = <<~'EOS' = Book :doctype: book [partintro] part intro paragraph EOS output = convert_string input assert_css '.partintro', output, 0 assert_message @logger, :ERROR, 'partintro block can only be used when doctype is book and must be a child of a book part. Excluding block content.' end test 'should not allow partintro unless doctype is book' do input = <<~'EOS' [partintro] part intro paragraph EOS output = convert_string input assert_css '.partintro', output, 0 assert_message @logger, :ERROR, 'partintro block can only be used when doctype is book and must be a child of a book part. Excluding block content.' end test 'should accept partintro on open block without title converted to DocBook' do input = <<~'EOS' = Book :doctype: book = Part 1 [partintro] -- This is a part intro. It can have multiple paragraphs. -- == Chapter 1 content EOS output = convert_string input, backend: 'docbook' assert_css 'partintro', output, 1 assert_css 'part[xml|id="_part_1"] > partintro', output, 1 assert_css 'partintro > simpara', output, 2 end test 'should accept partintro on open block with title converted to DocBook' do input = <<~'EOS' = Book :doctype: book = Part 1 .Intro title [partintro] -- This is a part intro with a title. -- == Chapter 1 content EOS output = convert_string input, backend: 'docbook' assert_css 'partintro', output, 1 assert_css 'part[xml|id="_part_1"] > partintro', output, 1 assert_css 'partintro > title', output, 1 assert_css 'partintro > title + simpara', output, 1 end test 'should exclude partintro if not a child of part converted to DocBook' do input = <<~'EOS' = Book :doctype: book [partintro] part intro paragraph EOS output = convert_string input, backend: 'docbook' assert_css 'partintro', output, 0 assert_message @logger, :ERROR, 'partintro block can only be used when doctype is book and must be a child of a book part. Excluding block content.' end test 'should not allow partintro unless doctype is book converted to DocBook' do input = <<~'EOS' [partintro] part intro paragraph EOS output = convert_string input, backend: 'docbook' assert_css 'partintro', output, 0 assert_message @logger, :ERROR, 'partintro block can only be used when doctype is book and must be a child of a book part. Excluding block content.' 
end end context 'Substitutions' do test 'processor should not crash if subs are empty' do input = <<~'EOS' [subs=","] .... content .... EOS doc = document_from_string input block = doc.blocks.first assert_equal [], block.subs end test 'should be able to append subs to default block substitution list' do input = <<~'EOS' :application: Asciidoctor [subs="+attributes,+macros"] .... {application} .... EOS doc = document_from_string input block = doc.blocks.first assert_equal [:specialcharacters, :attributes, :macros], block.subs end test 'should be able to prepend subs to default block substitution list' do input = <<~'EOS' :application: Asciidoctor [subs="attributes+"] .... {application} .... EOS doc = document_from_string input block = doc.blocks.first assert_equal [:attributes, :specialcharacters], block.subs end test 'should be able to remove subs to default block substitution list' do input = <<~'EOS' [subs="-quotes,-replacements"] content EOS doc = document_from_string input block = doc.blocks.first assert_equal [:specialcharacters, :attributes, :macros, :post_replacements], block.subs end test 'should be able to prepend, append and remove subs from default block substitution list' do input = <<~'EOS' :application: asciidoctor [subs="attributes+,-verbatim,+specialcharacters,+macros"] .... https://{application}.org[{gt}{gt}] <1> .... EOS doc = document_from_string input, standalone: false block = doc.blocks.first assert_equal [:attributes, :specialcharacters, :macros], block.subs result = doc.convert assert_includes result, '
<pre><a href="https://asciidoctor.org">&gt;&gt;</a> &lt;1&gt;</pre>
    ' end test 'should be able to set subs then modify them' do input = <<~'EOS' [subs="verbatim,-callouts"] _hey now_ <1> EOS doc = document_from_string input, standalone: false block = doc.blocks.first assert_equal [:specialcharacters], block.subs result = doc.convert assert_includes result, '_hey now_ <1>' end end context 'References' do test 'should not recognize block anchor with illegal id characters' do input = <<~'EOS' [[illegal$id,Reference Text]] ---- content ---- EOS doc = document_from_string input block = doc.blocks.first assert_nil block.id assert_nil(block.attr 'reftext') refute doc.catalog[:refs].key? 'illegal$id' end test 'should not recognize block anchor that starts with digit' do input = <<~'EOS' [[3-blind-mice]] -- see how they run -- EOS output = convert_string_to_embedded input assert_includes output, '[[3-blind-mice]]' assert_xpath '/*[@id=":3-blind-mice"]', output, 0 end test 'should recognize block anchor that starts with colon' do input = <<~'EOS' [[:idname]] -- content -- EOS output = convert_string_to_embedded input assert_xpath '/*[@id=":idname"]', output, 1 end test 'should use specified id and reftext when registering block reference' do input = <<~'EOS' [[debian,Debian Install]] .Installation on Debian ---- $ apt-get install asciidoctor ---- EOS doc = document_from_string input ref = doc.catalog[:refs]['debian'] refute_nil ref assert_equal 'Debian Install', ref.reftext assert_equal 'debian', (doc.resolve_id 'Debian Install') end test 'should allow square brackets in block reference text' do input = <<~'EOS' [[debian,[Debian] Install]] .Installation on Debian ---- $ apt-get install asciidoctor ---- EOS doc = document_from_string input ref = doc.catalog[:refs]['debian'] refute_nil ref assert_equal '[Debian] Install', ref.reftext assert_equal 'debian', (doc.resolve_id '[Debian] Install') end test 'should allow comma in block reference text' do input = <<~'EOS' [[debian, Debian, Ubuntu]] .Installation on Debian ---- $ apt-get install asciidoctor ---- EOS doc = document_from_string input ref = doc.catalog[:refs]['debian'] refute_nil ref assert_equal 'Debian, Ubuntu', ref.reftext assert_equal 'debian', (doc.resolve_id 'Debian, Ubuntu') end test 'should resolve attribute reference in title using attribute defined at location of block' do input = <<~'EOS' = Document Title :foo: baz intro paragraph. see <>. :foo: bar .foo is {foo} [#formal-para] paragraph with title [discrete#free-standing] == foo is still {foo} EOS doc = document_from_string input ref = doc.catalog[:refs]['formal-para'] refute_nil ref assert_equal 'foo is bar', ref.title assert_equal 'formal-para', (doc.resolve_id 'foo is bar') output = doc.convert standalone: false assert_include 'foo is still bar', output assert_include '
<h2 id="free-standing" class="discrete">foo is still bar</h2>
    ', output end test 'should substitute attribute references in reftext when registering block reference' do input = <<~'EOS' :label-tiger: Tiger [[tiger-evolution,Evolution of the {label-tiger}]] **** Information about the evolution of the tiger. **** EOS doc = document_from_string input ref = doc.catalog[:refs]['tiger-evolution'] refute_nil ref assert_equal 'Evolution of the Tiger', ref.attributes['reftext'] assert_equal 'tiger-evolution', (doc.resolve_id 'Evolution of the Tiger') end test 'should use specified reftext when registering block reference' do input = <<~'EOS' [[debian]] [reftext="Debian Install"] .Installation on Debian ---- $ apt-get install asciidoctor ---- EOS doc = document_from_string input ref = doc.catalog[:refs]['debian'] refute_nil ref assert_equal 'Debian Install', ref.reftext assert_equal 'debian', (doc.resolve_id 'Debian Install') end end end asciidoctor-2.0.20/test/converter_test.rb000066400000000000000000000714001443135032600204530ustar00rootroot00000000000000# frozen_string_literal: true require_relative 'test_helper' require 'tilt' unless defined? Tilt.new context 'Converter' do context 'View options' do test 'should set Haml format to html5 for html5 backend' do doc = Asciidoctor::Document.new [], template_dir: (fixture_path 'custom-backends/haml'), template_cache: false assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter selected = doc.converter.find_converter('paragraph') assert_kind_of Asciidoctor::Converter::TemplateConverter, selected assert_kind_of haml_template_class, selected.templates['paragraph'] assert_equal :html5, selected.templates['paragraph'].options[:format] end test 'should set Haml format to xhtml for docbook backend' do doc = Asciidoctor::Document.new [], backend: 'docbook5', template_dir: (fixture_path 'custom-backends/haml'), template_cache: false assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter selected = doc.converter.find_converter('paragraph') assert_kind_of Asciidoctor::Converter::TemplateConverter, selected assert_kind_of haml_template_class, selected.templates['paragraph'] assert_equal :xhtml, selected.templates['paragraph'].options[:format] end test 'should configure Slim to resolve includes in specified template dirs' do template_dirs = [(fixture_path 'custom-backends/slim'), (fixture_path 'custom-backends/slim-overrides')] doc = Asciidoctor::Document.new [], template_dirs: template_dirs, template_cache: false assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter selected = doc.converter.find_converter('paragraph') assert_kind_of Asciidoctor::Converter::TemplateConverter, selected assert_kind_of Slim::Template, selected.templates['paragraph'] assert_equal template_dirs.reverse.map {|dir| File.expand_path dir }, selected.templates['paragraph'].options[:include_dirs] end test 'should coerce template_dirs option to an Array' do template_dirs = fixture_path 'custom-backends/slim' doc = Asciidoctor::Document.new [], template_dirs: template_dirs, template_cache: false assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter selected = doc.converter.find_converter('paragraph') assert_kind_of Asciidoctor::Converter::TemplateConverter, selected assert_kind_of Array, (selected.instance_variable_get :@template_dirs) end test 'should set Slim format to html for html5 backend' do doc = Asciidoctor::Document.new [], template_dir: (fixture_path 'custom-backends/slim'), template_cache: false assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter 
selected = doc.converter.find_converter('paragraph') assert_kind_of Asciidoctor::Converter::TemplateConverter, selected assert_kind_of Slim::Template, selected.templates['paragraph'] assert_equal :html, selected.templates['paragraph'].options[:format] end test 'should set Slim format to nil for docbook backend' do doc = Asciidoctor::Document.new [], backend: 'docbook5', template_dir: (fixture_path 'custom-backends/slim'), template_cache: false assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter selected = doc.converter.find_converter('paragraph') assert_kind_of Asciidoctor::Converter::TemplateConverter, selected assert_kind_of Slim::Template, selected.templates['paragraph'] assert_nil selected.templates['paragraph'].options[:format] end test 'should set safe mode of Slim AsciiDoc engine to match document safe mode when Slim >= 3' do doc = Asciidoctor::Document.new [], template_dir: (fixture_path 'custom-backends/slim'), template_cache: false, safe: :unsafe assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter selected = doc.converter.find_converter('paragraph') assert_kind_of Asciidoctor::Converter::TemplateConverter, selected slim_asciidoc_opts = selected.instance_variable_get(:@engine_options)[:slim][:asciidoc] assert_equal({ safe: Asciidoctor::SafeMode::UNSAFE }, slim_asciidoc_opts) end test 'should support custom template engine options for known engine' do doc = Asciidoctor::Document.new [], template_dir: (fixture_path 'custom-backends/slim'), template_cache: false, template_engine_options: { slim: { pretty: true } } assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter selected = doc.converter.find_converter('paragraph') assert_kind_of Asciidoctor::Converter::TemplateConverter, selected assert_kind_of Slim::Template, selected.templates['paragraph'] assert_equal true, selected.templates['paragraph'].options[:pretty] end test 'should support custom template engine options' do doc = Asciidoctor::Document.new [], template_dir: (fixture_path 'custom-backends/slim'), template_cache: false, template_engine_options: { slim: { pretty: true } } assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter selected = doc.converter.find_converter('paragraph') assert_kind_of Asciidoctor::Converter::TemplateConverter, selected assert_kind_of Slim::Template, selected.templates['paragraph'] assert_equal false, selected.templates['paragraph'].options[:sort_attrs] assert_equal true, selected.templates['paragraph'].options[:pretty] end end context 'Custom backends' do test 'should load Haml templates for default backend' do doc = Asciidoctor::Document.new [], template_dir: (fixture_path 'custom-backends/haml'), template_cache: false assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter %w(paragraph sidebar).each do |node_name| selected = doc.converter.find_converter node_name assert_kind_of Asciidoctor::Converter::TemplateConverter, selected assert_kind_of haml_template_class, selected.templates[node_name] assert_equal %(block_#{node_name}.html.haml), File.basename(selected.templates[node_name].file) end end test 'should set outfilesuffix according to backend info' do doc = Asciidoctor.load 'content' doc.convert assert_equal '.html', doc.attributes['outfilesuffix'] doc = Asciidoctor.load 'content', template_dir: (fixture_path 'custom-backends/haml'), template_cache: false doc.convert assert_equal '.html', doc.attributes['outfilesuffix'] end test 'should not override outfilesuffix attribute if locked' do doc = 
Asciidoctor.load 'content', attributes: { 'outfilesuffix' => '.foo' } doc.convert assert_equal '.foo', doc.attributes['outfilesuffix'] doc = Asciidoctor.load 'content', template_dir: (fixture_path 'custom-backends/haml'), template_cache: false, attributes: { 'outfilesuffix' => '.foo' } doc.convert assert_equal '.foo', doc.attributes['outfilesuffix'] end test 'should load Haml templates for docbook5 backend' do doc = Asciidoctor::Document.new [], backend: 'docbook5', template_dir: (fixture_path 'custom-backends/haml'), template_cache: false assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter %w(paragraph).each do |node_name| selected = doc.converter.find_converter node_name assert_kind_of Asciidoctor::Converter::TemplateConverter, selected assert_kind_of haml_template_class, selected.templates[node_name] assert_equal %(block_#{node_name}.xml.haml), File.basename(selected.templates[node_name].file) end end test 'should use Haml templates in place of built-in templates' do input = <<~'EOS' = Document Title Author Name == Section One Sample paragraph .Related **** Sidebar content **** EOS output = convert_string_to_embedded input, template_dir: (fixture_path 'custom-backends/haml'), template_cache: false assert_xpath '/*[@class="sect1"]/*[@class="sectionbody"]/p', output, 1 assert_xpath '//aside', output, 1 assert_xpath '/*[@class="sect1"]/*[@class="sectionbody"]/p/following-sibling::aside', output, 1 assert_xpath '//aside/header/h1[text()="Related"]', output, 1 assert_xpath '//aside/header/following-sibling::p[text()="Sidebar content"]', output, 1 end test 'should allow custom backend to emulate a known backend' do doc = Asciidoctor.load 'content', backend: 'html5-tweaks:html', template_dir: (fixture_path 'custom-backends/haml'), template_cache: false assert doc.basebackend? 'html' assert_equal 'html5-tweaks', doc.backend converter = doc.converter assert_kind_of Asciidoctor::Converter::TemplateConverter, (converter.find_converter 'embedded') refute_kind_of Asciidoctor::Converter::TemplateConverter, (converter.find_converter 'admonition') assert_equal '

    content

    ', doc.convert end test 'should create template converter even when a converter is not registered for the specified backend' do input = 'paragraph content' output = convert_string_to_embedded input, backend: :unknown, template_dir: (fixture_path 'custom-backends/haml/html5-tweaks'), template_cache: false assert_equal '

    paragraph content

    ', output end test 'should use built-in global cache to cache templates' do begin Asciidoctor::Converter::TemplateConverter.clear_caches if defined? Asciidoctor::Converter::TemplateConverter template_dir = fixture_path 'custom-backends/haml' doc = Asciidoctor::Document.new [], template_dir: template_dir doc.converter caches = Asciidoctor::Converter::TemplateConverter.caches if defined? ::Concurrent::Map assert_kind_of ::Concurrent::Map, caches[:templates] else assert_kind_of ::Hash, caches[:templates] end refute_empty caches[:templates] paragraph_template_before = caches[:templates].values.find {|t| File.basename(t.file) == 'block_paragraph.html.haml' } refute_nil paragraph_template_before # should use cache doc = Asciidoctor::Document.new [], template_dir: template_dir template_converter = doc.converter.find_converter('paragraph') paragraph_template_after = template_converter.templates['paragraph'] refute_nil paragraph_template_after assert paragraph_template_before.eql?(paragraph_template_after) # should not use cache doc = Asciidoctor::Document.new [], template_dir: template_dir, template_cache: false template_converter = doc.converter.find_converter('paragraph') paragraph_template_after = template_converter.templates['paragraph'] refute_nil paragraph_template_after refute paragraph_template_before.eql?(paragraph_template_after) ensure # clean up Asciidoctor::Converter::TemplateConverter.clear_caches if defined? Asciidoctor::Converter::TemplateConverter end end test 'should use custom cache to cache templates' do template_dir = fixture_path 'custom-backends/haml' Asciidoctor::PathResolver.new.system_path(File.join(template_dir, 'html5', 'block_paragraph.html.haml'), nil) caches = { scans: {}, templates: {} } doc = Asciidoctor::Document.new [], template_dir: template_dir, template_cache: caches doc.converter refute_empty caches[:scans] refute_empty caches[:templates] paragraph_template = caches[:templates].values.find {|t| File.basename(t.file) == 'block_paragraph.html.haml' } refute_nil paragraph_template assert_kind_of haml_template_class, paragraph_template end test 'should be able to disable template cache' do begin Asciidoctor::Converter::TemplateConverter.clear_caches if defined? Asciidoctor::Converter::TemplateConverter doc = Asciidoctor::Document.new [], template_dir: (fixture_path 'custom-backends/haml'), template_cache: false doc.converter caches = Asciidoctor::Converter::TemplateConverter.caches assert_empty caches[:scans] assert_empty caches[:templates] ensure # clean up Asciidoctor::Converter::TemplateConverter.clear_caches if defined? Asciidoctor::Converter::TemplateConverter end end test 'should load ERB templates using ERBTemplate if eruby is not set' do input = %([.wrapper]\n--\nfoobar\n--) doc = Asciidoctor::Document.new input, template_dir: (fixture_path 'custom-backends/erb'), template_cache: false assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter %w(paragraph).each do |node_name| selected = doc.converter.find_converter node_name assert_kind_of Asciidoctor::Converter::TemplateConverter, selected template = selected.templates[node_name] assert_kind_of Tilt::ERBTemplate, template refute_kind_of Tilt::ErubiTemplate, template assert_kind_of ::ERB, template.instance_variable_get('@engine') assert_equal %(block_#{node_name}.html.erb), File.basename(selected.templates[node_name].file) end # NOTE verify behavior of trim mode expected_output = <<~'EOS'.chop

    foobar

    EOS assert_equal expected_output, doc.convert end test 'should load ERB templates using ErubiTemplate if eruby is set to erubi' do doc = Asciidoctor::Document.new [], template_dir: (fixture_path 'custom-backends/erb'), template_cache: false, eruby: 'erubi' assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter %w(paragraph).each do |node_name| selected = doc.converter.find_converter node_name assert_kind_of Asciidoctor::Converter::TemplateConverter, selected template = selected.templates[node_name] assert_kind_of Tilt::ErubiTemplate, template assert_kind_of ::Erubi::Engine, template.instance_variable_get('@engine') assert_equal %(block_#{node_name}.html.erb), File.basename(selected.templates[node_name].file) end end test 'should load Slim templates for default backend' do doc = Asciidoctor::Document.new [], template_dir: (fixture_path 'custom-backends/slim'), template_cache: false assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter %w(paragraph sidebar).each do |node_name| selected = doc.converter.find_converter node_name assert_kind_of Asciidoctor::Converter::TemplateConverter, selected assert_kind_of Slim::Template, selected.templates[node_name] assert_equal %(block_#{node_name}.html.slim), File.basename(selected.templates[node_name].file) end end test 'should load Slim templates for docbook5 backend' do doc = Asciidoctor::Document.new [], backend: 'docbook5', template_dir: (fixture_path 'custom-backends/slim'), template_cache: false assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter %w(paragraph).each do |node_name| selected = doc.converter.find_converter node_name assert_kind_of Asciidoctor::Converter::TemplateConverter, selected assert_kind_of Slim::Template, selected.templates[node_name] assert_equal %(block_#{node_name}.xml.slim), File.basename(selected.templates[node_name].file) end end test 'should use Slim templates in place of built-in templates' do input = <<~'EOS' = Document Title Author Name == Section One Sample paragraph .Related **** Sidebar content **** EOS output = convert_string_to_embedded input, template_dir: (fixture_path 'custom-backends/slim'), template_cache: false assert_xpath '/*[@class="sect1"]/*[@class="sectionbody"]/p', output, 1 assert_xpath '//aside', output, 1 assert_xpath '/*[@class="sect1"]/*[@class="sectionbody"]/p/following-sibling::aside', output, 1 assert_xpath '//aside/header/h1[text()="Related"]', output, 1 assert_xpath '//aside/header/following-sibling::p[text()="Sidebar content"]', output, 1 end test 'should be able to override the outline using a custom template' do input = <<~'EOS' :toc: = Document Title == Section One == Section Two == Section Three EOS output = document_from_string(input, template_dir: (fixture_path 'custom-backends/slim/html5-custom-outline'), template_cache: false).convert assert_xpath '//*[@id="toc"]/ul', output, 1 assert_xpath '//*[@id="toc"]/ul[1]/li', output, 3 assert_xpath '//*[@id="toc"]/ul[1]/li[1][text()="Section One"]', output, 1 end end context 'Custom converters' do test 'should not expose included method on Converter class' do refute_includes Asciidoctor::Converter.methods, :included assert_includes Asciidoctor::Converter.private_methods, :included refute_respond_to Asciidoctor::Converter, :included end test 'should derive backend traits for the given backend' do expected = { basebackend: 'dita', filetype: 'dita', outfilesuffix: '.dita' } actual = Asciidoctor::Converter.derive_backend_traits 'dita2' assert_equal expected, actual end test 'should 
use specified converter for current backend' do input = <<~'EOS' = Document Title preamble == Section content EOS class CustomHtmlConverterA def initialize *args; end def convert node, name = nil 'document' end end doc = document_from_string input, converter: CustomHtmlConverterA assert_kind_of CustomHtmlConverterA, doc.converter assert_equal 'html', doc.attributes['filetype'] assert_equal 'document', doc.convert end test 'should use specified converter for specified backend' do input = <<~'EOS' = Document Title preamble == Section content EOS class CustomTextConverterA def initialize *args; end def convert node, name = nil 'document' end end doc = document_from_string input, backend: 'text', converter: CustomTextConverterA assert_kind_of CustomTextConverterA, doc.converter assert_equal 'text', doc.attributes['filetype'] assert_equal 'document', doc.convert end test 'should get converter from specified converter factory' do input = <<~'EOS' = Document Title preamble == Section content EOS my_converter_class = Class.new Asciidoctor::Converter::Base do def convert_document node 'document' end end converter_factory = Asciidoctor::Converter::CustomFactory.new 'html5' => my_converter_class doc = document_from_string input, converter_factory: converter_factory assert_kind_of my_converter_class, doc.converter assert_equal 'html', doc.attributes['filetype'] assert_equal 'document', doc.convert end test 'should allow converter to set htmlsyntax when basebackend is html' do input = 'image::sunset.jpg[]' converter = Asciidoctor::Converter.create 'html5', htmlsyntax: 'xml' doc = document_from_string input, converter: converter assert_equal converter, doc.converter assert_equal 'xml', (doc.attr 'htmlsyntax') output = doc.convert standalone: false assert_includes output, 'sunset' end test 'should use converter registered for backend' do begin converters_before = Asciidoctor::Converter.converters class CustomConverterB include Asciidoctor::Converter register_for 'foobar' def initialize *args super basebackend 'text' filetype 'text' outfilesuffix '.fb' end def convert node, name = nil 'foobar content' end end input = 'content' assert_equal CustomConverterB, (Asciidoctor::Converter.for 'foobar') converters = Asciidoctor::Converter.converters assert converters.size == converters_before.size + 1 assert converters['foobar'] == CustomConverterB output = convert_string input, backend: 'foobar' assert_equal 'foobar content', output ensure Asciidoctor::Converter.unregister_all end end test 'should be able to register converter using symbol' do begin converter = Class.new Asciidoctor::Converter::Base do register_for :foobaz def initialize *args super basebackend 'text' filetype 'text' outfilesuffix '.fb' end end assert_equal converter, (Asciidoctor::Converter.for 'foobaz') ensure Asciidoctor::Converter.unregister_all end end test 'should use basebackend to compute filetype and outfilesuffix' do begin assert_nil Asciidoctor::Converter.for 'slides' class SlidesConverter < Asciidoctor::Converter::Base register_for 'slides' def initialize backend, opts = {} super basebackend 'html' end end doc = document_from_string 'content', backend: 'slides' assert_equal '.html', doc.outfilesuffix expected_traits = { basebackend: 'html', filetype: 'html', htmlsyntax: 'html', outfilesuffix: '.html' } assert_equal expected_traits, doc.converter.backend_traits ensure Asciidoctor::Converter.unregister_all end end test 'should be able to register converter from converter class itself' do begin assert_nil Asciidoctor::Converter.for 
'foobar' class AnotherCustomConverterB include Asciidoctor::Converter end AnotherCustomConverterB.register_for 'foobar' assert_equal AnotherCustomConverterB, (Asciidoctor::Converter.for 'foobar') ensure Asciidoctor::Converter.unregister_all end end test 'should map handles? method on converter to respond_to? implementation by default' do class CustomConverterC include Asciidoctor::Converter def convert_paragraph node 'paragraph' end end converter = CustomConverterC.new 'myhtml' assert_respond_to converter, :handles? assert converter.handles?(:convert_paragraph) end test 'should not configure converter to support templates by default' do begin class CustomConverterD include Asciidoctor::Converter register_for 'myhtml' def convert node, transform = nil, opts = nil transform ||= node.node_name send transform, node end def document node ['', '', '', node.content, '', ''] * %(\n) end def paragraph node ['
<div class="paragraph">', %(<p>#{node.content}</p>), '</div>
    '] * %(\n) end end input = 'paragraph' doc = document_from_string input, backend: 'myhtml', template_dir: (fixture_path 'custom-backends/slim/html5'), template_cache: false assert_kind_of CustomConverterD, doc.converter refute doc.converter.supports_templates? output = doc.convert assert_xpath '//*[@class="paragraph"]/p[text()="paragraph"]', output, 1 ensure Asciidoctor::Converter.unregister_all end end test 'should wrap converter in composite converter with template converter if it declares that it supports templates' do begin class CustomConverterE < Asciidoctor::Converter::Base register_for 'myhtml' def initialize *args super supports_templates end def convert node, transform = nil, opts = nil transform ||= node.node_name send transform, node end alias handles? respond_to? def document node ['', '', '', node.content, '', ''] * %(\n) end def paragraph node ['
<div class="paragraph">', %(<p>#{node.content}</p>), '</div>
    '] * %(\n) end end input = 'paragraph' doc = document_from_string input, backend: 'myhtml', template_dir: (fixture_path 'custom-backends/slim/html5'), template_cache: false assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter output = doc.convert assert_xpath '//*[@class="paragraph"]/p[text()="paragraph"]', output, 0 assert_xpath '//body/p[text()="paragraph"]', output, 1 ensure Asciidoctor::Converter.unregister_all end end test 'should map Factory.new to DefaultFactoryProxy constructor by default' do assert_equal (Asciidoctor::Converter.for 'html5'), (Asciidoctor::Converter::Factory.new.for 'html5') end test 'should map Factory.new to CustomFactory constructor if proxy keyword arg is false' do assert_nil (Asciidoctor::Converter::Factory.new proxy_default: false).for 'html5' end test 'should default to catch all converter' do begin class CustomConverterF include Asciidoctor::Converter register_for '*' def convert node, name = nil 'foobaz content' end end input = 'content' assert_equal CustomConverterF, (Asciidoctor::Converter.for 'all') assert_equal CustomConverterF, (Asciidoctor::Converter.for 'whatever') refute_equal CustomConverterF, (Asciidoctor::Converter.for 'html5') converters = Asciidoctor::Converter.converters assert_nil converters['*'] assert_equal CustomConverterF, (Asciidoctor::Converter.send :catch_all) output = convert_string input, backend: 'foobaz' assert_equal 'foobaz content', output ensure Asciidoctor::Converter.unregister_all end end test 'should use catch all converter from custom factory only if no other converter matches' do class FooConverter < Asciidoctor::Converter::Base; end class CatchAllConverter < Asciidoctor::Converter::Base; end factory = Asciidoctor::Converter::CustomFactory.new 'foo' => FooConverter, '*' => CatchAllConverter assert_equal FooConverter, (factory.for 'foo') assert_equal CatchAllConverter, (factory.for 'nada') assert_equal CatchAllConverter, (factory.for 'html5') end test 'should prefer catch all converter from proxy over statically registered catch all converter' do begin class StaticCatchAllConverter < Asciidoctor::Converter::Base register_for '*' end class LocalCatchAllConverter < Asciidoctor::Converter::Base; end factory = Asciidoctor::Converter::DefaultFactoryProxy.new '*' => LocalCatchAllConverter assert_equal LocalCatchAllConverter, (factory.for 'foobar') refute_equal LocalCatchAllConverter, (factory.for 'html5') refute_equal StaticCatchAllConverter, (factory.for 'html5') ensure Asciidoctor::Converter.unregister_all end end test 'should prefer converter in proxy with same name as provided converter' do class MyHtml5Converter < Asciidoctor::Converter::Base; end factory = Asciidoctor::Converter::DefaultFactoryProxy.new 'html5' => MyHtml5Converter assert_equal MyHtml5Converter, (factory.for 'html5') end test 'should allow nil to be registered as converter' do factory = Asciidoctor::Converter::DefaultFactoryProxy.new 'html5' => nil assert_nil factory.for 'html5' end test 'should create a new custom factory when Converter::Factory.new is invoked' do class MyConverter < Asciidoctor::Converter::Base; end converters = { 'mine' => MyConverter } factory = Asciidoctor::Converter::Factory.new converters assert_kind_of Asciidoctor::Converter::CustomFactory, factory assert_equal MyConverter, (factory.for 'mine') end test 'should delegate to method on HTML 5 converter with convert_ prefix if called without prefix' do doc = document_from_string 'paragraph' assert_respond_to doc.converter, :paragraph result = 
doc.converter.paragraph doc.blocks[0] assert_css 'p', result, 1 end test 'should not delegate unprefixed method on HTML 5 converter if converter does not handle transform' do doc = document_from_string 'paragraph' refute_respond_to doc.converter, :sentence assert_raises NoMethodError do doc.converter.sentence doc.blocks[0] end end test 'can call read_svg_contents on built-in HTML5 converter; should remove markup prior the root svg element' do doc = document_from_string 'image::circle.svg[]', base_dir: fixturedir result = doc.converter.read_svg_contents doc.blocks[0], 'circle.svg' refute_nil result assert result.start_with? '', result) assert_match('', result) end test 'maxdepth attribute should be set on asciidoc-toc and asciidoc-numbered processing instructions in DocBook backend' do doc = document_from_string 'content', backend: 'docbook', parse: true, attributes: { 'toclevels' => '1', 'sectnumlevels' => '1' } assert doc.attr?('toc') assert doc.attr?('sectnums') result = doc.convert assert_match('', result) assert_match('', result) end test 'should be able to disable toc and sectnums in document header in DocBook backend' do input = <<~'EOS' = Document Title :toc!: :sectnums!: EOS doc = document_from_string input, backend: 'docbook' refute doc.attr?('toc') refute doc.attr?('sectnums') end test 'noheader attribute should suppress info element when converting to DocBook' do input = <<~'EOS' = Document Title :noheader: content EOS result = convert_string input, backend: 'docbook' assert_xpath '/article', result, 1 assert_xpath '/article/info', result, 0 end test 'should be able to disable section numbering using numbered attribute in document header in DocBook backend' do input = <<~'EOS' = Document Title :numbered!: EOS doc = document_from_string input, backend: 'docbook' refute doc.attr?('sectnums') end end context 'Docinfo files' do test 'should include docinfo files for html backend' do sample_input_path = fixture_path('basic.adoc') cases = { 'docinfo' => { head_script: 1, meta: 0, top_link: 0, footer_script: 1, navbar: 1 }, 'docinfo=private' => { head_script: 1, meta: 0, top_link: 0, footer_script: 1, navbar: 1 }, 'docinfo1' => { head_script: 0, meta: 1, top_link: 1, footer_script: 0, navbar: 0 }, 'docinfo=shared' => { head_script: 0, meta: 1, top_link: 1, footer_script: 0, navbar: 0 }, 'docinfo2' => { head_script: 1, meta: 1, top_link: 1, footer_script: 1, navbar: 1 }, 'docinfo docinfo2' => { head_script: 1, meta: 1, top_link: 1, footer_script: 1, navbar: 1 }, 'docinfo=private,shared' => { head_script: 1, meta: 1, top_link: 1, footer_script: 1, navbar: 1 }, 'docinfo=private-head' => { head_script: 1, meta: 0, top_link: 0, footer_script: 0, navbar: 0 }, 'docinfo=private-header' => { head_script: 0, meta: 0, top_link: 0, footer_script: 0, navbar: 1 }, 'docinfo=shared-head' => { head_script: 0, meta: 1, top_link: 0, footer_script: 0, navbar: 0 }, 'docinfo=private-footer' => { head_script: 0, meta: 0, top_link: 0, footer_script: 1, navbar: 0 }, 'docinfo=shared-footer' => { head_script: 0, meta: 0, top_link: 1, footer_script: 0, navbar: 0 }, 'docinfo=private-head\ ,\ shared-footer' => { head_script: 1, meta: 0, top_link: 1, footer_script: 0, navbar: 0 }, } cases.each do |attr_val, markup| output = Asciidoctor.convert_file sample_input_path, to_file: false, standalone: true, safe: Asciidoctor::SafeMode::SERVER, attributes: %(linkcss copycss! 
#{attr_val}) refute_empty output assert_css 'script[src="modernizr.js"]', output, markup[:head_script] assert_css 'meta[http-equiv="imagetoolbar"]', output, markup[:meta] assert_css 'body > a#top', output, markup[:top_link] assert_css 'body > script', output, markup[:footer_script] assert_css 'body > nav.navbar', output, markup[:navbar] assert_css 'body > nav.navbar + #header', output, markup[:navbar] end end test 'should include docinfo header even if noheader attribute is set' do sample_input_path = fixture_path('basic.adoc') output = Asciidoctor.convert_file sample_input_path, to_file: false, standalone: true, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo' => 'private-header', 'noheader' => '' } refute_empty output assert_css 'body > nav.navbar', output, 1 assert_css 'body > nav.navbar + #content', output, 1 end test 'should include docinfo footer even if nofooter attribute is set' do sample_input_path = fixture_path('basic.adoc') output = Asciidoctor.convert_file sample_input_path, to_file: false, standalone: true, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo1' => '', 'nofooter' => '' } refute_empty output assert_css 'body > a#top', output, 1 end test 'should include user docinfo after built-in docinfo' do sample_input_path = fixture_path 'basic.adoc' attrs = { 'docinfo' => 'shared', 'source-highlighter' => 'highlight.js', 'linkcss' => '', 'copycss' => nil } output = Asciidoctor.convert_file sample_input_path, to_file: false, standalone: true, safe: :safe, attributes: attrs assert_css 'link[rel=stylesheet] + meta[http-equiv=imagetoolbar]', output, 1 assert_css 'meta[http-equiv=imagetoolbar] + *', output, 0 assert_css 'script + a#top', output, 1 assert_css 'a#top + *', output, 0 end test 'should include docinfo files for html backend with custom docinfodir' do sample_input_path = fixture_path('basic.adoc') output = Asciidoctor.convert_file sample_input_path, to_file: false, standalone: true, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo' => '', 'docinfodir' => 'custom-docinfodir' } refute_empty output assert_css 'script[src="bootstrap.js"]', output, 1 assert_css 'meta[name="robots"]', output, 0 output = Asciidoctor.convert_file sample_input_path, to_file: false, standalone: true, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo1' => '', 'docinfodir' => 'custom-docinfodir' } refute_empty output assert_css 'script[src="bootstrap.js"]', output, 0 assert_css 'meta[name="robots"]', output, 1 output = Asciidoctor.convert_file sample_input_path, to_file: false, standalone: true, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo2' => '', 'docinfodir' => './custom-docinfodir' } refute_empty output assert_css 'script[src="bootstrap.js"]', output, 1 assert_css 'meta[name="robots"]', output, 1 output = Asciidoctor.convert_file sample_input_path, to_file: false, standalone: true, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo2' => '', 'docinfodir' => 'custom-docinfodir/subfolder' } refute_empty output assert_css 'script[src="bootstrap.js"]', output, 0 assert_css 'meta[name="robots"]', output, 0 end test 'should include docinfo files in docbook backend' do sample_input_path = fixture_path('basic.adoc') output = Asciidoctor.convert_file sample_input_path, to_file: false, standalone: true, backend: 'docbook', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo' => '' } refute_empty output assert_css 'productname', output, 0 assert_css 'copyright', output, 1 output = Asciidoctor.convert_file sample_input_path, to_file: 
false, standalone: true, backend: 'docbook', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo1' => '' } refute_empty output assert_css 'productname', output, 1 assert_xpath '//xmlns:productname[text()="Asciidoctor™"]', output, 1 assert_css 'edition', output, 1 assert_xpath '//xmlns:edition[text()="1.0"]', output, 1 # verifies substitutions are performed assert_css 'copyright', output, 0 output = Asciidoctor.convert_file sample_input_path, to_file: false, standalone: true, backend: 'docbook', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo2' => '' } refute_empty output assert_css 'productname', output, 1 assert_xpath '//xmlns:productname[text()="Asciidoctor™"]', output, 1 assert_css 'edition', output, 1 assert_xpath '//xmlns:edition[text()="1.0"]', output, 1 # verifies substitutions are performed assert_css 'copyright', output, 1 end test 'should use header docinfo in place of default header' do output = Asciidoctor.convert_file fixture_path('sample.adoc'), to_file: false, standalone: true, backend: 'docbook', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo' => 'private-header', 'noheader' => '' } refute_empty output assert_css 'article > info', output, 1 assert_css 'article > info > title', output, 1 assert_css 'article > info > revhistory', output, 1 assert_css 'article > info > revhistory > revision', output, 2 end test 'should include docinfo footer files for html backend' do sample_input_path = fixture_path('basic.adoc') output = Asciidoctor.convert_file sample_input_path, to_file: false, standalone: true, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo' => '' } refute_empty output assert_css 'body script', output, 1 assert_css 'a#top', output, 0 output = Asciidoctor.convert_file sample_input_path, to_file: false, standalone: true, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo1' => '' } refute_empty output assert_css 'body script', output, 0 assert_css 'a#top', output, 1 output = Asciidoctor.convert_file sample_input_path, to_file: false, standalone: true, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo2' => '' } refute_empty output assert_css 'body script', output, 1 assert_css 'a#top', output, 1 end test 'should include docinfo footer files in DocBook backend' do sample_input_path = fixture_path('basic.adoc') output = Asciidoctor.convert_file sample_input_path, to_file: false, standalone: true, backend: 'docbook', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo' => '' } refute_empty output assert_css 'article > revhistory', output, 1 assert_xpath '/xmlns:article/xmlns:revhistory/xmlns:revision/xmlns:revnumber[text()="1.0"]', output, 1 # verifies substitutions are performed assert_css 'glossary', output, 0 output = Asciidoctor.convert_file sample_input_path, to_file: false, standalone: true, backend: 'docbook', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo1' => '' } refute_empty output assert_css 'article > revhistory', output, 0 assert_css 'glossary[xml|id="_glossary"]', output, 1 output = Asciidoctor.convert_file sample_input_path, to_file: false, standalone: true, backend: 'docbook', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo2' => '' } refute_empty output assert_css 'article > revhistory', output, 1 assert_xpath '/xmlns:article/xmlns:revhistory/xmlns:revision/xmlns:revnumber[text()="1.0"]', output, 1 # verifies substitutions are performed assert_css 'glossary[xml|id="_glossary"]', output, 1 end # WARNING this test manipulates runtime settings; should probably be run 
in forked process test 'should force encoding of docinfo files to UTF-8' do old_external = Encoding.default_external old_internal = Encoding.default_internal old_verbose = $VERBOSE begin $VERBOSE = nil # disable warnings since we have to modify constants Encoding.default_external = Encoding.default_internal = Encoding::IBM437 sample_input_path = fixture_path('basic.adoc') output = Asciidoctor.convert_file sample_input_path, to_file: false, standalone: true, backend: 'docbook', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo' => 'private,shared' } refute_empty output assert_css 'productname', output, 1 assert_includes output, 'Asciidoctor™' assert_css 'edition', output, 1 assert_xpath '//xmlns:edition[text()="1.0"]', output, 1 # verifies substitutions are performed assert_css 'copyright', output, 1 ensure Encoding.default_external = old_external Encoding.default_internal = old_internal $VERBOSE = old_verbose end end test 'should not include docinfo files by default' do sample_input_path = fixture_path('basic.adoc') output = Asciidoctor.convert_file sample_input_path, to_file: false, standalone: true, safe: Asciidoctor::SafeMode::SERVER refute_empty output assert_css 'script[src="modernizr.js"]', output, 0 assert_css 'meta[http-equiv="imagetoolbar"]', output, 0 output = Asciidoctor.convert_file sample_input_path, to_file: false, standalone: true, backend: 'docbook', safe: Asciidoctor::SafeMode::SERVER refute_empty output assert_css 'productname', output, 0 assert_css 'copyright', output, 0 end test 'should not include docinfo files if safe mode is SECURE or greater' do sample_input_path = fixture_path('basic.adoc') output = Asciidoctor.convert_file sample_input_path, to_file: false, standalone: true, attributes: { 'docinfo2' => '' } refute_empty output assert_css 'script[src="modernizr.js"]', output, 0 assert_css 'meta[http-equiv="imagetoolbar"]', output, 0 output = Asciidoctor.convert_file sample_input_path, to_file: false, standalone: true, backend: 'docbook', attributes: { 'docinfo2' => '' } refute_empty output assert_css 'productname', output, 0 assert_css 'copyright', output, 0 end test 'should substitute attributes in docinfo files by default' do sample_input_path = fixture_path 'subs.adoc' using_memory_logger do |logger| output = Asciidoctor.convert_file sample_input_path, to_file: false, standalone: true, safe: :server, attributes: { 'docinfo' => '', 'bootstrap-version' => nil, 'linkcss' => '', 'attribute-missing' => 'drop-line' } refute_empty output assert_css 'script', output, 0 assert_xpath %(//meta[@name="copyright"][@content="(C) OpenDevise"]), output, 1 assert_message logger, :INFO, 'dropping line containing reference to missing attribute: bootstrap-version' end end test 'should apply explicit substitutions to docinfo files' do sample_input_path = fixture_path 'subs.adoc' output = Asciidoctor.convert_file sample_input_path, to_file: false, standalone: true, safe: :server, attributes: { 'docinfo' => '', 'docinfosubs' => 'attributes,replacements', 'linkcss' => '' } refute_empty output assert_css 'script[src="bootstrap.3.2.0.min.js"]', output, 1 assert_xpath %(//meta[@name="copyright"][@content="#{decode_char 169} OpenDevise"]), output, 1 end end context 'MathJax' do test 'should add MathJax script to HTML head if stem attribute is set' do output = convert_string '', attributes: { 'stem' => '' } assert_match('), {}, content_model: :raw end end class LegacyPosAttrsBlockMacro < Asciidoctor::Extensions::BlockMacroProcessor option :pos_attrs, ['target', 'format'] def 
process parent, _, attrs create_image_block parent, { 'target' => %(#{attrs['target']}.#{attrs['format']}) } end end class TemperatureMacro < Asciidoctor::Extensions::InlineMacroProcessor; use_dsl named :degrees resolve_attributes '1:units', 'precision=1' def process parent, target, attributes units = attributes['units'] || (parent.document.attr 'temperature-unit', 'C') precision = attributes['precision'].to_i c = target.to_f case units when 'C' create_inline parent, :quoted, %(#{c.round precision} °C), type: :unquoted when 'F' create_inline parent, :quoted, %(#{(c * 1.8 + 32).round precision} °F), type: :unquoted else raise ::ArgumentError, %(Unknown temperature units: #{units}) end end end class MetaRobotsDocinfoProcessor < Asciidoctor::Extensions::DocinfoProcessor def process document '' end end class MetaAppDocinfoProcessor < Asciidoctor::Extensions::DocinfoProcessor; use_dsl at_location :head def process document '' end end class SampleExtensionGroup < Asciidoctor::Extensions::Group def activate registry registry.document.attributes['activate-method-called'] = '' registry.preprocessor SamplePreprocessor end end def create_cat_in_sink_block_macro Asciidoctor::Extensions.create do block_macro do named :cat_in_sink process do |parent, target, attrs| image_attrs = {} unless target.nil_or_empty? image_attrs['target'] = %(cat-in-sink-day-#{target}.png) end if (title = attrs.delete 'title') image_attrs['title'] = title end if (alt = attrs.delete 1) image_attrs['alt'] = alt end create_image_block parent, image_attrs end end end end def create_santa_list_block_macro Asciidoctor::Extensions.create do block_macro do named :santa_list process do |parent, target| list = create_list parent, target guillaume = (create_list_item list, 'Guillaume') guillaume.add_role('friendly') guillaume.id = 'santa-list-guillaume' list << guillaume robert = (create_list_item list, 'Robert') robert.add_role('kind') robert.add_role('contributor') robert.add_role('java') list << robert pepijn = (create_list_item list, 'Pepijn') pepijn.id = 'santa-list-pepijn' list << pepijn dan = (create_list_item list, 'Dan') dan.add_role('naughty') dan.id = 'santa-list-dan' list << dan sarah = (create_list_item list, 'Sarah') list << sarah list end end end end context 'Extensions' do context 'Register' do test 'should not activate registry if no extension groups are registered' do assert defined? 
Asciidoctor::Extensions doc = empty_document refute doc.extensions?, 'Extensions should not be enabled if not groups are registered' end test 'should register extension group class' do begin Asciidoctor::Extensions.register :sample, SampleExtensionGroup refute_nil Asciidoctor::Extensions.groups assert_equal 1, Asciidoctor::Extensions.groups.size assert_equal SampleExtensionGroup, Asciidoctor::Extensions.groups[:sample] ensure Asciidoctor::Extensions.unregister_all end end test 'should self register extension group class' do begin SampleExtensionGroup.register :sample refute_nil Asciidoctor::Extensions.groups assert_equal 1, Asciidoctor::Extensions.groups.size assert_equal SampleExtensionGroup, Asciidoctor::Extensions.groups[:sample] ensure Asciidoctor::Extensions.unregister_all end end test 'should register extension group from class name' do begin Asciidoctor::Extensions.register :sample, 'SampleExtensionGroup' refute_nil Asciidoctor::Extensions.groups assert_equal 1, Asciidoctor::Extensions.groups.size assert_equal SampleExtensionGroup, Asciidoctor::Extensions.groups[:sample] ensure Asciidoctor::Extensions.unregister_all end end test 'should register extension group from instance' do begin Asciidoctor::Extensions.register :sample, SampleExtensionGroup.new refute_nil Asciidoctor::Extensions.groups assert_equal 1, Asciidoctor::Extensions.groups.size assert_kind_of SampleExtensionGroup, Asciidoctor::Extensions.groups[:sample] ensure Asciidoctor::Extensions.unregister_all end end test 'should register extension block' do begin Asciidoctor::Extensions.register :sample do end refute_nil Asciidoctor::Extensions.groups assert_equal 1, Asciidoctor::Extensions.groups.size assert_kind_of Proc, Asciidoctor::Extensions.groups[:sample] ensure Asciidoctor::Extensions.unregister_all end end test 'should coerce group name to symbol when registering' do begin Asciidoctor::Extensions.register 'sample', SampleExtensionGroup refute_nil Asciidoctor::Extensions.groups assert_equal 1, Asciidoctor::Extensions.groups.size assert_equal SampleExtensionGroup, Asciidoctor::Extensions.groups[:sample] ensure Asciidoctor::Extensions.unregister_all end end test 'should unregister extension group by symbol name' do begin Asciidoctor::Extensions.register :sample, SampleExtensionGroup refute_nil Asciidoctor::Extensions.groups assert_equal 1, Asciidoctor::Extensions.groups.size Asciidoctor::Extensions.unregister :sample assert_equal 0, Asciidoctor::Extensions.groups.size ensure Asciidoctor::Extensions.unregister_all end end test 'should unregister extension group by string name' do begin Asciidoctor::Extensions.register :sample, SampleExtensionGroup refute_nil Asciidoctor::Extensions.groups assert_equal 1, Asciidoctor::Extensions.groups.size Asciidoctor::Extensions.unregister 'sample' assert_equal 0, Asciidoctor::Extensions.groups.size ensure Asciidoctor::Extensions.unregister_all end end test 'should unregister multiple extension groups by name' do begin Asciidoctor::Extensions.register :sample1, SampleExtensionGroup Asciidoctor::Extensions.register :sample2, SampleExtensionGroup refute_nil Asciidoctor::Extensions.groups assert_equal 2, Asciidoctor::Extensions.groups.size Asciidoctor::Extensions.unregister :sample1, :sample2 assert_equal 0, Asciidoctor::Extensions.groups.size ensure Asciidoctor::Extensions.unregister_all end end test 'should not fail to unregister extension group if not registered' do refute_nil Asciidoctor::Extensions.groups assert_equal 0, Asciidoctor::Extensions.groups.size 
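# unregistering a group name that was never registered must not raise and leaves the group registry empty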
Asciidoctor::Extensions.unregister :sample assert_equal 0, Asciidoctor::Extensions.groups.size end test 'should not fail to unregister extension group if extension groups are not initialized' do Asciidoctor::Extensions.remove_instance_variable :@groups Asciidoctor::Extensions.unregister :sample assert_equal 0, Asciidoctor::Extensions.groups.size end test 'should raise NameError if extension class cannot be resolved from string' do begin Asciidoctor::Extensions.register do block 'foobar' end empty_document flunk 'Expecting RuntimeError to be raised' rescue NameError => e assert_match %r/^Could not resolve class for name: foobar$/, e.message ensure Asciidoctor::Extensions.unregister_all end end test 'should allow standalone registry to be created but not registered' do registry = Asciidoctor::Extensions.create 'sample' do block do named :whisper on_context :paragraph parse_content_as :simple def process parent, reader, attributes create_paragraph parent, reader.lines.map(&:downcase), attributes end end end assert_instance_of Asciidoctor::Extensions::Registry, registry refute_nil registry.groups assert_equal 1, registry.groups.size assert_equal 'sample', registry.groups.keys.first assert_equal 0, Asciidoctor::Extensions.groups.size end end context 'Activate' do test 'should call activate on extension group class' do begin doc = Asciidoctor::Document.new Asciidoctor::Extensions.register :sample, SampleExtensionGroup registry = Asciidoctor::Extensions::Registry.new registry.activate doc assert doc.attr? 'activate-method-called' assert registry.preprocessors? ensure Asciidoctor::Extensions.unregister_all end end test 'should reset registry if activate is called again' do begin Asciidoctor::Extensions.register :sample, SampleExtensionGroup doc = Asciidoctor::Document.new registry = Asciidoctor::Extensions::Registry.new registry.activate doc assert doc.attr? 'activate-method-called' assert registry.preprocessors? assert_equal 1, registry.preprocessors.size assert_same doc, registry.document doc = Asciidoctor::Document.new registry.activate doc assert doc.attr? 'activate-method-called' assert registry.preprocessors? assert_equal 1, registry.preprocessors.size assert_same doc, registry.document ensure Asciidoctor::Extensions.unregister_all end end test 'should invoke extension block' do begin doc = Asciidoctor::Document.new Asciidoctor::Extensions.register do @document.attributes['block-called'] = '' preprocessor SamplePreprocessor end registry = Asciidoctor::Extensions::Registry.new registry.activate doc assert doc.attr? 'block-called' assert registry.preprocessors? ensure Asciidoctor::Extensions.unregister_all end end test 'should create registry in Document if extensions are loaded' do begin SampleExtensionGroup.register doc = Asciidoctor::Document.new assert doc.extensions? assert_kind_of Asciidoctor::Extensions::Registry, doc.extensions ensure Asciidoctor::Extensions.unregister_all end end end context 'Instantiate' do test 'should instantiate preprocessors' do registry = Asciidoctor::Extensions::Registry.new registry.preprocessor SamplePreprocessor registry.activate Asciidoctor::Document.new assert registry.preprocessors? 
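# activation wraps each registered processor class in a ProcessorExtension that exposes the instance and its process method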
extensions = registry.preprocessors assert_equal 1, extensions.size assert_kind_of Asciidoctor::Extensions::ProcessorExtension, extensions.first assert_kind_of SamplePreprocessor, extensions.first.instance assert_kind_of Method, extensions.first.process_method end test 'should instantiate include processors' do registry = Asciidoctor::Extensions::Registry.new registry.include_processor SampleIncludeProcessor registry.activate Asciidoctor::Document.new assert registry.include_processors? extensions = registry.include_processors assert_equal 1, extensions.size assert_kind_of Asciidoctor::Extensions::ProcessorExtension, extensions.first assert_kind_of SampleIncludeProcessor, extensions.first.instance assert_kind_of Method, extensions.first.process_method end test 'should instantiate docinfo processors' do registry = Asciidoctor::Extensions::Registry.new registry.docinfo_processor SampleDocinfoProcessor registry.activate Asciidoctor::Document.new assert registry.docinfo_processors? assert registry.docinfo_processors?(:head) extensions = registry.docinfo_processors assert_equal 1, extensions.size assert_kind_of Asciidoctor::Extensions::ProcessorExtension, extensions.first assert_kind_of SampleDocinfoProcessor, extensions.first.instance assert_kind_of Method, extensions.first.process_method end # NOTE intentionally using the legacy names test 'should instantiate tree processors' do registry = Asciidoctor::Extensions::Registry.new registry.treeprocessor SampleTreeprocessor registry.activate Asciidoctor::Document.new assert registry.treeprocessors? extensions = registry.treeprocessors assert_equal 1, extensions.size assert_kind_of Asciidoctor::Extensions::ProcessorExtension, extensions.first assert_kind_of SampleTreeprocessor, extensions.first.instance assert_kind_of Method, extensions.first.process_method end test 'should instantiate postprocessors' do registry = Asciidoctor::Extensions::Registry.new registry.postprocessor SamplePostprocessor registry.activate Asciidoctor::Document.new assert registry.postprocessors? extensions = registry.postprocessors assert_equal 1, extensions.size assert_kind_of Asciidoctor::Extensions::ProcessorExtension, extensions.first assert_kind_of SamplePostprocessor, extensions.first.instance assert_kind_of Method, extensions.first.process_method end test 'should instantiate block processor' do registry = Asciidoctor::Extensions::Registry.new registry.block SampleBlock, :sample registry.activate Asciidoctor::Document.new assert registry.blocks? assert registry.registered_for_block? :sample, :paragraph extension = registry.find_block_extension :sample assert_kind_of Asciidoctor::Extensions::ProcessorExtension, extension assert_kind_of SampleBlock, extension.instance assert_kind_of Method, extension.process_method end test 'should not match block processor for unsupported context' do registry = Asciidoctor::Extensions::Registry.new registry.block SampleBlock, :sample registry.activate Asciidoctor::Document.new refute registry.registered_for_block? :sample, :sidebar end test 'should instantiate block macro processor' do registry = Asciidoctor::Extensions::Registry.new registry.block_macro SampleBlockMacro, 'sample' registry.activate Asciidoctor::Document.new assert registry.block_macros? assert registry.registered_for_block_macro? 
'sample' extension = registry.find_block_macro_extension 'sample' assert_kind_of Asciidoctor::Extensions::ProcessorExtension, extension assert_kind_of SampleBlockMacro, extension.instance assert_kind_of Method, extension.process_method end test 'should instantiate inline macro processor' do registry = Asciidoctor::Extensions::Registry.new registry.inline_macro SampleInlineMacro, 'sample' registry.activate Asciidoctor::Document.new assert registry.inline_macros? assert registry.registered_for_inline_macro? 'sample' extension = registry.find_inline_macro_extension 'sample' assert_kind_of Asciidoctor::Extensions::ProcessorExtension, extension assert_kind_of SampleInlineMacro, extension.instance assert_kind_of Method, extension.process_method end test 'should allow processors to be registered by a string name' do registry = Asciidoctor::Extensions::Registry.new registry.preprocessor 'SamplePreprocessor' registry.activate Asciidoctor::Document.new assert registry.preprocessors? extensions = registry.preprocessors assert_equal 1, extensions.size assert_kind_of Asciidoctor::Extensions::ProcessorExtension, extensions.first end end context 'Integration' do test 'can provide extension registry as an option' do registry = Asciidoctor::Extensions.create do tree_processor SampleTreeProcessor end doc = document_from_string %(= Document Title\n\ncontent), extension_registry: registry refute_nil doc.extensions assert_equal 1, doc.extensions.groups.size assert doc.extensions.tree_processors? assert_equal 1, doc.extensions.tree_processors.size assert_equal 0, Asciidoctor::Extensions.groups.size end # NOTE I'm not convinced we want to continue to support this use case test 'can provide extension registry created without any groups as option' do registry = Asciidoctor::Extensions.create registry.tree_processor SampleTreeProcessor doc = document_from_string %(= Document Title\n\ncontent), extension_registry: registry refute_nil doc.extensions assert_equal 0, doc.extensions.groups.size assert doc.extensions.tree_processors? assert_equal 1, doc.extensions.tree_processors.size assert_equal 0, Asciidoctor::Extensions.groups.size end test 'can provide extensions proc as option' do doc = document_from_string %(= Document Title\n\ncontent), extensions: proc { tree_processor SampleTreeProcessor } refute_nil doc.extensions assert_equal 1, doc.extensions.groups.size assert doc.extensions.tree_processors? assert_equal 1, doc.extensions.tree_processors.size assert_equal 0, Asciidoctor::Extensions.groups.size end test 'should not activate global registry if :extensions option is false' do begin Asciidoctor::Extensions.register :sample do end refute_nil Asciidoctor::Extensions.groups assert_equal 1, Asciidoctor::Extensions.groups.size doc = empty_document extensions: false refute doc.extensions?, 'Extensions should not be enabled if :extensions option is false' ensure Asciidoctor::Extensions.unregister_all end end test 'should invoke preprocessors before parsing document' do input = <<~'EOS' junk line = Document Title sample content EOS begin Asciidoctor::Extensions.register do preprocessor ScrubHeaderPreprocessor end doc = document_from_string input assert doc.attr? 'skipped' assert_equal 'junk line', (doc.attr 'skipped').strip assert doc.has_header? 
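# the junk line was captured by the preprocessor, so the document header is still parsed normally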
assert_equal 'Document Title', doc.doctitle ensure Asciidoctor::Extensions.unregister_all end end test 'should invoke include processor to process include directive' do input = <<~'EOS' before include::lorem-ipsum.txt[] after EOS begin Asciidoctor::Extensions.register do include_processor BoilerplateTextIncludeProcessor end # a custom include processor is not affected by the safe mode result = convert_string input, safe: :secure assert_css '.paragraph > p', result, 3 assert_includes result, 'before' assert_includes result, 'Lorem ipsum' assert_includes result, 'after' ensure Asciidoctor::Extensions.unregister_all end end test 'should invoke include processor if it offers to handle include directive' do input = <<~'EOS' include::skip-me.adoc[] line after skip include::include-file.adoc[] include::fixtures/grandchild-include.adoc[] last line EOS registry = Asciidoctor::Extensions.create do include_processor do handles? do |target| target == 'skip-me.adoc' end process do |doc, reader, target, attributes| end end include_processor do handles? do |target| target == 'include-file.adoc' end process do |doc, reader, target, attributes| # demonstrates that push_include normalizes newlines content = [ %(found include target '#{target}' at line #{reader.cursor_at_prev_line.lineno}\r\n), %(\r\n), %(middle line\r\n) ] reader.push_include content, target, target, 1, attributes end end end # safe mode only required for built-in include processor document = empty_document base_dir: testdir, extension_registry: registry, safe: :safe reader = Asciidoctor::PreprocessorReader.new document, input, nil, normalize: true lines = [] lines << reader.read_line assert_equal 'line after skip', lines.last lines << reader.read_line lines << reader.read_line assert_equal 'found include target \'include-file.adoc\' at line 4', lines.last assert_equal 'include-file.adoc: line 2', reader.line_info while reader.has_more_lines? 
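# drain the remaining lines to verify the content pushed by the include processors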
lines << reader.read_line end source = lines * ::Asciidoctor::LF assert_match(/^found include target 'include-file.adoc' at line 4$/, source) assert_match(/^middle line$/, source) assert_match(/^last line of grandchild$/, source) assert_match(/^last line$/, source) end test 'should invoke tree processors after parsing document' do input = <<~'EOS' = Document Title Doc Writer content EOS begin Asciidoctor::Extensions.register do tree_processor ReplaceAuthorTreeProcessor end doc = document_from_string input assert_equal 'Ghost Writer', doc.author ensure Asciidoctor::Extensions.unregister_all end end test 'should set source_location on document before invoking tree processors' do begin Asciidoctor::Extensions.register do tree_processor do process do |doc| para = create_paragraph doc.blocks.last.parent, %(file: #{doc.file}, lineno: #{doc.lineno}), {} doc << para end end end sample_doc = fixture_path 'sample.adoc' doc = Asciidoctor.load_file sample_doc, sourcemap: true assert_includes doc.convert, 'file: sample.adoc, lineno: 1' ensure Asciidoctor::Extensions.unregister_all end end test 'should allow tree processor to replace tree' do input = <<~'EOS' = Original Document Doc Writer content EOS begin Asciidoctor::Extensions.register do tree_processor ReplaceTreeTreeProcessor end doc = document_from_string input assert_equal 'Replacement Document', doc.doctitle ensure Asciidoctor::Extensions.unregister_all end end test 'should honor block title assigned in tree processor' do input = <<~'EOS' = Document Title :!example-caption: .Old block title ==== example block content ==== EOS old_title = nil begin Asciidoctor::Extensions.register do tree_processor do process do |doc| ex = (doc.find_by context: :example)[0] old_title = ex.title ex.title = 'New block title' end end end doc = document_from_string input assert_equal 'Old block title', old_title assert_equal 'New block title', (doc.find_by context: :example)[0].title ensure Asciidoctor::Extensions.unregister_all end end test 'should be able to register preferred tree processor' do begin Asciidoctor::Extensions.register do tree_processor do process do |doc| doc << (create_paragraph doc, 'd', {}) nil end end tree_processor do prefer process do |doc| doc << (create_paragraph doc, 'c', {}) nil end end prefer :tree_processor do process do |doc| doc << (create_paragraph doc, 'b', {}) nil end end prefer (tree_processor do process do |doc| doc << (create_paragraph doc, 'a', {}) nil end end) prefer :tree_processor, SelfSigningTreeProcessor end (doc = empty_document).convert assert_equal %w(SelfSigningTreeProcessor a b c d), doc.blocks.map(&:lines).map(&:first) ensure Asciidoctor::Extensions.unregister_all end end test 'should invoke postprocessors after converting document' do input = <<~'EOS' * one * two * three EOS begin Asciidoctor::Extensions.register do postprocessor StripAttributesPostprocessor end output = convert_string input refute_match(/
    /, output) ensure Asciidoctor::Extensions.unregister_all end end test 'should yield to document processor block if block has non-zero arity' do input = <<~'EOS' hi! EOS begin Asciidoctor::Extensions.register do tree_processor do |processor| processor.process do |doc| doc << (create_paragraph doc, 'bye!', {}) end end end output = convert_string_to_embedded input assert_xpath '//p', output, 2 assert_xpath '//p[text()="hi!"]', output, 1 assert_xpath '//p[text()="bye!"]', output, 1 ensure Asciidoctor::Extensions.unregister_all end end test 'should invoke processor for custom block' do input = <<~'EOS' [yell] Hi there! [yell,chars=aeiou] Hi there! EOS begin Asciidoctor::Extensions.register do block UppercaseBlock end output = convert_string_to_embedded input assert_xpath '//p', output, 2 assert_xpath '(//p)[1][text()="HI THERE!"]', output, 1 assert_xpath '(//p)[2][text()="hI thErE!"]', output, 1 ensure Asciidoctor::Extensions.unregister_all end end test 'should invoke processor for custom block in an AsciiDoc table cell' do input = <<~'EOS' |=== a| [yell] Hi there! |=== EOS begin Asciidoctor::Extensions.register do block UppercaseBlock end output = convert_string_to_embedded input assert_xpath '/table//p', output, 1 assert_xpath '/table//p[text()="HI THERE!"]', output, 1 ensure Asciidoctor::Extensions.unregister_all end end test 'should yield to syntax processor block if block has non-zero arity' do input = <<~'EOS' [eval] .... 'yolo' * 5 .... EOS begin Asciidoctor::Extensions.register do block :eval do |processor| processor.on_context :literal processor.process do |parent, reader, attrs| create_paragraph parent, (eval reader.read_lines[0]), {} end end end output = convert_string_to_embedded input assert_xpath '//p[text()="yoloyoloyoloyoloyolo"]', output, 1 ensure Asciidoctor::Extensions.unregister_all end end test 'should pass cloaked context in attributes passed to process method of custom block' do input = <<~'EOS' [custom] **** sidebar **** EOS cloaked_context = nil begin Asciidoctor::Extensions.register do block :custom do on_context :sidebar process do |doc, reader, attrs| cloaked_context = attrs['cloaked-context'] nil end end end convert_string_to_embedded input assert_equal :sidebar, cloaked_context ensure Asciidoctor::Extensions.unregister_all end end test 'should allow extension to promote paragraph to compound block' do input = <<~'EOS' [ex] example EOS begin Asciidoctor::Extensions.register do block :ex do on_context :paragraph process do |parent, reader| create_example_block parent, reader.read_lines, {}, content_model: :compound end end end output = convert_string_to_embedded input assert_css '.exampleblock .paragraph', output, 1 ensure Asciidoctor::Extensions.unregister_all end end test 'should invoke processor for custom block macro' do input = 'snippet::12345[mode=edit]' begin Asciidoctor::Extensions.register do block_macro SnippetMacro, :snippet end output = convert_string_to_embedded input assert_includes output, '' ensure Asciidoctor::Extensions.unregister_all end end test 'should substitute attributes in target of custom block macro' do input = 'snippet::{gist-id}[mode=edit]' begin Asciidoctor::Extensions.register do block_macro SnippetMacro, :snippet end output = convert_string_to_embedded input, attributes: { 'gist-id' => '12345' } assert_includes output, '' ensure Asciidoctor::Extensions.unregister_all end end test 'should log debug message if custom block macro is unknown' do input = 'unknown::[]' using_memory_logger Logger::Severity::DEBUG do |logger| 
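# an unrecognized block macro name is reported as a DEBUG message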
convert_string_to_embedded input assert_message logger, :DEBUG, ': line 1: unknown name for block macro: unknown', Hash end end test 'should drop block macro line if target references missing attribute and attribute-missing is drop-line' do input = <<~'EOS' [.rolename] snippet::{gist-ns}12345[mode=edit] following paragraph EOS begin Asciidoctor::Extensions.register do block_macro SnippetMacro, :snippet end doc, output = nil, nil using_memory_logger do |logger| doc = document_from_string input, attributes: { 'attribute-missing' => 'drop-line' } assert_equal 1, doc.blocks.size assert_equal :paragraph, doc.blocks[0].context output = doc.convert assert_message logger, :INFO, 'dropping line containing reference to missing attribute: gist-ns' end assert_css '.paragraph', output, 1 assert_css '.rolename', output, 0 ensure Asciidoctor::Extensions.unregister_all end end test 'should invoke processor for custom block macro in an AsciiDoc table cell' do input = <<~'EOS' |=== a|message::hi[] |=== EOS begin Asciidoctor::Extensions.register do block_macro :message do process do |parent, target, attrs| create_paragraph parent, target.upcase, {} end end end output = convert_string_to_embedded input assert_xpath '/table//p[text()="HI"]', output, 1 ensure Asciidoctor::Extensions.unregister_all end end test 'should match short form of block macro' do input = 'custom-toc::[]' resolved_target = nil begin Asciidoctor::Extensions.register do block_macro do named 'custom-toc' process do |parent, target, attrs| resolved_target = target create_pass_block parent, '', {}, content_model: :raw end end end output = convert_string_to_embedded input assert_equal '', output assert_equal '', resolved_target ensure Asciidoctor::Extensions.unregister_all end end test 'should fail to convert if name of block macro is illegal' do input = 'illegal name::target[]' begin Asciidoctor::Extensions.register do block_macro do named 'illegal name' process do |parent, target, attrs| nil end end end assert_raises ArgumentError do convert_string_to_embedded input end ensure Asciidoctor::Extensions.unregister_all end end test 'should honor legacy :pos_attrs option set via static method' do begin Asciidoctor::Extensions.register do block_macro LegacyPosAttrsBlockMacro, :diag end result = convert_string_to_embedded 'diag::[filename,png]' assert_css 'img[src="filename.png"]', result, 1 ensure Asciidoctor::Extensions.unregister_all end end test 'should honor legacy :pos_attrs option set via DSL' do begin Asciidoctor::Extensions.register do block_macro :diag do option :pos_attrs, ['target', 'format'] process do |parent, _, attrs| create_image_block parent, { 'target' => %(#{attrs['target']}.#{attrs['format']}) } end end end result = convert_string_to_embedded 'diag::[filename,png]' assert_css 'img[src="filename.png"]', result, 1 ensure Asciidoctor::Extensions.unregister_all end end test 'should be able to set header attribute in block macro processor' do begin Asciidoctor::Extensions.register do block_macro do named :attribute resolves_attributes '1:value' process do |parent, target, attrs| parent.document.set_attr target, attrs['value'] nil end end block_macro do named :header_attribute resolves_attributes '1:value' process do |parent, target, attrs| parent.document.set_header_attribute target, attrs['value'] nil end end end input = <<~'EOS' attribute::yin[yang] header_attribute::foo[bar] EOS doc = document_from_string input assert_nil doc.attr 'yin' assert_equal 'bar', (doc.attr 'foo') ensure Asciidoctor::Extensions.unregister_all end end test 
'should invoke processor for custom inline macro' do begin Asciidoctor::Extensions.register do inline_macro TemperatureMacro, :deg end output = convert_string_to_embedded 'Room temperature is deg:25[C,precision=0].', attributes: { 'temperature-unit' => 'F' } assert_includes output, 'Room temperature is 25 °C.' output = convert_string_to_embedded 'Normal body temperature is deg:37[].', attributes: { 'temperature-unit' => 'F' } assert_includes output, 'Normal body temperature is 98.6 °F.' ensure Asciidoctor::Extensions.unregister_all end end test 'should resolve regexp for inline macro lazily' do begin Asciidoctor::Extensions.register do inline_macro do named :label match_format :short parse_content_as :text process do |parent, _, attrs| create_inline_pass parent, %() end end end output = convert_string_to_embedded 'label:[Checkbox]' assert_includes output, '' ensure Asciidoctor::Extensions.unregister_all end end test 'should map unparsed attrlist to target when format is short' do begin Asciidoctor::Extensions.register do inline_macro do named :label match_format :short process do |parent, target| create_inline_pass parent, %() end end end output = convert_string_to_embedded 'label:[Checkbox]' assert_includes output, '' ensure Asciidoctor::Extensions.unregister_all end end test 'should parse text in square brackets as attrlist by default' do begin Asciidoctor::Extensions.register do inline_macro do named :json match_format :short process do |parent, _, attrs| create_inline_pass parent, %({ #{attrs.map {|k, v| %["#{k}": "#{v}"] }.join ', '} }) end end inline_macro do named :data process do |parent, target, attrs| if target == 'json' create_inline_pass parent, %({ #{attrs.map {|k, v| %["#{k}": "#{v}"] }.join ', '} }) else nil end end end end output = convert_string_to_embedded 'json:[a=A,b=B,c=C]', doctype: :inline assert_equal '{ "a": "A", "b": "B", "c": "C" }', output output = convert_string_to_embedded 'data:json[a=A,b=B,c=C]', doctype: :inline assert_equal '{ "a": "A", "b": "B", "c": "C" }', output ensure Asciidoctor::Extensions.unregister_all end end test 'should assign captures correctly for inline macros' do begin Asciidoctor::Extensions.register do inline_macro do named :short_attributes match_format :short resolve_attributes '1:name' process do |parent, target, attrs| create_inline_pass parent, %(target=#{target.inspect}, attributes=#{attrs.sort_by {|(k)| k.to_s }.to_h}) end end inline_macro do named :short_text match_format :short resolve_attributes false process do |parent, target, attrs| create_inline_pass parent, %(target=#{target.inspect}, attributes=#{attrs.sort_by {|(k)| k.to_s }.to_h}) end end inline_macro do named :'full-attributes' resolve_attributes '1:name' => nil process do |parent, target, attrs| create_inline_pass parent, %(target=#{target.inspect}, attributes=#{attrs.sort_by {|(k)| k.to_s }.to_h}) end end inline_macro do named :'full-text' resolve_attributes false process do |parent, target, attrs| create_inline_pass parent, %(target=#{target.inspect}, attributes=#{attrs.sort_by {|(k)| k.to_s }.to_h}) end end inline_macro do named :@short_match match %r/@(\w+)/ resolve_attributes false process do |parent, target, attrs| create_inline_pass parent, %(target=#{target.inspect}, attributes=#{attrs.sort_by {|(k)| k.to_s }.to_h}) end end end input = <<~'EOS' [subs=normal] ++++ short_attributes:[] short_attributes:[value,key=val] short_text:[] short_text:[[text\]] full-attributes:target[] full-attributes:target[value,key=val] full-text:target[] full-text:target[[text\]] 
@target ++++ EOS expected = <<~'EOS'.chop target="", attributes={} target="value,key=val", attributes={1=>"value", "key"=>"val", "name"=>"value"} target="", attributes={"text"=>""} target="[text]", attributes={"text"=>"[text]"} target="target", attributes={} target="target", attributes={1=>"value", "key"=>"val", "name"=>"value"} target="target", attributes={"text"=>""} target="target", attributes={"text"=>"[text]"} target="target", attributes={} EOS output = convert_string_to_embedded input assert_equal expected, output ensure Asciidoctor::Extensions.unregister_all end end test 'should invoke convert on return value if value is an inline node' do begin Asciidoctor::Extensions.register do inline_macro do named :mention resolve_attributes false process do |parent, target, attrs| if (text = attrs['text']).empty? text = %(@#{target}) end create_anchor parent, text, type: :link, target: %(https://github.com/#{target}) end end end output = convert_string_to_embedded 'mention:mojavelinux[Dan]' assert_includes output, 'Dan' ensure Asciidoctor::Extensions.unregister_all end end test 'should allow return value of inline macro to be nil' do begin Asciidoctor::Extensions.register do inline_macro do named :skipme match_format :short process do nil end end end using_memory_logger do |logger| output = convert_string_to_embedded '-skipme:[]-', doctype: :inline assert_equal '--', output assert_empty logger end ensure Asciidoctor::Extensions.unregister_all end end test 'should warn if return value of inline macro is a string' do begin Asciidoctor::Extensions.register do inline_macro do named :say process do |parent, target, attrs| target end end end using_memory_logger do |logger| output = convert_string_to_embedded 'say:yo[]', doctype: :inline assert_equal 'yo', output assert_message logger, :INFO, 'expected substitution value for custom inline macro to be of type Inline; got String: say:yo[]' end ensure Asciidoctor::Extensions.unregister_all end end test 'should not apply subs to inline node returned by process method by default' do begin Asciidoctor::Extensions.register do inline_macro do named :say process do |parent, target, attrs| create_inline parent, :quoted, %(*#{target}*), type: :emphasis end end end output = convert_string_to_embedded 'say:yo[]', doctype: :inline assert_equal '*yo*', output ensure Asciidoctor::Extensions.unregister_all end end test 'should apply subs specified as symbol to inline node returned by process method' do begin Asciidoctor::Extensions.register do inline_macro do named :say process do |parent, target, attrs| create_inline_pass parent, %(*#{target}*), attributes: { 'subs' => :normal } end end end output = convert_string_to_embedded 'say:yo[]', doctype: :inline assert_equal 'yo', output ensure Asciidoctor::Extensions.unregister_all end end test 'should apply subs specified as array to inline node returned by process method' do begin Asciidoctor::Extensions.register do inline_macro do named :say process do |parent, target, attrs| create_inline_pass parent, %(*#{target}*), attributes: { 'subs' => [:specialchars, :quotes] } end end end output = convert_string_to_embedded 'say:{lt}message{gt}[]', doctype: :inline assert_equal '<message>', output ensure Asciidoctor::Extensions.unregister_all end end test 'should apply subs specified as string to inline node returned by process method' do begin Asciidoctor::Extensions.register do inline_macro do named :say process do |parent, target, attrs| create_inline_pass parent, %(*#{target}*), attributes: { 'subs' => 'specialchars,quotes' 
} end end end output = convert_string_to_embedded 'say:{lt}message{gt}[]', doctype: :inline assert_equal '<message>', output ensure Asciidoctor::Extensions.unregister_all end end test 'should prefer attributes parsed from inline macro over default attributes' do begin Asciidoctor::Extensions.register do inline_macro :attrs do match_format :short default_attributes 1 => 'a', 2 => 'b', 'foo' => 'baz' positional_attributes 'a', 'b' process do |parent, _, attrs| create_inline_pass parent, %(a=#{attrs['a']},2=#{attrs[2]},b=#{attrs['b'] || 'nil'},foo=#{attrs['foo']}) end end end output = convert_string_to_embedded 'attrs:[A,foo=bar]', doctype: :inline # note that default attributes aren't considered when mapping positional attributes assert_equal 'a=A,2=b,b=nil,foo=bar', output ensure Asciidoctor::Extensions.unregister_all end end test 'should not carry over attributes if block processor returns nil' do begin Asciidoctor::Extensions.register do block do named 'skip-me' on_context :paragraph parse_content_as :raw process do |parent, reader, attrs| nil end end end input = <<~'EOS' .unused title [skip-me] not shown -- shown -- EOS doc = document_from_string input assert_equal 1, doc.blocks.size assert_nil doc.blocks[0].attributes['title'] ensure Asciidoctor::Extensions.unregister_all end end test 'should not invoke process method or carry over attributes if block processor declares skip content model' do begin process_method_called = false Asciidoctor::Extensions.register do block do named :ignore on_context :paragraph parse_content_as :skip process do |parent, reader, attrs| process_method_called = true nil end end end input = <<~'EOS' .unused title [ignore] not shown -- shown -- EOS doc = document_from_string input refute process_method_called assert_equal 1, doc.blocks.size assert_nil doc.blocks[0].attributes['title'] ensure Asciidoctor::Extensions.unregister_all end end test 'should pass attributes by value to block processor' do begin Asciidoctor::Extensions.register do block do named :foo on_context :paragraph parse_content_as :raw process do |parent, reader, attrs| original_attrs = attrs.dup attrs.delete('title') create_paragraph parent, reader.read_lines, original_attrs.merge('id' => 'value') end end end input = <<~'EOS' .title [foo] content EOS doc = document_from_string input assert_equal 1, doc.blocks.size assert_equal 'title', doc.blocks[0].attributes['title'] assert_equal 'value', doc.blocks[0].id ensure Asciidoctor::Extensions.unregister_all end end test 'should ignore return value of custom block if value is parent' do begin Asciidoctor::Extensions.register do block do named :unwrap on_context :open process do |parent, reader| parse_content parent, reader.read_lines end end end input = <<~'EOS' [unwrap] -- a b c -- EOS doc = document_from_string input assert_equal 3, doc.blocks.size doc.blocks.each do |block| assert_equal :paragraph, block.context end assert_equal 'a', doc.blocks[0].source assert_css 'p', doc.convert, 3 ensure Asciidoctor::Extensions.unregister_all end end test 'should ignore return value of custom block macro if value is parent' do begin Asciidoctor::Extensions.register do block_macro :para do process do |parent, target| parse_content parent, target end end end input = <<~'EOS' para::text[] EOS doc = document_from_string input assert_equal 1, doc.blocks.size assert_equal :paragraph, doc.blocks[0].context assert_equal 'text', doc.blocks[0].source assert_css 'p', doc.convert, 1 ensure Asciidoctor::Extensions.unregister_all end end test 'parse_content should not share 
attributes between parsed blocks' do begin Asciidoctor::Extensions.register do block do named :wrap on_context :open process do |parent, reader, attrs| wrap = create_open_block parent, nil, attrs parse_content wrap, reader.read_lines end end end input = <<~'EOS' [wrap] -- [foo=bar] ==== content ==== [baz=qux] ==== content ==== -- EOS doc = document_from_string input assert_equal 1, doc.blocks.size wrap = doc.blocks[0] assert_equal 2, wrap.blocks.size assert_equal 2, wrap.blocks[0].attributes.size assert_equal 2, wrap.blocks[1].attributes.size assert_nil wrap.blocks[1].attributes['foo'] ensure Asciidoctor::Extensions.unregister_all end end test 'should allow extension to replace custom block with a list' do begin Asciidoctor::Extensions.register do block do named :lst on_context :paragraph process do |parent, reader| reader.read_lines.each_with_object create_list parent, :ulist do |line, list| list << (create_list_item list, line) end end end end input = <<~'EOS' before [lst] a b c after EOS doc = document_from_string input assert_equal 3, doc.blocks.size list = doc.blocks[1] assert_equal :ulist, list.context assert_equal 3, list.items.size assert_equal 'a', list.items[0].text assert_css 'li', doc.convert, 3 ensure Asciidoctor::Extensions.unregister_all end end test 'should allow extension to replace custom block with a section' do begin Asciidoctor::Extensions.register do block do named :sect on_context :open process do |parent, _, attrs| create_section parent, attrs['title'], {} end end end input = <<~'EOS' .Section Title [sect] -- a b -- EOS doc = document_from_string input assert_equal 1, doc.blocks.size sect = doc.blocks[0] assert_equal :section, sect.context assert_equal 'Section Title', sect.title assert_equal 2, sect.blocks.size assert_equal :paragraph, sect.blocks[0].context assert_equal :paragraph, sect.blocks[1].context assert_css 'p', doc.convert, 2 ensure Asciidoctor::Extensions.unregister_all end end test 'can use parse_content to append blocks to current parent' do begin Asciidoctor::Extensions.register do block do named :csv on_context :literal process do |parent, reader| parse_content parent, [',==='] + reader.read_lines + [',==='] nil end end end input = <<~'EOS' before [csv] .... a,b,c .... after EOS doc = document_from_string input assert_equal 3, doc.blocks.size table = doc.blocks[1] assert_equal :table, table.context assert_css 'td', doc.convert, 3 ensure Asciidoctor::Extensions.unregister_all end end test 'can use parse_attributes to parse attrlist' do begin parsed_attrs = nil Asciidoctor::Extensions.register do block do named :attrs on_context :open process do |parent, reader, attrs| parsed_attrs = parse_attributes parent, reader.read_line, positional_attributes: ['a', 'b'] parsed_attrs.update parse_attributes parent, 'foo={foo}', sub_attributes: true nil end end end input = <<~'EOS' :foo: bar [attrs] -- a,b,c,key=val -- EOS convert_string_to_embedded input assert_equal 'a', parsed_attrs['a'] assert_equal 'b', parsed_attrs['b'] assert_equal 'val', parsed_attrs['key'] assert_equal 'bar', parsed_attrs['foo'] ensure Asciidoctor::Extensions.unregister_all end end test 'create_section should set up all section properties' do begin sect = nil Asciidoctor::Extensions.register do block_macro do named :sect process do |parent, target, attrs| opts = (level = attrs.delete 'level') ? 
{ level: level.to_i } : {} attrs['id'] = false if attrs['id'] == 'false' parent = parent.parent if parent.context == :preamble sect = create_section parent, 'Section Title', attrs, opts nil end end end input_tpl = <<~'EOS' = Document Title :doctype: book :sectnums: sect::[%s] EOS { '' => ['chapter', 1, false, true, '_section_title'], 'level=0' => ['part', 0, false, false, '_section_title'], 'level=0,alt' => ['part', 0, false, true, '_section_title', 'partnums' => ''], 'level=0,style=appendix' => ['appendix', 1, true, true, '_section_title'], 'style=appendix' => ['appendix', 1, true, true, '_section_title'], 'style=glossary' => ['glossary', 1, true, false, '_section_title'], 'style=glossary,alt' => ['glossary', 1, true, :chapter, '_section_title', 'sectnums' => 'all'], 'style=abstract' => ['chapter', 1, false, true, '_section_title'], 'id=section-title' => ['chapter', 1, false, true, 'section-title'], 'id=false' => ['chapter', 1, false, true, nil] }.each do |attrlist, (expect_sectname, expect_level, expect_special, expect_numbered, expect_id, extra_attrs)| input = input_tpl % attrlist document_from_string input, safe: :server, attributes: extra_attrs assert_equal expect_sectname, sect.sectname assert_equal expect_level, sect.level assert_equal expect_special, sect.special assert_equal expect_numbered, sect.numbered if expect_id assert_equal expect_id, sect.id else assert_nil sect.id end end ensure Asciidoctor::Extensions.unregister_all end end test 'should add docinfo to document' do input = <<~'EOS' = Document Title sample content EOS begin Asciidoctor::Extensions.register do docinfo_processor MetaRobotsDocinfoProcessor end doc = document_from_string input assert_equal Asciidoctor::SafeMode::SECURE, doc.safe assert_equal '', doc.docinfo ensure Asciidoctor::Extensions.unregister_all end end test 'should add multiple docinfo to document' do input = <<~'EOS' = Document Title sample content EOS begin Asciidoctor::Extensions.register do docinfo_processor MetaAppDocinfoProcessor docinfo_processor MetaRobotsDocinfoProcessor, position: :>> docinfo_processor do at_location :footer process do |doc| '' end end end doc = document_from_string input, safe: :server assert_equal %(\n), doc.docinfo assert_equal '', doc.docinfo(:footer) ensure Asciidoctor::Extensions.unregister_all end end test 'should append docinfo to document' do begin Asciidoctor::Extensions.register do docinfo_processor MetaRobotsDocinfoProcessor end sample_input_path = fixture_path('basic.adoc') output = Asciidoctor.convert_file sample_input_path, to_file: false, standalone: true, safe: Asciidoctor::SafeMode::SERVER, attributes: { 'docinfo' => '' } refute_empty output assert_css 'script[src="modernizr.js"]', output, 1 assert_css 'meta[name="robots"]', output, 1 assert_css 'meta[http-equiv="imagetoolbar"]', output, 0 ensure Asciidoctor::Extensions.unregister_all end end test 'should return extension instance after registering' do begin exts = [] Asciidoctor::Extensions.register do exts.push preprocessor SamplePreprocessor exts.push include_processor SampleIncludeProcessor exts.push tree_processor SampleTreeProcessor exts.push docinfo_processor SampleDocinfoProcessor exts.push postprocessor SamplePostprocessor end empty_document exts.each do |ext| assert_kind_of Asciidoctor::Extensions::ProcessorExtension, ext end ensure Asciidoctor::Extensions.unregister_all end end test 'should raise exception if document processor extension does not provide process method' do extension_registry = Asciidoctor::Extensions.create do tree_processor do # 
rubocop:disable Lint/EmptyBlock end end exception = assert_raises NoMethodError do convert_string_to_embedded 'content', extension_registry: extension_registry end assert_match %r/No block specified to process tree processor extension at .*extensions_test\.rb:\d+/, exception.message end test 'should raise exception if syntax processor extension does not provide process method' do extension_registry = Asciidoctor::Extensions.create do block_macro :foo do # rubocop:disable Lint/EmptyBlock end end exception = assert_raises NoMethodError do convert_string_to_embedded 'foo::bar[]', extension_registry: extension_registry end assert_match %r/No block specified to process block macro extension at .*extensions_test\.rb:\d+/, exception.message end test 'should raise exception if syntax processor extension does not provide a name' do macro_name = (Class.new String do def to_sym nil end end).new 'foo' extension_registry = Asciidoctor::Extensions.create do block_macro do named macro_name process do nil end end end exception = assert_raises ArgumentError do convert_string_to_embedded 'foo::bar[]', extension_registry: extension_registry end assert_match %r/No name specified for block macro extension at .*extensions_test\.rb:\d+/, exception.message end test 'should raise an exception if mandatory target attribute is not provided for image block' do input = 'cat_in_sink::[]' exception = assert_raises ArgumentError do convert_string_to_embedded input, extension_registry: create_cat_in_sink_block_macro end assert_match %r/target attribute is required/, exception.message end test 'should assign alt attribute to image block if alt is not provided' do input = 'cat_in_sink::25[]' doc = document_from_string input, standalone: false, extension_registry: create_cat_in_sink_block_macro image = doc.blocks[0] assert_equal 'cat in sink day 25', (image.attr 'alt') assert_equal 'cat in sink day 25', (image.attr 'default-alt') output = doc.convert assert_includes output, 'cat in sink day 25' end test 'should create an image block if mandatory attributes are provided' do input = 'cat_in_sink::30[cat in sink (yes)]' doc = document_from_string input, standalone: false, extension_registry: create_cat_in_sink_block_macro image = doc.blocks[0] assert_equal 'cat in sink (yes)', (image.attr 'alt') refute(image.attr? 'default-alt') output = doc.convert assert_includes output, 'cat in sink (yes)' end test 'should not assign caption on image block if title is not set on custom block macro' do input = 'cat_in_sink::30[]' doc = document_from_string input, standalone: false, extension_registry: create_cat_in_sink_block_macro output = doc.convert assert_xpath '/*[@class="imageblock"]/*[@class="title"]', output, 0 end test 'should assign caption on image block if title is set on custom block macro' do input = <<~'EOS' .Cat in Sink? cat_in_sink::30[] EOS doc = document_from_string input, standalone: false, extension_registry: create_cat_in_sink_block_macro output = doc.convert assert_xpath '/*[@class="imageblock"]/*[@class="title"][text()="Figure 1. 
Cat in Sink?"]', output, 1 end test 'should not fail if alt attribute is not set on block image node' do begin Asciidoctor::Extensions.register do block_macro :no_alt do process do |parent, target, attrs| create_block parent, 'image', nil, { 'target' => 'picture.jpg' } end end end output = Asciidoctor.convert 'no_alt::[]' assert_include '', output ensure Asciidoctor::Extensions.unregister_all end end test 'should not fail if alt attribute is not set on inline image node' do begin Asciidoctor::Extensions.register do inline_macro :no_alt do match_format :short process do |parent, target, attrs| create_inline parent, 'image', nil, target: 'picture.jpg' end end end output = Asciidoctor.convert 'no_alt:[]' assert_include '', output ensure Asciidoctor::Extensions.unregister_all end end test 'should assign id and role on list items unordered' do input = 'santa_list::ulist[]' doc = document_from_string input, standalone: false, extension_registry: create_santa_list_block_macro output = doc.convert assert_xpath '/div[@class="ulist"]/ul/li[@class="friendly"][@id="santa-list-guillaume"]', output, 1 assert_xpath '/div[@class="ulist"]/ul/li[@class="kind contributor java"]', output, 1 assert_xpath '/div[@class="ulist"]/ul/li[@class="kind contributor java"][not(@id)]', output, 1 assert_xpath '/div[@class="ulist"]/ul/li[@id="santa-list-pepijn"][not(@class)]', output, 1 assert_xpath '/div[@class="ulist"]/ul/li[@id="santa-list-dan"][@class="naughty"]', output, 1 assert_xpath '/div[@class="ulist"]/ul/li[not(@id)][not(@class)]/p[text()="Sarah"]', output, 1 end test 'should assign id and role on list items ordered' do input = 'santa_list::olist[]' doc = document_from_string input, standalone: false, extension_registry: create_santa_list_block_macro output = doc.convert assert_xpath '/div[@class="olist"]/ol/li[@class="friendly"][@id="santa-list-guillaume"]', output, 1 assert_xpath '/div[@class="olist"]/ol/li[@class="kind contributor java"]', output, 1 assert_xpath '/div[@class="olist"]/ol/li[@class="kind contributor java"][not(@id)]', output, 1 assert_xpath '/div[@class="olist"]/ol/li[@id="santa-list-pepijn"][not(@class)]', output, 1 assert_xpath '/div[@class="olist"]/ol/li[@id="santa-list-dan"][@class="naughty"]', output, 1 assert_xpath '/div[@class="olist"]/ol/li[not(@id)][not(@class)]/p[text()="Sarah"]', output, 1 end end end asciidoctor-2.0.20/test/fixtures/000077500000000000000000000000001443135032600167275ustar00rootroot00000000000000asciidoctor-2.0.20/test/fixtures/asciidoc_index.txt000066400000000000000000000533001443135032600224360ustar00rootroot00000000000000AsciiDoc Home Page ================== // Web page meta data. :keywords: AsciiDoc, DocBook, EPUB, PDF, ebooks, slideshow, slidy, man page :description: AsciiDoc is a text document format for writing notes, + documentation, articles, books, ebooks, slideshows, + web pages, man pages and blogs. AsciiDoc files can be + translated to many formats including HTML, PDF, EPUB, + man page. .{revdate}: AsciiDoc {revnumber} Released ************************************************************************ Read the link:CHANGELOG.html[CHANGELOG] for release highlights and a full list of all additions, changes and bug fixes. Changes are documented in the updated link:userguide.html[User Guide]. See the link:INSTALL.html[Installation page] for downloads and and installation instructions. 
'Stuart Rackham' ************************************************************************ Introduction ------------ {description} AsciiDoc is highly configurable: both the AsciiDoc source file syntax and the backend output markups (which can be almost any type of SGML/XML markup) can be customized and extended by the user. AsciiDoc is free software and is licenced under the terms of the 'GNU General Public License version 2' (GPLv2). TIP: The pages you are reading were written using AsciiDoc, to view the corresponding AsciiDoc source click on the *Page Source* menu item in the left hand margin. Overview and Examples --------------------- You write an AsciiDoc document the same way you would write a normal text document, there are no markup tags or weird format notations. AsciiDoc files are designed to be viewed, edited and printed directly or translated to other presentation formats using the asciidoc(1) command. The asciidoc(1) command translates AsciiDoc files to HTML, XHTML and DocBook markups. DocBook can be post-processed to presentation formats such as HTML, PDF, EPUB, DVI, LaTeX, roff, and Postscript using readily available Open Source tools. Example Articles ~~~~~~~~~~~~~~~~ - This XHTML version of the link:asciidoc.css-embedded.html[AsciiDoc User Guide] was generated by AsciiDoc from link:asciidoc.txt[this AsciiDoc file]. - Here's the link:asciidoc.html[same document] created by first generating DocBook markup using AsciiDoc and then converting the DocBook markup to HTML using 'DocBook XSL Stylesheets'. - The User Guide again, this time a link:chunked/index.html[chunked version]. - AsciiDoc generated this link:article-standalone.html[stand-alone HTML file] containing embedded CSS, JavaScript and images from this link:article.txt[AsciiDoc article template] with this command: asciidoc -a data-uri -a icons -a toc -a max-width=55em article.txt - The same link:article.txt[AsciiDoc article template] generated link:article-html5-toc2.html[this HTML 5] (the 'toc2' attribute puts a table of contents in the left margin) from this command: asciidoc -b html5 -a icons -a toc2 -a theme=flask article.txt - The same link:article.txt[AsciiDoc article template] produced this link:article.html[HTML file] and this link:article.pdf[PDF file] via DocBook markup generated by AsciiDoc. [[X7]] Example Books ~~~~~~~~~~~~~ AsciiDoc markup supports all the standard DocBook frontmatter and backmatter sections (dedication, preface, bibliography, glossary, index, colophon) plus footnotes and index entries. - This link:book.txt[AsciiDoc book] produced link:book.html[this HTML file] using the 'DocBook XSL Stylesheets'. - The link:asciidoc.pdf[PDF formatted AsciiDoc User Guide] was generated from asciidoc(1) DocBook output. - The link:asciidoc.epub[EPUB formatted AsciiDoc User Guide] was generated using link:a2x.1.html[a2x]. - This link:book.epub[EPUB formatted book skeleton] was generated using link:a2x.1.html[a2x]. - This link:book-multi.txt[multi-part AsciiDoc book] produced link:book-multi.html[this HTML file] using the 'DocBook XSL Stylesheets'. Example UNIX Man Pages ~~~~~~~~~~~~~~~~~~~~~~ HTML formatted AsciiDoc man pages link:asciidoc.1.css-embedded.html[with stylesheets] and link:asciidoc.1.html[without stylesheets] were generated by AsciiDoc from link:asciidoc.1.txt[this file]. This link:asciidoc.1[roff formatted man page] was generated from asciidoc(1) DocBook output using `xsltproc(1)` and DocBook XSL Stylesheets. 
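For readers who want to reproduce the man page build locally, the following two commands are a minimal sketch of that toolchain (the stylesheet path is only a placeholder; the location of the DocBook XSL 'manpages' stylesheet differs between systems and installations):

  asciidoc -b docbook -d manpage asciidoc.1.txt
  xsltproc --nonet /path/to/docbook-xsl/manpages/docbook.xsl asciidoc.1.xml

The first command produces DocBook XML from the AsciiDoc source; the second converts that XML into a roff formatted man page.
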
[[X8]] Example Slideshows ~~~~~~~~~~~~~~~~~~ The http://www.w3.org/Talks/Tools/Slidy2/[Slidy] backend generates HTML slideshows that can be viewed in any web browser. What's nice is that you can create completely self contained slideshows including embedded images. - Here is the link:slidy.html[slidy backend documentation] slideshow and here is it's link:slidy.txt[AsciiDoc source]. - An link:slidy-example.html[example slidy slideshow] and the link:slidy-example.txt[AsciiDoc source]. Example Web Site ~~~~~~~~~~~~~~~~ The link:README-website.html[AsciiDoc website] is included in the AsciiDoc distribution (in `./examples/website/`) as an example website built using AsciiDoc. See `./examples/website/README-website.txt`. More examples ~~~~~~~~~~~~~ - See below: <>. - Example link:newtables.html[Tables]. eBook Publication ----------------- The two most popular open eBook formats are http://en.wikipedia.org/wiki/EPUB[EPUB] and PDF. The AsciiDoc link:a2x.1.html[a2x] toolchain wrapper makes it easy to link:publishing-ebooks-with-asciidoc.html[publish EPUB and PDF eBooks with AsciiDoc]. See also <> and link:epub-notes.html[AsciiDoc EPUB Notes]). Blogpost weblog client ---------------------- http://srackham.wordpress.com/blogpost-readme/[blogpost] is a command-line weblog client for publishing AsciiDoc documents to http://wordpress.org/[WordPress] blog hosts. It creates and updates weblog posts and pages directly from AsciiDoc source documents. Source code highlighter ----------------------- AsciiDoc includes a link:source-highlight-filter.html[source code highlighter filter] that uses http://www.gnu.org/software/src-highlite/[GNU source-highlight] to highlight HTML outputs. You also have the option of using the http://pygments.org/[Pygments] highlighter. [[X3]] Mathematical Formulae --------------------- You can include mathematical formulae in AsciiDoc XHTML documents using link:asciimathml.html[ASCIIMathML] or link:latexmathml.html[LaTeXMathML] notation. The link:latex-filter.html[AsciiDoc LaTeX filter] translates LaTeX source to an image that is automatically inserted into the AsciiDoc output documents. AsciiDoc also has 'latexmath' macros for DocBook outputs -- they are documented in link:latexmath.pdf[this PDF file] and can be used in AsciiDoc documents processed by `dblatex(1)`. Editor Support -------------- - An AsciiDoc syntax highlighter for the Vim text editor is included in the AsciiDoc distribution (see the 'Vim Syntax Highlighter' appendix in the 'AsciiDoc User Guide' for details). + .Syntax highlighter screenshot image::images/highlighter.png[height=400,caption="",link="images/highlighter.png"] - Dag Wieers has implemented an alternative Vim syntax file for AsciiDoc which can be found here http://svn.rpmforge.net/svn/trunk/tools/asciidoc-vim/. - David Avsajanishvili has written a source highlighter for AsciiDoc files for http://projects.gnome.org/gtksourceview/[GtkSourceView] (used by http://projects.gnome.org/gedit/[gedit] and a number of other applications). The project is hosted here: https://launchpad.net/asciidoc-gtk-highlight - AsciiDoc resources for the Emacs editor can be found on the http://www.emacswiki.org/emacs/AsciiDoc[AsciiDoc page] at the http://www.emacswiki.org/emacs/EmacsWiki[Emacs Wiki]. - Christian Zuckschwerdt has written a https://github.com/zuckschwerdt/asciidoc.tmbundle[TextMate bundle] for AsciiDoc. 
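Returning to the source code highlighter filter described above, the markup it expects looks like the following sketch (the language name and the snippet itself are purely illustrative):

  [source,python]
  ----------------------------------------
  def hello():
      print('Hello, AsciiDoc!')
  ----------------------------------------

The filter passes the listing block content through the configured highlighter and emits highlighted HTML in its place.
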
Try AsciiDoc on the Web ----------------------- Andrew Koster has written a Web based application to interactively convert and display AsciiDoc source: http://andrewk.webfactional.com/asciidoc.php [[X2]] External Resources and Applications ----------------------------------- Here are resources that I know of, if you know of more drop me a line and I'll add them to the list. - Check the link:INSTALL.html#X2[installation page] for packaged versions of AsciiDoc. - Alex Efros has written an HTML formatted http://powerman.name/doc/asciidoc[AsciiDoc Cheatsheet] using Asciidoc. - Thomas Berker has written an http://liksom.info/blog/?q=node/114[AsciiDoc Cheatsheet] in Open Document and PDF formats. - The http://www.wikimatrix.org/[WikiMatrix] website has an excellent http://www.wikimatrix.org/syntax.php[web page] that compares the various Wiki markup syntaxes. An interesting attempt at Wiki markup standardization is http://www.wikicreole.org/[CREOLE]. - Franck Pommereau has written http://www.univ-paris12.fr/lacl/pommereau/soft/asciidoctest.html[Asciidoctest], a program that doctests snippets of Python code within your Asciidoc documents. - The http://remips.sourceforge.net/[ReMIPS] project website has been built using AsciiDoc. - Here are some link:asciidoc-docbook-xsl.html[DocBook XSL Stylesheets Notes]. - Karl Mowatt-Wilson has developed an http://ikiwiki.info/[ikiwiki] plugin for AsciiDoc which he uses to render http://mowson.org/karl[his website]. The plugin is available http://www.mowson.org/karl/colophon/[here] and there is some discussion of the ikiwiki integration http://ikiwiki.info/users/KarlMW/discussion/[here]. - Glenn Eychaner has http://groups.google.com/group/asciidoc/browse_thread/thread/bf04b55628efe214[reworked the Asciidoc plugin for ikiwiki] that was created by Karl Mowson, the source can be downloaded from http://dl.dropbox.com/u/11256359/asciidoc.pm - David Hajage has written an AsciiDoc package for the http://www.r-project.org/[R Project] (R is a free software environment for statistical computing). 'ascii' is available on 'CRAN' (just run `install.packages("ascii")` from R). Briefly, 'ascii' replaces R results in AsciiDoc document with AsciiDoc markup. More information and examples here: http://eusebe.github.com/ascii/. - Pascal Rapaz has written a Python script to automate AsciiDoc website generation. You can find it at http://www.rapazp.ch/opensource/tools/asciidoc.html. - Jared Henley has written http://jared.henley.id.au/software/awb/documentation.html[AsciiDoc Website Builder]. 'AsciiDoc Website Builder' (awb) is a python program that automates the building of of a website written in AsciiDoc. All you need to write is the AsciiDoc source plus a few simple configuration files. - Brad Adkins has written http://dbixjcl.org/jcl/asciidocgen/asciidocgen.html[AsciiDocGen], a web site generation and deployment tool that allows you write your web site content in AsciiDoc. The http://dbixjcl.org/jcl/asciidocgen/asciidocgen.html[AsciiDocGen web site] is managed using 'AsciiDocGen'. - Filippo Negroni has developed a set of tools to facilitate 'literate programming' using AsciiDoc. The set of tools is called http://eweb.sourceforge.net/[eWEB]. - http://vanderwijk.info/2009/4/23/full-text-based-document-generation-using-asciidoc-and-ditaa[Ivo's blog] describes a http://ditaa.sourceforge.net/[ditaa] filter for AsciiDoc which converts http://en.wikipedia.org/wiki/ASCII_art[ASCII art] into graphics. 
- http://github.com/github/gollum[Gollum] is a git-powered wiki, it supports various formats, including AsciiDoc. - Gregory Romé has written an http://github.com/gpr/redmine_asciidoc_formatter[AsciiDoc plugin] for the http://www.redmine.org/[Redmine] project management application. - Paul Hsu has started a http://github.com/paulhsu/AsciiDoc.CHT.userguide[Chinese translation of the AsciiDoc User Guide]. - Dag Wieers has written http://dag.wieers.com/home-made/unoconv/[UNOCONV]. 'UNOCONV' can export AsciiDoc outputs to OpenOffice export formats. - Ed Keith has written http://codeextactor.berlios.de/[Code Extractor], it extracts code snippets from source code files and inserts them into AsciiDoc documents. - The http://csrp.iut-blagnac.fr/jmiwebsite/home/[JMI website] hosts a number of extras for AsciiDoc and Slidy written by Jean-Michel Inglebert. - Ryan Tomayko has written an number of http://tomayko.com/src/adoc-themes/[themes for AsciiDoc] along with a http://tomayko.com/src/adoc-themes/hacking.html[script for combining the CSS files] into single CSS theme files for AsciiDoc embedded CSS documents. - Ilya Portnov has written a https://gitorious.org/doc-building-system[document building system for AsciiDoc], here is http://iportnov.blogspot.com/2011/03/asciidoc-beamer.html[short article in Russian] describing it. - Lex Trotman has written https://github.com/elextr/codiicsa[codiicsa], a program that converts DocBook to AsciiDoc. - Qingping Hou has written http://houqp.github.com/asciidoc-deckjs/[an AsciiDoc backend for deck.js]. http://imakewebthings.github.com/deck.js/[deck.js] is a JavaScript library for building modern HTML presentations (slideshows). - The guys from O'Reilly Media have posted an https://github.com/oreillymedia/docbook2asciidoc[XSL Stylesheet to github] that converts DocBook to AsciiDoc. - Lex Trotman has written https://github.com/elextr/flexndex[flexndex], an index generator tool that be used with AsciiDoc. - Michael Haberler has created a https://code.google.com/p/asciidoc-diag-filter/[blockdiag filter for Asciidoc] which embeds http://blockdiag.com/[blockdiag] images in AsciiDoc documents. - Dan Allen has written a https://github.com/mojavelinux/asciidoc-bootstrap-docs-backend[Bootstrap backend] for AsciiDoc. - Steven Boscarine has written https://github.com/StevenBoscarine/JavaAsciidocWrapper[Maven wrapper for AsciiDoc]. - Christian Goltz has written https://github.com/christiangoltz/shaape[Shaape], an Ascii art to image converter for AsciiDoc. - Eduardo Santana has written an https://github.com/edusantana/asciidoc-highlight[Asciidoc Highlight for Notepad++]. - http://www.geany.org/[Geany] 1.23 adds document structure support for AsciiDoc. Please let me know if any of these links need updating. [[X6]] Documents written using AsciiDoc -------------------------------- Here are some documents I know of, if you know of more drop me a line and I'll add them to the list. - The book http://practicalunittesting.com/[Practical Unit Testing] by Tomek Kaczanowski was https://groups.google.com/group/asciidoc/browse_frm/thread/4ba13926262efa23[written using Asciidoc]. - The book http://oreilly.com/catalog/9781449397296[Programming iOS 4] by Matt Neuburg was written using AsciiDoc. Matt has http://www.apeth.net/matt/iosbooktoolchain.html[written an article] describing how he used AsciiDoc and other tools to write the book. 
- The book http://oreilly.com/catalog/9780596155957/index.html[Programming Scala] by Dean Wampler and Alex Payne (O'Reilly) was http://groups.google.com/group/asciidoc/browse_frm/thread/449f1199343f0e27[written using Asciidoc]. - The http://www.ncfaculty.net/dogle/fishR/index.html[fishR] website has a number of http://www.ncfaculty.net/dogle/fishR/bookex/AIFFD/AIFFD.html[book examples] written using AsciiDoc. - The Neo4j graph database project uses Asciidoc, and the output is published here: http://docs.neo4j.org/. The build process includes live tested source code snippets and is described http://groups.google.com/group/asciidoc/browse_thread/thread/49d570062fd3ff52[here]. - http://frugalware.org/[Frugalware Linux] uses AsciiDoc for http://frugalware.org/docs[documentation]. - http://www.cherokee-project.com/doc/[Cherokee documentation]. - Henrik Maier produced this professional User manual using AsciiDoc: http://www.proconx.com/assets/files/products/modg100/UMMBRG300-1101.pdf - Henrik also produced this folded single page brochure format example: http://www.proconx.com/assets/files/products/modg100/IGMBRG300-1101-up.pdf + See this http://groups.google.com/group/asciidoc/browse_thread/thread/16ab5a06864b934f[AsciiDoc discussion group thread] for details. - The http://www.kernel.org/pub/software/scm/git/docs/user-manual.html[Git User's Manual]. - 'Git Magic' + http://www-cs-students.stanford.edu/~blynn/gitmagic/ + http://github.com/blynn/gitmagic/tree/1e5780f658962f8f9b01638059b27275cfda095c - 'CouchDB: The Definitive Guide' + http://books.couchdb.org/relax/ + http://groups.google.com/group/asciidoc/browse_thread/thread/a60f67cbbaf862aa/d214bf7fa2d538c4?lnk=gst&q=book#d214bf7fa2d538c4 - 'Ramaze Manual' + http://book.ramaze.net/ + http://github.com/manveru/ramaze-book - Some documentation about git by Nico Schottelius (in German) http://nico.schotteli.us/papers/linux/git-firmen/. - The http://www.netpromi.com/kirbybase_ruby.html[KirbyBase for Ruby] database management system manual. - The http://xpt.sourceforge.net/[*Nix Power Tools project] uses AsciiDoc for documentation. - The http://www.wesnoth.org/[Battle for Wesnoth] project uses AsciiDoc for its http://www.wesnoth.org/wiki/WesnothManual[Manual] in a number of different languages. - Troy Hanson uses AsciiDoc to generate user guides for the http://tpl.sourceforge.net/[tpl] and http://uthash.sourceforge.net/[uthash] projects (the HTML versions have a customised contents sidebar). - http://volnitsky.com/[Leonid Volnitsky's site] is generated using AsciiDoc and includes Leonid's matplotlib filter. - http://www.weechat.org/[WeeChat] uses AsciiDoc for http://www.weechat.org/doc[project documentation]. - http://www.clansuite.com/[Clansuite] uses AsciiDoc for http://www.clansuite.com/documentation/[project documentation]. - The http://fc-solve.berlios.de/[Freecell Solver program] uses AsciiDoc for its http://fc-solve.berlios.de/docs/#distributed-docs[distributed documentation]. - Eric Raymond's http://gpsd.berlios.de/AIVDM.html[AIVDM/AIVDO protocol decoding] documentation is written using AsciiDoc. - Dwight Schauer has written an http://lxc.teegra.net/[LXC HOWTO] in AsciiDoc. - The http://www.rowetel.com/ucasterisk/[Free Telephony Project] website is generated using AsciiDoc. - Warren Block has http://www.wonkity.com/~wblock/docs/[posted a number of articles written using AsciiDoc]. 
- The http://code.google.com/p/waf/[Waf project's] 'Waf Book' is written using AsciiDoc, there is an http://waf.googlecode.com/svn/docs/wafbook/single.html[HTML] and a http://waf.googlecode.com/svn/docs/wafbook/waf.pdf[PDF] version. - The http://www.diffkit.org/[DiffKit] project's documentation and website have been written using Asciidoc. - The http://www.networkupstools.org[Network UPS Tools] project http://www.networkupstools.org/documentation.html[documentation] is an example of a large documentation project written using AsciiDoc. - http://www.archlinux.org/pacman/[Pacman], the http://www.archlinux.org/[Arch Linux] package manager, has been documented using AsciiDoc. - Suraj Kurapati has written a number of customized manuals for his Open Source projects using AsciiDoc: * http://snk.tuxfamily.org/lib/detest/ * http://snk.tuxfamily.org/lib/ember/ * http://snk.tuxfamily.org/lib/inochi/ * http://snk.tuxfamily.org/lib/rumai/ - The http://cxxtest.com/[CxxTest] project (unit testing for C++ language) has written its User Guide using AsciiDoc. Please let me know if any of these links need updating. DocBook 5.0 Backend ------------------- Shlomi Fish has begun work on a DocBook 5.0 `docbook50.conf` backend configuration file, you can find it http://bitbucket.org/shlomif/asciidoc[here]. See also: http://groups.google.com/group/asciidoc/browse_thread/thread/4386c7cc053d51a9 [[X1]] LaTeX Backend ------------- An experimental LaTeX backend was written for AsciiDoc in 2006 by Benjamin Klum. Benjamin did a superhuman job (I admit it, I didn't think this was doable due to AsciiDoc's SGML/XML bias). Owning to to other commitments, Benjamin was unable to maintain this backend. Here's link:latex-backend.html[Benjamin's original documentation]. Incompatibilities introduced after AsciiDoc 8.2.7 broke the LaTeX backend. In 2009 Geoff Eddy stepped up and updated the LaTeX backend, thanks to Geoff's efforts it now works with AsciiDoc 8.4.3. Geoff's updated `latex.conf` file shipped with AsciiDoc version 8.4.4. The backend still has limitations and remains experimental (see link:latex-bugs.html[Geoff's notes]). It's probably also worth pointing out that LaTeX output can be generated by passing AsciiDoc generated DocBook through `dblatex(1)`. Patches and bug reports ----------------------- Patches and bug reports are are encouraged, but please try to follow these guidelines: - Post bug reports and patches to the http://groups.google.com/group/asciidoc[asciidoc discussion list], this keeps things transparent and gives everyone a chance to comment. - The email subject line should be a specific and concise topic summary. Commonly accepted subject line prefixes such as '[ANN]', '[PATCH]' and '[SOLVED]' are good. === Bug reports - When reporting problems please illustrate the problem with the smallest possible example that replicates the issue (and please test your example before posting). This technique will also help to eliminate red herrings prior to posting. - Paste the commands that you executed along with any relevant outputs. - Include the version of AsciiDoc and the platform you're running it on. - If you can program please consider writing a patch to fix the problem. === Patches - Keep patches small and atomic (one issue per patch) -- no patch bombs. - If possible test your patch against the current trunk. - If your patch adds or modifies functionality include a short example that illustrates the changes. 
- Send patches in `diff -u` format, inline inside the mail message is usually best; if it is a very long patch then send it as an attachment. - Include documentation updates if you're up to it; otherwise insert 'TODO' comments at relevant places in the documentation. asciidoctor-2.0.20/test/fixtures/assets.jar000066400000000000000000000046121443135032600207320ustar00rootroot00000000000000PK V META-INF/PKPK VMETA-INF/MANIFEST.MFMLK-. K-*ϳR03r.JM,IMu (h&d*8djrrPK9#!vA@PK V --no-manifest ffa`97 p208z;b* Y}<\C|>9㭫w[Wܙ ?xZ{t ׌ג/fjdP{"&\b))Ӟe}*Xvb#'x ┥y#g,^U<8{qRR灕,gyo]]>zt;3?nY'a_Տ_Vo,s}~ZVʛyJjbN'yy^j֯#eMK7%TľBxDùWOg5?bS8w_3z\r]vk_ly^u%KT,QSSSII}ࠎ7EqZ.'U:y|2[RU̿/J(yErw^&!G'NW$Ts:օ19ez֭+*?78Q{++ClyP}}<{>QsY@Bo$ٮx',0 REiY1*6X3*`9əh_d$z7['mrU n9+2L{r1-˩g[I#Ca0!M'uO,_!-yC  n?@0rAIp|@Z L3ĉVCmJ~^zfZ|RRR2R.0&M`̭l v"ed!#/94U+bQ e%:ak{ ;qo! ȣ582Z.ZqIe6'P\Z\\\|WT؊gA@ם{vUPa3Lj;I܂hBZK"m8 @d@%rB1U/\ .2c&cAz(X dl=tZ@"Xtcbd`KƂ:NENX(z'aEoV6N lTPK-lKqPK  Vimages-in-jar/PK Vimages-in-jar/dot.gifstLdd`dh```eaa``b #5PK>!#PK  Vincludes-in-jar/PK V!includes-in-jar/include-file.adoc+,VH+I+Q(O,VK)MIMQH+UHTHIPKY(&PK  Vstyles-in-jar/PK Vstyles-in-jar/custom.cssM,VRPH/RH/JMͳPK1aPK V META-INF/PK V9#!vA@=META-INF/MANIFEST.MFPK V-lKq --no-manifestPK  Vlimages-in-jar/PK V>!#images-in-jar/dot.gifPK  Vincludes-in-jar/PK VY(&!*includes-in-jar/include-file.adocPK  Vstyles-in-jar/PK V1astyles-in-jar/custom.cssPK F.asciidoctor-2.0.20/test/fixtures/basic-docinfo-footer.html000066400000000000000000000003571443135032600236160ustar00rootroot00000000000000 asciidoctor-2.0.20/test/fixtures/basic-docinfo-footer.xml000066400000000000000000000003021443135032600234400ustar00rootroot00000000000000 {revnumber} 01 Jan 2013 abc Unleashed into the wild asciidoctor-2.0.20/test/fixtures/basic-docinfo-header.html000066400000000000000000000003651443135032600235470ustar00rootroot00000000000000 asciidoctor-2.0.20/test/fixtures/basic-docinfo.html000066400000000000000000000000451443135032600223140ustar00rootroot00000000000000 asciidoctor-2.0.20/test/fixtures/basic-docinfo.xml000066400000000000000000000001631443135032600221510ustar00rootroot00000000000000 2013 Acme™, Inc. asciidoctor-2.0.20/test/fixtures/basic.adoc000066400000000000000000000001261443135032600206370ustar00rootroot00000000000000= Document Title Doc Writer v1.0, 2013-01-01 Body content. 
asciidoctor-2.0.20/test/fixtures/chapter-a.adoc000066400000000000000000000000251443135032600214200ustar00rootroot00000000000000= Chapter A content asciidoctor-2.0.20/test/fixtures/child-include.adoc000066400000000000000000000001141443135032600222570ustar00rootroot00000000000000first line of child include::grandchild-include.adoc[] last line of child asciidoctor-2.0.20/test/fixtures/circle.svg000066400000000000000000000005201443135032600207060ustar00rootroot00000000000000 asciidoctor-2.0.20/test/fixtures/configure-stdin.rb000066400000000000000000000003371443135032600223570ustar00rootroot00000000000000require 'stringio' io = StringIO.new String.new %(é\n\n#{Encoding.default_external}:#{Encoding.default_internal}), encoding: Encoding::UTF_8 io.set_encoding Encoding.default_external, Encoding.default_internal $stdin = io asciidoctor-2.0.20/test/fixtures/custom-backends/000077500000000000000000000000001443135032600220115ustar00rootroot00000000000000asciidoctor-2.0.20/test/fixtures/custom-backends/erb/000077500000000000000000000000001443135032600225615ustar00rootroot00000000000000asciidoctor-2.0.20/test/fixtures/custom-backends/erb/html5/000077500000000000000000000000001443135032600236125ustar00rootroot00000000000000asciidoctor-2.0.20/test/fixtures/custom-backends/erb/html5/block_paragraph.html.erb000066400000000000000000000002561443135032600303710ustar00rootroot00000000000000 class="<%= ['paragraph',role].compact * ' ' %>"><% if title? %>
  <div class="title"><%= title %></div>
  <% end %>
  <p>
    <%= content %>
  </p>
</div>
    asciidoctor-2.0.20/test/fixtures/custom-backends/erb/html5/open.html.erb000066400000000000000000000003521443135032600262100ustar00rootroot00000000000000 class="<%= ['openblock',(@style == 'open' ? nil : @style),role].compact * ' ' %>"><% if title? %>
    <%= title %>
    <% end %>
    <%= content %>
    asciidoctor-2.0.20/test/fixtures/custom-backends/haml/000077500000000000000000000000001443135032600227325ustar00rootroot00000000000000asciidoctor-2.0.20/test/fixtures/custom-backends/haml/docbook5/000077500000000000000000000000001443135032600244375ustar00rootroot00000000000000asciidoctor-2.0.20/test/fixtures/custom-backends/haml/docbook5/block_paragraph.xml.haml000066400000000000000000000003221443135032600312150ustar00rootroot00000000000000- if title? %formalpara{'xml:id'=>@id, role: (attr :role), xreflabel: (attr :reftext)} %title=title %para=content - else %para{'xml:id'=>@id, role: (attr :role), xreflabel: (attr :reftext)}=content asciidoctor-2.0.20/test/fixtures/custom-backends/haml/html5-tweaks/000077500000000000000000000000001443135032600252575ustar00rootroot00000000000000asciidoctor-2.0.20/test/fixtures/custom-backends/haml/html5-tweaks/block_paragraph.html.haml000066400000000000000000000000131443135032600321760ustar00rootroot00000000000000%p=content asciidoctor-2.0.20/test/fixtures/custom-backends/haml/html5-tweaks/embedded.html.haml000066400000000000000000000000111443135032600306060ustar00rootroot00000000000000=content asciidoctor-2.0.20/test/fixtures/custom-backends/haml/html5/000077500000000000000000000000001443135032600237635ustar00rootroot00000000000000asciidoctor-2.0.20/test/fixtures/custom-backends/haml/html5/block_paragraph.html.haml000066400000000000000000000001051443135032600307040ustar00rootroot00000000000000- if title? .title=title %p{id: @id, class: (attr 'role')}=content asciidoctor-2.0.20/test/fixtures/custom-backends/haml/html5/block_sidebar.html.haml000066400000000000000000000001411443135032600303500ustar00rootroot00000000000000%aside{id: @id, class: (attr 'role')} - if title? %header %h1=title =content.chomp asciidoctor-2.0.20/test/fixtures/custom-backends/slim/000077500000000000000000000000001443135032600227555ustar00rootroot00000000000000asciidoctor-2.0.20/test/fixtures/custom-backends/slim/docbook5/000077500000000000000000000000001443135032600244625ustar00rootroot00000000000000asciidoctor-2.0.20/test/fixtures/custom-backends/slim/docbook5/block_paragraph.xml.slim000066400000000000000000000002771443135032600312740ustar00rootroot00000000000000- if title? formalpara xml:id=@id role=(attr :role) xreflabel=(attr :reftext) title=title para=content - else para xml:id=@id role=(attr :role) xreflabel=(attr :reftext) =content asciidoctor-2.0.20/test/fixtures/custom-backends/slim/html5-custom-outline/000077500000000000000000000000001443135032600267735ustar00rootroot00000000000000asciidoctor-2.0.20/test/fixtures/custom-backends/slim/html5-custom-outline/outline.html.slim000066400000000000000000000000711443135032600323010ustar00rootroot00000000000000ul - sections.each do |section| li = section.title asciidoctor-2.0.20/test/fixtures/custom-backends/slim/html5/000077500000000000000000000000001443135032600240065ustar00rootroot00000000000000asciidoctor-2.0.20/test/fixtures/custom-backends/slim/html5/block_paragraph.html.slim000066400000000000000000000000741443135032600307570ustar00rootroot00000000000000- if title? .title=title p id=id class="#{role}" =content asciidoctor-2.0.20/test/fixtures/custom-backends/slim/html5/block_sidebar.html.slim000066400000000000000000000001171443135032600304210ustar00rootroot00000000000000aside id=id class="#{role}" - if title? 
header h1=title =content asciidoctor-2.0.20/test/fixtures/custom-docinfodir/000077500000000000000000000000001443135032600223575ustar00rootroot00000000000000asciidoctor-2.0.20/test/fixtures/custom-docinfodir/basic-docinfo.html000066400000000000000000000000451443135032600257440ustar00rootroot00000000000000 asciidoctor-2.0.20/test/fixtures/custom-docinfodir/docinfo.html000066400000000000000000000000541443135032600246650ustar00rootroot00000000000000 asciidoctor-2.0.20/test/fixtures/custom.css000066400000000000000000000000311443135032600207450ustar00rootroot00000000000000mark { color: green; } asciidoctor-2.0.20/test/fixtures/data.tsv000066400000000000000000000000441443135032600203740ustar00rootroot00000000000000First Second Third a b c 1 2 x y z asciidoctor-2.0.20/test/fixtures/docinfo-footer.html000066400000000000000000000000451443135032600225310ustar00rootroot00000000000000Back to top asciidoctor-2.0.20/test/fixtures/docinfo-footer.xml000066400000000000000000000002561443135032600223710ustar00rootroot00000000000000 Glossary term definition asciidoctor-2.0.20/test/fixtures/docinfo.html000066400000000000000000000000611443135032600212330ustar00rootroot00000000000000 asciidoctor-2.0.20/test/fixtures/docinfo.xml000066400000000000000000000001561443135032600210740ustar00rootroot00000000000000Asciidoctor™ 1.0.0 {revnumber} asciidoctor-2.0.20/test/fixtures/doctime-localtime.adoc000066400000000000000000000000261443135032600231500ustar00rootroot00000000000000{doctime} {localtime} asciidoctor-2.0.20/test/fixtures/dot000066400000000000000000000004201443135032600174340ustar00rootroot00000000000000PNG  IHDR%VgAMA a cHRMz&u0`:pQ<PLTE°,bKGDHtIME /px IDATc`!3%tEXtdate:create2018-06-02T19:46:20-06:00 X%tEXtdate:modify2013-01-09T15:20:47-07:00IENDB`asciidoctor-2.0.20/test/fixtures/dot.gif000066400000000000000000000000431443135032600202010ustar00rootroot00000000000000GIF89a,D;asciidoctor-2.0.20/test/fixtures/empty.svg000066400000000000000000000000001443135032600205740ustar00rootroot00000000000000asciidoctor-2.0.20/test/fixtures/encoding.adoc000066400000000000000000000006501443135032600213460ustar00rootroot00000000000000Gregory Romé has written an AsciiDoc plugin for the Redmine project management application. https://github.com/foo-users/foo へと `vicmd` キーマップを足してみている試み、 アニメーションgifです。 tag::romé[] Gregory Romé has written an AsciiDoc plugin for the Redmine project management application. 
end::romé[] == Überschrift * Codierungen sind verrückt auf älteren Versionen von Ruby asciidoctor-2.0.20/test/fixtures/file-with-missing-include.adoc000066400000000000000000000000351443135032600245350ustar00rootroot00000000000000include::no-such-file.adoc[] asciidoctor-2.0.20/test/fixtures/file-with-utf8-bom.adoc000066400000000000000000000000111443135032600230760ustar00rootroot00000000000000= 人 asciidoctor-2.0.20/test/fixtures/grandchild-include.adoc000066400000000000000000000000621443135032600232750ustar00rootroot00000000000000first line of grandchild last line of grandchild asciidoctor-2.0.20/test/fixtures/hello-asciidoctor.pdf000066400000000000000000000015171443135032600230320ustar00rootroot00000000000000%PDF-1.3 % 1 0 obj << /Creator /Producer >> endobj 2 0 obj << /Type /Catalog /Pages 3 0 R >> endobj 3 0 obj << /Type /Pages /Count 1 /Kids [5 0 R] >> endobj 4 0 obj << /Length 92 >> stream q BT 36.0 747.384 Td /F1.0 12 Tf [<48656c6c6f> 40 <2c204173636969646f63746f7221>] TJ ET Q endstream endobj 5 0 obj << /Type /Page /Parent 3 0 R /MediaBox [0 0 612.0 792.0] /Contents 4 0 R /Resources << /ProcSet [/PDF /Text /ImageB /ImageC /ImageI] /Font << /F1.0 6 0 R >> >> >> endobj 6 0 obj << /Type /Font /Subtype /Type1 /BaseFont /Helvetica /Encoding /WinAnsiEncoding >> endobj xref 0 7 0000000000 65535 f 0000000015 00000 n 0000000109 00000 n 0000000158 00000 n 0000000215 00000 n 0000000357 00000 n 0000000535 00000 n trailer << /Size 7 /Root 2 0 R /Info 1 0 R >> startxref 632 %%EOF asciidoctor-2.0.20/test/fixtures/include-alt-extension.asciidoc000066400000000000000000000001001443135032600246310ustar00rootroot00000000000000first line ifdef::asciidoctor-version[Asciidoctor!] last line asciidoctor-2.0.20/test/fixtures/include-asciidoctor.rb000066400000000000000000000000241443135032600231740ustar00rootroot00000000000000include Asciidoctor asciidoctor-2.0.20/test/fixtures/include-file.adoc000066400000000000000000000007351443135032600221240ustar00rootroot00000000000000first line of included content second line of included content third line of included content fourth line of included content fifth line of included content sixth line of included content seventh line of included content eighth line of included content // tag::snippet[] // tag::snippetA[] snippetA content // end::snippetA[] non-tagged content // tag::snippetB[] snippetB content // end::snippetB[] // end::snippet[] more non-tagged content last line of included content asciidoctor-2.0.20/test/fixtures/include-file.jsx000066400000000000000000000002351443135032600220150ustar00rootroot00000000000000const element = (

    Hello, Programmer!

    Welcome to the club.

    ) asciidoctor-2.0.20/test/fixtures/include-file.ml000066400000000000000000000000751443135032600216230ustar00rootroot00000000000000(* tag::snippet[] *) let s = SS.empty;; (* end::snippet[] *) asciidoctor-2.0.20/test/fixtures/include-file.xml000066400000000000000000000001401443135032600220040ustar00rootroot00000000000000 content asciidoctor-2.0.20/test/fixtures/include-with-leading-blank-line.adoc000066400000000000000000000000451443135032600255650ustar00rootroot00000000000000 = Document Title :toc: == Section asciidoctor-2.0.20/test/fixtures/incomplete.svg000066400000000000000000000000051443135032600216020ustar00rootroot00000000000000 Preamble paragraph. NOTE: This is test, only a test. == Lists .Unordered, basic * Edgar Allen Poe * Sheri S. Tepper * Bill Bryson .Unordered, max nesting * level 1 ** level 2 *** level 3 **** level 4 ***** level 5 * level 1 .Checklist - [*] checked - [x] also checked - [ ] not checked - normal list item .Ordered, basic . Step 1 . Step 2 . Step 3 .Ordered, nested . Step 1 . Step 2 .. Step 2a .. Step 2b . Step 3 .Ordered, max nesting . level 1 .. level 2 ... level 3 .... level 4 ..... level 5 . level 1 .Labeled, single-line first term:: definition of first term section term:: definition of second term .Labeled, multi-line first term:: definition of first term second term:: definition of second term .Q&A [qanda] What is Asciidoctor?:: An implementation of the AsciiDoc processor in Ruby. What is the answer to the Ultimate Question?:: 42 .Mixed Operating Systems:: Linux::: . Fedora * Desktop . Ubuntu * Desktop * Server BSD::: . FreeBSD . NetBSD Cloud Providers:: PaaS::: . OpenShift . CloudBees IaaS::: . Amazon EC2 . Rackspace .Unordered, complex * level 1 ** level 2 *** level 3 This is a new line inside an unordered list using {plus} symbol. We can even force content to start on a separate line... + Amazing, isn't it? **** level 4 + The {plus} symbol is on a new line. 
***** level 5 ���������������������������������������������������������������������������������������������������������asciidoctor-2.0.20/test/fixtures/main.adoc����������������������������������������������������������0000664�0000000�0000000�00000000103�14431350326�0020475�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������= Main Document preamble include::chapter-a.adoc[leveloffset=+1] �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������asciidoctor-2.0.20/test/fixtures/mismatched-end-tag.adoc��������������������������������������������0000664�0000000�0000000�00000000062�14431350326�0023210�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������//tag::a[] a //tag::b[] b //end::a[] //end::b[] c ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������asciidoctor-2.0.20/test/fixtures/other-chapters.adoc������������������������������������������������0000664�0000000�0000000�00000000247�14431350326�0022512�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������// tag::ch2[] [#ch2] // tag::ch2-noid[] == Chapter 2 The plot thickens. // end::ch2-noid[] // end::ch2[] [#ch3] == Chapter 3 The plot runs its course, predictably. 
���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������asciidoctor-2.0.20/test/fixtures/outer-include.adoc�������������������������������������������������0000664�0000000�0000000�00000000117�14431350326�0022335�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������first line of outer include::subdir/middle-include.adoc[] last line of outer �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������asciidoctor-2.0.20/test/fixtures/parent-include-restricted.adoc�������������������������������������0000664�0000000�0000000�00000000120�14431350326�0024630�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������first line of parent include::child-include.adoc[depth=0] last line of parent ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������asciidoctor-2.0.20/test/fixtures/parent-include.adoc������������������������������������������������0000664�0000000�0000000�00000000111�14431350326�0022462�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������first line of parent include::child-include.adoc[] last line of parent 
�������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������asciidoctor-2.0.20/test/fixtures/sample-alt-extension.asciidoc��������������������������������������0000664�0000000�0000000�00000000033�14431350326�0024474�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������= Document Title contents �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������asciidoctor-2.0.20/test/fixtures/sample-docinfo-header.xml������������������������������������������0000664�0000000�0000000�00000001124�14431350326�0023575�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������ {doctitle} {docdate} {firstname} {lastname} {email} {authorinitials} 1.0 2000-01-01 jwz New millennium, new release. 2.0 2010-12-25 why Why not? A new release. ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������asciidoctor-2.0.20/test/fixtures/sample.adoc��������������������������������������������������������0000664�0000000�0000000�00000000534�14431350326�0021042�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������Document Title ============== Doc Writer :idprefix: id_ Preamble paragraph. NOTE: This is test, only a test. == Section A *Section A* paragraph. === Section A Subsection *Section A* 'subsection' paragraph. == Section B *Section B* paragraph. 
|=== |a |b |c |1 |2 |3 |=== .Section B list * Item 1 * Item 2 * Item 3 ��������������������������������������������������������������������������������������������������������������������������������������������������������������������asciidoctor-2.0.20/test/fixtures/section-a.adoc�����������������������������������������������������0000664�0000000�0000000�00000000044�14431350326�0021437�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������[#section-a] == Section A contents ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������asciidoctor-2.0.20/test/fixtures/source-block.adoc��������������������������������������������������0000664�0000000�0000000�00000000055�14431350326�0022147�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������[source,ruby] ---- puts 'Hello, World!' 
---- �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������asciidoctor-2.0.20/test/fixtures/stylesheets/�������������������������������������������������������0000775�0000000�0000000�00000000000�14431350326�0021303�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������asciidoctor-2.0.20/test/fixtures/stylesheets/custom.css���������������������������������������������0000664�0000000�0000000�00000000027�14431350326�0023326�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������body { color: red; } ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������asciidoctor-2.0.20/test/fixtures/subdir/������������������������������������������������������������0000775�0000000�0000000�00000000000�14431350326�0020217�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������asciidoctor-2.0.20/test/fixtures/subdir/index.adoc��������������������������������������������������0000664�0000000�0000000�00000000033�14431350326�0022152�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������= Sample Document content 
�����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������asciidoctor-2.0.20/test/fixtures/subdir/inner-include.adoc������������������������������������������0000664�0000000�0000000�00000000050�14431350326�0023576�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������first line of inner last line of inner ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������asciidoctor-2.0.20/test/fixtures/subdir/middle-include.adoc�����������������������������������������0000664�0000000�0000000�00000000111�14431350326�0023717�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������first line of middle include::inner-include.adoc[] last line of middle �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������asciidoctor-2.0.20/test/fixtures/subs-docinfo.html��������������������������������������������������0000664�0000000�0000000�00000000156�14431350326�0022212�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������ asciidoctor-2.0.20/test/fixtures/subs.adoc000066400000000000000000000001601443135032600205300ustar00rootroot00000000000000= Document Title Doc Writer v1.0, 2013-01-01 :bootstrap-version: 3.2.0 Body content. 
asciidoctor-2.0.20/test/fixtures/tagged-class-enclosed.rb000066400000000000000000000006121443135032600234030ustar00rootroot00000000000000#tag::all[] class Dog #tag::init[] def initialize breed @breed = breed end #end::init[] #tag::bark[] def bark #tag::bark-beagle[] if @breed == 'beagle' 'woof woof woof woof woof' #end::bark-beagle[] #tag::bark-other[] else 'woof woof' #end::bark-other[] #tag::bark-all[] end #end::bark-all[] end #end::bark[] end #end::all[] asciidoctor-2.0.20/test/fixtures/tagged-class.rb000066400000000000000000000005621443135032600216150ustar00rootroot00000000000000class Dog #tag::init[] def initialize breed @breed = breed end #end::init[] #tag::bark[] def bark #tag::bark-beagle[] if @breed == 'beagle' 'woof woof woof woof woof' #end::bark-beagle[] #tag::bark-other[] else 'woof woof' #end::bark-other[] #tag::bark-all[] end #end::bark-all[] end #end::bark[] end asciidoctor-2.0.20/test/fixtures/tip.gif000066400000000000000000000000431443135032600202070ustar00rootroot00000000000000GIF89a,D;asciidoctor-2.0.20/test/fixtures/unclosed-tag.adoc000066400000000000000000000000201443135032600221340ustar00rootroot00000000000000x // tag::a[] a asciidoctor-2.0.20/test/fixtures/undef-dir-home.rb000066400000000000000000000001251443135032600220550ustar00rootroot00000000000000# undef_method wasn't public until 2.5 Dir.singleton_class.send :undef_method, :home asciidoctor-2.0.20/test/fixtures/unexpected-end-tag.adoc000066400000000000000000000000461443135032600232400ustar00rootroot00000000000000// tag::a[] a // end::a[] // end::a[] asciidoctor-2.0.20/test/helpers_test.rb000066400000000000000000000123501443135032600201050ustar00rootroot00000000000000# frozen_string_literal: true require_relative 'test_helper' context 'Helpers' do context 'URI Encoding' do test 'should URI encode non-word characters generally' do given = ' !*/%&?\\=' expect = '+%21%2A%2F%25%26%3F%5C%3D' assert_equal expect, (Asciidoctor::Helpers.encode_uri_component given) end test 'should not URI encode select non-word characters' do # NOTE Ruby 2.5 and up stopped encoding ~ given = '-.' expect = given assert_equal expect, (Asciidoctor::Helpers.encode_uri_component given) end end context 'URIs and Paths' do test 'rootname should return file name without extension' do assert_equal 'main', Asciidoctor::Helpers.rootname('main.adoc') assert_equal 'docs/main', Asciidoctor::Helpers.rootname('docs/main.adoc') end test 'rootname should file name if it has no extension' do assert_equal 'main', Asciidoctor::Helpers.rootname('main') assert_equal 'docs/main', Asciidoctor::Helpers.rootname('docs/main') end test 'rootname should ignore dot not in last segment' do assert_equal 'include.d/main', Asciidoctor::Helpers.rootname('include.d/main') assert_equal 'include.d/main', Asciidoctor::Helpers.rootname('include.d/main.adoc') end test 'extname? 
should return whether path contains an extname' do assert Asciidoctor::Helpers.extname?('document.adoc') assert Asciidoctor::Helpers.extname?('path/to/document.adoc') assert_nil Asciidoctor::Helpers.extname?('basename') refute Asciidoctor::Helpers.extname?('include.d/basename') end test 'UriSniffRx should detect URIs' do assert Asciidoctor::UriSniffRx =~ 'http://example.com' assert Asciidoctor::UriSniffRx =~ 'https://example.com' assert Asciidoctor::UriSniffRx =~ 'data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs=' end test 'UriSniffRx should not detect an absolute Windows path as a URI' do assert Asciidoctor::UriSniffRx !~ 'c:/sample.adoc' assert Asciidoctor::UriSniffRx !~ 'c:\\sample.adoc' end test 'uriish? should not detect a classloader path as a URI on JRuby' do input = 'uri:classloader:/sample.png' assert Asciidoctor::UriSniffRx =~ input if jruby? refute Asciidoctor::Helpers.uriish? input else assert Asciidoctor::Helpers.uriish? input end end test 'UriSniffRx should not detect URI that does not start on first line' do assert Asciidoctor::UriSniffRx !~ %(text\nhttps://example.org) end end context 'Type Resolution' do test 'should get class for top-level class name' do clazz = Asciidoctor::Helpers.class_for_name 'String' refute_nil clazz assert_equal String, clazz end test 'should get class for class name in module' do clazz = Asciidoctor::Helpers.class_for_name 'Asciidoctor::Document' refute_nil clazz assert_equal Asciidoctor::Document, clazz end test 'should get class for class name resolved from root' do clazz = Asciidoctor::Helpers.class_for_name '::Asciidoctor::Document' refute_nil clazz assert_equal Asciidoctor::Document, clazz end test 'should raise exception if cannot find class for name' do begin Asciidoctor::Helpers.class_for_name 'InvalidModule::InvalidClass' flunk 'Expecting RuntimeError to be raised' rescue NameError => e assert_match %r/^Could not resolve class for name: InvalidModule::InvalidClass$/, e.message end end test 'should raise exception if constant name is invalid' do begin Asciidoctor::Helpers.class_for_name 'foobar' flunk 'Expecting RuntimeError to be raised' rescue NameError => e assert_match %r/^Could not resolve class for name: foobar$/, e.message end end test 'should raise exception if class not found in scope' do begin Asciidoctor::Helpers.class_for_name 'Asciidoctor::Extensions::String' flunk 'Expecting RuntimeError to be raised' rescue NameError => e assert_match %r/^Could not resolve class for name: Asciidoctor::Extensions::String$/, e.message end end test 'should raise exception if name resolves to module' do begin Asciidoctor::Helpers.class_for_name 'Asciidoctor::Extensions' flunk 'Expecting RuntimeError to be raised' rescue NameError => e assert_match %r/^Could not resolve class for name: Asciidoctor::Extensions$/, e.message end end test 'should resolve class if class is given' do clazz = Asciidoctor::Helpers.resolve_class Asciidoctor::Document refute_nil clazz assert_equal Asciidoctor::Document, clazz end test 'should resolve class if class from string' do clazz = Asciidoctor::Helpers.resolve_class 'Asciidoctor::Document' refute_nil clazz assert_equal Asciidoctor::Document, clazz end test 'should not resolve class if not in scope' do begin Asciidoctor::Helpers.resolve_class 'Asciidoctor::Extensions::String' flunk 'Expecting RuntimeError to be raised' rescue NameError => e assert_match %r/^Could not resolve class for name: Asciidoctor::Extensions::String$/, e.message end end end end 
asciidoctor-2.0.20/test/invoker_test.rb000066400000000000000000000716651443135032600201360ustar00rootroot00000000000000# frozen_string_literal: false require_relative 'test_helper' require File.join Asciidoctor::LIB_DIR, 'asciidoctor/cli' context 'Invoker' do test 'should allow Options to be passed as first argument of constructor' do opts = Asciidoctor::Cli::Options.new attributes: { 'toc' => '' }, doctype: 'book', eruby: 'erubis' invoker = Asciidoctor::Cli::Invoker.new opts assert_same invoker.options, opts end test 'should allow options Hash to be passed as first argument of constructor' do opts = { attributes: { 'toc' => '' }, doctype: 'book', eruby: 'erubis' } invoker = Asciidoctor::Cli::Invoker.new opts resolved_opts = invoker.options assert_equal opts[:attributes], resolved_opts[:attributes] assert_equal 'book', resolved_opts[:attributes]['doctype'] assert_equal 'erubis', resolved_opts[:eruby] end test 'should parse options from array passed as first argument of constructor' do input_file = fixture_path 'basic.adoc' invoker = Asciidoctor::Cli::Invoker.new ['-s', input_file] resolved_options = invoker.options refute resolved_options[:standalone] assert_equal [input_file], resolved_options[:input_files] end test 'should parse options from multiple arguments passed to constructor' do input_file = fixture_path 'basic.adoc' invoker = Asciidoctor::Cli::Invoker.new '-s', input_file resolved_options = invoker.options refute resolved_options[:standalone] assert_equal [input_file], resolved_options[:input_files] end test 'should parse source and convert to html5 article by default' do invoker = nil output = nil redirect_streams do |out, err| invoker = invoke_cli %w(-o -) output = out.string end refute_nil invoker doc = invoker.document refute_nil doc assert_equal 'Document Title', doc.doctitle assert_equal 'Doc Writer', doc.attr('author') assert_equal 'html5', doc.attr('backend') assert_equal '.html', doc.attr('outfilesuffix') assert_equal 'article', doc.attr('doctype') assert doc.blocks? 
assert_equal :preamble, doc.blocks.first.context refute_empty output assert_xpath '/html', output, 1 assert_xpath '/html/head', output, 1 assert_xpath '/html/body', output, 1 assert_xpath '/html/head/title[text() = "Document Title"]', output, 1 assert_xpath '/html/body[@class="article"]/*[@id="header"]/h1[text() = "Document Title"]', output, 1 end test 'should set implicit doc info attributes' do sample_filepath = fixture_path 'sample.adoc' sample_filedir = fixturedir invoker = invoke_cli_to_buffer %w(-o /dev/null), sample_filepath doc = invoker.document assert_equal 'sample', doc.attr('docname') assert_equal sample_filepath, doc.attr('docfile') assert_equal sample_filedir, doc.attr('docdir') assert doc.attr?('docdate') assert doc.attr?('docyear') assert doc.attr?('doctime') assert doc.attr?('docdatetime') assert_empty invoker.read_output end test 'should allow docdate and doctime to be overridden' do sample_filepath = fixture_path 'sample.adoc' invoker = invoke_cli_to_buffer %w(-o /dev/null -a docdate=2015-01-01 -a doctime=10:00:00-0700), sample_filepath doc = invoker.document assert doc.attr?('docdate', '2015-01-01') assert doc.attr?('docyear', '2015') assert doc.attr?('doctime', '10:00:00-0700') assert doc.attr?('docdatetime', '2015-01-01 10:00:00-0700') end test 'should accept document from stdin and write to stdout' do invoker = invoke_cli_to_buffer(%w(-e), '-') { 'content' } doc = invoker.document refute doc.attr?('docname') refute doc.attr?('docfile') assert_equal Dir.pwd, doc.attr('docdir') assert_equal doc.attr('docdate'), doc.attr('localdate') assert_equal doc.attr('docyear'), doc.attr('localyear') assert_equal doc.attr('doctime'), doc.attr('localtime') assert_equal doc.attr('docdatetime'), doc.attr('localdatetime') refute doc.attr?('outfile') output = invoker.read_output refute_empty output assert_xpath '/*[@class="paragraph"]/p[text()="content"]', output, 1 end test 'should not fail to rewind input if reading document from stdin' do begin old_stdin = $stdin $stdin = StringIO.new 'paragraph' invoker = invoke_cli_to_buffer(%w(-e), '-') assert_equal 0, invoker.code assert_equal 1, invoker.document.blocks.size ensure $stdin = old_stdin end end test 'should accept document from stdin and write to output file' do sample_outpath = fixture_path 'sample-output.html' begin invoker = invoke_cli(%W(-e -o #{sample_outpath}), '-') { 'content' } doc = invoker.document refute doc.attr?('docname') refute doc.attr?('docfile') assert_equal Dir.pwd, doc.attr('docdir') assert_equal doc.attr('docdate'), doc.attr('localdate') assert_equal doc.attr('docyear'), doc.attr('localyear') assert_equal doc.attr('doctime'), doc.attr('localtime') assert_equal doc.attr('docdatetime'), doc.attr('localdatetime') assert doc.attr?('outfile') assert_equal sample_outpath, doc.attr('outfile') assert File.exist?(sample_outpath) ensure FileUtils.rm_f(sample_outpath) end end test 'should fail if input file matches resolved output file' do invoker = invoke_cli_to_buffer %w(-a outfilesuffix=.adoc), 'sample.adoc' assert_match(/input file and output file cannot be the same/, invoker.read_error) end test 'should fail if input file matches specified output file' do sample_outpath = fixture_path 'sample.adoc' invoker = invoke_cli_to_buffer %W(-o #{sample_outpath}), 'sample.adoc' assert_match(/input file and output file cannot be the same/, invoker.read_error) end test 'should accept input from named pipe and output to stdout', unless: windows? 
do sample_inpath = fixture_path 'sample-pipe.adoc' begin %x(mkfifo #{sample_inpath}) write_thread = Thread.new do File.write sample_inpath, 'pipe content' end invoker = invoke_cli_to_buffer %w(-a stylesheet!), sample_inpath result = invoker.read_output assert_match(/pipe content/, result) write_thread.join ensure FileUtils.rm_f sample_inpath end end test 'should allow docdir to be specified when input is a string' do expected_docdir = fixturedir invoker = invoke_cli_to_buffer(%w(-e --base-dir test/fixtures -o /dev/null), '-') { 'content' } doc = invoker.document assert_equal expected_docdir, doc.attr('docdir') assert_equal expected_docdir, doc.base_dir end test 'should display version and exit' do expected = %(Asciidoctor #{Asciidoctor::VERSION} [https://asciidoctor.org]\nRuntime Environment (#{RUBY_DESCRIPTION})) ['--version', '-V'].each do |switch| actual = nil redirect_streams do |out, err| invoke_cli [switch] actual = out.string.rstrip end refute_nil actual assert actual.start_with?(expected), %(Expected to print version when using #{switch} switch) end end test 'should print warnings to stderr by default' do input = <<~'EOS' 2. second 3. third EOS warnings = nil redirect_streams do |out, err| invoke_cli_to_buffer(%w(-o /dev/null), '-') { input } warnings = err.string end assert_match(/WARNING/, warnings) end test 'should enable script warnings if -w flag is specified' do old_verbose, $VERBOSE = $VERBOSE, false begin warnings = nil redirect_streams do |_, err| invoke_cli_to_buffer %w(-w -o /dev/null), '-' do A_CONST = 10 A_CONST = 20 end warnings = err.string end assert_equal false, $VERBOSE refute_empty warnings ensure $VERBOSE = old_verbose end end test 'should silence warnings if -q flag is specified' do input = <<~'EOS' 2. second 3. third EOS warnings = nil redirect_streams do |out, err| invoke_cli_to_buffer(%w(-q -o /dev/null), '-') { input } warnings = err.string end assert_equal '', warnings end test 'should not fail to check log level when -q flag is specified' do input = <<~'EOS' skip to <> . download . install[[install]] . run EOS begin old_stderr, $stderr = $stderr, ::StringIO.new old_stdout, $stdout = $stdout, ::StringIO.new invoker = invoke_cli(%w(-q), '-') { input } assert_equal 0, invoker.code ensure $stderr = old_stderr $stdout = old_stdout end end test 'should return non-zero exit code if failure level is reached' do input = <<~'EOS' 2. second 3. third EOS exit_code, messages = redirect_streams do |_, err| [invoke_cli(%w(-q --failure-level=WARN -o /dev/null), '-') { input }.code, err.string] end assert_equal 1, exit_code assert messages.empty? 
end test 'should report usage if no input file given' do redirect_streams do |out, err| invoke_cli [], nil assert_match(/Usage:/, err.string) end end test 'should report error if input file does not exist' do redirect_streams do |out, err| invoker = invoke_cli [], 'missing_file.adoc' assert_match(/input file .* is missing/, err.string) assert_equal 1, invoker.code end end test 'should treat extra arguments as files' do redirect_streams do |out, err| invoker = invoke_cli %w(-o /dev/null extra arguments sample.adoc), nil assert_match(/input file .* is missing/, err.string) assert_equal 1, invoker.code end end test 'should output to file name based on input file name' do sample_outpath = fixture_path 'sample.html' begin invoker = invoke_cli doc = invoker.document assert_equal sample_outpath, doc.attr('outfile') assert File.exist?(sample_outpath) output = File.read(sample_outpath, mode: Asciidoctor::FILE_READ_MODE) refute_empty output assert_xpath '/html', output, 1 assert_xpath '/html/head', output, 1 assert_xpath '/html/body', output, 1 assert_xpath '/html/head/title[text() = "Document Title"]', output, 1 assert_xpath '/html/body/*[@id="header"]/h1[text() = "Document Title"]', output, 1 ensure FileUtils.rm_f(sample_outpath) end end test 'should output to file in destination directory if set' do destination_path = File.join testdir, 'test_output' sample_outpath = File.join destination_path, 'sample.html' begin FileUtils.mkdir_p(destination_path) # QUESTION should -D be relative to working directory or source directory? invoker = invoke_cli %w(-D test/test_output) #invoker = invoke_cli %w(-D ../../test/test_output) doc = invoker.document assert_equal sample_outpath, doc.attr('outfile') assert File.exist?(sample_outpath) ensure FileUtils.rm_f(sample_outpath) FileUtils.rmdir(destination_path) end end test 'should preserve directory structure in destination directory if source directory is set' do sample_inpath = 'subdir/index.adoc' destination_path = 'test_output' destination_subdir_path = File.join destination_path, 'subdir' sample_outpath = File.join destination_subdir_path, 'index.html' begin FileUtils.mkdir_p(destination_path) invoke_cli %W(-D #{destination_path} -R test/fixtures), sample_inpath assert File.directory?(destination_subdir_path) assert File.exist?(sample_outpath) ensure FileUtils.rm_f(sample_outpath) FileUtils.rmdir(destination_subdir_path) FileUtils.rmdir(destination_path) end end test 'should output to file specified' do sample_outpath = fixture_path 'sample-output.html' begin invoker = invoke_cli %W(-o #{sample_outpath}) doc = invoker.document assert_equal sample_outpath, doc.attr('outfile') assert File.exist?(sample_outpath) ensure FileUtils.rm_f(sample_outpath) end end test 'should copy default stylesheet to target directory if linkcss is specified' do sample_outpath = fixture_path 'sample-output.html' asciidoctor_stylesheet = fixture_path 'asciidoctor.css' coderay_stylesheet = fixture_path 'coderay-asciidoctor.css' begin invoke_cli %W(-o #{sample_outpath} -a linkcss -a source-highlighter=coderay), 'source-block.adoc' assert File.exist?(sample_outpath) assert File.exist?(asciidoctor_stylesheet) assert File.exist?(coderay_stylesheet) ensure FileUtils.rm_f(sample_outpath) FileUtils.rm_f(asciidoctor_stylesheet) FileUtils.rm_f(coderay_stylesheet) end end test 'should not copy coderay stylesheet to target directory when no source blocks where highlighted' do sample_outpath = fixture_path 'sample-output.html' asciidoctor_stylesheet = fixture_path 'asciidoctor.css' 
coderay_stylesheet = fixture_path 'coderay-asciidoctor.css' begin invoke_cli %W(-o #{sample_outpath} -a linkcss -a source-highlighter=coderay) assert File.exist?(sample_outpath) assert File.exist?(asciidoctor_stylesheet) refute File.exist?(coderay_stylesheet) ensure FileUtils.rm_f(sample_outpath) FileUtils.rm_f(asciidoctor_stylesheet) FileUtils.rm_f(coderay_stylesheet) end end test 'should not copy default stylesheet to target directory if linkcss is set and copycss is unset' do sample_outpath = fixture_path 'sample-output.html' default_stylesheet = fixture_path 'asciidoctor.css' begin invoker = invoke_cli %W(-o #{sample_outpath} -a linkcss -a copycss!) invoker.document assert File.exist?(sample_outpath) refute File.exist?(default_stylesheet) ensure FileUtils.rm_f(sample_outpath) FileUtils.rm_f(default_stylesheet) end end test 'should copy custom stylesheet to target directory if stylesheet and linkcss is specified' do destdir = fixture_path 'output' sample_outpath = File.join destdir, 'sample-output.html' stylesdir = File.join destdir, 'styles' custom_stylesheet = File.join stylesdir, 'custom.css' begin invoker = invoke_cli %W(-o #{sample_outpath} -a linkcss -a copycss=stylesheets/custom.css -a stylesdir=./styles -a stylesheet=custom.css) invoker.document assert File.exist?(sample_outpath) assert File.exist?(custom_stylesheet) ensure FileUtils.rm_f(sample_outpath) FileUtils.rm_f(custom_stylesheet) FileUtils.rmdir(stylesdir) FileUtils.rmdir(destdir) end end test 'should not copy custom stylesheet to target directory if stylesheet and linkcss are set and copycss is unset' do destdir = fixture_path 'output' sample_outpath = File.join destdir, 'sample-output.html' stylesdir = File.join destdir, 'styles' custom_stylesheet = File.join stylesdir, 'custom.css' begin invoker = invoke_cli %W(-o #{sample_outpath} -a linkcss -a stylesdir=./styles -a stylesheet=custom.css -a copycss!) invoker.document assert File.exist?(sample_outpath) refute File.exist?(custom_stylesheet) ensure FileUtils.rm_f(sample_outpath) FileUtils.rm_f(custom_stylesheet) FileUtils.rmdir(stylesdir) if File.directory? stylesdir FileUtils.rmdir(destdir) end end test 'should not copy custom stylesheet to target directory if stylesdir is a URI' do destdir = fixture_path 'output' sample_outpath = File.join destdir, 'sample-output.html' stylesdir = File.join destdir, 'http:' begin invoker = invoke_cli %W(-o #{sample_outpath} -a linkcss -a stylesdir=http://example.org/styles -a stylesheet=custom.css) invoker.document assert File.exist?(sample_outpath) refute File.exist?(stylesdir) ensure FileUtils.rm_f(sample_outpath) FileUtils.rmdir(stylesdir) if File.directory? 
stylesdir FileUtils.rmdir(destdir) end end test 'should convert all passed files' do basic_outpath = fixture_path 'basic.html' sample_outpath = fixture_path 'sample.html' begin invoke_cli_with_filenames [], %w(basic.adoc sample.adoc) assert File.exist?(basic_outpath) assert File.exist?(sample_outpath) ensure FileUtils.rm_f(basic_outpath) FileUtils.rm_f(sample_outpath) end end test 'options should not be modified when processing multiple files' do destination_path = File.join testdir, 'test_output' basic_outpath = File.join destination_path, 'basic.htm' sample_outpath = File.join destination_path, 'sample.htm' begin invoke_cli_with_filenames %w(-D test/test_output -a outfilesuffix=.htm), %w(basic.adoc sample.adoc) assert File.exist?(basic_outpath) assert File.exist?(sample_outpath) ensure FileUtils.rm_f(basic_outpath) FileUtils.rm_f(sample_outpath) FileUtils.rmdir(destination_path) end end test 'should convert all files that matches a glob expression' do basic_outpath = fixture_path 'basic.html' begin invoke_cli_to_buffer [], "ba*.adoc" assert File.exist?(basic_outpath) ensure FileUtils.rm_f(basic_outpath) end end test 'should convert all files that matches an absolute path glob expression' do basic_outpath = fixture_path 'basic.html' glob = fixture_path 'ba*.adoc' # test Windows using backslash-style pathname if File::ALT_SEPARATOR == '\\' glob = glob.tr '/', '\\' end begin invoke_cli_to_buffer [], glob assert File.exist?(basic_outpath) ensure FileUtils.rm_f(basic_outpath) end end test 'should suppress header footer if specified' do # NOTE this verifies support for the legacy alias -s [%w(-e -o -), %w(-s -o -)].each do |flags| invoker = invoke_cli_to_buffer flags output = invoker.read_output assert_xpath '/html', output, 0 assert_xpath '/*[@id="preamble"]', output, 1 end end test 'should write page for each alternate manname' do outdir = fixturedir outfile_1 = File.join outdir, 'eve.1' outfile_2 = File.join outdir, 'islifeform.1' input = <<~'EOS' = eve(1) Andrew Stanton v1.0.0 :doctype: manpage :manmanual: EVE :mansource: EVE == NAME eve, islifeform - analyzes an image to determine if it's a picture of a life form == SYNOPSIS *eve* ['OPTION']... 'FILE'... 
EOS begin invoke_cli(%W(-b manpage -o #{outfile_1}), '-') { input } assert File.exist?(outfile_1) assert File.exist?(outfile_2) assert_equal '.so eve.1', (File.read outfile_2, mode: Asciidoctor::FILE_READ_MODE).chomp ensure FileUtils.rm_f outfile_1 FileUtils.rm_f outfile_2 end end test 'should output a trailing newline to stdout' do invoker = nil output = nil redirect_streams do |out, err| invoker = invoke_cli %w(-o -) output = out.string end refute_nil invoker refute_nil output assert output.end_with?("\n") end test 'should set backend to html5 if specified' do invoker = invoke_cli_to_buffer %w(-b html5 -o -) doc = invoker.document assert_equal 'html5', doc.attr('backend') assert_equal '.html', doc.attr('outfilesuffix') output = invoker.read_output assert_xpath '/html', output, 1 end test 'should set backend to docbook5 if specified' do invoker = invoke_cli_to_buffer %w(-b docbook5 -a xmlns -o -) doc = invoker.document assert_equal 'docbook5', doc.attr('backend') assert_equal '.xml', doc.attr('outfilesuffix') output = invoker.read_output assert_xpath '/xmlns:article', output, 1 end test 'should set doctype to article if specified' do invoker = invoke_cli_to_buffer %w(-d article -o -) doc = invoker.document assert_equal 'article', doc.attr('doctype') output = invoker.read_output assert_xpath '/html/body[@class="article"]', output, 1 end test 'should set doctype to book if specified' do invoker = invoke_cli_to_buffer %w(-d book -o -) doc = invoker.document assert_equal 'book', doc.attr('doctype') output = invoker.read_output assert_xpath '/html/body[@class="book"]', output, 1 end test 'should warn if doctype is inline and the first block is not a candidate for inline conversion' do ['== Section Title', 'image::tiger.png[]'].each do |input| warnings = redirect_streams do |out, err| invoke_cli_to_buffer(%w(-d inline), '-') { input } err.string end assert_match(/WARNING: no inline candidate/, warnings) end end test 'should not warn if doctype is inline and the document has no blocks' do warnings = redirect_streams do |out, err| invoke_cli_to_buffer(%w(-d inline), '-') { '// comment' } err.string end refute_match(/WARNING/, warnings) end test 'should not warn if doctype is inline and the document contains multiple blocks' do warnings = redirect_streams do |out, err| invoke_cli_to_buffer(%w(-d inline), '-') { %(paragraph one\n\nparagraph two\n\nparagraph three) } err.string end refute_match(/WARNING/, warnings) end test 'should locate custom templates based on template dir, template engine and backend' do custom_backend_root = fixture_path 'custom-backends' invoker = invoke_cli_to_buffer %W(-E haml -T #{custom_backend_root} -o -) doc = invoker.document assert_kind_of Asciidoctor::Converter::CompositeConverter, doc.converter selected = doc.converter.find_converter 'paragraph' assert_kind_of Asciidoctor::Converter::TemplateConverter, selected assert_kind_of haml_template_class, selected.templates['paragraph'] end test 'should load custom templates from multiple template directories' do custom_backend_1 = fixture_path 'custom-backends/haml/html5' custom_backend_2 = fixture_path 'custom-backends/haml/html5-tweaks' invoker = invoke_cli_to_buffer %W(-T #{custom_backend_1} -T #{custom_backend_2} -o - -e) output = invoker.read_output assert_css '.paragraph', output, 0 assert_css '#preamble > .sectionbody > p', output, 1 end test 'should set attribute with value' do invoker = invoke_cli_to_buffer %w(--trace -a idprefix=id -e -o -) doc = invoker.document assert_equal 'id', doc.attr('idprefix') output = 
invoker.read_output assert_xpath '//h2[@id="idsection_a"]', output, 1 end test 'should set attribute with value containing equal sign' do invoker = invoke_cli_to_buffer %w(--trace -a toc -a toc-title=t=o=c -o -) doc = invoker.document assert_equal 't=o=c', doc.attr('toc-title') output = invoker.read_output assert_xpath '//*[@id="toctitle"][text() = "t=o=c"]', output, 1 end test 'should set attribute with quoted value containing a space' do # emulating commandline arguments: --trace -a toc -a note-caption="Note to self:" -o - invoker = invoke_cli_to_buffer %w(--trace -a toc -a note-caption=Note\ to\ self: -o -) doc = invoker.document assert_equal 'Note to self:', doc.attr('note-caption') output = invoker.read_output assert_xpath %(//*[#{contains_class('admonitionblock')}]//*[@class='title'][text() = 'Note to self:']), output, 1 end test 'should not set attribute ending in @ if defined in document' do invoker = invoke_cli_to_buffer %w(--trace -a idprefix=id@ -e -o -) doc = invoker.document assert_equal 'id_', doc.attr('idprefix') output = invoker.read_output assert_xpath '//h2[@id="id_section_a"]', output, 1 end test 'should set attribute with no value' do invoker = invoke_cli_to_buffer %w(-a icons -e -o -) doc = invoker.document assert_equal '', doc.attr('icons') output = invoker.read_output assert_xpath '//*[@class="admonitionblock note"]//img[@alt="Note"]', output, 1 end test 'should unset attribute ending in bang' do invoker = invoke_cli_to_buffer %w(-a sectids! -e -o -) doc = invoker.document refute doc.attr?('sectids') output = invoker.read_output # leave the count loose in case we add more sections assert_xpath '//h2[not(@id)]', output end test 'default mode for cli should be unsafe' do invoker = invoke_cli_to_buffer %w(-o /dev/null) doc = invoker.document assert_equal Asciidoctor::SafeMode::UNSAFE, doc.safe end test 'should set safe mode if specified' do invoker = invoke_cli_to_buffer %w(--safe -o /dev/null) doc = invoker.document assert_equal Asciidoctor::SafeMode::SAFE, doc.safe end test 'should set safe mode to specified level' do levels = { 'unsafe' => Asciidoctor::SafeMode::UNSAFE, 'safe' => Asciidoctor::SafeMode::SAFE, 'server' => Asciidoctor::SafeMode::SERVER, 'secure' => Asciidoctor::SafeMode::SECURE, } levels.each do |name, const| invoker = invoke_cli_to_buffer %W(-S #{name} -o /dev/null) doc = invoker.document assert_equal const, doc.safe end end test 'should set eRuby impl if specified' do invoker = invoke_cli_to_buffer %w(--eruby erubi -o /dev/null) doc = invoker.document assert_equal 'erubi', doc.instance_variable_get('@options')[:eruby] end test 'should force default external encoding to UTF-8' do input_path = fixture_path 'encoding.adoc' # using open3 to work around a bug in JRuby process_manager.rb, # which tries to run a gsub on stdout prematurely breaking the test # warnings may be issued, so don't assert on stderr stdout_lines = run_command(asciidoctor_cmd, '-o', '-', '--trace', input_path, env: { 'LANG' => 'US-ASCII' }) {|out| out.readlines } refute_empty stdout_lines # NOTE Ruby on Windows runs with a IBM437 encoding by default stdout_lines.each {|l| l.force_encoding Encoding::UTF_8 } unless Encoding.default_external == Encoding::UTF_8 stdout_str = stdout_lines.join assert_includes stdout_str, 'Codierungen sind verrückt auf älteren Versionen von Ruby' end test 'should force stdio encoding to UTF-8' do cmd = asciidoctor_cmd ['-E', 'IBM866:IBM866'] # NOTE configure-stdin.rb populates stdin result = run_command(cmd, '-r', (fixture_path 'configure-stdin.rb'), '-e', 
'-o', '-', '-') {|out| out.read } # NOTE Ruby on Windows runs with a IBM437 encoding by default result.force_encoding Encoding::UTF_8 unless Encoding.default_external == Encoding::UTF_8 assert_equal Encoding::UTF_8, result.encoding assert_include '
<p>é</p>', result assert_include '<p>IBM866:IBM866</p>
    ', result end test 'should not fail to load if call to Dir.home fails', unless: RUBY_ENGINE == 'truffleruby' do cmd = asciidoctor_cmd ['-r', (fixture_path 'undef-dir-home.rb')] result = run_command(cmd, '-e', '-o', '-', (fixture_path 'basic.adoc')) {|out| out.read } assert_include 'Body content', result end test 'should print timings when -t flag is specified' do input = 'Sample *AsciiDoc*' invoker = nil error = nil redirect_streams do |_, err| invoker = invoke_cli(%w(-t -o /dev/null), '-') { input } error = err.string end refute_nil invoker refute_nil error assert_match(/Total time/, error) end test 'should show timezone as UTC if system TZ is set to UTC' do input_path = fixture_path 'doctime-localtime.adoc' output = run_command(asciidoctor_cmd, '-d', 'inline', '-o', '-', '-e', input_path, env: { 'TZ' => 'UTC', 'SOURCE_DATE_EPOCH' => nil, 'IGNORE_SOURCE_DATE_EPOCH' => '1' }) {|out| out.read } doctime, localtime = output.lines.map(&:chomp) assert doctime.end_with?(' UTC') assert localtime.end_with?(' UTC') end test 'should show timezone as offset if system TZ is not set to UTC' do input_path = fixture_path 'doctime-localtime.adoc' output = run_command(asciidoctor_cmd, '-d', 'inline', '-o', '-', '-e', input_path, env: { 'TZ' => 'EST+5', 'SOURCE_DATE_EPOCH' => nil, 'IGNORE_SOURCE_DATE_EPOCH' => '1' }) {|out| out.read } doctime, localtime = output.lines.map(&:chomp) assert doctime.end_with?(' -0500') assert localtime.end_with?(' -0500') end test 'should use SOURCE_DATE_EPOCH as modified time of input file and local time' do old_source_date_epoch = ENV.delete 'SOURCE_DATE_EPOCH' begin ENV['SOURCE_DATE_EPOCH'] = '1234123412' sample_filepath = fixture_path 'sample.adoc' invoker = invoke_cli_to_buffer %w(-o /dev/null), sample_filepath doc = invoker.document assert_equal '2009-02-08', (doc.attr 'docdate') assert_equal '2009', (doc.attr 'docyear') assert_match(/2009-02-08 20:03:32 UTC/, (doc.attr 'docdatetime')) assert_equal '2009-02-08', (doc.attr 'localdate') assert_equal '2009', (doc.attr 'localyear') assert_match(/2009-02-08 20:03:32 UTC/, (doc.attr 'localdatetime')) ensure if old_source_date_epoch ENV['SOURCE_DATE_EPOCH'] = old_source_date_epoch else ENV.delete 'SOURCE_DATE_EPOCH' end end end test 'should fail if SOURCE_DATE_EPOCH is malformed' do old_source_date_epoch = ENV.delete 'SOURCE_DATE_EPOCH' begin ENV['SOURCE_DATE_EPOCH'] = 'aaaaaaaa' sample_filepath = fixture_path 'sample.adoc' assert_equal 1, (invoke_cli_to_buffer %w(-o /dev/null), sample_filepath).code ensure if old_source_date_epoch ENV['SOURCE_DATE_EPOCH'] = old_source_date_epoch else ENV.delete 'SOURCE_DATE_EPOCH' end end end end asciidoctor-2.0.20/test/links_test.rb000066400000000000000000001457471443135032600176040ustar00rootroot00000000000000# frozen_string_literal: true require_relative 'test_helper' context 'Links' do test 'qualified url inline with text' do assert_xpath "//a[@href='http://asciidoc.org'][@class='bare'][text() = 'http://asciidoc.org']", convert_string("The AsciiDoc project is located at http://asciidoc.org.") end test 'qualified url with role inline with text' do assert_xpath "//a[@href='http://asciidoc.org'][@class='bare project'][text() = 'http://asciidoc.org']", convert_string("The AsciiDoc project is located at http://asciidoc.org[role=project].") end test 'qualified http url inline with hide-uri-scheme set' do assert_xpath "//a[@href='http://asciidoc.org'][@class='bare'][text() = 'asciidoc.org']", convert_string("The AsciiDoc project is located at http://asciidoc.org.", attributes: { 
'hide-uri-scheme' => '' }) end test 'qualified file url inline with label' do assert_xpath "//a[@href='file:///home/user/bookmarks.html'][text() = 'My Bookmarks']", convert_string_to_embedded('file:///home/user/bookmarks.html[My Bookmarks]') end test 'qualified file url inline with hide-uri-scheme set' do assert_xpath "//a[@href='file:///etc/app.conf'][text() = '/etc/app.conf']", convert_string('Edit the configuration file link:file:///etc/app.conf[]', attributes: { 'hide-uri-scheme' => '' }) end test 'should not hide bare URI scheme in implicit text of link macro when hide-uri-scheme is set' do { 'link:https://[]' => 'https://', 'link:ssh://[]' => 'ssh://', }.each do |input, expected| assert_xpath %(/a[text() = "#{expected}"]), (convert_inline_string input, attributes: { 'hide-uri-scheme' => '' }) end end test 'qualified url with label' do assert_xpath "//a[@href='http://asciidoc.org'][text() = 'AsciiDoc']", convert_string("We're parsing http://asciidoc.org[AsciiDoc] markup") end test 'qualified url with label containing escaped right square bracket' do assert_xpath "//a[@href='http://asciidoc.org'][text() = '[Ascii]Doc']", convert_string("We're parsing http://asciidoc.org[[Ascii\\]Doc] markup") end test 'qualified url with backslash label' do assert_xpath "//a[@href='https://google.com'][text() = 'Google for \\']", convert_string("I advise you to https://google.com[Google for +\\+]") end test 'qualified url with label using link macro' do assert_xpath "//a[@href='http://asciidoc.org'][text() = 'AsciiDoc']", convert_string("We're parsing link:http://asciidoc.org[AsciiDoc] markup") end test 'qualified url with role using link macro' do assert_xpath "//a[@href='http://asciidoc.org'][@class='bare project'][text() = 'http://asciidoc.org']", convert_string("We're parsing link:http://asciidoc.org[role=project] markup") end test 'qualified url using macro syntax with multi-line label inline with text' do assert_xpath %{//a[@href='http://asciidoc.org'][text() = 'AsciiDoc\nmarkup']}, convert_string("We're parsing link:http://asciidoc.org[AsciiDoc\nmarkup]") end test 'qualified url with label containing square brackets using link macro' do str = 'http://example.com[[bracket1\]]' doc = document_from_string str, standalone: false, doctype: 'inline' assert_match '[bracket1]', doc.convert, 1 doc = document_from_string str, standalone: false, backend: 'docbook', doctype: 'inline' assert_match '[bracket1]', doc.convert, 1 end test 'link macro with empty target' do input = 'Link to link:[this page].' output = convert_string_to_embedded input assert_xpath '//a', output, 1 assert_xpath '//a[@href=""]', output, 1 end test 'should not recognize link macro with double colons' do input = 'The link::http://example.org[example domain] is reserved for tests and documentation.' output = convert_string_to_embedded input assert_includes output, 'link::http://example.org[example domain]' end test 'qualified url surrounded by angled brackets' do assert_xpath '//a[@href="http://asciidoc.org"][text()="http://asciidoc.org"]', convert_string(' is the project page for AsciiDoc.'), 1 end test 'qualified url surrounded by round brackets' do assert_xpath '//a[@href="http://asciidoc.org"][text()="http://asciidoc.org"]', convert_string('(http://asciidoc.org) is the project page for AsciiDoc.'), 1 end test 'qualified url with trailing period' do result = convert_string_to_embedded 'The homepage for Asciidoctor is https://asciidoctor.org.' 
assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]', result, 1 assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]/following-sibling::text()[starts-with(.,".")]', result, 1 end test 'qualified url with trailing explanation point' do result = convert_string_to_embedded 'Check out https://asciidoctor.org!' assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]', result, 1 assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]/following-sibling::text()[starts-with(.,"!")]', result, 1 end test 'qualified url with trailing question mark' do result = convert_string_to_embedded 'Is the homepage for Asciidoctor https://asciidoctor.org?' assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]', result, 1 assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]/following-sibling::text()[starts-with(.,"?")]', result, 1 end test 'qualified url with trailing round bracket' do result = convert_string_to_embedded 'Asciidoctor is a Ruby-based AsciiDoc processor (see https://asciidoctor.org)' assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]', result, 1 assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]/following-sibling::text()[starts-with(.,")")]', result, 1 end test 'qualified url with trailing period followed by round bracket' do result = convert_string_to_embedded '(The homepage for Asciidoctor is https://asciidoctor.org.)' assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]', result, 1 assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]/following-sibling::text()[starts-with(.,".)")]', result, 1 end test 'qualified url with trailing exclamation point followed by round bracket' do result = convert_string_to_embedded '(Check out https://asciidoctor.org!)' assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]', result, 1 assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]/following-sibling::text()[starts-with(.,"!)")]', result, 1 end test 'qualified url with trailing question mark followed by round bracket' do result = convert_string_to_embedded '(Is the homepage for Asciidoctor https://asciidoctor.org?)' assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]', result, 1 assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]/following-sibling::text()[starts-with(.,"?)")]', result, 1 end test 'qualified url with trailing semi-colon' do result = convert_string_to_embedded 'https://asciidoctor.org; where text gets parsed' assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]', result, 1 assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]/following-sibling::text()[starts-with(.,";")]', result, 1 end test 'qualified url with trailing colon' do result = convert_string_to_embedded 'https://asciidoctor.org: where text gets parsed' assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]', result, 1 assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]/following-sibling::text()[starts-with(.,":")]', result, 1 end test 'qualified url in round brackets with trailing colon' do result = convert_string_to_embedded '(https://asciidoctor.org): where text gets 
parsed' assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]', result, 1 assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]/following-sibling::text()[starts-with(.,"):")]', result, 1 end test 'qualified url with trailing round bracket followed by colon' do result = convert_string_to_embedded '(from https://asciidoctor.org): where text gets parsed' assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]', result, 1 assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]/following-sibling::text()[starts-with(., "):")]', result, 1 end test 'qualified url in round brackets with trailing semi-colon' do result = convert_string_to_embedded '(https://asciidoctor.org); where text gets parsed' assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]', result, 1 assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]/following-sibling::text()[starts-with(., ");")]', result, 1 end test 'qualified url with trailing round bracket followed by semi-colon' do result = convert_string_to_embedded '(from https://asciidoctor.org); where text gets parsed' assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]', result, 1 assert_xpath '//a[@href="https://asciidoctor.org"][text()="https://asciidoctor.org"]/following-sibling::text()[starts-with(., ");")]', result, 1 end
test 'URI scheme with trailing characters should not be converted to a link' do input_sources = %w( (https://) http://; file://: <ftp://> ) expected_outputs = %w( (https://) http://; file://: &lt;ftp://&gt; ) input_sources.each_with_index do |input_source, i| expected_output = expected_outputs[i] actual = block_from_string input_source assert_equal expected_output, actual.content end end
test 'qualified url containing round brackets' do assert_xpath '//a[@href="http://jruby.org/apidocs/org/jruby/Ruby.html#addModule(org.jruby.RubyModule)"][text()="addModule() adds a Ruby module"]', convert_string('http://jruby.org/apidocs/org/jruby/Ruby.html#addModule(org.jruby.RubyModule)[addModule() adds a Ruby module]'), 1 end test 'qualified url adjacent to text in square brackets' do assert_xpath '//a[@href="http://asciidoc.org"][text()="AsciiDoc"]', convert_string(']http://asciidoc.org[AsciiDoc] project page.'), 1 end test 'qualified url adjacent to text in round brackets' do assert_xpath '//a[@href="http://asciidoc.org"][text()="AsciiDoc"]', convert_string(')http://asciidoc.org[AsciiDoc] project page.'), 1 end test 'qualified url following no-break space' do assert_xpath '//a[@href="http://asciidoc.org"][text()="AsciiDoc"]', convert_string(%(#{[0xa0].pack 'U1'}http://asciidoc.org[AsciiDoc] project page.)), 1 end
test 'qualified url following smart apostrophe' do output = convert_string_to_embedded("l’http://www.irit.fr[IRIT]") assert_match(/l’<a href=/, output) end test 'should convert qualified url as macro enclosed in double quotes' do output = convert_string_to_embedded('"https://asciidoctor.org[]"') assert_include '"<a href="https://asciidoctor.org" class="bare">https://asciidoctor.org</a>"', output end test 'should convert qualified url as macro with trailing period' do result = convert_string_to_embedded 'Information about the https://symbols.example.org/.[.] character.'
assert_xpath '//a[@href="https://symbols.example.org/."][text()="."]', result, 1 end test 'should convert qualified url as macro enclosed in single quotes' do output = convert_string_to_embedded('\'https://asciidoctor.org[]\'') assert_include '\'https://asciidoctor.org\'', output end test 'qualified url using invalid link macro should not create link' do assert_xpath '//a', convert_string('link:http://asciidoc.org is the project page for AsciiDoc.'), 0 end test 'escaped inline qualified url should not create link' do assert_xpath '//a', convert_string('\http://asciidoc.org is the project page for AsciiDoc.'), 0 end test 'escaped inline qualified url as macro should not create link' do output = convert_string '\http://asciidoc.org[asciidoc.org] is the project page for AsciiDoc.' assert_xpath '//a', output, 0 assert_xpath '//p[starts-with(text(), "http://asciidoc.org[asciidoc.org]")]', output, 1 end test 'url in link macro with at (@) sign should not create mailto link' do assert_xpath '//a[@href="http://xircles.codehaus.org/lists/dev@geb.codehaus.org"][text()="subscribe"]', convert_string('http://xircles.codehaus.org/lists/dev@geb.codehaus.org[subscribe]') end test 'implicit url with at (@) sign should not create mailto link' do assert_xpath '//a[@href="http://xircles.codehaus.org/lists/dev@geb.codehaus.org"][text()="http://xircles.codehaus.org/lists/dev@geb.codehaus.org"]', convert_string('http://xircles.codehaus.org/lists/dev@geb.codehaus.org') end test 'escaped inline qualified url using macro syntax should not create link' do assert_xpath '//a', convert_string('\http://asciidoc.org[AsciiDoc] is the key to good docs.'), 0 end test 'inline qualified url followed by a newline should not include newline in link' do assert_xpath '//a[@href="https://github.com/asciidoctor"]', convert_string("The source code for Asciidoctor can be found at https://github.com/asciidoctor\nwhich is a GitHub organization."), 1 end test 'qualified url divided by newline using macro syntax should not create link' do assert_xpath '//a', convert_string("The source code for Asciidoctor can be found at link:https://github.com/asciidoctor\n[]which is a GitHub organization."), 0 end test 'qualified url containing whitespace using macro syntax should not create link' do assert_xpath '//a', convert_string('I often need to refer to the chapter on link:http://asciidoc.org?q=attribute references[Attribute References].'), 0 end test 'qualified url containing an encoded space using macro syntax should create a link' do assert_xpath '//a', convert_string('I often need to refer to the chapter on link:http://asciidoc.org?q=attribute%20references[Attribute References].'), 1 end test 'inline quoted qualified url should not consume surrounding angled brackets' do assert_xpath '//a[@href="https://github.com/asciidoctor"]', convert_string('Asciidoctor GitHub organization: <**https://github.com/asciidoctor**>'), 1 end test 'link with quoted text should not be separated into attributes when text contains an equal sign' do assert_xpath '//a[@href="http://search.example.com"][text()="Google, Yahoo, Bing = Search Engines"]', convert_string_to_embedded('http://search.example.com["Google, Yahoo, Bing = Search Engines"]'), 1 end test 'should leave link text as is if it contains an equals sign but no attributes are found' do assert_xpath %(//a[@href="https://example.com"][text()="What You Need\n= What You Get"]), convert_string_to_embedded(%(https://example.com[What You Need\n= What You Get])), 1 end test 'link with quoted text but no equal 
sign should carry quotes over to output' do assert_xpath %(//a[@href="http://search.example.com"][text()='"Google, Yahoo, Bing"']), convert_string_to_embedded('http://search.example.com["Google, Yahoo, Bing"]'), 1 end test 'link with comma in text but no equal sign should not be separated into attributes' do assert_xpath '//a[@href="http://search.example.com"][text()="Google, Yahoo, Bing"]', convert_string_to_embedded('http://search.example.com[Google, Yahoo, Bing]'), 1 end test 'link with formatted wrapped text should not be separated into attributes' do result = convert_string_to_embedded %(https://example.com[[.role]#Foo\nBar#]) assert_include %(Foo\nBar), result end test 'should process role and window attributes on link' do assert_xpath '//a[@href="http://google.com"][@class="external"][@target="_blank"]', convert_string_to_embedded('http://google.com[Google, role=external, window="_blank"]'), 1 end test 'should parse link with wrapped text that includes attributes' do result = convert_string_to_embedded %(https://example.com[Foo\nBar,role=foobar]) assert_include %(Foo Bar), result end test 'link macro with attributes but no text should use URL as text' do url = 'https://fonts.googleapis.com/css?family=Roboto:400,400italic,' assert_xpath %(//a[@href="#{url}"][text()="#{url}"]), convert_string_to_embedded(%(link:#{url}[family=Roboto,weight=400])), 1 end test 'link macro with attributes but blank text should use URL as text' do url = 'https://fonts.googleapis.com/css?family=Roboto:400,400italic,' assert_xpath %(//a[@href="#{url}"][text()="#{url}"]), convert_string_to_embedded(%(link:#{url}[,family=Roboto,weight=400])), 1 end test 'link macro with comma but no explicit attributes in text should not parse text' do url = 'https://fonts.googleapis.com/css?family=Roboto:400,400italic,' assert_xpath %(//a[@href="#{url}"][text()="Roboto,400"]), convert_string_to_embedded(%(link:#{url}[Roboto,400])), 1 end test 'link macro should support id and role attributes' do url = 'https://fonts.googleapis.com/css?family=Roboto:400' assert_xpath %(//a[@href="#{url}"][@id="roboto-regular"][@class="bare font"][text()="#{url}"]), convert_string_to_embedded(%(link:#{url}[,id=roboto-regular,role=font])), 1 end test 'link text that ends in ^ should set link window to _blank' do assert_xpath '//a[@href="http://google.com"][@target="_blank"]', convert_string_to_embedded('http://google.com[Google^]'), 1 end test 'rel=noopener should be added to a link that targets the _blank window' do assert_xpath '//a[@href="http://google.com"][@target="_blank"][@rel="noopener"]', convert_string_to_embedded('http://google.com[Google^]'), 1 end test 'rel=noopener should be added to a link that targets a named window when the noopener option is set' do assert_xpath '//a[@href="http://google.com"][@target="name"][@rel="noopener"]', convert_string_to_embedded('http://google.com[Google,window=name,opts=noopener]'), 1 end test 'rel=noopener should not be added to a link if it does not target a window' do result = convert_string_to_embedded 'http://google.com[Google,opts=noopener]' assert_xpath '//a[@href="http://google.com"]', result, 1 assert_xpath '//a[@href="http://google.com"][@rel="noopener"]', result, 0 end test 'rel=nofollow should be added to a link when the nofollow option is set' do assert_xpath '//a[@href="http://google.com"][@target="name"][@rel="nofollow noopener"]', convert_string_to_embedded('http://google.com[Google,window=name,opts="nofollow,noopener"]'), 1 end test 'id attribute on link is processed' do assert_xpath 
'//a[@href="http://google.com"][@id="link-1"]', convert_string_to_embedded('http://google.com[Google, id="link-1"]'), 1 end test 'title attribute on link is processed' do assert_xpath '//a[@href="http://google.com"][@title="title-1"]', convert_string_to_embedded('http://google.com[Google, title="title-1"]'), 1 end test 'inline irc link' do assert_xpath '//a[@href="irc://irc.freenode.net"][text()="irc://irc.freenode.net"]', convert_string_to_embedded('irc://irc.freenode.net'), 1 end test 'inline irc link with text' do assert_xpath '//a[@href="irc://irc.freenode.net"][text()="Freenode IRC"]', convert_string_to_embedded('irc://irc.freenode.net[Freenode IRC]'), 1 end test 'inline ref' do variations = %w([[tigers]] anchor:tigers[]) variations.each do |anchor| doc = document_from_string %(Here you can read about tigers.#{anchor}) output = doc.convert assert_kind_of Asciidoctor::Inline, doc.catalog[:refs]['tigers'] assert_nil doc.catalog[:refs]['tigers'].text assert_xpath '//a[@id="tigers"]', output, 1 assert_xpath '//a[@id="tigers"]/child::text()', output, 0 end end test 'escaped inline ref' do variations = %w([[tigers]] anchor:tigers[]) variations.each do |anchor| doc = document_from_string %(Here you can read about tigers.\\#{anchor}) output = doc.convert refute doc.catalog[:refs].key?('tigers') assert_xpath '//a[@id="tigers"]', output, 0 end end test 'inline ref can start with colon' do input = '[[:idname]] text' output = convert_string_to_embedded input assert_xpath '//a[@id=":idname"]', output, 1 end test 'inline ref cannot start with digit' do input = '[[1-install]] text' output = convert_string_to_embedded input assert_includes output, '[[1-install]]' assert_xpath '//a[@id = "1-install"]', output, 0 end test 'reftext of shorthand inline ref cannot resolve to empty' do input = '[[no-such-id,{empty}]]text' doc = document_from_string input assert_empty doc.catalog[:refs] output = doc.convert standalone: false assert_includes output, (input.sub '{empty}', '') end test 'reftext of macro inline ref can resolve to empty' do input = 'anchor:id-only[{empty}]text\n\nsee <>' doc = document_from_string input assert doc.catalog[:refs].key? 
'id-only' output = doc.convert standalone: false assert_xpath '//a[@id="id-only"]', output, 1 assert_xpath '//a[@href="#id-only"]', output, 1 assert_xpath '//a[@href="#id-only"][text()="[id-only]"]', output, 1 end test 'inline ref with reftext' do %w([[tigers,Tigers]] anchor:tigers[Tigers]).each do |anchor| doc = document_from_string %(Here you can read about tigers.#{anchor}) output = doc.convert assert_kind_of Asciidoctor::Inline, doc.catalog[:refs]['tigers'] assert_equal 'Tigers', doc.catalog[:refs]['tigers'].text assert_xpath '//a[@id="tigers"]', output, 1 assert_xpath '//a[@id="tigers"]/child::text()', output, 0 end end test 'should encode double quotes in reftext of anchor macro in DocBook output' do input = 'anchor:uncola[the "un"-cola]' result = convert_inline_string input, backend: :docbook assert_equal '', result end test 'should substitute attribute references in reftext when registering inline ref' do %w([[tigers,{label-tigers}]] anchor:tigers[{label-tigers}]).each do |anchor| doc = document_from_string %(Here you can read about tigers.#{anchor}), attributes: { 'label-tigers' => 'Tigers' } doc.convert assert_kind_of Asciidoctor::Inline, doc.catalog[:refs]['tigers'] assert_equal 'Tigers', doc.catalog[:refs]['tigers'].text end end test 'inline ref with reftext converted to DocBook' do %w([[tigers,]] anchor:tigers[]).each do |anchor| doc = document_from_string %(Here you can read about tigers.#{anchor}), backend: :docbook output = doc.convert standalone: false assert_kind_of Asciidoctor::Inline, doc.catalog[:refs]['tigers'] assert_equal '', doc.catalog[:refs]['tigers'].text assert_includes output, '' end end test 'does not match bibliography anchor in prose when scanning for inline anchor' do doc = document_from_string 'Use [[[label]]] to assign a label to a bibliography entry.' refute doc.catalog[:refs].key? 
'label' end test 'repeating inline anchor macro with empty reftext' do input = 'anchor:one[] anchor:two[] anchor:three[]' result = convert_inline_string input assert_equal ' ', result end test 'mixed inline anchor macro and anchor shorthand with empty reftext' do input = 'anchor:one[][[two]]anchor:three[][[four]]anchor:five[]' result = convert_inline_string input assert_equal '', result end test 'assigns xreflabel value for anchor macro without reftext in DocBook output' do ['anchor:foo[]bar', '[[foo]]bar'].each do |input| result = convert_inline_string input, backend: :docbook assert_equal 'bar', result end end test 'unescapes square bracket in reftext of anchor macro' do input = <<~'EOS' see <> anchor:foo[b[a\]r]tex EOS result = convert_string_to_embedded input assert_includes result, 'see b[a]r' end test 'unescapes square bracket in reftext of anchor macro in DocBook output' do input = 'anchor:foo[b[a\]r]' result = convert_inline_string input, backend: :docbook assert_equal '', result end test 'xref using angled bracket syntax' do doc = document_from_string '<>' doc.register :refs, ['tigers', (Asciidoctor::Inline.new doc, :anchor, '[tigers]', type: :ref, target: 'tigers'), '[tigers]'] assert_xpath '//a[@href="#tigers"][text() = "[tigers]"]', doc.convert, 1 end test 'xref using angled bracket syntax with explicit hash' do doc = document_from_string '<<#tigers>>' doc.register :refs, ['tigers', (Asciidoctor::Inline.new doc, :anchor, 'Tigers', type: :ref, target: 'tigers'), 'Tigers'] assert_xpath '//a[@href="#tigers"][text() = "Tigers"]', doc.convert, 1 end test 'xref using angled bracket syntax with label' do input = <<~'EOS' <> [#tigers] == Tigers EOS assert_xpath '//a[@href="#tigers"][text() = "About Tigers"]', convert_string(input), 1 end test 'xref should use title of target as link text when no explicit reftext is specified' do input = <<~'EOS' <> [#tigers] == Tigers EOS assert_xpath '//a[@href="#tigers"][text() = "Tigers"]', convert_string(input), 1 end test 'xref should use title of target as link text when explicit link text is empty' do input = <<~'EOS' <> [#tigers] == Tigers EOS assert_xpath '//a[@href="#tigers"][text() = "Tigers"]', convert_string(input), 1 end test 'xref using angled bracket syntax with quoted label' do input = <<~'EOS' <> [#tigers] == Tigers EOS assert_xpath %q(//a[@href="#tigers"][text() = '"About Tigers"']), convert_string(input), 1 end test 'should not interpret path sans extension in xref with angled bracket syntax in compat mode' do using_memory_logger do |logger| doc = document_from_string '<>', standalone: false, attributes: { 'compat-mode' => '' } assert_xpath '//a[@href="#tigers#"][text() = "[tigers#]"]', doc.convert, 1 end end test 'xref using angled bracket syntax with path sans extension' do doc = document_from_string '<>', standalone: false assert_xpath '//a[@href="tigers.html"][text() = "tigers.html"]', doc.convert, 1 end test 'inter-document xref shorthand syntax should assume AsciiDoc extension if AsciiDoc extension not present' do { 'using-.net-web-services#' => 'Using .NET web services', 'asciidoctor.1#' => 'Asciidoctor Manual', 'path/to/document#' => 'Document Title', }.each do |target, text| result = convert_string_to_embedded %(<<#{target},#{text}>>) assert_xpath %(//a[@href="#{target.chop}.html"][text()="#{text}"]), result, 1 end end test 'xref macro with explicit inter-document target should assume implicit AsciiDoc file extension if no file extension is present' do { 'using-.net-web-services#' => 'Using .NET web services', 
'asciidoctor.1#' => 'Asciidoctor Manual', }.each do |target, text| result = convert_string_to_embedded %(xref:#{target}[#{text}]) assert_xpath %(//a[@href="#{target.chop}"][text()="#{text}"]), result, 1 end { 'document#' => 'Document Title', 'path/to/document#' => 'Document Title', 'include.d/document#' => 'Document Title', }.each do |target, text| result = convert_string_to_embedded %(xref:#{target}[#{text}]) assert_xpath %(//a[@href="#{target.chop}.html"][text()="#{text}"]), result, 1 end end test 'xref macro with implicit inter-document target should preserve path with file extension' do { 'refcard.pdf' => 'Refcard', 'asciidoctor.1' => 'Asciidoctor Manual', }.each do |path, text| result = convert_string_to_embedded %(xref:#{path}[#{text}]) assert_xpath %(//a[@href="#{path}"][text()="#{text}"]), result, 1 end { 'sections.d/first' => 'First Section', }.each do |path, text| result = convert_string_to_embedded %(xref:#{path}[#{text}]) assert_xpath %(//a[@href="##{path}"][text()="#{text}"]), result, 1 end end test 'inter-document xref should only remove the file extension part if the path contains a period elsewhere' do result = convert_string_to_embedded '<>' assert_xpath '//a[@href="using-.net-web-services.html"][text() = "Using .NET web services"]', result, 1 end test 'xref macro target containing dot should be interpreted as a path unless prefixed by #' do result = convert_string_to_embedded 'xref:using-.net-web-services[Using .NET web services]' assert_xpath '//a[@href="using-.net-web-services"][text() = "Using .NET web services"]', result, 1 result = convert_string_to_embedded 'xref:#using-.net-web-services[Using .NET web services]' assert_xpath '//a[@href="#using-.net-web-services"][text() = "Using .NET web services"]', result, 1 end test 'should not interpret double underscore in target of xref macro if sequence is preceded by a backslash' do result = convert_string_to_embedded 'xref:doc\__with_double__underscore.adoc[text]' assert_xpath '//a[@href="doc__with_double__underscore.html"][text() = "text"]', result, 1 end test 'should not interpret double underscore in target of xref shorthand if sequence is preceded by a backslash' do result = convert_string_to_embedded '<>' assert_xpath '//a[@href="doc__with_double__underscore.html"][text() = "text"]', result, 1 end test 'xref using angled bracket syntax with path sans extension using docbook backend' do doc = document_from_string '<>', standalone: false, backend: 'docbook' assert_match 'tigers.xml', doc.convert, 1 end test 'xref using angled bracket syntax with ancestor path sans extension' do doc = document_from_string '<<../tigers#,tigers>>', standalone: false assert_xpath '//a[@href="../tigers.html"][text() = "tigers"]', doc.convert, 1 end test 'xref using angled bracket syntax with absolute path sans extension' do doc = document_from_string '<>', standalone: false assert_xpath '//a[@href="/path/to/tigers.html"][text() = "tigers"]', doc.convert, 1 end test 'xref using angled bracket syntax with path and extension' do using_memory_logger do |logger| doc = document_from_string '<>', standalone: false assert_xpath '//a[@href="#tigers.adoc"][text() = "[tigers.adoc]"]', doc.convert, 1 end end test 'xref using angled bracket syntax with path and extension with hash' do doc = document_from_string '<>', standalone: false assert_xpath '//a[@href="tigers.html"][text() = "tigers.html"]', doc.convert, 1 end test 'xref using angled bracket syntax with path and extension with fragment' do doc = document_from_string '<>', standalone: false 
assert_xpath '//a[@href="tigers.html#id"][text() = "tigers.html"]', doc.convert, 1 end test 'xref using macro syntax with path and extension in compat mode' do using_memory_logger do |logger| doc = document_from_string 'xref:tigers.adoc[]', standalone: false, attributes: { 'compat-mode' => '' } assert_xpath '//a[@href="#tigers.adoc"][text() = "[tigers.adoc]"]', doc.convert, 1 end end test 'xref using macro syntax with path and extension' do doc = document_from_string 'xref:tigers.adoc[]', standalone: false assert_xpath '//a[@href="tigers.html"][text() = "tigers.html"]', doc.convert, 1 end test 'xref using angled bracket syntax with path and fragment' do doc = document_from_string '<>', standalone: false assert_xpath '//a[@href="tigers.html#about"][text() = "tigers.html"]', doc.convert, 1 end test 'xref using angled bracket syntax with path, fragment and text' do doc = document_from_string '<>', standalone: false assert_xpath '//a[@href="tigers.html#about"][text() = "About Tigers"]', doc.convert, 1 end test 'xref using angled bracket syntax with path and custom relfilesuffix and outfilesuffix' do attributes = { 'relfileprefix' => '../', 'outfilesuffix' => '/' } doc = document_from_string '<>', standalone: false, attributes: attributes assert_xpath '//a[@href="../tigers/#about"][text() = "About Tigers"]', doc.convert, 1 end test 'xref using angled bracket syntax with path and custom relfilesuffix' do attributes = { 'relfilesuffix' => '/' } doc = document_from_string '<>', standalone: false, attributes: attributes assert_xpath '//a[@href="tigers/#about"][text() = "About Tigers"]', doc.convert, 1 end test 'xref using angled bracket syntax with path which has been included in this document' do using_memory_logger do |logger| in_verbose_mode do doc = document_from_string '<>', standalone: false doc.catalog[:includes]['tigers'] = true output = doc.convert assert_xpath '//a[@href="#about"][text() = "About Tigers"]', output, 1 assert_message logger, :INFO, 'possible invalid reference: about' end end end test 'xref using angled bracket syntax with nested path which has been included in this document' do using_memory_logger do |logger| in_verbose_mode do doc = document_from_string '<>', standalone: false doc.catalog[:includes]['part1/tigers'] = true output = doc.convert assert_xpath '//a[@href="#about"][text() = "About Tigers"]', output, 1 assert_message logger, :INFO, 'possible invalid reference: about' end end end test 'xref using angled bracket syntax inline with text' do input = <<~'EOS' Want to learn <>? [#tigers] == Tigers EOS assert_xpath '//a[@href="#tigers"][text() = "about tigers"]', convert_string(input), 1 end test 'xref using angled bracket syntax with multi-line label inline with text' do input = <<~'EOS' Want to learn <>? [#tigers] == Tigers EOS assert_xpath %{//a[@href="#tigers"][normalize-space(text()) = "about tigers"]}, convert_string(input), 1 end test 'xref with escaped text' do # when \x0 was used as boundary character for passthrough, it was getting stripped # now using unicode marks as boundary characters, which resolves issue input = <<~'EOS' See the <> section for details about tigers. 
[#tigers] == Tigers EOS output = convert_string_to_embedded input assert_xpath %(//a[@href="#tigers"]/code[text()="[tigers]"]), output, 1 end test 'xref with target that begins with attribute reference in title' do ['<<{lessonsdir}/lesson-1#,Lesson 1>>', 'xref:{lessonsdir}/lesson-1.adoc[Lesson 1]'].each do |xref| input = <<~EOS :lessonsdir: lessons [#lesson-1-listing] == #{xref} A summary of the first lesson. EOS output = convert_string_to_embedded input assert_xpath '//h2/a[@href="lessons/lesson-1.html"]', output, 1 end end test 'xref using macro syntax' do doc = document_from_string 'xref:tigers[]' doc.register :refs, ['tigers', (Asciidoctor::Inline.new doc, :anchor, '[tigers]', type: :ref, target: 'tigers'), '[tigers]'] assert_xpath '//a[@href="#tigers"][text() = "[tigers]"]', doc.convert, 1 end test 'multiple xref macros with implicit text in single line' do input = <<~'EOS' This document has two sections, xref:sect-a[] and xref:sect-b[]. [#sect-a] == Section A [#sect-b] == Section B EOS result = convert_string_to_embedded input assert_xpath '//a[@href="#sect-a"][text() = "Section A"]', result, 1 assert_xpath '//a[@href="#sect-b"][text() = "Section B"]', result, 1 end test 'xref using macro syntax with explicit hash' do doc = document_from_string 'xref:#tigers[]' doc.register :refs, ['tigers', (Asciidoctor::Inline.new doc, :anchor, 'Tigers', type: :ref, target: 'tigers'), 'Tigers'] assert_xpath '//a[@href="#tigers"][text() = "Tigers"]', doc.convert, 1 end test 'xref using macro syntax with label' do input = <<~'EOS' xref:tigers[About Tigers] [#tigers] == Tigers EOS assert_xpath '//a[@href="#tigers"][text() = "About Tigers"]', convert_string(input), 1 end test 'xref using macro syntax inline with text' do input = <<~'EOS' Want to learn xref:tigers[about tigers]? [#tigers] == Tigers EOS assert_xpath '//a[@href="#tigers"][text() = "about tigers"]', convert_string(input), 1 end test 'xref using macro syntax with multi-line label inline with text' do input = <<~'EOS' Want to learn xref:tigers[about tigers]? [#tigers] == Tigers EOS assert_xpath %{//a[@href="#tigers"][normalize-space(text()) = "about tigers"]}, convert_string(input), 1 end test 'xref using macro syntax with text that ends with an escaped closing bracket' do input = <<~'EOS' xref:tigers[[tigers\]] [#tigers] == Tigers EOS assert_xpath '//a[@href="#tigers"][text() = "[tigers]"]', convert_string_to_embedded(input), 1 end test 'xref using macro syntax with text that contains an escaped closing bracket' do input = <<~'EOS' xref:tigers[[tigers\] are cats] [#tigers] == Tigers EOS assert_xpath '//a[@href="#tigers"][text() = "[tigers] are cats"]', convert_string_to_embedded(input), 1 end test 'unescapes square bracket in reftext used by xref' do input = <<~'EOS' anchor:foo[b[a\]r]about see <> EOS result = convert_string_to_embedded input assert_xpath '//a[@href="#foo"]', result, 1 assert_xpath '//a[@href="#foo"][text()="b[a]r"]', result, 1 end test 'xref using invalid macro syntax does not create link' do doc = document_from_string 'xref:tigers' doc.register :refs, ['tigers', (Asciidoctor::Inline.new doc, :anchor, 'Tigers', type: :ref, target: 'tigers'), 'Tigers'] assert_xpath '//a', doc.convert, 0 end test 'should warn and create link if verbose flag is set and reference is not found' do input = <<~'EOS' [#foobar] == Foobar == Section B See <>. 
EOS using_memory_logger do |logger| in_verbose_mode do output = convert_string_to_embedded input assert_xpath '//a[@href="#foobaz"][text() = "[foobaz]"]', output, 1 assert_message logger, :INFO, 'possible invalid reference: foobaz' end end end test 'should not warn if verbose flag is set and reference is found in compat mode' do input = <<~'EOS' [[foobar]] == Foobar == Section B See <>. EOS using_memory_logger do |logger| in_verbose_mode do output = convert_string_to_embedded input, attributes: { 'compat-mode' => '' } assert_xpath '//a[@href="#foobar"][text() = "Foobar"]', output, 1 assert_empty logger end end end test 'should warn and create link if verbose flag is set and reference using # notation is not found' do input = <<~'EOS' [#foobar] == Foobar == Section B See <<#foobaz>>. EOS using_memory_logger do |logger| in_verbose_mode do output = convert_string_to_embedded input assert_xpath '//a[@href="#foobaz"][text() = "[foobaz]"]', output, 1 assert_message logger, :INFO, 'possible invalid reference: foobaz' end end end test 'should produce an internal anchor from an inter-document xref to file included into current file' do input = <<~'EOS' = Book Title :doctype: book [#ch1] == Chapter 1 So it begins. Read <> to find out what happens next! include::other-chapters.adoc[] EOS doc = document_from_string input, safe: :safe, base_dir: fixturedir assert doc.catalog[:includes].key?('other-chapters') assert doc.catalog[:includes]['other-chapters'] output = doc.convert assert_xpath '//a[@href="#ch2"][text()="Chapter 2"]', output, 1 end test 'should produce an internal anchor from an inter-document xref to file included entirely into current file using tags' do input = <<~'EOS' = Book Title :doctype: book [#ch1] == Chapter 1 So it begins. Read <> to find out what happens next! include::other-chapters.adoc[tags=**] EOS output = convert_string_to_embedded input, safe: :safe, base_dir: fixturedir assert_xpath '//a[@href="#ch2"][text()="Chapter 2"]', output, 1 end test 'should not produce an internal anchor for inter-document xref to file partially included into current file' do input = <<~'EOS' = Book Title :doctype: book [#ch1] == Chapter 1 So it begins. Read <> to find out what happens next! include::other-chapters.adoc[tags=ch2] EOS doc = document_from_string input, safe: :safe, base_dir: fixturedir assert doc.catalog[:includes].key?('other-chapters') refute doc.catalog[:includes]['other-chapters'] output = doc.convert assert_xpath '//a[@href="other-chapters.html#ch2"][text()="the next chapter"]', output, 1 end test 'should produce an internal anchor for inter-document xref to file included fully and partially' do input = <<~'EOS' = Book Title :doctype: book [#ch1] == Chapter 1 So it begins. Read <> to find out what happens next! include::other-chapters.adoc[] include::other-chapters.adoc[tag=ch2-noid] EOS doc = document_from_string input, safe: :safe, base_dir: fixturedir assert doc.catalog[:includes].key?('other-chapters') assert doc.catalog[:includes]['other-chapters'] output = doc.convert assert_xpath '//a[@href="#ch2"][text()="the next chapter"]', output, 1 end test 'should warn and create link if debug mode is enabled, inter-document xref points to current doc, and reference not found' do input = <<~'EOS' [#foobar] == Foobar == Section B See <>. 
EOS using_memory_logger do |logger| in_verbose_mode do output = convert_string_to_embedded input, attributes: { 'docname' => 'test' } assert_xpath '//a[@href="#foobaz"][text() = "[foobaz]"]', output, 1 assert_message logger, :INFO, 'possible invalid reference: foobaz' end end end test 'should use doctitle as fallback link text if inter-document xref points to current doc and no link text is provided' do input = <<~'EOS' = Links & Stuff at https://example.org See xref:test.adoc[] EOS output = convert_string_to_embedded input, attributes: { 'docname' => 'test' } assert_include 'Links & Stuff at https://example.org', output end test 'should use doctitle of root document as fallback link text for inter-document xref in AsciiDoc table cell that resolves to current doc' do input = <<~'EOS' = Document Title |=== a|See xref:test.adoc[] |=== EOS output = convert_string_to_embedded input, attributes: { 'docname' => 'test' } assert_include 'Document Title', output end test 'should use reftext on document as fallback link text if inter-document xref points to current doc and no link text is provided' do input = <<~'EOS' [reftext="Links and Stuff"] = Links & Stuff See xref:test.adoc[] EOS output = convert_string_to_embedded input, attributes: { 'docname' => 'test' } assert_include 'Links and Stuff', output end test 'should use reftext on document as fallback link text if xref points to empty fragment and no link text is provided' do input = <<~'EOS' [reftext="Links and Stuff"] = Links & Stuff See xref:#[] EOS output = convert_string_to_embedded input, attributes: { 'docname' => 'test' } assert_include 'Links and Stuff', output end test 'should use fallback link text if inter-document xref points to current doc without header and no link text is provided' do input = <<~'EOS' See xref:test.adoc[] EOS output = convert_string_to_embedded input, attributes: { 'docname' => 'test' } assert_include '[^top]', output end test 'should use fallback link text if fragment of internal xref is empty and no link text is provided' do input = <<~'EOS' See xref:#[] EOS output = convert_string_to_embedded input, attributes: { 'docname' => 'test' } assert_include '[^top]', output end test 'should use document id as linkend for self xref in DocBook backend' do input = <<~'EOS' [#docid] = Document Title See xref:test.adoc[] EOS output = convert_string_to_embedded input, backend: :docbook, attributes: { 'docname' => 'test' } assert_include '', output end test 'should auto-generate document id to use as linkend for self xref in DocBook backend' do input = <<~'EOS' = Document Title See xref:test.adoc[] EOS doc = document_from_string input, backend: :docbook, attributes: { 'docname' => 'test' } assert_nil doc.id output = doc.convert assert_nil doc.id assert_include ' xml:id="__article-root__"', output assert_include '', output end test 'should produce an internal anchor for inter-document xref to file outside of base directory' do input = <<~'EOS' = Document Title See <<../section-a.adoc#section-a>>. 
include::../section-a.adoc[] EOS doc = document_from_string input, safe: :unsafe, base_dir: (File.join fixturedir, 'subdir') assert_includes doc.catalog[:includes], '../section-a' output = doc.convert standalone: false assert_xpath '//a[@href="#section-a"][text()="Section A"]', output, 1 end test 'xref uses title of target as label for forward and backward references in html output' do input = <<~'EOS' == Section A <<_section_b>> == Section B <<_section_a>> EOS output = convert_string_to_embedded input assert_xpath '//h2[@id="_section_a"][text()="Section A"]', output, 1 assert_xpath '//a[@href="#_section_a"][text()="Section A"]', output, 1 assert_xpath '//h2[@id="_section_b"][text()="Section B"]', output, 1 assert_xpath '//a[@href="#_section_b"][text()="Section B"]', output, 1 end test 'should not fail to resolve broken xref in title of block with ID' do input = <<~'EOS' [#p1] .<> paragraph text EOS output = convert_string_to_embedded input assert_xpath '//*[@class="title"]/a[@href="#DNE"][text()="[DNE]"]', output, 1 end test 'should resolve forward xref in title of block with ID' do input = <<~'EOS' [#p1] .<> paragraph text [#conclusion] == Conclusion EOS output = convert_string_to_embedded input assert_xpath '//*[@class="title"]/a[@href="#conclusion"][text()="Conclusion"]', output, 1 end test 'should not fail to resolve broken xref in section title' do input = <<~'EOS' [#s1] == <> == <> EOS output = convert_string_to_embedded input assert_xpath '//h2[@id="s1"]/a[@href="#DNE"][text()="[DNE]"]', output, 1 assert_xpath '//h2/a[@href="#s1"][text()="[DNE]"]', output, 1 end test 'should break circular xref reference in section title' do input = <<~'EOS' [#a] == A <> [#b] == B <> EOS output = convert_string_to_embedded input assert_includes output, '

<h2 id="a">A <a href="#b">B [a]</a></h2>' assert_includes output, '<h2 id="b">B <a href="#a">[a]</a></h2>' end
test 'should drop nested anchor in xreftext' do input = <<~'EOS' [#a] == See <<b>> [#b] == Consult https://google.com[Google] EOS output = convert_string_to_embedded input assert_includes output, '<h2 id="a">See <a href="#b">Consult Google</a></h2>' end
test 'should not resolve forward xref evaluated during parsing' do input = <<~'EOS' [#s1] == <> == <> [#forward] == Forward EOS output = convert_string_to_embedded input assert_xpath '//a[@href="#forward"][text()="Forward"]', output, 0 end
test 'should not resolve forward natural xref evaluated during parsing' do input = <<~'EOS' :idprefix: [#s1] == <> == <> == Forward EOS output = convert_string_to_embedded input assert_xpath '//a[@href="#forward"][text()="Forward"]', output, 0 end
test 'should resolve first matching natural xref' do input = <<~'EOS' see <<Section Title>
    > [#s1] == Section Title [#s2] == Section Title EOS output = convert_string_to_embedded input assert_xpath '//a[@href="#s1"]', output, 1 assert_xpath '//a[@href="#s1"][text()="Section Title"]', output, 1 end test 'should not match numeric character references while searching for fragment in xref target' do input = <<~'EOS' see < Tiger>> == Cub => Tiger EOS output = convert_string_to_embedded input assert_xpath '//a[@href="#_cub_tiger"]', output, 1 assert_xpath %(//a[@href="#_cub_tiger"][text()="Cub #{decode_char 8658} Tiger"]), output, 1 end test 'should not match numeric character references in path of interdocument xref' do input = <<~'EOS' see xref:{cpp}[{cpp}]. EOS output = convert_string_to_embedded input assert_includes output, 'C++' end test 'anchor creates reference' do doc = document_from_string '[[tigers]]Tigers roam here.' ref = doc.catalog[:refs]['tigers'] refute_nil ref assert_nil ref.reftext end test 'anchor with label creates reference' do doc = document_from_string '[[tigers,Tigers]]Tigers roam here.' ref = doc.catalog[:refs]['tigers'] refute_nil ref assert_equal 'Tigers', ref.reftext end test 'anchor with quoted label creates reference with quoted label text' do doc = document_from_string %([[tigers,"Tigers roam here"]]Tigers roam here.) ref = doc.catalog[:refs]['tigers'] refute_nil ref assert_equal '"Tigers roam here"', ref.reftext end test 'anchor with label containing a comma creates reference' do doc = document_from_string %([[tigers,Tigers, scary tigers, roam here]]Tigers roam here.) ref = doc.catalog[:refs]['tigers'] refute_nil ref assert_equal 'Tigers, scary tigers, roam here', ref.reftext end end asciidoctor-2.0.20/test/lists_test.rb000066400000000000000000005437261443135032600176210ustar00rootroot00000000000000# frozen_string_literal: true require_relative 'test_helper' context "Bulleted lists (:ulist)" do context "Simple lists" do test "dash elements with no blank lines" do input = <<~'EOS' List ==== - Foo - Boo - Blech EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 end test 'indented dash elements using spaces' do input = <<~EOS \x20- Foo \x20- Boo \x20- Blech EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 end test 'indented dash elements using tabs' do input = <<~EOS \t-\tFoo \t-\tBoo \t-\tBlech EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 end test "dash elements separated by blank lines should merge lists" do input = <<~'EOS' List ==== - Foo - Boo - Blech EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 end test 'dash elements with interspersed line comments should be skipped and not break list' do input = <<~'EOS' == List - Foo // line comment // another line comment - Boo // line comment more text // another line comment - Blech EOS output = convert_string_to_embedded input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 assert_xpath %((//ul/li)[2]/p[text()="Boo\nmore text"]), output, 1 end test "dash elements separated by a line comment offset by blank lines should not merge lists" do input = <<~'EOS' List ==== - Foo - Boo // - Blech EOS output = convert_string input assert_xpath '//ul', output, 2 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '(//ul)[2]/li', output, 1 end test "dash elements separated by a block title offset by a blank line should not merge lists" do input = <<~'EOS' List ==== - Foo - Boo .Also - Blech EOS 
output = convert_string input assert_xpath '//ul', output, 2 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '(//ul)[2]/li', output, 1 assert_xpath '(//ul)[2]/preceding-sibling::*[@class = "title"][text() = "Also"]', output, 1 end test "dash elements separated by an attribute entry offset by a blank line should not merge lists" do input = <<~'EOS' == List - Foo - Boo :foo: bar - Blech EOS output = convert_string_to_embedded input assert_xpath '//ul', output, 2 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '(//ul)[2]/li', output, 1 end test 'a non-indented wrapped line is folded into text of list item' do input = <<~'EOS' List ==== - Foo wrapped content - Boo - Blech EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li[1]/*', output, 1 assert_xpath "//ul/li[1]/p[text() = 'Foo\nwrapped content']", output, 1 end test 'a non-indented wrapped line that resembles a block title is folded into text of list item' do input = <<~'EOS' == List - Foo .wrapped content - Boo - Blech EOS output = convert_string_to_embedded input assert_xpath '//ul', output, 1 assert_xpath '//ul/li[1]/*', output, 1 assert_xpath "//ul/li[1]/p[text() = 'Foo\n.wrapped content']", output, 1 end test 'a non-indented wrapped line that resembles an attribute entry is folded into text of list item' do input = <<~'EOS' == List - Foo :foo: bar - Boo - Blech EOS output = convert_string_to_embedded input assert_xpath '//ul', output, 1 assert_xpath '//ul/li[1]/*', output, 1 assert_xpath "//ul/li[1]/p[text() = 'Foo\n:foo: bar']", output, 1 end test 'a list item with a nested marker terminates non-indented paragraph for text of list item' do input = <<~'EOS' - Foo Bar * Foo EOS output = convert_string_to_embedded input assert_css 'ul ul', output, 1 refute_includes output, '* Foo' end test 'a list item for a different list terminates non-indented paragraph for text of list item' do input = <<~'EOS' == Example 1 - Foo Bar . 
Foo == Example 2 * Item text term:: def EOS output = convert_string_to_embedded input assert_css 'ul ol', output, 1 refute_includes output, '* Foo' assert_css 'ul dl', output, 1 refute_includes output, 'term:: def' end test 'an indented wrapped line is unindented and folded into text of list item' do input = <<~'EOS' List ==== - Foo wrapped content - Boo - Blech EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li[1]/*', output, 1 assert_xpath "//ul/li[1]/p[text() = 'Foo\nwrapped content']", output, 1 end test 'wrapped list item with hanging indent followed by non-indented line' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS == Lists - list item 1 // not line comment second wrapped line - list item 2 EOS output = convert_string_to_embedded input assert_css 'ul', output, 1 assert_css 'ul li', output, 2 # NOTE for some reason, we're getting an extra line after the indented line lines = xmlnodes_at_xpath('(//ul/li)[1]/p', output, 1).text.gsub(/\n[[:space:]]*\n/, ?\n).lines assert_equal 3, lines.size assert_equal 'list item 1', lines[0].chomp assert_equal ' // not line comment', lines[1].chomp assert_equal 'second wrapped line', lines[2].chomp end test 'a list item with a nested marker terminates indented paragraph for text of list item' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS - Foo Bar * Foo EOS output = convert_string_to_embedded input assert_css 'ul ul', output, 1 refute_includes output, '* Foo' end test 'a list item that starts with a sequence of list markers characters should not match a nested list' do input = <<~EOS \x20* first item \x20*. normal text EOS output = convert_string_to_embedded input assert_css 'ul', output, 1 assert_css 'ul li', output, 1 assert_xpath "//ul/li/p[text()='first item\n*. normal text']", output, 1 end test 'a list item for a different list terminates indented paragraph for text of list item' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS == Example 1 - Foo Bar . 
Foo == Example 2 * Item text term:: def EOS output = convert_string_to_embedded input assert_css 'ul ol', output, 1 refute_includes output, '* Foo' assert_css 'ul dl', output, 1 refute_includes output, 'term:: def' end test "a literal paragraph offset by blank lines in list content is appended as a literal block" do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS List ==== - Foo literal - Boo - Blech EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 assert_xpath '(//ul/li)[1]/p[text() = "Foo"]', output, 1 assert_xpath '(//ul/li)[1]/*[@class="literalblock"]', output, 1 assert_xpath '(//ul/li)[1]/p/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath '((//ul/li)[1]/*[@class="literalblock"])[1]//pre[text() = "literal"]', output, 1 end test 'should escape special characters in all literal paragraphs attached to list item' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS * first item text more text * second item EOS output = convert_string_to_embedded input assert_css 'li', output, 2 assert_css 'code', output, 0 assert_css 'li:first-of-type > *', output, 3 assert_css 'li:first-of-type pre', output, 2 assert_xpath '((//li)[1]//pre)[1][text()="text"]', output, 1 assert_xpath '((//li)[1]//pre)[2][text()="more text"]', output, 1 end test "a literal paragraph offset by a blank line in list content followed by line with continuation is appended as two blocks" do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS List ==== - Foo literal + para - Boo - Blech EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 assert_xpath '(//ul/li)[1]/p[text() = "Foo"]', output, 1 assert_xpath '(//ul/li)[1]/*[@class="literalblock"]', output, 1 assert_xpath '(//ul/li)[1]/p/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath '((//ul/li)[1]/*[@class="literalblock"])[1]//pre[text() = "literal"]', output, 1 assert_xpath '(//ul/li)[1]/*[@class="literalblock"]/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '(//ul/li)[1]/*[@class="literalblock"]/following-sibling::*[@class="paragraph"]/p[text()="para"]', output, 1 end test 'an admonition paragraph attached by a line continuation to a list item with wrapped text should produce admonition' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS - first-line text wrapped text + NOTE: This is a note. EOS output = convert_string_to_embedded input assert_css 'ul', output, 1 assert_css 'ul > li', output, 1 assert_css 'ul > li > p', output, 1 assert_xpath %(//ul/li/p[text()="first-line text\nwrapped text"]), output, 1 assert_css 'ul > li > p + .admonitionblock.note', output, 1 assert_xpath '//ul/li/*[@class="admonitionblock note"]//td[@class="content"][normalize-space(text())="This is a note."]', output, 1 end test 'paragraph-like blocks attached to an ancestry list item by a list continuation should produce blocks' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS * parent ** child + NOTE: This is a note. 
* another parent ** another child + ''' EOS output = convert_string_to_embedded input assert_css 'ul ul .admonitionblock.note', output, 0 assert_xpath '(//ul)[1]/li/*[@class="admonitionblock note"]', output, 1 assert_css 'ul ul hr', output, 0 assert_xpath '(//ul)[1]/li/hr', output, 1 end test 'should not inherit block attributes from previous block when block is attached using a list continuation' do input = <<~'EOS' * complex list item + [source,xml] ---- value ---- <1> a configuration value EOS doc = document_from_string input colist = doc.blocks[0].items[0].blocks[-1] assert_equal :colist, colist.context refute_equal 'source', colist.style output = doc.convert standalone: false assert_css 'ul', output, 1 assert_css 'ul > li', output, 1 assert_css 'ul > li > p', output, 1 assert_css 'ul > li > .listingblock', output, 1 assert_css 'ul > li > .colist', output, 1 end test 'should continue to parse blocks attached by a list continuation after block is dropped' do input = <<~'EOS' * item + paragraph + [comment] comment + ==== example ==== ''' EOS output = convert_string_to_embedded input assert_css 'ul > li > .paragraph', output, 1 assert_css 'ul > li > .exampleblock', output, 1 end test 'appends line as paragraph if attached by continuation following line comment' do input = <<~'EOS' - list item 1 // line comment + paragraph in list item 1 - list item 2 EOS output = convert_string_to_embedded input assert_css 'ul', output, 1 assert_css 'ul li', output, 2 assert_xpath '(//ul/li)[1]/p[text()="list item 1"]', output, 1 assert_xpath '(//ul/li)[1]/p/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '(//ul/li)[1]/p/following-sibling::*[@class="paragraph"]/p[text()="paragraph in list item 1"]', output, 1 assert_xpath '(//ul/li)[2]/p[text()="list item 2"]', output, 1 end test "a literal paragraph with a line that appears as a list item that is followed by a continuation should create two blocks" do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS * Foo + literal . still literal + para * Bar EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '(//ul/li)[1]/p[text() = "Foo"]', output, 1 assert_xpath '(//ul/li)[1]/*[@class="literalblock"]', output, 1 assert_xpath '(//ul/li)[1]/p/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath %(((//ul/li)[1]/*[@class="literalblock"])[1]//pre[text() = " literal\n. 
still literal"]), output, 1 assert_xpath '(//ul/li)[1]/*[@class="literalblock"]/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '(//ul/li)[1]/*[@class="literalblock"]/following-sibling::*[@class="paragraph"]/p[text()="para"]', output, 1 end test "consecutive literal paragraph offset by blank lines in list content are appended as a literal blocks" do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS List ==== - Foo literal more literal - Boo - Blech EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 assert_xpath '(//ul/li)[1]/p[text() = "Foo"]', output, 1 assert_xpath '(//ul/li)[1]/*[@class="literalblock"]', output, 2 assert_xpath '(//ul/li)[1]/p/following-sibling::*[@class="literalblock"]', output, 2 assert_xpath '((//ul/li)[1]/*[@class="literalblock"])[1]//pre[text()="literal"]', output, 1 assert_xpath "((//ul/li)[1]/*[@class='literalblock'])[2]//pre[text()='more\nliteral']", output, 1 end test "a literal paragraph without a trailing blank line consumes following list items" do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS List ==== - Foo literal - Boo - Blech EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 1 assert_xpath '(//ul/li)[1]/p[text() = "Foo"]', output, 1 assert_xpath '(//ul/li)[1]/*[@class="literalblock"]', output, 1 assert_xpath '(//ul/li)[1]/p/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath "((//ul/li)[1]/*[@class='literalblock'])[1]//pre[text() = ' literal\n- Boo\n- Blech']", output, 1 end test "asterisk elements with no blank lines" do input = <<~'EOS' List ==== * Foo * Boo * Blech EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 end test 'indented asterisk elements using spaces' do input = <<~EOS \x20* Foo \x20* Boo \x20* Blech EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 end test 'indented unicode bullet elements using spaces' do input = <<~EOS \x20• Foo \x20• Boo \x20• Blech EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 end test 'indented asterisk elements using tabs' do input = <<~EOS \t*\tFoo \t*\tBoo \t*\tBlech EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 end test 'should represent block style as style class' do ['disc', 'square', 'circle'].each do |style| input = <<~EOS [#{style}] * a * b * c EOS output = convert_string_to_embedded input assert_css ".ulist.#{style}", output, 1 assert_css ".ulist.#{style} ul.#{style}", output, 1 end end test "asterisk elements separated by blank lines should merge lists" do input = <<~'EOS' List ==== * Foo * Boo * Blech EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 end test 'asterisk elements with interspersed line comments should be skipped and not break list' do input = <<~'EOS' == List * Foo // line comment // another line comment * Boo // line comment more text // another line comment * Blech EOS output = convert_string_to_embedded input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 assert_xpath %((//ul/li)[2]/p[text()="Boo\nmore text"]), output, 1 end test "asterisk elements separated by a line comment offset by blank lines should not merge lists" do input = <<~'EOS' List ==== * Foo * Boo // * Blech 
      EOS
      output = convert_string input
      assert_xpath '//ul', output, 2
      assert_xpath '(//ul)[1]/li', output, 2
      assert_xpath '(//ul)[2]/li', output, 1
    end

    test "asterisk elements separated by a block title offset by a blank line should not merge lists" do
      input = <<~'EOS'
      List
      ====

      * Foo

      * Boo

      .Also
      * Blech
      EOS
      output = convert_string input
      assert_xpath '//ul', output, 2
      assert_xpath '(//ul)[1]/li', output, 2
      assert_xpath '(//ul)[2]/li', output, 1
      assert_xpath '(//ul)[2]/preceding-sibling::*[@class = "title"][text() = "Also"]', output, 1
    end

    test "asterisk elements separated by an attribute entry offset by a blank line should not merge lists" do
      input = <<~'EOS'
      == List

      * Foo

      * Boo

      :foo: bar

      * Blech
      EOS
      output = convert_string_to_embedded input
      assert_xpath '//ul', output, 2
      assert_xpath '(//ul)[1]/li', output, 2
      assert_xpath '(//ul)[2]/li', output, 1
    end

    test "list should terminate before next lower section heading" do
      input = <<~'EOS'
      List
      ====

      * first item

      * second item

      == Section
      EOS
      output = convert_string input
      assert_xpath '//ul', output, 1
      assert_xpath '//ul/li', output, 2
      assert_xpath '//h2[text() = "Section"]', output, 1
    end

    test "list should terminate before next lower section heading with implicit id" do
      input = <<~'EOS'
      List
      ====

      * first item

      * second item

      [[sec]]
      == Section
      EOS
      output = convert_string input
      assert_xpath '//ul', output, 1
      assert_xpath '//ul/li', output, 2
      assert_xpath '//h2[@id = "sec"][text() = "Section"]', output, 1
    end

    test 'should not find section title immediately below last list item' do
      input = <<~'EOS'
      * first
      * second
      == Not a section
      EOS
      output = convert_string_to_embedded input
      assert_css 'ul', output, 1
      assert_css 'ul > li', output, 2
      assert_css 'h2', output, 0
      assert_includes output, '== Not a section'
      assert_xpath %((//li)[2]/p[text() = "second\n== Not a section"]), output, 1
    end

    test 'should match trailing line separator in text of list item' do
      input = <<~EOS.chop
      * a
      * b#{decode_char 8232}
      * c
      EOS
      output = convert_string input
      assert_css 'li', output, 3
      assert_xpath %((//li)[2]/p[text()="b#{decode_char 8232}"]), output, 1
    end

    test 'should match line separator in text of list item' do
      input = <<~EOS.chop
      * a
      * b#{decode_char 8232}b
      * c
      EOS
      output = convert_string input
      assert_css 'li', output, 3
      assert_xpath %((//li)[2]/p[text()="b#{decode_char 8232}b"]), output, 1
    end
  end

  context "Lists with inline markup" do
    test "quoted text" do
      input = <<~'EOS'
      List
      ====

      - I am *strong*.
      - I am _stressed_.
      - I am `flexible`.
      EOS
      output = convert_string input
      assert_xpath '//ul', output, 1
      assert_xpath '//ul/li', output, 3
      assert_xpath '(//ul/li)[1]//strong', output, 1
      assert_xpath '(//ul/li)[2]//em', output, 1
      assert_xpath '(//ul/li)[3]//code', output, 1
    end

    test "attribute substitutions" do
      input = <<~'EOS'
      List
      ====
      :foo: bar

      - side a {vbar} side b
      - Take me to a {foo}.
EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '(//ul/li)[1]//p[text() = "side a | side b"]', output, 1 assert_xpath '(//ul/li)[2]//p[text() = "Take me to a bar."]', output, 1 end test "leading dot is treated as text not block title" do input = <<~'EOS' * .first * .second * .third EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 %w(.first .second .third).each_with_index do |text, index| assert_xpath "(//ul/li)[#{index + 1}]//p[text() = '#{text}']", output, 1 end end test "word ending sentence on continuing line not treated as a list item" do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS A. This is the story about AsciiDoc. It begins here. B. And it ends here. EOS output = convert_string input assert_xpath '//ol', output, 1 assert_xpath '//ol/li', output, 2 end test 'should discover anchor at start of unordered list item text and register it as a reference' do input = <<~'EOS' The highest peak in the Front Range is <>, which tops <> by just a few feet. * [[mount-evans,Mount Evans]]At 14,271 feet, Mount Evans is the highest summit of the Chicago Peaks in the Front Range of the Rocky Mountains. * [[grays-peak,Grays Peak]] Grays Peak rises to 14,278 feet, making it the highest summit in the Front Range of the Rocky Mountains. * Longs Peak is a 14,259-foot high, prominent mountain summit in the northern Front Range of the Rocky Mountains. * Pikes Peak is the highest summit of the southern Front Range of the Rocky Mountains at 14,115 feet. EOS doc = document_from_string input refs = doc.catalog[:refs] assert refs.key?('mount-evans') assert refs.key?('grays-peak') output = doc.convert standalone: false assert_xpath '(//p)[1]/a[@href="#grays-peak"][text()="Grays Peak"]', output, 1 assert_xpath '(//p)[1]/a[@href="#mount-evans"][text()="Mount Evans"]', output, 1 end test 'should discover anchor at start of ordered list item text and register it as a reference' do input = <<~'EOS' This is a cross-reference to <>. This is a cross-reference to <>. . Ordered list, item 1, without anchor . [[step-2,Step 2]]Ordered list, item 2, with anchor . Ordered list, item 3, without anchor . [[step-4,Step 4]]Ordered list, item 4, with anchor EOS doc = document_from_string input refs = doc.catalog[:refs] assert refs.key?('step-2') assert refs.key?('step-4') output = doc.convert standalone: false assert_xpath '(//p)[1]/a[@href="#step-2"][text()="Step 2"]', output, 1 assert_xpath '(//p)[1]/a[@href="#step-4"][text()="Step 4"]', output, 1 end test 'should discover anchor at start of callout list item text and register it as a reference' do input = <<~'EOS' This is a cross-reference to <>. [source,ruby] ---- require 'sinatra' <1> get '/hi' do <2> <3> "Hello World!" 
end ---- <1> Library import <2> [[url-mapping,url mapping]]URL mapping <3> Response block EOS doc = document_from_string input refs = doc.catalog[:refs] assert refs.key?('url-mapping') output = doc.convert standalone: false assert_xpath '(//p)[1]/a[@href="#url-mapping"][text()="url mapping"]', output, 1 end end context "Nested lists" do test "asterisk element mixed with dash elements should be nested" do input = <<~'EOS' List ==== - Foo * Boo - Blech EOS output = convert_string input assert_xpath '//ul', output, 2 assert_xpath '//ul/li', output, 3 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '(//ul)[1]/li//ul/li', output, 1 end test "dash element mixed with asterisks elements should be nested" do input = <<~'EOS' List ==== * Foo - Boo * Blech EOS output = convert_string input assert_xpath '//ul', output, 2 assert_xpath '//ul/li', output, 3 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '(//ul)[1]/li//ul/li', output, 1 end test "lines prefixed with alternating list markers separated by blank lines should be nested" do input = <<~'EOS' List ==== - Foo * Boo - Blech EOS output = convert_string input assert_xpath '//ul', output, 2 assert_xpath '//ul/li', output, 3 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '(//ul)[1]/li//ul/li', output, 1 end test "nested elements (2) with asterisks" do input = <<~'EOS' List ==== * Foo ** Boo * Blech EOS output = convert_string input assert_xpath '//ul', output, 2 assert_xpath '//ul/li', output, 3 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '(//ul)[1]/li//ul/li', output, 1 end test "nested elements (3) with asterisks" do input = <<~'EOS' List ==== * Foo ** Boo *** Snoo * Blech EOS output = convert_string input assert_xpath '//ul', output, 3 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '((//ul)[1]/li//ul)[1]/li', output, 1 assert_xpath '(((//ul)[1]/li//ul)[1]/li//ul)[1]/li', output, 1 end test "nested elements (4) with asterisks" do input = <<~'EOS' List ==== * Foo ** Boo *** Snoo **** Froo * Blech EOS output = convert_string input assert_xpath '//ul', output, 4 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '((//ul)[1]/li//ul)[1]/li', output, 1 assert_xpath '(((//ul)[1]/li//ul)[1]/li//ul)[1]/li', output, 1 assert_xpath '((((//ul)[1]/li//ul)[1]/li//ul)[1]/li//ul)[1]/li', output, 1 end test "nested elements (5) with asterisks" do input = <<~'EOS' List ==== * Foo ** Boo *** Snoo **** Froo ***** Groo * Blech EOS output = convert_string input assert_xpath '//ul', output, 5 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '((//ul)[1]/li//ul)[1]/li', output, 1 assert_xpath '(((//ul)[1]/li//ul)[1]/li//ul)[1]/li', output, 1 assert_xpath '((((//ul)[1]/li//ul)[1]/li//ul)[1]/li//ul)[1]/li', output, 1 assert_xpath '(((((//ul)[1]/li//ul)[1]/li//ul)[1]/li//ul)[1]/li//ul)[1]/li', output, 1 end test 'nested arbitrary depth with asterisks' do input = [] ('a'..'z').each_with_index do |ch, i| input << %(#{'*' * (i + 1)} #{ch}) end output = convert_string_to_embedded input.join(%(\n)) refute_includes output, '*' assert_css 'li', output, 26 end test 'level of unordered list should match section level' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS == Parent Section * item 1.1 ** item 2.1 *** item 3.1 ** item 2.2 * item 1.2 === Nested Section * item 1.1 EOS doc = document_from_string input lists = doc.find_by context: :ulist assert_equal 1, lists[0].level assert_equal 1, lists[1].level assert_equal 1, lists[2].level assert_equal 2, lists[3].level end test 'does not recognize 
lists with repeating unicode bullets' do input = '•• Boo' output = convert_string input assert_xpath '//ul', output, 0 assert_includes output, '•' end test "nested ordered elements (2)" do input = <<~'EOS' List ==== . Foo .. Boo . Blech EOS output = convert_string input assert_xpath '//ol', output, 2 assert_xpath '//ol/li', output, 3 assert_xpath '(//ol)[1]/li', output, 2 assert_xpath '(//ol)[1]/li//ol/li', output, 1 end test "nested ordered elements (3)" do input = <<~'EOS' List ==== . Foo .. Boo ... Snoo . Blech EOS output = convert_string input assert_xpath '//ol', output, 3 assert_xpath '(//ol)[1]/li', output, 2 assert_xpath '((//ol)[1]/li//ol)[1]/li', output, 1 assert_xpath '(((//ol)[1]/li//ol)[1]/li//ol)[1]/li', output, 1 end test 'nested arbitrary depth with dot marker' do input = [] ('a'..'z').each_with_index do |ch, i| input << %(#{'.' * (i + 1)} #{ch}) end output = convert_string_to_embedded input.join(%(\n)) refute_includes output, '.' assert_css 'li', output, 26 end test 'level of ordered list should match section level' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS == Parent Section . item 1.1 .. item 2.1 ... item 3.1 .. item 2.2 . item 1.2 === Nested Section . item 1.1 EOS doc = document_from_string input lists = doc.find_by context: :olist assert_equal 1, lists[0].level assert_equal 1, lists[1].level assert_equal 1, lists[2].level assert_equal 2, lists[3].level end test "nested unordered inside ordered elements" do input = <<~'EOS' List ==== . Foo * Boo . Blech EOS output = convert_string input assert_xpath '//ol', output, 1 assert_xpath '//ul', output, 1 assert_xpath '(//ol)[1]/li', output, 2 assert_xpath '((//ol)[1]/li//ul)[1]/li', output, 1 end test "nested ordered inside unordered elements" do input = <<~'EOS' List ==== * Foo . Boo * Blech EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ol', output, 1 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '((//ul)[1]/li//ol)[1]/li', output, 1 end test 'three levels of alternating unordered and ordered elements' do input = <<~'EOS' == Lists * bullet 1 . numbered 1.1 ** bullet 1.1.1 * bullet 2 EOS output = convert_string_to_embedded input assert_css '.ulist', output, 2 assert_css '.olist', output, 1 assert_css '.ulist > ul > li > p', output, 3 assert_css '.ulist > ul > li > p + .olist', output, 1 assert_css '.ulist > ul > li > p + .olist > ol > li > p', output, 1 assert_css '.ulist > ul > li > p + .olist > ol > li > p + .ulist', output, 1 assert_css '.ulist > ul > li > p + .olist > ol > li > p + .ulist > ul > li > p', output, 1 assert_css '.ulist > ul > li + li > p', output, 1 end test "lines with alternating markers of unordered and ordered list types separated by blank lines should be nested" do input = <<~'EOS' List ==== * Foo . Boo * Blech EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ol', output, 1 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '((//ul)[1]/li//ol)[1]/li', output, 1 end test 'list item with literal content should not consume nested list of different type' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS List ==== - bullet literal but not hungry . 
numbered EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//li', output, 2 assert_xpath '//ul//ol', output, 1 assert_xpath '//ul/li/p', output, 1 assert_xpath '//ul/li/p[text()="bullet"]', output, 1 assert_xpath '//ul/li/p/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath %(//ul/li/p/following-sibling::*[@class="literalblock"]//pre[text()="literal\nbut not\nhungry"]), output, 1 assert_xpath '//*[@class="literalblock"]/following-sibling::*[@class="olist arabic"]', output, 1 assert_xpath '//*[@class="literalblock"]/following-sibling::*[@class="olist arabic"]//p[text()="numbered"]', output, 1 end test 'nested list item does not eat the title of the following detached block' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS List ==== - bullet * nested bullet 1 * nested bullet 2 .Title .... literal .... EOS # use convert_string so we can match all ulists easier output = convert_string input assert_xpath '//*[@class="ulist"]/ul', output, 2 assert_xpath '(//*[@class="ulist"])[1]/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath '(//*[@class="ulist"])[1]/following-sibling::*[@class="literalblock"]/*[@class="title"]', output, 1 end test "lines with alternating markers of bulleted and description list types separated by blank lines should be nested" do input = <<~'EOS' List ==== * Foo term1:: def1 * Blech EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//dl', output, 1 assert_xpath '//ul[1]/li', output, 2 assert_xpath '//ul[1]/li//dl[1]/dt', output, 1 assert_xpath '//ul[1]/li//dl[1]/dd', output, 1 end test "nested ordered with attribute inside unordered elements" do input = <<~'EOS' Blah ==== * Foo [start=2] . Boo * Blech EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ol', output, 1 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '((//ul)[1]/li//ol)[1][@start = 2]/li', output, 1 end end context "List continuations" do test "adjacent list continuation line attaches following paragraph" do input = <<~'EOS' Lists ===== * Item one, paragraph one + Item one, paragraph two + * Item two EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '//ul/li[1]/p', output, 1 assert_xpath '//ul/li[1]//p', output, 2 assert_xpath '//ul/li[1]/p[text() = "Item one, paragraph one"]', output, 1 assert_xpath '//ul/li[1]/*[@class = "paragraph"]/p[text() = "Item one, paragraph two"]', output, 1 end test "adjacent list continuation line attaches following block" do input = <<~'EOS' Lists ===== * Item one, paragraph one + .... Item one, literal block .... + * Item two EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '//ul/li[1]/p', output, 1 assert_xpath '(//ul/li[1]/p/following-sibling::*)[1][@class = "literalblock"]', output, 1 end test 'adjacent list continuation line attaches following block with block attributes' do input = <<~'EOS' Lists ===== * Item one, paragraph one + :foo: bar [[beck]] .Read the following aloud to yourself [source, ruby] ---- 5.times { print "Odelay!" 
} ---- * Item two EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '//ul/li[1]/p', output, 1 assert_xpath '(//ul/li[1]/p/following-sibling::*)[1][@id="beck"][@class = "listingblock"]', output, 1 assert_xpath '(//ul/li[1]/p/following-sibling::*)[1][@id="beck"]/div[@class="title"][starts-with(text(),"Read")]', output, 1 assert_xpath '(//ul/li[1]/p/following-sibling::*)[1][@id="beck"]//code[@data-lang="ruby"][starts-with(text(),"5.times")]', output, 1 end test 'trailing block attribute line attached by continuation should not create block' do input = <<~'EOS' Lists ===== * Item one, paragraph one + [source] * Item two EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '//ul/li[1]/*', output, 1 assert_xpath '//ul/li//*[@class="listingblock"]', output, 0 end test 'trailing block title line attached by continuation should not create block' do input = <<~'EOS' Lists ===== * Item one, paragraph one + .Disappears into the ether * Item two EOS output = convert_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '//ul/li[1]/*', output, 1 end test 'consecutive blocks in list continuation attach to list item' do input = <<~'EOS' Lists ===== * Item one, paragraph one + .... Item one, literal block .... + ____ Item one, quote block ____ + * Item two EOS output = convert_string_to_embedded input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '//ul/li[1]/p', output, 1 assert_xpath '(//ul/li[1]/p/following-sibling::*)[1][@class = "literalblock"]', output, 1 assert_xpath '(//ul/li[1]/p/following-sibling::*)[2][@class = "quoteblock"]', output, 1 end test 'list item with hanging indent followed by block attached by list continuation' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS == Lists . list item 1 continued + -- open block in list item 1 -- . list item 2 EOS output = convert_string_to_embedded input assert_css 'ol', output, 1 assert_css 'ol li', output, 2 assert_xpath %((//ol/li)[1]/p[text()="list item 1\ncontinued"]), output, 1 assert_xpath '(//ol/li)[1]/p/following-sibling::*[@class="openblock"]', output, 1 assert_xpath '(//ol/li)[1]/p/following-sibling::*[@class="openblock"]//p[text()="open block in list item 1"]', output, 1 assert_xpath %((//ol/li)[2]/p[text()="list item 2"]), output, 1 end test 'list item paragraph in list item and nested list item' do input = <<~'EOS' == Lists . list item 1 + list item 1 paragraph * nested list item + nested list item paragraph . 
list item 2 EOS output = convert_string_to_embedded input assert_css '.olist ol', output, 1 assert_css '.olist ol > li', output, 2 assert_css '.ulist ul', output, 1 assert_css '.ulist ul > li', output, 1 assert_xpath '(//ol/li)[1]/*', output, 3 assert_xpath '((//ol/li)[1]/*)[1]/self::p', output, 1 assert_xpath '((//ol/li)[1]/*)[1]/self::p[text()="list item 1"]', output, 1 assert_xpath '((//ol/li)[1]/*)[2]/self::div[@class="paragraph"]', output, 1 assert_xpath '((//ol/li)[1]/*)[3]/self::div[@class="ulist"]', output, 1 assert_xpath '((//ol/li)[1]/*)[3]/self::div[@class="ulist"]/ul/li', output, 1 assert_xpath '((//ol/li)[1]/*)[3]/self::div[@class="ulist"]/ul/li/p[text()="nested list item"]', output, 1 assert_xpath '((//ol/li)[1]/*)[3]/self::div[@class="ulist"]/ul/li/p/following-sibling::div[@class="paragraph"]', output, 1 end test 'trailing list continuations should attach to list items at respective levels' do input = <<~'EOS' == Lists . list item 1 + * nested list item 1 * nested list item 2 + paragraph for nested list item 2 + paragraph for list item 1 . list item 2 EOS output = convert_string_to_embedded input assert_css '.olist ol', output, 1 assert_css '.olist ol > li', output, 2 assert_css '.ulist ul', output, 1 assert_css '.ulist ul > li', output, 2 assert_css '.olist .ulist', output, 1 assert_xpath '(//ol/li)[1]/*', output, 3 assert_xpath '((//ol/li)[1]/*)[1]/self::p', output, 1 assert_xpath '((//ol/li)[1]/*)[1]/self::p[text()="list item 1"]', output, 1 assert_xpath '((//ol/li)[1]/*)[2]/self::div[@class="ulist"]', output, 1 assert_xpath '((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li', output, 2 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[2]/*', output, 2 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[2]/p', output, 1 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[2]/div[@class="paragraph"]', output, 1 assert_xpath '((//ol/li)[1]/*)[3]/self::div[@class="paragraph"]', output, 1 end test 'trailing list continuations should attach to list items of different types at respective levels' do input = <<~'EOS' == Lists * bullet 1 . numbered 1.1 ** bullet 1.1.1 + numbered 1.1 paragraph + bullet 1 paragraph * bullet 2 EOS output = convert_string_to_embedded input assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '((//ul)[1]/li[1])/*', output, 3 assert_xpath '(((//ul)[1]/li[1])/*)[1]/self::p[text()="bullet 1"]', output, 1 assert_xpath '(((//ul)[1]/li[1])/*)[2]/ol', output, 1 assert_xpath '(((//ul)[1]/li[1])/*)[3]/self::div[@class="paragraph"]/p[text()="bullet 1 paragraph"]', output, 1 assert_xpath '((//ul)[1]/li)[1]/div/ol/li', output, 1 assert_xpath '((//ul)[1]/li)[1]/div/ol/li/*', output, 3 assert_xpath '(((//ul)[1]/li)[1]/div/ol/li/*)[1]/self::p[text()="numbered 1.1"]', output, 1 assert_xpath '(((//ul)[1]/li)[1]/div/ol/li/*)[2]/self::div[@class="ulist"]', output, 1 assert_xpath '(((//ul)[1]/li)[1]/div/ol/li/*)[3]/self::div[@class="paragraph"]/p[text()="numbered 1.1 paragraph"]', output, 1 assert_xpath '((//ul)[1]/li)[1]/div/ol/li/div[@class="ulist"]/ul/li', output, 1 assert_xpath '((//ul)[1]/li)[1]/div/ol/li/div[@class="ulist"]/ul/li/*', output, 1 assert_xpath '((//ul)[1]/li)[1]/div/ol/li/div[@class="ulist"]/ul/li/p[text()="bullet 1.1.1"]', output, 1 end test 'repeated list continuations should attach to list items at respective levels' do input = <<~'EOS' == Lists . 
list item 1 * nested list item 1 + -- open block for nested list item 1 -- + * nested list item 2 + paragraph for nested list item 2 + paragraph for list item 1 . list item 2 EOS output = convert_string_to_embedded input assert_css '.olist ol', output, 1 assert_css '.olist ol > li', output, 2 assert_css '.ulist ul', output, 1 assert_css '.ulist ul > li', output, 2 assert_css '.olist .ulist', output, 1 assert_xpath '(//ol/li)[1]/*', output, 3 assert_xpath '((//ol/li)[1]/*)[1]/self::p', output, 1 assert_xpath '((//ol/li)[1]/*)[1]/self::p[text()="list item 1"]', output, 1 assert_xpath '((//ol/li)[1]/*)[2]/self::div[@class="ulist"]', output, 1 assert_xpath '((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li', output, 2 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[1]/*', output, 2 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[1]/p', output, 1 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[1]/div[@class="openblock"]', output, 1 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[2]/*', output, 2 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[2]/p', output, 1 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[2]/div[@class="paragraph"]', output, 1 assert_xpath '((//ol/li)[1]/*)[3]/self::div[@class="paragraph"]', output, 1 end test 'repeated list continuations attached directly to list item should attach to list items at respective levels' do input = <<~'EOS' == Lists . list item 1 + * nested list item 1 + -- open block for nested list item 1 -- + * nested list item 2 + paragraph for nested list item 2 + paragraph for list item 1 . list item 2 EOS output = convert_string_to_embedded input assert_css '.olist ol', output, 1 assert_css '.olist ol > li', output, 2 assert_css '.ulist ul', output, 1 assert_css '.ulist ul > li', output, 2 assert_css '.olist .ulist', output, 1 assert_xpath '(//ol/li)[1]/*', output, 3 assert_xpath '((//ol/li)[1]/*)[1]/self::p', output, 1 assert_xpath '((//ol/li)[1]/*)[1]/self::p[text()="list item 1"]', output, 1 assert_xpath '((//ol/li)[1]/*)[2]/self::div[@class="ulist"]', output, 1 assert_xpath '((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li', output, 2 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[1]/*', output, 2 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[1]/p', output, 1 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[1]/div[@class="openblock"]', output, 1 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[2]/*', output, 2 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[2]/p', output, 1 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[2]/div[@class="paragraph"]', output, 1 assert_xpath '((//ol/li)[1]/*)[3]/self::div[@class="paragraph"]', output, 1 end test 'repeated list continuations should attach to list items at respective levels ignoring blank lines' do input = <<~'EOS' == Lists . list item 1 + * nested list item 1 + -- open block for nested list item 1 -- + * nested list item 2 + paragraph for nested list item 2 + paragraph for list item 1 . 
list item 2 EOS output = convert_string_to_embedded input assert_css '.olist ol', output, 1 assert_css '.olist ol > li', output, 2 assert_css '.ulist ul', output, 1 assert_css '.ulist ul > li', output, 2 assert_css '.olist .ulist', output, 1 assert_xpath '(//ol/li)[1]/*', output, 3 assert_xpath '((//ol/li)[1]/*)[1]/self::p', output, 1 assert_xpath '((//ol/li)[1]/*)[1]/self::p[text()="list item 1"]', output, 1 assert_xpath '((//ol/li)[1]/*)[2]/self::div[@class="ulist"]', output, 1 assert_xpath '((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li', output, 2 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[1]/*', output, 2 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[1]/p', output, 1 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[1]/div[@class="openblock"]', output, 1 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[2]/*', output, 2 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[2]/p', output, 1 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[2]/div[@class="paragraph"]', output, 1 assert_xpath '((//ol/li)[1]/*)[3]/self::div[@class="paragraph"]', output, 1 end test 'trailing list continuations should ignore preceding blank lines' do input = <<~'EOS' == Lists * bullet 1 ** bullet 1.1 *** bullet 1.1.1 + -- open block -- + bullet 1.1 paragraph + bullet 1 paragraph * bullet 2 EOS output = convert_string_to_embedded input assert_xpath '((//ul)[1]/li[1])/*', output, 3 assert_xpath '(((//ul)[1]/li[1])/*)[1]/self::p[text()="bullet 1"]', output, 1 assert_xpath '(((//ul)[1]/li[1])/*)[2]/self::div[@class="ulist"]', output, 1 assert_xpath '(((//ul)[1]/li[1])/*)[3]/self::div[@class="paragraph"]/p[text()="bullet 1 paragraph"]', output, 1 assert_xpath '((//ul)[1]/li)[1]/div[@class="ulist"]/ul/li', output, 1 assert_xpath '((//ul)[1]/li)[1]/div[@class="ulist"]/ul/li/*', output, 3 assert_xpath '(((//ul)[1]/li)[1]/div[@class="ulist"]/ul/li/*)[1]/self::p[text()="bullet 1.1"]', output, 1 assert_xpath '(((//ul)[1]/li)[1]/div[@class="ulist"]/ul/li/*)[2]/self::div[@class="ulist"]', output, 1 assert_xpath '(((//ul)[1]/li)[1]/div[@class="ulist"]/ul/li/*)[3]/self::div[@class="paragraph"]/p[text()="bullet 1.1 paragraph"]', output, 1 assert_xpath '((//ul)[1]/li)[1]/div[@class="ulist"]/ul/li/div[@class="ulist"]/ul/li', output, 1 assert_xpath '((//ul)[1]/li)[1]/div[@class="ulist"]/ul/li/div[@class="ulist"]/ul/li/*', output, 2 assert_xpath '(((//ul)[1]/li)[1]/div[@class="ulist"]/ul/li/div[@class="ulist"]/ul/li/*)[1]/self::p', output, 1 assert_xpath '(((//ul)[1]/li)[1]/div[@class="ulist"]/ul/li/div[@class="ulist"]/ul/li/*)[2]/self::div[@class="openblock"]', output, 1 end test 'indented outline list item with different marker offset by a blank line should be recognized as a nested list' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS * item 1 . item 1.1 + attached paragraph . 
item 1.2 + attached paragraph * item 2 EOS output = convert_string_to_embedded input assert_css 'ul', output, 1 assert_css 'ol', output, 1 assert_css 'ul ol', output, 1 assert_css 'ul > li', output, 2 assert_xpath '((//ul/li)[1]/*)', output, 2 assert_xpath '((//ul/li)[1]/*)[1]/self::p', output, 1 assert_xpath '((//ul/li)[1]/*)[2]/self::div/ol', output, 1 assert_xpath '((//ul/li)[1]/*)[2]/self::div/ol/li', output, 2 (1..2).each do |idx| assert_xpath "(((//ul/li)[1]/*)[2]/self::div/ol/li)[#{idx}]/*", output, 2 assert_xpath "((((//ul/li)[1]/*)[2]/self::div/ol/li)[#{idx}]/*)[1]/self::p", output, 1 assert_xpath "((((//ul/li)[1]/*)[2]/self::div/ol/li)[#{idx}]/*)[2]/self::div[@class=\"paragraph\"]", output, 1 end end test 'indented description list item inside outline list item offset by a blank line should be recognized as a nested list' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS * item 1 term a:: description a + attached paragraph term b:: description b + attached paragraph * item 2 EOS output = convert_string_to_embedded input assert_css 'ul', output, 1 assert_css 'dl', output, 1 assert_css 'ul dl', output, 1 assert_css 'ul > li', output, 2 assert_xpath '((//ul/li)[1]/*)', output, 2 assert_xpath '((//ul/li)[1]/*)[1]/self::p', output, 1 assert_xpath '((//ul/li)[1]/*)[2]/self::div/dl', output, 1 assert_xpath '((//ul/li)[1]/*)[2]/self::div/dl/dt', output, 2 assert_xpath '((//ul/li)[1]/*)[2]/self::div/dl/dd', output, 2 (1..2).each do |idx| assert_xpath "(((//ul/li)[1]/*)[2]/self::div/dl/dd)[#{idx}]/*", output, 2 assert_xpath "((((//ul/li)[1]/*)[2]/self::div/dl/dd)[#{idx}]/*)[1]/self::p", output, 1 assert_xpath "((((//ul/li)[1]/*)[2]/self::div/dl/dd)[#{idx}]/*)[2]/self::div[@class=\"paragraph\"]", output, 1 end end # NOTE this is not consistent w/ AsciiDoc.py, but this is some screwy input anyway # FIXME one list continuation is left behind test 'consecutive list continuation lines are folded' do input = <<~'EOS' Lists ===== * Item one, paragraph one + + Item one, paragraph two + + * Item two + + EOS output = convert_string_to_embedded input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '//ul/li[1]/p', output, 1 assert_xpath '//ul/li[1]/div/p', output, 1 assert_xpath '//ul/li[1]//p[text() = "Item one, paragraph one"]', output, 1 # NOTE this is a negative assertion assert_xpath %(//ul/li[1]//p[text() = "+\nItem one, paragraph two"]), output, 1 end test 'should warn if unterminated block is detected in list item' do input = <<~'EOS' * item + ==== example * swallowed item EOS using_memory_logger do |logger| output = convert_string_to_embedded input assert_xpath '//ul/li', output, 1 assert_xpath '//ul/li/*[@class="exampleblock"]', output, 1 assert_xpath %(//p[text()="example\n* swallowed item"]), output, 1 assert_message logger, :WARN, ': line 3: unterminated example block', Hash end end end end context "Ordered lists (:olist)" do context "Simple lists" do test "dot elements with no blank lines" do input = <<~'EOS' List ==== . Foo . Boo . Blech EOS output = convert_string input assert_xpath '//ol', output, 1 assert_xpath '//ol/li', output, 3 end test 'indented dot elements using spaces' do input = <<~EOS \x20. Foo \x20. Boo \x20. 
Blech EOS output = convert_string input assert_xpath '//ol', output, 1 assert_xpath '//ol/li', output, 3 end test 'indented dot elements using tabs' do input = <<~EOS \t.\tFoo \t.\tBoo \t.\tBlech EOS output = convert_string input assert_xpath '//ol', output, 1 assert_xpath '//ol/li', output, 3 end test 'should represent explicit role attribute as style class' do input = <<~'EOS' [role="dry"] . Once . Again . Refactor! EOS output = convert_string_to_embedded input assert_css '.olist.arabic.dry', output, 1 assert_css '.olist ol.arabic', output, 1 end test 'should base list style on marker length rather than list depth' do input = <<~'EOS' ... parent .. child . grandchild EOS output = convert_string_to_embedded input assert_css '.olist.lowerroman', output, 1 assert_css '.olist.lowerroman .olist.loweralpha', output, 1 assert_css '.olist.lowerroman .olist.loweralpha .olist.arabic', output, 1 end test 'should allow list style to be specified explicitly when using markers with implicit style' do input = <<~'EOS' [loweralpha] i) 1 ii) 2 iii) 3 EOS output = convert_string_to_embedded input assert_css '.olist.loweralpha', output, 1 assert_css '.olist.lowerroman', output, 0 end test 'should represent custom numbering and explicit role attribute as style classes' do input = <<~'EOS' [loweralpha, role="dry"] . Once . Again . Refactor! EOS output = convert_string_to_embedded input assert_css '.olist.loweralpha.dry', output, 1 assert_css '.olist ol.loweralpha', output, 1 end test 'should set reversed attribute on list if reversed option is set' do input = <<~'EOS' [%reversed, start=3] . three . two . one . blast off! EOS output = convert_string_to_embedded input assert_css 'ol[reversed][start="3"]', output, 1 end test 'should represent implicit role attribute as style class' do input = <<~'EOS' [.dry] . Once . Again . Refactor! EOS output = convert_string_to_embedded input assert_css '.olist.arabic.dry', output, 1 assert_css '.olist ol.arabic', output, 1 end test 'should represent custom numbering and implicit role attribute as style classes' do input = <<~'EOS' [loweralpha.dry] . Once . Again . Refactor! EOS output = convert_string_to_embedded input assert_css '.olist.loweralpha.dry', output, 1 assert_css '.olist ol.loweralpha', output, 1 end test "dot elements separated by blank lines should merge lists" do input = <<~'EOS' List ==== . Foo . Boo . Blech EOS output = convert_string input assert_xpath '//ol', output, 1 assert_xpath '//ol/li', output, 3 end test 'should escape special characters in all literal paragraphs attached to list item' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS . first item text more text . second item EOS output = convert_string_to_embedded input assert_css 'li', output, 2 assert_css 'code', output, 0 assert_css 'li:first-of-type > *', output, 3 assert_css 'li:first-of-type pre', output, 2 assert_xpath '((//li)[1]//pre)[1][text()="text"]', output, 1 assert_xpath '((//li)[1]//pre)[2][text()="more text"]', output, 1 end test 'dot elements with interspersed line comments should be skipped and not break list' do input = <<~'EOS' == List . Foo // line comment // another line comment . Boo // line comment more text // another line comment . 
Blech EOS output = convert_string_to_embedded input assert_xpath '//ol', output, 1 assert_xpath '//ol/li', output, 3 assert_xpath %((//ol/li)[2]/p[text()="Boo\nmore text"]), output, 1 end test "dot elements separated by line comment offset by blank lines should not merge lists" do input = <<~'EOS' List ==== . Foo . Boo // . Blech EOS output = convert_string input assert_xpath '//ol', output, 2 assert_xpath '(//ol)[1]/li', output, 2 assert_xpath '(//ol)[2]/li', output, 1 end test "dot elements separated by a block title offset by a blank line should not merge lists" do input = <<~'EOS' List ==== . Foo . Boo .Also . Blech EOS output = convert_string input assert_xpath '//ol', output, 2 assert_xpath '(//ol)[1]/li', output, 2 assert_xpath '(//ol)[2]/li', output, 1 assert_xpath '(//ol)[2]/preceding-sibling::*[@class = "title"][text() = "Also"]', output, 1 end test "dot elements separated by an attribute entry offset by a blank line should not merge lists" do input = <<~'EOS' == List . Foo . Boo :foo: bar . Blech EOS output = convert_string_to_embedded input assert_xpath '//ol', output, 2 assert_xpath '(//ol)[1]/li', output, 2 assert_xpath '(//ol)[2]/li', output, 1 end test 'should use start number in docbook5 backend' do input = <<~'EOS' == List [start=7] . item 7 . item 8 EOS output = convert_string_to_embedded input, backend: 'docbook5' assert_xpath '//orderedlist', output, 1 assert_xpath '(//orderedlist)/listitem', output, 2 assert_xpath '(//orderedlist)[@startingnumber = "7"]', output, 1 end test 'should match trailing line separator in text of list item' do input = <<~EOS.chop . a . b#{decode_char 8232} . c EOS output = convert_string input assert_css 'li', output, 3 assert_xpath %((//li)[2]/p[text()="b#{decode_char 8232}"]), output, 1 end test 'should match line separator in text of list item' do input = <<~EOS.chop . a . b#{decode_char 8232}b . 
c EOS output = convert_string input assert_css 'li', output, 3 assert_xpath %((//li)[2]/p[text()="b#{decode_char 8232}b"]), output, 1 end end test 'should warn if explicit uppercase roman numerals in list are out of sequence' do input = <<~'EOS' I) one III) three EOS using_memory_logger do |logger| output = convert_string_to_embedded input assert_xpath '//ol/li', output, 2 assert_message logger, :WARN, ': line 2: list item index: expected II, got III', Hash end end test 'should warn if explicit lowercase roman numerals in list are out of sequence' do input = <<~'EOS' i) one iii) three EOS using_memory_logger do |logger| output = convert_string_to_embedded input assert_xpath '//ol/li', output, 2 assert_message logger, :WARN, ': line 2: list item index: expected ii, got iii', Hash end end end context "Description lists (:dlist)" do context "Simple lists" do test 'should not parse a bare dlist delimiter as a dlist' do input = '::' output = convert_string_to_embedded input assert_css 'dl', output, 0 assert_xpath '//p[text()="::"]', output, 1 end test 'should not parse an indented bare dlist delimiter as a dlist' do input = ' ::' output = convert_string_to_embedded input assert_css 'dl', output, 0 assert_xpath '//pre[text()="::"]', output, 1 end test 'should parse a dlist delimiter preceded by a blank attribute as a dlist' do input = '{blank}::' output = convert_string_to_embedded input assert_css 'dl', output, 1 assert_css 'dl > dt', output, 1 assert_css 'dl > dt:empty', output, 1 end test 'should parse a dlist if term is include and principal text is []' do input = 'include:: []' output = convert_string_to_embedded input assert_css 'dl', output, 1 assert_css 'dl > dt', output, 1 assert_xpath '(//dl/dt)[1]/following-sibling::dd/p[text() = "[]"]', output, 1 end test 'should parse a dlist if term is include and principal text matches macro form' do input = 'include:: pass:[${placeholder}]' output = convert_string_to_embedded input assert_css 'dl', output, 1 assert_css 'dl > dt', output, 1 assert_xpath '(//dl/dt)[1]/following-sibling::dd/p[text() = "${placeholder}"]', output, 1 end test "single-line adjacent elements" do input = <<~'EOS' term1:: def1 term2:: def2 EOS output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 assert_xpath '(//dl/dt)[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl/dt)[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '(//dl/dt)[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl/dt)[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test 'should parse sibling items using same rules' do input = <<~'EOS' term1;; ;; def1 term2;; ;; def2 EOS output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 assert_xpath '(//dl/dt)[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl/dt)[1]/following-sibling::dd/p[text() = ";; def1"]', output, 1 assert_xpath '(//dl/dt)[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl/dt)[2]/following-sibling::dd/p[text() = ";; def2"]', output, 1 end test 'should allow term to end with a semicolon when using double semicolon delimiter' do input = <<~'EOS' term;;; def EOS output = convert_string_to_embedded input assert_css 'dl', output, 1 assert_css 'dl > dt', output, 1 assert_xpath '(//dl/dt)[1][text() = "term;"]', output, 1 assert_xpath 
'(//dl/dt)[1]/following-sibling::dd/p[text() = "def"]', output, 1 end test "single-line indented adjacent elements" do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS term1:: def1 term2:: def2 EOS output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 assert_xpath '(//dl/dt)[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl/dt)[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '(//dl/dt)[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl/dt)[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test "single-line indented adjacent elements with tabs" do input = <<~EOS term1::\tdef1 \tterm2::\tdef2 EOS output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 assert_xpath '(//dl/dt)[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl/dt)[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '(//dl/dt)[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl/dt)[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test "single-line elements separated by blank line should create a single list" do input = <<~'EOS' term1:: def1 term2:: def2 EOS output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 end test "a line comment between elements should divide them into separate lists" do input = <<~'EOS' term1:: def1 // term2:: def2 EOS output = convert_string input assert_xpath '//dl', output, 2 assert_xpath '//dl/dt', output, 2 assert_xpath '(//dl)[1]/dt', output, 1 assert_xpath '(//dl)[2]/dt', output, 1 end test "a ruler between elements should divide them into separate lists" do input = <<~'EOS' term1:: def1 ''' term2:: def2 EOS output = convert_string input assert_xpath '//dl', output, 2 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl//hr', output, 0 assert_xpath '(//dl)[1]/dt', output, 1 assert_xpath '(//dl)[2]/dt', output, 1 end test "a block title between elements should divide them into separate lists" do input = <<~'EOS' term1:: def1 .Some more term2:: def2 EOS output = convert_string input assert_xpath '//dl', output, 2 assert_xpath '//dl/dt', output, 2 assert_xpath '(//dl)[1]/dt', output, 1 assert_xpath '(//dl)[2]/dt', output, 1 assert_xpath '(//dl)[2]/preceding-sibling::*[@class="title"][text() = "Some more"]', output, 1 end test "multi-line elements with paragraph content" do input = <<~'EOS' term1:: def1 term2:: def2 EOS output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 assert_xpath '(//dl/dt)[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl/dt)[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '(//dl/dt)[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl/dt)[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test "multi-line elements with indented paragraph content" do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS term1:: def1 term2:: def2 EOS output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 
assert_xpath '(//dl/dt)[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl/dt)[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '(//dl/dt)[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl/dt)[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test "multi-line elements with indented paragraph content that includes comment lines" do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS term1:: def1 // comment term2:: def2 // comment def2 continued EOS output = convert_string_to_embedded input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 assert_xpath '(//dl/dt)[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl/dt)[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '(//dl/dt)[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath %((//dl/dt)[2]/following-sibling::dd/p[text() = "def2\ndef2 continued"]), output, 1 end test "should not strip comment line in literal paragraph block attached to list item" do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS term1:: + line 1 // not a comment line 3 EOS output = convert_string_to_embedded input assert_xpath '//*[@class="literalblock"]', output, 1 assert_xpath %(//*[@class="literalblock"]//pre[text()=" line 1\n// not a comment\n line 3"]), output, 1 end test 'should escape special characters in all literal paragraphs attached to list item' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS term:: desc text more text another term:: text in a paragraph EOS output = convert_string_to_embedded input assert_css 'dt', output, 2 assert_css 'code', output, 0 assert_css 'dd:first-of-type > *', output, 3 assert_css 'dd:first-of-type pre', output, 2 assert_xpath '((//dd)[1]//pre)[1][text()="text"]', output, 1 assert_xpath '((//dd)[1]//pre)[2][text()="more text"]', output, 1 assert_xpath '((//dd)[2]//p)[1][text()="text in a paragraph"]', output, 1 end test 'multi-line element with paragraph starting with multiple dashes should not be seen as list' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS term1:: def1 -- and a note term2:: def2 EOS output = convert_string_to_embedded input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 assert_xpath '(//dl/dt)[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath %((//dl/dt)[1]/following-sibling::dd/p[text() = "def1#{decode_char 8201}#{decode_char 8212}#{decode_char 8201}and a note"]), output, 1 assert_xpath '(//dl/dt)[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl/dt)[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test "multi-line element with multiple terms" do input = <<~'EOS' term1:: term2:: def2 EOS output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dd', output, 1 assert_xpath '(//dl/dt)[1]/following-sibling::dt', output, 1 assert_xpath '(//dl/dt)[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl/dt)[2]/following-sibling::dd', output, 1 assert_xpath '(//dl/dt)[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test 'consecutive terms share same varlistentry in docbook' do input = <<~'EOS' 
term:: alt term:: description last:: EOS output = convert_string_to_embedded input, backend: 'docbook' assert_xpath '//varlistentry', output, 2 assert_xpath '(//varlistentry)[1]/term', output, 2 assert_xpath '(//varlistentry)[2]/term', output, 1 assert_xpath '(//varlistentry)[2]/listitem', output, 1 assert_xpath '(//varlistentry)[2]/listitem[normalize-space(text())=""]', output, 1 end test "multi-line elements with blank line before paragraph content" do input = <<~'EOS' term1:: def1 term2:: def2 EOS output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 assert_xpath '(//dl/dt)[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl/dt)[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '(//dl/dt)[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl/dt)[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test "multi-line elements with paragraph and literal content" do # blank line following literal paragraph is required or else it will gobble up the second term # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS term1:: def1 literal term2:: def2 EOS output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 assert_xpath '//dl/dt/following-sibling::dd//pre', output, 1 assert_xpath '(//dl/dt)[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl/dt)[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '(//dl/dt)[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl/dt)[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test "mixed single and multi-line adjacent elements" do input = <<~'EOS' term1:: def1 term2:: def2 EOS output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 assert_xpath '(//dl/dt)[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl/dt)[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '(//dl/dt)[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl/dt)[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test 'should discover anchor at start of description term text and register it as a reference' do input = <<~'EOS' The highest peak in the Front Range is <>, which tops <> by just a few feet. [[mount-evans,Mount Evans]]Mount Evans:: 14,271 feet [[grays-peak]]Grays Peak:: 14,278 feet EOS doc = document_from_string input refs = doc.catalog[:refs] assert refs.key?('mount-evans') assert refs.key?('grays-peak') output = doc.convert standalone: false assert_xpath '(//p)[1]/a[@href="#grays-peak"][text()="Grays Peak"]', output, 1 assert_xpath '(//p)[1]/a[@href="#mount-evans"][text()="Mount Evans"]', output, 1 assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '(//dl/dt)[1]/a[@id="mount-evans"]', output, 1 assert_xpath '(//dl/dt)[2]/a[@id="grays-peak"]', output, 1 end test "missing space before term does not produce description list" do input = <<~'EOS' term1::def1 term2::def2 EOS output = convert_string input assert_xpath '//dl', output, 0 end test "literal block inside description list" do input = <<~'EOS' term:: + .... literal, line 1 literal, line 2 .... 
anotherterm:: def EOS output = convert_string input assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dd', output, 2 assert_xpath '//dl/dd//pre', output, 1 assert_xpath '(//dl/dd)[1]/*[@class="literalblock"]//pre', output, 1 assert_xpath '(//dl/dd)[2]/p[text() = "def"]', output, 1 end test "literal block inside description list with trailing line continuation" do input = <<~'EOS' term:: + .... literal, line 1 literal, line 2 .... + anotherterm:: def EOS output = convert_string input assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dd', output, 2 assert_xpath '//dl/dd//pre', output, 1 assert_xpath '(//dl/dd)[1]/*[@class="literalblock"]//pre', output, 1 assert_xpath '(//dl/dd)[2]/p[text() = "def"]', output, 1 end test "multiple listing blocks inside description list" do input = <<~'EOS' term:: + ---- listing, line 1 listing, line 2 ---- + ---- listing, line 1 listing, line 2 ---- anotherterm:: def EOS output = convert_string input assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dd', output, 2 assert_xpath '//dl/dd//pre', output, 2 assert_xpath '(//dl/dd)[1]/*[@class="listingblock"]//pre', output, 2 assert_xpath '(//dl/dd)[2]/p[text() = "def"]', output, 1 end test "open block inside description list" do input = <<~'EOS' term:: + -- Open block as description of term. And some more detail... -- anotherterm:: def EOS output = convert_string input assert_xpath '//dl/dd//p', output, 3 assert_xpath '(//dl/dd)[1]//*[@class="openblock"]//p', output, 2 end test "paragraph attached by a list continuation on either side in a description list" do input = <<~'EOS' term1:: def1 + more detail + term2:: def2 EOS output = convert_string input assert_xpath '(//dl/dt)[1][normalize-space(text())="term1"]', output, 1 assert_xpath '(//dl/dt)[2][normalize-space(text())="term2"]', output, 1 assert_xpath '(//dl/dd)[1]//p', output, 2 assert_xpath '((//dl/dd)[1]//p)[1][text()="def1"]', output, 1 assert_xpath '(//dl/dd)[1]/p/following-sibling::*[@class="paragraph"]/p[text() = "more detail"]', output, 1 end test "paragraph attached by a list continuation on either side to a multi-line element in a description list" do input = <<~'EOS' term1:: def1 + more detail + term2:: def2 EOS output = convert_string input assert_xpath '(//dl/dt)[1][normalize-space(text())="term1"]', output, 1 assert_xpath '(//dl/dt)[2][normalize-space(text())="term2"]', output, 1 assert_xpath '(//dl/dd)[1]//p', output, 2 assert_xpath '((//dl/dd)[1]//p)[1][text()="def1"]', output, 1 assert_xpath '(//dl/dd)[1]/p/following-sibling::*[@class="paragraph"]/p[text() = "more detail"]', output, 1 end test 'should continue to parse subsequent blocks attached to list item after first block is dropped' do input = <<~'EOS' :attribute-missing: drop-line term:: + image::{unresolved}[] + paragraph EOS output = convert_string_to_embedded input assert_css 'dl', output, 1 assert_css 'dl > dt', output, 1 assert_css 'dl > dt + dd', output, 1 assert_css 'dl > dt + dd > .imageblock', output, 0 assert_css 'dl > dt + dd > .paragraph', output, 1 end test "verse paragraph inside a description list" do input = <<~'EOS' term1:: def + [verse] la la la term2:: def EOS output = convert_string input assert_xpath '//dl/dd//p', output, 2 assert_xpath '(//dl/dd)[1]/*[@class="verseblock"]/pre[text() = "la la la"]', output, 1 end test "list inside a description list" do input = <<~'EOS' term1:: * level 1 ** level 2 * level 1 term2:: def EOS output = convert_string input assert_xpath '//dl/dd', output, 2 assert_xpath '//dl/dd/p', output, 1 assert_xpath '(//dl/dd)[1]//ul', 
output, 2 assert_xpath '((//dl/dd)[1]//ul)[1]//ul', output, 1 end test "list inside a description list offset by blank lines" do input = <<~'EOS' term1:: * level 1 ** level 2 * level 1 term2:: def EOS output = convert_string input assert_xpath '//dl/dd', output, 2 assert_xpath '//dl/dd/p', output, 1 assert_xpath '(//dl/dd)[1]//ul', output, 2 assert_xpath '((//dl/dd)[1]//ul)[1]//ul', output, 1 end test "should only grab one line following last item if item has no inline description" do input = <<~'EOS' term1:: def1 term2:: def2 A new paragraph Another new paragraph EOS output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dd', output, 2 assert_xpath '(//dl/dd)[1]/p[text() = "def1"]', output, 1 assert_xpath '(//dl/dd)[2]/p[text() = "def2"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="paragraph"]', output, 2 assert_xpath '(//*[@class="dlist"]/following-sibling::*[@class="paragraph"])[1]/p[text() = "A new paragraph"]', output, 1 assert_xpath '(//*[@class="dlist"]/following-sibling::*[@class="paragraph"])[2]/p[text() = "Another new paragraph"]', output, 1 end test "should only grab one literal line following last item if item has no inline description" do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS term1:: def1 term2:: def2 A new paragraph Another new paragraph EOS output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dd', output, 2 assert_xpath '(//dl/dd)[1]/p[text() = "def1"]', output, 1 assert_xpath '(//dl/dd)[2]/p[text() = "def2"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="paragraph"]', output, 2 assert_xpath '(//*[@class="dlist"]/following-sibling::*[@class="paragraph"])[1]/p[text() = "A new paragraph"]', output, 1 assert_xpath '(//*[@class="dlist"]/following-sibling::*[@class="paragraph"])[2]/p[text() = "Another new paragraph"]', output, 1 end test "should append subsequent paragraph literals to list item as block content" do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS term1:: def1 term2:: def2 literal A new paragraph. 
EOS output = convert_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dd', output, 2 assert_xpath '(//dl/dd)[1]/p[text() = "def1"]', output, 1 assert_xpath '(//dl/dd)[2]/p[text() = "def2"]', output, 1 assert_xpath '(//dl/dd)[2]/p/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath '(//dl/dd)[2]/p/following-sibling::*[@class="literalblock"]//pre[text() = "literal"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '(//*[@class="dlist"]/following-sibling::*[@class="paragraph"])[1]/p[text() = "A new paragraph."]', output, 1 end test 'should not match comment line that looks like description list term' do input = <<~'EOS' before //key:: val after EOS output = convert_string_to_embedded input assert_css 'dl', output, 0 end test 'should not match comment line following list that looks like description list term' do input = <<~'EOS' * item //term:: desc == Section section text EOS output = convert_string_to_embedded input assert_xpath '/*[@class="ulist"]', output, 1 assert_xpath '/*[@class="sect1"]', output, 1 assert_xpath '/*[@class="sect1"]/h2[text()="Section"]', output, 1 assert_xpath '/*[@class="ulist"]/following-sibling::*[@class="sect1"]', output, 1 end test 'should not match comment line that looks like sibling description list term' do input = <<~'EOS' before foo:: bar //yin:: yang after EOS output = convert_string_to_embedded input assert_css '.dlist', output, 1 assert_css '.dlist dt', output, 1 refute_includes output, 'yin' end test 'should not hang on description list item in list that begins with ///' do input = <<~'EOS' * a ///b:: c EOS output = convert_string_to_embedded input assert_css 'ul', output, 1 assert_css 'ul li dl', output, 1 assert_xpath '//ul/li/p[text()="a"]', output, 1 assert_xpath '//dt[text()="///b"]', output, 1 assert_xpath '//dd/p[text()="c"]', output, 1 end test 'should not hang on sibling description list item that begins with ///' do input = <<~'EOS' a:: ///b:: c EOS output = convert_string_to_embedded input assert_css 'dl', output, 1 assert_xpath '(//dl/dt)[1][text()="a"]', output, 1 assert_xpath '(//dl/dt)[2][text()="///b"]', output, 1 assert_xpath '//dl/dd/p[text()="c"]', output, 1 end test 'should skip dlist term that begins with // unless it begins with ///' do input = <<~'EOS' category a:: //ignored term:: def category b:: ///term:: def EOS output = convert_string_to_embedded input refute_includes output, 'ignored term' assert_xpath '//dt[text()="///term"]', output, 1 end test 'more than 4 consecutive colons should become part of description list term' do input = <<~'EOS' A term::::: a description EOS output = convert_string_to_embedded input assert_css 'dl', output, 1 assert_css 'dl > dt', output, 1 assert_xpath '//dl/dt[text()="A term:"]', output, 1 assert_xpath '//dl/dd/p[text()="a description"]', output, 1 end test 'text method of dd node should return nil if dd node only contains blocks' do input = <<~'EOS' term:: + paragraph EOS doc = document_from_string input dd = doc.blocks[0].items[0][1] assert_nil dd.text end test 'should match trailing line separator in text of list item' do input = <<~EOS.chop A:: a B:: b#{decode_char 8232} C:: c EOS output = convert_string input assert_css 'dd', output, 3 assert_xpath %((//dd)[2]/p[text()="b#{decode_char 8232}"]), output, 1 end test 'should match line separator in text of list item' do input = <<~EOS.chop A:: a B:: b#{decode_char 8232}b C:: c EOS output = convert_string input assert_css 'dd', output, 3 assert_xpath 
%((//dd)[2]/p[text()="b#{decode_char 8232}b"]), output, 1 end end context "Nested lists" do test 'should not parse a nested dlist delimiter without a term as a dlist' do input = <<~'EOS' t:: ;; EOS output = convert_string_to_embedded input assert_xpath '//dl', output, 1 assert_xpath '//dl/dd/p[text()=";;"]', output, 1 end test 'should not parse a nested indented dlist delimiter without a term as a dlist' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS t:: desc ;; EOS output = convert_string_to_embedded input assert_xpath '//dl', output, 1 assert_xpath %(//dl/dd/p[text()="desc\n ;;"]), output, 1 end test "single-line adjacent nested elements" do input = <<~'EOS' term1:: def1 label1::: detail1 term2:: def2 EOS output = convert_string input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl)[1]/dt[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '//dl//dl/dt[normalize-space(text()) = "label1"]', output, 1 assert_xpath '//dl//dl/dt/following-sibling::dd/p[text() = "detail1"]', output, 1 assert_xpath '(//dl)[1]/dt[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl)[1]/dt[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test "single-line adjacent maximum nested elements" do input = <<~'EOS' term1:: def1 label1::: detail1 name1:::: value1 item1;; price1 term2:: def2 EOS output = convert_string input assert_xpath '//dl', output, 4 assert_xpath '//dl//dl//dl//dl', output, 1 end test 'single-line nested elements separated by blank line at top level' do input = <<~'EOS' term1:: def1 label1::: detail1 term2:: def2 EOS output = convert_string input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl)[1]/dt[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '//dl//dl/dt[normalize-space(text()) = "label1"]', output, 1 assert_xpath '//dl//dl/dt/following-sibling::dd/p[text() = "detail1"]', output, 1 assert_xpath '(//dl)[1]/dt[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl)[1]/dt[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test 'single-line nested elements separated by blank line at nested level' do input = <<~'EOS' term1:: def1 label1::: detail1 label2::: detail2 term2:: def2 EOS output = convert_string input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl)[1]/dt[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '//dl//dl/dt[normalize-space(text()) = "label1"]', output, 1 assert_xpath '//dl//dl/dt/following-sibling::dd/p[text() = "detail1"]', output, 1 assert_xpath '(//dl)[1]/dt[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl)[1]/dt[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test "single-line adjacent nested elements with alternate delimiters" do input = <<~'EOS' term1:: def1 label1;; detail1 term2:: def2 EOS output = convert_string input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl)[1]/dt[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '//dl//dl/dt[normalize-space(text()) = "label1"]', output, 1 assert_xpath 
'//dl//dl/dt/following-sibling::dd/p[text() = "detail1"]', output, 1 assert_xpath '(//dl)[1]/dt[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl)[1]/dt[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test "multi-line adjacent nested elements" do input = <<~'EOS' term1:: def1 label1::: detail1 term2:: def2 EOS output = convert_string input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl)[1]/dt[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '//dl//dl/dt[normalize-space(text()) = "label1"]', output, 1 assert_xpath '//dl//dl/dt/following-sibling::dd/p[text() = "detail1"]', output, 1 assert_xpath '(//dl)[1]/dt[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl)[1]/dt[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test 'multi-line nested elements separated by blank line at nested level repeated' do input = <<~'EOS' term1:: def1 label1::: detail1 label2::: detail2 term2:: def2 EOS output = convert_string input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl)[1]/dt[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '(//dl//dl/dt)[1][normalize-space(text()) = "label1"]', output, 1 assert_xpath '(//dl//dl/dt)[1]/following-sibling::dd/p[text() = "detail1"]', output, 1 assert_xpath '(//dl//dl/dt)[2][normalize-space(text()) = "label2"]', output, 1 assert_xpath '(//dl//dl/dt)[2]/following-sibling::dd/p[text() = "detail2"]', output, 1 end test "multi-line element with indented nested element" do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS term1:: def1 label1;; detail1 term2:: def2 EOS output = convert_string input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt', output, 2 assert_xpath '(//dl)[1]/dd', output, 2 assert_xpath '((//dl)[1]/dt)[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '((//dl)[1]/dt)[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '//dl//dl/dt', output, 1 assert_xpath '//dl//dl/dt[normalize-space(text()) = "label1"]', output, 1 assert_xpath '//dl//dl/dt/following-sibling::dd/p[text() = "detail1"]', output, 1 assert_xpath '((//dl)[1]/dt)[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '((//dl)[1]/dt)[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test "mixed single and multi-line elements with indented nested elements" do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS term1:: def1 label1::: detail1 term2:: def2 EOS output = convert_string input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl)[1]/dt[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '//dl//dl/dt[normalize-space(text()) = "label1"]', output, 1 assert_xpath '//dl//dl/dt/following-sibling::dd/p[text() = "detail1"]', output, 1 assert_xpath '(//dl)[1]/dt[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl)[1]/dt[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test "multi-line elements with first paragraph folded to text with adjacent nested element" do input = <<~'EOS' term1:: def1 continued label1::: 
detail1 EOS output = convert_string_to_embedded input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl)[1]/dt[1]/following-sibling::dd/p[starts-with(text(), "def1")]', output, 1 assert_xpath '(//dl)[1]/dt[1]/following-sibling::dd/p[contains(text(), "continued")]', output, 1 assert_xpath '//dl//dl/dt[normalize-space(text()) = "label1"]', output, 1 assert_xpath '//dl//dl/dt/following-sibling::dd/p[text() = "detail1"]', output, 1 end end context 'Special lists' do test 'should convert glossary list with proper semantics' do input = <<~'EOS' [glossary] term 1:: def 1 term 2:: def 2 EOS output = convert_string_to_embedded input assert_css '.dlist.glossary', output, 1 assert_css '.dlist dt:not([class])', output, 2 end test 'consecutive glossary terms should share same glossentry element in docbook' do input = <<~'EOS' [glossary] term:: alt term:: description last:: EOS output = convert_string_to_embedded input, backend: 'docbook' assert_xpath '/glossentry', output, 2 assert_xpath '(/glossentry)[1]/glossterm', output, 2 assert_xpath '(/glossentry)[2]/glossterm', output, 1 assert_xpath '(/glossentry)[2]/glossdef', output, 1 assert_xpath '(/glossentry)[2]/glossdef[normalize-space(text())=""]', output, 1 end test 'should convert horizontal list with proper markup' do input = <<~'EOS' [horizontal] first term:: description + more detail second term:: description EOS output = convert_string_to_embedded input assert_css '.hdlist', output, 1 assert_css '.hdlist table', output, 1 assert_css '.hdlist table colgroup', output, 0 assert_css '.hdlist table tr', output, 2 # see nokogiri#1803 for why this is necessary tbody_path = jruby? ? 'tbody/' : '' refute_includes output, '<tbody>' assert_xpath %(/*[@class="hdlist"]/table/#{tbody_path}tr[1]/td), output, 2 assert_xpath %(/*[@class="hdlist"]/table/#{tbody_path}tr[1]/td[@class="hdlist1"]), output, 1 assert_xpath %(/*[@class="hdlist"]/table/#{tbody_path}tr[1]/td[@class="hdlist2"]), output, 1 assert_xpath %(/*[@class="hdlist"]/table/#{tbody_path}tr[1]/td[@class="hdlist2"]/p), output, 1 assert_xpath %(/*[@class="hdlist"]/table/#{tbody_path}tr[1]/td[@class="hdlist2"]/p/following-sibling::*[@class="paragraph"]), output, 1 assert_xpath '((//tr)[1]/td)[1][normalize-space(text())="first term"]', output, 1 assert_xpath '((//tr)[1]/td)[2]/p[normalize-space(text())="description"]', output, 1 assert_xpath %(/*[@class="hdlist"]/table/#{tbody_path}tr[2]/td), output, 2 assert_xpath '((//tr)[2]/td)[1][normalize-space(text())="second term"]', output, 1 assert_xpath '((//tr)[2]/td)[2]/p[normalize-space(text())="description"]', output, 1 end test 'should set col widths of item and label if specified' do input = <<~'EOS' [horizontal] [labelwidth="25", itemwidth="75"] term:: def EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup', output, 1 assert_css 'table > colgroup > col', output, 2 assert_xpath '(//table/colgroup/col)[1][@style="width: 25%;"]', output, 1 assert_xpath '(//table/colgroup/col)[2][@style="width: 75%;"]', output, 1 end test 'should set col widths of item and label in docbook if specified' do input = <<~'EOS' [horizontal] [labelwidth="25", itemwidth="75"] term:: def EOS output = convert_string_to_embedded input, backend: 'docbook' assert_css 'informaltable', output, 1 assert_css 'informaltable > tgroup', output, 1 assert_css 'informaltable > tgroup > colspec', output, 2 assert_xpath
'(/informaltable/tgroup/colspec)[1][@colwidth="25*"]', output, 1 assert_xpath '(/informaltable/tgroup/colspec)[2][@colwidth="75*"]', output, 1 end test 'should add strong class to label if strong option is set' do input = <<~'EOS' [horizontal, options="strong"] term:: def EOS output = convert_string_to_embedded input assert_css '.hdlist', output, 1 assert_css '.hdlist td.hdlist1.strong', output, 1 end test 'consecutive terms in horizontal list should share same cell' do input = <<~'EOS' [horizontal] term:: alt term:: description last:: EOS output = convert_string_to_embedded input assert_xpath '//tr', output, 2 assert_xpath '(//tr)[1]/td[@class="hdlist1"]', output, 1 # NOTE I'm trimming the trailing
    in Asciidoctor #assert_xpath '(//tr)[1]/td[@class="hdlist1"]/br', output, 2 assert_xpath '(//tr)[1]/td[@class="hdlist1"]/br', output, 1 assert_xpath '(//tr)[2]/td[@class="hdlist2"]', output, 1 end test 'consecutive terms in horizontal list should share same entry in docbook' do input = <<~'EOS' [horizontal] term:: alt term:: description last:: EOS output = convert_string_to_embedded input, backend: 'docbook' assert_xpath '//row', output, 2 assert_xpath '(//row)[1]/entry', output, 2 assert_xpath '((//row)[1]/entry)[1]/simpara', output, 2 assert_xpath '(//row)[2]/entry', output, 2 assert_xpath '((//row)[2]/entry)[2][normalize-space(text())=""]', output, 1 end test 'should convert horizontal list in docbook with proper markup' do input = <<~'EOS' .Terms [horizontal] first term:: description + more detail second term:: description EOS output = convert_string_to_embedded input, backend: 'docbook' assert_xpath '/table', output, 1 assert_xpath '/table[@tabstyle="horizontal"]', output, 1 assert_xpath '/table[@tabstyle="horizontal"]/title[text()="Terms"]', output, 1 assert_xpath '/table//row', output, 2 assert_xpath '(/table//row)[1]/entry', output, 2 assert_xpath '(/table//row)[2]/entry', output, 2 assert_xpath '((/table//row)[1]/entry)[2]/simpara', output, 2 end test 'should convert qanda list in HTML with proper semantics' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS [qanda] Question 1:: Answer 1. Question 2:: Answer 2. + NOTE: A note about Answer 2. EOS output = convert_string_to_embedded input assert_css '.qlist.qanda', output, 1 assert_css '.qanda > ol', output, 1 assert_css '.qanda > ol > li', output, 2 (1..2).each do |idx| assert_css ".qanda > ol > li:nth-child(#{idx}) > p", output, 2 assert_css ".qanda > ol > li:nth-child(#{idx}) > p:first-child > em", output, 1 assert_xpath "/*[@class = 'qlist qanda']/ol/li[#{idx}]/p[1]/em[normalize-space(text()) = 'Question #{idx}']", output, 1 assert_css ".qanda > ol > li:nth-child(#{idx}) > p:last-child > *", output, 0 assert_xpath "/*[@class = 'qlist qanda']/ol/li[#{idx}]/p[2][normalize-space(text()) = 'Answer #{idx}.']", output, 1 end assert_xpath "/*[@class = 'qlist qanda']/ol/li[2]/p[2]/following-sibling::div[@class='admonitionblock note']", output, 1 end test 'should convert qanda list in DocBook with proper semantics' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS [qanda] Question 1:: Answer 1. Question 2:: Answer 2. + NOTE: A note about Answer 2. 
EOS output = convert_string_to_embedded input, backend: 'docbook' assert_css 'qandaset', output, 1 assert_css 'qandaset > qandaentry', output, 2 (1..2).each do |idx| assert_css "qandaset > qandaentry:nth-child(#{idx}) > question", output, 1 assert_css "qandaset > qandaentry:nth-child(#{idx}) > question > simpara", output, 1 assert_xpath "/qandaset/qandaentry[#{idx}]/question/simpara[normalize-space(text()) = 'Question #{idx}']", output, 1 assert_css "qandaset > qandaentry:nth-child(#{idx}) > answer", output, 1 assert_css "qandaset > qandaentry:nth-child(#{idx}) > answer > simpara", output, 1 assert_xpath "/qandaset/qandaentry[#{idx}]/answer/simpara[normalize-space(text()) = 'Answer #{idx}.']", output, 1 end assert_xpath "/qandaset/qandaentry[2]/answer/simpara/following-sibling::note", output, 1 end test 'consecutive questions should share same question element in docbook' do input = <<~'EOS' [qanda] question:: follow-up question:: response last question:: EOS output = convert_string_to_embedded input, backend: 'docbook' assert_xpath '//qandaentry', output, 2 assert_xpath '(//qandaentry)[1]/question', output, 1 assert_xpath '(//qandaentry)[1]/question/simpara', output, 2 assert_xpath '(//qandaentry)[2]/question', output, 1 assert_xpath '(//qandaentry)[2]/answer', output, 1 assert_xpath '(//qandaentry)[2]/answer[normalize-space(text())=""]', output, 1 end test 'should convert bibliography list with proper semantics' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS [bibliography] - [[[taoup]]] Eric Steven Raymond. _The Art of Unix Programming_. Addison-Wesley. ISBN 0-13-142901-9. - [[[walsh-muellner]]] Norman Walsh & Leonard Muellner. _DocBook - The Definitive Guide_. O'Reilly & Associates. 1999. ISBN 1-56592-580-7. EOS output = convert_string_to_embedded input assert_css '.ulist.bibliography', output, 1 assert_css '.ulist.bibliography ul', output, 1 assert_css '.ulist.bibliography ul li', output, 2 assert_css '.ulist.bibliography ul li p', output, 2 assert_css '.ulist.bibliography ul li:nth-child(1) p a#taoup', output, 1 assert_xpath '//a/*', output, 0 assert_xpath '(//a)[1][starts-with(following-sibling::text(), "[taoup] ")]', output, 1 end test 'should convert bibliography list with proper semantics to DocBook' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS [bibliography] - [[[taoup]]] Eric Steven Raymond. _The Art of Unix Programming_. Addison-Wesley. ISBN 0-13-142901-9. - [[[walsh-muellner]]] Norman Walsh & Leonard Muellner. _DocBook - The Definitive Guide_. O'Reilly & Associates. 1999. ISBN 1-56592-580-7. 
EOS output = convert_string_to_embedded input, backend: 'docbook' assert_css 'bibliodiv', output, 1 assert_css 'bibliodiv > bibliomixed', output, 2 assert_css 'bibliodiv > bibliomixed > bibliomisc', output, 2 assert_css 'bibliodiv > bibliomixed:nth-child(1) > bibliomisc > anchor', output, 1 assert_css 'bibliodiv > bibliomixed:nth-child(1) > bibliomisc > anchor[xreflabel="[taoup]"]', output, 1 assert_xpath '(//bibliomixed)[1]/bibliomisc/anchor[starts-with(following-sibling::text(), "[taoup] Eric")]', output, 1 assert_css 'bibliodiv > bibliomixed:nth-child(2) > bibliomisc > anchor', output, 1 assert_css 'bibliodiv > bibliomixed:nth-child(2) > bibliomisc > anchor[xreflabel="[walsh-muellner]"]', output, 1 assert_xpath '(//bibliomixed)[2]/bibliomisc/anchor[starts-with(following-sibling::text(), "[walsh-muellner] Norman")]', output, 1 end test 'should warn if a bibliography ID is already in use' do input = <<~'EOS' [bibliography] * [[[Fowler]]] Fowler M. _Analysis Patterns: Reusable Object Models_. Addison-Wesley. 1997. * [[[Fowler]]] Fowler M. _Analysis Patterns: Reusable Object Models_. Addison-Wesley. 1997. EOS using_memory_logger do |logger| output = convert_string_to_embedded input assert_css '.ulist.bibliography', output, 1 assert_css '.ulist.bibliography ul li:nth-child(1) p a#Fowler', output, 1 assert_css '.ulist.bibliography ul li:nth-child(2) p a#Fowler', output, 1 assert_message logger, :WARN, '<stdin>: line 4: id assigned to bibliography anchor already in use: Fowler', Hash end end test 'should automatically add bibliography style to top-level lists in bibliography section' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS [bibliography] == Bibliography .Books * [[[taoup]]] Eric Steven Raymond. _The Art of Unix Programming_. Addison-Wesley. ISBN 0-13-142901-9. * [[[walsh-muellner]]] Norman Walsh & Leonard Muellner. _DocBook - The Definitive Guide_. O'Reilly & Associates. 1999. ISBN 1-56592-580-7. .Periodicals * [[[doc-writer]]] Doc Writer. _Documentation As Code_. Static Times, 54. August 2016. EOS doc = document_from_string input ulists = doc.find_by context: :ulist assert_equal 2, ulists.size assert_equal ulists[0].style, 'bibliography' assert_equal ulists[1].style, 'bibliography' end test 'should not recognize bibliography anchor that begins with a digit' do input = <<~'EOS' [bibliography] - [[[1984]]] George Orwell. _1984_. New American Library. 1950. EOS output = convert_string_to_embedded input assert_includes output, '[[[1984]]]' assert_xpath '//a[@id="1984"]', output, 0 end test 'should recognize bibliography anchor that contains a digit but does not start with one' do input = <<~'EOS' [bibliography] - [[[_1984]]] George Orwell. __1984__. New American Library. 1950. EOS output = convert_string_to_embedded input refute_includes output, '[[[_1984]]]' assert_includes output, '[_1984]' assert_xpath '//a[@id="_1984"]', output, 1 end test 'should catalog bibliography anchors in bibliography list' do input = <<~'EOS' = Article Title Please read <<Fowler_1997>>. [bibliography] == References * [[[Fowler_1997]]] Fowler M. _Analysis Patterns: Reusable Object Models_. Addison-Wesley. 1997. EOS doc = document_from_string input assert doc.catalog[:refs].key? 'Fowler_1997' end test 'should use reftext from bibliography anchor at xref and entry' do input = <<~'EOS' = Article Title Begin with <<TMMM>>. Then move on to <<Fowler_1997>>. [bibliography] == References * [[[TMMM]]] Brooks F. _The Mythical Man-Month_. Addison-Wesley. 1975. * [[[Fowler_1997,1]]] Fowler M.
_Analysis Patterns: Reusable Object Models_. Addison-Wesley. 1997. EOS doc = document_from_string input, standalone: false tmmm_ref = doc.catalog[:refs]['TMMM'] refute_nil tmmm_ref assert_nil tmmm_ref.reftext fowler_1997_ref = doc.catalog[:refs]['Fowler_1997'] refute_nil fowler_1997_ref assert_equal '[1]', fowler_1997_ref.reftext result = doc.convert standalone: false assert_xpath '//a[@href="#Fowler_1997"]', result, 1 assert_xpath '//a[@href="#Fowler_1997"][text()="[1]"]', result, 1 assert_xpath '//a[@id="Fowler_1997"]', result, 1 assert_xpath '(//a[@id="Fowler_1997"])[1][starts-with(following-sibling::text(), "[1] ")]', result, 1 assert_xpath '//a[@href="#TMMM"]', result, 1 assert_xpath '//a[@href="#TMMM"][text()="[TMMM]"]', result, 1 assert_xpath '//a[@id="TMMM"]', result, 1 assert_xpath '(//a[@id="TMMM"])[1][starts-with(following-sibling::text(), "[TMMM] ")]', result, 1 end test 'should assign reftext of bibliography anchor to xreflabel in DocBook backend' do input = <<~'EOS' [bibliography] * [[[Fowler_1997,1]]] Fowler M. _Analysis Patterns: Reusable Object Models_. Addison-Wesley. 1997. EOS result = convert_string_to_embedded input, backend: :docbook assert_includes result, '[1] Fowler' end end end context 'Description lists redux' do context 'Label without text on same line' do test 'folds text from subsequent line' do input = <<~'EOS' == Lists term1:: def1 EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 end test 'folds text from first line after blank lines' do input = <<~'EOS' == Lists term1:: def1 EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 end test 'folds text from first line after blank line and immediately preceding next item' do input = <<~'EOS' == Lists term1:: def1 term2:: def2 EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 2 assert_xpath '(//*[@class="dlist"]//dd)[1]/p[text()="def1"]', output, 1 end test 'paragraph offset by blank lines does not break list if label does not have inline text' do input = <<~'EOS' == Lists term1:: def1 term2:: def2 EOS output = convert_string_to_embedded input assert_css 'dl', output, 1 assert_css 'dl > dt', output, 2 assert_css 'dl > dd', output, 2 assert_xpath '(//dl/dd)[1]/p[text()="def1"]', output, 1 end test 'folds text from first line after comment line' do input = <<~'EOS' == Lists term1:: // comment def1 EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 end test 'folds text from line following comment line offset by blank line' do input = <<~'EOS' == Lists term1:: // comment def1 EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 end test 'folds text from subsequent indented line' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS == Lists term1:: def1 EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', 
output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 end test 'folds text from indented line after blank line' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS == Lists term1:: def1 EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 end test 'folds text that looks like ruler offset by blank line' do input = <<~'EOS' == Lists term1:: ''' EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath %(//*[@class="dlist"]//dd/p[text()="'''"]), output, 1 end test 'folds text that looks like ruler offset by blank line and line comment' do input = <<~'EOS' == Lists term1:: // comment ''' EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath %(//*[@class="dlist"]//dd/p[text()="'''"]), output, 1 end test 'folds text that looks like ruler and the line following it offset by blank line' do input = <<~'EOS' == Lists term1:: ''' continued EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath %(//*[@class="dlist"]//dd/p[normalize-space(text())="''' continued"]), output, 1 end test 'folds text that looks like title offset by blank line' do input = <<~'EOS' == Lists term1:: .def1 EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()=".def1"]', output, 1 end test 'folds text that looks like title offset by blank line and line comment' do input = <<~'EOS' == Lists term1:: // comment .def1 EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()=".def1"]', output, 1 end test 'folds text that looks like admonition offset by blank line' do input = <<~'EOS' == Lists term1:: NOTE: def1 EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="NOTE: def1"]', output, 1 end test 'folds text that looks like section title offset by blank line' do input = <<~'EOS' == Lists term1:: == Another Section EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="== Another Section"]', output, 1 assert_xpath '//h2', output, 1 end test 'folds text of first literal line offset by blank line appends subsequent literals offset by blank line as blocks' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS == Lists term1:: def1 literal literal EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="literalblock"]', output, 2 
assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="literalblock"]//pre[text()="literal"]', output, 2 end test 'folds text of subsequent line and appends following literal line offset by blank line as block if term has no inline description' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS == Lists term1:: def1 literal term2:: def2 EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 2 assert_xpath '(//*[@class="dlist"]//dd)[1]/p[text()="def1"]', output, 1 assert_xpath '(//*[@class="dlist"]//dd)[1]/p/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath '(//*[@class="dlist"]//dd)[1]/p/following-sibling::*[@class="literalblock"]//pre[text()="literal"]', output, 1 end test 'appends literal line attached by continuation as block if item has no inline description' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS == Lists term1:: + literal EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 assert_xpath '//*[@class="dlist"]//dd/*[@class="literalblock"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="literalblock"]//pre[text()="literal"]', output, 1 end test 'appends literal line attached by continuation as block if item has no inline description followed by ruler' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS == Lists term1:: + literal ''' EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 assert_xpath '//*[@class="dlist"]//dd/*[@class="literalblock"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="literalblock"]//pre[text()="literal"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::hr', output, 1 end test 'appends line attached by continuation as block if item has no inline description followed by ruler' do input = <<~'EOS' == Lists term1:: + para ''' EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 assert_xpath '//*[@class="dlist"]//dd/*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="paragraph"]/p[text()="para"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::hr', output, 1 end test 'appends line attached by continuation as block if item has no inline description followed by block' do input = <<~'EOS' == Lists term1:: + para .... literal .... 
EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 assert_xpath '//*[@class="dlist"]//dd/*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="paragraph"]/p[text()="para"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="literalblock"]//pre[text()="literal"]', output, 1 end test 'appends block attached by continuation but not subsequent block not attached by continuation' do input = <<~'EOS' == Lists term1:: + .... literal .... .... detached .... EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 assert_xpath '//*[@class="dlist"]//dd/*[@class="literalblock"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="literalblock"]//pre[text()="literal"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="literalblock"]//pre[text()="detached"]', output, 1 end test 'appends list if item has no inline description' do input = <<~'EOS' == Lists term1:: * one * two * three EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 assert_xpath '//*[@class="dlist"]//dd//ul/li', output, 3 end test 'appends list to first term when followed immediately by second term' do input = <<~'EOS' == Lists term1:: * one * two * three term2:: def2 EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 2 assert_xpath '(//*[@class="dlist"]//dd)[1]/p', output, 0 assert_xpath '(//*[@class="dlist"]//dd)[1]//ul/li', output, 3 assert_xpath '(//*[@class="dlist"]//dd)[2]/p[text()="def2"]', output, 1 end test 'appends indented list to first term that is adjacent to second term' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS == Lists label 1:: description 1 * one * two * three label 2:: description 2 paragraph EOS output = convert_string_to_embedded input assert_css '.dlist > dl', output, 1 assert_css '.dlist dt', output, 2 assert_xpath '(//*[@class="dlist"]//dt)[1][normalize-space(text())="label 1"]', output, 1 assert_xpath '(//*[@class="dlist"]//dt)[2][normalize-space(text())="label 2"]', output, 1 assert_css '.dlist dd', output, 2 assert_xpath '(//*[@class="dlist"]//dd)[1]/p[text()="description 1"]', output, 1 assert_xpath '(//*[@class="dlist"]//dd)[2]/p[text()="description 2"]', output, 1 assert_xpath '(//*[@class="dlist"]//dd)[1]/p/following-sibling::*[@class="ulist"]', output, 1 assert_xpath '(//*[@class="dlist"]//dd)[1]/p/following-sibling::*[@class="ulist"]//li', output, 3 assert_css '.dlist + .paragraph', output, 1 end test 'appends indented list to first term that is attached by a continuation and adjacent to second term' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS == Lists label 1:: description 1 + * one * two * three label 2:: description 2 paragraph EOS output = convert_string_to_embedded input assert_css '.dlist > 
dl', output, 1 assert_css '.dlist dt', output, 2 assert_xpath '(//*[@class="dlist"]//dt)[1][normalize-space(text())="label 1"]', output, 1 assert_xpath '(//*[@class="dlist"]//dt)[2][normalize-space(text())="label 2"]', output, 1 assert_css '.dlist dd', output, 2 assert_xpath '(//*[@class="dlist"]//dd)[1]/p[text()="description 1"]', output, 1 assert_xpath '(//*[@class="dlist"]//dd)[2]/p[text()="description 2"]', output, 1 assert_xpath '(//*[@class="dlist"]//dd)[1]/p/following-sibling::*[@class="ulist"]', output, 1 assert_xpath '(//*[@class="dlist"]//dd)[1]/p/following-sibling::*[@class="ulist"]//li', output, 3 assert_css '.dlist + .paragraph', output, 1 end test 'appends list and paragraph block when line following list attached by continuation' do input = <<~'EOS' == Lists term1:: * one * two * three + para EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 assert_xpath '//*[@class="dlist"]//dd/*[@class="ulist"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="ulist"]/ul/li', output, 3 assert_xpath '//*[@class="dlist"]//dd/*[@class="ulist"]/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="ulist"]/following-sibling::*[@class="paragraph"]/p[text()="para"]', output, 1 end test 'first continued line associated with nested list item and second continued line associated with term' do input = <<~'EOS' == Lists term1:: * one + nested list para + term1 para EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 assert_xpath '//*[@class="dlist"]//dd/*[@class="ulist"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="ulist"]/ul/li', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="ulist"]/ul/li/*[@class="paragraph"]/p[text()="nested list para"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="ulist"]/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="ulist"]/following-sibling::*[@class="paragraph"]/p[text()="term1 para"]', output, 1 end test 'literal line attached by continuation swallows adjacent line that looks like term' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS == Lists term1:: + literal notnestedterm::: + literal notnestedterm::: EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 assert_xpath '//*[@class="dlist"]//dd/*[@class="literalblock"]', output, 2 assert_xpath %(//*[@class="dlist"]//dd/*[@class="literalblock"]//pre[text()=" literal\nnotnestedterm:::"]), output, 2 end test 'line attached by continuation is appended as paragraph if term has no inline description' do input = <<~'EOS' == Lists term1:: + para EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 assert_xpath '//*[@class="dlist"]//dd/*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="paragraph"]/p[text()="para"]', output, 1 end test 'attached paragraph does not break on adjacent nested description list term' do input = 
<<~'EOS' term1:: def + more description not a term::: def EOS output = convert_string_to_embedded input assert_css '.dlist > dl > dt', output, 1 assert_css '.dlist > dl > dd', output, 1 assert_css '.dlist > dl > dd > .paragraph', output, 1 assert_includes output, 'not a term::: def' end # FIXME this is a negative test; the behavior should be the other way around test 'attached paragraph is terminated by adjacent sibling description list term' do input = <<~'EOS' term1:: def + more description not a term:: def EOS output = convert_string_to_embedded input assert_css '.dlist > dl > dt', output, 2 assert_css '.dlist > dl > dd', output, 2 assert_css '.dlist > dl > dd > .paragraph', output, 1 refute_includes output, 'not a term:: def' end test 'attached styled paragraph does not break on adjacent nested description list term' do input = <<~'EOS' term1:: def + [quote] more description not a term::: def EOS output = convert_string_to_embedded input assert_css '.dlist > dl > dt', output, 1 assert_css '.dlist > dl > dd', output, 1 assert_css '.dlist > dl > dd > .quoteblock', output, 1 assert_includes output, 'not a term::: def' end test 'appends line as paragraph if attached by continuation following blank line and line comment when term has no inline description' do input = <<~'EOS' == Lists term1:: // comment + para EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 assert_xpath '//*[@class="dlist"]//dd/*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="paragraph"]/p[text()="para"]', output, 1 end test 'line attached by continuation offset by blank line is appended as paragraph if term has no inline description' do input = <<~'EOS' == Lists term1:: + para EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 assert_xpath '//*[@class="dlist"]//dd/*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="paragraph"]/p[text()="para"]', output, 1 end test 'delimited block breaks list even when term has no inline description' do input = <<~'EOS' == Lists term1:: ==== detached ==== EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 0 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="exampleblock"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="exampleblock"]//p[text()="detached"]', output, 1 end test 'attribute line breaks list even when term has no inline description' do input = <<~'EOS' == Lists term1:: [verse] detached EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 0 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="verseblock"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="verseblock"]/pre[text()="detached"]', output, 1 end test 'id line breaks list even when term has no inline description' do input = <<~'EOS' == Lists term1:: [[id]] detached EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 0 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath 
'//*[@class="dlist"]/following-sibling::*[@class="paragraph"]/p[text()="detached"]', output, 1 end test 'block attribute lines above nested horizontal list does not break list' do input = <<~'EOS' Operating Systems:: [horizontal] Linux::: Fedora BSD::: OpenBSD Cloud Providers:: PaaS::: OpenShift IaaS::: AWS EOS output = convert_string_to_embedded input assert_xpath '//dl', output, 2 assert_xpath '/*[@class="dlist"]/dl', output, 1 assert_xpath '(//dl)[1]/dd', output, 2 assert_xpath '((//dl)[1]/dd)[1]//table', output, 1 assert_xpath '((//dl)[1]/dd)[2]//table', output, 0 end test 'block attribute lines above nested list with style does not break list' do input = <<~'EOS' TODO List:: * get groceries Grocery List:: [square] * bread * milk * lettuce EOS output = convert_string_to_embedded input assert_xpath '//dl', output, 1 assert_xpath '(//dl)[1]/dd', output, 2 assert_xpath '((//dl)[1]/dd)[2]//ul[@class="square"]', output, 1 end test 'multiple block attribute lines above nested list does not break list' do input = <<~'EOS' Operating Systems:: [[variants]] [horizontal] Linux::: Fedora BSD::: OpenBSD Cloud Providers:: PaaS::: OpenShift IaaS::: AWS EOS output = convert_string_to_embedded input assert_xpath '//dl', output, 2 assert_xpath '/*[@class="dlist"]/dl', output, 1 assert_xpath '(//dl)[1]/dd', output, 2 assert_xpath '(//dl)[1]/dd/*[@id="variants"]', output, 1 assert_xpath '((//dl)[1]/dd)[1]//table', output, 1 assert_xpath '((//dl)[1]/dd)[2]//table', output, 0 end test 'multiple block attribute lines separated by empty line above nested list does not break list' do input = <<~'EOS' Operating Systems:: [[variants]] [horizontal] Linux::: Fedora BSD::: OpenBSD Cloud Providers:: PaaS::: OpenShift IaaS::: AWS EOS output = convert_string_to_embedded input assert_xpath '//dl', output, 2 assert_xpath '/*[@class="dlist"]/dl', output, 1 assert_xpath '(//dl)[1]/dd', output, 2 assert_xpath '(//dl)[1]/dd/*[@id="variants"]', output, 1 assert_xpath '((//dl)[1]/dd)[1]//table', output, 1 assert_xpath '((//dl)[1]/dd)[2]//table', output, 0 end end context 'Item with text inline' do test 'folds text from inline description and subsequent line' do input = <<~'EOS' == Lists term1:: def1 continued EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath %(//*[@class="dlist"]//dd/p[text()="def1\ncontinued"]), output, 1 end test 'folds text from inline description and subsequent lines' do input = <<~'EOS' == Lists term1:: def1 continued continued EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath %(//*[@class="dlist"]//dd/p[text()="def1\ncontinued\ncontinued"]), output, 1 end test 'folds text from inline description and line following comment line' do input = <<~'EOS' == Lists term1:: def1 // comment continued EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath %(//*[@class="dlist"]//dd/p[text()="def1\ncontinued"]), output, 1 end test 'folds text from inline description and subsequent indented line' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS == List term1:: def1 continued EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath 
%(//*[@class="dlist"]//dd/p[text()="def1\ncontinued"]), output, 1 end test 'appends literal line offset by blank line as block if item has inline description' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS == Lists term1:: def1 literal EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="literalblock"]//pre[text()="literal"]', output, 1 end test 'appends literal line offset by blank line as block and appends line after continuation as block if item has inline description' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS == Lists term1:: def1 literal + para EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="literalblock"]//pre[text()="literal"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="literalblock"]/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="literalblock"]/following-sibling::*[@class="paragraph"]/p[text()="para"]', output, 1 end test 'appends line after continuation as block and literal line offset by blank line as block if item has inline description' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS == Lists term1:: def1 + para literal EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="paragraph"]/p[text()="para"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="paragraph"]/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="paragraph"]/following-sibling::*[@class="literalblock"]//pre[text()="literal"]', output, 1 end test 'appends list if item has inline description' do input = <<~'EOS' == Lists term1:: def1 * one * two * three EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="ulist"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="ulist"]/ul/li', output, 3 end test 'appends literal line attached by continuation as block if item has inline description followed by ruler' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS == Lists term1:: def1 + literal ''' EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath 
'//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="literalblock"]//pre[text()="literal"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::hr', output, 1 end test 'line offset by blank line breaks list if term has inline description' do input = <<~'EOS' == Lists term1:: def1 detached EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="paragraph"]/p[text()="detached"]', output, 1 end test 'nested term with description does not consume following heading' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS == Lists term:: def nestedterm;; nesteddef Detached ~~~~~~~~ EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 2 assert_xpath '//*[@class="dlist"]//dd', output, 2 assert_xpath '//*[@class="dlist"]/dl//dl', output, 1 assert_xpath '//*[@class="dlist"]/dl//dl/dt', output, 1 assert_xpath '((//*[@class="dlist"])[1]//dd)[1]/p[text()="def"]', output, 1 assert_xpath '((//*[@class="dlist"])[1]//dd)[1]/p/following-sibling::*[@class="dlist"]', output, 1 assert_xpath '((//*[@class="dlist"])[1]//dd)[1]/p/following-sibling::*[@class="dlist"]//dd/p[text()="nesteddef"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="sect2"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="sect2"]/h3[text()="Detached"]', output, 1 end test 'line attached by continuation is appended as paragraph if term has inline description followed by detached paragraph' do input = <<~'EOS' == Lists term1:: def1 + para detached EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="paragraph"]/p[text()="para"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="paragraph"]/p[text()="detached"]', output, 1 end test 'line attached by continuation is appended as paragraph if term has inline description followed by detached block' do input = <<~'EOS' == Lists term1:: def1 + para **** detached **** EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="paragraph"]/p[text()="para"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="sidebarblock"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="sidebarblock"]//p[text()="detached"]', output, 1 end test 'line attached by continuation offset by line comment is appended as 
paragraph if term has inline description' do input = <<~'EOS' == Lists term1:: def1 // comment + para EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="paragraph"]/p[text()="para"]', output, 1 end test 'line attached by continuation offset by blank line is appended as paragraph if term has inline description' do input = <<~'EOS' == Lists term1:: def1 + para EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="paragraph"]/p[text()="para"]', output, 1 end test 'line comment offset by blank line divides lists because item has text' do input = <<~'EOS' == Lists term1:: def1 // term2:: def2 EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 2 end test 'ruler offset by blank line divides lists because item has text' do input = <<~'EOS' == Lists term1:: def1 ''' term2:: def2 EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 2 end test 'block title offset by blank line divides lists and becomes title of second list because item has text' do input = <<~'EOS' == Lists term1:: def1 .title term2:: def2 EOS output = convert_string_to_embedded input assert_xpath '//*[@class="dlist"]/dl', output, 2 assert_xpath '(//*[@class="dlist"])[2]/*[@class="title"][text()="title"]', output, 1 end end end context 'Callout lists' do test 'does not recognize callout list denoted by markers that only have a trailing bracket' do input = <<~'EOS' ---- require 'asciidoctor' # <1> ---- 1> Not a callout list item EOS output = convert_string_to_embedded input assert_css '.colist', output, 0 end test 'should not hang if obsolete callout list is found inside list item' do input = <<~'EOS' * foo 1> bar EOS output = convert_string_to_embedded input assert_css '.colist', output, 0 end test 'should not hang if obsolete callout list is found inside dlist item' do input = <<~'EOS' foo:: 1> bar EOS output = convert_string_to_embedded input assert_css '.colist', output, 0 end test 'should recognize auto-numbered callout list inside list' do input = <<~'EOS' ---- require 'asciidoctor' # <1> ---- * foo <.> bar EOS output = convert_string_to_embedded input assert_css '.colist', output, 1 end test 'listing block with sequential callouts followed by adjacent callout list' do input = <<~'EOS' [source, ruby] ---- require 'asciidoctor' # <1> doc = Asciidoctor::Document.new('Hello, World!') # <2> puts doc.convert # <3> ---- <1> Describe the first line <2> Describe the second line <3> Describe the third line EOS output = convert_string input, attributes: { 'backend' => 'docbook' } assert_xpath '//programlisting', output, 1 assert_xpath '//programlisting//co', output, 3 assert_xpath '(//programlisting//co)[1][@xml:id="CO1-1"]', output, 1 assert_xpath '(//programlisting//co)[2][@xml:id="CO1-2"]', output, 1 assert_xpath '(//programlisting//co)[3][@xml:id="CO1-3"]', output, 1 assert_xpath
'//programlisting/following-sibling::calloutlist/callout', output, 3 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[1][@arearefs = "CO1-1"]', output, 1 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[2][@arearefs = "CO1-2"]', output, 1 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[3][@arearefs = "CO1-3"]', output, 1 end test 'listing block with sequential callouts followed by non-adjacent callout list' do input = <<~'EOS' [source, ruby] ---- require 'asciidoctor' # <1> doc = Asciidoctor::Document.new('Hello, World!') # <2> puts doc.convert # <3> ---- Paragraph. <1> Describe the first line <2> Describe the second line <3> Describe the third line EOS output = convert_string input, attributes: { 'backend' => 'docbook' } assert_xpath '//programlisting', output, 1 assert_xpath '//programlisting//co', output, 3 assert_xpath '(//programlisting//co)[1][@xml:id="CO1-1"]', output, 1 assert_xpath '(//programlisting//co)[2][@xml:id="CO1-2"]', output, 1 assert_xpath '(//programlisting//co)[3][@xml:id="CO1-3"]', output, 1 assert_xpath '//programlisting/following-sibling::*[1][self::simpara]', output, 1 assert_xpath '//programlisting/following-sibling::calloutlist/callout', output, 3 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[1][@arearefs = "CO1-1"]', output, 1 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[2][@arearefs = "CO1-2"]', output, 1 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[3][@arearefs = "CO1-3"]', output, 1 end test 'listing block with a callout that refers to two different lines' do input = <<~'EOS' [source, ruby] ---- require 'asciidoctor' # <1> doc = Asciidoctor::Document.new('Hello, World!') # <2> puts doc.convert # <2> ---- <1> Import the library <2> Where the magic happens EOS output = convert_string input, attributes: { 'backend' => 'docbook' } assert_xpath '//programlisting', output, 1 assert_xpath '//programlisting//co', output, 3 assert_xpath '(//programlisting//co)[1][@xml:id="CO1-1"]', output, 1 assert_xpath '(//programlisting//co)[2][@xml:id="CO1-2"]', output, 1 assert_xpath '(//programlisting//co)[3][@xml:id="CO1-3"]', output, 1 assert_xpath '//programlisting/following-sibling::calloutlist/callout', output, 2 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[1][@arearefs = "CO1-1"]', output, 1 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[2][@arearefs = "CO1-2 CO1-3"]', output, 1 end test 'source block with non-sequential callouts followed by adjacent callout list' do input = <<~'EOS' [source,ruby] ---- require 'asciidoctor' # <2> doc = Asciidoctor::Document.new('Hello, World!') # <3> puts doc.convert # <1> ---- <1> Describe the first line <2> Describe the second line <3> Describe the third line EOS output = convert_string input, attributes: { 'backend' => 'docbook' } assert_xpath '//programlisting', output, 1 assert_xpath '//programlisting//co', output, 3 assert_xpath '(//programlisting//co)[1][@xml:id="CO1-1"]', output, 1 assert_xpath '(//programlisting//co)[2][@xml:id="CO1-2"]', output, 1 assert_xpath '(//programlisting//co)[3][@xml:id="CO1-3"]', output, 1 assert_xpath '//programlisting/following-sibling::calloutlist/callout', output, 3 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[1][@arearefs = "CO1-3"]', output, 1 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[2][@arearefs = "CO1-1"]', output, 1 assert_xpath 
'(//programlisting/following-sibling::calloutlist/callout)[3][@arearefs = "CO1-2"]', output, 1 end test 'two listing blocks can share the same callout list' do input = <<~'EOS' .Import library [source, ruby] ---- require 'asciidoctor' # <1> ---- .Use library [source, ruby] ---- doc = Asciidoctor::Document.new('Hello, World!') # <2> puts doc.convert # <3> ---- <1> Describe the first line <2> Describe the second line <3> Describe the third line EOS output = convert_string input, attributes: { 'backend' => 'docbook' } assert_xpath '//programlisting', output, 2 assert_xpath '(//programlisting)[1]//co', output, 1 assert_xpath '(//programlisting)[1]//co[@xml:id="CO1-1"]', output, 1 assert_xpath '(//programlisting)[2]//co', output, 2 assert_xpath '((//programlisting)[2]//co)[1][@xml:id="CO1-2"]', output, 1 assert_xpath '((//programlisting)[2]//co)[2][@xml:id="CO1-3"]', output, 1 assert_xpath '(//calloutlist/callout)[1][@arearefs = "CO1-1"]', output, 1 assert_xpath '(//calloutlist/callout)[2][@arearefs = "CO1-2"]', output, 1 assert_xpath '(//calloutlist/callout)[3][@arearefs = "CO1-3"]', output, 1 end test 'two listing blocks each followed by an adjacent callout list' do input = <<~'EOS' .Import library [source, ruby] ---- require 'asciidoctor' # <1> ---- <1> Describe the first line .Use library [source, ruby] ---- doc = Asciidoctor::Document.new('Hello, World!') # <1> puts doc.convert # <2> ---- <1> Describe the second line <2> Describe the third line EOS output = convert_string input, attributes: { 'backend' => 'docbook' } assert_xpath '//programlisting', output, 2 assert_xpath '(//programlisting)[1]//co', output, 1 assert_xpath '(//programlisting)[1]//co[@xml:id="CO1-1"]', output, 1 assert_xpath '(//programlisting)[2]//co', output, 2 assert_xpath '((//programlisting)[2]//co)[1][@xml:id="CO2-1"]', output, 1 assert_xpath '((//programlisting)[2]//co)[2][@xml:id="CO2-2"]', output, 1 assert_xpath '//calloutlist', output, 2 assert_xpath '(//calloutlist)[1]/callout', output, 1 assert_xpath '((//calloutlist)[1]/callout)[1][@arearefs = "CO1-1"]', output, 1 assert_xpath '(//calloutlist)[2]/callout', output, 2 assert_xpath '((//calloutlist)[2]/callout)[1][@arearefs = "CO2-1"]', output, 1 assert_xpath '((//calloutlist)[2]/callout)[2][@arearefs = "CO2-2"]', output, 1 end test 'callout list retains block content' do input = <<~'EOS' [source, ruby] ---- require 'asciidoctor' # <1> doc = Asciidoctor::Document.new('Hello, World!') # <2> puts doc.convert # <3> ---- <1> Imports the library as a RubyGem <2> Creates a new document * Scans the lines for known blocks * Converts the lines into blocks <3> Renders the document + You can write this to file rather than printing to stdout. EOS output = convert_string_to_embedded input assert_xpath '//ol/li', output, 3 assert_xpath %((//ol/li)[1]/p[text()="Imports the library\nas a RubyGem"]), output, 1 assert_xpath %((//ol/li)[2]//ul), output, 1 assert_xpath %((//ol/li)[2]//ul/li), output, 2 assert_xpath %((//ol/li)[3]//p), output, 2 end test 'callout list retains block content when converted to DocBook' do input = <<~'EOS' [source, ruby] ---- require 'asciidoctor' # <1> doc = Asciidoctor::Document.new('Hello, World!') # <2> puts doc.convert # <3> ---- <1> Imports the library as a RubyGem <2> Creates a new document * Scans the lines for known blocks * Converts the lines into blocks <3> Renders the document + You can write this to file rather than printing to stdout. 
EOS output = convert_string input, attributes: { 'backend' => 'docbook' } assert_xpath '//calloutlist', output, 1 assert_xpath '//calloutlist/callout', output, 3 assert_xpath '(//calloutlist/callout)[1]/*', output, 1 assert_xpath '(//calloutlist/callout)[2]/para', output, 1 assert_xpath '(//calloutlist/callout)[2]/itemizedlist', output, 1 assert_xpath '(//calloutlist/callout)[3]/para', output, 1 assert_xpath '(//calloutlist/callout)[3]/simpara', output, 1 end test 'escaped callout should not be interpreted as a callout' do input = <<~'EOS' [source,text] ---- require 'asciidoctor' # \<1> Asciidoctor.convert 'convert me!' \<2> ---- EOS [{}, { 'source-highlighter' => 'coderay' }].each do |attributes| output = convert_string_to_embedded input, attributes: attributes assert_css 'pre b', output, 0 assert_includes output, ' # <1>' assert_includes output, ' <2>' end end test 'should autonumber <.> callouts' do input = <<~'EOS' [source, ruby] ---- require 'asciidoctor' # <.> doc = Asciidoctor::Document.new('Hello, World!') # <.> puts doc.convert # <.> ---- <.> Describe the first line <.> Describe the second line <.> Describe the third line EOS output = convert_string_to_embedded input pre_html = (xmlnodes_at_css 'pre', output)[0].inner_html assert_includes pre_html, '(1)' assert_includes pre_html, '(2)' assert_includes pre_html, '(3)' assert_css '.colist ol', output, 1 assert_css '.colist ol li', output, 3 end test 'should not recognize callouts in middle of line' do input = <<~'EOS' [source, ruby] ---- puts "The syntax <1> at the end of the line makes a code callout" ---- EOS output = convert_string_to_embedded input assert_xpath '//b', output, 0 end test 'should allow multiple callouts on the same line' do input = <<~'EOS' [source, ruby] ---- require 'asciidoctor' <1> doc = Asciidoctor.load('Hello, World!') # <2> <3> <4> puts doc.convert <5><6> exit 0 ---- <1> Require library <2> Load document from String <3> Uses default backend and doctype <4> One more for good luck <5> Renders document to String <6> Prints output to stdout EOS output = convert_string_to_embedded input assert_xpath '//code/b', output, 6 assert_match(/ \(1\)<\/b>$/, output) assert_match(/ \(2\)<\/b> \(3\)<\/b> \(4\)<\/b>$/, output) assert_match(/ \(5\)<\/b>\(6\)<\/b>$/, output) end test 'should allow XML comment-style callouts' do input = <<~'EOS' [source, xml] ----
<section> <title>Section Title</title> <!--1--> <simpara>Just a paragraph</simpara> <!--2--> </section>
    ---- <1> The title is required <2> The content isn't EOS output = convert_string_to_embedded input assert_xpath '//b', output, 2 assert_xpath '//b[text()="(1)"]', output, 1 assert_xpath '//b[text()="(2)"]', output, 1 end test 'should not allow callouts with half an XML comment' do input = <<~'EOS' ---- First line <1--> Second line <2--> ---- EOS output = convert_string_to_embedded input assert_xpath '//b', output, 0 end test 'should not recognize callouts in an indented description list paragraph' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS foo:: bar <1> <1> Not pointing to a callout EOS using_memory_logger do |logger| output = convert_string_to_embedded input assert_xpath '//dl//b', output, 0 assert_xpath '//dl/dd/p[text()="bar <1>"]', output, 1 assert_xpath '//ol/li/p[text()="Not pointing to a callout"]', output, 1 assert_message logger, :WARN, ': line 4: no callout found for <1>', Hash end end test 'should not recognize callouts in an indented outline list paragraph' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS * foo bar <1> <1> Not pointing to a callout EOS using_memory_logger do |logger| output = convert_string_to_embedded input assert_xpath '//ul//b', output, 0 assert_xpath %(//ul/li/p[text()="foo\nbar <1>"]), output, 1 assert_xpath '//ol/li/p[text()="Not pointing to a callout"]', output, 1 assert_message logger, :WARN, ': line 4: no callout found for <1>', Hash end end test 'should warn if numbers in callout list are out of sequence' do input = <<~'EOS' ---- <1> ---- <1> Container of beans. Beans are fun. <3> An actual bean. EOS using_memory_logger do |logger| output = convert_string_to_embedded input assert_xpath '//ol/li', output, 2 assert_messages logger, [ [:WARN, ': line 8: callout list item index: expected 2, got 3', Hash], [:WARN, ': line 8: no callout found for <2>', Hash] ] end end test 'should preserve line comment chars that precede callout number if icons is not set' do input = <<~'EOS' [source,ruby] ---- puts 'Hello, world!' # <1> ---- <1> Ruby [source,groovy] ---- println 'Hello, world!' // <1> ---- <1> Groovy [source,clojure] ---- (def hello (fn [] "Hello, world!")) ;; <1> (hello) ---- <1> Clojure [source,haskell] ---- main = putStrLn "Hello, World!" -- <1> ---- <1> Haskell EOS [{}, { 'source-highlighter' => 'coderay' }].each do |attributes| output = convert_string_to_embedded input, attributes: attributes assert_xpath '//b', output, 4 nodes = xmlnodes_at_css 'pre', output assert_equal %(puts 'Hello, world!' # (1)), nodes[0].text assert_equal %(println 'Hello, world!' // (1)), nodes[1].text assert_equal %((def hello (fn [] "Hello, world!")) ;; (1)\n(hello)), nodes[2].text assert_equal %(main = putStrLn "Hello, World!" -- (1)), nodes[3].text end end test 'should remove line comment chars that precede callout number if icons is font' do input = <<~'EOS' [source,ruby] ---- puts 'Hello, world!' # <1> ---- <1> Ruby [source,groovy] ---- println 'Hello, world!' // <1> ---- <1> Groovy [source,clojure] ---- (def hello (fn [] "Hello, world!")) ;; <1> (hello) ---- <1> Clojure [source,haskell] ---- main = putStrLn "Hello, World!" 
-- <1> ---- <1> Haskell EOS [{}, { 'source-highlighter' => 'coderay' }].each do |attributes| output = convert_string_to_embedded input, attributes: attributes.merge({ 'icons' => 'font' }) assert_css 'pre b', output, 4 assert_css 'pre i.conum', output, 4 nodes = xmlnodes_at_css 'pre', output assert_equal %(puts 'Hello, world!' (1)), nodes[0].text assert_equal %(println 'Hello, world!' (1)), nodes[1].text assert_equal %((def hello (fn [] "Hello, world!")) (1)\n(hello)), nodes[2].text assert_equal %(main = putStrLn "Hello, World!" (1)), nodes[3].text end end test 'should allow line comment chars that precede callout number to be specified' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS [source,erlang,line-comment=%] ---- hello_world() -> % <1> io:fwrite("hello, world~n"). %<2> ---- <1> Erlang function clause head. <2> ~n adds a new line to the output. EOS output = convert_string_to_embedded input assert_xpath '//b', output, 2 nodes = xmlnodes_at_css 'pre', output assert_equal %(hello_world() -> % (1)\n io:fwrite("hello, world~n"). %(2)), nodes[0].text end test 'should allow line comment chars preceding callout number to be configurable when source-highlighter is coderay' do input = <<~'EOS' [source,html,line-comment=-#] ---- -# <1> %p Hello ---- <1> Prints a paragraph with the text "Hello" EOS output = convert_string_to_embedded input, attributes: { 'source-highlighter' => 'coderay' } assert_xpath '//b', output, 1 nodes = xmlnodes_at_css 'pre', output assert_equal %(-# (1)\n%p Hello), nodes[0].text end test 'should not eat whitespace before callout number if line-comment attribute is empty' do input = <<~'EOS' [source,asciidoc,line-comment=] ---- -- <1> ---- <1> The start of an open block. EOS output = convert_string_to_embedded input, attributes: { 'icons' => 'font' } assert_includes output, '-- Violets are blue <2> .... 
<1> And so is Ruby <2> But violet is more like purple EOS output = convert_string input, attributes: { 'backend' => 'docbook' } assert_xpath '//literallayout', output, 1 assert_xpath '//literallayout//co', output, 2 assert_xpath '(//literallayout//co)[1][@xml:id="CO1-1"]', output, 1 assert_xpath '(//literallayout//co)[2][@xml:id="CO1-2"]', output, 1 assert_xpath '//literallayout/following-sibling::*[1][self::calloutlist]/callout', output, 2 assert_xpath '(//literallayout/following-sibling::*[1][self::calloutlist]/callout)[1][@arearefs = "CO1-1"]', output, 1 assert_xpath '(//literallayout/following-sibling::*[1][self::calloutlist]/callout)[2][@arearefs = "CO1-2"]', output, 1 end test 'callout list with icons enabled' do input = <<~'EOS' [source, ruby] ---- require 'asciidoctor' # <1> doc = Asciidoctor::Document.new('Hello, World!') # <2> puts doc.convert # <3> ---- <1> Describe the first line <2> Describe the second line <3> Describe the third line EOS output = convert_string_to_embedded input, attributes: { 'icons' => '' } assert_css '.listingblock code > img', output, 3 (1..3).each do |i| assert_xpath %((/div[@class="listingblock"]//code/img)[#{i}][@src="./images/icons/callouts/#{i}.png"][@alt="#{i}"]), output, 1 end assert_css '.colist table td img', output, 3 (1..3).each do |i| assert_xpath %((/div[@class="colist arabic"]//td/img)[#{i}][@src="./images/icons/callouts/#{i}.png"][@alt="#{i}"]), output, 1 end end test 'callout list with font-based icons enabled' do input = <<~'EOS' [source] ---- require 'asciidoctor' # <1> doc = Asciidoctor::Document.new('Hello, World!') #<2> puts doc.convert #<3> ---- <1> Describe the first line <2> Describe the second line <3> Describe the third line EOS output = convert_string_to_embedded input, attributes: { 'icons' => 'font' } assert_css '.listingblock code > i', output, 3 (1..3).each do |i| assert_xpath %((/div[@class="listingblock"]//code/i)[#{i}]), output, 1 assert_xpath %((/div[@class="listingblock"]//code/i)[#{i}][@class="conum"][@data-value="#{i}"]), output, 1 assert_xpath %((/div[@class="listingblock"]//code/i)[#{i}]/following-sibling::b[text()="(#{i})"]), output, 1 end assert_css '.colist table td i', output, 3 (1..3).each do |i| assert_xpath %((/div[@class="colist arabic"]//td/i)[#{i}]), output, 1 assert_xpath %((/div[@class="colist arabic"]//td/i)[#{i}][@class="conum"][@data-value = "#{i}"]), output, 1 assert_xpath %((/div[@class="colist arabic"]//td/i)[#{i}]/following-sibling::b[text() = "#{i}"]), output, 1 end end test 'should match trailing line separator in text of list item' do input = <<~EOS.chop ---- A <1> B <2> C <3> ---- <1> a <2> b#{decode_char 8232} <3> c EOS output = convert_string input assert_css 'li', output, 3 assert_xpath %((//li)[2]/p[text()="b#{decode_char 8232}"]), output, 1 end test 'should match line separator in text of list item' do input = <<~EOS.chop ---- A <1> B <2> C <3> ---- <1> a <2> b#{decode_char 8232}b <3> c EOS output = convert_string input assert_css 'li', output, 3 assert_xpath %((//li)[2]/p[text()="b#{decode_char 8232}b"]), output, 1 end end context 'Checklists' do test 'should create checklist if at least one item has checkbox syntax' do input = <<~'EOS' - [ ] todo - [x] done - [ ] another todo - [*] another done - plain EOS doc = document_from_string input checklist = doc.blocks[0] assert checklist.option?('checklist') assert checklist.items[0].attr?('checkbox') refute checklist.items[0].attr?('checked') assert checklist.items[1].attr?('checkbox') assert checklist.items[1].attr?('checked') refute 
checklist.items[4].attr?('checkbox') output = doc.convert standalone: false assert_css '.ulist.checklist', output, 1 assert_xpath %((/*[@class="ulist checklist"]/ul/li)[1]/p[text()="#{decode_char 10063} todo"]), output, 1 assert_xpath %((/*[@class="ulist checklist"]/ul/li)[2]/p[text()="#{decode_char 10003} done"]), output, 1 assert_xpath %((/*[@class="ulist checklist"]/ul/li)[3]/p[text()="#{decode_char 10063} another todo"]), output, 1 assert_xpath %((/*[@class="ulist checklist"]/ul/li)[4]/p[text()="#{decode_char 10003} another done"]), output, 1 assert_xpath '(/*[@class="ulist checklist"]/ul/li)[5]/p[text()="plain"]', output, 1 end test 'entry is not a checklist item if the closing bracket is not immediately followed by the space character' do input = <<~EOS - [ ] todo - [x] \t done - [ ]\t another todo - [x]\t another done EOS doc = document_from_string input checklist = doc.blocks[0] assert checklist.option?('checklist') assert checklist.items[0].attr?('checkbox') refute checklist.items[0].attr?('checked') assert checklist.items[1].attr?('checkbox') assert checklist.items[1].attr?('checked') refute checklist.items[2].attr?('checkbox') refute checklist.items[3].attr?('checkbox') end test 'should create checklist with font icons if at least one item has checkbox syntax and icons attribute is font' do input = <<~'EOS' - [ ] todo - [x] done - plain EOS output = convert_string_to_embedded input, attributes: { 'icons' => 'font' } assert_css '.ulist.checklist', output, 1 assert_css '.ulist.checklist li i.fa-check-square-o', output, 1 assert_css '.ulist.checklist li i.fa-square-o', output, 1 assert_xpath '(/*[@class="ulist checklist"]/ul/li)[3]/p[text()="plain"]', output, 1 end test 'should create interactive checklist if interactive option is set even with icons attribute is font' do input = <<~'EOS' :icons: font [%interactive] - [ ] todo - [x] done EOS doc = document_from_string input checklist = doc.blocks[0] assert checklist.option?('checklist') assert checklist.option?('interactive') output = doc.convert standalone: false assert_css '.ulist.checklist', output, 1 assert_css '.ulist.checklist li input[type="checkbox"]', output, 2 assert_css '.ulist.checklist li input[type="checkbox"][disabled]', output, 0 assert_css '.ulist.checklist li input[type="checkbox"][checked]', output, 1 end test 'should not create checklist if checkbox on item is followed by a tab' do ['[ ]', '[x]', '[*]'].each do |checkbox| input = <<~EOS - #{checkbox}\ttodo EOS doc = document_from_string input list = doc.blocks[0] assert_equal :ulist, list.context refute list.option?('checklist') end end end context 'Lists model' do test 'content should return items in list' do input = <<~'EOS' * one * two * three EOS doc = document_from_string input list = doc.blocks.first assert_kind_of Asciidoctor::List, list items = list.items assert_equal 3, items.size assert_equal list.items, list.content end test 'list item should be the parent of block attached to a list item' do input = <<~'EOS' * list item 1 + ---- listing block in list item 1 ---- EOS doc = document_from_string input list = doc.blocks.first list_item_1 = list.items.first listing_block = list_item_1.blocks.first assert_equal :listing, listing_block.context assert_equal list_item_1, listing_block.parent end test 'outline? should return true for unordered list' do input = <<~'EOS' * one * two * three EOS doc = document_from_string input list = doc.blocks.first assert list.outline? end test 'outline? should return true for ordered list' do input = <<~'EOS' . one . two . 
three EOS doc = document_from_string input list = doc.blocks.first assert list.outline? end test 'outline? should return false for description list' do input = 'label:: desc' doc = document_from_string input list = doc.blocks.first refute list.outline? end test 'simple? should return true for list item with no nested blocks' do input = <<~'EOS' * one * two * three EOS doc = document_from_string input list = doc.blocks.first assert list.items.first.simple? refute list.items.first.compound? end test 'simple? should return true for list item with nested outline list' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS * one ** more about one ** and more * two * three EOS doc = document_from_string input list = doc.blocks.first assert list.items.first.simple? refute list.items.first.compound? end test 'simple? should return false for list item with block content' do input = <<~'EOS' * one + ---- listing block in list item 1 ---- * two * three EOS doc = document_from_string input list = doc.blocks.first refute list.items.first.simple? assert list.items.first.compound? end test 'should allow text of ListItem to be assigned' do input = <<~'EOS' * one * two * three EOS doc = document_from_string input list = (doc.find_by context: :ulist).first assert_equal 3, list.items.size assert_equal 'one', list.items[0].text list.items[0].text = 'un' assert_equal 'un', list.items[0].text end test 'id and role assigned to ulist item in model are transmitted to output' do input = <<~'EOS' * one * two * three EOS doc = document_from_string input item_0 = doc.blocks[0].items[0] item_0.id = 'one' item_0.add_role 'item' output = doc.convert assert_css 'li#one.item', output, 1 end test 'id and role assigned to olist item in model are transmitted to output' do input = <<~'EOS' . one . two . 
three EOS doc = document_from_string input item_0 = doc.blocks[0].items[0] item_0.id = 'one' item_0.add_role 'item' output = doc.convert assert_css 'li#one.item', output, 1 end test 'should allow API control over substitutions applied to ListItem text' do input = <<~'EOS' * *one* * _two_ * `three` * #four# EOS doc = document_from_string input list = (doc.find_by context: :ulist).first assert_equal 4, list.items.size list.items[0].remove_sub :quotes assert_equal '*one*', list.items[0].text refute_includes list.items[0].subs, :quotes list.items[1].subs.clear assert_empty list.items[1].subs assert_equal '_two_', list.items[1].text list.items[2].subs.replace [:specialcharacters] assert_equal [:specialcharacters], list.items[2].subs assert_equal '`three`', list.items[2].text assert_equal 'four', list.items[3].text end test 'should set lineno to line number in source where list starts' do input = <<~'EOS' * bullet 1 ** bullet 1.1 *** bullet 1.1.1 * bullet 2 EOS doc = document_from_string input, sourcemap: true lists = doc.find_by context: :ulist assert_equal 1, lists[0].lineno assert_equal 2, lists[1].lineno assert_equal 3, lists[2].lineno list_items = doc.find_by context: :list_item assert_equal 1, list_items[0].lineno assert_equal 2, list_items[1].lineno assert_equal 3, list_items[2].lineno assert_equal 4, list_items[3].lineno end end asciidoctor-2.0.20/test/logger_test.rb000066400000000000000000000170011443135032600177200ustar00rootroot00000000000000# frozen_string_literal: true require_relative 'test_helper' context 'Logger' do MyLogger = Class.new Logger context 'LoggerManager' do test 'provides access to logger via static logger method' do logger = Asciidoctor::LoggerManager.logger refute_nil logger assert_kind_of Logger, logger end test 'allows logger instance to be changed' do old_logger = Asciidoctor::LoggerManager.logger new_logger = MyLogger.new $stdout begin Asciidoctor::LoggerManager.logger = new_logger assert_same new_logger, Asciidoctor::LoggerManager.logger ensure Asciidoctor::LoggerManager.logger = old_logger end end test 'setting logger instance to falsy value resets instance to default logger' do old_logger = Asciidoctor::LoggerManager.logger begin Asciidoctor::LoggerManager.logger = MyLogger.new $stdout Asciidoctor::LoggerManager.logger = nil refute_nil Asciidoctor::LoggerManager.logger assert_kind_of Logger, Asciidoctor::LoggerManager.logger ensure Asciidoctor::LoggerManager.logger = old_logger end end test 'creates logger instance from static logger_class property' do old_logger_class = Asciidoctor::LoggerManager.logger_class old_logger = Asciidoctor::LoggerManager.logger begin Asciidoctor::LoggerManager.logger_class = MyLogger Asciidoctor::LoggerManager.logger = nil refute_nil Asciidoctor::LoggerManager.logger assert_kind_of MyLogger, Asciidoctor::LoggerManager.logger ensure Asciidoctor::LoggerManager.logger_class = old_logger_class Asciidoctor::LoggerManager.logger = old_logger end end end context 'Logger' do test 'configures default logger with progname set to asciidoctor' do assert_equal 'asciidoctor', Asciidoctor::LoggerManager.logger.progname end test 'configures default logger with level set to WARN' do assert_equal Logger::Severity::WARN, Asciidoctor::LoggerManager.logger.level end test 'configures default logger to write messages to $stderr' do out_string, err_string = redirect_streams do |out, err| Asciidoctor::LoggerManager.logger.warn 'this is a call' [out.string, err.string] end assert_empty out_string refute_empty err_string assert_includes err_string, 'this 
is a call' end test 'configures default logger to use a formatter that matches traditional format' do err_string = redirect_streams do |_, err| Asciidoctor::LoggerManager.logger.warn 'this is a call' Asciidoctor::LoggerManager.logger.fatal 'it cannot be done' err.string end assert_includes err_string, %(asciidoctor: WARNING: this is a call) assert_includes err_string, %(asciidoctor: FAILED: it cannot be done) end test 'NullLogger level is not nil' do logger = Asciidoctor::NullLogger.new refute_nil logger.level assert_equal Logger::WARN, logger.level end end context ':logger API option' do test 'should be able to set logger when invoking load API' do old_logger = Asciidoctor::LoggerManager.logger new_logger = MyLogger.new $stdout begin Asciidoctor.load 'contents', logger: new_logger assert_same new_logger, Asciidoctor::LoggerManager.logger ensure Asciidoctor::LoggerManager.logger = old_logger end end test 'should be able to set logger when invoking load_file API' do old_logger = Asciidoctor::LoggerManager.logger new_logger = MyLogger.new $stdout begin Asciidoctor.load_file fixture_path('basic.adoc'), logger: new_logger assert_same new_logger, Asciidoctor::LoggerManager.logger ensure Asciidoctor::LoggerManager.logger = old_logger end end test 'should be able to set logger when invoking convert API' do old_logger = Asciidoctor::LoggerManager.logger new_logger = MyLogger.new $stdout begin Asciidoctor.convert 'contents', logger: new_logger assert_same new_logger, Asciidoctor::LoggerManager.logger ensure Asciidoctor::LoggerManager.logger = old_logger end end test 'should be able to set logger when invoking convert_file API' do old_logger = Asciidoctor::LoggerManager.logger new_logger = MyLogger.new $stdout begin Asciidoctor.convert_file fixture_path('basic.adoc'), to_file: false, logger: new_logger assert_same new_logger, Asciidoctor::LoggerManager.logger ensure Asciidoctor::LoggerManager.logger = old_logger end end test 'should be able to set logger to NullLogger by setting :logger option to a falsy value' do [nil, false].each do |falsy_val| old_logger = Asciidoctor::LoggerManager.logger begin Asciidoctor.load 'contents', logger: falsy_val assert_kind_of Asciidoctor::NullLogger, Asciidoctor::LoggerManager.logger ensure Asciidoctor::LoggerManager.logger = old_logger end end end end context 'Logging' do test 'including Logging gives instance methods on module access to logging infrastructure' do module SampleModuleA include Asciidoctor::Logging def get_logger logger end end class SampleClassA include SampleModuleA end assert_same Asciidoctor::LoggerManager.logger, SampleClassA.new.get_logger assert SampleClassA.public_method_defined? :logger end test 'including Logging gives static methods on module access to logging infrastructure' do module SampleModuleB include Asciidoctor::Logging def self.get_logger logger end end assert_same Asciidoctor::LoggerManager.logger, SampleModuleB.get_logger end test 'including Logging gives instance methods on class access to logging infrastructure' do class SampleClassC include Asciidoctor::Logging def get_logger logger end end assert_same Asciidoctor::LoggerManager.logger, SampleClassC.new.get_logger assert SampleClassC.public_method_defined? 
:logger end test 'including Logging gives static methods on class access to logging infrastructure' do class SampleClassD include Asciidoctor::Logging def self.get_logger logger end end assert_same Asciidoctor::LoggerManager.logger, SampleClassD.get_logger end test 'can create an auto-formatting message with context' do class SampleClassE include Asciidoctor::Logging def create_message cursor message_with_context 'Asciidoctor was here', source_location: cursor end end cursor = Asciidoctor::Reader::Cursor.new 'file.adoc', fixturedir, 'file.adoc', 5 message = SampleClassE.new.create_message cursor assert_equal 'Asciidoctor was here', message[:text] assert_same cursor, message[:source_location] assert_equal 'file.adoc: line 5: Asciidoctor was here', message.inspect end test 'writes message prefixed with program name and source location to stderr' do input = <<~'EOS' [#first] first paragraph [#first] another first paragraph EOS messages = redirect_streams do |_, err| convert_string_to_embedded input err.string.chomp end assert_equal 'asciidoctor: WARNING: : line 5: id assigned to block already in use: first', messages end end end asciidoctor-2.0.20/test/manpage_test.rb000066400000000000000000001072111443135032600200540ustar00rootroot00000000000000# frozen_string_literal: true require_relative 'test_helper' context 'Manpage' do SAMPLE_MANPAGE_HEADER = <<~'EOS'.chop = command (1) Author Name :doctype: manpage :man manual: Command Manual :man source: Command 1.2.3 == NAME command - does stuff == SYNOPSIS *command* [_OPTION_]... _FILE_... == DESCRIPTION EOS context 'Configuration' do test 'should set proper manpage-related attributes' do input = SAMPLE_MANPAGE_HEADER doc = Asciidoctor.load input, backend: :manpage assert_equal 'man', doc.attributes['filetype'] assert_equal '', doc.attributes['filetype-man'] assert_equal '1', doc.attributes['manvolnum'] assert_equal '.1', doc.attributes['outfilesuffix'] assert_equal 'command', doc.attributes['manname'] assert_equal 'command', doc.attributes['mantitle'] assert_equal 'does stuff', doc.attributes['manpurpose'] assert_equal 'command', doc.attributes['docname'] end test 'should not escape hyphen when printing manname in NAME section' do input = SAMPLE_MANPAGE_HEADER.sub(/^command - /, 'git-describe - ') output = Asciidoctor.convert input, backend: :manpage, standalone: true assert_includes output, %(\n.SH "NAME"\ngit-describe \\- does stuff\n) end test 'should output multiple mannames in NAME section' do input = SAMPLE_MANPAGE_HEADER.sub(/^command - /, 'command, alt_command - ') output = Asciidoctor.convert input, backend: :manpage, standalone: true assert_includes output.lines, %(command, alt_command \\- does stuff\n) end test 'should replace invalid characters in mantitle in info comment' do input = <<~'EOS' = foo\-- (1) Author Name :doctype: manpage :man manual: Foo Bar Manual :man source: Foo Bar 1.0 == NAME foo-bar - puts the foo in your bar EOS doc = Asciidoctor.load input, backend: :manpage, standalone: true output = doc.convert assert_includes output, %(Title: foo--bar\n) end test 'should substitute attributes in manname and manpurpose in NAME section' do input = <<~'EOS' = {cmdname} (1) Author Name :doctype: manpage :man manual: Foo Bar Manual :man source: Foo Bar 1.0 == NAME {cmdname} - {cmdname} puts the foo in your bar EOS doc = Asciidoctor.load input, backend: :manpage, standalone: true, attributes: { 'cmdname' => 'foobar' } assert_equal 'foobar', (doc.attr 'manname') assert_equal ['foobar'], (doc.attr 'mannames') assert_equal 'foobar puts 
the foo in your bar', (doc.attr 'manpurpose') assert_equal 'foobar', (doc.attr 'docname') end test 'should not parse NAME section if manname and manpurpose attributes are set' do input = <<~'EOS' = foobar (1) Author Name :doctype: manpage :man manual: Foo Bar Manual :man source: Foo Bar 1.0 == SYNOPSIS *foobar* [_OPTIONS_]... == DESCRIPTION When you need to put some foo on the bar. EOS attrs = { 'manname' => 'foobar', 'manpurpose' => 'puts some foo on the bar' } doc = Asciidoctor.load input, backend: :manpage, standalone: true, attributes: attrs assert_equal 'foobar', (doc.attr 'manname') assert_equal ['foobar'], (doc.attr 'mannames') assert_equal 'puts some foo on the bar', (doc.attr 'manpurpose') assert_equal 'SYNOPSIS', doc.sections[0].title end test 'should normalize whitespace and skip line comments before and inside NAME section' do input = <<~'EOS' = foobar (1) Author Name :doctype: manpage :man manual: Foo Bar Manual :man source: Foo Bar 1.0 // this is the name section == NAME // it follows the form `name - description` foobar - puts some foo on the bar // a little bit of this, a little bit of that == SYNOPSIS *foobar* [_OPTIONS_]... == DESCRIPTION When you need to put some foo on the bar. EOS doc = Asciidoctor.load input, backend: :manpage, standalone: true assert_equal 'puts some foo on the bar', (doc.attr 'manpurpose') end test 'should parse malformed document with warnings' do input = 'garbage in' using_memory_logger do |logger| doc = Asciidoctor.load input, backend: :manpage, standalone: true, attributes: { 'docname' => 'cmd' } assert_equal 'cmd', doc.attr('manname') assert_equal ['cmd'], doc.attr('mannames') assert_equal '.1', doc.attr('outfilesuffix') output = doc.convert refute_empty logger.messages assert_includes output, %(Title: cmd\n) assert output.end_with?('garbage in') end end test 'should warn if document title is non-conforming' do input = <<~'EOS' = command == Name command - does stuff EOS using_memory_logger do |logger| document_from_string input, backend: :manpage assert_message logger, :ERROR, ': line 1: non-conforming manpage title', Hash end end test 'should warn if first section is not name section' do input = <<~'EOS' = command(1) == Synopsis Does stuff. EOS using_memory_logger do |logger| doc = document_from_string input, backend: :manpage assert_message logger, :ERROR, ': line 3: non-conforming name section body', Hash refute_nil doc.sections[0] assert_equal 'Synopsis', doc.sections[0].title end end test 'should break circular reference in section title' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} [#a] == A <> [#b] == B <> EOS output = Asciidoctor.convert input, backend: :manpage assert_match %r/^\.SH "A B \[A\]"$/, output assert_match %r/^\.SH "B \[A\]"$/, output end test 'should define default linkstyle' do input = SAMPLE_MANPAGE_HEADER output = Asciidoctor.convert input, backend: :manpage, standalone: true assert_includes output.lines, %(. LINKSTYLE blue R < >\n) end test 'should use linkstyle defined by man-linkstyle attribute' do input = SAMPLE_MANPAGE_HEADER output = Asciidoctor.convert input, backend: :manpage, standalone: true, attributes: { 'man-linkstyle' => 'cyan B \[fo] \[fc]' } assert_includes output.lines, %(. LINKSTYLE cyan B \\[fo] \\[fc]\n) end test 'should require specialchars in value of man-linkstyle attribute defined in document to be escaped' do input = <<~EOS.chop :man-linkstyle: cyan R < > #{SAMPLE_MANPAGE_HEADER} EOS output = Asciidoctor.convert input, backend: :manpage, standalone: true assert_includes output.lines, %(. 
LINKSTYLE cyan R < >\n) input = <<~EOS.chop :man-linkstyle: pass:[cyan R < >] #{SAMPLE_MANPAGE_HEADER} EOS output = Asciidoctor.convert input, backend: :manpage, standalone: true assert_includes output.lines, %(. LINKSTYLE cyan R < >\n) end end context 'Manify' do test 'should unescape literal ampersand' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} (C) & (R) are translated to character references, but not the &. EOS output = Asciidoctor.convert input, backend: :manpage assert_equal '\\(co & \\(rg are translated to character references, but not the &.', output.lines.last.chomp end test 'should replace numeric character reference for plus' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} A {plus} B EOS output = Asciidoctor.convert input, backend: :manpage assert_equal 'A + B', output.lines.last.chomp end test 'should replace numeric character reference for degree sign' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} 0{deg} is freezing EOS output = Asciidoctor.convert input, backend: :manpage assert_equal '0\(de is freezing', output.lines.last.chomp end test 'should replace em dashes' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} go -- to go--to EOS output = Asciidoctor.convert input, backend: :manpage assert_includes output, 'go \\(em to' assert_includes output, 'go\\(emto' end test 'should replace quotes' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} 'command' EOS output = Asciidoctor.convert input, backend: :manpage assert_includes output, '\*(Aqcommand\*(Aq' end test 'should escape lone period' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} . EOS output = Asciidoctor.convert input, backend: :manpage assert_equal '\&.', output.lines.last.chomp end test 'should escape raw macro' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} AAA this line of text should be show .if 1 .nx BBB this line and the one above it should be visible EOS output = Asciidoctor.convert input, backend: :manpage assert_equal '\&.if 1 .nx', output.lines[-2].chomp end test 'should escape ellipsis at start of line' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} -x:: Ao gravar o commit, acrescente uma linha que diz "(cherry picked from commit ...)" à mensagem de commit original para indicar qual commit esta mudança foi escolhida. Isso é feito apenas para picaretas de cereja sem conflitos. EOS output = Asciidoctor.convert input, backend: :manpage assert_equal '\&...', output.lines[-3][0..4].chomp end test 'should not escape ellipsis in the middle of a line' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} -x:: Ao gravar o commit, acrescente uma linha que diz "(cherry picked from commit...)" à mensagem de commit original para indicar qual commit esta mudança foi escolhida. Isso é feito apenas para picaretas de cereja sem conflitos. EOS output = Asciidoctor.convert input, backend: :manpage assert(output.lines[-5].include? 
'commit...') end test 'should normalize whitespace in a paragraph' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} Oh, here it goes again I should have known, should have known, should have known again EOS output = Asciidoctor.convert input, backend: :manpage assert_includes output, %(Oh, here it goes again\nI should have known,\nshould have known,\nshould have known again) end test 'should normalize whitespace in a list item' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} * Oh, here it goes again I should have known, should have known, should have known again EOS output = Asciidoctor.convert input, backend: :manpage assert_includes output, %(Oh, here it goes again\nI should have known,\nshould have known,\nshould have known again) end test 'should honor start attribute on ordered list' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} [start=5] . five . six EOS output = Asciidoctor.convert input, backend: :manpage assert_match %r/IP " 5\.".*five/m, output assert_match %r/IP " 6\.".*six/m, output end test 'should collapse whitespace in the man manual and man source' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} Describe this thing. EOS output = Asciidoctor.convert input, backend: :manpage, standalone: true, attributes: { 'manmanual' => %(General\nCommands\nManual), 'mansource' => %(Control\nAll\nThe\nThings\n5.0), } assert_includes output, 'Manual: General Commands Manual' assert_includes output, 'Source: Control All The Things 5.0' assert_includes output, '"Control All The Things 5.0" "General Commands Manual"' end test 'should uppercase section titles without mangling formatting macros' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} does stuff == "`Main`" __ EOS output = Asciidoctor.convert input, backend: :manpage assert_includes output, '.SH "\(lqMAIN\(rq \fI\fP"' end test 'should not uppercase monospace span in section titles' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} does stuff == `show` option EOS output = Asciidoctor.convert input, backend: :manpage assert_includes output, '.SH "\f(CRshow\fP OPTION"' end end context 'Backslash' do test 'should not escape spaces for empty manual or source fields' do input = SAMPLE_MANPAGE_HEADER.lines.reject {|l| l.start_with? ':man ' } output = Asciidoctor.convert input, backend: :manpage, standalone: true assert_match ' Manual: \ \&', output assert_match ' Source: \ \&', output assert_match(/^\.TH "COMMAND" .* "\\ \\&" "\\ \\&"$/, output) end test 'should preserve backslashes in escape sequences' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} "`hello`" '`goodbye`' *strong* _weak_ `even` EOS output = Asciidoctor.convert input, backend: :manpage assert_equal '\(lqhello\(rq \(oqgoodbye\(cq \fBstrong\fP \fIweak\fP \f(CReven\fP', output.lines.last.chomp end test 'should preserve literal backslashes in content' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} \\.foo \\ bar \\\\ baz\\ more EOS output = Asciidoctor.convert input, backend: :manpage assert_equal '\(rs.foo \(rs bar \(rs\(rs baz\(rs', output.lines[-2].chomp end test 'should escape literal escape sequence' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} \\fB makes text bold EOS output = Asciidoctor.convert input, backend: :manpage assert_match '\(rsfB makes text bold', output end test 'should preserve inline breaks' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} Before break. + After break. EOS expected = <<~'EOS'.chop Before break. .br After break. 
EOS output = Asciidoctor.convert input, backend: :manpage assert_equal expected, output.lines[-3..-1].join end end context 'URL macro' do test 'should not leave blank line before URL macro' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} First paragraph. http://asciidoc.org[AsciiDoc] EOS expected = <<~'EOS'.chop .sp First paragraph. .sp .URL "http://asciidoc.org" "AsciiDoc" "" EOS output = Asciidoctor.convert input, backend: :manpage assert_equal expected, output.lines[-4..-1].join end test 'should not swallow content following URL' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} http://asciidoc.org[AsciiDoc] can be used to create man pages. EOS expected = <<~'EOS'.chop .URL "http://asciidoc.org" "AsciiDoc" "" can be used to create man pages. EOS output = Asciidoctor.convert input, backend: :manpage assert_equal expected, output.lines[-2..-1].join end test 'should pass adjacent character as final argument of URL macro' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} This is http://asciidoc.org[AsciiDoc]. EOS expected = <<~'EOS'.chop This is \c .URL "http://asciidoc.org" "AsciiDoc" "." EOS output = Asciidoctor.convert input, backend: :manpage assert_equal expected, output.lines[-2..-1].join end test 'should pass adjacent character as final argument of URL macro and move trailing content to next line' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} This is http://asciidoc.org[AsciiDoc], which can be used to write content. EOS expected = <<~'EOS'.chop This is \c .URL "http://asciidoc.org" "AsciiDoc" "," which can be used to write content. EOS output = Asciidoctor.convert input, backend: :manpage assert_equal expected, output.lines[-3..-1].join end test 'should not leave blank lines between URLs on contiguous lines of input' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} The corresponding implementations are http://clisp.sf.net[CLISP], http://ccl.clozure.com[Clozure CL], http://cmucl.org[CMUCL], http://ecls.sf.net[ECL], and http://sbcl.sf.net[SBCL]. EOS expected = <<~'EOS'.chop .sp The corresponding implementations are .URL "http://clisp.sf.net" "CLISP" "," .URL "http://ccl.clozure.com" "Clozure CL" "," .URL "http://cmucl.org" "CMUCL" "," .URL "http://ecls.sf.net" "ECL" "," and \c .URL "http://sbcl.sf.net" "SBCL" "." EOS output = Asciidoctor.convert input, backend: :manpage assert_equal expected, output.lines[-8..-1].join end test 'should not leave blank lines between URLs on same line of input' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} The corresponding implementations are http://clisp.sf.net[CLISP], http://ccl.clozure.com[Clozure CL], http://cmucl.org[CMUCL], http://ecls.sf.net[ECL], and http://sbcl.sf.net[SBCL]. EOS expected = <<~'EOS'.chop .sp The corresponding implementations are \c .URL "http://clisp.sf.net" "CLISP" "," .URL "http://ccl.clozure.com" "Clozure CL" "," .URL "http://cmucl.org" "CMUCL" "," .URL "http://ecls.sf.net" "ECL" "," and .URL "http://sbcl.sf.net" "SBCL" "." EOS output = Asciidoctor.convert input, backend: :manpage assert_equal expected, output.lines[-8..-1].join end test 'should not insert space between link and non-whitespace characters surrounding it' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} Please search |link:http://discuss.asciidoctor.org[the forums]| before asking. EOS expected = <<~'EOS'.chop .sp Please search |\c .URL "http://discuss.asciidoctor.org" "the forums" "|" before asking. 
EOS output = Asciidoctor.convert input, backend: :manpage assert_equal expected, output.lines[-4..-1].join end test 'should be able to use monospaced text inside a link' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} Enter the link:cat[`cat`] command. EOS expected = <<~'EOS'.chop .sp Enter the \c .URL "cat" "\f(CRcat\fP" "" command. EOS output = Asciidoctor.convert input, backend: :manpage assert_equal expected, output.lines[-4..-1].join end end context 'MTO macro' do test 'should convert inline email macro into MTO macro' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} First paragraph. mailto:doc@example.org[Contact the doc] EOS expected = <<~'EOS'.chop .sp First paragraph. .sp .MTO "doc\(atexample.org" "Contact the doc" "" EOS output = Asciidoctor.convert input, backend: :manpage assert_equal expected, output.lines[-4..-1].join end test 'should set text of MTO macro to blank for implicit email' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} Bugs fixed daily by doc@example.org. EOS expected_coda = <<~'EOS'.chop Bugs fixed daily by \c .MTO "doc\(atexample.org" "" "." EOS output = Asciidoctor.convert input, backend: :manpage assert output.end_with? expected_coda end end context 'Table' do test 'should create header, body, and footer rows in correct order' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} [%header%footer] |=== |Header |Body 1 |Body 2 |Footer |=== EOS expected_coda = <<~'EOS'.chop allbox tab(:); lt. T{ .sp Header T} T{ .sp Body 1 T} T{ .sp Body 2 T} T{ .sp Footer T} .TE .sp EOS output = Asciidoctor.convert input, backend: :manpage assert output.end_with? expected_coda end test 'should manify normal table cell content' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} |=== |*Col A* |_Col B_ |*bold* |`mono` |_italic_ | #mark# |=== EOS output = Asciidoctor.convert input, backend: :manpage refute_match(/<\/?BOUNDARY>/, output) end test 'should manify table title' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} .Table of options |=== | Name | Description | Default | dim | dimension of the object | 3 |=== EOS expected_coda = <<~'EOS'.chop .it 1 an-trap .nr an-no-space-flag 1 .nr an-break-flag 1 .br .B Table 1. Table of options .TS allbox tab(:); lt lt lt. T{ .sp Name T}:T{ .sp Description T}:T{ .sp Default T} T{ .sp dim T}:T{ .sp dimension of the object T}:T{ .sp 3 T} .TE .sp EOS output = Asciidoctor.convert input, backend: :manpage assert output.end_with? expected_coda end test 'should manify and preserve whitespace in literal table cell' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} |=== |a l|b c _d_ . |=== EOS expected_coda = <<~'EOS'.chop .TS allbox tab(:); lt lt. T{ .sp a T}:T{ .sp .nf b c _d_ \&. .fi T} .TE .sp EOS output = Asciidoctor.convert input, backend: :manpage assert output.end_with? expected_coda end end context 'Images' do test 'should replace block image with alt text enclosed in square brackets' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} Behold the wisdom of the Magic 8 Ball! image::signs-point-to-yes.jpg[] EOS output = Asciidoctor.convert input, backend: :manpage assert output.end_with? %(\n.sp\n[signs point to yes]) end test 'should manify alt text of block image' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} image::rainbow.jpg["That's a double rainbow, otherwise known as rainbow{pp}!"] EOS output = Asciidoctor.convert input, backend: :manpage assert output.end_with? 
%/\n.sp\n[That\\(cqs a double rainbow, otherwise known as rainbow++!]/ end test 'should replace inline image with alt text enclosed in square brackets' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} The Magic 8 Ball says image:signs-point-to-yes.jpg[]. EOS output = Asciidoctor.convert input, backend: :manpage assert_includes output, 'The Magic 8 Ball says [signs point to yes].' end test 'should place link after alt text for inline image if link is defined' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} The Magic 8 Ball says image:signs-point-to-yes.jpg[link=https://en.wikipedia.org/wiki/Magic_8-Ball]. EOS output = Asciidoctor.convert input, backend: :manpage assert_includes output, 'The Magic 8 Ball says [signs point to yes] .' end test 'should reference image with title using styled xref' do input = <<~EOS.chomp #{SAMPLE_MANPAGE_HEADER} To get your fortune, see <<magic-8-ball>>. .Magic 8-Ball [#magic-8-ball] image::signs-point-to-yes.jpg[] EOS output = Asciidoctor.convert input, backend: :manpage, attributes: { 'xrefstyle' => 'full' } lines = output.lines.map(&:chomp) assert_includes lines, 'To get your fortune, see Figure 1, \(lqMagic 8\-Ball\(rq.' assert_includes lines, '.B Figure 1. Magic 8\-Ball' end end context 'Quote Block' do test 'should indent quote block' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} [,James Baldwin] ____ Not everything that is faced can be changed. But nothing can be changed until it is faced. ____ EOS expected_coda = <<~'EOS'.chop .RS 3 .ll -.6i .sp Not everything that is faced can be changed. But nothing can be changed until it is faced. .br .RE .ll .RS 5 .ll -.10i \(em James Baldwin .RE .ll EOS output = Asciidoctor.convert input, backend: :manpage assert output.end_with? expected_coda end end context 'Verse Block' do test 'should preserve hard line breaks in verse block' do input = SAMPLE_MANPAGE_HEADER.lines synopsis_idx = input.find_index {|it| it == %(== SYNOPSIS\n) } + 2 input[synopsis_idx..synopsis_idx] = <<~'EOS'.lines [verse] _command_ [_OPTION_]... _FILE_... EOS input = <<~EOS.chop #{input.join} description EOS expected_coda = <<~'EOS'.chop .SH "SYNOPSIS" .sp .nf \fIcommand\fP [\fIOPTION\fP]... \fIFILE\fP... .fi .br .SH "DESCRIPTION" .sp description EOS output = Asciidoctor.convert input, backend: :manpage assert output.end_with? expected_coda end end context 'Callout List' do test 'should generate callout list using proper formatting commands' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} ---- $ gem install asciidoctor # <1> ---- <1> Installs the asciidoctor gem from RubyGems.org EOS expected_coda = <<~'EOS'.chop .TS tab(:); r lw(\n(.lu*75u/100u). \fB(1)\fP\h'-2n':T{ Installs the asciidoctor gem from RubyGems.org T} .TE EOS output = Asciidoctor.convert input, backend: :manpage assert output.end_with? expected_coda end end context 'Page breaks' do test 'should insert page break at location of page break macro' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} == Section With Break before break <<< after break EOS expected_coda = <<~'EOS'.chop .SH "SECTION WITH BREAK" .sp before break .bp .sp after break EOS output = Asciidoctor.convert input, backend: :manpage assert output.end_with? expected_coda end end context 'UI macros' do test 'should enclose button in square brackets and format as bold' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} == UI Macros btn:[Save] EOS expected_coda = <<~'EOS'.chop .SH "UI MACROS" .sp \fB[\0Save\0]\fP EOS output = Asciidoctor.convert input, backend: :manpage, attributes: { 'experimental' => '' } assert output.end_with?
expected_coda end test 'should format single key in monospaced text' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} == UI Macros kbd:[Enter] EOS expected_coda = <<~'EOS'.chop .SH "UI MACROS" .sp \f(CREnter\fP EOS output = Asciidoctor.convert input, backend: :manpage, attributes: { 'experimental' => '' } assert output.end_with? expected_coda end test 'should format each key in sequence as monospaced text separated by +' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} == UI Macros kbd:[Ctrl,s] EOS expected_coda = <<~'EOS'.chop .SH "UI MACROS" .sp \f(CRCtrl\0+\0s\fP EOS output = Asciidoctor.convert input, backend: :manpage, attributes: { 'experimental' => '' } assert output.end_with? expected_coda end test 'should format single menu reference in italic' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} == UI Macros menu:File[] EOS expected_coda = <<~'EOS'.chop .SH "UI MACROS" .sp \fIFile\fP EOS output = Asciidoctor.convert input, backend: :manpage, attributes: { 'experimental' => '' } assert output.end_with? expected_coda end test 'should format menu sequence in italic separated by carets' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} == UI Macros menu:File[New Tab] EOS expected_coda = <<~'EOS'.chop .SH "UI MACROS" .sp \fIFile\0\(fc\0New Tab\fP EOS output = Asciidoctor.convert input, backend: :manpage, attributes: { 'experimental' => '' } assert output.end_with? expected_coda end test 'should format menu sequence with submenu in italic separated by carets' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} == UI Macros menu:View[Zoom > Zoom In] EOS expected_coda = <<~'EOS'.chop .SH "UI MACROS" .sp \fIView\fP\0\(fc\0\fIZoom\fP\0\(fc\0\fIZoom In\fP EOS output = Asciidoctor.convert input, backend: :manpage, attributes: { 'experimental' => '' } assert output.end_with? expected_coda end end context 'xrefs' do test 'should populate automatic link text for internal xref' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} You can access this information using the options listed under <<_generic_program_information>>. == Options === Generic Program Information --help:: Output a usage message and exit. -V, --version:: Output the version number of grep and exit. EOS output = Asciidoctor.convert input, backend: :manpage, attributes: { 'experimental' => '' } assert_includes output, 'You can access this information using the options listed under Generic Program Information.' end test 'should populate automatic link text for each occurrence of internal xref' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} You can access this information using the options listed under <<_generic_program_information>>. The options listed in <<_generic_program_information>> should always be used by themselves. == Options === Generic Program Information --help:: Output a usage message and exit. -V, --version:: Output the version number of grep and exit. EOS output = Asciidoctor.convert input, backend: :manpage, attributes: { 'experimental' => '' } assert_includes output, 'You can access this information using the options listed under Generic Program Information.' assert_includes output, 'The options listed in Generic Program Information should always be used by themselves.' end test 'should uppercase the reftext for level-2 section titles if the reftext matches the section title' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} If you read nothing else, read the <<_foo_bar>> section. === Options --foo-bar _foobar_:: Puts the foo in your bar. See <<_foo_bar>> section for details. == Foo Bar Foo goes with bar, not baz. 
EOS output = Asciidoctor.convert input, backend: :manpage, attributes: { 'experimental' => '' } assert_includes output, 'If you read nothing else, read the FOO BAR section.' assert_includes output, 'See FOO BAR section for details.' end end context 'Footnotes' do test 'should generate list of footnotes using numbered list with numbers enclosed in brackets' do [true, false].each do |standalone| input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} text.footnote:[first footnote] more text.footnote:[second footnote] EOS expected_coda = <<~'EOS'.chop .sp text.[1] .sp more text.[2] .SH "NOTES" .IP [1] first footnote .IP [2] second footnote EOS if standalone expected_coda = <<~EOS.chop #{expected_coda} .SH "AUTHOR" .sp Author Name EOS end output = Asciidoctor.convert input, backend: :manpage, standalone: standalone assert output.end_with? expected_coda end end test 'should number footnotes according to footnote index' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} text.footnote:fn1[first footnote]footnote:[second footnote] more text.footnote:fn1[] EOS expected_coda = <<~'EOS'.chop .sp text.[1][2] .sp more text.[1] .SH "NOTES" .IP [1] first footnote .IP [2] second footnote EOS output = Asciidoctor.convert input, backend: :manpage assert output.end_with? expected_coda end test 'should format footnote with bare URL' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} text.footnote:[https://example.org] EOS expected_coda = <<~'EOS'.chop .SH "NOTES" .IP [1] .URL "https://example.org" "" "" EOS output = Asciidoctor.convert input, backend: :manpage assert output.end_with? expected_coda end test 'should format footnote with text before bare URL' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} text.footnote:[see https://example.org] EOS expected_coda = <<~'EOS'.chop .SH "NOTES" .IP [1] see \c .URL "https://example.org" "" "" EOS output = Asciidoctor.convert input, backend: :manpage assert output.end_with? expected_coda end test 'should format footnote with text after bare URL' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} text.footnote:[https://example.org is the place] EOS expected_coda = <<~'EOS'.chop .SH "NOTES" .IP [1] .URL "https://example.org" "" "" is the place EOS output = Asciidoctor.convert input, backend: :manpage assert output.end_with? expected_coda end test 'should format footnote with URL macro' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} text.footnote:[go to https://example.org[example site].] EOS expected_coda = <<~'EOS'.chop .SH "NOTES" .IP [1] go to \c .URL "https://example.org" "example site" "." EOS output = Asciidoctor.convert input, backend: :manpage assert output.end_with? expected_coda end test 'should produce a warning message and output fallback text at location of macro of unresolved footnote' do input = <<~EOS.chop #{SAMPLE_MANPAGE_HEADER} text.footnote:does-not-exist[] EOS expected_coda = <<~'EOS'.chop .sp text.[does\-not\-exist] EOS using_memory_logger do |logger| output = Asciidoctor.convert input, backend: :manpage assert output.end_with? 
expected_coda assert_message logger, :WARN, 'invalid footnote reference: does-not-exist' end end end context 'Environment' do test 'should use SOURCE_DATE_EPOCH as modified time of input file and local time' do old_source_date_epoch = ENV.delete 'SOURCE_DATE_EPOCH' begin ENV['SOURCE_DATE_EPOCH'] = '1234123412' output = Asciidoctor.convert SAMPLE_MANPAGE_HEADER, backend: :manpage, standalone: true assert_match(/Date: 2009-02-08/, output) assert_match(/^\.TH "COMMAND" "1" "2009-02-08" "Command 1.2.3" "Command Manual"$/, output) ensure if old_source_date_epoch ENV['SOURCE_DATE_EPOCH'] = old_source_date_epoch else ENV.delete 'SOURCE_DATE_EPOCH' end end end test 'should fail if SOURCE_DATE_EPOCH is malformed' do old_source_date_epoch = ENV.delete 'SOURCE_DATE_EPOCH' begin ENV['SOURCE_DATE_EPOCH'] = 'aaaaaaaa' Asciidoctor.convert SAMPLE_MANPAGE_HEADER, backend: :manpage, standalone: true assert false rescue assert true ensure if old_source_date_epoch ENV['SOURCE_DATE_EPOCH'] = old_source_date_epoch else ENV.delete 'SOURCE_DATE_EPOCH' end end end end end asciidoctor-2.0.20/test/options_test.rb000066400000000000000000000301541443135032600201400ustar00rootroot00000000000000# frozen_string_literal: true require_relative 'test_helper' require File.join Asciidoctor::LIB_DIR, 'asciidoctor/cli/options' context 'Options' do test 'should print usage and return error code 0 when help flag is present' do redirect_streams do |stdout, stderr| exitval = Asciidoctor::Cli::Options.parse!(%w(-h)) assert_equal 0, exitval assert_match(/^Usage:/, stdout.string) end end test 'should show safe modes in severity order' do redirect_streams do |stdout, stderr| exitval = Asciidoctor::Cli::Options.parse!(%w(-h)) assert_equal 0, exitval assert_match(/unsafe, safe, server, secure/, stdout.string) end end test 'should print usage and return error code 0 when help flag is unknown' do exitval, output = redirect_streams do |out, _| [Asciidoctor::Cli::Options.parse!(%w(-h unknown)), out.string] end assert_equal 0, exitval assert_match(/^Usage:/, output) end test 'should dump man page and return error code 0 when help topic is manpage' do exitval, output = redirect_streams do |out, _| [Asciidoctor::Cli::Options.parse!(%w(-h manpage)), out.string] end assert_equal 0, exitval assert_includes output, 'Manual: Asciidoctor Manual' assert_includes output, '.TH "ASCIIDOCTOR"' end test 'should an overview of the AsciiDoc syntax and return error code 0 when help topic is syntax' do exitval, output = redirect_streams do |out, _| [Asciidoctor::Cli::Options.parse!(%w(-h syntax)), out.string] end assert_equal 0, exitval assert_includes output, '= AsciiDoc Syntax' assert_includes output, '== Text Formatting' end test 'should print message and return error code 1 when manpage is not found' do old_manpage_path = ENV['ASCIIDOCTOR_MANPAGE_PATH'] begin ENV['ASCIIDOCTOR_MANPAGE_PATH'] = (manpage_path = fixture_path 'no-such-file.1') redirect_streams do |out, stderr| exitval = Asciidoctor::Cli::Options.parse!(%w(-h manpage)) assert_equal 1, exitval assert_equal %(asciidoctor: FAILED: manual page not found: #{manpage_path}), stderr.string.chomp end ensure if old_manpage_path ENV['ASCIIDOCTOR_MANPAGE_PATH'] = old_manpage_path else ENV.delete 'ASCIIDOCTOR_MANPAGE_PATH' end end end test 'should return error code 1 when invalid option present' do redirect_streams do |stdout, stderr| exitval = Asciidoctor::Cli::Options.parse!(%w(--foobar)) assert_equal 1, exitval assert_equal 'asciidoctor: invalid option: --foobar', stderr.string.chomp end end test 'should 
return error code 1 when option has invalid argument' do redirect_streams do |stdout, stderr| exitval = Asciidoctor::Cli::Options.parse!(%w(-d chapter input.ad)) # had to change for #320 assert_equal 1, exitval assert_equal 'asciidoctor: invalid argument: -d chapter', stderr.string.chomp end end test 'should return error code 1 when option is missing required argument' do redirect_streams do |stdout, stderr| exitval = Asciidoctor::Cli::Options.parse!(%w(-b)) assert_equal 1, exitval assert_equal 'asciidoctor: option missing argument: -b', stderr.string.chomp end end test 'should emit warning when unparsed options remain' do redirect_streams do |stdout, stderr| options = Asciidoctor::Cli::Options.parse!(%w(-b docbook - -)) assert_kind_of Hash, options assert_match(/asciidoctor: WARNING: extra arguments .*/, stderr.string.chomp) end end test 'basic argument assignment' do options = Asciidoctor::Cli::Options.parse!(%w(-w -v -e -d book test/fixtures/sample.adoc)) assert_equal 2, options[:verbose] assert_equal false, options[:standalone] assert_equal 'book', options[:attributes]['doctype'] assert_equal 1, options[:input_files].size assert_equal 'test/fixtures/sample.adoc', options[:input_files][0] end test 'supports legacy option for no header footer' do options = Asciidoctor::Cli::Options.parse!(%w(-s test/fixtures/sample.adoc)) assert_equal false, options[:standalone] assert_equal 1, options[:input_files].size assert_equal 'test/fixtures/sample.adoc', options[:input_files][0] end test 'standard attribute assignment' do options = Asciidoctor::Cli::Options.parse!(%w(-a docinfosubs=attributes,replacements -a icons test/fixtures/sample.adoc)) assert_equal 'attributes,replacements', options[:attributes]['docinfosubs'] assert_equal '', options[:attributes]['icons'] end test 'multiple attribute arguments' do options = Asciidoctor::Cli::Options.parse!(%w(-a imagesdir=images -a icons test/fixtures/sample.adoc)) assert_equal 'images', options[:attributes]['imagesdir'] assert_equal '', options[:attributes]['icons'] end test 'should only split attribute key/value pairs on first equal sign' do options = Asciidoctor::Cli::Options.parse!(%w(-a name=value=value test/fixtures/sample.adoc)) assert_equal 'value=value', options[:attributes]['name'] end test 'should not fail if value of attribute option is empty' do options = Asciidoctor::Cli::Options.parse!(['-a', '', 'test/fixtures/sample.adoc']) assert_nil options[:attributes] end test 'should not fail if value of attribute option is equal sign' do options = Asciidoctor::Cli::Options.parse!(['-a', '=', 'test/fixtures/sample.adoc']) assert_nil options[:attributes] end test 'should gracefully force encoding to UTF-8 if encoding on string is mislabeled' do args = ['-a', ((%w(platform-name 云平台).join '=').force_encoding Encoding::ASCII_8BIT), '-'] options = Asciidoctor::Cli::Options.parse! 
args assert_equal '云平台', options[:attributes]['platform-name'] assert_equal Encoding::UTF_8, options[:attributes]['platform-name'].encoding end test 'should allow safe mode to be specified' do options = Asciidoctor::Cli::Options.parse!(%w(-S safe test/fixtures/sample.adoc)) assert_equal Asciidoctor::SafeMode::SAFE, options[:safe] end test 'should allow any backend to be specified' do options = Asciidoctor::Cli::Options.parse!(%w(-b my_custom_backend test/fixtures/sample.adoc)) assert_equal 'my_custom_backend', options[:attributes]['backend'] end test 'article doctype assignment' do options = Asciidoctor::Cli::Options.parse!(%w(-d article test/fixtures/sample.adoc)) assert_equal 'article', options[:attributes]['doctype'] end test 'book doctype assignment' do options = Asciidoctor::Cli::Options.parse!(%w(-d book test/fixtures/sample.adoc)) assert_equal 'book', options[:attributes]['doctype'] end test 'inline doctype assignment' do options = Asciidoctor::Cli::Options.parse!(%w(-d inline test/fixtures/sample.adoc)) assert_equal 'inline', options[:attributes]['doctype'] end test 'template engine assignment' do options = Asciidoctor::Cli::Options.parse!(%w(-E haml test/fixtures/sample.adoc)) assert_equal 'haml', options[:template_engine] end test 'template directory assignment' do options = Asciidoctor::Cli::Options.parse!(%w(-T custom-backend test/fixtures/sample.adoc)) assert_equal ['custom-backend'], options[:template_dirs] end test 'multiple template directory assignments' do options = Asciidoctor::Cli::Options.parse!(%w(-T custom-backend -T custom-backend-hacks test/fixtures/sample.adoc)) assert_equal ['custom-backend', 'custom-backend-hacks'], options[:template_dirs] end test 'multiple -r flags requires specified libraries' do options = Asciidoctor::Cli::Options.new redirect_streams do |stdout, stderr| exitval = options.parse! %w(-r foobar -r foobaz test/fixtures/sample.adoc) assert_match(%(asciidoctor: FAILED: 'foobar' could not be loaded), stderr.string) assert_equal 1, exitval assert_equal ['foobar', 'foobaz'], options[:requires] end end test '-r flag with multiple values requires specified libraries' do options = Asciidoctor::Cli::Options.new redirect_streams do |stdout, stderr| exitval = options.parse! %w(-r foobar,foobaz test/fixtures/sample.adoc) assert_match(%(asciidoctor: FAILED: 'foobar' could not be loaded), stderr.string) assert_equal 1, exitval assert_equal ['foobar', 'foobaz'], options[:requires] end end test '-I option appends paths to $LOAD_PATH' do options = Asciidoctor::Cli::Options.new old_load_path = $:.dup begin exitval = options.parse! %w(-I foobar -I foobaz test/fixtures/sample.adoc) refute_equal 1, exitval assert_equal old_load_path.size + 2, $:.size assert_equal File.expand_path('foobar'), $:[0] assert_equal File.expand_path('foobaz'), $:[1] assert_equal ['foobar', 'foobaz'], options[:load_paths] ensure ($:.size - old_load_path.size).times { $:.shift } end end test '-I option appends multiple paths to $LOAD_PATH' do options = Asciidoctor::Cli::Options.new old_load_path = $:.dup begin exitval = options.parse! %W(-I foobar#{File::PATH_SEPARATOR}foobaz test/fixtures/sample.adoc) refute_equal 1, exitval assert_equal old_load_path.size + 2, $:.size assert_equal File.expand_path('foobar'), $:[0] assert_equal File.expand_path('foobaz'), $:[1] assert_equal ['foobar', 'foobaz'], options[:load_paths] ensure ($:.size - old_load_path.size).times { $:.shift } end end test 'should set failure level to FATAL by default' do options = Asciidoctor::Cli::Options.parse! 
%w(test/fixtures/sample.adoc) assert_equal ::Logger::Severity::FATAL, options[:failure_level] end test 'should allow failure level to be set to FATAL using any recognized abbreviation' do %w(f fatal FATAL).each do |val| options = Asciidoctor::Cli::Options.parse! %W(--failure-level=#{val} test/fixtures/sample.adoc) assert_equal ::Logger::Severity::FATAL, options[:failure_level] end end test 'should allow failure level to be set to ERROR using any recognized abbreviation' do %w(e err ERR error ERROR).each do |val| options = Asciidoctor::Cli::Options.parse!(%W(--failure-level=#{val} test/fixtures/sample.adoc)) assert_equal ::Logger::Severity::ERROR, options[:failure_level] end end test 'should allow failure level to be set to WARN using any recognized abbreviation' do %w(w warn WARN warning WARNING).each do |val| options = Asciidoctor::Cli::Options.parse! %W(--failure-level=#{val} test/fixtures/sample.adoc) assert_equal ::Logger::Severity::WARN, options[:failure_level] end end test 'should not allow failure level to be set to unknown value' do exit_code, messages = redirect_streams do |_, err| [(Asciidoctor::Cli::Options.parse! %w(--failure-level=foobar test/fixtures/sample.adoc)), err.string] end assert_equal 1, exit_code assert_includes messages, 'invalid argument: --failure-level=foobar' end test 'should set verbose to 2 when -v flag is specified' do options = Asciidoctor::Cli::Options.parse!(%w(-v test/fixtures/sample.adoc)) assert_equal 2, options[:verbose] end test 'should set verbose to 0 when -q flag is specified' do options = Asciidoctor::Cli::Options.parse!(%w(-q test/fixtures/sample.adoc)) assert_equal 0, options[:verbose] end test 'should set verbose to 2 when -v flag is specified after -q flag' do options = Asciidoctor::Cli::Options.parse!(%w(-q -v test/fixtures/sample.adoc)) assert_equal 2, options[:verbose] end test 'should set verbose to 0 when -q flag is specified after -v flag' do options = Asciidoctor::Cli::Options.parse!(%w(-v -q test/fixtures/sample.adoc)) assert_equal 0, options[:verbose] end test 'should enable warnings when -w flag is specified' do options = Asciidoctor::Cli::Options.parse!(%w(-w test/fixtures/sample.adoc)) assert options[:warnings] end test 'should enable timings when -t flag is specified' do options = Asciidoctor::Cli::Options.parse!(%w(-t test/fixtures/sample.adoc)) assert_equal true, options[:timings] end test 'timings option is disable by default' do options = Asciidoctor::Cli::Options.parse!(%w(test/fixtures/sample.adoc)) assert_equal false, options[:timings] end end asciidoctor-2.0.20/test/paragraphs_test.rb000066400000000000000000000460361443135032600206030ustar00rootroot00000000000000# frozen_string_literal: true require_relative 'test_helper' context 'Paragraphs' do context 'Normal' do test 'should treat plain text separated by blank lines as paragraphs' do input = <<~'EOS' Plain text for the win! Yep. Text. Plain and simple. EOS output = convert_string_to_embedded input assert_css 'p', output, 2 assert_xpath '(//p)[1][text() = "Plain text for the win!"]', output, 1 assert_xpath '(//p)[2][text() = "Yep. Text. Plain and simple."]', output, 1 end test 'should associate block title with paragraph' do input = <<~'EOS' .Titled Paragraph. Winning. 
EOS output = convert_string_to_embedded input assert_css 'p', output, 2 assert_xpath '(//p)[1]/preceding-sibling::*[@class = "title"]', output, 1 assert_xpath '(//p)[1]/preceding-sibling::*[@class = "title"][text() = "Titled"]', output, 1 assert_xpath '(//p)[2]/preceding-sibling::*[@class = "title"]', output, 0 end test 'no duplicate block before next section' do input = <<~'EOS' = Title Preamble == First Section Paragraph 1 Paragraph 2 == Second Section Last words EOS output = convert_string input assert_xpath '//p[text() = "Paragraph 2"]', output, 1 end test 'does not treat wrapped line as a list item' do input = <<~'EOS' paragraph . wrapped line EOS output = convert_string_to_embedded input assert_css 'p', output, 1 assert_xpath %(//p[text()="paragraph\n. wrapped line"]), output, 1 end test 'does not treat wrapped line as a block title' do input = <<~'EOS' paragraph .wrapped line EOS output = convert_string_to_embedded input assert_css 'p', output, 1 assert_xpath %(//p[text()="paragraph\n.wrapped line"]), output, 1 end test 'interprets normal paragraph style as normal paragraph' do input = <<~'EOS' [normal] Normal paragraph. Nothing special. EOS output = convert_string_to_embedded input assert_css 'p', output, 1 end test 'removes indentation from literal paragraph marked as normal' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS [normal] Normal paragraph. Nothing special. Last line. EOS output = convert_string_to_embedded input assert_css 'p', output, 1 assert_xpath %(//p[text()="Normal paragraph.\n Nothing special.\nLast line."]), output, 1 end test 'normal paragraph terminates at block attribute list' do input = <<~'EOS' normal text [literal] literal text EOS output = convert_string_to_embedded input assert_css '.paragraph:root', output, 1 assert_css '.literalblock:root', output, 1 end test 'normal paragraph terminates at block delimiter' do input = <<~'EOS' normal text -- text in open block -- EOS output = convert_string_to_embedded input assert_css '.paragraph:root', output, 1 assert_css '.openblock:root', output, 1 end test 'normal paragraph terminates at list continuation' do input = <<~'EOS' normal text + EOS output = convert_string_to_embedded input assert_css '.paragraph:root', output, 2 assert_xpath %((/*[@class="paragraph"])[1]/p[text() = "normal text"]), output, 1 assert_xpath %((/*[@class="paragraph"])[2]/p[text() = "+"]), output, 1 end test 'normal style turns literal paragraph into normal paragraph' do input = <<~'EOS' [normal] normal paragraph, despite the leading indent EOS output = convert_string_to_embedded input assert_css '.paragraph:root > p', output, 1 end test 'automatically promotes index terms in DocBook output if indexterm-promotion-option is set' do input = <<~'EOS' Here is an index entry for ((tigers)). indexterm:[Big cats,Tigers,Siberian Tiger] Here is an index entry for indexterm2:[Linux]. (((Operating Systems,Linux))) Note that multi-entry terms generate separate index entries. 
EOS output = convert_string_to_embedded input, backend: 'docbook', attributes: { 'indexterm-promotion-option' => '' } assert_xpath '/simpara', output, 1 term1 = xmlnodes_at_xpath '(//indexterm)[1]', output, 1 assert_equal %(\ntigers\n), term1.to_s assert term1.next.content.start_with?('tigers') term2 = xmlnodes_at_xpath '(//indexterm)[2]', output, 1 term2_elements = term2.elements assert_equal 3, term2_elements.size assert_equal 'Big cats', term2_elements[0].to_s assert_equal 'Tigers', term2_elements[1].to_s assert_equal 'Siberian Tiger', term2_elements[2].to_s term3 = xmlnodes_at_xpath '(//indexterm)[3]', output, 1 term3_elements = term3.elements assert_equal 2, term3_elements.size assert_equal 'Tigers', term3_elements[0].to_s assert_equal 'Siberian Tiger', term3_elements[1].to_s term4 = xmlnodes_at_xpath '(//indexterm)[4]', output, 1 term4_elements = term4.elements assert_equal 1, term4_elements.size assert_equal 'Siberian Tiger', term4_elements[0].to_s term5 = xmlnodes_at_xpath '(//indexterm)[5]', output, 1 assert_equal %(\nLinux\n), term5.to_s assert term5.next.content.start_with?('Linux') assert_xpath '(//indexterm)[6]/*', output, 2 assert_xpath '(//indexterm)[7]/*', output, 1 end test 'does not automatically promote index terms in DocBook output if indexterm-promotion-option is not set' do input = <<~'EOS' The Siberian Tiger is one of the biggest living cats. indexterm:[Big cats,Tigers,Siberian Tiger] Note that multi-entry terms generate separate index entries. (((Operating Systems,Linux))) EOS output = convert_string_to_embedded input, backend: 'docbook' assert_css 'indexterm', output, 2 terms = xmlnodes_at_css 'indexterm', output, 2 term1 = terms[0] term1_elements = term1.elements assert_equal 3, term1_elements.size assert_equal 'Big cats', term1_elements[0].to_s assert_equal 'Tigers', term1_elements[1].to_s assert_equal 'Siberian Tiger', term1_elements[2].to_s term2 = terms[1] term2_elements = term2.elements assert_equal 2, term2_elements.size assert_equal 'Operating Systems', term2_elements[0].to_s assert_equal 'Linux', term2_elements[1].to_s end test 'normal paragraph should honor explicit subs list' do input = <<~'EOS' [subs="specialcharacters"] ** EOS output = convert_string_to_embedded input assert_includes output, '*<Hey Jude>*' end test 'normal paragraph should honor specialchars shorthand' do input = <<~'EOS' [subs="specialchars"] ** EOS output = convert_string_to_embedded input assert_includes output, '*<Hey Jude>*' end test 'should add a hardbreak at end of each line when hardbreaks option is set' do input = <<~'EOS' [%hardbreaks] read my lips EOS output = convert_string_to_embedded input assert_css 'br', output, 2 assert_xpath '//p', output, 1 assert_includes output, "

<p>read<br>\nmy<br>\nlips</p>
    " end test 'should be able to toggle hardbreaks by setting hardbreaks-option on document' do input = <<~'EOS' :hardbreaks-option: make it so :!hardbreaks: roll it back EOS output = convert_string_to_embedded input assert_xpath '(//p)[1]/br', output, 2 assert_xpath '(//p)[2]/br', output, 0 end end context 'Literal' do test 'single-line literal paragraphs' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS you know what? LITERALS ARE LITERALLY AWESOME! EOS output = convert_string_to_embedded input assert_xpath '//pre', output, 3 end test 'multi-line literal paragraph' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS Install instructions: yum install ruby rubygems gem install asciidoctor You're good to go! EOS output = convert_string_to_embedded input assert_xpath '//pre', output, 1 # indentation should be trimmed from literal block assert_xpath %(//pre[text() = "yum install ruby rubygems\ngem install asciidoctor"]), output, 1 end test 'literal paragraph' do input = <<~'EOS' [literal] this text is literally literal EOS output = convert_string_to_embedded input assert_xpath %(/*[@class="literalblock"]//pre[text()="this text is literally literal"]), output, 1 end test 'should read content below literal style verbatim' do input = <<~'EOS' [literal] image::not-an-image-block[] EOS output = convert_string_to_embedded input assert_xpath %(/*[@class="literalblock"]//pre[text()="image::not-an-image-block[]"]), output, 1 assert_css 'img', output, 0 end test 'listing paragraph' do input = <<~'EOS' [listing] this text is a listing EOS output = convert_string_to_embedded input assert_xpath %(/*[@class="listingblock"]//pre[text()="this text is a listing"]), output, 1 end test 'source paragraph' do input = <<~'EOS' [source] use the source, luke! 
EOS output = convert_string_to_embedded input assert_xpath %(/*[@class="listingblock"]//pre[@class="highlight"]/code[text()="use the source, luke!"]), output, 1 end test 'source code paragraph with language' do input = <<~'EOS' [source, perl] die 'zomg perl is tough'; EOS output = convert_string_to_embedded input assert_xpath %(/*[@class="listingblock"]//pre[@class="highlight"]/code[@class="language-perl"][@data-lang="perl"][text()="die 'zomg perl is tough';"]), output, 1 end test 'literal paragraph terminates at block attribute list' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS literal text [normal] normal text EOS output = convert_string_to_embedded input assert_xpath %(/*[@class="literalblock"]), output, 1 assert_xpath %(/*[@class="paragraph"]), output, 1 end test 'literal paragraph terminates at block delimiter' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS literal text -- normal text -- EOS output = convert_string_to_embedded input assert_xpath %(/*[@class="literalblock"]), output, 1 assert_xpath %(/*[@class="openblock"]), output, 1 end test 'literal paragraph terminates at list continuation' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS literal text + EOS output = convert_string_to_embedded input assert_xpath %(/*[@class="literalblock"]), output, 1 assert_xpath %(/*[@class="literalblock"]//pre[text() = "literal text"]), output, 1 assert_xpath %(/*[@class="paragraph"]), output, 1 assert_xpath %(/*[@class="paragraph"]/p[text() = "+"]), output, 1 end end context 'Quote' do test "single-line quote paragraph" do input = <<~'EOS' [quote] Famous quote. EOS output = convert_string input assert_xpath '//*[@class = "quoteblock"]', output, 1 assert_xpath '//*[@class = "quoteblock"]//p', output, 0 assert_xpath '//*[@class = "quoteblock"]//*[contains(text(), "Famous quote.")]', output, 1 end test 'quote paragraph terminates at list continuation' do input = <<~'EOS' [quote] A famouse quote. + EOS output = convert_string_to_embedded input assert_css '.quoteblock:root', output, 1 assert_css '.paragraph:root', output, 1 assert_xpath %(/*[@class="paragraph"]/p[text() = "+"]), output, 1 end test "verse paragraph" do output = convert_string("[verse]\nFamous verse.") assert_xpath '//*[@class = "verseblock"]', output, 1 assert_xpath '//*[@class = "verseblock"]/pre', output, 1 assert_xpath '//*[@class = "verseblock"]//p', output, 0 assert_xpath '//*[@class = "verseblock"]/pre[normalize-space(text()) = "Famous verse."]', output, 1 end test 'should perform normal subs on a verse paragraph' do input = <<~'EOS' [verse] _GET /groups/link:#group-id[\{group-id\}]_ EOS output = convert_string_to_embedded input assert_includes output, '
<pre class="content"><em>GET /groups/<a href="#group-id">{group-id}</a></em></pre>
    ' end test 'quote paragraph should honor explicit subs list' do input = <<~'EOS' [subs="specialcharacters"] [quote] *Hey Jude* EOS output = convert_string_to_embedded input assert_includes output, '*Hey Jude*' end end context "special" do test "note multiline syntax" do Asciidoctor::ADMONITION_STYLES.each do |style| assert_xpath "//div[@class='admonitionblock #{style.downcase}']", convert_string("[#{style}]\nThis is a winner.") end end test "note block syntax" do Asciidoctor::ADMONITION_STYLES.each do |style| assert_xpath "//div[@class='admonitionblock #{style.downcase}']", convert_string("[#{style}]\n====\nThis is a winner.\n====") end end test "note inline syntax" do Asciidoctor::ADMONITION_STYLES.each do |style| assert_xpath "//div[@class='admonitionblock #{style.downcase}']", convert_string("#{style}: This is important, fool!") end end test 'should process preprocessor conditional in paragraph content' do input = <<~'EOS' ifdef::asciidoctor-version[] [sidebar] First line of sidebar. ifdef::backend[The backend is {backend}.] Last line of sidebar. endif::[] EOS expected = <<~'EOS'.chop
<div class="sidebarblock">
<div class="content">
First line of sidebar.
The backend is html5.
Last line of sidebar.
</div>
</div>
    EOS result = convert_string_to_embedded input assert_equal expected, result end context 'Styled Paragraphs' do test 'should wrap text in simpara for styled paragraphs when converted to DocBook' do input = <<~'EOS' = Book :doctype: book [preface] = About this book [abstract] An abstract for the book. = Part 1 [partintro] An intro to this part. == Chapter 1 [sidebar] Just a side note. [example] As you can see here. [quote] Wise words from a wise person. [open] Make it what you want. EOS output = convert_string input, backend: 'docbook' assert_css 'abstract > simpara', output, 1 assert_css 'partintro > simpara', output, 1 assert_css 'sidebar > simpara', output, 1 assert_css 'informalexample > simpara', output, 1 assert_css 'blockquote > simpara', output, 1 assert_css 'chapter > simpara', output, 1 end test 'should convert open paragraph to open block' do input = <<~'EOS' [open] Make it what you want. EOS output = convert_string_to_embedded input assert_css '.openblock', output, 1 assert_css '.openblock p', output, 0 end test 'should wrap text in simpara for styled paragraphs with title when converted to DocBook' do input = <<~'EOS' = Book :doctype: book [preface] = About this book [abstract] .Abstract title An abstract for the book. = Part 1 [partintro] .Part intro title An intro to this part. == Chapter 1 [sidebar] .Sidebar title Just a side note. [example] .Example title As you can see here. [quote] .Quote title Wise words from a wise person. EOS output = convert_string input, backend: 'docbook' assert_css 'abstract > title', output, 1 assert_xpath '//abstract/title[text() = "Abstract title"]', output, 1 assert_css 'abstract > title + simpara', output, 1 assert_css 'partintro > title', output, 1 assert_xpath '//partintro/title[text() = "Part intro title"]', output, 1 assert_css 'partintro > title + simpara', output, 1 assert_css 'sidebar > title', output, 1 assert_xpath '//sidebar/title[text() = "Sidebar title"]', output, 1 assert_css 'sidebar > title + simpara', output, 1 assert_css 'example > title', output, 1 assert_xpath '//example/title[text() = "Example title"]', output, 1 assert_css 'example > title + simpara', output, 1 assert_css 'blockquote > title', output, 1 assert_xpath '//blockquote/title[text() = "Quote title"]', output, 1 assert_css 'blockquote > title + simpara', output, 1 end end context 'Inline doctype' do test 'should only format and output text in first paragraph when doctype is inline' do input = "http://asciidoc.org[AsciiDoc] is a _lightweight_ markup language...\n\nignored" output = convert_string input, doctype: 'inline' assert_equal 'AsciiDoc is a lightweight markup language…​', output end test 'should output nil and warn if first block is not a paragraph' do input = '* bullet' using_memory_logger do |logger| output = convert_string input, doctype: 'inline' assert_nil output assert_message logger, :WARN, '~no inline candidate' end end end end context 'Custom' do test 'should not warn if paragraph style is unregisted' do input = <<~'EOS' [foo] bar EOS using_memory_logger do |logger| convert_string_to_embedded input assert_empty logger.messages end end test 'should log debug message if paragraph style is unknown and debug level is enabled' do input = <<~'EOS' [foo] bar EOS using_memory_logger Logger::Severity::DEBUG do |logger| convert_string_to_embedded input assert_message logger, :DEBUG, ': line 2: unknown style for paragraph: foo', Hash end end end end 
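# Illustrative aside (not part of the upstream test suite): a minimal sketch of how the
# paragraph behaviors exercised above can be driven through the public Asciidoctor API
# instead of the convert_string_to_embedded test helper. The sample input, the
# __FILE__ guard, and the safe mode choice are assumptions made for illustration only.
if __FILE__ == $PROGRAM_NAME
  require 'asciidoctor'

  sample = <<~'EOS'
  [%hardbreaks]
  roses are red
  violets are blue
  EOS

  # With the hardbreaks option set, each line of the paragraph ends with <br>,
  # mirroring what the hardbreaks tests above assert against the embedded output.
  puts Asciidoctor.convert sample, safe: :safe
end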
asciidoctor-2.0.20/test/parser_test.rb000066400000000000000000000647761443135032600177620ustar00rootroot00000000000000# frozen_string_literal: true require_relative 'test_helper' context "Parser" do test "is_section_title?" do assert Asciidoctor::Parser.is_section_title?('AsciiDoc Home Page', '==================') assert Asciidoctor::Parser.is_section_title?('=== AsciiDoc Home Page') end test 'sanitize attribute name' do assert_equal 'foobar', Asciidoctor::Parser.sanitize_attribute_name("Foo Bar") assert_equal 'foo', Asciidoctor::Parser.sanitize_attribute_name("foo") assert_equal 'foo3-bar', Asciidoctor::Parser.sanitize_attribute_name("Foo 3^ # - Bar[") end test 'store attribute with value' do attr_name, attr_value = Asciidoctor::Parser.store_attribute 'foo', 'bar' assert_equal 'foo', attr_name assert_equal 'bar', attr_value end test 'store attribute with negated value' do { 'foo!' => nil, '!foo' => nil, 'foo' => nil }.each do |name, value| attr_name, attr_value = Asciidoctor::Parser.store_attribute name, value assert_equal name.sub('!', ''), attr_name assert_nil attr_value end end test 'store accessible attribute on document with value' do doc = empty_document doc.set_attribute 'foo', 'baz' attrs = {} attr_name, attr_value = Asciidoctor::Parser.store_attribute 'foo', 'bar', doc, attrs assert_equal 'foo', attr_name assert_equal 'bar', attr_value assert_equal 'bar', (doc.attr 'foo') assert attrs.key?(:attribute_entries) assert_equal 1, attrs[:attribute_entries].size assert_equal 'foo', attrs[:attribute_entries][0].name assert_equal 'bar', attrs[:attribute_entries][0].value end test 'store accessible attribute on document with value that contains attribute reference' do doc = empty_document doc.set_attribute 'foo', 'baz' doc.set_attribute 'release', 'ultramega' attrs = {} attr_name, attr_value = Asciidoctor::Parser.store_attribute 'foo', '{release}', doc, attrs assert_equal 'foo', attr_name assert_equal 'ultramega', attr_value assert_equal 'ultramega', (doc.attr 'foo') assert attrs.key?(:attribute_entries) assert_equal 1, attrs[:attribute_entries].size assert_equal 'foo', attrs[:attribute_entries][0].name assert_equal 'ultramega', attrs[:attribute_entries][0].value end test 'store inaccessible attribute on document with value' do doc = empty_document attributes: { 'foo' => 'baz' } attrs = {} attr_name, attr_value = Asciidoctor::Parser.store_attribute 'foo', 'bar', doc, attrs assert_equal 'foo', attr_name assert_equal 'bar', attr_value assert_equal 'baz', (doc.attr 'foo') refute attrs.key?(:attribute_entries) end test 'store accessible attribute on document with negated value' do { 'foo!' => nil, '!foo' => nil, 'foo' => nil }.each do |name, value| doc = empty_document doc.set_attribute 'foo', 'baz' attrs = {} attr_name, attr_value = Asciidoctor::Parser.store_attribute name, value, doc, attrs assert_equal name.sub('!', ''), attr_name assert_nil attr_value assert attrs.key?(:attribute_entries) assert_equal 1, attrs[:attribute_entries].size assert_equal 'foo', attrs[:attribute_entries][0].name assert_nil attrs[:attribute_entries][0].value end end test 'store inaccessible attribute on document with negated value' do { 'foo!' 
=> nil, '!foo' => nil, 'foo' => nil }.each do |name, value| doc = empty_document attributes: { 'foo' => 'baz' } attrs = {} attr_name, attr_value = Asciidoctor::Parser.store_attribute name, value, doc, attrs assert_equal name.sub('!', ''), attr_name assert_nil attr_value refute attrs.key?(:attribute_entries) end end test 'parse style attribute with id and role' do attributes = { 1 => 'style#id.role' } style = Asciidoctor::Parser.parse_style_attribute(attributes) assert_equal 'style', style assert_equal 'style', attributes['style'] assert_equal 'id', attributes['id'] assert_equal 'role', attributes['role'] assert_equal 'style#id.role', attributes[1] end test 'parse style attribute with style, role, id and option' do attributes = { 1 => 'style.role#id%fragment' } style = Asciidoctor::Parser.parse_style_attribute(attributes) assert_equal 'style', style assert_equal 'style', attributes['style'] assert_equal 'id', attributes['id'] assert_equal 'role', attributes['role'] assert_equal '', attributes['fragment-option'] assert_equal 'style.role#id%fragment', attributes[1] refute attributes.key? 'options' end test 'parse style attribute with style, id and multiple roles' do attributes = { 1 => 'style#id.role1.role2' } style = Asciidoctor::Parser.parse_style_attribute(attributes) assert_equal 'style', style assert_equal 'style', attributes['style'] assert_equal 'id', attributes['id'] assert_equal 'role1 role2', attributes['role'] assert_equal 'style#id.role1.role2', attributes[1] end test 'parse style attribute with style, multiple roles and id' do attributes = { 1 => 'style.role1.role2#id' } style = Asciidoctor::Parser.parse_style_attribute(attributes) assert_equal 'style', style assert_equal 'style', attributes['style'] assert_equal 'id', attributes['id'] assert_equal 'role1 role2', attributes['role'] assert_equal 'style.role1.role2#id', attributes[1] end test 'parse style attribute with positional and original style' do attributes = { 1 => 'new_style', 'style' => 'original_style' } style = Asciidoctor::Parser.parse_style_attribute(attributes) assert_equal 'new_style', style assert_equal 'new_style', attributes['style'] assert_equal 'new_style', attributes[1] end test 'parse style attribute with id and role only' do attributes = { 1 => '#id.role' } style = Asciidoctor::Parser.parse_style_attribute(attributes) assert_nil style assert_equal 'id', attributes['id'] assert_equal 'role', attributes['role'] assert_equal '#id.role', attributes[1] end test 'parse empty style attribute' do attributes = { 1 => nil } style = Asciidoctor::Parser.parse_style_attribute(attributes) assert_nil style assert_nil attributes['id'] assert_nil attributes['role'] assert_nil attributes[1] end test 'parse style attribute with option should preserve existing options' do attributes = { 1 => '%header', 'footer-option' => '' } style = Asciidoctor::Parser.parse_style_attribute(attributes) assert_nil style assert_equal '', attributes['header-option'] assert_equal '', attributes['footer-option'] end test "parse author first" do metadata, _ = parse_header_metadata 'Stuart' assert_equal 5, metadata.size assert_equal 1, metadata['authorcount'] assert_equal metadata['author'], metadata['authors'] assert_equal 'Stuart', metadata['firstname'] assert_equal 'S', metadata['authorinitials'] end test "parse author first last" do metadata, _ = parse_header_metadata 'Yukihiro Matsumoto' assert_equal 6, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Yukihiro Matsumoto', metadata['author'] assert_equal metadata['author'], 
metadata['authors'] assert_equal 'Yukihiro', metadata['firstname'] assert_equal 'Matsumoto', metadata['lastname'] assert_equal 'YM', metadata['authorinitials'] end test "parse author first middle last" do metadata, _ = parse_header_metadata 'David Heinemeier Hansson' assert_equal 7, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'David Heinemeier Hansson', metadata['author'] assert_equal metadata['author'], metadata['authors'] assert_equal 'David', metadata['firstname'] assert_equal 'Heinemeier', metadata['middlename'] assert_equal 'Hansson', metadata['lastname'] assert_equal 'DHH', metadata['authorinitials'] end test "parse author first middle last email" do metadata, _ = parse_header_metadata 'David Heinemeier Hansson ' assert_equal 8, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'David Heinemeier Hansson', metadata['author'] assert_equal metadata['author'], metadata['authors'] assert_equal 'David', metadata['firstname'] assert_equal 'Heinemeier', metadata['middlename'] assert_equal 'Hansson', metadata['lastname'] assert_equal 'rails@ruby-lang.org', metadata['email'] assert_equal 'DHH', metadata['authorinitials'] end test "parse author first email" do metadata, _ = parse_header_metadata 'Stuart ' assert_equal 6, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Stuart', metadata['author'] assert_equal metadata['author'], metadata['authors'] assert_equal 'Stuart', metadata['firstname'] assert_equal 'founder@asciidoc.org', metadata['email'] assert_equal 'S', metadata['authorinitials'] end test "parse author first last email" do metadata, _ = parse_header_metadata 'Stuart Rackham ' assert_equal 7, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Stuart Rackham', metadata['author'] assert_equal metadata['author'], metadata['authors'] assert_equal 'Stuart', metadata['firstname'] assert_equal 'Rackham', metadata['lastname'] assert_equal 'founder@asciidoc.org', metadata['email'] assert_equal 'SR', metadata['authorinitials'] end test "parse author with hyphen" do metadata, _ = parse_header_metadata 'Tim Berners-Lee ' assert_equal 7, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Tim Berners-Lee', metadata['author'] assert_equal metadata['author'], metadata['authors'] assert_equal 'Tim', metadata['firstname'] assert_equal 'Berners-Lee', metadata['lastname'] assert_equal 'founder@www.org', metadata['email'] assert_equal 'TB', metadata['authorinitials'] end test "parse author with single quote" do metadata, _ = parse_header_metadata 'Stephen O\'Grady ' assert_equal 7, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Stephen O\'Grady', metadata['author'] assert_equal metadata['author'], metadata['authors'] assert_equal 'Stephen', metadata['firstname'] assert_equal 'O\'Grady', metadata['lastname'] assert_equal 'founder@redmonk.com', metadata['email'] assert_equal 'SO', metadata['authorinitials'] end test "parse author with dotted initial" do metadata, _ = parse_header_metadata 'Heiko W. Rupp ' assert_equal 8, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Heiko W. 
Rupp', metadata['author'] assert_equal metadata['author'], metadata['authors'] assert_equal 'Heiko', metadata['firstname'] assert_equal 'W.', metadata['middlename'] assert_equal 'Rupp', metadata['lastname'] assert_equal 'hwr@example.de', metadata['email'] assert_equal 'HWR', metadata['authorinitials'] end test "parse author with underscore" do metadata, _ = parse_header_metadata 'Tim_E Fella' assert_equal 6, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Tim E Fella', metadata['author'] assert_equal metadata['author'], metadata['authors'] assert_equal 'Tim E', metadata['firstname'] assert_equal 'Fella', metadata['lastname'] assert_equal 'TF', metadata['authorinitials'] end test 'parse author name with letters outside basic latin' do metadata, _ = parse_header_metadata 'Stéphane Brontë' assert_equal 6, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Stéphane Brontë', metadata['author'] assert_equal metadata['author'], metadata['authors'] assert_equal 'Stéphane', metadata['firstname'] assert_equal 'Brontë', metadata['lastname'] assert_equal 'SB', metadata['authorinitials'] end test 'parse ideographic author names' do metadata, _ = parse_header_metadata '李 四 ' assert_equal 7, metadata.size assert_equal 1, metadata['authorcount'] assert_equal '李 四', metadata['author'] assert_equal metadata['author'], metadata['authors'] assert_equal '李', metadata['firstname'] assert_equal '四', metadata['lastname'] assert_equal 'si.li@example.com', metadata['email'] assert_equal '李四', metadata['authorinitials'] end test "parse author condenses whitespace" do metadata, _ = parse_header_metadata 'Stuart Rackham ' assert_equal 7, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Stuart Rackham', metadata['author'] assert_equal metadata['author'], metadata['authors'] assert_equal 'Stuart', metadata['firstname'] assert_equal 'Rackham', metadata['lastname'] assert_equal 'founder@asciidoc.org', metadata['email'] assert_equal 'SR', metadata['authorinitials'] end test "parse invalid author line becomes author" do metadata, _ = parse_header_metadata ' Stuart Rackham, founder of AsciiDoc ' assert_equal 5, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Stuart Rackham, founder of AsciiDoc ', metadata['author'] assert_equal metadata['author'], metadata['authors'] assert_equal 'Stuart Rackham, founder of AsciiDoc ', metadata['firstname'] assert_equal 'S', metadata['authorinitials'] end test 'parse multiple authors' do metadata, _ = parse_header_metadata 'Doc Writer ; John Smith ' assert_equal 2, metadata['authorcount'] assert_equal 'Doc Writer, John Smith', metadata['authors'] assert_equal 'Doc Writer', metadata['author'] assert_equal 'Doc Writer', metadata['author_1'] assert_equal 'John Smith', metadata['author_2'] end test 'should not parse multiple authors if semi-colon is not followed by space' do metadata, _ = parse_header_metadata 'Joe Doe;Smith Johnson' assert_equal 1, metadata['authorcount'] end test 'skips blank author entries in implicit author line' do metadata, _ = parse_header_metadata 'Doc Writer; ; John Smith ;' assert_equal 2, metadata['authorcount'] assert_equal 'Doc Writer', metadata['author_1'] assert_equal 'John Smith', metadata['author_2'] end test 'parse name with more than 3 parts in author attribute' do doc = empty_document parse_header_metadata ':author: Leroy Harold Scherer, Jr.', doc assert_equal 'Leroy Harold Scherer, Jr.', doc.attributes['author'] assert_equal 'Leroy', doc.attributes['firstname'] assert_equal 'Harold', 
doc.attributes['middlename'] assert_equal 'Scherer, Jr.', doc.attributes['lastname'] end test 'use explicit authorinitials if set after implicit author line' do input = <<~'EOS' Jean-Claude Van Damme :authorinitials: JCVD EOS doc = empty_document parse_header_metadata input, doc assert_equal 'JCVD', doc.attributes['authorinitials'] end test 'use explicit authorinitials if set after author attribute' do input = <<~'EOS' :author: Jean-Claude Van Damme :authorinitials: JCVD EOS doc = empty_document parse_header_metadata input, doc assert_equal 'JCVD', doc.attributes['authorinitials'] end test 'use implicit authors if value of authors attribute matches computed value' do input = <<~'EOS' Doc Writer; Junior Writer :authors: Doc Writer, Junior Writer EOS doc = empty_document parse_header_metadata input, doc assert_equal 'Doc Writer, Junior Writer', doc.attributes['authors'] assert_equal 'Doc Writer', doc.attributes['author_1'] assert_equal 'Junior Writer', doc.attributes['author_2'] end test 'replace implicit authors if value of authors attribute does not match computed value' do input = <<~'EOS' Doc Writer; Junior Writer :authors: Stuart Rackham; Dan Allen; Sarah White EOS doc = empty_document metadata, _ = parse_header_metadata input, doc assert_equal 3, metadata['authorcount'] assert_equal 3, doc.attributes['authorcount'] assert_equal 'Stuart Rackham, Dan Allen, Sarah White', doc.attributes['authors'] assert_equal 'Stuart Rackham', doc.attributes['author_1'] assert_equal 'Dan Allen', doc.attributes['author_2'] assert_equal 'Sarah White', doc.attributes['author_3'] end test 'sets authorcount to 0 if document has no authors' do input = '' doc = empty_document metadata, _ = parse_header_metadata input, doc assert_equal 0, doc.attributes['authorcount'] assert_equal 0, metadata['authorcount'] end test 'returns empty hash if document has no authors and invoked without document' do metadata, _ = parse_header_metadata '' assert_empty metadata end test 'does not drop name joiner when using multiple authors' do input = 'Kismet Chameleon; Lazarus het_Draeke' doc = empty_document parse_header_metadata input, doc assert_equal 2, doc.attributes['authorcount'] assert_equal 'Kismet Chameleon, Lazarus het Draeke', doc.attributes['authors'] assert_equal 'Kismet Chameleon', doc.attributes['author_1'] assert_equal 'Lazarus het Draeke', doc.attributes['author_2'] assert_equal 'het Draeke', doc.attributes['lastname_2'] end test 'allows authors to be overridden using explicit author attributes' do input = <<~'EOS' Kismet Chameleon; Johnny Bravo; Lazarus het_Draeke :author_2: Danger Mouse EOS doc = empty_document parse_header_metadata input, doc assert_equal 3, doc.attributes['authorcount'] assert_equal 'Kismet Chameleon, Danger Mouse, Lazarus het Draeke', doc.attributes['authors'] assert_equal 'Kismet Chameleon', doc.attributes['author_1'] assert_equal 'Danger Mouse', doc.attributes['author_2'] assert_equal 'Lazarus het Draeke', doc.attributes['author_3'] assert_equal 'het Draeke', doc.attributes['lastname_3'] end test 'removes formatting before partitioning author defined using author attribute' do input = ':author: pass:n[http://example.org/community/team.html[Ze_**Project** team]]' doc = empty_document parse_header_metadata input, doc assert_equal 1, doc.attributes['authorcount'] assert_equal 'Ze Project team', doc.attributes['authors'] assert_equal 'Ze Project', doc.attributes['firstname'] assert_equal 'team', doc.attributes['lastname'] end test "parse rev number date remark" do input = <<~'EOS' Ryan Waldron 
v0.0.7, 2013-12-18: The first release you can stand on EOS metadata, _ = parse_header_metadata input assert_equal 9, metadata.size assert_equal '0.0.7', metadata['revnumber'] assert_equal '2013-12-18', metadata['revdate'] assert_equal 'The first release you can stand on', metadata['revremark'] end test 'parse rev number, data, and remark as attribute references' do input = <<~'EOS' Author Name v{project-version}, {release-date}: {release-summary} EOS metadata, _ = parse_header_metadata input assert_equal 9, metadata.size assert_equal '{project-version}', metadata['revnumber'] assert_equal '{release-date}', metadata['revdate'] assert_equal '{release-summary}', metadata['revremark'] end test 'should resolve attribute references in rev number, data, and remark' do input = <<~'EOS' = Document Title Author Name {project-version}, {release-date}: {release-summary} EOS doc = document_from_string input, attributes: { 'project-version' => '1.0.1', 'release-date' => '2018-05-15', 'release-summary' => 'The one you can count on!', } assert_equal '1.0.1', (doc.attr 'revnumber') assert_equal '2018-05-15', (doc.attr 'revdate') assert_equal 'The one you can count on!', (doc.attr 'revremark') end test "parse rev date" do input = <<~'EOS' Ryan Waldron 2013-12-18 EOS metadata, _ = parse_header_metadata input assert_equal 7, metadata.size assert_equal '2013-12-18', metadata['revdate'] end test 'parse rev number with trailing comma' do input = <<~'EOS' Stuart Rackham v8.6.8, EOS metadata, _ = parse_header_metadata input assert_equal 7, metadata.size assert_equal '8.6.8', metadata['revnumber'] refute metadata.key?('revdate') end # Asciidoctor recognizes a standalone revision without a trailing comma test 'parse rev number' do input = <<~'EOS' Stuart Rackham v8.6.8 EOS metadata, _ = parse_header_metadata input assert_equal 7, metadata.size assert_equal '8.6.8', metadata['revnumber'] refute metadata.key?('revdate') end # while compliant w/ AsciiDoc, this is just sloppy parsing test "treats arbitrary text on rev line as revdate" do input = <<~'EOS' Ryan Waldron foobar EOS metadata, _ = parse_header_metadata input assert_equal 7, metadata.size assert_equal 'foobar', metadata['revdate'] end test "parse rev date remark" do input = <<~'EOS' Ryan Waldron 2013-12-18: The first release you can stand on EOS metadata, _ = parse_header_metadata input assert_equal 8, metadata.size assert_equal '2013-12-18', metadata['revdate'] assert_equal 'The first release you can stand on', metadata['revremark'] end test "should not mistake attribute entry as rev remark" do input = <<~'EOS' Joe Cool :page-layout: post EOS metadata, _ = parse_header_metadata input refute_equal 'page-layout: post', metadata['revremark'] refute metadata.key?('revdate') end test "parse rev remark only" do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS Joe Cool :Must start revremark-only line with space EOS metadata, _ = parse_header_metadata input assert_equal 'Must start revremark-only line with space', metadata['revremark'] refute metadata.key?('revdate') end test "skip line comments before author" do input = <<~'EOS' // Asciidoctor // release artist Ryan Waldron EOS metadata, _ = parse_header_metadata input assert_equal 6, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Ryan Waldron', metadata['author'] assert_equal 'Ryan', metadata['firstname'] assert_equal 'Waldron', metadata['lastname'] assert_equal 'RW', metadata['authorinitials'] end test "skip block comment before 
author" do input = <<~'EOS' //// Asciidoctor release artist //// Ryan Waldron EOS metadata, _ = parse_header_metadata input assert_equal 6, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Ryan Waldron', metadata['author'] assert_equal 'Ryan', metadata['firstname'] assert_equal 'Waldron', metadata['lastname'] assert_equal 'RW', metadata['authorinitials'] end test "skip block comment before rev" do input = <<~'EOS' Ryan Waldron //// Asciidoctor release info //// v0.0.7, 2013-12-18 EOS metadata, _ = parse_header_metadata input assert_equal 8, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Ryan Waldron', metadata['author'] assert_equal '0.0.7', metadata['revnumber'] assert_equal '2013-12-18', metadata['revdate'] end test 'break header at line with three forward slashes' do input = <<~'EOS' Joe Cool v1.0 /// stuff EOS metadata, _ = parse_header_metadata input assert_equal 7, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Joe Cool', metadata['author'] assert_equal '1.0', metadata['revnumber'] end test 'attribute entry overrides generated author initials' do doc = empty_document metadata, _ = parse_header_metadata %(Stuart Rackham \n:Author Initials: SJR), doc assert_equal 'SR', metadata['authorinitials'] assert_equal 'SJR', doc.attributes['authorinitials'] end test 'adjust indentation to 0' do input = <<~EOS \x20 def names \x20 @name.split \x20 end EOS # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 expected = <<~EOS.chop def names @name.split end EOS lines = input.split ?\n Asciidoctor::Parser.adjust_indentation! lines assert_equal expected, (lines * ?\n) end test 'adjust indentation mixed with tabs and spaces to 0' do input = <<~EOS def names \t @name.split end EOS expected = <<~EOS.chop def names @name.split end EOS lines = input.split ?\n Asciidoctor::Parser.adjust_indentation! lines, 0, 4 assert_equal expected, (lines * ?\n) end test 'expands tabs to spaces' do input = <<~'EOS' Filesystem Size Used Avail Use% Mounted on Filesystem Size Used Avail Use% Mounted on devtmpfs 3.9G 0 3.9G 0% /dev /dev/mapper/fedora-root 48G 18G 29G 39% / EOS expected = <<~'EOS'.chop Filesystem Size Used Avail Use% Mounted on Filesystem Size Used Avail Use% Mounted on devtmpfs 3.9G 0 3.9G 0% /dev /dev/mapper/fedora-root 48G 18G 29G 39% / EOS lines = input.split ?\n Asciidoctor::Parser.adjust_indentation! lines, 0, 4 assert_equal expected, (lines * ?\n) end test 'adjust indentation to non-zero' do input = <<~EOS \x20 def names \x20 @name.split \x20 end EOS expected = <<~EOS.chop \x20 def names \x20 @name.split \x20 end EOS lines = input.split ?\n Asciidoctor::Parser.adjust_indentation! lines, 2 assert_equal expected, (lines * ?\n) end test 'preserve block indent if indent is -1' do input = <<~EOS \x20 def names \x20 @name.split \x20 end EOS expected = input lines = input.lines Asciidoctor::Parser.adjust_indentation! lines, -1 assert_equal expected, lines.join end test 'adjust indentation handles empty lines gracefully' do input = [] expected = input lines = input.dup Asciidoctor::Parser.adjust_indentation! lines assert_equal expected, lines end test 'should warn if inline anchor is already in use' do input = <<~'EOS' [#in-use] A paragraph with an id. Another paragraph [[in-use]]that uses an id which is already in use. 
EOS using_memory_logger do |logger| document_from_string input assert_message logger, :WARN, ': line 5: id assigned to anchor already in use: in-use', Hash end end end asciidoctor-2.0.20/test/paths_test.rb000066400000000000000000000431631443135032600175700ustar00rootroot00000000000000# frozen_string_literal: true require_relative 'test_helper' context 'Path Resolver' do context 'Web Paths' do def setup @resolver = Asciidoctor::PathResolver.new end test 'target with absolute path' do assert_equal '/images', @resolver.web_path('/images') assert_equal '/images', @resolver.web_path('/images', '') assert_equal '/images', @resolver.web_path('/images', nil) end test 'target with relative path' do assert_equal 'images', @resolver.web_path('images') assert_equal 'images', @resolver.web_path('images', '') assert_equal 'images', @resolver.web_path('images', nil) end test 'target with hidden relative path' do assert_equal '.images', @resolver.web_path('.images') assert_equal '.images', @resolver.web_path('.images', '') assert_equal '.images', @resolver.web_path('.images', nil) end test 'target with path relative to current directory' do assert_equal './images', @resolver.web_path('./images') assert_equal './images', @resolver.web_path('./images', '') assert_equal './images', @resolver.web_path('./images', nil) end test 'target with absolute path ignores start path' do assert_equal '/images', @resolver.web_path('/images', 'foo') assert_equal '/images', @resolver.web_path('/images', '/foo') assert_equal '/images', @resolver.web_path('/images', './foo') end test 'target with relative path appended to start path' do assert_equal 'assets/images', @resolver.web_path('images', 'assets') assert_equal '/assets/images', @resolver.web_path('images', '/assets') #assert_equal '/assets/images/tiger.png', @resolver.web_path('tiger.png', '/assets//images') assert_equal './assets/images', @resolver.web_path('images', './assets') assert_equal '/theme.css', @resolver.web_path('theme.css', '/') assert_equal '/css/theme.css', @resolver.web_path('theme.css', '/css/') end test 'target with path relative to current directory appended to start path' do assert_equal 'assets/images', @resolver.web_path('./images', 'assets') assert_equal '/assets/images', @resolver.web_path('./images', '/assets') assert_equal './assets/images', @resolver.web_path('./images', './assets') end test 'target with relative path appended to url start path' do assert_equal 'http://www.example.com/assets/images', @resolver.web_path('images', 'http://www.example.com/assets') end # enable if we want to allow web_path to detect and preserve a target URI #test 'target with file url appended to relative path' do # assert_equal 'file:///home/username/styles/asciidoctor.css', @resolver.web_path('file:///home/username/styles/asciidoctor.css', '.') #end # enable if we want to allow web_path to detect and preserve a target URI #test 'target with http url appended to relative path' do # assert_equal 'http://example.com/asciidoctor.css', @resolver.web_path('http://example.com/asciidoctor.css', '.') #end test 'normalize target' do assert_equal '../images', @resolver.web_path('../images/../images') end test 'append target to start path and normalize' do assert_equal '../images', @resolver.web_path('../images/../images', '../images') assert_equal '../../images', @resolver.web_path('../images', '..') end test 'normalize parent directory that follows root' do assert_equal '/tiger.png', @resolver.web_path('/../tiger.png') assert_equal '/tiger.png', 
@resolver.web_path('/../../tiger.png') end test 'uses start when target is empty' do assert_equal 'assets/images', @resolver.web_path('', 'assets/images') assert_equal 'assets/images', @resolver.web_path(nil, 'assets/images') end test 'posixifies windows paths' do @resolver.file_separator = '\\' assert_equal '/images', @resolver.web_path('\\images') assert_equal '../images', @resolver.web_path('..\\images') assert_equal '/images', @resolver.web_path('\\..\\images') assert_equal 'assets/images', @resolver.web_path('assets\\images') assert_equal '../assets/images', @resolver.web_path('assets\\images', '..\\images\\..') end test 'URL encode spaces in path' do assert_equal 'assets%20and%20stuff/lots%20of%20images', @resolver.web_path('lots of images', 'assets and stuff') end end context 'System Paths' do JAIL = '/home/doctor/docs' default_logger = Asciidoctor::LoggerManager.logger def setup @resolver = Asciidoctor::PathResolver.new @logger = (Asciidoctor::LoggerManager.logger = Asciidoctor::MemoryLogger.new) end teardown do Asciidoctor::LoggerManager.logger = default_logger end test 'raises security error if jail is not an absolute path' do begin @resolver.system_path('images/tiger.png', '/etc', 'foo') flunk 'Expecting SecurityError to be raised' rescue SecurityError end end #test 'raises security error if jail is not a canonical path' do # begin # @resolver.system_path('images/tiger.png', '/etc', %(#{JAIL}/../foo)) # flunk 'Expecting SecurityError to be raised' # rescue SecurityError # end #end test 'prevents access to paths outside of jail' do result = @resolver.system_path '../../../../../css', %(#{JAIL}/assets/stylesheets), JAIL assert_equal %(#{JAIL}/css), result assert_message @logger, :WARN, 'path has illegal reference to ancestor of jail; recovering automatically' @logger.clear result = @resolver.system_path '/../../../../../css', %(#{JAIL}/assets/stylesheets), JAIL assert_equal %(#{JAIL}/css), result assert_message @logger, :WARN, 'path is outside of jail; recovering automatically' @logger.clear result = @resolver.system_path '../../../css', '../../..', JAIL assert_equal %(#{JAIL}/css), result assert_message @logger, :WARN, 'path has illegal reference to ancestor of jail; recovering automatically' end test 'throws exception for illegal path access if recover is false' do begin @resolver.system_path('../../../../../css', "#{JAIL}/assets/stylesheets", JAIL, recover: false) flunk 'Expecting SecurityError to be raised' rescue SecurityError end end test 'resolves start path if target is empty' do assert_equal "#{JAIL}/assets/stylesheets", @resolver.system_path('', "#{JAIL}/assets/stylesheets", JAIL) assert_equal "#{JAIL}/assets/stylesheets", @resolver.system_path(nil, "#{JAIL}/assets/stylesheets", JAIL) end test 'expands parent references in start path if target is empty' do assert_equal "#{JAIL}/stylesheets", @resolver.system_path('', "#{JAIL}/assets/../stylesheets", JAIL) end test 'expands parent references in start path if target is not empty' do assert_equal "#{JAIL}/stylesheets/site.css", @resolver.system_path('site.css', "#{JAIL}/assets/../stylesheets", JAIL) end test 'resolves start path if target is dot' do assert_equal "#{JAIL}/assets/stylesheets", @resolver.system_path('.', "#{JAIL}/assets/stylesheets", JAIL) assert_equal "#{JAIL}/assets/stylesheets", @resolver.system_path('./', "#{JAIL}/assets/stylesheets", JAIL) end test 'treats absolute target outside of jail as relative when jail is specified' do result = @resolver.system_path '/', "#{JAIL}/assets/stylesheets", JAIL 
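      # Clarifying comment (added; not in the original test): when a jail is given, an absolute
      # target that falls outside it, such as '/' or '/foo', is re-rooted inside the jail, so
      # system_path('/foo', "#{JAIL}/assets/stylesheets", JAIL) resolves to "#{JAIL}/foo" and a
      # WARN message about the path being outside of the jail is logged, as asserted below.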
assert_equal JAIL, result assert_message @logger, :WARN, 'path is outside of jail; recovering automatically' @logger.clear result = @resolver.system_path '/foo', "#{JAIL}/assets/stylesheets", JAIL assert_equal "#{JAIL}/foo", result assert_message @logger, :WARN, 'path is outside of jail; recovering automatically' @logger.clear result = @resolver.system_path '/../foo', "#{JAIL}/assets/stylesheets", JAIL assert_equal "#{JAIL}/foo", result assert_message @logger, :WARN, 'path is outside of jail; recovering automatically' @logger.clear @resolver.file_separator = '\\' result = @resolver.system_path 'baz.adoc', 'C:/foo', 'C:/bar' assert_equal 'C:/bar/baz.adoc', result assert_message @logger, :WARN, 'path is outside of jail; recovering automatically' end test 'allows use of absolute target or start if resolved path is sub-path of jail' do assert_equal "#{JAIL}/my/path", @resolver.system_path("#{JAIL}/my/path", '', JAIL) assert_equal "#{JAIL}/my/path", @resolver.system_path("#{JAIL}/my/path", nil, JAIL) assert_equal "#{JAIL}/my/path", @resolver.system_path('', "#{JAIL}/my/path", JAIL) assert_equal "#{JAIL}/my/path", @resolver.system_path(nil, "#{JAIL}/my/path", JAIL) assert_equal "#{JAIL}/my/path", @resolver.system_path('path', "#{JAIL}/my", JAIL) assert_equal '/foo/bar/baz.adoc', @resolver.system_path('/foo/bar/baz.adoc', nil, '/') assert_equal '/foo/bar/baz.adoc', @resolver.system_path('baz.adoc', '/foo/bar', '/') assert_equal '/foo/bar/baz.adoc', @resolver.system_path('baz.adoc', 'foo/bar', '/') end test 'uses jail path if start path is empty' do assert_equal "#{JAIL}/images/tiger.png", @resolver.system_path('images/tiger.png', '', JAIL) assert_equal "#{JAIL}/images/tiger.png", @resolver.system_path('images/tiger.png', nil, JAIL) end test 'warns if start is not contained within jail' do result = @resolver.system_path 'images/tiger.png', '/etc', JAIL assert_equal %(#{JAIL}/images/tiger.png), result assert_message @logger, :WARN, 'path is outside of jail; recovering automatically' @logger.clear result = @resolver.system_path '.', '/etc', JAIL assert_equal JAIL, result assert_message @logger, :WARN, 'path is outside of jail; recovering automatically' @logger.clear @resolver.file_separator = '\\' result = @resolver.system_path '.', 'C:/foo', 'C:/bar' assert_equal 'C:/bar', result assert_message @logger, :WARN, 'path is outside of jail; recovering automatically' end test 'allows start path to be parent of jail if resolved target is inside jail' do assert_equal "#{JAIL}/foo/path", @resolver.system_path('foo/path', JAIL, "#{JAIL}/foo") @resolver.file_separator = '\\' assert_equal "C:/dev/project/README.adoc", @resolver.system_path('project/README.adoc', 'C:/dev', 'C:/dev/project') end test 'relocates target to jail if resolved value fails outside of jail' do result = @resolver.system_path 'bar/baz.adoc', JAIL, "#{JAIL}/foo" assert_equal %(#{JAIL}/foo/bar/baz.adoc), result assert_message @logger, :WARN, 'path is outside of jail; recovering automatically' @logger.clear @resolver.file_separator = '\\' result = @resolver.system_path 'bar/baz.adoc', 'D:/', 'C:/foo' assert_equal 'C:/foo/bar/baz.adoc', result assert_message @logger, :WARN, '~outside of jail root' end test 'raises security error if start is not contained within jail and recover is disabled' do begin @resolver.system_path('images/tiger.png', '/etc', JAIL, recover: false) flunk 'Expecting SecurityError to be raised' rescue SecurityError end begin @resolver.system_path('.', '/etc', JAIL, recover: false) flunk 'Expecting SecurityError to be 
raised' rescue SecurityError end end test 'expands parent references in absolute path if jail is not specified' do assert_equal '/etc/stylesheet.css', @resolver.system_path('/usr/share/../../etc/stylesheet.css') end test 'resolves absolute directory if jail is not specified' do assert_equal '/usr/share/stylesheet.css', @resolver.system_path('/usr/share/stylesheet.css', '/home/dallen/docs/assets/stylesheets') end test 'resolves ancestor directory of start if jail is not specified' do assert_equal '/usr/share/stylesheet.css', @resolver.system_path('../../../../../usr/share/stylesheet.css', '/home/dallen/docs/assets/stylesheets') end test 'resolves absolute path if start is absolute and target is relative' do assert_equal '/usr/share/assets/stylesheet.css', @resolver.system_path('assets/stylesheet.css', '/usr/share') end test 'File.dirname preserves UNC path root on Windows', if: windows? do assert_equal File.dirname('\\\\server\\docs\\file.html'), '\\\\server\\docs' end test 'File.dirname preserves posix-style UNC path root on Windows', if: windows? do assert_equal File.dirname('//server/docs/file.html'), '//server/docs' end test 'resolves UNC path if start is absolute and target is relative' do assert_equal '//QA/c$/users/asciidoctor/assets/stylesheet.css', @resolver.system_path('assets/stylesheet.css', '//QA/c$/users/asciidoctor') end test 'resolves UNC path if target is UNC path' do @resolver.file_separator = '\\' assert_equal '//server/docs/output.html', @resolver.system_path('\\\\server\\docs\\output.html') end test 'resolves UNC path if target is posix-style UNC path' do assert_equal '//server/docs/output.html', @resolver.system_path('//server/docs/output.html') end test 'resolves classloader path if start is classloader path and target is relative', if: jruby? do assert_equal 'uri:classloader:images/sample.png', @resolver.system_path('sample.png', 'uri:classloader:images') end test 'resolves classloader path if start is root-relative classloader path and target is relative', if: jruby? do assert_equal 'uri:classloader:/images/sample.png', @resolver.system_path('sample.png', 'uri:classloader:/images') end test 'preserves classloader path if start is absolute path and target is classloader path', if: jruby? 
do assert_equal 'uri:classloader:/images/sample.png', @resolver.system_path('uri:classloader:/images/sample.png', '/home/doctor/docs') end test 'resolves relative target relative to current directory if start is empty' do pwd = File.expand_path(Dir.pwd) assert_equal "#{pwd}/images/tiger.png", @resolver.system_path('images/tiger.png', '') assert_equal "#{pwd}/images/tiger.png", @resolver.system_path('images/tiger.png', nil) assert_equal "#{pwd}/images/tiger.png", @resolver.system_path('images/tiger.png') end test 'resolves relative hidden target relative to current directory if start is empty' do pwd = File.expand_path(Dir.pwd) assert_equal "#{pwd}/.images/tiger.png", @resolver.system_path('.images/tiger.png', '') assert_equal "#{pwd}/.images/tiger.png", @resolver.system_path('.images/tiger.png', nil) end test 'resolves and normalizes start when target is empty' do pwd = File.expand_path Dir.pwd assert_equal '/home/doctor/docs', (@resolver.system_path '', '/home/doctor/docs') assert_equal '/home/doctor/docs', (@resolver.system_path '', '/home/doctor/./docs') assert_equal '/home/doctor/docs', (@resolver.system_path nil, '/home/doctor/docs') assert_equal '/home/doctor/docs', (@resolver.system_path nil, '/home/doctor/./docs') assert_equal %(#{pwd}/assets/images), (@resolver.system_path nil, 'assets/images') @resolver.system_path '', '../assets/images', JAIL assert_message @logger, :WARN, 'path has illegal reference to ancestor of jail; recovering automatically' end test 'posixifies windows paths' do @resolver.file_separator = '\\' assert_equal "#{JAIL}/assets/css", @resolver.system_path('..\\css', 'assets\\stylesheets', JAIL) end test 'resolves windows paths when file separator is backlash' do @resolver.file_separator = '\\' assert_equal 'C:/data/docs', (@resolver.system_path '..', 'C:\\data\\docs\\assets', 'C:\\data\\docs') result = @resolver.system_path '..\\..', 'C:\\data\\docs\\assets', 'C:\\data\\docs' assert_equal 'C:/data/docs', result assert_message @logger, :WARN, 'path has illegal reference to ancestor of jail; recovering automatically' @logger.clear result = @resolver.system_path '..\\..\\css', 'C:\\data\\docs\\assets', 'C:\\data\\docs' assert_equal 'C:/data/docs/css', result assert_message @logger, :WARN, 'path has illegal reference to ancestor of jail; recovering automatically' end test 'should calculate relative path' do filename = @resolver.system_path('part1/chapter1/section1.adoc', nil, JAIL) assert_equal "#{JAIL}/part1/chapter1/section1.adoc", filename assert_equal 'part1/chapter1/section1.adoc', @resolver.relative_path(filename, JAIL) end test 'should resolve relative path to filename outside of base directory' do filename = '/home/shared/partials' base_dir = '/home/user/docs' result = @resolver.relative_path filename, base_dir assert_equal '../../shared/partials', result end test 'should return original path if relative path cannot be computed', if: windows? 
do filename = 'D:/path/to/include/file.txt' base_dir = 'C:/docs' result = @resolver.relative_path filename, base_dir assert_equal 'D:/path/to/include/file.txt', result end test 'should resolve relative path relative to base dir in unsafe mode' do base_dir = fixture_path 'base' doc = empty_document base_dir: base_dir, safe: Asciidoctor::SafeMode::UNSAFE expected = ::File.join base_dir, 'images', 'tiger.png' actual = doc.normalize_system_path 'tiger.png', 'images' assert_equal expected, actual end test 'should resolve absolute path as absolute in unsafe mode' do base_dir = fixture_path 'base' doc = empty_document base_dir: base_dir, safe: Asciidoctor::SafeMode::UNSAFE actual = doc.normalize_system_path 'tiger.png', '/etc/images' assert_equal '/etc/images/tiger.png', actual end end end asciidoctor-2.0.20/test/preamble_test.rb000066400000000000000000000077761443135032600202520ustar00rootroot00000000000000# frozen_string_literal: true require_relative 'test_helper' context 'Preamble' do test 'title and single paragraph preamble before section' do input = <<~'EOS' = Title Preamble paragraph 1. == First Section Section paragraph 1. EOS result = convert_string(input) assert_xpath '//p', result, 2 assert_xpath '//*[@id="preamble"]', result, 1 assert_xpath '//*[@id="preamble"]//p', result, 1 assert_xpath '//*[@id="preamble"]/following-sibling::*//h2[@id="_first_section"]', result, 1 assert_xpath '//*[@id="preamble"]/following-sibling::*//p', result, 1 end test 'title of preface is blank by default in DocBook output' do input = <<~'EOS' = Document Title :doctype: book Preface content. == First Section Section content. EOS result = convert_string input, backend: :docbook assert_xpath '//preface/title', result, 1 title_node = xmlnodes_at_xpath '//preface/title', result, 1 assert_equal '', title_node.text end test 'preface-title attribute is assigned as title of preface in DocBook output' do input = <<~'EOS' = Document Title :doctype: book :preface-title: Preface Preface content. == First Section Section content. EOS result = convert_string input, backend: :docbook assert_xpath '//preface/title[text()="Preface"]', result, 1 end test 'title and multi-paragraph preamble before section' do input = <<~'EOS' = Title Preamble paragraph 1. Preamble paragraph 2. == First Section Section paragraph 1. EOS result = convert_string(input) assert_xpath '//p', result, 3 assert_xpath '//*[@id="preamble"]', result, 1 assert_xpath '//*[@id="preamble"]//p', result, 2 assert_xpath '//*[@id="preamble"]/following-sibling::*//h2[@id="_first_section"]', result, 1 assert_xpath '//*[@id="preamble"]/following-sibling::*//p', result, 1 end test 'should not wrap content in preamble if document has title but no sections' do input = <<~'EOS' = Title paragraph EOS result = convert_string(input) assert_xpath '//p', result, 1 assert_xpath '//*[@id="content"]/*[@class="paragraph"]/p', result, 1 assert_xpath '//*[@id="content"]/*[@class="paragraph"]/following-sibling::*', result, 0 end test 'title and section without preamble' do input = <<~'EOS' = Title == First Section Section paragraph 1. EOS result = convert_string(input) assert_xpath '//p', result, 1 assert_xpath '//*[@id="preamble"]', result, 0 assert_xpath '//h2[@id="_first_section"]', result, 1 end test 'no title with preamble and section' do input = <<~'EOS' Preamble paragraph 1. == First Section Section paragraph 1. 
EOS result = convert_string(input) assert_xpath '//p', result, 2 assert_xpath '//*[@id="preamble"]', result, 0 assert_xpath '//h2[@id="_first_section"]/preceding::p', result, 1 end test 'preamble in book doctype' do input = <<~'EOS' = Book :doctype: book Back then... = Chapter One [partintro] It was a dark and stormy night... == Scene One Someone's gonna get axed. = Chapter Two [partintro] They couldn't believe their eyes when... == Scene One The axe came swinging. EOS d = document_from_string(input) assert_equal 'book', d.doctype output = d.convert assert_xpath '//h1', output, 3 assert_xpath %{//*[@id="preamble"]//p[text() = "Back then#{decode_char 8230}#{decode_char 8203}"]}, output, 1 end test 'should output table of contents in preamble if toc-placement attribute value is preamble' do input = <<~'EOS' = Article :toc: :toc-placement: preamble Once upon a time... == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... EOS output = convert_string input assert_xpath '//*[@id="preamble"]/*[@id="toc"]', output, 1 end end asciidoctor-2.0.20/test/reader_test.rb000066400000000000000000003063551443135032600177200ustar00rootroot00000000000000# frozen_string_literal: true require_relative 'test_helper' class ReaderTest < Minitest::Test DIRNAME = ASCIIDOCTOR_TEST_DIR SAMPLE_DATA = ['first line', 'second line', 'third line'] context 'Reader' do context 'Prepare lines' do test 'should prepare lines from Array data' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert_equal SAMPLE_DATA, reader.lines end test 'should prepare lines from String data' do reader = Asciidoctor::Reader.new SAMPLE_DATA.join(Asciidoctor::LF) assert_equal SAMPLE_DATA, reader.lines end test 'should prepare lines from String data with trailing newline' do reader = Asciidoctor::Reader.new SAMPLE_DATA.join(Asciidoctor::LF) + Asciidoctor::LF assert_equal SAMPLE_DATA, reader.lines end test 'should remove UTF-8 BOM from first line of String data' do ['UTF-8', 'ASCII-8BIT'].each do |start_encoding| data = String.new %(\xef\xbb\xbf#{SAMPLE_DATA.join ::Asciidoctor::LF}), encoding: start_encoding reader = Asciidoctor::Reader.new data, nil, normalize: true assert_equal Encoding::UTF_8, reader.lines[0].encoding assert_equal 'f', reader.lines[0].chr assert_equal SAMPLE_DATA, reader.lines end end test 'should remove UTF-8 BOM from first line of Array data' do ['UTF-8', 'ASCII-8BIT'].each do |start_encoding| data = SAMPLE_DATA.drop 0 data[0] = String.new %(\xef\xbb\xbf#{data.first}), encoding: start_encoding reader = Asciidoctor::Reader.new data, nil, normalize: true assert_equal Encoding::UTF_8, reader.lines[0].encoding assert_equal 'f', reader.lines[0].chr assert_equal SAMPLE_DATA, reader.lines end end test 'should encode UTF-16LE string to UTF-8 when BOM is found' do ['UTF-8', 'ASCII-8BIT'].each do |start_encoding| data = "\ufeff#{SAMPLE_DATA.join ::Asciidoctor::LF}".encode('UTF-16LE').force_encoding(start_encoding) reader = Asciidoctor::Reader.new data, nil, normalize: true assert_equal Encoding::UTF_8, reader.lines[0].encoding assert_equal 'f', reader.lines[0].chr assert_equal SAMPLE_DATA, reader.lines end end test 'should encode UTF-16LE string array to UTF-8 when BOM is found' do ['UTF-8', 'ASCII-8BIT'].each do |start_encoding| # NOTE can't split a UTF-16LE string using .lines when encoding is set to UTF-8 data = SAMPLE_DATA.drop 0 data.unshift %(\ufeff#{data.shift}) data.each {|line| (line.encode 'UTF-16LE').force_encoding start_encoding } reader = Asciidoctor::Reader.new data, nil, 
normalize: true assert_equal Encoding::UTF_8, reader.lines[0].encoding assert_equal 'f', reader.lines[0].chr assert_equal SAMPLE_DATA, reader.lines end end test 'should encode UTF-16BE string to UTF-8 when BOM is found' do ['UTF-8', 'ASCII-8BIT'].each do |start_encoding| data = "\ufeff#{SAMPLE_DATA.join ::Asciidoctor::LF}".encode('UTF-16BE').force_encoding(start_encoding) reader = Asciidoctor::Reader.new data, nil, normalize: true assert_equal Encoding::UTF_8, reader.lines[0].encoding assert_equal 'f', reader.lines[0].chr assert_equal SAMPLE_DATA, reader.lines end end test 'should encode UTF-16BE string array to UTF-8 when BOM is found' do ['UTF-8', 'ASCII-8BIT'].each do |start_encoding| data = SAMPLE_DATA.drop 0 data.unshift %(\ufeff#{data.shift}) data = data.map {|line| (line.encode 'UTF-16BE').force_encoding start_encoding } reader = Asciidoctor::Reader.new data, nil, normalize: true assert_equal Encoding::UTF_8, reader.lines[0].encoding assert_equal 'f', reader.lines[0].chr assert_equal SAMPLE_DATA, reader.lines end end end context 'With empty data' do test 'has_more_lines? should return false with empty data' do refute Asciidoctor::Reader.new.has_more_lines? end test 'empty? should return true with empty data' do assert Asciidoctor::Reader.new.empty? assert Asciidoctor::Reader.new.eof? end test 'next_line_empty? should return true with empty data' do assert Asciidoctor::Reader.new.next_line_empty? end test 'peek_line should return nil with empty data' do assert_nil Asciidoctor::Reader.new.peek_line end test 'peek_lines should return empty Array with empty data' do assert_equal [], Asciidoctor::Reader.new.peek_lines(1) end test 'read_line should return nil with empty data' do assert_nil Asciidoctor::Reader.new.read_line #assert_nil Asciidoctor::Reader.new.get_line end test 'read_lines should return empty Array with empty data' do assert_equal [], Asciidoctor::Reader.new.read_lines #assert_equal [], Asciidoctor::Reader.new.get_lines end end context 'With data' do test 'has_more_lines? should return true if there are lines remaining' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert reader.has_more_lines? end test 'empty? should return false if there are lines remaining' do reader = Asciidoctor::Reader.new SAMPLE_DATA refute reader.empty? refute reader.eof? end test 'next_line_empty? should return false if next line is not blank' do reader = Asciidoctor::Reader.new SAMPLE_DATA refute reader.next_line_empty? end test 'next_line_empty? should return true if next line is blank' do reader = Asciidoctor::Reader.new ['', 'second line'] assert reader.next_line_empty? 
end test 'peek_line should return nil if next entry is nil' do assert_nil (Asciidoctor::Reader.new [nil]).peek_line end test 'peek_line should return next line if there are lines remaining' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert_equal SAMPLE_DATA.first, reader.peek_line end test 'peek_line should not consume line or increment line number' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert_equal SAMPLE_DATA.first, reader.peek_line assert_equal SAMPLE_DATA.first, reader.peek_line assert_equal 1, reader.lineno end test 'peek_line should return next lines if there are lines remaining' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert_equal SAMPLE_DATA[0..1], reader.peek_lines(2) end test 'peek_lines should not consume lines or increment line number' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert_equal SAMPLE_DATA[0..1], reader.peek_lines(2) assert_equal SAMPLE_DATA[0..1], reader.peek_lines(2) assert_equal 1, reader.lineno end test 'peek_lines should not increment line number if reader overruns buffer' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert_equal SAMPLE_DATA, (reader.peek_lines SAMPLE_DATA.size * 2) assert_equal 1, reader.lineno end test 'peek_lines should peek all lines if no arguments are given' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert_equal SAMPLE_DATA, reader.peek_lines assert_equal 1, reader.lineno end test 'peek_lines should not invert order of lines' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert_equal SAMPLE_DATA, reader.lines reader.peek_lines 3 assert_equal SAMPLE_DATA, reader.lines end test 'read_line should return next line if there are lines remaining' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert_equal SAMPLE_DATA.first, reader.read_line end test 'read_line should consume next line and increment line number' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert_equal SAMPLE_DATA[0], reader.read_line assert_equal SAMPLE_DATA[1], reader.read_line assert_equal 3, reader.lineno end test 'advance should consume next line and return a Boolean indicating if a line was consumed' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert reader.advance assert reader.advance assert reader.advance refute reader.advance end test 'read_lines should return all lines' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert_equal SAMPLE_DATA, reader.read_lines end test 'read should return all lines joined as String' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert_equal SAMPLE_DATA.join(::Asciidoctor::LF), reader.read end test 'has_more_lines? should return false after read_lines is invoked' do reader = Asciidoctor::Reader.new SAMPLE_DATA reader.read_lines refute reader.has_more_lines? end test 'unshift puts line onto Reader as next line to read' do reader = Asciidoctor::Reader.new SAMPLE_DATA, nil, normalize: true reader.unshift 'line zero' assert_equal 'line zero', reader.peek_line assert_equal 'line zero', reader.read_line assert_equal 1, reader.lineno end test 'terminate should consume all lines and update line number' do reader = Asciidoctor::Reader.new SAMPLE_DATA reader.terminate assert reader.eof? 
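# NOTE SAMPLE_DATA holds three lines, so terminate leaves the line number pointing just past the final line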
assert_equal 4, reader.lineno end test 'skip_blank_lines should skip blank lines' do reader = Asciidoctor::Reader.new ['', ''].concat(SAMPLE_DATA) reader.skip_blank_lines assert_equal SAMPLE_DATA.first, reader.peek_line end test 'lines should return remaining lines' do reader = Asciidoctor::Reader.new SAMPLE_DATA reader.read_line assert_equal SAMPLE_DATA[1..-1], reader.lines end test 'source_lines should return copy of original data Array' do reader = Asciidoctor::Reader.new SAMPLE_DATA reader.read_lines assert_equal SAMPLE_DATA, reader.source_lines end test 'source should return original data Array joined as String' do reader = Asciidoctor::Reader.new SAMPLE_DATA reader.read_lines assert_equal SAMPLE_DATA.join(::Asciidoctor::LF), reader.source end end context 'Line context' do test 'cursor.to_s should return file name and line number of current line' do reader = Asciidoctor::Reader.new SAMPLE_DATA, 'sample.adoc' reader.read_line assert_equal 'sample.adoc: line 2', reader.cursor.to_s end test 'line_info should return file name and line number of current line' do reader = Asciidoctor::Reader.new SAMPLE_DATA, 'sample.adoc' reader.read_line assert_equal 'sample.adoc: line 2', reader.line_info end test 'cursor_at_prev_line should return file name and line number of previous line read' do reader = Asciidoctor::Reader.new SAMPLE_DATA, 'sample.adoc' reader.read_line assert_equal 'sample.adoc: line 1', reader.cursor_at_prev_line.to_s end end context 'Read lines until' do test 'Read lines until until end' do lines = <<~'EOS'.lines This is one paragraph. This is another paragraph. EOS reader = Asciidoctor::Reader.new lines, nil, normalize: true result = reader.read_lines_until assert_equal 3, result.size assert_equal lines.map(&:chomp), result refute reader.has_more_lines? assert reader.eof? end test 'Read lines until until blank line' do lines = <<~'EOS'.lines This is one paragraph. This is another paragraph. EOS reader = Asciidoctor::Reader.new lines, nil, normalize: true result = reader.read_lines_until break_on_blank_lines: true assert_equal 1, result.size assert_equal lines.first.chomp, result.first assert_equal lines.last.chomp, reader.peek_line end test 'Read lines until until blank line preserving last line' do lines = <<~'EOS'.split ::Asciidoctor::LF This is one paragraph. This is another paragraph. EOS reader = Asciidoctor::Reader.new lines result = reader.read_lines_until break_on_blank_lines: true, preserve_last_line: true assert_equal 1, result.size assert_equal lines.first.chomp, result.first assert reader.next_line_empty? end test 'Read lines until until condition is true' do lines = <<~'EOS'.split ::Asciidoctor::LF -- This is one paragraph inside the block. This is another paragraph inside the block. -- This is a paragraph outside the block. EOS reader = Asciidoctor::Reader.new lines reader.read_line result = reader.read_lines_until {|line| line == '--' } assert_equal 3, result.size assert_equal lines[1, 3], result assert reader.next_line_empty? end test 'Read lines until until condition is true, taking last line' do lines = <<~'EOS'.split ::Asciidoctor::LF -- This is one paragraph inside the block. This is another paragraph inside the block. -- This is a paragraph outside the block. EOS reader = Asciidoctor::Reader.new lines reader.read_line result = reader.read_lines_until(read_last_line: true) {|line| line == '--' } assert_equal 4, result.size assert_equal lines[1, 4], result assert reader.next_line_empty? 
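# NOTE read_last_line also consumes the closing '--' delimiter, leaving the blank line that follows it as the next line in the buffer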
end test 'Read lines until until condition is true, taking and preserving last line' do lines = <<~'EOS'.split ::Asciidoctor::LF -- This is one paragraph inside the block. This is another paragraph inside the block. -- This is a paragraph outside the block. EOS reader = Asciidoctor::Reader.new lines reader.read_line result = reader.read_lines_until(read_last_line: true, preserve_last_line: true) {|line| line == '--' } assert_equal 4, result.size assert_equal lines[1, 4], result assert_equal '--', reader.peek_line end test 'read lines until terminator' do lines = <<~'EOS'.lines **** captured also captured **** not captured EOS expected = ['captured', '', 'also captured'] doc = empty_safe_document base_dir: DIRNAME reader = Asciidoctor::PreprocessorReader.new doc, lines, nil, normalize: true terminator = reader.read_line result = reader.read_lines_until terminator: terminator, skip_processing: true assert_equal expected, result refute reader.unterminated end test 'should flag reader as unterminated if reader reaches end of source without finding terminator' do lines = <<~'EOS'.lines **** captured also captured captured yet again EOS expected = lines[1..-1].map(&:chomp) using_memory_logger do |logger| doc = empty_safe_document base_dir: DIRNAME reader = Asciidoctor::PreprocessorReader.new doc, lines, nil, normalize: true terminator = reader.peek_line result = reader.read_lines_until terminator: terminator, skip_first_line: true, skip_processing: true assert_equal expected, result assert reader.unterminated assert_message logger, :WARN, ': line 1: unterminated **** block', Hash end end end end context 'PreprocessorReader' do context 'Type hierarchy' do test 'PreprocessorReader should extend from Reader' do reader = empty_document.reader assert_kind_of Asciidoctor::PreprocessorReader, reader end test 'PreprocessorReader should invoke or emulate Reader initializer' do doc = Asciidoctor::Document.new SAMPLE_DATA reader = doc.reader assert_equal SAMPLE_DATA, reader.lines assert_equal 1, reader.lineno end end context 'Prepare lines' do test 'should prepare and normalize lines from Array data' do data = SAMPLE_DATA.drop 0 data.unshift '' data.push '' doc = Asciidoctor::Document.new data reader = doc.reader assert_equal [''] + SAMPLE_DATA, reader.lines end test 'should prepare and normalize lines from String data' do data = SAMPLE_DATA.drop 0 data.unshift ' ' data.push ' ' data_as_string = data * ::Asciidoctor::LF doc = Asciidoctor::Document.new data_as_string reader = doc.reader assert_equal [''] + SAMPLE_DATA, reader.lines end test 'should drop all lines if all lines are empty' do data = ['', ' ', '', ' '] doc = Asciidoctor::Document.new data reader = doc.reader assert reader.lines.empty? 
end test 'should clean CRLF from end of lines' do input = <<~EOS source\r with\r CRLF\r line endings\r EOS [input, input.lines, input.split(::Asciidoctor::LF), input.split(::Asciidoctor::LF).join(::Asciidoctor::LF)].each do |lines| doc = Asciidoctor::Document.new lines reader = doc.reader reader.lines.each do |line| refute line.end_with?("\r"), "CRLF not properly cleaned for source lines: #{lines.inspect}" refute line.end_with?("\r\n"), "CRLF not properly cleaned for source lines: #{lines.inspect}" refute line.end_with?("\n"), "CRLF not properly cleaned for source lines: #{lines.inspect}" end end end test 'should not skip front matter by default' do input = <<~'EOS' --- layout: post title: Document Title author: username tags: [ first, second ] --- = Document Title Author Name preamble EOS doc = Asciidoctor::Document.new input reader = doc.reader refute doc.attributes.key?('front-matter') assert_equal '---', reader.peek_line assert_equal 1, reader.lineno end test 'should not skip front matter if ending delimiter is not found' do input = <<~'EOS' --- title: Document Title tags: [ first, second ] = Document Title Author Name preamble EOS doc = Asciidoctor::Document.new input, attributes: { 'skip-front-matter' => '' } reader = doc.reader assert_equal '---', reader.peek_line refute doc.attributes.key? 'front-matter' assert_equal 1, reader.lineno end test 'should skip front matter if specified by skip-front-matter attribute' do front_matter = <<~'EOS'.chop layout: post title: Document Title author: username tags: [ first, second ] EOS input = <<~EOS --- #{front_matter} --- = Document Title Author Name preamble EOS doc = Asciidoctor::Document.new input, attributes: { 'skip-front-matter' => '' } reader = doc.reader assert_equal '= Document Title', reader.peek_line assert_equal front_matter, doc.attributes['front-matter'] assert_equal 7, reader.lineno end end context 'Include Stack' do test 'PreprocessorReader#push_include method should return reader' do reader = empty_document.reader append_lines = %w(one two three) result = reader.push_include append_lines, '', '' assert_equal reader, result end test 'PreprocessorReader#push_include method should put lines on top of stack' do lines = %w(a b c) doc = Asciidoctor::Document.new lines reader = doc.reader append_lines = %w(one two three) reader.push_include append_lines, '', '' assert_equal 1, reader.include_stack.size assert_equal 'one', reader.read_line.rstrip end test 'PreprocessorReader#push_include method should gracefully handle file and path' do lines = %w(a b c) doc = Asciidoctor::Document.new lines reader = doc.reader append_lines = %w(one two three) reader.push_include append_lines assert_equal 1, reader.include_stack.size assert_equal 'one', reader.read_line.rstrip assert_nil reader.file assert_equal '', reader.path end test 'PreprocessorReader#push_include method should set path from file automatically if not specified' do lines = %w(a b c) doc = Asciidoctor::Document.new lines reader = doc.reader append_lines = %w(one two three) reader.push_include append_lines, '/tmp/lines.adoc' assert_equal '/tmp/lines.adoc', reader.file assert_equal 'lines.adoc', reader.path assert doc.catalog[:includes]['lines'] end test 'PreprocessorReader#push_include method should accept file as a URI and compute dir and path' do file_uri = ::URI.parse 'http://example.com/docs/file.adoc' dir_uri = ::URI.parse 'http://example.com/docs' reader = empty_document.reader reader.push_include %w(one two three), file_uri assert_same file_uri, reader.file assert_equal 
dir_uri, reader.dir assert_equal 'file.adoc', reader.path end test 'PreprocessorReader#push_include method should accept file as a top-level URI and compute dir and path' do file_uri = ::URI.parse 'http://example.com/index.adoc' dir_uri = ::URI.parse 'http://example.com' reader = empty_document.reader reader.push_include %w(one two three), file_uri assert_same file_uri, reader.file assert_equal dir_uri, reader.dir assert_equal 'index.adoc', reader.path end test 'PreprocessorReader#push_include method should not fail if data is nil' do lines = %w(a b c) doc = Asciidoctor::Document.new lines reader = doc.reader reader.push_include nil, '', '' assert_equal 0, reader.include_stack.size assert_equal 'a', reader.read_line.rstrip end test 'PreprocessorReader#push_include method should ignore dot in directory name when computing include path' do lines = %w(a b c) doc = Asciidoctor::Document.new lines reader = doc.reader append_lines = %w(one two three) reader.push_include append_lines, nil, 'include.d/data' assert_nil reader.file assert_equal 'include.d/data', reader.path assert doc.catalog[:includes]['include.d/data'] end end context 'Include Directive' do test 'should replace include directive with link macro in default safe mode' do input = 'include::include-file.adoc[]' doc = Asciidoctor::Document.new input reader = doc.reader assert_equal 'link:include-file.adoc[role=include]', reader.read_line end test 'should replace include directive with link macro if safe mode allows it, but allow-uri-read is not set' do using_memory_logger do |logger| input = 'include::https://example.org/dist/info.adoc[]' doc = Asciidoctor::Document.new input, safe: :safe reader = doc.reader assert_equal 'link:https://example.org/dist/info.adoc[role=include]', reader.read_line assert_empty logger end end test 'include directive is enabled when safe mode is less than SECURE' do input = 'include::fixtures/include-file.adoc[]' doc = document_from_string input, safe: :safe, standalone: false, base_dir: DIRNAME output = doc.convert assert_match(/included content/, output) assert doc.catalog[:includes]['fixtures/include-file'] end test 'should strip BOM from include file' do input = %(:showtitle:\ninclude::fixtures/file-with-utf8-bom.adoc[]) output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME assert_css '.paragraph', output, 0 assert_css 'h1', output, 1 assert_match(/
<h1>
    人<\/h1>/, output) end test 'should include content from a file on the classloader', if: jruby? do require fixture_path 'assets.jar' input = 'include::uri:classloader:/includes-in-jar/include-file.adoc[]' doc = document_from_string input, safe: :unsafe, standalone: false, base_dir: DIRNAME output = doc.convert assert_match(/included from a file/, output) assert doc.catalog[:includes]['uri:classloader:/includes-in-jar/include-file'] end test 'should not track include in catalog for non-AsciiDoc include files' do input = <<~'EOS' ---- include::fixtures/circle.svg[] ---- EOS doc = document_from_string input, safe: :safe, standalone: false, base_dir: DIRNAME assert doc.catalog[:includes].empty? end test 'include directive should resolve file with spaces in name' do input = 'include::fixtures/include file.adoc[]' include_file = File.join DIRNAME, 'fixtures', 'include-file.adoc' include_file_with_sp = File.join DIRNAME, 'fixtures', 'include file.adoc' begin FileUtils.cp include_file, include_file_with_sp doc = document_from_string input, safe: :safe, standalone: false, base_dir: DIRNAME output = doc.convert assert_match(/included content/, output) ensure FileUtils.rm include_file_with_sp end end test 'include directive should resolve file with {sp} in name' do input = 'include::fixtures/include{sp}file.adoc[]' include_file = File.join DIRNAME, 'fixtures', 'include-file.adoc' include_file_with_sp = File.join DIRNAME, 'fixtures', 'include file.adoc' begin FileUtils.cp include_file, include_file_with_sp doc = document_from_string input, safe: :safe, standalone: false, base_dir: DIRNAME output = doc.convert assert_match(/included content/, output) ensure FileUtils.rm include_file_with_sp end end test 'include directive should not match if target is empty or starts or ends with space' do ['include::[]', 'include:: []', 'include:: not-include[]', 'include::not-include []'].each do |input| doc = Asciidoctor::Document.new input reader = doc.reader assert_equal input, reader.read_line end end test 'include directive should not attempt to resolve target as remote if allow-uri-read is set and URL is not on first line' do using_memory_logger do |logger| input = <<~'EOS' :target: not-a-file.adoc + \ http://example.org/team.adoc include::{target}[] EOS doc = Asciidoctor.load input, safe: :safe, base_dir: fixturedir lines = doc.blocks[0].lines assert_equal [%(Unresolved directive in - include::not-a-file.adoc +\nhttp://example.org/team.adoc[])], lines assert_message logger, :ERROR, %(: line 4: include file not found: #{fixture_path 'not-a-file.adoc'} +\nhttp://example.org/team.adoc), Hash end end test 'include directive should resolve file relative to current include' do input = 'include::fixtures/parent-include.adoc[]' pseudo_docfile = File.join DIRNAME, 'main.adoc' fixtures_dir = File.join DIRNAME, 'fixtures' parent_include_docfile = File.join fixtures_dir, 'parent-include.adoc' child_include_docfile = File.join fixtures_dir, 'child-include.adoc' grandchild_include_docfile = File.join fixtures_dir, 'grandchild-include.adoc' doc = empty_safe_document base_dir: DIRNAME reader = Asciidoctor::PreprocessorReader.new doc, input, pseudo_docfile, normalize: true assert_equal pseudo_docfile, reader.file assert_equal DIRNAME, reader.dir assert_equal 'main.adoc', reader.path assert_equal 'first line of parent', reader.read_line assert_equal 'fixtures/parent-include.adoc: line 1', reader.cursor_at_prev_line.to_s assert_equal parent_include_docfile, reader.file assert_equal fixtures_dir, reader.dir assert_equal 
'fixtures/parent-include.adoc', reader.path reader.skip_blank_lines assert_equal 'first line of child', reader.read_line assert_equal 'fixtures/child-include.adoc: line 1', reader.cursor_at_prev_line.to_s assert_equal child_include_docfile, reader.file assert_equal fixtures_dir, reader.dir assert_equal 'fixtures/child-include.adoc', reader.path reader.skip_blank_lines assert_equal 'first line of grandchild', reader.read_line assert_equal 'fixtures/grandchild-include.adoc: line 1', reader.cursor_at_prev_line.to_s assert_equal grandchild_include_docfile, reader.file assert_equal fixtures_dir, reader.dir assert_equal 'fixtures/grandchild-include.adoc', reader.path reader.skip_blank_lines assert_equal 'last line of grandchild', reader.read_line reader.skip_blank_lines assert_equal 'last line of child', reader.read_line reader.skip_blank_lines assert_equal 'last line of parent', reader.read_line assert_equal 'fixtures/parent-include.adoc: line 5', reader.cursor_at_prev_line.to_s assert_equal parent_include_docfile, reader.file assert_equal fixtures_dir, reader.dir assert_equal 'fixtures/parent-include.adoc', reader.path end test 'include directive should process lines when file extension of target is .asciidoc' do input = 'include::fixtures/include-alt-extension.asciidoc[]' doc = document_from_string input, safe: :safe, base_dir: DIRNAME assert_equal 3, doc.blocks.size assert_equal ['first line'], doc.blocks[0].lines assert_equal ['Asciidoctor!'], doc.blocks[1].lines assert_equal ['last line'], doc.blocks[2].lines end test 'should only strip trailing newlines, not trailing whitespace, if include file is not AsciiDoc' do input = <<~'EOS' .... include::fixtures/data.tsv[] .... EOS doc = document_from_string input, safe: :safe, base_dir: DIRNAME assert_equal 1, doc.blocks.size assert doc.blocks[0].lines[2].end_with? ?\t end test 'should fail to read include file if not UTF-8 encoded and encoding is not specified' do input = <<~'EOS' .... include::fixtures/iso-8859-1.txt[] .... EOS assert_raises StandardError, 'invalid byte sequence in UTF-8' do doc = document_from_string input, safe: :safe, base_dir: DIRNAME assert_equal 1, doc.blocks.size refute_equal ['Où est l\'hôpital ?'], doc.blocks[0].lines doc.convert end end test 'should ignore encoding attribute if value is not a valid encoding' do input = <<~'EOS' .... include::fixtures/encoding.adoc[tag=romé,encoding=iso-1000-1] .... EOS doc = document_from_string input, safe: :safe, base_dir: DIRNAME assert_equal 1, doc.blocks.size assert_equal doc.blocks[0].lines[0].encoding, Encoding::UTF_8 assert_equal ['Gregory Romé has written an AsciiDoc plugin for the Redmine project management application.'], doc.blocks[0].lines end test 'should use encoding specified by encoding attribute when reading include file' do input = <<~'EOS' .... include::fixtures/iso-8859-1.txt[encoding=iso-8859-1] .... 
EOS doc = document_from_string input, safe: :safe, base_dir: DIRNAME assert_equal 1, doc.blocks.size assert_equal doc.blocks[0].lines[0].encoding, Encoding::UTF_8 assert_equal ['Où est l\'hôpital ?'], doc.blocks[0].lines end test 'unresolved target referenced by include directive is skipped when optional option is set' do input = <<~'EOS' include::fixtures/{no-such-file}[opts=optional] trailing content EOS begin using_memory_logger do |logger| doc = document_from_string input, safe: :safe, base_dir: DIRNAME assert_equal 1, doc.blocks.size assert_equal ['trailing content'], doc.blocks[0].lines assert_message logger, :INFO, '~: line 1: optional include dropped because include file not found', Hash end rescue flunk 'include directive should not raise exception on unresolved target' end end test 'should skip include directive that references missing file if optional option is set' do input = <<~'EOS' include::fixtures/no-such-file.adoc[opts=optional] trailing content EOS begin using_memory_logger do |logger| doc = document_from_string input, safe: :safe, base_dir: DIRNAME assert_equal 1, doc.blocks.size assert_equal ['trailing content'], doc.blocks[0].lines assert_message logger, :INFO, '~: line 1: optional include dropped because include file not found', Hash end rescue flunk 'include directive should not raise exception on missing file' end end test 'should replace include directive that references missing file with message' do input = <<~'EOS' include::fixtures/no-such-file.adoc[] trailing content EOS begin using_memory_logger do |logger| doc = document_from_string input, safe: :safe, base_dir: DIRNAME assert_equal 2, doc.blocks.size assert_equal ['Unresolved directive in - include::fixtures/no-such-file.adoc[]'], doc.blocks[0].lines assert_equal ['trailing content'], doc.blocks[1].lines assert_message logger, :ERROR, '~: line 1: include file not found', Hash end rescue flunk 'include directive should not raise exception on missing file' end end test 'should replace include directive that references unreadable file with message', unless: (windows? || Process.euid == 0) do include_file = File.join DIRNAME, 'fixtures', 'chapter-a.adoc' old_mode = (File.stat include_file).mode FileUtils.chmod 0o000, include_file input = <<~'EOS' include::fixtures/chapter-a.adoc[] trailing content EOS begin using_memory_logger do |logger| doc = document_from_string input, safe: :safe, base_dir: DIRNAME assert_equal 2, doc.blocks.size assert_equal ['Unresolved directive in - include::fixtures/chapter-a.adoc[]'], doc.blocks[0].lines assert_equal ['trailing content'], doc.blocks[1].lines assert_message logger, :ERROR, '~: line 1: include file not readable', Hash end rescue flunk 'include directive should not raise exception on missing file' ensure FileUtils.chmod old_mode, include_file end end # IMPORTANT this test needs to be run on Windows to verify proper behavior in Windows test 'can resolve include directive with absolute path' do include_path = ::File.join DIRNAME, 'fixtures', 'chapter-a.adoc' input = %(include::#{include_path}[]) result = document_from_string input, safe: :safe assert_equal 'Chapter A', result.doctitle result = document_from_string input, safe: :unsafe, base_dir: ::Dir.tmpdir assert_equal 'Chapter A', result.doctitle end test 'include directive can retrieve data from uri' do url = %(http://#{resolve_localhost}:9876/name/asciidoctor) input = <<~EOS .... include::#{url}[] .... 
EOS expect = /\{"name": "asciidoctor"\}/ output = using_test_webserver do convert_string_to_embedded input, safe: :safe, attributes: { 'allow-uri-read' => '' } end refute_nil output assert_match(expect, output) end test 'nested include directives are resolved relative to current file' do input = <<~'EOS' .... include::fixtures/outer-include.adoc[] .... EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME expected = <<~'EOS'.chop first line of outer first line of middle first line of inner last line of inner last line of middle last line of outer EOS assert_includes output, expected end test 'nested remote include directive is resolved relative to uri of current file' do url = %(http://#{resolve_localhost}:9876/fixtures/outer-include.adoc) input = <<~EOS .... include::#{url}[] .... EOS output = using_test_webserver do convert_string_to_embedded input, safe: :safe, attributes: { 'allow-uri-read' => '' } end expected = <<~'EOS'.chop first line of outer first line of middle first line of inner last line of inner last line of middle last line of outer EOS assert_includes output, expected end test 'nested remote include directive that cannot be resolved does not crash processor' do include_url = %(http://#{resolve_localhost}:9876/fixtures/file-with-missing-include.adoc) nested_include_url = 'no-such-file.adoc' input = <<~EOS .... include::#{include_url}[] .... EOS begin using_memory_logger do |logger| result = using_test_webserver do convert_string_to_embedded input, safe: :safe, attributes: { 'allow-uri-read' => '' } end assert_includes result, %(Unresolved directive in #{include_url} - include::#{nested_include_url}[]) assert_message logger, :ERROR, %(#{include_url}: line 1: include uri not readable: http://#{resolve_localhost}:9876/fixtures/#{nested_include_url}), Hash end rescue flunk 'include directive should not raise exception on missing file' end end test 'should support tag filtering for remote includes' do url = %(http://#{resolve_localhost}:9876/fixtures/tagged-class.rb) input = <<~EOS [source,ruby] ---- include::#{url}[tag=init,indent=0] ---- EOS output = using_test_webserver do convert_string_to_embedded input, safe: :safe, attributes: { 'allow-uri-read' => '' } end # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 expected = <<~EOS.chop def initialize breed @breed = breed end EOS assert_includes output, expected end test 'should not crash if include directive references inaccessible uri' do url = %(http://#{resolve_localhost}:9876/no_such_file) input = <<~EOS .... include::#{url}[] .... 
EOS begin using_memory_logger do |logger| output = using_test_webserver do convert_string_to_embedded input, safe: :safe, attributes: { 'allow-uri-read' => '' } end refute_nil output assert_match(/Unresolved directive/, output) assert_message logger, :ERROR, %(: line 2: include uri not readable: #{url}), Hash end rescue flunk 'include directive should not raise exception on inaccessible uri' end end test 'include directive supports selecting lines by line number' do input = 'include::fixtures/include-file.adoc[lines=1;3..4;6..-1]' output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME assert_match(/first line/, output) refute_match(/second line/, output) assert_match(/third line/, output) assert_match(/fourth line/, output) refute_match(/fifth line/, output) assert_match(/sixth line/, output) assert_match(/seventh line/, output) assert_match(/eighth line/, output) assert_match(/last line of included content/, output) end test 'include directive supports line ranges separated by commas in quoted attribute value' do input = 'include::fixtures/include-file.adoc[lines="1,3..4,6..-1"]' output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME assert_match(/first line/, output) refute_match(/second line/, output) assert_match(/third line/, output) assert_match(/fourth line/, output) refute_match(/fifth line/, output) assert_match(/sixth line/, output) assert_match(/seventh line/, output) assert_match(/eighth line/, output) assert_match(/last line of included content/, output) end test 'include directive ignores spaces between line ranges in quoted attribute value' do input = 'include::fixtures/include-file.adoc[lines="1, 3..4 , 6 .. -1"]' output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME assert_match(/first line/, output) refute_match(/second line/, output) assert_match(/third line/, output) assert_match(/fourth line/, output) refute_match(/fifth line/, output) assert_match(/sixth line/, output) assert_match(/seventh line/, output) assert_match(/eighth line/, output) assert_match(/last line of included content/, output) end test 'include directive supports implicit endless range' do input = 'include::fixtures/include-file.adoc[lines=6..]' output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME refute_match(/first line/, output) refute_match(/second line/, output) refute_match(/third line/, output) refute_match(/fourth line/, output) refute_match(/fifth line/, output) assert_match(/sixth line/, output) assert_match(/seventh line/, output) assert_match(/eighth line/, output) assert_match(/last line of included content/, output) end test 'include directive ignores lines attribute if empty' do input = <<~'EOS' ++++ include::fixtures/include-file.adoc[lines=] ++++ EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME assert_includes output, 'first line of included content' assert_includes output, 'last line of included content' end test 'include directive ignores lines attribute with invalid range' do input = <<~'EOS' ++++ include::fixtures/include-file.adoc[lines=10..5] ++++ EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME assert_includes output, 'first line of included content' assert_includes output, 'last line of included content' end test 'include directive supports selecting lines by tag' do input = 'include::fixtures/include-file.adoc[tag=snippetA]' output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME assert_match(/snippetA content/, output) 
refute_match(/snippetB content/, output) refute_match(/non-tagged content/, output) refute_match(/included content/, output) end test 'include directive supports selecting lines by tags' do input = 'include::fixtures/include-file.adoc[tags=snippetA;snippetB]' output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME assert_match(/snippetA content/, output) assert_match(/snippetB content/, output) refute_match(/non-tagged content/, output) refute_match(/included content/, output) end test 'include directive supports selecting lines by tag in language that uses circumfix comments' do { 'include-file.xml' => '<snippet>content</snippet>', 'include-file.ml' => 'let s = SS.empty;;', 'include-file.jsx' => '
<p>
    Welcome to the club.
</p>
    ', }.each do |filename, expect| input = <<~EOS [source,xml] ---- include::fixtures/#{filename}[tag=snippet,indent=0] ---- EOS doc = document_from_string input, safe: :safe, base_dir: DIRNAME assert_equal expect, doc.blocks[0].source end end test 'include directive supports selecting lines by tag in file that has CRLF line endings' do begin tmp_include = Tempfile.new %w(include- .adoc) tmp_include_dir, tmp_include_path = File.split tmp_include.path tmp_include.write %(do not include\r\ntag::include-me[]\r\nincluded line\r\nend::include-me[]\r\ndo not include\r\n) tmp_include.close input = %(include::#{tmp_include_path}[tag=include-me]) output = convert_string_to_embedded input, safe: :safe, base_dir: tmp_include_dir assert_includes output, 'included line' refute_includes output, 'do not include' ensure tmp_include.close! end end test 'include directive finds closing tag on last line of file without a trailing newline' do begin tmp_include = Tempfile.new %w(include- .adoc) tmp_include_dir, tmp_include_path = File.split tmp_include.path tmp_include.write %(line not included\ntag::include-me[]\nline included\nend::include-me[]) tmp_include.close input = %(include::#{tmp_include_path}[tag=include-me]) using_memory_logger do |logger| output = convert_string_to_embedded input, safe: :safe, base_dir: tmp_include_dir assert_empty logger.messages assert_includes output, 'line included' refute_includes output, 'line not included' end ensure tmp_include.close! end end test 'include directive does not select lines containing tag directives within selected tag region' do input = <<~'EOS' ++++ include::fixtures/include-file.adoc[tags=snippet] ++++ EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME expected = <<~'EOS'.chop snippetA content non-tagged content snippetB content EOS assert_equal expected, output end test 'include directive skips lines inside tag which is negated' do input = <<~'EOS' ---- include::fixtures/tagged-class-enclosed.rb[tags=all;!bark] ---- EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 expected = <<~EOS.chop class Dog def initialize breed @breed = breed end end EOS assert_includes output, %(
<pre>#{expected}</pre>
    ) end test 'include directive selects all lines without a tag directive when value is double asterisk' do input = <<~'EOS' ---- include::fixtures/tagged-class.rb[tags=**] ---- EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 expected = <<~EOS.chop class Dog def initialize breed @breed = breed end def bark if @breed == 'beagle' 'woof woof woof woof woof' else 'woof woof' end end end EOS assert_includes output, %(
<pre>#{expected}</pre>
    ) end test 'include directive selects all lines except lines inside tag which is negated when value starts with double asterisk' do input = <<~'EOS' ---- include::fixtures/tagged-class.rb[tags=**;!bark] ---- EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 expected = <<~EOS.chop class Dog def initialize breed @breed = breed end end EOS assert_includes output, %(
<pre>#{expected}</pre>
    ) end test 'include directive selects all lines, including lines inside nested tags, except lines inside tag which is negated when value starts with double asterisk' do input = <<~'EOS' ---- include::fixtures/tagged-class.rb[tags=**;!init] ---- EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 expected = <<~EOS.chop class Dog def bark if @breed == 'beagle' 'woof woof woof woof woof' else 'woof woof' end end end EOS assert_includes output, %(
<pre>#{expected}</pre>
    ) end test 'include directive selects all lines outside of tags when value is double asterisk followed by negated wildcard' do input = <<~'EOS' ---- include::fixtures/tagged-class.rb[tags=**;!*] ---- EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME expected = <<~'EOS'.chop class Dog end EOS assert_includes output, %(
<pre>#{expected}</pre>
    ) end test 'include directive skips all tagged regions when value of tags attribute is negated wildcard' do input = <<~'EOS' ---- include::fixtures/tagged-class.rb[tags=!*] ---- EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME expected = %(class Dog\nend) assert_includes output, %(
<pre>#{expected}</pre>
    ) end # FIXME this is a weird one since we'd expect it to only select the specified tags; but it's always been this way test 'include directive selects all lines except for lines containing tag directive if value is double asterisk followed by nested tag names' do input = <<~'EOS' ---- include::fixtures/tagged-class.rb[tags=**;bark-beagle;bark-all] ---- EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 expected = <<~EOS.chop class Dog def initialize breed @breed = breed end def bark if @breed == 'beagle' 'woof woof woof woof woof' else 'woof woof' end end end EOS assert_includes output, %(
<pre>#{expected}</pre>
    ) end # FIXME this is a weird one since we'd expect it to only select the specified tags; but it's always been this way test 'include directive selects all lines except for lines containing tag directive when value is double asterisk followed by outer tag name' do input = <<~'EOS' ---- include::fixtures/tagged-class.rb[tags=**;bark] ---- EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 expected = <<~EOS.chop class Dog def initialize breed @breed = breed end def bark if @breed == 'beagle' 'woof woof woof woof woof' else 'woof woof' end end end EOS assert_includes output, %(
<pre>#{expected}</pre>
    ) end test 'include directive selects all lines inside unspecified tags when value is negated double asterisk followed by negated tags' do input = <<~'EOS' ---- include::fixtures/tagged-class.rb[tags=!**;!init] ---- EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME expected = <<~EOS.chop \x20 def bark \x20 if @breed == 'beagle' \x20 'woof woof woof woof woof' \x20 else \x20 'woof woof' \x20 end \x20 end EOS assert_includes output, %(
    #{expected}
    ) end test 'include directive selects all lines except tag which is negated when value only contains negated tag' do input = <<~'EOS' ---- include::fixtures/tagged-class.rb[tag=!bark] ---- EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 expected = <<~EOS.chop class Dog def initialize breed @breed = breed end end EOS assert_includes output, %(
    #{expected}
    ) end test 'include directive selects all lines except tags which are negated when value only contains negated tags' do input = <<~'EOS' ---- include::fixtures/tagged-class.rb[tags=!bark;!init] ---- EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME expected = <<~'EOS'.chop class Dog end EOS assert_includes output, %(
    #{expected}
    ) end test 'should recognize tag wildcard if not at start of tags list' do input = <<~'EOS' ---- include::fixtures/tagged-class.rb[tags=init;**;*;!bark-other] ---- EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 expected = <<~EOS.chop class Dog def initialize breed @breed = breed end def bark if @breed == 'beagle' 'woof woof woof woof woof' end end end EOS assert_includes output, %(
    #{expected}
    ) end test 'include directive selects lines between tags when value of tags attribute is wildcard' do input = <<~'EOS' ---- include::fixtures/tagged-class.rb[tags=*] ---- EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME expected = <<~EOS.chop \x20 def initialize breed \x20 @breed = breed \x20 end \x20 def bark \x20 if @breed == 'beagle' \x20 'woof woof woof woof woof' \x20 else \x20 'woof woof' \x20 end \x20 end EOS assert_includes output, %(
    #{expected}
    ) end test 'include directive selects lines inside tags when value of tags attribute is wildcard and tag surrounds content' do input = <<~'EOS' ---- include::fixtures/tagged-class-enclosed.rb[tags=*] ---- EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 expected = <<~EOS.chop class Dog def initialize breed @breed = breed end def bark if @breed == 'beagle' 'woof woof woof woof woof' else 'woof woof' end end end EOS assert_includes output, %(
    #{expected}
    ) end test 'include directive selects lines inside all tags except tag which is negated when value of tags attribute is wildcard followed by negated tag' do input = <<~'EOS' ---- include::fixtures/tagged-class-enclosed.rb[tags=*;!init] ---- EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 expected = <<~EOS.chop class Dog def bark if @breed == 'beagle' 'woof woof woof woof woof' else 'woof woof' end end end EOS assert_includes output, %(
    #{expected}
    ) end test 'include directive skips all tagged regions except ones re-enabled when value of tags attribute is negated wildcard followed by tag name' do ['!*;init', '**;!*;init'].each do |pattern| input = <<~EOS ---- include::fixtures/tagged-class.rb[tags=#{pattern}] ---- EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 expected = <<~EOS.chop class Dog def initialize breed @breed = breed end end EOS assert_includes output, %(
    #{expected}
    ) end end test 'include directive includes regions outside tags and inside specified tags when value begins with negated wildcard' do input = <<~'EOS' ---- include::fixtures/tagged-class.rb[tags=!*;bark] ---- EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 expected = <<~EOS.chop class Dog def bark end end EOS assert_includes output, %(
    #{expected}
    ) end test 'include directive includes lines inside tag except for lines inside nested tags when tag is followed by negated wildcard' do ['bark;!*', '!**;bark;!*', '!**;!*;bark'].each do |pattern| input = <<~EOS ---- include::fixtures/tagged-class.rb[tags=#{pattern}] ---- EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME expected = <<~EOS.chop \x20 def bark \x20 end EOS assert_includes output, %(
    #{expected}
    ) end end test 'include directive selects lines inside tag except for lines inside nested tags when tag is preceded by negated double asterisk and negated wildcard' do input = <<~'EOS' ---- include::fixtures/tagged-class.rb[tags=!**;!*;bark] ---- EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME expected = <<~EOS.chop \x20 def bark \x20 end EOS assert_includes output, %(
    #{expected}
    ) end test 'include directive does not select lines inside tag that has been included then excluded' do input = <<~'EOS' ---- include::fixtures/tagged-class.rb[tags=!*;init;!init] ---- EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME expected = <<~'EOS'.chop class Dog end EOS assert_includes output, %(
    #{expected}
    ) end test 'include directive only selects lines inside specified tag, even if preceded by negated double asterisk' do ['bark', '!**;bark'].each do |pattern| input = <<~EOS ---- include::fixtures/tagged-class.rb[tags=#{pattern}] ---- EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME expected = <<~EOS.chop \x20 def bark \x20 if @breed == 'beagle' \x20 'woof woof woof woof woof' \x20 else \x20 'woof woof' \x20 end \x20 end EOS assert_includes output, %(
    #{expected}
    ) end end test 'include directive selects lines inside specified tag and ignores lines inside a negated tag' do input = <<~'EOS' [indent=0] ---- include::fixtures/tagged-class.rb[tags=bark;!bark-other] ---- EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 expected = <<~EOS.chop def bark if @breed == 'beagle' 'woof woof woof woof woof' end end EOS assert_includes output, %(
    #{expected}
    ) end test 'should warn if specified tag is not found in include file' do input = 'include::fixtures/include-file.adoc[tag=no-such-tag]' using_memory_logger do |logger| convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME assert_message logger, :WARN, %(~: line 1: tag 'no-such-tag' not found in include file), Hash end end test 'should not warn if specified negated tag is not found in include file' do input = <<~'EOS' ---- include::fixtures/tagged-class-enclosed.rb[tag=!no-such-tag] ---- EOS expected = <<~EOS.chop class Dog def initialize breed @breed = breed end def bark if @breed == 'beagle' 'woof woof woof woof woof' else 'woof woof' end end end EOS using_memory_logger do |logger| output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME assert_includes output, %(
    #{expected}
    ) assert_empty logger.messages end end test 'should warn if specified tags are not found in include file' do input = <<~'EOS' ++++ include::fixtures/include-file.adoc[tags=no-such-tag-b;no-such-tag-a] ++++ EOS using_memory_logger do |logger| convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME expected_tags = 'no-such-tag-b, no-such-tag-a' assert_message logger, :WARN, %(~: line 2: tags '#{expected_tags}' not found in include file), Hash end end test 'should not warn if specified negated tags are not found in include file' do input = <<~'EOS' ---- include::fixtures/tagged-class-enclosed.rb[tags=all;!no-such-tag;!unknown-tag] ---- EOS expected = <<~EOS.chop class Dog def initialize breed @breed = breed end def bark if @breed == 'beagle' 'woof woof woof woof woof' else 'woof woof' end end end EOS using_memory_logger do |logger| output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME assert_includes output, %(
    #{expected}
    ) assert_empty logger.messages end end test 'should warn if specified tag in include file is not closed' do input = <<~'EOS' ++++ include::fixtures/unclosed-tag.adoc[tag=a] ++++ EOS using_memory_logger do |logger| result = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME assert_equal 'a', result assert_message logger, :WARN, %(~: line 2: detected unclosed tag 'a' starting at line 2 of include file), Hash refute_nil logger.messages[0][:message][:include_location] end end test 'should warn if end tag in included file is mismatched' do input = <<~'EOS' ++++ include::fixtures/mismatched-end-tag.adoc[tags=a;b] ++++ EOS inc_path = File.join DIRNAME, 'fixtures/mismatched-end-tag.adoc' using_memory_logger do |logger| result = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME assert_equal %(a\nb), result assert_message logger, :WARN, %(: line 2: mismatched end tag (expected 'b' but found 'a') at line 5 of include file: #{inc_path}), Hash refute_nil logger.messages[0][:message][:include_location] end end test 'should warn if unexpected end tag is found in included file' do input = <<~'EOS' ++++ include::fixtures/unexpected-end-tag.adoc[tags=a] ++++ EOS inc_path = File.join DIRNAME, 'fixtures/unexpected-end-tag.adoc' using_memory_logger do |logger| result = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME assert_equal 'a', result assert_message logger, :WARN, %(: line 2: unexpected end tag 'a' at line 4 of include file: #{inc_path}), Hash refute_nil logger.messages[0][:message][:include_location] end end test 'include directive ignores tags attribute when empty' do ['tag', 'tags'].each do |attr_name| input = <<~EOS ++++ include::fixtures/include-file.xml[#{attr_name}=] ++++ EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME assert_match(/(?:tag|end)::/, output, 2) end end test 'lines attribute takes precedence over tags attribute in include directive' do input = 'include::fixtures/include-file.adoc[lines=1, tags=snippetA;snippetB]' output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME assert_match(/first line of included content/, output) refute_match(/snippetA content/, output) refute_match(/snippetB content/, output) end test 'indent of included file can be reset to size of indent attribute' do input = <<~'EOS' [source, xml] ---- include::fixtures/basic-docinfo.xml[lines=2..3, indent=0] ---- EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME result = xmlnodes_at_xpath('//pre', output, 1).text assert_equal "2013\nAcme™, Inc.", result end test 'should substitute attribute references in attrlist' do input = <<~'EOS' :name-of-tag: snippetA include::fixtures/include-file.adoc[tag={name-of-tag}] EOS output = convert_string_to_embedded input, safe: :safe, base_dir: DIRNAME assert_match(/snippetA content/, output) refute_match(/snippetB content/, output) refute_match(/non-tagged content/, output) refute_match(/included content/, output) end test 'should fall back to built-in include directive behavior when not handled by include processor' do input = 'include::fixtures/include-file.adoc[]' include_processor = Class.new do def initialize document; end def handles? 
target false end def process reader, target, attributes raise 'TestIncludeHandler should not have been invoked' end end document = empty_safe_document base_dir: DIRNAME reader = Asciidoctor::PreprocessorReader.new document, input, nil, normalize: true reader.instance_variable_set '@include_processors', [include_processor.new(document)] lines = reader.read_lines source = lines * ::Asciidoctor::LF assert_match(/included content/, source) end test 'leveloffset attribute entries should be added to content if leveloffset attribute is specified' do input = 'include::fixtures/main.adoc[]' expected = <<~'EOS'.split ::Asciidoctor::LF = Main Document preamble :leveloffset: +1 = Chapter A content :leveloffset!: EOS document = Asciidoctor.load input, safe: :safe, base_dir: DIRNAME, parse: false assert_equal expected, document.reader.read_lines end test 'attributes are substituted in target of include directive' do input = <<~'EOS' :fixturesdir: fixtures :ext: adoc include::{fixturesdir}/include-file.{ext}[] EOS doc = document_from_string input, safe: :safe, base_dir: DIRNAME output = doc.convert assert_match(/included content/, output) end test 'line is skipped by default if target of include directive resolves to empty' do input = 'include::{blank}[]' using_memory_logger do |logger| doc = empty_safe_document base_dir: DIRNAME reader = Asciidoctor::PreprocessorReader.new doc, input, nil, normalize: true line = reader.read_line assert_equal 'Unresolved directive in - include::{blank}[]', line assert_message logger, :WARN, ': line 1: include dropped because resolved target is blank: include::{blank}[]', Hash end end test 'include is dropped if target contains missing attribute and attribute-missing is drop-line' do input = 'include::{foodir}/include-file.adoc[]' using_memory_logger Logger::INFO do |logger| doc = empty_safe_document base_dir: DIRNAME, attributes: { 'attribute-missing' => 'drop-line' } reader = Asciidoctor::PreprocessorReader.new doc, input, nil, normalize: true line = reader.read_line assert_nil line assert_messages logger, [ [:INFO, 'dropping line containing reference to missing attribute: foodir'], [:INFO, ': line 1: include dropped due to missing attribute: include::{foodir}/include-file.adoc[]', Hash], ] end end test 'line following dropped include is not dropped' do input = <<~'EOS' include::{foodir}/include-file.adoc[] yo EOS using_memory_logger do |logger| doc = empty_safe_document base_dir: DIRNAME, attributes: { 'attribute-missing' => 'warn' } reader = Asciidoctor::PreprocessorReader.new doc, input, nil, normalize: true line = reader.read_line assert_equal 'Unresolved directive in - include::{foodir}/include-file.adoc[]', line line = reader.read_line assert_equal 'yo', line assert_messages logger, [ [:INFO, 'dropping line containing reference to missing attribute: foodir'], [:WARN, ': line 1: include dropped due to missing attribute: include::{foodir}/include-file.adoc[]', Hash], ] end end test 'escaped include directive is left unprocessed' do input = <<~'EOS' \include::fixtures/include-file.adoc[] \escape preserved here EOS doc = empty_safe_document base_dir: DIRNAME reader = Asciidoctor::PreprocessorReader.new doc, input, nil, normalize: true # we should be able to peek it multiple times and still have the backslash preserved # this is the test for @unescape_next_line assert_equal 'include::fixtures/include-file.adoc[]', reader.peek_line assert_equal 'include::fixtures/include-file.adoc[]', reader.peek_line assert_equal 'include::fixtures/include-file.adoc[]', 
reader.read_line assert_equal '\\escape preserved here', reader.read_line end test 'include directive not at start of line is ignored' do input = ' include::include-file.adoc[]' para = block_from_string input assert_equal 1, para.lines.size # NOTE the space gets stripped because the line is treated as an inline literal assert_equal :literal, para.context assert_equal 'include::include-file.adoc[]', para.source end test 'include directive is disabled when max-include-depth attribute is 0' do input = 'include::include-file.adoc[]' para = block_from_string input, safe: :safe, attributes: { 'max-include-depth' => 0 } assert_equal 1, para.lines.size assert_equal 'include::include-file.adoc[]', para.source end test 'max-include-depth cannot be set by document' do input = <<~'EOS' :max-include-depth: 1 include::include-file.adoc[] EOS para = block_from_string input, safe: :safe, attributes: { 'max-include-depth' => 0 } assert_equal 1, para.lines.size assert_equal 'include::include-file.adoc[]', para.source end test 'include directive should be disabled if max include depth has been exceeded' do input = 'include::fixtures/parent-include.adoc[depth=1]' using_memory_logger do |logger| pseudo_docfile = File.join DIRNAME, 'main.adoc' doc = empty_safe_document base_dir: DIRNAME reader = Asciidoctor::PreprocessorReader.new doc, input, Asciidoctor::Reader::Cursor.new(pseudo_docfile), normalize: true lines = reader.readlines assert_includes lines, 'include::grandchild-include.adoc[]' assert_message logger, :ERROR, 'fixtures/child-include.adoc: line 3: maximum include depth of 1 exceeded', Hash end end test 'include directive should be disabled if max include depth set in nested context has been exceeded' do input = 'include::fixtures/parent-include-restricted.adoc[depth=3]' using_memory_logger do |logger| pseudo_docfile = File.join DIRNAME, 'main.adoc' doc = empty_safe_document base_dir: DIRNAME reader = Asciidoctor::PreprocessorReader.new doc, input, Asciidoctor::Reader::Cursor.new(pseudo_docfile), normalize: true lines = reader.readlines assert_includes lines, 'first line of child' assert_includes lines, 'include::grandchild-include.adoc[]' assert_message logger, :ERROR, 'fixtures/child-include.adoc: line 3: maximum include depth of 0 exceeded', Hash end end test 'read_lines_until should not process lines if process option is false' do lines = <<~'EOS'.lines //// include::fixtures/no-such-file.adoc[] //// EOS doc = empty_safe_document base_dir: DIRNAME reader = Asciidoctor::PreprocessorReader.new doc, lines, nil, normalize: true reader.read_line result = reader.read_lines_until(terminator: '////', skip_processing: true) assert_equal lines.map(&:chomp)[1..1], result end test 'skip_comment_lines should not process lines read' do lines = <<~'EOS'.lines //// include::fixtures/no-such-file.adoc[] //// EOS using_memory_logger do |logger| doc = empty_safe_document base_dir: DIRNAME reader = Asciidoctor::PreprocessorReader.new doc, lines, nil, normalize: true reader.skip_comment_lines assert reader.empty? assert logger.empty? end end end context 'Conditional Inclusions' do test 'process_line returns nil if cursor advanced' do input = <<~'EOS' ifdef::asciidoctor[] Asciidoctor! endif::asciidoctor[] EOS doc = Asciidoctor::Document.new input reader = doc.reader assert_nil reader.send :process_line, reader.lines.first end test 'peek_line advances cursor to next conditional line of content' do input = <<~'EOS' ifdef::asciidoctor[] Asciidoctor! 
endif::asciidoctor[] EOS doc = Asciidoctor::Document.new input reader = doc.reader assert_equal 1, reader.lineno assert_equal 'Asciidoctor!', reader.peek_line assert_equal 2, reader.lineno end test 'peek_lines should preprocess lines if direct is false' do input = <<~'EOS' The Asciidoctor ifdef::asciidoctor[is in.] EOS doc = Asciidoctor::Document.new input reader = doc.reader result = reader.peek_lines 2, false assert_equal ['The Asciidoctor', 'is in.'], result end test 'peek_lines should not preprocess lines if direct is true' do input = <<~'EOS' The Asciidoctor ifdef::asciidoctor[is in.] EOS doc = Asciidoctor::Document.new input reader = doc.reader result = reader.peek_lines 2, true assert_equal ['The Asciidoctor', 'ifdef::asciidoctor[is in.]'], result end test 'peek_lines should not prevent subsequent preprocessing of peeked lines' do input = <<~'EOS' The Asciidoctor ifdef::asciidoctor[is in.] EOS doc = Asciidoctor::Document.new input reader = doc.reader result = reader.peek_lines 2, true result = reader.peek_lines 2, false assert_equal ['The Asciidoctor', 'is in.'], result end test 'process_line returns line if cursor not advanced' do input = <<~'EOS' content ifdef::asciidoctor[] Asciidoctor! endif::asciidoctor[] EOS doc = Asciidoctor::Document.new input reader = doc.reader refute_nil reader.send :process_line, reader.lines.first end test 'peek_line does not advance cursor when on a regular content line' do input = <<~'EOS' content ifdef::asciidoctor[] Asciidoctor! endif::asciidoctor[] EOS doc = Asciidoctor::Document.new input reader = doc.reader assert_equal 1, reader.lineno assert_equal 'content', reader.peek_line assert_equal 1, reader.lineno end test 'peek_line returns nil if cursor advances past end of source' do input = <<~'EOS' ifdef::foobar[] swallowed content endif::foobar[] EOS doc = Asciidoctor::Document.new input reader = doc.reader assert_equal 1, reader.lineno assert_nil reader.peek_line assert_equal 4, reader.lineno end test 'ifdef with defined attribute includes content' do input = <<~'EOS' ifdef::holygrail[] There is a holy grail! endif::holygrail[] EOS doc = Asciidoctor::Document.new input, attributes: { 'holygrail' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal 'There is a holy grail!', (lines * ::Asciidoctor::LF) end test 'ifdef with defined attribute includes text in brackets' do input = <<~'EOS' On our quest we go... ifdef::holygrail[There is a holy grail!] There was much rejoicing. EOS doc = Asciidoctor::Document.new input, attributes: { 'holygrail' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal "On our quest we go...\nThere is a holy grail!\nThere was much rejoicing.", (lines * ::Asciidoctor::LF) end test 'ifdef with defined attribute processes include directive in brackets' do input = 'ifdef::asciidoctor-version[include::fixtures/include-file.adoc[tag=snippetA]]' doc = Asciidoctor::Document.new input, safe: :safe, base_dir: DIRNAME reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal 'snippetA content', lines[0] end test 'ifdef attribute name is not case sensitive' do input = <<~'EOS' ifdef::showScript[] The script is shown! 
endif::showScript[] EOS doc = Asciidoctor::Document.new input, attributes: { 'showscript' => '' } result = doc.reader.read assert_equal 'The script is shown!', result end test 'ifndef with defined attribute does not include text in brackets' do input = <<~'EOS' On our quest we go... ifndef::hardships[There is a holy grail!] There was no rejoicing. EOS doc = Asciidoctor::Document.new input, attributes: { 'hardships' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal "On our quest we go...\nThere was no rejoicing.", (lines * ::Asciidoctor::LF) end test 'include with non-matching nested exclude' do input = <<~'EOS' ifdef::grail[] holy ifdef::swallow[] swallow endif::swallow[] grail endif::grail[] EOS doc = Asciidoctor::Document.new input, attributes: { 'grail' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal "holy\ngrail", (lines * ::Asciidoctor::LF) end test 'nested excludes with same condition' do input = <<~'EOS' ifndef::grail[] ifndef::grail[] not here endif::grail[] endif::grail[] EOS doc = Asciidoctor::Document.new input, attributes: { 'grail' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal '', (lines * ::Asciidoctor::LF) end test 'include with nested exclude of inverted condition' do input = <<~'EOS' ifdef::grail[] holy ifndef::grail[] not here endif::grail[] grail endif::grail[] EOS doc = Asciidoctor::Document.new input, attributes: { 'grail' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal "holy\ngrail", (lines * ::Asciidoctor::LF) end test 'exclude with matching nested exclude' do input = <<~'EOS' poof ifdef::swallow[] no ifdef::swallow[] swallow endif::swallow[] here endif::swallow[] gone EOS doc = Asciidoctor::Document.new input, attributes: { 'grail' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal "poof\ngone", (lines * ::Asciidoctor::LF) end test 'exclude with nested include using shorthand end' do input = <<~'EOS' poof ifndef::grail[] no grail ifndef::swallow[] or swallow endif::[] in here endif::[] gone EOS doc = Asciidoctor::Document.new input, attributes: { 'grail' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal "poof\ngone", (lines * ::Asciidoctor::LF) end test 'ifdef with one alternative attribute set includes content' do input = <<~'EOS' ifdef::holygrail,swallow[] Our quest is complete! endif::holygrail,swallow[] EOS doc = Asciidoctor::Document.new input, attributes: { 'swallow' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal 'Our quest is complete!', (lines * ::Asciidoctor::LF) end test 'ifdef with no alternative attributes set does not include content' do input = <<~'EOS' ifdef::holygrail,swallow[] Our quest is complete! endif::holygrail,swallow[] EOS doc = Asciidoctor::Document.new input reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal '', (lines * ::Asciidoctor::LF) end test 'ifdef with all required attributes set includes content' do input = <<~'EOS' ifdef::holygrail+swallow[] Our quest is complete! endif::holygrail+swallow[] EOS doc = Asciidoctor::Document.new input, attributes: { 'holygrail' => '', 'swallow' => '' } reader = doc.reader lines = [] while reader.has_more_lines? 
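    # reading the lines one at a time drives the preprocessor, which evaluates the ifdef condition as it goes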
lines << reader.read_line end assert_equal 'Our quest is complete!', (lines * ::Asciidoctor::LF) end test 'ifdef with missing required attributes does not include content' do input = <<~'EOS' ifdef::holygrail+swallow[] Our quest is complete! endif::holygrail+swallow[] EOS doc = Asciidoctor::Document.new input, attributes: { 'holygrail' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal '', (lines * ::Asciidoctor::LF) end test 'ifdef should permit leading, trailing, and repeat operators' do { 'asciidoctor,' => 'content', ',asciidoctor' => 'content', 'asciidoctor+' => '', '+asciidoctor' => '', 'asciidoctor,,asciidoctor-version' => 'content', 'asciidoctor++asciidoctor-version' => '', }.each do |condition, expected| input = <<~EOS ifdef::#{condition}[] content endif::[] EOS assert_equal expected, (document_from_string input, parse: false).reader.read end end test 'ifndef with undefined attribute includes block' do input = <<~'EOS' ifndef::holygrail[] Our quest continues to find the holy grail! endif::holygrail[] EOS doc = Asciidoctor::Document.new input reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal 'Our quest continues to find the holy grail!', (lines * ::Asciidoctor::LF) end test 'ifndef with one alternative attribute set does not include content' do input = <<~'EOS' ifndef::holygrail,swallow[] Our quest is complete! endif::holygrail,swallow[] EOS result = (Asciidoctor::Document.new input, attributes: { 'swallow' => '' }).reader.read assert_empty result end test 'ifndef with both alternative attributes set does not include content' do input = <<~'EOS' ifndef::holygrail,swallow[] Our quest is complete! endif::holygrail,swallow[] EOS result = (Asciidoctor::Document.new input, attributes: { 'swallow' => '', 'holygrail' => '' }).reader.read assert_empty result end test 'ifndef with no alternative attributes set includes content' do input = <<~'EOS' ifndef::holygrail,swallow[] Our quest is complete! endif::holygrail,swallow[] EOS result = (Asciidoctor::Document.new input).reader.read assert_equal 'Our quest is complete!', result end test 'ifndef with no required attributes set includes content' do input = <<~'EOS' ifndef::holygrail+swallow[] Our quest is complete! endif::holygrail+swallow[] EOS result = (Asciidoctor::Document.new input).reader.read assert_equal 'Our quest is complete!', result end test 'ifndef with all required attributes set does not include content' do input = <<~'EOS' ifndef::holygrail+swallow[] Our quest is complete! endif::holygrail+swallow[] EOS result = (Asciidoctor::Document.new input, attributes: { 'swallow' => '', 'holygrail' => '' }).reader.read assert_empty result end test 'ifndef with at least one required attributes set does not include content' do input = <<~'EOS' ifndef::holygrail+swallow[] Our quest is complete! endif::holygrail+swallow[] EOS result = (Asciidoctor::Document.new input, attributes: { 'swallow' => '' }).reader.read assert_equal 'Our quest is complete!', result end test 'should log warning if endif is unmatched' do input = <<~'EOS' Our quest is complete! 
endif::on-quest[] EOS using_memory_logger do |logger| result = (Asciidoctor::Document.new input, attributes: { 'on-quest' => '' }).reader.read assert_equal 'Our quest is complete!', result assert_message logger, :ERROR, '~: line 2: unmatched preprocessor directive: endif::on-quest[]', Hash end end test 'should log warning if endif is mismatched' do input = <<~'EOS' ifdef::on-quest[] Our quest is complete! endif::on-journey[] EOS using_memory_logger do |logger| result = (Asciidoctor::Document.new input, attributes: { 'on-quest' => '' }).reader.read assert_equal 'Our quest is complete!', result assert_message logger, :ERROR, '~: line 3: mismatched preprocessor directive: endif::on-journey[]', Hash end end test 'should log warning if endif contains text' do input = <<~'EOS' ifdef::on-quest[] Our quest is complete! endif::on-quest[complete!] EOS using_memory_logger do |logger| result = (Asciidoctor::Document.new input, attributes: { 'on-quest' => '' }).reader.read assert_equal 'Our quest is complete!', result assert_message logger, :ERROR, '~: line 3: malformed preprocessor directive - text not permitted: endif::on-quest[complete!]', Hash end end test 'escaped ifdef is unescaped and ignored' do input = <<~'EOS' \ifdef::holygrail[] content \endif::holygrail[] EOS doc = Asciidoctor::Document.new input reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal "ifdef::holygrail[]\ncontent\nendif::holygrail[]", (lines * ::Asciidoctor::LF) end test 'ifeval comparing missing attribute to nil includes content' do input = <<~'EOS' ifeval::['{foo}' == ''] No foo for you! endif::[] EOS doc = Asciidoctor::Document.new input reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal 'No foo for you!', (lines * ::Asciidoctor::LF) end test 'ifeval comparing missing attribute to 0 drops content' do input = <<~'EOS' ifeval::[{leveloffset} == 0] I didn't make the cut! endif::[] EOS doc = Asciidoctor::Document.new input reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal '', (lines * ::Asciidoctor::LF) end test 'ifeval running unsupported operation on missing attribute drops content' do input = <<~'EOS' ifeval::[{leveloffset} >= 3] I didn't make the cut! endif::[] EOS doc = Asciidoctor::Document.new input reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal '', (lines * ::Asciidoctor::LF) end test 'ifeval running invalid operation drops content' do input = <<~'EOS' ifeval::[{asciidoctor-version} > true] I didn't make the cut! endif::[] EOS doc = Asciidoctor::Document.new input reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal '', (lines * ::Asciidoctor::LF) end test 'ifeval comparing double-quoted attribute to matching string includes content' do input = <<~'EOS' ifeval::["{gem}" == "asciidoctor"] Asciidoctor it is! endif::[] EOS doc = Asciidoctor::Document.new input, attributes: { 'gem' => 'asciidoctor' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal 'Asciidoctor it is!', (lines * ::Asciidoctor::LF) end test 'ifeval comparing single-quoted attribute to matching string includes content' do input = <<~'EOS' ifeval::['{gem}' == 'asciidoctor'] Asciidoctor it is! endif::[] EOS doc = Asciidoctor::Document.new input, attributes: { 'gem' => 'asciidoctor' } reader = doc.reader lines = [] while reader.has_more_lines? 
lines << reader.read_line end assert_equal 'Asciidoctor it is!', (lines * ::Asciidoctor::LF) end test 'ifeval comparing quoted attribute to non-matching string drops content' do input = <<~'EOS' ifeval::['{gem}' == 'asciidoctor'] Asciidoctor it is! endif::[] EOS doc = Asciidoctor::Document.new input, attributes: { 'gem' => 'tilt' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal '', (lines * ::Asciidoctor::LF) end test 'ifeval comparing attribute to lower version number includes content' do input = <<~'EOS' ifeval::['{asciidoctor-version}' >= '0.1.0'] That version will do! endif::[] EOS doc = Asciidoctor::Document.new input reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal 'That version will do!', (lines * ::Asciidoctor::LF) end test 'ifeval comparing attribute to self includes content' do input = <<~'EOS' ifeval::['{asciidoctor-version}' == '{asciidoctor-version}'] Of course it's the same! endif::[] EOS doc = Asciidoctor::Document.new input reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal 'Of course it\'s the same!', (lines * ::Asciidoctor::LF) end test 'ifeval arguments can be transposed' do input = <<~'EOS' ifeval::['0.1.0' <= '{asciidoctor-version}'] That version will do! endif::[] EOS doc = Asciidoctor::Document.new input reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal 'That version will do!', (lines * ::Asciidoctor::LF) end test 'ifeval matching numeric equality includes content' do input = <<~'EOS' ifeval::[{rings} == 1] One ring to rule them all! endif::[] EOS doc = Asciidoctor::Document.new input, attributes: { 'rings' => '1' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal 'One ring to rule them all!', (lines * ::Asciidoctor::LF) end test 'ifeval matching numeric inequality includes content' do input = <<~'EOS' ifeval::[{rings} != 0] One ring to rule them all! endif::[] EOS doc = Asciidoctor::Document.new input, attributes: { 'rings' => '1' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal 'One ring to rule them all!', (lines * ::Asciidoctor::LF) end test 'should warn if ifeval has target' do input = <<~'EOS' ifeval::target[1 == 1] content EOS using_memory_logger do |logger| doc = Asciidoctor::Document.new input reader = doc.reader lines = [] lines << reader.read_line while reader.has_more_lines? assert_equal 'content', (lines * ::Asciidoctor::LF) assert_message logger, :ERROR, '~: line 1: malformed preprocessor directive - target not permitted: ifeval::target[1 == 1]', Hash end end test 'should warn if ifeval has invalid expression' do input = <<~'EOS' ifeval::[1 | 2] content EOS using_memory_logger do |logger| doc = Asciidoctor::Document.new input reader = doc.reader lines = [] lines << reader.read_line while reader.has_more_lines? assert_equal 'content', (lines * ::Asciidoctor::LF) assert_message logger, :ERROR, '~: line 1: malformed preprocessor directive - invalid expression: ifeval::[1 | 2]', Hash end end test 'should warn if ifeval is missing expression' do input = <<~'EOS' ifeval::[] content EOS using_memory_logger do |logger| doc = Asciidoctor::Document.new input reader = doc.reader lines = [] lines << reader.read_line while reader.has_more_lines? 
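    # an empty expression makes the ifeval directive malformed, so the line is dropped and an ERROR is logged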
assert_equal 'content', (lines * ::Asciidoctor::LF) assert_message logger, :ERROR, '~: line 1: malformed preprocessor directive - missing expression: ifeval::[]', Hash end end test 'ifdef with no target is ignored' do input = <<~'EOS' ifdef::[] content EOS using_memory_logger do |logger| doc = Asciidoctor::Document.new input reader = doc.reader lines = [] lines << reader.read_line while reader.has_more_lines? assert_equal 'content', (lines * ::Asciidoctor::LF) assert_message logger, :ERROR, '~: line 1: malformed preprocessor directive - missing target: ifdef::[]', Hash end end test 'should not warn if preprocessor directive is invalid if already skipping' do input = <<~'EOS' ifdef::attribute-not-set[] foo ifdef::[] bar endif::[] EOS using_memory_logger do |logger| result = (Asciidoctor::Document.new input).reader.read assert_empty result assert_empty logger end end test 'should not fail to process preprocessor directive that evaluates to false and has a large number of lines' do lines = (%w(data) * 5000) * ?\n input = <<~EOS before ifdef::attribute-not-set[] #{lines} endif::attribute-not-set[] after EOS doc = Asciidoctor.load input assert_equal 2, doc.blocks.size assert_equal 'before', doc.blocks[0].source assert_equal 'after', doc.blocks[1].source end test 'should not fail to process lines if reader contains a nil entry' do input = ['before', '', '', '', 'after'] doc = Asciidoctor.load input, extensions: proc { preprocessor do process do |_, reader| reader.source_lines[2] = nil nil end end } assert_equal 2, doc.blocks.size assert_equal 'before', doc.blocks[0].source assert_equal 'after', doc.blocks[1].source end end end end asciidoctor-2.0.20/test/sections_test.rb000066400000000000000000003233751443135032600203060ustar00rootroot00000000000000# frozen_string_literal: true require_relative 'test_helper' context 'Sections' do context 'Ids' do test 'synthetic id is generated by default' do sec = block_from_string('== Section One') assert_equal '_section_one', sec.id end test 'duplicate synthetic id is automatically enumerated' do doc = document_from_string <<~'EOS' == Section One == Section One EOS assert_equal 2, doc.blocks.size assert_equal '_section_one', doc.blocks[0].id assert_equal '_section_one_2', doc.blocks[1].id end test 'synthetic id removes non-word characters' do sec = block_from_string("== We’re back!") assert_equal '_were_back', sec.id end test 'synthetic id removes repeating separators' do sec = block_from_string('== Section $ One') assert_equal '_section_one', sec.id end test 'synthetic id removes entities' do sec = block_from_string('== Ben & Jerry & Company¹ "Ice Cream Brothers" あ') assert_equal '_ben_jerry_company_ice_cream_brothers', sec.id end test 'synthetic id removes adjacent entities with mixed case' do sec = block_from_string('== a ®&© b') assert_equal '_a_b', sec.id end test 'synthetic id removes XML tags' do sec = block_from_string('== Use the `run` command to make it icon:gear[]') assert_equal '_use_the_run_command_to_make_it_gear', sec.id end test 'synthetic id collapses repeating spaces' do sec = block_from_string('== Go Far') assert_equal '_go_far', sec.id end test 'synthetic id replaces hyphens with separator' do sec = block_from_string('== State-of-the-art design') assert_equal '_state_of_the_art_design', sec.id end test 'synthetic id replaces dots with separator' do sec = block_from_string("== Section 1.1.1") assert_equal '_section_1_1_1', sec.id end test 'synthetic id prefix can be customized' do sec = block_from_string(":idprefix: id_\n\n== Section One") 
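    # the idprefix attribute value replaces the default '_' prefix in the generated id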
assert_equal 'id_section_one', sec.id end test 'synthetic id prefix can be set to blank' do sec = block_from_string(":idprefix:\n\n== Section One") assert_equal 'section_one', sec.id end test 'synthetic id prefix is stripped from beginning of id if set to blank' do sec = block_from_string(":idprefix:\n\n== & ! More") assert_equal 'more', sec.id end test 'synthetic id separator can be customized' do sec = block_from_string(":idseparator: -\n\n== Section One") assert_equal '_section-one', sec.id end test 'synthetic id separator can be hyphen and hyphens are preserved' do sec = block_from_string(":idseparator: -\n\n== State-of-the-art design") assert_equal '_state-of-the-art-design', sec.id end test 'synthetic id separator can be dot and dots are preserved' do sec = block_from_string(":idseparator: .\n\n== Version 5.0.1") assert_equal '_version.5.0.1', sec.id end test 'synthetic id separator can only be one character' do input = <<~'EOS' :idseparator: -=- == This Section Is All You Need EOS sec = block_from_string input assert_equal '_this-section-is-all-you-need', sec.id end test 'synthetic id separator can be set to blank' do sec = block_from_string(":idseparator:\n\n== Section One") assert_equal '_sectionone', sec.id end test 'synthetic id separator can be set to blank when idprefix is blank' do sec = block_from_string(":idprefix:\n:idseparator:\n\n== Section One") assert_equal 'sectionone', sec.id end test 'synthetic id separator is removed from beginning of id when idprefix is blank' do sec = block_from_string(":idprefix:\n:idseparator: _\n\n== +Section One") assert_equal 'section_one', sec.id end test 'synthetic ids can be disabled' do sec = block_from_string(":sectids!:\n\n== Section One\n") assert_nil sec.id end test 'explicit id in anchor above section title overrides synthetic id' do sec = block_from_string("[[one]]\n== Section One") assert_equal 'one', sec.id end test 'explicit id in block attributes above section title overrides synthetic id' do sec = block_from_string("[id=one]\n== Section One") assert_equal 'one', sec.id end test 'explicit id set using shorthand in style above section title overrides synthetic id' do sec = block_from_string("[#one]\n== Section One") assert_equal 'one', sec.id end test 'should use explicit id from last block attribute line above section title that defines an explicit id' do input = <<~'EOS' [#un] [#one] == Section One EOS sec = block_from_string input assert_equal 'one', sec.id end test 'explicit id can be defined using an embedded anchor' do sec = block_from_string("== Section One [[one]] ==") assert_equal 'one', sec.id assert_equal 'Section One', sec.title end test 'explicit id can be defined using an embedded anchor when using setext section titles' do input = <<~'EOS' Section Title [[refid,reftext]] ------------------------------- EOS sec = block_from_string input assert_equal 'Section Title', sec.title assert_equal 'refid', sec.id assert_equal 'reftext', (sec.attr 'reftext') end test 'explicit id can be defined using an embedded anchor with reftext' do sec = block_from_string("== Section One [[one,Section Uno]] ==") assert_equal 'one', sec.id assert_equal 'Section One', sec.title assert_equal 'Section Uno', (sec.attr 'reftext') end test 'id and reftext in embedded anchor cannot be quoted' do sec = block_from_string(%(== Section One [["one","Section Uno"]] ==)) refute_equal 'one', sec.id assert_equal 'Section One [["one","Section Uno"]]', sec.title assert_nil(sec.attr 'reftext') end test 'reftext in embedded anchor may contain comma' do sec = 
    block_from_string(%(== Section One [[one, Section,Uno]] ==)) assert_equal 'one', sec.id assert_equal 'Section One', sec.title assert_equal 'Section,Uno', (sec.attr 'reftext') end test 'should unescape but not process inline anchor' do sec = block_from_string(%(== Section One \\[[one]] ==)) refute_equal 'one', sec.id assert_equal 'Section One [[one]]', sec.title end test 'should not process inline anchor in section title if section has explicit ID' do sec = block_from_string(%([#sect-one]\n== Section One [[one]])) assert_equal 'sect-one', sec.id assert_equal 'Section One ', sec.title end test 'should apply substitutions to title with attribute references when registering section with auto-generated ID' do input = <<~'EOS' = Document Title :foo: bar See <<_section_baz>>. :foo: baz == Section {foo} That's all, folks! EOS doc = document_from_string input ref = doc.catalog[:refs]['_section_baz'] refute_nil ref output = doc.convert standalone: false assert_xpath '//a[@href="#_section_baz"][text()="Section baz"]', output, 1 assert_xpath '//h2[@id="_section_baz"][text()="Section baz"]', output, 1 end test 'should apply substitutions to title with attribute references when registering section with explicit ID' do input = <<~'EOS' = Document Title :foo: bar See <<explicit>>. :foo: baz [#explicit] == Section {foo} That's all, folks! EOS doc = document_from_string input ref = doc.catalog[:refs]['explicit'] refute_nil ref output = doc.convert standalone: false assert_xpath '//a[@href="#explicit"][text()="Section baz"]', output, 1 assert_xpath '//h2[@id="explicit"][text()="Section baz"]', output, 1 end test 'title substitutions are applied before generating id' do sec = block_from_string("== Section{sp}One\n") assert_equal '_section_one', sec.id end test 'synthetic ids are unique' do input = <<~'EOS' == Some section text == Some section text EOS doc = document_from_string input assert_equal '_some_section', doc.blocks[0].id assert_equal '_some_section_2', doc.blocks[1].id end # NOTE test cannot be run in parallel with other tests test 'can set start index of synthetic ids' do old_unique_id_start_index = Asciidoctor::Compliance.unique_id_start_index begin input = <<~'EOS' == Some section text == Some section text EOS Asciidoctor::Compliance.unique_id_start_index = 1 doc = document_from_string input assert_equal '_some_section', doc.blocks[0].id assert_equal '_some_section_1', doc.blocks[1].id ensure Asciidoctor::Compliance.unique_id_start_index = old_unique_id_start_index end end test 'should use specified id and reftext when registering section reference' do input = <<~'EOS' [[install,Install Procedure]] == Install content EOS doc = document_from_string input ref = doc.catalog[:refs]['install'] refute_nil ref assert_equal 'Install Procedure', ref.reftext assert_equal 'install', (doc.resolve_id 'Install Procedure') end test 'should use specified reftext when registering section reference' do input = <<~'EOS' [reftext="Install Procedure"] == Install content EOS doc = document_from_string input ref = doc.catalog[:refs]['_install'] refute_nil ref assert_equal 'Install Procedure', ref.reftext assert_equal '_install', (doc.resolve_id 'Install Procedure') end test 'should resolve attribute reference in title using attribute defined at location of section title' do input = <<~'EOS' :platform-id: linux :platform-name: Linux [#install-{platform-id}] == Install on {platform-name} content :platform-id: win32 :platform-name: Windows [#install-{platform-id}] == Install on {platform-name} content EOS doc = document_from_string
input ref = doc.catalog[:refs]['install-win32'] refute_nil ref assert_equal 'Install on Windows', ref.title assert_equal 'install-win32', (doc.resolve_id 'Install on Windows') end test 'should substitute attributes when registering reftext for section' do input = <<~'EOS' :platform-name: n/a == Overview :platform-name: Linux [[install,install on {platform-name}]] == Install content EOS doc = document_from_string input ref = doc.catalog[:refs]['install'] refute_nil ref assert_equal 'install on Linux', ref.reftext assert_equal 'install', (doc.resolve_id 'install on Linux') end test 'duplicate section id should not overwrite existing section id entry in references table' do input = <<~'EOS' [#install] == First Install content [#install] == Second Install content EOS using_memory_logger do |logger| doc = document_from_string input ref = doc.catalog[:refs]['install'] refute_nil ref assert_nil ref.reftext assert_equal 'First Install', ref.title assert_equal 'install', (doc.resolve_id 'First Install') assert_message logger, :WARN, ': line 7: id assigned to section already in use: install', Hash end end test 'should warn if explicit section ID matches auto-generated section ID' do input = <<~'EOS' == Do Not Repeat Yourself content [#_do_not_repeat_yourself] == Do Not Repeat Yourself content EOS using_memory_logger do |logger| doc = document_from_string input ref = doc.catalog[:refs]['_do_not_repeat_yourself'] refute_nil ref assert_nil ref.reftext assert_equal 'Do Not Repeat Yourself', ref.title assert_equal '_do_not_repeat_yourself', (doc.resolve_id 'Do Not Repeat Yourself') assert_message logger, :WARN, ': line 6: id assigned to section already in use: _do_not_repeat_yourself', Hash assert_equal 2, (doc.convert.scan 'id="_do_not_repeat_yourself"').size end end test 'duplicate block id should not overwrite existing section id entry in references table' do input = <<~'EOS' [#install] == First Install content [#install] content EOS using_memory_logger do |logger| doc = document_from_string input ref = doc.catalog[:refs]['install'] refute_nil ref assert_nil ref.reftext assert_equal 'First Install', ref.title assert_equal 'install', (doc.resolve_id 'First Install') assert_message logger, :WARN, ': line 7: id assigned to block already in use: install', Hash end end end context 'Levels' do context 'Document Title (Level 0)' do test "document title with multiline syntax" do title = "My Title" chars = "=" * title.length assert_xpath "//h1[not(@id)][text() = 'My Title']", convert_string(title + "\n" + chars) assert_xpath "//h1[not(@id)][text() = 'My Title']", convert_string(title + "\n" + chars + "\n") end test "document title with multiline syntax, give a char" do title = "My Title" chars = "=" * (title.length + 1) assert_xpath "//h1[not(@id)][text() = 'My Title']", convert_string(title + "\n" + chars) assert_xpath "//h1[not(@id)][text() = 'My Title']", convert_string(title + "\n" + chars + "\n") end test "document title with multiline syntax, take a char" do title = "My Title" chars = "=" * (title.length - 1) assert_xpath "//h1[not(@id)][text() = 'My Title']", convert_string(title + "\n" + chars) assert_xpath "//h1[not(@id)][text() = 'My Title']", convert_string(title + "\n" + chars + "\n") end test 'document title with multiline syntax and unicode characters' do input = <<~'EOS' AsciiDoc Writer’s Guide ======================= Author Name preamble EOS result = convert_string input assert_xpath '//h1', result, 1 assert_xpath '//h1[text()="AsciiDoc Writer’s Guide"]', result, 1 end test "not enough chars 
for a multiline document title" do title = "My Title" chars = "=" * (title.length - 2) using_memory_logger do |logger| output = convert_string(title + "\n" + chars) assert_xpath '//h1', output, 0 refute logger.empty? logger.clear output = convert_string(title + "\n" + chars + "\n") assert_xpath '//h1', output, 0 refute logger.empty? end end test "too many chars for a multiline document title" do title = "My Title" chars = "=" * (title.length + 2) using_memory_logger do |logger| output = convert_string(title + "\n" + chars) assert_xpath '//h1', output, 0 refute logger.empty? logger.clear output = convert_string(title + "\n" + chars + "\n") assert_xpath '//h1', output, 0 refute logger.empty? end end test "document title with multiline syntax cannot begin with a dot" do title = ".My Title" chars = "=" * title.length using_memory_logger do |logger| output = convert_string(title + "\n" + chars) assert_xpath '//h1', output, 0 refute logger.empty? end end test "document title with atx syntax" do assert_xpath "//h1[not(@id)][text() = 'My Title']", convert_string("= My Title") end test "document title with symmetric syntax" do assert_xpath "//h1[not(@id)][text() = 'My Title']", convert_string("= My Title =") end test 'document title created from leveloffset shift defined in document' do assert_xpath "//h1[not(@id)][text() = 'Document Title']", convert_string(%(:leveloffset: -1\n== Document Title)) end test 'document title created from leveloffset shift defined in API' do assert_xpath "//h1[not(@id)][text() = 'Document Title']", convert_string('== Document Title', attributes: { 'leveloffset' => '-1@' }) end test 'should assign id on document title to body' do input = <<~'EOS' [[idname]] = Document Title content EOS output = convert_string input assert_css 'body#idname', output, 1 end test 'should assign id defined using shorthand syntax on document title to body' do input = <<~'EOS' [#idname] = Document Title content EOS output = convert_string input assert_css 'body#idname', output, 1 end test 'should use ID defined in block attributes instead of ID defined inline' do input = <<~'EOS' [#idname-block] = Document Title [[idname-inline]] content EOS output = convert_string input assert_css 'body#idname-block', output, 1 end test 'block id above document title sets id on document' do input = <<~'EOS' [[reference]] = Reference Manual :css-signature: refguide preamble EOS doc = document_from_string input assert_equal 'reference', doc.id assert_equal 'refguide', doc.attr('css-signature') output = doc.convert assert_css 'body#reference', output, 1 end test 'should register document in catalog if id is set' do input = <<~'EOS' [[manual,Manual]] = Reference Manual preamble EOS doc = document_from_string input assert_equal 'manual', doc.id assert_equal 'Manual', doc.attributes['reftext'] assert_equal doc, doc.catalog[:refs]['manual'] end test 'should compute xreftext to document title' do input = <<~'EOS' [#manual] = Reference Manual :xrefstyle: full This is the <>. 
EOS output = convert_string input assert_xpath '//a[text()="Reference Manual"]', output, 1 end test 'should discard style, role and options shorthand attributes defined on document title' do input = <<~'EOS' [style#idname.rolename%optionname] = Document Title content EOS doc = document_from_string input assert_empty doc.blocks[0].attributes output = doc.convert assert_css '#idname', output, 1 assert_css 'body#idname', output, 1 assert_css '.rolename', output, 1 assert_css 'body.rolename', output, 1 end end context 'Level 1' do test "with multiline syntax" do assert_xpath "//h2[@id='_my_section'][text() = 'My Section']", convert_string("My Section\n-----------") end test 'should not recognize underline containing a mix of characters as setext section title' do input = <<~'EOS' My Section ----^^---- EOS result = convert_string_to_embedded input assert_xpath '//h2[@id="_my_section"][text() = "My Section"]', result, 0 assert_includes result, '----^^----' end test 'should not recognize section title that does not contain alphanumeric character' do input = <<~'EOS' !@#$ ---- EOS using_memory_logger do |logger| result = convert_string_to_embedded input assert_css 'h2', result, 0 end end test 'should not recognize section title that consists of only underscores' do input = <<~'EOS' ____ ---- EOS using_memory_logger do |logger| result = convert_string_to_embedded input assert_css 'h2', result, 0 end end test 'should preprocess second line of setext section title' do input = <<~'EOS' Section Title ifdef::asciidoctor[] ------------- endif::[] EOS result = convert_string_to_embedded input assert_xpath '//h2', result, 1 end test "heading title with multiline syntax cannot begin with a dot" do title = ".My Title" chars = "-" * title.length using_memory_logger do |logger| output = convert_string(title + "\n" + chars) assert_xpath '//h2', output, 0 refute logger.empty? 
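    # no heading is emitted and a warning is logged because the leading dot marks a block title, not a section title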
end end test "with atx syntax" do assert_xpath "//h2[@id='_my_title'][text() = 'My Title']", convert_string("== My Title") end test "with atx symmetric syntax" do assert_xpath "//h2[@id='_my_title'][text() = 'My Title']", convert_string("== My Title ==") end test "with atx non-matching symmetric syntax" do assert_xpath "//h2[@id='_my_title'][text() = 'My Title ===']", convert_string("== My Title ===") end test "with XML entity" do assert_xpath "//h2[@id='_whats_new'][text() = \"What#{decode_char 8217}s new?\"]", convert_string("== What's new?") end test "with non-word character" do assert_xpath "//h2[@id='_whats_new'][text() = \"What’s new?\"]", convert_string("== What’s new?") end test "with sequential non-word characters" do assert_xpath "//h2[@id='_what_the_is_this'][text() = 'What the \#@$ is this?']", convert_string('== What the #@$ is this?') end test "with trailing whitespace" do assert_xpath "//h2[@id='_my_title'][text() = 'My Title']", convert_string("== My Title ") end test "with custom blank idprefix" do assert_xpath "//h2[@id='my_title'][text() = 'My Title']", convert_string(":idprefix:\n\n== My Title ") end test "with custom non-blank idprefix" do assert_xpath "//h2[@id='ref_my_title'][text() = 'My Title']", convert_string(":idprefix: ref_\n\n== My Title ") end test 'with multibyte characters' do input = '== Asciidoctor in 中文' output = convert_string input assert_xpath '//h2[@id="_asciidoctor_in_中文"][text()="Asciidoctor in 中文"]', output end test 'with only multibyte characters' do input = '== 视图' output = convert_string_to_embedded input assert_xpath '//h2[@id="_视图"][text()="视图"]', output end test 'multiline syntax with only multibyte characters' do input = <<~'EOS' 视图 -- content 连接器 --- content EOS output = convert_string_to_embedded input assert_xpath '//h2[@id="_视图"][text()="视图"]', output assert_xpath '//h2[@id="_连接器"][text()="连接器"]', output end end context 'Level 2' do test "with multiline syntax" do assert_xpath "//h3[@id='_my_section'][text() = 'My Section']", convert_string(":fragment:\nMy Section\n~~~~~~~~~~~") end test "with atx line syntax" do assert_xpath "//h3[@id='_my_title'][text() = 'My Title']", convert_string(":fragment:\n=== My Title") end end context 'Level 3' do test "with multiline syntax" do assert_xpath "//h4[@id='_my_section'][text() = 'My Section']", convert_string(":fragment:\nMy Section\n^^^^^^^^^^") end test 'with atx line syntax' do assert_xpath "//h4[@id='_my_title'][text() = 'My Title']", convert_string(":fragment:\n==== My Title") end end context 'Level 4' do test "with multiline syntax" do assert_xpath "//h5[@id='_my_section'][text() = 'My Section']", convert_string(":fragment:\nMy Section\n++++++++++") end test "with atx line syntax" do assert_xpath "//h5[@id='_my_title'][text() = 'My Title']", convert_string(":fragment:\n===== My Title") end end context 'Level 5' do test "with atx line syntax" do assert_xpath "//h6[@id='_my_title'][text() = 'My Title']", convert_string(":fragment:\n====== My Title") end end end context 'Substitutions' do test 'should apply substitutions in normal order' do input = <<~'EOS' == {link-url}[{link-text}]{tm} The one and only! 
EOS output = convert_string_to_embedded input, attributes: { 'link-url' => 'https://acme.com', 'link-text' => 'ACME', 'tm' => '(TM)', } assert_css 'h2', output, 1 assert_css 'h2 a[href="https://acme.com"]', output, 1 assert_xpath %(//h2[contains(text(),"#{decode_char 8482}")]), output, 1 end end context 'Nesting' do test 'should warn if section title is out of sequence' do input = <<~'EOS' = Document Title == Section A ==== Nested Section content == Section B content EOS using_memory_logger do |logger| result = convert_string_to_embedded input assert_xpath '//h4[text()="Nested Section"]', result, 1 assert_message logger, :WARN, ': line 5: section title out of sequence: expected level 2, got level 3', Hash end end test 'should warn if chapter title is out of sequence' do input = <<~'EOS' = Document Title :doctype: book === Not a Chapter content EOS using_memory_logger do |logger| result = convert_string_to_embedded input assert_xpath '//h3[text()="Not a Chapter"]', result, 1 assert_message logger, :WARN, ': line 4: section title out of sequence: expected levels 0 or 1, got level 2', Hash end end test 'should not warn if top-level section title is out of sequence when fragment attribute is set on document' do input = <<~'EOS' = Document Title === First Section content EOS using_memory_logger do |logger| convert_string_to_embedded input, attributes: { 'fragment' => '' } assert logger.empty? end end test 'should warn if nested section title is out of sequence when fragment attribute is set on document' do input = <<~'EOS' = Document Title === First Section ===== Nested Section EOS using_memory_logger do |logger| convert_string_to_embedded input, attributes: { 'fragment' => '' } assert_message logger, :WARN, ': line 5: section title out of sequence: expected level 3, got level 4', Hash end end test 'should log error if subsections are found in special sections in article that do not support subsections' do input = <<~'EOS' = Document Title == Section === Subsection of Section allowed [appendix] == Appendix === Subsection of Appendix allowed [glossary] == Glossary === Subsection of Glossary not allowed [bibliography] == Bibliography === Subsection of Bibliography not allowed EOS using_memory_logger do |logger| convert_string_to_embedded input assert_messages logger, [ [:ERROR, ': line 19: glossary sections do not support nested sections', Hash], [:ERROR, ': line 26: bibliography sections do not support nested sections', Hash], ] end end test 'should log error if subsections are found in special sections in book that do not support subsections' do input = <<~'EOS' = Document Title :doctype: book [preface] = Preface === Subsection of Preface allowed [colophon] = Colophon === Subsection of Colophon not allowed [dedication] = Dedication === Subsection of Dedication not allowed = Part 1 [abstract] == Abstract === Subsection of Abstract allowed == Chapter 1 === Subsection of Chapter allowed [appendix] = Appendix === Subsection of Appendix allowed [glossary] = Glossary === Subsection of Glossary not allowed [bibliography] = Bibliography === Subsection of Bibliography not allowed EOS using_memory_logger do |logger| convert_string_to_embedded input assert_messages logger, [ [:ERROR, ': line 14: colophon sections do not support nested sections', Hash], [:ERROR, ': line 21: dedication sections do not support nested sections', Hash], [:ERROR, ': line 50: glossary sections do not support nested sections', Hash], [:ERROR, ': line 57: bibliography sections do not support nested sections', Hash] ] end end end 
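  # Markdown-style headings: the tests below verify that a leading # marker is treated like the = marker for document and section titles, that a symmetric closing marker is optional, and that mixed markers (=#=) are not matched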
  context 'Markdown-style headings' do
    test 'atx document title with leading marker' do
      input = '# Document Title'
      output = convert_string input
      assert_xpath "//h1[not(@id)][text() = 'Document Title']", output, 1
    end

    test 'atx document title with symmetric markers' do
      input = '# Document Title #'
      output = convert_string input
      assert_xpath "//h1[not(@id)][text() = 'Document Title']", output, 1
    end

    test 'atx section title with leading marker' do
      input = <<~'EOS'
      ## Section One

      blah blah
      EOS

      output = convert_string input
      assert_xpath "//h2[@id='_section_one'][text() = 'Section One']", output, 1
    end

    test 'atx section title with symmetric markers' do
      input = <<~'EOS'
      ## Section One ##

      blah blah
      EOS

      output = convert_string input
      assert_xpath "//h2[@id='_section_one'][text() = 'Section One']", output, 1
    end

    test 'should not match atx syntax with mixed markers' do
      input = '=#= My Title'
      output = convert_string_to_embedded input
      assert_xpath "//h3[@id='_my_title'][text() = 'My Title']", output, 0
      assert_includes output, '

    =#= My Title

    '
    end
  end

  context 'Discrete Heading' do
    test 'should create discrete heading instead of section if style is float' do
      input = <<~'EOS'
      [float]
      = Independent Heading!

      not in section
      EOS

      output = convert_string_to_embedded input
      assert_xpath '/h1[@id="_independent_heading"]', output, 1
      assert_xpath '/h1[@class="float"]', output, 1
      assert_xpath %(/h1[@class="float"][text()="Independent Heading!"]), output, 1
      assert_xpath '/h1/following-sibling::*[@class="paragraph"]', output, 1
      assert_xpath '/h1/following-sibling::*[@class="paragraph"]/p', output, 1
      assert_xpath '/h1/following-sibling::*[@class="paragraph"]/p[text()="not in section"]', output, 1
    end

    test 'should create discrete heading instead of section if style is discrete' do
      input = <<~'EOS'
      [discrete]
      === Independent Heading!

      not in section
      EOS

      output = convert_string_to_embedded input
      assert_xpath '/h3', output, 1
      assert_xpath '/h3[@id="_independent_heading"]', output, 1
      assert_xpath '/h3[@class="discrete"]', output, 1
      assert_xpath %(/h3[@class="discrete"][text()="Independent Heading!"]), output, 1
      assert_xpath '/h3/following-sibling::*[@class="paragraph"]', output, 1
      assert_xpath '/h3/following-sibling::*[@class="paragraph"]/p', output, 1
      assert_xpath '/h3/following-sibling::*[@class="paragraph"]/p[text()="not in section"]', output, 1
    end

    test 'should generate id for discrete heading from converted title' do
      input = <<~'EOS'
      [discrete]
      === {sp}Heading{sp}

      not in section
      EOS

      output = convert_string_to_embedded input
      assert_xpath '/h3', output, 1
      assert_xpath '/h3[@class="discrete"][@id="_heading"]', output, 1
      assert_xpath '/h3[@class="discrete"][@id="_heading"][text()=" Heading "]', output, 1
    end

    test 'should create discrete heading if style is float with shorthand role and id' do
      input = <<~'EOS'
      [float.independent#first]
      = Independent Heading!

      not in section
      EOS

      output = convert_string_to_embedded input
      assert_xpath '/h1[@id="first"]', output, 1
      assert_xpath '/h1[@class="float independent"]', output, 1
      assert_xpath %(/h1[@class="float independent"][text()="Independent Heading!"]), output, 1
      assert_xpath '/h1/following-sibling::*[@class="paragraph"]', output, 1
      assert_xpath '/h1/following-sibling::*[@class="paragraph"]/p', output, 1
      assert_xpath '/h1/following-sibling::*[@class="paragraph"]/p[text()="not in section"]', output, 1
    end

    test 'should create discrete heading if style is discrete with shorthand role and id' do
      input = <<~'EOS'
      [discrete.independent#first]
      = Independent Heading!

      not in section
      EOS

      output = convert_string_to_embedded input
      assert_xpath '/h1[@id="first"]', output, 1
      assert_xpath '/h1[@class="discrete independent"]', output, 1
      assert_xpath %(/h1[@class="discrete independent"][text()="Independent Heading!"]), output, 1
      assert_xpath '/h1/following-sibling::*[@class="paragraph"]', output, 1
      assert_xpath '/h1/following-sibling::*[@class="paragraph"]/p', output, 1
      assert_xpath '/h1/following-sibling::*[@class="paragraph"]/p[text()="not in section"]', output, 1
    end

    test 'discrete heading should be a block with context floating_title' do
      input = <<~'EOS'
      [float]
      === Independent Heading!

      not in section
      EOS

      doc = document_from_string input
      heading = doc.blocks.first
      assert_kind_of Asciidoctor::Block, heading
      assert_equal :floating_title, heading.context
      assert_equal '_independent_heading', heading.id
      assert doc.catalog[:refs].key?
'_independent_heading' end test 'should preprocess second line of setext discrete heading' do input = <<~'EOS' [discrete] Heading Title ifdef::asciidoctor[] ------------- endif::[] EOS result = convert_string_to_embedded input assert_xpath '//h2', result, 1 end test 'can assign explicit id to discrete heading' do input = <<~'EOS' [[unchained]] [float] === Independent Heading! not in section EOS doc = document_from_string input heading = doc.blocks.first assert_equal 'unchained', heading.id assert doc.catalog[:refs].key? 'unchained' end test 'should not include discrete heading in toc' do input = <<~'EOS' :toc: == Section One [float] === Miss Independent == Section Two EOS output = convert_string input assert_xpath '//*[@id="toc"]', output, 1 assert_xpath %(//*[@id="toc"]//a[contains(text(), "Section ")]), output, 2 assert_xpath %(//*[@id="toc"]//a[text()="Miss Independent"]), output, 0 end test 'should not set id on discrete heading if sectids attribute is unset' do input = <<~'EOS' [float] === Independent Heading! not in section EOS output = convert_string_to_embedded input, attributes: { 'sectids' => nil } assert_xpath '/h3', output, 1 assert_xpath '/h3[@id="_independent_heading"]', output, 0 assert_xpath '/h3[@class="float"]', output, 1 end test 'should use explicit id for discrete heading if specified' do input = <<~'EOS' [[free]] [float] == Independent Heading! not in section EOS output = convert_string_to_embedded input assert_xpath '/h2', output, 1 assert_xpath '/h2[@id="free"]', output, 1 assert_xpath '/h2[@class="float"]', output, 1 end test 'should add role to class attribute on discrete heading' do input = <<~'EOS' [float, role="isolated"] == Independent Heading! not in section EOS output = convert_string_to_embedded input assert_xpath '/h2', output, 1 assert_xpath '/h2[@id="_independent_heading"]', output, 1 assert_xpath '/h2[@class="float isolated"]', output, 1 end test 'should ignore title attribute on discrete heading' do input = <<~'EOS' [discrete,title="Captured!"] == Independent Heading! not in section EOS doc = document_from_string input heading = doc.blocks[0] assert_equal 'Independent Heading!', heading.title refute heading.attributes.key? 
'title' end test 'should use specified id and reftext when registering discrete section reference' do input = <<~'EOS' [[install,Install Procedure]] [discrete] == Install content EOS doc = document_from_string input ref = doc.catalog[:refs]['install'] refute_nil ref assert_equal 'Install Procedure', ref.reftext assert_equal 'install', (doc.resolve_id 'Install Procedure') end test 'should use specified reftext when registering discrete section reference' do input = <<~'EOS' [reftext="Install Procedure"] [discrete] == Install content EOS doc = document_from_string input ref = doc.catalog[:refs]['_install'] refute_nil ref assert_equal 'Install Procedure', ref.reftext assert_equal '_install', (doc.resolve_id 'Install Procedure') end test 'should not process inline anchor in discrete heading if explicit ID is assigned' do input = <<~'EOS' [discrete#install] == Install [[installation]] content EOS block = block_from_string input assert_equal block.id, 'install' assert_equal 'Install ', block.title end end context 'Level offset' do test 'should print error if standalone document is included without level offset' do input = <<~'EOS' = Main Document Doc Writer text in main document // begin simulated include::[] = Standalone Document :author: Junior Writer text in standalone document // end simulated include::[] EOS using_memory_logger do |logger| convert_string input assert_message logger, :ERROR, ': line 7: level 0 sections can only be used when doctype is book', Hash end end test 'should add level offset to section level' do input = <<~'EOS' = Main Document Doc Writer Main document written by {author}. :leveloffset: 1 // begin simulated include::[] = Standalone Document :author: Junior Writer Standalone document written by {author}. == Section in Standalone Standalone section text. // end simulated include::[] :leveloffset!: == Section in Main Main section text. EOS output = nil using_memory_logger do |logger| output = convert_string input assert logger.empty? end assert_match(/Main document written by Doc Writer/, output) assert_match(/Standalone document written by Junior Writer/, output) assert_xpath '//*[@class="sect1"]/h2[text() = "Standalone Document"]', output, 1 assert_xpath '//*[@class="sect2"]/h3[text() = "Section in Standalone"]', output, 1 assert_xpath '//*[@class="sect1"]/h2[text() = "Section in Main"]', output, 1 end test 'level offset should be added to discrete heading' do input = <<~'EOS' = Main Document Doc Writer :leveloffset: 1 [float] = Discrete Heading EOS output = convert_string input assert_xpath '//h2[@class="float"][text() = "Discrete Heading"]', output, 1 end test 'should be able to reset level offset' do input = <<~'EOS' = Main Document Doc Writer Main preamble. :leveloffset: 1 = Standalone Document Standalone preamble. :leveloffset!: == Level 1 Section EOS output = convert_string input assert_xpath '//*[@class = "sect1"]/h2[text() = "Standalone Document"]', output, 1 assert_xpath '//*[@class = "sect1"]/h2[text() = "Level 1 Section"]', output, 1 end test 'should add relative offset value to current leveloffset' do input = <<~'EOS' = Main Document Doc Writer Main preamble. 
:leveloffset: 1 = Chapter 1 content :leveloffset: +1 = Standalone Section content EOS output = convert_string input assert_xpath '//*[@class = "sect1"]/h2[text() = "Chapter 1"]', output, 1 assert_xpath '//*[@class = "sect2"]/h3[text() = "Standalone Section"]', output, 1 end end context 'Section Numbering' do test 'should create section number with one entry for level 1' do doc = empty_document sect1 = Asciidoctor::Section.new nil, nil, true doc << sect1 assert_equal '1.', sect1.sectnum end test 'should create section number with two entries for level 2' do doc = empty_document sect1 = Asciidoctor::Section.new nil, nil, true doc << sect1 sect1_1 = Asciidoctor::Section.new sect1, nil, true sect1 << sect1_1 assert_equal '1.1.', sect1_1.sectnum end test 'should create section number with three entries for level 3' do doc = empty_document sect1 = Asciidoctor::Section.new nil, nil, true doc << sect1 sect1_1 = Asciidoctor::Section.new sect1, nil, true sect1 << sect1_1 sect1_1_1 = Asciidoctor::Section.new sect1_1, nil, true sect1_1 << sect1_1_1 assert_equal '1.1.1.', sect1_1_1.sectnum end test 'should create section number for second section in level' do doc = empty_document sect1 = Asciidoctor::Section.new nil, nil, true doc << sect1 sect1_1 = Asciidoctor::Section.new sect1, nil, true sect1 << sect1_1 sect1_2 = Asciidoctor::Section.new sect1, nil, true sect1 << sect1_2 assert_equal '1.2.', sect1_2.sectnum end test 'sectnum should use specified delimiter and append string' do doc = empty_document sect1 = Asciidoctor::Section.new nil, nil, true doc << sect1 sect1_1 = Asciidoctor::Section.new sect1, nil, true sect1 << sect1_1 sect1_1_1 = Asciidoctor::Section.new sect1_1, nil, true sect1_1 << sect1_1_1 assert_equal '1,1,1,', sect1_1_1.sectnum(',') assert_equal '1:1:1', sect1_1_1.sectnum(':', false) end test 'should output section numbers when sectnums attribute is set' do input = <<~'EOS' = Title :sectnums: == Section_1 text === Section_1_1 text ==== Section_1_1_1 text == Section_2 text === Section_2_1 text === Section_2_2 text EOS output = convert_string input assert_xpath '//h2[@id="_section_1"][starts-with(text(), "1. ")]', output, 1 assert_xpath '//h3[@id="_section_1_1"][starts-with(text(), "1.1. ")]', output, 1 assert_xpath '//h4[@id="_section_1_1_1"][starts-with(text(), "1.1.1. ")]', output, 1 assert_xpath '//h2[@id="_section_2"][starts-with(text(), "2. ")]', output, 1 assert_xpath '//h3[@id="_section_2_1"][starts-with(text(), "2.1. ")]', output, 1 assert_xpath '//h3[@id="_section_2_2"][starts-with(text(), "2.2. ")]', output, 1 end test 'should output section numbers when numbered attribute is set' do input = <<~'EOS' = Title :numbered: == Section_1 text === Section_1_1 text ==== Section_1_1_1 text == Section_2 text === Section_2_1 text === Section_2_2 text EOS output = convert_string input assert_xpath '//h2[@id="_section_1"][starts-with(text(), "1. ")]', output, 1 assert_xpath '//h3[@id="_section_1_1"][starts-with(text(), "1.1. ")]', output, 1 assert_xpath '//h4[@id="_section_1_1_1"][starts-with(text(), "1.1.1. ")]', output, 1 assert_xpath '//h2[@id="_section_2"][starts-with(text(), "2. ")]', output, 1 assert_xpath '//h3[@id="_section_2_1"][starts-with(text(), "2.1. ")]', output, 1 assert_xpath '//h3[@id="_section_2_2"][starts-with(text(), "2.2. 
")]', output, 1 end test 'should not crash if child section of part is out of sequence and part numbering is disabled' do input = <<~'EOS' = Document Title :doctype: book :sectnums: = Part === Out of Sequence Section EOS using_memory_logger do |logger| output = convert_string input assert_xpath '//h1[text()="Part"]', output, 1 assert_xpath '//h3[text()=".1. Out of Sequence Section"]', output, 1 end end test 'should not hang if relative leveloffset attempts to make resolved section level negative' do input = <<~'EOS' = Document Title :doctype: book :leveloffset: -1 = Part Title == Chapter Title EOS using_memory_logger do |logger| output = convert_string input assert_xpath '//h1[text()="Part Title"]', output, 1 assert_xpath '//h1[text()="Chapter Title"]', output, 1 end end test 'should number parts when doctype is book and partnums attributes is set' do input = <<~'EOS' = Book Title :doctype: book :sectnums: :partnums: = Language == Syntax content = Processor == CLI content EOS output = convert_string input assert_xpath '//h1[@id="_language"][text() = "I: Language"]', output, 1 assert_xpath '//h1[@id="_processor"][text() = "II: Processor"]', output, 1 end test 'should assign sequential roman numerals to book parts' do input = <<~'EOS' = Book Title :doctype: book :sectnums: :partnums: = First Part part intro == First Chapter = Second Part part intro == Second Chapter EOS doc = document_from_string input assert_equal 'I', doc.sections[0].numeral assert_equal '1', doc.sections[0].sections[0].numeral assert_equal 'II', doc.sections[1].numeral assert_equal '2', doc.sections[1].sections[0].numeral end test 'should prepend value of part-signifier attribute to title of numbered part' do input = <<~'EOS' = Book Title :doctype: book :sectnums: :partnums: :part-signifier: Part = Language == Syntax content = Processor == CLI content EOS output = convert_string input assert_xpath '//h1[@id="_language"][text() = "Part I: Language"]', output, 1 assert_xpath '//h1[@id="_processor"][text() = "Part II: Processor"]', output, 1 end test 'should prepend value of chapter-signifier attribute to title of numbered chapter' do input = <<~'EOS' = Book Title :doctype: book :sectnums: :partnums: :chapter-signifier: Chapter = Language == Syntax content = Processor == CLI content EOS output = convert_string input assert_xpath '//h2[@id="_syntax"][text() = "Chapter 1. Syntax"]', output, 1 assert_xpath '//h2[@id="_cli"][text() = "Chapter 2. CLI"]', output, 1 end test 'should allow chapter number to be controlled using chapter-number attribute' do input = <<~'EOS' = Book Title :doctype: book :sectnums: :chapter-signifier: Chapter :chapter-number: 9 == Not the Beginning == Maybe the End EOS output = convert_string input assert_xpath '//h2[@id="_not_the_beginning"][text() = "Chapter 10. Not the Beginning"]', output, 1 assert_xpath '//h2[@id="_maybe_the_end"][text() = "Chapter 11. 
Maybe the End"]', output, 1 end test 'blocks should have level' do input = <<~'EOS' = Title preamble == Section 1 paragraph === Section 1.1 paragraph EOS doc = document_from_string input assert_equal 0, doc.blocks[0].level assert_equal 1, doc.blocks[1].level assert_equal 1, doc.blocks[1].blocks[0].level assert_equal 2, doc.blocks[1].blocks[1].level assert_equal 2, doc.blocks[1].blocks[1].blocks[0].level end test 'section numbers should not increment when numbered attribute is turned off within document' do input = <<~'EOS' = Document Title :numbered: :numbered!: == Colophon Section == Another Colophon Section == Final Colophon Section :numbered: == Section One === Section One Subsection == Section Two == Section Three EOS output = convert_string input assert_xpath '//h1[text()="Document Title"]', output, 1 assert_xpath '//h2[@id="_colophon_section"][text()="Colophon Section"]', output, 1 assert_xpath '//h2[@id="_another_colophon_section"][text()="Another Colophon Section"]', output, 1 assert_xpath '//h2[@id="_final_colophon_section"][text()="Final Colophon Section"]', output, 1 assert_xpath '//h2[@id="_section_one"][text()="1. Section One"]', output, 1 assert_xpath '//h3[@id="_section_one_subsection"][text()="1.1. Section One Subsection"]', output, 1 assert_xpath '//h2[@id="_section_two"][text()="2. Section Two"]', output, 1 assert_xpath '//h2[@id="_section_three"][text()="3. Section Three"]', output, 1 end test 'section numbers can be toggled even if numbered attribute is enable via the API' do input = <<~'EOS' = Document Title :numbered!: == Colophon Section == Another Colophon Section == Final Colophon Section :numbered: == Section One === Section One Subsection == Section Two == Section Three EOS output = convert_string input, attributes: { 'numbered' => '' } assert_xpath '//h1[text()="Document Title"]', output, 1 assert_xpath '//h2[@id="_colophon_section"][text()="Colophon Section"]', output, 1 assert_xpath '//h2[@id="_another_colophon_section"][text()="Another Colophon Section"]', output, 1 assert_xpath '//h2[@id="_final_colophon_section"][text()="Final Colophon Section"]', output, 1 assert_xpath '//h2[@id="_section_one"][text()="1. Section One"]', output, 1 assert_xpath '//h3[@id="_section_one_subsection"][text()="1.1. Section One Subsection"]', output, 1 assert_xpath '//h2[@id="_section_two"][text()="2. Section Two"]', output, 1 assert_xpath '//h2[@id="_section_three"][text()="3. Section Three"]', output, 1 end test 'section numbers cannot be toggled even if numbered attribute is disabled via the API' do input = <<~'EOS' = Document Title :numbered!: == Colophon Section == Another Colophon Section == Final Colophon Section :numbered: == Section One === Section One Subsection == Section Two == Section Three EOS output = convert_string input, attributes: { 'numbered!' 
=> '' } assert_xpath '//h1[text()="Document Title"]', output, 1 assert_xpath '//h2[@id="_colophon_section"][text()="Colophon Section"]', output, 1 assert_xpath '//h2[@id="_another_colophon_section"][text()="Another Colophon Section"]', output, 1 assert_xpath '//h2[@id="_final_colophon_section"][text()="Final Colophon Section"]', output, 1 assert_xpath '//h2[@id="_section_one"][text()="Section One"]', output, 1 assert_xpath '//h3[@id="_section_one_subsection"][text()="Section One Subsection"]', output, 1 assert_xpath '//h2[@id="_section_two"][text()="Section Two"]', output, 1 assert_xpath '//h2[@id="_section_three"][text()="Section Three"]', output, 1 end # NOTE AsciiDoc.py fails this test because it does not properly check for a None value when looking up the numbered attribute test 'section numbers should not increment until numbered attribute is turned back on' do input = <<~'EOS' = Document Title :numbered!: == Colophon Section == Another Colophon Section == Final Colophon Section :numbered: == Section One === Section One Subsection == Section Two == Section Three EOS output = convert_string input assert_xpath '//h1[text()="Document Title"]', output, 1 assert_xpath '//h2[@id="_colophon_section"][text()="Colophon Section"]', output, 1 assert_xpath '//h2[@id="_another_colophon_section"][text()="Another Colophon Section"]', output, 1 assert_xpath '//h2[@id="_final_colophon_section"][text()="Final Colophon Section"]', output, 1 assert_xpath '//h2[@id="_section_one"][text()="1. Section One"]', output, 1 assert_xpath '//h3[@id="_section_one_subsection"][text()="1.1. Section One Subsection"]', output, 1 assert_xpath '//h2[@id="_section_two"][text()="2. Section Two"]', output, 1 assert_xpath '//h2[@id="_section_three"][text()="3. Section Three"]', output, 1 end test 'table with asciidoc content should not disable numbering of subsequent sections' do input = <<~'EOS' = Document Title :numbered: preamble == Section One |=== a|content |=== == Section Two content EOS output = convert_string input assert_xpath '//h2[@id="_section_one"]', output, 1 assert_xpath '//h2[@id="_section_one"][text()="1. Section One"]', output, 1 assert_xpath '//h2[@id="_section_two"]', output, 1 assert_xpath '//h2[@id="_section_two"][text()="2. Section Two"]', output, 1 end test 'should not number parts when doctype is book' do input = <<~'EOS' = Document Title :doctype: book :numbered: = Part 1 == Chapter 1 content = Part 2 == Chapter 2 content EOS output = convert_string input assert_xpath '(//h1)[1][text()="Document Title"]', output, 1 assert_xpath '(//h1)[2][text()="Part 1"]', output, 1 assert_xpath '(//h1)[3][text()="Part 2"]', output, 1 assert_xpath '(//h2)[1][text()="1. Chapter 1"]', output, 1 assert_xpath '(//h2)[2][text()="2. Chapter 2"]', output, 1 end test 'should number chapters sequentially even when divided into parts' do input = <<~'EOS' = Document Title :doctype: book :numbered: == Chapter 1 content = Part 1 == Chapter 2 content = Part 2 == Chapter 3 content == Chapter 4 content EOS result = convert_string input (1..4).each do |num| assert_xpath %(//h2[@id="_chapter_#{num}"]), result, 1 assert_xpath %(//h2[@id="_chapter_#{num}"][text()="#{num}. 
Chapter #{num}"]), result, 1 end end test 'reindex_sections should correct section enumeration after sections are modified' do input = <<~'EOS' :sectnums: == First Section content == Last Section content EOS doc = document_from_string input second_section = Asciidoctor::Section.new doc, nil, true doc.blocks.insert 1, second_section doc.reindex_sections sections = doc.sections [0, 1, 2].each do |index| assert_equal index, sections[index].index assert_equal (index + 1).to_s, sections[index].numeral assert_equal index + 1, sections[index].number end end test 'should allow sections to be renumbered using numeral or deprecated number property' do input = <<~'EOS' == Somewhere in the Middle == A Bit Later == Nearing the End == The End EOS doc = document_from_string input, attributes: { 'sectnums' => '' } doc.sections.each do |sect| if sect.numeral.to_i.even? sect.numeral.next! else sect.number += 1 end end output = doc.convert standalone: false assert_xpath '//h2[text()="2. Somewhere in the Middle"]', output, 1 assert_xpath '//h2[text()="3. A Bit Later"]', output, 1 assert_xpath '//h2[text()="4. Nearing the End"]', output, 1 assert_xpath '//h2[text()="5. The End"]', output, 1 end end context 'Links and anchors' do test 'should include anchor if sectanchors document attribute is set' do input = <<~'EOS' == Installation Installation section. === Linux Linux installation instructions. EOS output = convert_string_to_embedded input, attributes: { 'sectanchors' => '' } assert_xpath '/*[@class="sect1"]/h2[@id="_installation"]/a', output, 1 assert_xpath '/*[@class="sect1"]/h2[@id="_installation"]/a[@class="anchor"][@href="#_installation"]', output, 1 assert_xpath '/*[@class="sect1"]/h2[@id="_installation"]/a/following-sibling::text()="Installation"', output, true assert_xpath '//*[@class="sect2"]/h3[@id="_linux"]/a', output, 1 assert_xpath '//*[@class="sect2"]/h3[@id="_linux"]/a[@class="anchor"][@href="#_linux"]', output, 1 assert_xpath '//*[@class="sect2"]/h3[@id="_linux"]/a/following-sibling::text()="Linux"', output, true end test 'should position after title text if sectanchors is set to after' do input = <<~'EOS' == Installation Installation section. === Linux Linux installation instructions. EOS output = convert_string_to_embedded input, attributes: { 'sectanchors' => 'after' } assert_xpath '/*[@class="sect1"]/h2[@id="_installation"]/a', output, 1 assert_xpath '/*[@class="sect1"]/h2[@id="_installation"]/a[@class="anchor"][@href="#_installation"]', output, 1 assert_xpath '/*[@class="sect1"]/h2[@id="_installation"]/a/preceding-sibling::text()="Installation"', output, true assert_xpath '//*[@class="sect2"]/h3[@id="_linux"]/a', output, 1 assert_xpath '//*[@class="sect2"]/h3[@id="_linux"]/a[@class="anchor"][@href="#_linux"]', output, 1 assert_xpath '//*[@class="sect2"]/h3[@id="_linux"]/a/preceding-sibling::text()="Linux"', output, true end test 'should link section if sectlinks document attribute is set' do input = <<~'EOS' == Installation Installation section. === Linux Linux installation instructions. 
EOS output = convert_string_to_embedded input, attributes: { 'sectlinks' => '' } assert_xpath '/*[@class="sect1"]/h2[@id="_installation"]/a', output, 1 assert_xpath '/*[@class="sect1"]/h2[@id="_installation"]/a[@class="link"][@href="#_installation"]', output, 1 assert_xpath '/*[@class="sect1"]/h2[@id="_installation"]/a[text()="Installation"]', output, 1 assert_xpath '//*[@class="sect2"]/h3[@id="_linux"]/a', output, 1 assert_xpath '//*[@class="sect2"]/h3[@id="_linux"]/a[@class="link"][@href="#_linux"]', output, 1 assert_xpath '//*[@class="sect2"]/h3[@id="_linux"]/a[text()="Linux"]', output, 1 end end context 'Special sections' do test 'should ignore style if it matches sectN' do input = <<~'EOS' = Document Title [sect1] == Section Level 1 content [sect2] == Section Level 2 content EOS output = convert_string input, backend: :docbook assert_xpath '//section', output, 2 assert_xpath '//sect1', output, 0 assert_xpath '//sect2', output, 0 end test 'should assign sectname, caption, and numeral to appendix section by default' do input = <<~'EOS' [appendix] == Attribute Options Details EOS appendix = block_from_string input assert_equal 'appendix', appendix.sectname assert_equal 'Appendix A: ', appendix.caption assert_equal 'A', appendix.numeral assert_equal 'A', appendix.number assert_equal true, appendix.numbered end test 'should prefix appendix title by numbered label even when section numbering is disabled' do input = <<~'EOS' [appendix] == Attribute Options Details EOS output = convert_string_to_embedded input assert_xpath '//h2[text()="Appendix A: Attribute Options"]', output, 1 end test 'should allow appendix number to be controlled using appendix-number attribute' do input = <<~'EOS' :appendix-number: α [appendix] == Attribute Options Details [appendix] == All the Other Stuff Details EOS output = convert_string_to_embedded input assert_xpath %(//h2[text()="Appendix #{decode_char 946}: Attribute Options"]), output, 1 assert_xpath %(//h2[text()="Appendix #{decode_char 947}: All the Other Stuff"]), output, 1 end test 'should use style from last block attribute line above section that defines a style' do input = <<~'EOS' [glossary] [appendix] == Attribute Options Details EOS output = convert_string_to_embedded input assert_xpath '//h2[text()="Appendix A: Attribute Options"]', output, 1 end test 'setting ID using style shorthand should not clear section style' do input = <<~'EOS' [appendix] [#attribute-options] == Attribute Options Details EOS output = convert_string_to_embedded input assert_xpath '//h2[@id="attribute-options"][text()="Appendix A: Attribute Options"]', output, 1 end test 'should use custom appendix caption if specified' do input = <<~'EOS' :appendix-caption: App [appendix] == Attribute Options Details EOS output = convert_string_to_embedded input assert_xpath '//h2[text()="App A: Attribute Options"]', output, 1 end test 'should only assign letter to appendix when numbered is enabled and appendix caption is not set' do input = <<~'EOS' :numbered: :!appendix-caption: [appendix] == Attribute Options Details EOS output = convert_string_to_embedded input assert_xpath '//h2[text()="A. 
Attribute Options"]', output, 1 end test 'should increment appendix number for each appendix section' do input = <<~'EOS' [appendix] == Attribute Options Details [appendix] == Migration Details EOS output = convert_string_to_embedded input assert_xpath '(//h2)[1][text()="Appendix A: Attribute Options"]', output, 1 assert_xpath '(//h2)[2][text()="Appendix B: Migration"]', output, 1 end test 'should continue numbering after appendix' do input = <<~'EOS' :numbered: == First Section content [appendix] == Attribute Options content == Migration content EOS output = convert_string_to_embedded input assert_xpath '(//h2)[1][text()="1. First Section"]', output, 1 assert_xpath '(//h2)[2][text()="Appendix A: Attribute Options"]', output, 1 assert_xpath '(//h2)[3][text()="2. Migration"]', output, 1 end test 'should number appendix subsections using appendix letter' do input = <<~'EOS' :numbered: [appendix] == Attribute Options Details === Optional Attributes Details EOS output = convert_string_to_embedded input assert_xpath '(//h2)[1][text()="Appendix A: Attribute Options"]', output, 1 assert_xpath '(//h3)[1][text()="A.1. Optional Attributes"]', output, 1 end test 'should not number level 4 section by default' do input = <<~'EOS' :numbered: == Level_1 === Level_2 ==== Level_3 ===== Level_4 text EOS output = convert_string_to_embedded input assert_xpath '//h5', output, 1 assert_xpath '//h5[text()="Level_4"]', output, 1 end test 'should only number levels up to value defined by sectnumlevels attribute' do input = <<~'EOS' :numbered: :sectnumlevels: 2 == Level_1 === Level_2 ==== Level_3 ===== Level_4 text EOS output = convert_string_to_embedded input assert_xpath '//h2', output, 1 assert_xpath '//h2[text()="1. Level_1"]', output, 1 assert_xpath '//h3', output, 1 assert_xpath '//h3[text()="1.1. Level_2"]', output, 1 assert_xpath '//h4', output, 1 assert_xpath '//h4[text()="Level_3"]', output, 1 assert_xpath '//h5', output, 1 assert_xpath '//h5[text()="Level_4"]', output, 1 end test 'should not number sections or subsections in regions where numbered is off' do input = <<~'EOS' :numbered: == Section One :numbered!: [appendix] == Attribute Options Details [appendix] == Migration Details === Gotchas Details [glossary] == Glossary Terms EOS output = convert_string_to_embedded input assert_xpath '(//h2)[1][text()="1. Section One"]', output, 1 assert_xpath '(//h2)[2][text()="Appendix A: Attribute Options"]', output, 1 assert_xpath '(//h2)[3][text()="Appendix B: Migration"]', output, 1 assert_xpath '(//h3)[1][text()="Gotchas"]', output, 1 assert_xpath '(//h2)[4][text()="Glossary"]', output, 1 end test 'should not number sections or subsections in toc in regions where numbered is off' do input = <<~'EOS' :numbered: :toc: == Section One :numbered!: [appendix] == Attribute Options Details [appendix] == Migration Details === Gotchas Details [glossary] == Glossary Terms EOS output = convert_string input assert_xpath '//*[@id="toc"]/ul//li/a[text()="1. 
Section One"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="Appendix A: Attribute Options"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="Appendix B: Migration"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="Gotchas"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="Glossary"]', output, 1 end test 'should only number sections in toc up to value defined by sectnumlevels attribute' do input = <<~'EOS' :numbered: :toc: :sectnumlevels: 2 :toclevels: 3 == Level 1 === Level 2 ==== Level 3 EOS output = convert_string input assert_xpath '//*[@id="toc"]//a[@href="#_level_1"][text()="1. Level 1"]', output, 1 assert_xpath '//*[@id="toc"]//a[@href="#_level_2"][text()="1.1. Level 2"]', output, 1 assert_xpath '//*[@id="toc"]//a[@href="#_level_3"][text()="Level 3"]', output, 1 end test 'should not number special sections or their subsections by default except for appendices' do input = <<~'EOS' :doctype: book :sectnums: [preface] == Preface === Preface Subsection content == Section One content [appendix] == Attribute Options Details [appendix] == Migration Details === Gotchas Details [glossary] == Glossary Terms EOS output = convert_string_to_embedded input assert_xpath '(//h2)[1][text()="Preface"]', output, 1 assert_xpath '(//h3)[1][text()="Preface Subsection"]', output, 1 assert_xpath '(//h2)[2][text()="1. Section One"]', output, 1 assert_xpath '(//h2)[3][text()="Appendix A: Attribute Options"]', output, 1 assert_xpath '(//h2)[4][text()="Appendix B: Migration"]', output, 1 assert_xpath '(//h3)[2][text()="B.1. Gotchas"]', output, 1 assert_xpath '(//h2)[5][text()="Glossary"]', output, 1 end test 'should not number special sections or their subsections in toc by default except for appendices' do input = <<~'EOS' :doctype: book :sectnums: :toc: [preface] == Preface === Preface Subsection content == Section One content [appendix] == Attribute Options Details [appendix] == Migration Details === Gotchas Details [glossary] == Glossary Terms EOS output = convert_string input assert_xpath '//*[@id="toc"]/ul//li/a[text()="Preface"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="Preface Subsection"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="1. Section One"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="Appendix A: Attribute Options"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="Appendix B: Migration"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="B.1. Gotchas"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="Glossary"]', output, 1 end test 'should number special sections and their subsections when sectnums is all' do input = <<~'EOS' :doctype: book :sectnums: all [preface] == Preface === Preface Subsection content == Section One content [appendix] == Attribute Options Details [appendix] == Migration Details === Gotchas Details [glossary] == Glossary Terms EOS output = convert_string_to_embedded input assert_xpath '(//h2)[1][text()="1. Preface"]', output, 1 assert_xpath '(//h3)[1][text()="1.1. Preface Subsection"]', output, 1 assert_xpath '(//h2)[2][text()="2. Section One"]', output, 1 assert_xpath '(//h2)[3][text()="Appendix A: Attribute Options"]', output, 1 assert_xpath '(//h2)[4][text()="Appendix B: Migration"]', output, 1 assert_xpath '(//h3)[2][text()="B.1. Gotchas"]', output, 1 assert_xpath '(//h2)[5][text()="3. 
Glossary"]', output, 1 end test 'should number special sections and their subsections in toc when sectnums is all' do input = <<~'EOS' :doctype: book :sectnums: all :toc: [preface] == Preface === Preface Subsection content == Section One content [appendix] == Attribute Options Details [appendix] == Migration Details === Gotchas Details [glossary] == Glossary Terms EOS output = convert_string input assert_xpath '//*[@id="toc"]/ul//li/a[text()="1. Preface"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="1.1. Preface Subsection"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="2. Section One"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="Appendix A: Attribute Options"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="Appendix B: Migration"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="B.1. Gotchas"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="3. Glossary"]', output, 1 end test 'level 0 special sections in multipart book should be coerced to level 1' do input = <<~'EOS' = Multipart Book Doc Writer :doctype: book [preface] = Preface Preface text [appendix] = Appendix Appendix text EOS output = convert_string input assert_xpath '//h2[@id = "_preface"]', output, 1 assert_xpath '//h2[@id = "_appendix"]', output, 1 end test 'should output docbook elements that correspond to special sections in book doctype' do input = <<~'EOS' = Multipart Book :doctype: book :idprefix: [abstract] = Abstract Title Normal chapter (no abstract in book) [dedication] = Dedication Title Dedication content [preface] = Preface Title Preface content === Preface sub-section Preface subsection content = Part 1 [partintro] .Part intro title Part intro content == Chapter 1 blah blah == Chapter 2 blah blah = Part 2 [partintro] blah blah == Chapter 3 blah blah == Chapter 4 blah blah [appendix] = Appendix Title Appendix content === Appendix sub-section Appendix sub-section content [bibliography] = Bibliography Title Bibliography content [glossary] = Glossary Title Glossary content [colophon] = Colophon Title Colophon content [index] = Index Title EOS output = convert_string input, backend: 'docbook' assert_xpath '/book/chapter[@xml:id="abstract_title"]', output, 1 assert_xpath '/book/chapter[@xml:id="abstract_title"]/title[text()="Abstract Title"]', output, 1 assert_xpath '/book/chapter/following-sibling::dedication[@xml:id="dedication_title"]', output, 1 assert_xpath '/book/chapter/following-sibling::dedication[@xml:id="dedication_title"]/title[text()="Dedication Title"]', output, 1 assert_xpath '/book/dedication/following-sibling::preface[@xml:id="preface_title"]', output, 1 assert_xpath '/book/dedication/following-sibling::preface[@xml:id="preface_title"]/title[text()="Preface Title"]', output, 1 assert_xpath '/book/preface/section[@xml:id="preface_sub_section"]', output, 1 assert_xpath '/book/preface/section[@xml:id="preface_sub_section"]/title[text()="Preface sub-section"]', output, 1 assert_xpath '/book/preface/following-sibling::part[@xml:id="part_1"]', output, 1 assert_xpath '/book/preface/following-sibling::part[@xml:id="part_1"]/title[text()="Part 1"]', output, 1 assert_xpath '/book/part[@xml:id="part_1"]/partintro', output, 1 assert_xpath '/book/part[@xml:id="part_1"]/partintro/title[text()="Part intro title"]', output, 1 assert_xpath '/book/part[@xml:id="part_1"]/partintro/following-sibling::chapter[@xml:id="chapter_1"]', output, 1 assert_xpath 
'/book/part[@xml:id="part_1"]/partintro/following-sibling::chapter[@xml:id="chapter_1"]/title[text()="Chapter 1"]', output, 1 assert_xpath '(/book/part)[2]/following-sibling::appendix[@xml:id="appendix_title"]', output, 1 assert_xpath '(/book/part)[2]/following-sibling::appendix[@xml:id="appendix_title"]/title[text()="Appendix Title"]', output, 1 assert_xpath '/book/appendix/section[@xml:id="appendix_sub_section"]', output, 1 assert_xpath '/book/appendix/section[@xml:id="appendix_sub_section"]/title[text()="Appendix sub-section"]', output, 1 assert_xpath '/book/appendix/following-sibling::bibliography[@xml:id="bibliography_title"]', output, 1 assert_xpath '/book/appendix/following-sibling::bibliography[@xml:id="bibliography_title"]/title[text()="Bibliography Title"]', output, 1 assert_xpath '/book/bibliography/following-sibling::glossary[@xml:id="glossary_title"]', output, 1 assert_xpath '/book/bibliography/following-sibling::glossary[@xml:id="glossary_title"]/title[text()="Glossary Title"]', output, 1 assert_xpath '/book/glossary/following-sibling::colophon[@xml:id="colophon_title"]', output, 1 assert_xpath '/book/glossary/following-sibling::colophon[@xml:id="colophon_title"]/title[text()="Colophon Title"]', output, 1 assert_xpath '/book/colophon/following-sibling::index[@xml:id="index_title"]', output, 1 assert_xpath '/book/colophon/following-sibling::index[@xml:id="index_title"]/title[text()="Index Title"]', output, 1 end test 'abstract section maps to abstract element in docbook for article doctype' do input = <<~'EOS' = Article :idprefix: [abstract] == Abstract Title Abstract content EOS output = convert_string_to_embedded input, backend: 'docbook' assert_xpath '/abstract[@xml:id="abstract_title"]', output, 1 assert_xpath '/abstract[@xml:id="abstract_title"]/title[text()="Abstract Title"]', output, 1 end test 'should allow a special section to be nested at arbitrary depth in DocBook output' do input = <<~'EOS' = Document Title :doctype: book == Glossaries [glossary] === Glossary A Glossaries are optional. Glossaries entries are an example of a style of AsciiDoc description lists. [glossary] A glossary term:: The corresponding definition. A second glossary term:: The corresponding definition. EOS output = convert_string input, backend: :docbook assert_xpath '//glossary', output, 1 assert_xpath '//chapter/glossary', output, 1 assert_xpath '//glossary/title[text()="Glossary A"]', output, 1 assert_xpath '//glossary/glossentry', output, 2 end test 'should drop title on special section in DocBook output if notitle or untitled option is set' do %w(notitle untitled).each do |option| input = <<~EOS [dedication%#{option}] == Dedication content EOS output = convert_string_to_embedded input, backend: :docbook assert_xpath '/dedication', output, 1 assert_xpath '/dedication/title', output, 0 end end end context "heading patterns in blocks" do test "should not interpret a listing block as a heading" do input = <<~'EOS' Section ------- ---- code ---- fin. EOS output = convert_string input assert_xpath "//h2", output, 1 end test "should not interpret an open block as a heading" do input = <<~'EOS' Section ------- -- ha -- fin. EOS output = convert_string input assert_xpath "//h2", output, 1 end test "should not interpret an attribute list as a heading" do input = <<~'EOS' Section ======= preamble [TIP] ==== This should be a tip, not a heading. 
==== EOS output = convert_string input assert_xpath "//*[@class='admonitionblock tip']//p[text() = 'This should be a tip, not a heading.']", output, 1 end test "should not match a heading in a description list" do input = <<~'EOS' Section ------- term1:: + ---- list = [1, 2, 3]; ---- term2:: == not a heading term3:: def // fin. EOS output = convert_string input assert_xpath "//h2", output, 1 assert_xpath "//dl", output, 1 end test "should not match a heading in a bulleted list" do input = <<~'EOS' Section ------- * first + ---- list = [1, 2, 3]; ---- + * second == not a heading * third fin. EOS output = convert_string input assert_xpath "//h2", output, 1 assert_xpath "//ul", output, 1 end test "should not match a heading in a block" do input = <<~'EOS' ==== == not a heading ==== EOS output = convert_string input assert_xpath "//h2", output, 0 assert_xpath "//*[@class='exampleblock']//p[text() = '== not a heading']", output, 1 end end context 'Table of Contents' do test 'should output unnumbered table of contents in header if toc attribute is set' do input = <<~'EOS' = Article :toc: == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... === Interlude While they were waiting... == Section Three That's all she wrote! EOS output = convert_string input assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/*[@id="toctitle"][text()="Table of Contents"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul[@class="sectlevel1"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]//ul', output, 2 assert_xpath '//*[@id="header"]//*[@id="toc"]//li', output, 4 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li[1]/a[@href="#_section_one"][text()="Section One"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li/ul', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li/ul[@class="sectlevel2"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li/ul/li', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li/ul/li/a[@href="#_interlude"][text()="Interlude"]', output, 1 assert_xpath '((//*[@id="header"]//*[@id="toc"]/ul)[1]/li)[3]/a[@href="#_section_three"][text()="Section Three"]', output, 1 end test 'should output numbered table of contents in header if toc and numbered attributes are set' do input = <<~'EOS' = Article :toc: :numbered: == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... === Interlude While they were waiting... == Section Three That's all she wrote! EOS output = convert_string input assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/*[@id="toctitle"][text()="Table of Contents"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]//ul', output, 2 assert_xpath '//*[@id="header"]//*[@id="toc"]//li', output, 4 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li[1]/a[@href="#_section_one"][text()="1. Section One"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li/ul/li', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li/ul/li/a[@href="#_interlude"][text()="2.1. Interlude"]', output, 1 assert_xpath '((//*[@id="header"]//*[@id="toc"]/ul)[1]/li)[3]/a[@href="#_section_three"][text()="3. 
Section Three"]', output, 1 end test 'should output a table of contents that honors numbered setting at position of section in document' do input = <<~'EOS' = Article :toc: :numbered: == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... === Interlude While they were waiting... :numbered!: == Section Three That's all she wrote! EOS output = convert_string input assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/*[@id="toctitle"][text()="Table of Contents"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]//ul', output, 2 assert_xpath '//*[@id="header"]//*[@id="toc"]//li', output, 4 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li[1]/a[@href="#_section_one"][text()="1. Section One"]', output, 1 assert_xpath '((//*[@id="header"]//*[@id="toc"]/ul)[1]/li)[3]/a[@href="#_section_three"][text()="Section Three"]', output, 1 end test 'should not number parts in table of contents for book doctype when numbered attribute is set' do input = <<~'EOS' = Book :doctype: book :toc: :numbered: = Part 1 == First Section of Part 1 blah == Second Section of Part 1 blah = Part 2 == First Section of Part 2 blah EOS output = convert_string input assert_xpath '//*[@id="toc"]', output, 1 assert_xpath '//*[@id="toc"]/ul', output, 1 assert_xpath '//*[@id="toc"]/ul[@class="sectlevel0"]', output, 1 assert_xpath '//*[@id="toc"]/ul[@class="sectlevel0"]/li', output, 2 assert_xpath '(//*[@id="toc"]/ul[@class="sectlevel0"]/li)[1]/a[text()="Part 1"]', output, 1 assert_xpath '(//*[@id="toc"]/ul[@class="sectlevel0"]/li)[2]/a[text()="Part 2"]', output, 1 assert_xpath '(//*[@id="toc"]/ul[@class="sectlevel0"]/li)[1]/ul', output, 1 assert_xpath '(//*[@id="toc"]/ul[@class="sectlevel0"]/li)[1]/ul[@class="sectlevel1"]', output, 1 assert_xpath '(//*[@id="toc"]/ul[@class="sectlevel0"]/li)[1]/ul/li', output, 2 assert_xpath '((//*[@id="toc"]/ul[@class="sectlevel0"]/li)[1]/ul/li)[1]/a[text()="1. First Section of Part 1"]', output, 1 end test 'should output table of contents in header if toc2 attribute is set' do input = <<~'EOS' = Article :toc2: :numbered: == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... EOS output = convert_string input assert_xpath '//body[@class="article toc2 toc-left"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc2"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li[1]/a[@href="#_section_one"][text()="1. Section One"]', output, 1 end test 'should set toc position if toc attribute is set to position' do input = <<~'EOS' = Article :toc: > :numbered: == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... EOS output = convert_string input assert_xpath '//body[@class="article toc2 toc-right"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc2"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li[1]/a[@href="#_section_one"][text()="1. Section One"]', output, 1 end test 'should set toc position if toc and toc-position attributes are set' do input = <<~'EOS' = Article :toc: :toc-position: right :numbered: == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... 
EOS output = convert_string input assert_xpath '//body[@class="article toc2 toc-right"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc2"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li[1]/a[@href="#_section_one"][text()="1. Section One"]', output, 1 end test 'should set toc position if toc2 and toc-position attribute are set' do input = <<~'EOS' = Article :toc2: :toc-position: right :numbered: == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... EOS output = convert_string input assert_xpath '//body[@class="article toc2 toc-right"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc2"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li[1]/a[@href="#_section_one"][text()="1. Section One"]', output, 1 end test 'should set toc position if toc attribute is set to direction' do input = <<~'EOS' = Article :toc: right :numbered: == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... EOS output = convert_string input assert_xpath '//body[@class="article toc2 toc-right"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc2"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li[1]/a[@href="#_section_one"][text()="1. Section One"]', output, 1 end test 'should set toc placement to preamble if toc attribute is set to preamble' do input = <<~'EOS' = Article :toc: preamble Yada yada == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... EOS output = convert_string input assert_css '#preamble #toc', output, 1 assert_css '#preamble .sectionbody + #toc', output, 1 end test 'should use document attributes toc-class, toc-title and toclevels to create toc' do input = <<~'EOS' = Article :toc: :toc-title: Contents :toc-class: toc2 :toclevels: 1 == Section 1 === Section 1.1 ==== Section 1.1.1 ==== Section 1.1.2 === Section 1.2 == Section 2 Fin. EOS output = convert_string input assert_css '#header #toc', output, 1 assert_css '#header #toc.toc2', output, 1 assert_css '#header #toc li', output, 2 assert_css '#header #toc #toctitle', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/*[@id="toctitle"][text()="Contents"]', output, 1 end test 'should not output table of contents if toc-placement attribute is unset' do input = <<~'EOS' = Article :toc: :toc-placement!: == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... EOS output = convert_string input assert_xpath '//*[@id="toc"]', output, 0 end test 'should output table of contents at location of toc macro' do input = <<~'EOS' = Article :toc: :toc-placement: macro Once upon a time... toc::[] == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... EOS output = convert_string input assert_css '#preamble #toc', output, 1 assert_css '#preamble .paragraph + #toc', output, 1 end test 'should output table of contents at location of toc macro in embedded document' do input = <<~'EOS' = Article :toc: :toc-placement: macro Once upon a time... toc::[] == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... 
EOS output = convert_string_to_embedded input assert_css '#preamble:root #toc', output, 1 assert_css '#preamble:root .paragraph + #toc', output, 1 end test 'should output table of contents at default location in embedded document if toc attribute is set' do input = <<~'EOS' = Article :showtitle: :toc: Once upon a time... == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... EOS output = convert_string_to_embedded input assert_css 'h1:root', output, 1 assert_css 'h1:root + #toc:root', output, 1 assert_css 'h1:root + #toc:root + #preamble:root', output, 1 end test 'should not activate toc macro if toc-placement is not set' do input = <<~'EOS' = Article :toc: Once upon a time... toc::[] == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... EOS output = convert_string input assert_css '#toc', output, 1 assert_css '#toctitle', output, 1 assert_css '.toc', output, 1 assert_css '#content .toc', output, 0 end test 'should only output toc at toc macro if toc is macro' do input = <<~'EOS' = Article :toc: macro Once upon a time... toc::[] == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... EOS output = convert_string input assert_css '#toc', output, 1 assert_css '#toctitle', output, 1 assert_css '.toc', output, 1 assert_css '#content .toc', output, 1 end test 'should use global attributes for toc-title, toc-class and toclevels for toc macro' do input = <<~'EOS' = Article :toc: :toc-placement: macro :toc-title: Contents :toc-class: contents :toclevels: 1 Preamble. toc::[] == Section 1 === Section 1.1 ==== Section 1.1.1 ==== Section 1.1.2 === Section 1.2 == Section 2 Fin. EOS output = convert_string input assert_css '#toc', output, 1 assert_css '#toctitle', output, 1 assert_css '#preamble #toc', output, 1 assert_css '#preamble #toc.contents', output, 1 assert_xpath '//*[@id="toc"]/*[@class="title"][text() = "Contents"]', output, 1 assert_css '#toc li', output, 2 assert_xpath '(//*[@id="toc"]//li)[1]/a[text() = "Section 1"]', output, 1 assert_xpath '(//*[@id="toc"]//li)[2]/a[text() = "Section 2"]', output, 1 end test 'should honor id, title, role and level attributes on toc macro' do input = <<~'EOS' = Article :toc: :toc-placement: macro :toc-title: Ignored :toc-class: ignored :toclevels: 5 :tocdepth: 1 Preamble. [[contents]] [role="contents"] .Contents toc::[levels={tocdepth}] == Section 1 === Section 1.1 ==== Section 1.1.1 ==== Section 1.1.2 === Section 1.2 == Section 2 Fin. EOS output = convert_string input assert_css '#toc', output, 0 assert_css '#toctitle', output, 0 assert_css '#preamble #contents', output, 1 assert_css '#preamble #contents.contents', output, 1 assert_xpath '//*[@id="contents"]/*[@class="title"][text() = "Contents"]', output, 1 assert_css '#contents li', output, 2 assert_xpath '(//*[@id="contents"]//li)[1]/a[text() = "Section 1"]', output, 1 assert_xpath '(//*[@id="contents"]//li)[2]/a[text() = "Section 2"]', output, 1 end test 'child toc levels should not have additional bullet at parent level in html' do input = <<~'EOS' = Article :toc: == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... === Interlude While they were waiting... == Section Three That's all she wrote! 
EOS output = convert_string input assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/*[@id="toctitle"][text()="Table of Contents"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]//ul', output, 2 assert_xpath '//*[@id="header"]//*[@id="toc"]//li', output, 4 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li[2]/a[@href="#_section_two"][text()="Section Two"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li/ul/li', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li[2]/ul/li', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li/ul/li/a[@href="#_interlude"][text()="Interlude"]', output, 1 assert_xpath '((//*[@id="header"]//*[@id="toc"]/ul)[1]/li)[3]/a[@href="#_section_three"][text()="Section Three"]', output, 1 end test 'should not display a table of contents if document has no sections' do input_src = <<~'EOS' = Document Title :toc: toc::[] This document has no sections. It only has content. EOS ['', 'left', 'preamble', 'macro'].each do |placement| input = input_src.gsub(':toc:', "\\& #{placement}") output = convert_string input assert_css '#toctitle', output, 0 end end test 'should drop anchors from contents of entries in table of contents' do input = <<~'EOS' = Document Title :toc: == [[un]]Section One content == [[two]][[deux]]Section Two content == Plant Trees by https://ecosia.org[Searching] content EOS output = convert_string_to_embedded input assert_xpath '/*[@id="toc"]', output, 1 toc_links = xmlnodes_at_xpath '/*[@id="toc"]//li', output assert_equal 3, toc_links.size assert_equal 'Section One', toc_links[0].inner_html assert_equal 'Section Two', toc_links[1].inner_html assert_equal 'Plant Trees by Searching', toc_links[2].inner_html end test 'should not remove non-anchor tags from contents of entries in table of contents' do input = <<~'EOS' = Document Title :toc: :icons: font == `run` command content == icon:bug[] Issues content == https://ecosia.org[_Sustainable_ Searches] content EOS output = convert_string_to_embedded input, safe: :safe assert_xpath '/*[@id="toc"]', output, 1 toc_links = xmlnodes_at_xpath '/*[@id="toc"]//li', output assert_equal 3, toc_links.size assert_equal 'run command', toc_links[0].inner_html assert_equal ' Issues', toc_links[1].inner_html assert_equal 'Sustainable Searches', toc_links[2].inner_html end end context 'article doctype' do test 'should create only sections in docbook backend' do input = <<~'EOS' = Article Doc Writer == Section 1 The adventure. === Subsection One It was a dark and stormy night... === Subsection Two They couldn't believe their eyes when... == Section 2 The return. === Subsection Three While they were returning... === Subsection Four That's all she wrote! 
EOS output = convert_string input, backend: 'docbook' assert_xpath '//part', output, 0 assert_xpath '//chapter', output, 0 assert_xpath '/article/section', output, 2 assert_xpath '/article/section[1]/title[text() = "Section 1"]', output, 1 assert_xpath '/article/section[2]/title[text() = "Section 2"]', output, 1 assert_xpath '/article/section/section', output, 4 assert_xpath '/article/section[1]/section[1]/title[text() = "Subsection One"]', output, 1 assert_xpath '/article/section[2]/section[1]/title[text() = "Subsection Three"]', output, 1 end end context 'book doctype' do test 'document title with level 0 headings' do input = <<~'EOS' = Book Doc Writer :doctype: book = Chapter One [partintro] It was a dark and stormy night... == Scene One Someone's gonna get axed. = Chapter Two [partintro] They couldn't believe their eyes when... == Interlude While they were waiting... = Chapter Three == Scene One That's all she wrote! EOS output = convert_string(input) assert_css 'body.book', output, 1 assert_css 'h1', output, 4 assert_css '#header h1', output, 1 assert_css '#content h1', output, 3 assert_css '#content h1.sect0', output, 3 assert_css 'h2', output, 3 assert_css '#content h2', output, 3 assert_xpath '//h1[@id="_chapter_one"][text() = "Chapter One"]', output, 1 assert_xpath '//h1[@id="_chapter_two"][text() = "Chapter Two"]', output, 1 assert_xpath '//h1[@id="_chapter_three"][text() = "Chapter Three"]', output, 1 assert_css '#_chapter_one + .openblock.partintro p', output, 1 assert_css '#_chapter_two + .openblock.partintro p', output, 1 end test 'should print error if level 0 section comes after nested section and doctype is not book' do input = <<~'EOS' = Document Title == Level 1 Section === Level 2 Section = Level 0 Section EOS using_memory_logger do |logger| convert_string input assert_message logger, :ERROR, ': line 7: level 0 sections can only be used when doctype is book', Hash end end test 'should add class matching role to part' do input = <<~'EOS' = Book Title :doctype: book [.newbie] = Part 1 == Chapter A content = Part 2 == Chapter B content EOS result = convert_string_to_embedded input assert_css 'h1.sect0', result, 2 assert_css 'h1.sect0.newbie', result, 1 assert_css 'h1.sect0.newbie#_part_1', result, 1 end test 'should assign appropriate sectname for section type' do input = <<~'EOS' = Book Title :doctype: book :idprefix: :idseparator: - = Part Title == Chapter Title === Section Title content [appendix] == Appendix Title === Appendix Section Title content EOS doc = document_from_string input assert_equal 'header', doc.header.sectname assert_equal 'part', (doc.find_by id: 'part-title')[0].sectname assert_equal 'chapter', (doc.find_by id: 'chapter-title')[0].sectname assert_equal 'section', (doc.find_by id: 'section-title')[0].sectname assert_equal 'appendix', (doc.find_by id: 'appendix-title')[0].sectname assert_equal 'section', (doc.find_by id: 'appendix-section-title')[0].sectname end test 'should allow part intro to be defined using special section' do input = <<~'EOS' = Book :doctype: book = Part 1 [partintro] == Part Intro Part intro content == Chapter 1 Chapter content EOS output = convert_string input, backend: 'docbook' assert_xpath '/book/part[@xml:id="_part_1"]', output, 1 assert_xpath '/book/part[@xml:id="_part_1"]/partintro', output, 1 assert_xpath '/book/part[@xml:id="_part_1"]/partintro[@xml:id="_part_intro"]', output, 1 assert_xpath '/book/part[@xml:id="_part_1"]/partintro[@xml:id="_part_intro"]/title[text()="Part Intro"]', output, 1 assert_xpath 
'/book/part[@xml:id="_part_1"]/partintro[@xml:id="_part_intro"]/following-sibling::chapter[@xml:id="_chapter_1"]', output, 1 end test 'should add partintro style to child paragraph of part' do input = <<~'EOS' = Book :doctype: book = Part 1 part intro--a summary == Chapter 1 EOS doc = document_from_string input partintro = doc.blocks.first.blocks.first assert_equal :open, partintro.context assert_equal :compound, partintro.content_model assert_empty partintro.lines assert_empty partintro.subs assert_equal 'partintro', partintro.style assert_equal :paragraph, partintro.blocks[0].context assert_equal ['part intro--a summary'], partintro.blocks[0].lines assert_include 'part intro—​a summary', partintro.convert end test 'should preserve title on partintro defined as partintro paragraph' do input = <<~'EOS' = Book :doctype: book = Part 1 .Intro [partintro] Read this first. == Chapter 1 EOS doc = document_from_string input partintro = doc.blocks.first.blocks.first assert_equal :open, partintro.context assert_equal 'Intro', partintro.title end test 'should not promote title on partintro defined as normal paragraph' do input = <<~'EOS' = Book :doctype: book = Part 1 .Intro Read this first. == Chapter 1 EOS doc = document_from_string input partintro = doc.blocks.first.blocks.first assert_equal :open, partintro.context assert_nil partintro.title assert_equal 'Intro', partintro.blocks[0].title end test 'should add partintro style to child open block of part' do input = <<~'EOS' = Book :doctype: book = Part 1 -- part intro -- == Chapter 1 EOS doc = document_from_string input partintro = doc.blocks.first.blocks.first assert_equal :open, partintro.context assert_equal :compound, partintro.content_model assert_equal 'partintro', partintro.style assert_equal :paragraph, partintro.blocks[0].context end test 'should wrap child paragraphs of part in partintro open block' do input = <<~'EOS' = Book :doctype: book = Part 1 part intro more part intro == Chapter 1 EOS doc = document_from_string input partintro = doc.blocks.first.blocks.first assert_equal :open, partintro.context assert_equal :compound, partintro.content_model assert_equal 'partintro', partintro.style assert_equal 2, partintro.blocks.size assert_equal :paragraph, partintro.blocks[0].context assert_equal :paragraph, partintro.blocks[1].context end test 'should warn if part has no sections' do input = <<~'EOS' = Book :doctype: book = Part 1 [partintro] intro EOS using_memory_logger do |logger| document_from_string input assert_message logger, :ERROR, ': line 8: invalid part, must have at least one section (e.g., chapter, appendix, etc.)', Hash end end test 'should create parts and chapters in docbook backend' do input = <<~'EOS' = Book Doc Writer :doctype: book = Part 1 [partintro] The adventure. == Chapter One It was a dark and stormy night... == Chapter Two They couldn't believe their eyes when... = Part 2 [partintro] The return. == Chapter Three While they were returning... == Chapter Four That's all she wrote! 
EOS output = convert_string input, backend: 'docbook' assert_xpath '//chapter/chapter', output, 0 assert_xpath '/book/part', output, 2 assert_xpath '/book/part[1]/title[text() = "Part 1"]', output, 1 assert_xpath '/book/part[2]/title[text() = "Part 2"]', output, 1 assert_xpath '/book/part/chapter', output, 4 assert_xpath '/book/part[1]/chapter[1]/title[text() = "Chapter One"]', output, 1 assert_xpath '/book/part[2]/chapter[1]/title[text() = "Chapter Three"]', output, 1 end test 'subsections in preface and appendix should start at level 2' do input = <<~'EOS' = Multipart Book Doc Writer :doctype: book [preface] = Preface Preface content === Preface subsection Preface subsection content = Part 1 .Part intro title [partintro] Part intro content == Chapter 1 content [appendix] = Appendix Appendix content === Appendix subsection Appendix subsection content EOS output = nil using_memory_logger do |logger| output = convert_string input, backend: 'docbook' assert logger.empty? end assert_xpath '/book/preface', output, 1 assert_xpath '/book/preface/section', output, 1 assert_xpath '/book/part', output, 1 assert_xpath '/book/part/partintro', output, 1 assert_xpath '/book/part/partintro/title', output, 1 assert_xpath '/book/part/partintro/simpara', output, 1 assert_xpath '/book/appendix', output, 1 assert_xpath '/book/appendix/section', output, 1 end end end asciidoctor-2.0.20/test/substitutions_test.rb000066400000000000000000003651561443135032600214210ustar00rootroot00000000000000# frozen_string_literal: true require_relative 'test_helper' # TODO # - test negatives # - test role on every quote type context 'Substitutions' do BACKSLASH = ?\\ context 'Dispatcher' do test 'apply normal substitutions' do para = block_from_string("[blue]_http://asciidoc.org[AsciiDoc]_ & [red]*Ruby*\n§ Making +++documentation+++ together +\nsince (C) {inception_year}.") para.document.attributes['inception_year'] = '2012' result = para.apply_subs(para.source) assert_equal %{AsciiDoc & Ruby\n§ Making documentation together
\nsince © 2012.}, result
    end

    test 'apply_subs should not modify string directly' do
      input = '<html> -- the root of all web'
      para = block_from_string input
      para_source = para.source
      result = para.apply_subs para_source
      assert_equal '<html> — the root of all web', result
      assert_equal input, para_source
    end

    test 'should not drop trailing blank lines when performing substitutions' do
      para = block_from_string %([%hardbreaks]\nthis\nis\n-> {program})
      para.lines << ''
      para.lines << ''
      para.document.attributes['program'] = 'Asciidoctor'
      result = para.apply_subs(para.lines)
      assert_equal ['this<br>', 'is<br>', '→ Asciidoctor<br>', '<br>', ''], result
      result = para.apply_subs(para.lines * "\n")
      assert_equal %(this<br>\nis<br>\n→ Asciidoctor<br>\n<br>
    \n), result end test 'should expand subs passed to expand_subs' do para = block_from_string %({program}\n*bold*\n2 > 1) para.document.attributes['program'] = 'Asciidoctor' assert_equal [:specialcharacters], (para.expand_subs [:specialchars]) refute para.expand_subs([:none]) assert_equal [:specialcharacters, :quotes, :attributes, :replacements, :macros, :post_replacements], (para.expand_subs [:normal]) end test 'apply_subs should allow the subs argument to be nil' do block = block_from_string %([pass]\n*raw*) result = block.apply_subs block.source, nil assert_equal '*raw*', result end end context 'Quotes' do test 'single-line double-quoted string' do para = block_from_string(%q{``a few quoted words''}, attributes: { 'compat-mode' => '' }) assert_equal '“a few quoted words”', para.sub_quotes(para.source) para = block_from_string(%q{"`a few quoted words`"}) assert_equal '“a few quoted words”', para.sub_quotes(para.source) para = block_from_string(%q{"`a few quoted words`"}, backend: 'docbook') assert_equal 'a few quoted words', para.sub_quotes(para.source) end test 'escaped single-line double-quoted string' do para = block_from_string %(#{BACKSLASH}``a few quoted words''), attributes: { 'compat-mode' => '' } assert_equal %q(‘`a few quoted words’'), para.sub_quotes(para.source) para = block_from_string %(#{BACKSLASH * 2}``a few quoted words''), attributes: { 'compat-mode' => '' } assert_equal %q(``a few quoted words''), para.sub_quotes(para.source) para = block_from_string(%(#{BACKSLASH}"`a few quoted words`")) assert_equal %q("`a few quoted words`"), para.sub_quotes(para.source) para = block_from_string(%(#{BACKSLASH * 2}"`a few quoted words`")) assert_equal %(#{BACKSLASH}"`a few quoted words`"), para.sub_quotes(para.source) end test 'multi-line double-quoted string' do para = block_from_string(%Q{``a few\nquoted words''}, attributes: { 'compat-mode' => '' }) assert_equal "“a few\nquoted words”", para.sub_quotes(para.source) para = block_from_string(%Q{"`a few\nquoted words`"}) assert_equal "“a few\nquoted words”", para.sub_quotes(para.source) end test 'double-quoted string with inline single quote' do para = block_from_string(%q{``Here's Johnny!''}, attributes: { 'compat-mode' => '' }) assert_equal %q{“Here's Johnny!”}, para.sub_quotes(para.source) para = block_from_string(%q{"`Here's Johnny!`"}) assert_equal %q{“Here's Johnny!”}, para.sub_quotes(para.source) end test 'double-quoted string with inline backquote' do para = block_from_string(%q{``Here`s Johnny!''}, attributes: { 'compat-mode' => '' }) assert_equal %q{“Here`s Johnny!”}, para.sub_quotes(para.source) para = block_from_string(%q{"`Here`s Johnny!`"}) assert_equal %q{“Here`s Johnny!”}, para.sub_quotes(para.source) end test 'double-quoted string around monospaced text' do para = block_from_string(%q("``E=mc^2^` is the solution!`")) assert_equal %q(“`E=mc2` is the solution!”), para.apply_subs(para.source); para = block_from_string(%q("```E=mc^2^`` is the solution!`")) assert_equal %q(“E=mc2 is the solution!”), para.apply_subs(para.source); end test 'single-line single-quoted string' do para = block_from_string(%q{`a few quoted words'}, attributes: { 'compat-mode' => '' }) assert_equal '‘a few quoted words’', para.sub_quotes(para.source) para = block_from_string(%q{'`a few quoted words`'}) assert_equal '‘a few quoted words’', para.sub_quotes(para.source) para = block_from_string(%q{'`a few quoted words`'}, backend: 'docbook') assert_equal 'a few quoted words', para.sub_quotes(para.source) end test 'escaped single-line single-quoted 
string' do para = block_from_string(%(#{BACKSLASH}`a few quoted words'), attributes: { 'compat-mode' => '' }) assert_equal %(`a few quoted words'), para.sub_quotes(para.source) para = block_from_string(%(#{BACKSLASH}'`a few quoted words`')) assert_equal %('`a few quoted words`'), para.sub_quotes(para.source) end test 'multi-line single-quoted string' do para = block_from_string(%Q{`a few\nquoted words'}, attributes: { 'compat-mode' => '' }) assert_equal "‘a few\nquoted words’", para.sub_quotes(para.source) para = block_from_string(%Q{'`a few\nquoted words`'}) assert_equal "‘a few\nquoted words’", para.sub_quotes(para.source) end test 'single-quoted string with inline single quote' do para = block_from_string(%q{`That isn't what I did.'}, attributes: { 'compat-mode' => '' }) assert_equal %q{‘That isn't what I did.’}, para.sub_quotes(para.source) para = block_from_string(%q{'`That isn't what I did.`'}) assert_equal %q{‘That isn't what I did.’}, para.sub_quotes(para.source) end test 'single-quoted string with inline backquote' do para = block_from_string(%q{`Here`s Johnny!'}, attributes: { 'compat-mode' => '' }) assert_equal %q{‘Here`s Johnny!’}, para.sub_quotes(para.source) para = block_from_string(%q{'`Here`s Johnny!`'}) assert_equal %q{‘Here`s Johnny!’}, para.sub_quotes(para.source) end test 'single-line constrained marked string' do #para = block_from_string(%q{#a few words#}, attributes: { 'compat-mode' => '' }) #assert_equal 'a few words', para.sub_quotes(para.source) para = block_from_string(%q{#a few words#}) assert_equal 'a few words', para.sub_quotes(para.source) end test 'escaped single-line constrained marked string' do para = block_from_string(%(#{BACKSLASH}#a few words#)) assert_equal '#a few words#', para.sub_quotes(para.source) end test 'multi-line constrained marked string' do #para = block_from_string(%Q{#a few\nwords#}, attributes: { 'compat-mode' => '' }) #assert_equal "a few\nwords", para.sub_quotes(para.source) para = block_from_string(%Q{#a few\nwords#}) assert_equal "a few\nwords", para.sub_quotes(para.source) end test 'constrained marked string should not match entity references' do para = block_from_string('111 #mark a# 222 "`quote a`" 333 #mark b# 444') assert_equal %(111 mark a 222 “quote a” 333 mark b 444), para.sub_quotes(para.source) end test 'single-line unconstrained marked string' do #para = block_from_string(%q{##--anything goes ##}, attributes: { 'compat-mode' => '' }) #assert_equal '--anything goes ', para.sub_quotes(para.source) para = block_from_string(%q{##--anything goes ##}) assert_equal '--anything goes ', para.sub_quotes(para.source) end test 'escaped single-line unconstrained marked string' do para = block_from_string(%(#{BACKSLASH}#{BACKSLASH}##--anything goes ##)) assert_equal '##--anything goes ##', para.sub_quotes(para.source) end test 'multi-line unconstrained marked string' do #para = block_from_string(%Q{##--anything\ngoes ##}, attributes: { 'compat-mode' => '' }) #assert_equal "--anything\ngoes ", para.sub_quotes(para.source) para = block_from_string(%Q{##--anything\ngoes ##}) assert_equal "--anything\ngoes ", para.sub_quotes(para.source) end test 'single-line constrained marked string with role' do para = block_from_string(%q{[statement]#a few words#}) assert_equal 'a few words', para.sub_quotes(para.source) end test 'single-line constrained strong string' do para = block_from_string(%q{*a few strong words*}) assert_equal 'a few strong words', para.sub_quotes(para.source) end test 'escaped single-line constrained strong string' do para = 
block_from_string(%(#{BACKSLASH}*a few strong words*)) assert_equal '*a few strong words*', para.sub_quotes(para.source) end test 'multi-line constrained strong string' do para = block_from_string(%Q{*a few\nstrong words*}) assert_equal "a few\nstrong words", para.sub_quotes(para.source) end test 'constrained strong string containing an asterisk' do para = block_from_string(%q{*bl*ck*-eye}) assert_equal 'bl*ck-eye', para.sub_quotes(para.source) end test 'constrained strong string containing an asterisk and multibyte word chars' do para = block_from_string(%q{*黑*眼圈*}) assert_equal '黑*眼圈', para.sub_quotes(para.source) end test 'single-line constrained quote variation emphasized string' do para = block_from_string(%q{_a few emphasized words_}) assert_equal 'a few emphasized words', para.sub_quotes(para.source) end test 'escaped single-line constrained quote variation emphasized string' do para = block_from_string(%(#{BACKSLASH}_a few emphasized words_)) assert_equal %q(_a few emphasized words_), para.sub_quotes(para.source) end test 'escaped single quoted string' do para = block_from_string(%(#{BACKSLASH}'a few emphasized words')) # NOTE the \' is replaced with ' by the :replacements substitution, later in the substitution pipeline assert_equal %(#{BACKSLASH}'a few emphasized words'), para.sub_quotes(para.source) end test 'multi-line constrained emphasized quote variation string' do para = block_from_string(%Q{_a few\nemphasized words_}) assert_equal "a few\nemphasized words", para.sub_quotes(para.source) end test 'single-quoted string containing an emphasized phrase' do para = block_from_string(%q{`I told him, 'Just go for it!''}, attributes: { 'compat-mode' => '' }) assert_equal '‘I told him, Just go for it!’', para.sub_quotes(para.source) para = block_from_string(%q{'`I told him, 'Just go for it!'`'}) assert_equal %q(‘I told him, 'Just go for it!'’), para.sub_quotes(para.source) end test 'escaped single-quotes inside emphasized words are restored' do para = block_from_string(%('Here#{BACKSLASH}'s Johnny!'), attributes: { 'compat-mode' => '' }) assert_equal %q(Here's Johnny!), para.apply_subs(para.source) para = block_from_string(%('Here#{BACKSLASH}'s Johnny!')) assert_equal %q('Here's Johnny!'), para.apply_subs(para.source) end test 'single-line constrained emphasized underline variation string' do para = block_from_string(%q{_a few emphasized words_}) assert_equal 'a few emphasized words', para.sub_quotes(para.source) end test 'escaped single-line constrained emphasized underline variation string' do para = block_from_string(%(#{BACKSLASH}_a few emphasized words_)) assert_equal '_a few emphasized words_', para.sub_quotes(para.source) end test 'multi-line constrained emphasized underline variation string' do para = block_from_string(%Q{_a few\nemphasized words_}) assert_equal "a few\nemphasized words", para.sub_quotes(para.source) end # NOTE must use apply_subs because constrained monospaced is handled as a passthrough test 'single-line constrained monospaced string' do para = block_from_string(%(`a few <{monospaced}> words`), attributes: { 'monospaced' => 'monospaced', 'compat-mode' => '' }) assert_equal 'a few <{monospaced}> words', para.apply_subs(para.source) para = block_from_string(%(`a few <{monospaced}> words`), attributes: { 'monospaced' => 'monospaced' }) assert_equal 'a few <monospaced> words', para.apply_subs(para.source) end # NOTE must use apply_subs because constrained monospaced is handled as a passthrough test 'single-line constrained monospaced string with role' do para = 
block_from_string(%([input]`a few <{monospaced}> words`), attributes: { 'monospaced' => 'monospaced', 'compat-mode' => '' }) assert_equal 'a few <{monospaced}> words', para.apply_subs(para.source) para = block_from_string(%([input]`a few <{monospaced}> words`), attributes: { 'monospaced' => 'monospaced' }) assert_equal 'a few <monospaced> words', para.apply_subs(para.source) end # NOTE must use apply_subs because constrained monospaced is handled as a passthrough test 'escaped single-line constrained monospaced string' do para = block_from_string(%(#{BACKSLASH}`a few words`), attributes: { 'compat-mode' => '' }) assert_equal '`a few <monospaced> words`', para.apply_subs(para.source) para = block_from_string(%(#{BACKSLASH}`a few words`)) assert_equal '`a few <monospaced> words`', para.apply_subs(para.source) end # NOTE must use apply_subs because constrained monospaced is handled as a passthrough test 'escaped single-line constrained monospaced string with role' do para = block_from_string(%([input]#{BACKSLASH}`a few words`), attributes: { 'compat-mode' => '' }) assert_equal '[input]`a few <monospaced> words`', para.apply_subs(para.source) para = block_from_string(%([input]#{BACKSLASH}`a few words`)) assert_equal '[input]`a few <monospaced> words`', para.apply_subs(para.source) end # NOTE must use apply_subs because constrained monospaced is handled as a passthrough test 'escaped role on single-line constrained monospaced string' do para = block_from_string(%(#{BACKSLASH}[input]`a few words`), attributes: { 'compat-mode' => '' }) assert_equal '[input]a few <monospaced> words', para.apply_subs(para.source) para = block_from_string(%(#{BACKSLASH}[input]`a few words`)) assert_equal '[input]a few <monospaced> words', para.apply_subs(para.source) end # NOTE must use apply_subs because constrained monospaced is handled as a passthrough test 'escaped role on escaped single-line constrained monospaced string' do para = block_from_string(%(#{BACKSLASH}[input]#{BACKSLASH}`a few words`), attributes: { 'compat-mode' => '' }) assert_equal %(#{BACKSLASH}[input]`a few <monospaced> words`), para.apply_subs(para.source) para = block_from_string(%(#{BACKSLASH}[input]#{BACKSLASH}`a few words`)) assert_equal %(#{BACKSLASH}[input]`a few <monospaced> words`), para.apply_subs(para.source) end # NOTE must use apply_subs because constrained monospaced is handled as a passthrough test 'should ignore role that ends with transitional role on constrained monospace span' do para = block_from_string %([foox-]`leave it alone`) assert_equal 'leave it alone', para.apply_subs(para.source) end # NOTE must use apply_subs because constrained monospaced is handled as a passthrough test 'escaped single-line constrained monospace string with forced compat role' do para = block_from_string %([x-]#{BACKSLASH}`leave it alone`) assert_equal '[x-]`leave it alone`', para.apply_subs(para.source) end # NOTE must use apply_subs because constrained monospaced is handled as a passthrough test 'escaped forced compat role on single-line constrained monospace string' do para = block_from_string %(#{BACKSLASH}[x-]`just *mono*`) assert_equal '[x-]just mono', para.apply_subs(para.source) end # NOTE must use apply_subs because constrained monospaced is handled as a passthrough test 'multi-line constrained monospaced string' do para = block_from_string(%(`a few\n<{monospaced}> words`), attributes: { 'monospaced' => 'monospaced', 'compat-mode' => '' }) assert_equal "a few\n<{monospaced}> words", para.apply_subs(para.source) para = 
block_from_string(%(`a few\n<{monospaced}> words`), attributes: { 'monospaced' => 'monospaced' }) assert_equal "a few\n<monospaced> words", para.apply_subs(para.source) end test 'single-line unconstrained strong chars' do para = block_from_string(%q{**Git**Hub}) assert_equal 'GitHub', para.sub_quotes(para.source) end test 'escaped single-line unconstrained strong chars' do para = block_from_string(%(#{BACKSLASH}**Git**Hub)) assert_equal '*Git*Hub', para.sub_quotes(para.source) end test 'multi-line unconstrained strong chars' do para = block_from_string(%Q{**G\ni\nt\n**Hub}) assert_equal "G\ni\nt\nHub", para.sub_quotes(para.source) end test 'unconstrained strong chars with inline asterisk' do para = block_from_string(%q{**bl*ck**-eye}) assert_equal 'bl*ck-eye', para.sub_quotes(para.source) end test 'unconstrained strong chars with role' do para = block_from_string(%q{Git[blue]**Hub**}) assert_equal %q{GitHub}, para.sub_quotes(para.source) end # TODO this is not the same result as AsciiDoc, though I don't understand why AsciiDoc gets what it gets test 'escaped unconstrained strong chars with role' do para = block_from_string(%(Git#{BACKSLASH}[blue]**Hub**)) assert_equal %q{Git[blue]*Hub*}, para.sub_quotes(para.source) end test 'single-line unconstrained emphasized chars' do para = block_from_string(%q{__Git__Hub}) assert_equal 'GitHub', para.sub_quotes(para.source) end test 'escaped single-line unconstrained emphasized chars' do para = block_from_string(%(#{BACKSLASH}__Git__Hub)) assert_equal '__Git__Hub', para.sub_quotes(para.source) end test 'escaped single-line unconstrained emphasized chars around word' do para = block_from_string(%(#{BACKSLASH}#{BACKSLASH}__GitHub__)) assert_equal '__GitHub__', para.sub_quotes(para.source) end test 'multi-line unconstrained emphasized chars' do para = block_from_string(%Q{__G\ni\nt\n__Hub}) assert_equal "G\ni\nt\nHub", para.sub_quotes(para.source) end test 'unconstrained emphasis chars with role' do para = block_from_string(%q{[gray]__Git__Hub}) assert_equal %q{GitHub}, para.sub_quotes(para.source) end test 'escaped unconstrained emphasis chars with role' do para = block_from_string(%(#{BACKSLASH}[gray]__Git__Hub)) assert_equal %q{[gray]__Git__Hub}, para.sub_quotes(para.source) end test 'single-line constrained monospaced chars' do para = block_from_string(%q{call +save()+ to persist the changes}, attributes: { 'compat-mode' => '' }) assert_equal 'call save() to persist the changes', para.sub_quotes(para.source) para = block_from_string(%q{call [x-]+save()+ to persist the changes}) assert_equal 'call save() to persist the changes', para.apply_subs(para.source) para = block_from_string(%q{call `save()` to persist the changes}) assert_equal 'call save() to persist the changes', para.sub_quotes(para.source) end test 'single-line constrained monospaced chars with role' do para = block_from_string(%q{call [method]+save()+ to persist the changes}, attributes: { 'compat-mode' => '' }) assert_equal 'call save() to persist the changes', para.sub_quotes(para.source) para = block_from_string(%q{call [method x-]+save()+ to persist the changes}) assert_equal 'call save() to persist the changes', para.apply_subs(para.source) para = block_from_string(%q{call [method]`save()` to persist the changes}) assert_equal 'call save() to persist the changes', para.sub_quotes(para.source) end test 'escaped single-line constrained monospaced chars' do para = block_from_string(%(call #{BACKSLASH}+save()+ to persist the changes), attributes: { 'compat-mode' => '' }) assert_equal 
'call +save()+ to persist the changes', para.sub_quotes(para.source) para = block_from_string(%(call #{BACKSLASH}`save()` to persist the changes)) assert_equal 'call `save()` to persist the changes', para.sub_quotes(para.source) end test 'escaped single-line constrained monospaced chars with role' do para = block_from_string(%(call [method]#{BACKSLASH}+save()+ to persist the changes), attributes: { 'compat-mode' => '' }) assert_equal 'call [method]+save()+ to persist the changes', para.sub_quotes(para.source) para = block_from_string(%(call [method]#{BACKSLASH}`save()` to persist the changes)) assert_equal 'call [method]`save()` to persist the changes', para.sub_quotes(para.source) end test 'escaped role on single-line constrained monospaced chars' do para = block_from_string(%(call #{BACKSLASH}[method]+save()+ to persist the changes), attributes: { 'compat-mode' => '' }) assert_equal 'call [method]save() to persist the changes', para.sub_quotes(para.source) para = block_from_string(%(call #{BACKSLASH}[method]`save()` to persist the changes)) assert_equal 'call [method]save() to persist the changes', para.sub_quotes(para.source) end test 'escaped role on escaped single-line constrained monospaced chars' do para = block_from_string(%(call #{BACKSLASH}[method]#{BACKSLASH}+save()+ to persist the changes), attributes: { 'compat-mode' => '' }) assert_equal %(call #{BACKSLASH}[method]+save()+ to persist the changes), para.sub_quotes(para.source) para = block_from_string(%(call #{BACKSLASH}[method]#{BACKSLASH}`save()` to persist the changes)) assert_equal %(call #{BACKSLASH}[method]`save()` to persist the changes), para.sub_quotes(para.source) end # NOTE must use apply_subs because constrained monospaced is handled as a passthrough test 'escaped single-line constrained passthrough string with forced compat role' do para = block_from_string %([x-]#{BACKSLASH}+leave it alone+) assert_equal '[x-]+leave it alone+', para.apply_subs(para.source) end test 'single-line unconstrained monospaced chars' do para = block_from_string(%q{Git++Hub++}, attributes: { 'compat-mode' => '' }) assert_equal 'GitHub', para.sub_quotes(para.source) para = block_from_string(%q{Git[x-]++Hub++}) assert_equal 'GitHub', para.apply_subs(para.source) para = block_from_string(%q{Git``Hub``}) assert_equal 'GitHub', para.sub_quotes(para.source) end test 'escaped single-line unconstrained monospaced chars' do para = block_from_string(%(Git#{BACKSLASH}++Hub++), attributes: { 'compat-mode' => '' }) assert_equal 'Git+Hub+', para.sub_quotes(para.source) para = block_from_string(%(Git#{BACKSLASH * 2}++Hub++), attributes: { 'compat-mode' => '' }) assert_equal 'Git++Hub++', para.sub_quotes(para.source) para = block_from_string(%(Git#{BACKSLASH}``Hub``)) assert_equal 'Git``Hub``', para.sub_quotes(para.source) end test 'multi-line unconstrained monospaced chars' do para = block_from_string(%Q{Git++\nH\nu\nb++}, attributes: { 'compat-mode' => '' }) assert_equal "Git\nH\nu\nb", para.sub_quotes(para.source) para = block_from_string(%Q{Git[x-]++\nH\nu\nb++}) assert_equal %(Git\nH\nu\nb), para.apply_subs(para.source) para = block_from_string(%Q{Git``\nH\nu\nb``}) assert_equal "Git\nH\nu\nb", para.sub_quotes(para.source) end test 'single-line superscript chars' do para = block_from_string(%(x^2^ = x * x, e = mc^2^, there's a 1^st^ time for everything)) assert_equal %(x2 = x * x, e = mc2, there\'s a 1st time for everything), para.sub_quotes(para.source) end test 'escaped single-line superscript chars' do para = block_from_string(%(x#{BACKSLASH}^2^ 
= x * x)) assert_equal 'x^2^ = x * x', para.sub_quotes(para.source) end test 'does not match superscript across whitespace' do para = block_from_string(%Q{x^(n\n-\n1)^}) assert_equal para.source, para.sub_quotes(para.source) end test 'allow spaces in superscript if spaces are inserted using an attribute reference' do para = block_from_string 'Night ^A{sp}poem{sp}by{sp}Jane{sp}Kondo^.' assert_equal 'Night A poem by Jane Kondo.', para.apply_subs(para.source) end test 'allow spaces in superscript if text is wrapped in a passthrough' do para = block_from_string 'Night ^+A poem by Jane Kondo+^.' assert_equal 'Night A poem by Jane Kondo.', para.apply_subs(para.source) end test 'does not match adjacent superscript chars' do para = block_from_string 'a ^^ b' assert_equal 'a ^^ b', para.sub_quotes(para.source) end test 'does not confuse superscript and links with blank window shorthand' do para = block_from_string(%Q{http://localhost[Text^] on the 21^st^ and 22^nd^}) assert_equal 'Text on the 21st and 22nd', para.content end test 'single-line subscript chars' do para = block_from_string(%q{H~2~O}) assert_equal 'H2O', para.sub_quotes(para.source) end test 'escaped single-line subscript chars' do para = block_from_string(%(H#{BACKSLASH}~2~O)) assert_equal 'H~2~O', para.sub_quotes(para.source) end test 'does not match subscript across whitespace' do para = block_from_string(%Q{project~ view\non\nGitHub~}) assert_equal para.source, para.sub_quotes(para.source) end test 'does not match adjacent subscript chars' do para = block_from_string 'a ~~ b' assert_equal 'a ~~ b', para.sub_quotes(para.source) end test 'does not match subscript across distinct URLs' do para = block_from_string(%Q{http://www.abc.com/~def[DEF] and http://www.abc.com/~ghi[GHI]}) assert_equal para.source, para.sub_quotes(para.source) end test 'quoted text with role shorthand' do para = block_from_string(%q{[.white.red-background]#alert#}) assert_equal 'alert', para.sub_quotes(para.source) end test 'quoted text with id shorthand' do para = block_from_string(%q{[#bond]#007#}) assert_equal '007', para.sub_quotes(para.source) end test 'quoted text with id and role shorthand' do para = block_from_string(%q{[#bond.white.red-background]#007#}) assert_equal '007', para.sub_quotes(para.source) end test 'quoted text with id and role shorthand with roles before id' do para = block_from_string(%q{[.white.red-background#bond]#007#}) assert_equal '007', para.sub_quotes(para.source) end test 'quoted text with id and role shorthand with roles around id' do para = block_from_string(%q{[.white#bond.red-background]#007#}) assert_equal '007', para.sub_quotes(para.source) end test 'quoted text with id and role shorthand using docbook backend' do para = block_from_string(%q{[#bond.white.red-background]#007#}, backend: 'docbook') assert_equal '007', para.sub_quotes(para.source) end test 'should not assign role attribute if shorthand style has no roles' do para = block_from_string '[#idname]*blah*' assert_equal 'blah', para.content end test 'should remove trailing spaces from role defined using shorthand' do para = block_from_string '[.rolename ]*blah*' assert_equal 'blah', para.content end test 'should ignore attributes after comma' do para = block_from_string(%q{[red, foobar]#alert#}) assert_equal 'alert', para.sub_quotes(para.source) end test 'should remove leading and trailing spaces around role after ignoring attributes after comma' do para = block_from_string(%q{[ red , foobar]#alert#}) assert_equal 'alert', para.sub_quotes(para.source) end test 'should 
not assign role if value before comma is empty' do para = block_from_string(%q{[,]#anonymous#}) assert_equal 'anonymous', para.sub_quotes(para.source) end test 'inline passthrough with id and role set using shorthand' do %w(#idname.rolename .rolename#idname).each do |attrlist| para = block_from_string %([#{attrlist}]+pass+) assert_equal 'pass', para.content end end end context 'Macros' do test 'a single-line link macro should be interpreted as a link' do para = block_from_string('link:/home.html[]') assert_equal %q{/home.html}, para.sub_macros(para.source) end test 'a single-line link macro with text should be interpreted as a link' do para = block_from_string('link:/home.html[Home]') assert_equal %q{Home}, para.sub_macros(para.source) end test 'a mailto macro should be interpreted as a mailto link' do para = block_from_string('mailto:doc.writer@asciidoc.org[]') assert_equal %q{doc.writer@asciidoc.org}, para.sub_macros(para.source) end test 'a mailto macro with text should be interpreted as a mailto link' do para = block_from_string('mailto:doc.writer@asciidoc.org[Doc Writer]') assert_equal %q{Doc Writer}, para.sub_macros(para.source) end test 'a mailto macro with text and subject should be interpreted as a mailto link' do para = block_from_string('mailto:doc.writer@asciidoc.org[Doc Writer, Pull request]') assert_equal %q{Doc Writer}, para.sub_macros(para.source) end test 'a mailto macro with text, subject and body should be interpreted as a mailto link' do para = block_from_string('mailto:doc.writer@asciidoc.org[Doc Writer, Pull request, Please accept my pull request]') assert_equal %q{Doc Writer}, para.sub_macros(para.source) end test 'a mailto macro with subject and body only should use e-mail as text' do para = block_from_string('mailto:doc.writer@asciidoc.org[,Pull request,Please accept my pull request]') assert_equal %q{doc.writer@asciidoc.org}, para.sub_macros(para.source) end test 'a mailto macro supports id and role attributes' do para = block_from_string('mailto:doc.writer@asciidoc.org[,id=contact,role=icon]') assert_equal %q{doc.writer@asciidoc.org}, para.sub_macros(para.source) end test 'should recognize inline email addresses' do %w( doc.writer@asciidoc.org author+website@4fs.no john@domain.uk.co name@somewhere.else.com joe_bloggs@mail_server.com joe-bloggs@mail-server.com joe.bloggs@mail.server.com FOO@BAR.COM docs@writing.ninja ).each do |input| para = block_from_string input assert_equal %(#{input}), (para.sub_macros para.source) end end test 'should recognize inline email address containing an ampersand' do para = block_from_string('bert&ernie@sesamestreet.com') assert_equal %q{bert&ernie@sesamestreet.com}, para.apply_subs(para.source) end test 'should recognize inline email address surrounded by angle brackets' do para = block_from_string('') assert_equal %q{<doc.writer@asciidoc.org>}, para.apply_subs(para.source) end test 'should ignore escaped inline email address' do para = block_from_string(%(#{BACKSLASH}doc.writer@asciidoc.org)) assert_equal %q{doc.writer@asciidoc.org}, para.sub_macros(para.source) end test 'a single-line raw url should be interpreted as a link' do para = block_from_string('http://google.com') assert_equal %q{http://google.com}, para.sub_macros(para.source) end test 'a single-line raw url with text should be interpreted as a link' do para = block_from_string('http://google.com[Google]') assert_equal %q{Google}, para.sub_macros(para.source) end test 'a multi-line raw url with text should be interpreted as a link' do para = 
block_from_string("http://google.com[Google\nHomepage]") assert_equal %{Google\nHomepage}, para.sub_macros(para.source) end test 'a single-line raw url with attribute as text should be interpreted as a link with resolved attribute' do para = block_from_string("http://google.com[{google_homepage}]") para.document.attributes['google_homepage'] = 'Google Homepage' assert_equal %q{Google Homepage}, para.sub_macros(para.sub_attributes(para.source)) end test 'should not resolve an escaped attribute in link text' do { 'http://google.com' => "http://google.com[#{BACKSLASH}{google_homepage}]", 'http://google.com?q=,' => "link:http://google.com?q=,[#{BACKSLASH}{google_homepage}]", }.each do |uri, macro| para = block_from_string macro para.document.attributes['google_homepage'] = 'Google Homepage' assert_equal %({google_homepage}), para.sub_macros(para.sub_attributes(para.source)) end end test 'a single-line escaped raw url should not be interpreted as a link' do para = block_from_string(%(#{BACKSLASH}http://google.com)) assert_equal %q{http://google.com}, para.sub_macros(para.source) end test 'a comma separated list of links should not include commas in links' do para = block_from_string('http://foo.com, http://bar.com, http://example.org') assert_equal %q{http://foo.com, http://bar.com, http://example.org}, para.sub_macros(para.source) end test 'a single-line image macro should be interpreted as an image' do para = block_from_string('image:tiger.png[]') assert_equal %{tiger}, para.sub_macros(para.source).gsub(/>\s+<') end test 'should replace underscore and hyphen with space in generated alt text for an inline image' do para = block_from_string('image:tiger-with-family_1.png[]') assert_equal %{tiger with family 1}, para.sub_macros(para.source).gsub(/>\s+<') end test 'a single-line image macro with text should be interpreted as an image with alt text' do para = block_from_string('image:tiger.png[Tiger]') assert_equal %{Tiger}, para.sub_macros(para.source).gsub(/>\s+<') end test 'should encode special characters in alt text of inline image' do input = 'A tiger\'s "roar" is < a bear\'s "growl"' expected = 'A tiger’s "roar" is < a bear’s "growl"' output = (convert_inline_string %(image:tiger-roar.png[#{input}])).gsub(/>\s+<') assert_equal %(#{expected}), output end test 'an image macro with SVG image and text should be interpreted as an image with alt text' do para = block_from_string('image:tiger.svg[Tiger]') assert_equal %{Tiger}, para.sub_macros(para.source).gsub(/>\s+<') end test 'an image macro with an interactive SVG image and alt text should be converted to an object element' do para = block_from_string('image:tiger.svg[Tiger,opts=interactive]', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'imagesdir' => 'images' }) assert_equal %{Tiger}, para.sub_macros(para.source).gsub(/>\s+<') end test 'an image macro with an interactive SVG image, fallback and alt text should be converted to an object element' do para = block_from_string('image:tiger.svg[Tiger,fallback=tiger.png,opts=interactive]', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'imagesdir' => 'images' }) assert_equal %{Tiger}, para.sub_macros(para.source).gsub(/>\s+<') end test 'an image macro with an inline SVG image should be converted to an svg element' do para = block_from_string('image:circle.svg[Tiger,100,opts=inline]', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'imagesdir' => 'fixtures', 'docdir' => testdir }) result = para.sub_macros(para.source).gsub(/>\s+<') assert_match(/]*width="100"[^>]*>/, result) 
refute_match(/]*width="500"[^>]*>/, result) refute_match(/]*height="500"[^>]*>/, result) refute_match(/]*style="[^>]*>/, result) end test 'an image macro with an inline SVG image should be converted to an svg element even when data-uri is set' do para = block_from_string('image:circle.svg[Tiger,100,opts=inline]', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'data-uri' => '', 'imagesdir' => 'fixtures', 'docdir' => testdir }) assert_match(/]*width="100">/, para.sub_macros(para.source).gsub(/>\s+<')) end test 'an image macro with an SVG image should not use an object element when safe mode is secure' do para = block_from_string('image:tiger.svg[Tiger,opts=interactive]', attributes: { 'imagesdir' => 'images' }) assert_equal %{Tiger}, para.sub_macros(para.source).gsub(/>\s+<') end test 'a single-line image macro with text containing escaped square bracket should be interpreted as an image with alt text' do para = block_from_string(%(image:tiger.png[[Another#{BACKSLASH}] Tiger])) assert_equal %{[Another] Tiger}, para.sub_macros(para.source).gsub(/>\s+<') end test 'a single-line image macro with text and dimensions should be interpreted as an image with alt text and dimensions' do para = block_from_string('image:tiger.png[Tiger, 200, 100]') assert_equal %{Tiger}, para.sub_macros(para.source).gsub(/>\s+<') end test 'a single-line image macro with text and dimensions should be interpreted as an image with alt text and dimensions in docbook' do para = block_from_string 'image:tiger.png[Tiger, 200, 100]', backend: 'docbook' assert_equal %{Tiger}, para.sub_macros(para.source).gsub(/>\s+<') end test 'should pass through role on image macro to DocBook output' do para = block_from_string 'image:tiger.png[Tiger,200,role=animal]', backend: 'docbook' result = para.sub_macros(para.source) assert_includes result, '' end test 'a single-line image macro with text and link should be interpreted as a linked image with alt text' do para = block_from_string('image:tiger.png[Tiger, link="http://en.wikipedia.org/wiki/Tiger"]') assert_equal %{Tiger}, para.sub_macros(para.source).gsub(/>\s+<') end test 'rel=noopener should be added to an image with a link that targets the _blank window' do para = block_from_string 'image:tiger.png[Tiger,link=http://en.wikipedia.org/wiki/Tiger,window=_blank]' assert_equal %{Tiger}, para.sub_macros(para.source).gsub(/>\s+<') end test 'rel=noopener should be added to an image with a link that targets a named window when the noopener option is set' do para = block_from_string 'image:tiger.png[Tiger,link=http://en.wikipedia.org/wiki/Tiger,window=name,opts=noopener]' assert_equal %{Tiger}, para.sub_macros(para.source).gsub(/>\s+<') end test 'rel=nofollow should be added to an image with a link when the nofollow option is set' do para = block_from_string 'image:tiger.png[Tiger,link=http://en.wikipedia.org/wiki/Tiger,opts=nofollow]' assert_equal %{Tiger}, para.sub_macros(para.source).gsub(/>\s+<') end test 'a multi-line image macro with text and dimensions should be interpreted as an image with alt text and dimensions' do para = block_from_string(%(image:tiger.png[Another\nAwesome\nTiger, 200,\n100])) assert_equal %{Another Awesome Tiger}, para.sub_macros(para.source).gsub(/>\s+<') end test 'an inline image macro with a url target should be interpreted as an image' do para = block_from_string %(Beware of the image:http://example.com/images/tiger.png[tiger].) 
assert_equal %{Beware of the tiger.}, para.sub_macros(para.source).gsub(/>\s+<') end test 'an inline image macro with a float attribute should be interpreted as a floating image' do para = block_from_string %(image:http://example.com/images/tiger.png[tiger, float="right"] Beware of the tigers!) assert_equal %{tiger Beware of the tigers!}, para.sub_macros(para.source).gsub(/>\s+<') end test 'should prepend value of imagesdir attribute to inline image target if target is relative path' do para = block_from_string %(Beware of the image:tiger.png[tiger].), attributes: { 'imagesdir' => './images' } assert_equal %{Beware of the tiger.}, para.sub_macros(para.source).gsub(/>\s+<') end test 'should not prepend value of imagesdir attribute to inline image target if target is absolute path' do para = block_from_string %(Beware of the image:/tiger.png[tiger].), attributes: { 'imagesdir' => './images' } assert_equal %{Beware of the tiger.}, para.sub_macros(para.source).gsub(/>\s+<') end test 'should not prepend value of imagesdir attribute to inline image target if target is url' do para = block_from_string %(Beware of the image:http://example.com/images/tiger.png[tiger].), attributes: { 'imagesdir' => './images' } assert_equal %{Beware of the tiger.}, para.sub_macros(para.source).gsub(/>\s+<') end test 'should match an inline image macro if target contains a space character' do para = block_from_string(%(Beware of the image:big cats.png[] around here.)) assert_equal %(Beware of the big cats around here.), para.sub_macros(para.source).gsub(/>\s+<') end test 'should not match an inline image macro if target contains a newline character' do para = block_from_string(%(Fear not. There are no image:big\ncats.png[] around here.)) result = para.sub_macros(para.source) refute_includes result, ' '', 'iconsdir' => 'fixtures', 'docdir' => testdir }, safe: :server, catalog_assets: true assert 1, sect.document.catalog[:images].size assert_equal 'fixtures/dot.gif', sect.document.catalog[:images][0].to_s assert_nil sect.document.catalog[:images][0].imagesdir assert logger.empty? 
end end test 'an icon macro should be interpreted as an icon if icons are enabled' do para = block_from_string 'icon:github[]', attributes: { 'icons' => '' } assert_equal %{github}, para.sub_macros(para.source).gsub(/>\s+<') end test 'an icon macro should be interpreted as alt text if icons are disabled' do para = block_from_string 'icon:github[]' assert_equal %{[github]}, para.sub_macros(para.source).gsub(/>\s+<') end test 'should not mangle icon with link if icons are disabled' do para = block_from_string 'icon:github[link=https://github.com]' assert_equal '[github]', para.sub_macros(para.source).gsub(/>\s+<') end test 'should not mangle icon inside link if icons are disabled' do para = block_from_string 'https://github.com[icon:github[] GitHub]' assert_equal '[github] GitHub', para.sub_macros(para.source).gsub(/>\s+<') end test 'an icon macro should output alt text if icons are disabled and alt is given' do para = block_from_string 'icon:github[alt="GitHub"]' assert_equal %{[GitHub]}, para.sub_macros(para.source).gsub(/>\s+<') end test 'an icon macro should be interpreted as a font-based icon when icons=font' do para = block_from_string 'icon:github[]', attributes: { 'icons' => 'font' } assert_equal %{}, para.sub_macros(para.source).gsub(/>\s+<') end test 'an icon macro with a size should be interpreted as a font-based icon with a size when icons=font' do para = block_from_string 'icon:github[4x]', attributes: { 'icons' => 'font' } assert_equal %{}, para.sub_macros(para.source).gsub(/>\s+<') end test 'an icon macro with flip should be interpreted as a flipped font-based icon when icons=font' do para = block_from_string 'icon:shield[fw,flip=horizontal]', attributes: { 'icons' => 'font' } assert_equal '', para.sub_macros(para.source).gsub(/>\s+<') end test 'an icon macro with rotate should be interpreted as a rotated font-based icon when icons=font' do para = block_from_string 'icon:shield[fw,rotate=90]', attributes: { 'icons' => 'font' } assert_equal '', para.sub_macros(para.source).gsub(/>\s+<') end test 'an icon macro with a role and title should be interpreted as a font-based icon with a class and title when icons=font' do para = block_from_string 'icon:heart[role="red", title="Heart me"]', attributes: { 'icons' => 'font' } assert_equal %{}, para.sub_macros(para.source).gsub(/>\s+<') end test 'a single-line footnote macro should be registered and output as a footnote' do para = block_from_string('Sentence text footnote:[An example footnote.].') assert_equal %(Sentence text [1].), para.sub_macros(para.source) assert_equal 1, para.document.catalog[:footnotes].size footnote = para.document.catalog[:footnotes].first assert_equal 1, footnote.index assert_nil footnote.id assert_equal 'An example footnote.', footnote.text end test 'a multi-line footnote macro should be registered and output as a footnote without newline' do para = block_from_string("Sentence text footnote:[An example footnote\nwith wrapped text.].") assert_equal %(Sentence text [1].), para.sub_macros(para.source) assert_equal 1, para.document.catalog[:footnotes].size footnote = para.document.catalog[:footnotes].first assert_equal 1, footnote.index assert_nil footnote.id assert_equal "An example footnote with wrapped text.", footnote.text end test 'an escaped closing square bracket in a footnote should be unescaped when converted' do para = block_from_string(%(footnote:[a #{BACKSLASH}] b].)) assert_equal %([1].), para.sub_macros(para.source) assert_equal 1, para.document.catalog[:footnotes].size footnote = 
para.document.catalog[:footnotes].first assert_equal "a ] b", footnote.text end test 'a footnote macro can be directly adjacent to preceding word' do para = block_from_string('Sentence textfootnote:[An example footnote.].') assert_equal %(Sentence text[1].), para.sub_macros(para.source) end test 'a footnote macro may contain an escaped backslash' do para = block_from_string("footnote:[\\]]\nfootnote:[a \\] b]\nfootnote:[a \\]\\] b]") para.sub_macros(para.source) assert_equal 3, para.document.catalog[:footnotes].size footnote1 = para.document.catalog[:footnotes][0] assert_equal ']', footnote1.text footnote2 = para.document.catalog[:footnotes][1] assert_equal 'a ] b', footnote2.text footnote3 = para.document.catalog[:footnotes][2] assert_equal 'a ]] b', footnote3.text end test 'a footnote macro may contain a link macro' do para = block_from_string('Share your code. footnote:[https://github.com[GitHub]]') assert_equal %(Share your code. [1]), para.sub_macros(para.source) assert_equal 1, para.document.catalog[:footnotes].size footnote1 = para.document.catalog[:footnotes][0] assert_equal 'GitHub', footnote1.text end test 'a footnote macro may contain a plain URL' do para = block_from_string %(the JLine footnote:[https://github.com/jline/jline2]\nlibrary.) result = para.sub_macros para.source assert_equal %(the JLine [1]\nlibrary.), result assert_equal 1, para.document.catalog[:footnotes].size fn1 = para.document.catalog[:footnotes].first assert_equal 'https://github.com/jline/jline2', fn1.text end test 'a footnote macro followed by a semi-colon may contain a plain URL' do para = block_from_string %(the JLine footnote:[https://github.com/jline/jline2];\nlibrary.) result = para.sub_macros para.source assert_equal %(the JLine [1];\nlibrary.), result assert_equal 1, para.document.catalog[:footnotes].size fn1 = para.document.catalog[:footnotes].first assert_equal 'https://github.com/jline/jline2', fn1.text end test 'a footnote macro may contain text formatting' do para = block_from_string 'You can download patches from the product page.footnote:[Only available with an _active_ subscription.]' para.convert footnotes = para.document.catalog[:footnotes] assert_equal 1, footnotes.size assert_equal 'Only available with an active subscription.', footnotes[0].text end test 'an externalized footnote macro may contain text formatting' do input = <<~'EOS' :fn-disclaimer: pass:q[footnote:[Only available with an _active_ subscription.]] You can download patches from the production page.{fn-disclaimer} EOS doc = document_from_string input doc.convert footnotes = doc.catalog[:footnotes] assert_equal 1, footnotes.size assert_equal 'Only available with an active subscription.', footnotes[0].text end test 'a footnote macro may contain a shorthand xref' do # specialcharacters escaping is simulated para = block_from_string('text footnote:[<<_install,install>>]') doc = para.document doc.register :refs, ['_install', (Asciidoctor::Inline.new doc, :anchor, 'Install', type: :ref, target: '_install'), 'Install'] catalog = doc.catalog assert_equal %(text [1]), para.sub_macros(para.source) assert_equal 1, catalog[:footnotes].size footnote1 = catalog[:footnotes][0] assert_equal 'install', footnote1.text end test 'a footnote macro may contain an xref macro' do para = block_from_string('text footnote:[xref:_install[install]]') doc = para.document doc.register :refs, ['_install', (Asciidoctor::Inline.new doc, :anchor, 'Install', type: :ref, target: '_install'), 'Install'] catalog = doc.catalog assert_equal %(text [1]), 
para.sub_macros(para.source) assert_equal 1, catalog[:footnotes].size footnote1 = catalog[:footnotes][0] assert_equal 'install', footnote1.text end test 'a footnote macro may contain an anchor macro' do para = block_from_string('text footnote:[a [[b]] [[c\]\] d]') assert_equal %(text [1]), para.sub_macros(para.source) assert_equal 1, para.document.catalog[:footnotes].size footnote1 = para.document.catalog[:footnotes][0] assert_equal 'a [[c]] d', footnote1.text end test 'subsequent footnote macros with escaped URLs should be restored in DocBook' do input = 'foofootnote:[+http://example.com+]barfootnote:[+http://acme.com+]baz' result = convert_string_to_embedded input, doctype: 'inline', backend: 'docbook' assert_equal 'foohttp://example.combarhttp://acme.combaz', result end test 'should increment index of subsequent footnote macros' do para = block_from_string("Sentence text footnote:[An example footnote.]. Sentence text footnote:[Another footnote.].") assert_equal %(Sentence text [1]. Sentence text [2].), para.sub_macros(para.source) assert_equal 2, para.document.catalog[:footnotes].size footnote1 = para.document.catalog[:footnotes][0] assert_equal 1, footnote1.index assert_nil footnote1.id assert_equal "An example footnote.", footnote1.text footnote2 = para.document.catalog[:footnotes][1] assert_equal 2, footnote2.index assert_nil footnote2.id assert_equal "Another footnote.", footnote2.text end test 'a footnoteref macro with id and single-line text should be registered and output as a footnote' do para = block_from_string 'Sentence text footnoteref:[ex1, An example footnote.].', attributes: { 'compat-mode' => '' } assert_equal %(Sentence text [1].), para.sub_macros(para.source) assert_equal 1, para.document.catalog[:footnotes].size footnote = para.document.catalog[:footnotes].first assert_equal 1, footnote.index assert_equal 'ex1', footnote.id assert_equal 'An example footnote.', footnote.text end test 'a footnoteref macro with id and multi-line text should be registered and output as a footnote without newlines' do para = block_from_string "Sentence text footnoteref:[ex1, An example footnote\nwith wrapped text.].", attributes: { 'compat-mode' => '' } assert_equal %(Sentence text [1].), para.sub_macros(para.source) assert_equal 1, para.document.catalog[:footnotes].size footnote = para.document.catalog[:footnotes].first assert_equal 1, footnote.index assert_equal 'ex1', footnote.id assert_equal "An example footnote with wrapped text.", footnote.text end test 'a footnoteref macro with id should refer to footnoteref with same id' do para = block_from_string 'Sentence text footnoteref:[ex1, An example footnote.]. Sentence text footnoteref:[ex1].', attributes: { 'compat-mode' => '' } assert_equal %(Sentence text [1]. 
Sentence text [1].), para.sub_macros(para.source) assert_equal 1, para.document.catalog[:footnotes].size footnote = para.document.catalog[:footnotes].first assert_equal 1, footnote.index assert_equal 'ex1', footnote.id assert_equal 'An example footnote.', footnote.text end test 'an unresolved footnote reference should produce a warning message and output fallback text in red' do input = 'Sentence text.footnote:ex1[]' using_memory_logger do |logger| para = block_from_string input output = para.sub_macros para.source assert_equal 'Sentence text.[ex1]', output assert_message logger, :WARN, 'invalid footnote reference: ex1' end end test 'using a footnoteref macro should generate a warning when compat mode is not enabled' do input = 'Sentence text.footnoteref:[fn1,Commentary on this sentence.]' using_memory_logger do |logger| para = block_from_string input para.sub_macros para.source assert_message logger, :WARN, 'found deprecated footnoteref macro: footnoteref:[fn1,Commentary on this sentence.]; use footnote macro with target instead' end end test 'inline footnote macro can be used to define and reference a footnote reference' do input = <<~'EOS' You can download the software from the product page.footnote:sub[Option only available if you have an active subscription.] You can also file a support request.footnote:sub[] If all else fails, you can give us a call.footnoteref:[sub] EOS using_memory_logger do |logger| output = convert_string_to_embedded input, attributes: { 'compat-mode' => '' } assert_css '#_footnotedef_1', output, 1 assert_css 'p a[href="#_footnotedef_1"]', output, 3 assert_css '#footnotes .footnote', output, 1 assert logger.empty? end end test 'should parse multiple footnote references in a single line' do input = 'notable text.footnote:id[about this [text\]], footnote:id[], footnote:id[]' output = convert_string_to_embedded input assert_xpath '(//p)[1]/sup[starts-with(@class,"footnote")]', output, 3 assert_xpath '(//p)[1]/sup[@class="footnote"]', output, 1 assert_xpath '(//p)[1]/sup[@class="footnoteref"]', output, 2 assert_xpath '(//p)[1]/sup[starts-with(@class,"footnote")]/a[@class="footnote"][text()="1"]', output, 3 assert_css '#footnotes .footnote', output, 1 end test 'should not register footnote with id and text if id already registered' do input = <<~'EOS' :fn-notable-text: footnote:id[about this text] notable text.{fn-notable-text} more notable text.{fn-notable-text} EOS output = convert_string_to_embedded input assert_xpath '(//p)[1]/sup[@class="footnote"]', output, 1 assert_xpath '(//p)[2]/sup[@class="footnoteref"]', output, 1 assert_css '#footnotes .footnote', output, 1 end test 'should not resolve an inline footnote macro missing both id and text' do input = <<~'EOS' The footnote:[] macro can be used for defining and referencing footnotes. The footnoteref:[] macro is now deprecated. 
EOS output = convert_string_to_embedded input assert_includes output, 'The footnote:[] macro' assert_includes output, 'The footnoteref:[] macro' end test 'inline footnote macro can define a numeric id without conflicting with auto-generated ID' do input = 'You can download the software from the product page.footnote:1[Option only available if you have an active subscription.]' output = convert_string_to_embedded input assert_css '#_footnote_1', output, 1 assert_css 'p sup#_footnote_1', output, 1 assert_css 'p a#_footnoteref_1', output, 1 assert_css 'p a[href="#_footnotedef_1"]', output, 1 assert_css '#footnotes #_footnotedef_1', output, 1 end test 'inline footnote macro can define an id that uses any word characters in Unicode' do input = <<~'EOS' L'origine du mot forêt{blank}footnote:forêt[un massif forestier] est complexe. Qu'est-ce qu'une forêt ?{blank}footnote:forêt[] EOS output = convert_string_to_embedded input assert_css '#_footnote_forêt', output, 1 assert_css '#_footnotedef_1', output, 1 assert_xpath '//a[@class="footnote"][text()="1"]', output, 2 end test 'should be able to reference a bibliography entry in a footnote' do input = <<~'EOS' Choose a design pattern.footnote:[See <> to find a collection of design patterns.] [bibliography] == Bibliography * [[[gof]]] Erich Gamma, et al. _Design Patterns: Elements of Reusable Object-Oriented Software._ Addison-Wesley. 1994. EOS result = convert_string_to_embedded input assert_include '1. See [gof] to find a collection of design patterns.', result end test 'footnotes in headings are expected to be numbered out of sequence' do input = <<~'EOS' == Section 1 para.footnote:[first footnote] == Section 2footnote:[second footnote] para.footnote:[third footnote] EOS result = convert_string_to_embedded input footnote_refs = xmlnodes_at_css 'a.footnote', result footnote_defs = xmlnodes_at_css 'div.footnote', result assert_equal 3, footnote_refs.length assert_equal %w(1 1 2), footnote_refs.map(&:text) assert_equal 3, footnote_defs.length assert_equal ['1. second footnote', '1. first footnote', '2. 
third footnote'], footnote_defs.map(&:text).map(&:strip) end test 'a single-line index term macro with a primary term should be registered as an index reference' do sentence = "The tiger (Panthera tigris) is the largest cat species.\n" macros = ['indexterm:[Tigers]', '(((Tigers)))'] macros.each do |macro| para = block_from_string("#{sentence}#{macro}") output = para.sub_macros(para.source) assert_equal sentence, output #assert_equal 1, para.document.catalog[:indexterms].size #assert_equal ['Tigers'], para.document.catalog[:indexterms].first end end test 'a single-line index term macro with primary and secondary terms should be registered as an index reference' do sentence = "The tiger (Panthera tigris) is the largest cat species.\n" macros = ['indexterm:[Big cats, Tigers]', '(((Big cats, Tigers)))'] macros.each do |macro| para = block_from_string("#{sentence}#{macro}") output = para.sub_macros(para.source) assert_equal sentence, output #assert_equal 1, para.document.catalog[:indexterms].size #assert_equal ['Big cats', 'Tigers'], para.document.catalog[:indexterms].first end end test 'a single-line index term macro with primary, secondary and tertiary terms should be registered as an index reference' do sentence = "The tiger (Panthera tigris) is the largest cat species.\n" macros = ['indexterm:[Big cats,Tigers , Panthera tigris]', '(((Big cats,Tigers , Panthera tigris)))'] macros.each do |macro| para = block_from_string("#{sentence}#{macro}") output = para.sub_macros(para.source) assert_equal sentence, output #assert_equal 1, para.document.catalog[:indexterms].size #assert_equal ['Big cats', 'Tigers', 'Panthera tigris'], para.document.catalog[:indexterms].first end end test 'a multi-line index term macro should be compacted and registered as an index reference' do sentence = "The tiger (Panthera tigris) is the largest cat species.\n" macros = ["indexterm:[Panthera\ntigris]", "(((Panthera\ntigris)))"] macros.each do |macro| para = block_from_string("#{sentence}#{macro}") output = para.sub_macros(para.source) assert_equal sentence, output #assert_equal 1, para.document.catalog[:indexterms].size #assert_equal ['Panthera tigris'], para.document.catalog[:indexterms].first end end test 'should escape concealed index term if second bracket is preceded by a backslash' do input = %[National Institute of Science and Technology (#{BACKSLASH}((NIST)))] doc = document_from_string input, standalone: false output = doc.convert assert_xpath '//p[text()="National Institute of Science and Technology (((NIST)))"]', output, 1 #assert doc.catalog[:indexterms].empty? end test 'should only escape enclosing brackets if concealed index term is preceded by a backslash' do input = %[National Institute of Science and Technology #{BACKSLASH}(((NIST)))] doc = document_from_string input, standalone: false output = doc.convert assert_xpath '//p[text()="National Institute of Science and Technology (NIST)"]', output, 1 #term = doc.catalog[:indexterms].first #assert_equal 1, term.size #assert_equal 'NIST', term.first end test 'should not split index terms on commas inside of quoted terms' do inputs = [] inputs.push <<~'EOS' Tigers are big, scary cats. indexterm:[Tigers, "[Big\], scary cats"] EOS inputs.push <<~'EOS' Tigers are big, scary cats. 
(((Tigers, "[Big], scary cats"))) EOS inputs.each do |input| para = block_from_string input output = para.sub_macros(para.source) assert_equal input.lines.first, output #assert_equal 1, para.document.catalog[:indexterms].size #terms = para.document.catalog[:indexterms].first #assert_equal 2, terms.size #assert_equal 'Tigers', terms.first #assert_equal '[Big], scary cats', terms.last end end test 'normal substitutions are performed on an index term macro' do sentence = "The tiger (Panthera tigris) is the largest cat species.\n" macros = ['indexterm:[*Tigers*]', '(((*Tigers*)))'] macros.each do |macro| para = block_from_string("#{sentence}#{macro}") output = para.apply_subs(para.source) assert_equal sentence, output #assert_equal 1, para.document.catalog[:indexterms].size #assert_equal ['Tigers'], para.document.catalog[:indexterms].first end end test 'registers multiple index term macros' do sentence = "The tiger (Panthera tigris) is the largest cat species." macros = "(((Tigers)))\n(((Animals,Cats)))" para = block_from_string("#{sentence}\n#{macros}") output = para.sub_macros(para.source) assert_equal sentence, output.rstrip #assert_equal 2, para.document.catalog[:indexterms].size #assert_equal ['Tigers'], para.document.catalog[:indexterms][0] #assert_equal ['Animals', 'Cats'], para.document.catalog[:indexterms][1] end test 'an index term macro with round bracket syntax may contain round brackets in term' do sentence = "The tiger (Panthera tigris) is the largest cat species.\n" macro = '(((Tiger (Panthera tigris))))' para = block_from_string("#{sentence}#{macro}") output = para.sub_macros(para.source) assert_equal sentence, output #assert_equal 1, para.document.catalog[:indexterms].size #assert_equal ['Tiger (Panthera tigris)'], para.document.catalog[:indexterms].first end test 'visible shorthand index term macro should not consume trailing round bracket' do input = '(text with ((index term)))' expected = <<~'EOS'.chop (text with index term index term) EOS #expected_term = ['index term'] para = block_from_string input, backend: :docbook output = para.sub_macros para.source assert_equal expected, output #indexterms_table = para.document.catalog[:indexterms] #assert_equal 1, indexterms_table.size #assert_equal expected_term, indexterms_table[0] end test 'visible shorthand index term macro should not consume leading round bracket' do input = '(((index term)) for text)' expected = <<~'EOS'.chop ( index term index term for text) EOS #expected_term = ['index term'] para = block_from_string input, backend: :docbook output = para.sub_macros para.source assert_equal expected, output #indexterms_table = para.document.catalog[:indexterms] #assert_equal 1, indexterms_table.size #assert_equal expected_term, indexterms_table[0] end test 'an index term macro with square bracket syntax may contain square brackets in term' do sentence = "The tiger (Panthera tigris) is the largest cat species.\n" macro = 'indexterm:[Tiger [Panthera tigris\\]]' para = block_from_string("#{sentence}#{macro}") output = para.sub_macros(para.source) assert_equal sentence, output #assert_equal 1, para.document.catalog[:indexterms].size #assert_equal ['Tiger [Panthera tigris]'], para.document.catalog[:indexterms].first end test 'a single-line index term 2 macro should be registered as an index reference and retain term inline' do sentence = 'The tiger (Panthera tigris) is the largest cat species.' 
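# NOTE the double-parenthesis shorthand ((term)) and the indexterm2 macro both keep the
# term visible in the flow text while registering an index reference, in contrast to the
# concealed (((term))) / indexterm forms exercised in the preceding tests.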
macros = ['The indexterm2:[tiger] (Panthera tigris) is the largest cat species.', 'The ((tiger)) (Panthera tigris) is the largest cat species.'] macros.each do |macro| para = block_from_string(macro) output = para.sub_macros(para.source) assert_equal sentence, output #assert_equal 1, para.document.catalog[:indexterms].size #assert_equal ['tiger'], para.document.catalog[:indexterms].first end end test 'a multi-line index term 2 macro should be compacted and registered as an index reference and retain term inline' do sentence = 'The panthera tigris is the largest cat species.' macros = ["The indexterm2:[ panthera\ntigris ] is the largest cat species.", "The (( panthera\ntigris )) is the largest cat species."] macros.each do |macro| para = block_from_string(macro) output = para.sub_macros(para.source) assert_equal sentence, output #assert_equal 1, para.document.catalog[:indexterms].size #assert_equal ['panthera tigris'], para.document.catalog[:indexterms].first end end test 'registers multiple index term 2 macros' do sentence = "The ((tiger)) (Panthera tigris) is the largest ((cat)) species." para = block_from_string(sentence) output = para.sub_macros(para.source) assert_equal 'The tiger (Panthera tigris) is the largest cat species.', output #assert_equal 2, para.document.catalog[:indexterms].size #assert_equal ['tiger'], para.document.catalog[:indexterms][0] #assert_equal ['cat'], para.document.catalog[:indexterms][1] end test 'should escape visible index term if preceded by a backslash' do sentence = "The #{BACKSLASH}((tiger)) (Panthera tigris) is the largest #{BACKSLASH}((cat)) species." para = block_from_string(sentence) output = para.sub_macros(para.source) assert_equal 'The ((tiger)) (Panthera tigris) is the largest ((cat)) species.', output #assert para.document.catalog[:indexterms].empty? end test 'normal substitutions are performed on an index term 2 macro' do sentence = 'The ((*tiger*)) (Panthera tigris) is the largest cat species.' para = block_from_string sentence output = para.apply_subs(para.source) assert_equal 'The tiger (Panthera tigris) is the largest cat species.', output #assert_equal 1, para.document.catalog[:indexterms].size #assert_equal ['tiger'], para.document.catalog[:indexterms].first end test 'index term 2 macro with round bracket syntex should not interfer with index term macro with round bracket syntax' do sentence = "The ((panthera tigris)) is the largest cat species.\n(((Big cats,Tigers)))" para = block_from_string sentence output = para.sub_macros(para.source) assert_equal "The panthera tigris is the largest cat species.\n", output #terms = para.document.catalog[:indexterms] #assert_equal 2, terms.size #assert_equal ['panthera tigris'], terms[0] #assert_equal ['Big cats', 'Tigers'], terms[1] end test 'should parse visible shorthand index term with see and seealso' do sentence = '((Flash >> HTML 5)) has been supplanted by ((HTML 5 &> CSS 3 &> SVG)).' output = convert_string_to_embedded sentence, backend: 'docbook' indexterm_flash = <<~'EOS'.chop Flash HTML 5 EOS indexterm_html5 = <<~'EOS'.chop HTML 5 CSS 3 SVG EOS assert_includes output, indexterm_flash assert_includes output, indexterm_html5 end test 'should parse concealed shorthand index term with see and seealso' do sentence = 'Flash(((Flash >> HTML 5))) has been supplanted by HTML 5(((HTML 5 &> CSS 3 &> SVG))).' 
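# NOTE in the shorthand index term syntax, '>>' introduces a "see" reference and '&>' a
# "see also" reference; with the docbook backend these are expected to surface as
# see/seealso children of the generated indexterm element.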
output = convert_string_to_embedded sentence, backend: 'docbook' indexterm_flash = <<~'EOS'.chop Flash HTML 5 EOS indexterm_html5 = <<~'EOS'.chop HTML 5 CSS 3 SVG EOS assert_includes output, indexterm_flash assert_includes output, indexterm_html5 end test 'should parse visible index term macro with see and seealso' do sentence = 'indexterm2:[Flash,see=HTML 5] has been supplanted by indexterm2:[HTML 5,see-also="CSS 3, SVG"].' output = convert_string_to_embedded sentence, backend: 'docbook' indexterm_flash = <<~'EOS'.chop Flash HTML 5 EOS indexterm_html5 = <<~'EOS'.chop HTML 5 CSS 3 SVG EOS assert_includes output, indexterm_flash assert_includes output, indexterm_html5 end test 'should parse concealed index term macro with see and seealso' do sentence = 'Flashindexterm:[Flash,see=HTML 5] has been supplanted by HTML 5indexterm:[HTML 5,see-also="CSS 3, SVG"].' output = convert_string_to_embedded sentence, backend: 'docbook' indexterm_flash = <<~'EOS'.chop Flash HTML 5 EOS indexterm_html5 = <<~'EOS'.chop HTML 5 CSS 3 SVG EOS assert_includes output, indexterm_flash assert_includes output, indexterm_html5 end context 'Button macro' do test 'btn macro' do para = block_from_string('btn:[Save]', attributes: { 'experimental' => '' }) assert_equal %q{Save}, para.sub_macros(para.source) end test 'btn macro that spans multiple lines' do para = block_from_string(%(btn:[Rebase and\nmerge]), attributes: { 'experimental' => '' }) assert_equal %q{Rebase and merge}, para.sub_macros(para.source) end test 'btn macro for docbook backend' do para = block_from_string('btn:[Save]', backend: 'docbook', attributes: { 'experimental' => '' }) assert_equal %q{Save}, para.sub_macros(para.source) end end context 'Keyboard macro' do test 'kbd macro with single key' do para = block_from_string('kbd:[F3]', attributes: { 'experimental' => '' }) assert_equal %q{F3}, para.sub_macros(para.source) end test 'kbd macro with single backslash key' do para = block_from_string("kbd:[#{BACKSLASH} ]", attributes: { 'experimental' => '' }) assert_equal %q(\), para.sub_macros(para.source) end test 'kbd macro with single key, docbook backend' do para = block_from_string('kbd:[F3]', backend: 'docbook', attributes: { 'experimental' => '' }) assert_equal %q{F3}, para.sub_macros(para.source) end test 'kbd macro with key combination' do para = block_from_string('kbd:[Ctrl+Shift+T]', attributes: { 'experimental' => '' }) assert_equal %q{Ctrl+Shift+T}, para.sub_macros(para.source) end test 'kbd macro with key combination that spans multiple lines' do para = block_from_string(%(kbd:[Ctrl +\nT]), attributes: { 'experimental' => '' }) assert_equal %q{Ctrl+T}, para.sub_macros(para.source) end test 'kbd macro with key combination, docbook backend' do para = block_from_string('kbd:[Ctrl+Shift+T]', backend: 'docbook', attributes: { 'experimental' => '' }) assert_equal %q{CtrlShiftT}, para.sub_macros(para.source) end test 'kbd macro with key combination delimited by pluses with spaces' do para = block_from_string('kbd:[Ctrl + Shift + T]', attributes: { 'experimental' => '' }) assert_equal %q{Ctrl+Shift+T}, para.sub_macros(para.source) end test 'kbd macro with key combination delimited by commas' do para = block_from_string('kbd:[Ctrl,Shift,T]', attributes: { 'experimental' => '' }) assert_equal %q{Ctrl+Shift+T}, para.sub_macros(para.source) end test 'kbd macro with key combination delimited by commas with spaces' do para = block_from_string('kbd:[Ctrl, Shift, T]', attributes: { 'experimental' => '' }) assert_equal %q{Ctrl+Shift+T}, 
para.sub_macros(para.source) end test 'kbd macro with key combination delimited by plus containing a comma key' do para = block_from_string('kbd:[Ctrl+,]', attributes: { 'experimental' => '' }) assert_equal %q{Ctrl+,}, para.sub_macros(para.source) end test 'kbd macro with key combination delimited by commas containing a plus key' do para = block_from_string('kbd:[Ctrl, +, Shift]', attributes: { 'experimental' => '' }) assert_equal %q{Ctrl+++Shift}, para.sub_macros(para.source) end test 'kbd macro with key combination where last key matches plus delimiter' do para = block_from_string('kbd:[Ctrl + +]', attributes: { 'experimental' => '' }) assert_equal %q{Ctrl++}, para.sub_macros(para.source) end test 'kbd macro with key combination where last key matches comma delimiter' do para = block_from_string('kbd:[Ctrl, ,]', attributes: { 'experimental' => '' }) assert_equal %q{Ctrl+,}, para.sub_macros(para.source) end test 'kbd macro with key combination containing escaped bracket' do para = block_from_string('kbd:[Ctrl + \]]', attributes: { 'experimental' => '' }) assert_equal %q{Ctrl+]}, para.sub_macros(para.source) end test 'kbd macro with key combination ending in backslash' do para = block_from_string("kbd:[Ctrl + #{BACKSLASH} ]", attributes: { 'experimental' => '' }) assert_equal %q(Ctrl+\\), para.sub_macros(para.source) end test 'kbd macro looks for delimiter beyond first character' do para = block_from_string('kbd:[,te]', attributes: { 'experimental' => '' }) assert_equal %q(,te), para.sub_macros(para.source) end test 'kbd macro restores trailing delimiter as key value' do para = block_from_string('kbd:[te,]', attributes: { 'experimental' => '' }) assert_equal %q(te,), para.sub_macros(para.source) end end context 'Menu macro' do test 'should process menu using macro sytnax' do para = block_from_string('menu:File[]', attributes: { 'experimental' => '' }) assert_equal %q{File}, para.sub_macros(para.source) end test 'should process menu for docbook backend' do para = block_from_string('menu:File[]', backend: 'docbook', attributes: { 'experimental' => '' }) assert_equal %q{File}, para.sub_macros(para.source) end test 'should process multiple menu macros in same line' do para = block_from_string('menu:File[] and menu:Edit[]', attributes: { 'experimental' => '' }) assert_equal 'File and Edit', para.sub_macros(para.source) end test 'should process menu with menu item using macro syntax' do para = block_from_string('menu:File[Save As…]', attributes: { 'experimental' => '' }) assert_equal %q{File  Save As…}, para.sub_macros(para.source) end test 'should process menu macro that spans multiple lines' do input = %(menu:Preferences[Compile\non\nSave]) para = block_from_string input, attributes: { 'experimental' => '' } assert_equal %(Preferences  Compile\non\nSave), para.sub_macros(para.source) end test 'should unescape escaped closing bracket in menu macro' do input = 'menu:Preferences[Compile [on\\] Save]' para = block_from_string input, attributes: { 'experimental' => '' } assert_equal %q(Preferences  Compile [on] Save), para.sub_macros(para.source) end test 'should process menu with menu item using macro syntax when fonts icons are enabled' do para = block_from_string('menu:Tools[More Tools > Extensions]', attributes: { 'experimental' => '', 'icons' => 'font' }) assert_equal %q{Tools  More Tools  Extensions}, para.sub_macros(para.source) end test 'should process menu with menu item for docbook backend' do para = block_from_string('menu:File[Save As…]', backend: 'docbook', attributes: { 'experimental' 
=> '' }) assert_equal %q{File Save As…}, para.sub_macros(para.source) end test 'should process menu with menu item in submenu using macro syntax' do para = block_from_string('menu:Tools[Project > Build]', attributes: { 'experimental' => '' }) assert_equal %q{Tools  Project  Build}, para.sub_macros(para.source) end test 'should process menu with menu item in submenu for docbook backend' do para = block_from_string('menu:Tools[Project > Build]', backend: 'docbook', attributes: { 'experimental' => '' }) assert_equal %q{Tools Project Build}, para.sub_macros(para.source) end test 'should process menu with menu item in submenu using macro syntax and comma delimiter' do para = block_from_string('menu:Tools[Project, Build]', attributes: { 'experimental' => '' }) assert_equal %q{Tools  Project  Build}, para.sub_macros(para.source) end test 'should process menu with menu item using inline syntax' do para = block_from_string('"File > Save As…"', attributes: { 'experimental' => '' }) assert_equal %q{File  Save As…}, para.sub_macros(para.source) end test 'should process menu with menu item in submenu using inline syntax' do para = block_from_string('"Tools > Project > Build"', attributes: { 'experimental' => '' }) assert_equal %q{Tools  Project  Build}, para.sub_macros(para.source) end test 'inline menu syntax should not match closing quote of XML attribute' do para = block_from_string('<node>r', attributes: { 'experimental' => '' }) assert_equal %q{<node>r}, para.sub_macros(para.source) end test 'should process menu macro with items containing multibyte characters' do para = block_from_string('menu:视图[放大, 重置]', attributes: { 'experimental' => '' }) assert_equal %q{视图  放大  重置}, para.sub_macros(para.source) end test 'should process inline menu with items containing multibyte characters' do para = block_from_string('"视图 > 放大 > 重置"', attributes: { 'experimental' => '' }) assert_equal %q{视图  放大  重置}, para.sub_macros(para.source) end test 'should process a menu macro with a target that begins with a character reference' do para = block_from_string('menu:⋮[More Tools, Extensions]', attributes: { 'experimental' => '' }) assert_equal %q{  More Tools  Extensions}, para.sub_macros(para.source) end test 'should not process a menu macro with a target that ends with a space' do input = 'menu:foo [bar] menu:File[Save]' para = block_from_string input, attributes: { 'experimental' => '' } result = para.sub_macros para.source assert_xpath '/span[@class="menuseq"]', result, 1 assert_xpath '//b[@class="menu"][text()="File"]', result, 1 end test 'should process an inline menu that begins with a character reference' do para = block_from_string('"⋮ > More Tools > Extensions"', attributes: { 'experimental' => '' }) assert_equal %q{  More Tools  Extensions}, para.sub_macros(para.source) end end end context 'Passthroughs' do test 'collect inline triple plus passthroughs' do para = block_from_string('+++inline code+++') result = para.extract_passthroughs(para.source) passthroughs = para.instance_variable_get :@passthroughs assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result assert_equal 1, passthroughs.size assert_equal 'inline code', passthroughs[0][:text] assert_empty passthroughs[0][:subs] end test 'collect multi-line inline triple plus passthroughs' do para = block_from_string("+++inline\ncode+++") result = para.extract_passthroughs(para.source) passthroughs = para.instance_variable_get :@passthroughs assert_equal Asciidoctor::Substitutors::PASS_START + '0' + 
Asciidoctor::Substitutors::PASS_END, result assert_equal 1, passthroughs.size assert_equal "inline\ncode", passthroughs[0][:text] assert_empty passthroughs[0][:subs] end test 'collect inline double dollar passthroughs' do para = block_from_string('$${code}$$') result = para.extract_passthroughs(para.source) passthroughs = para.instance_variable_get :@passthroughs assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result assert_equal 1, passthroughs.size assert_equal '{code}', passthroughs[0][:text] assert_equal [:specialcharacters], passthroughs[0][:subs] end test 'collect inline double plus passthroughs' do para = block_from_string('++{code}++') result = para.extract_passthroughs(para.source) passthroughs = para.instance_variable_get :@passthroughs assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result assert_equal 1, passthroughs.size assert_equal '{code}', passthroughs[0][:text] assert_equal [:specialcharacters], passthroughs[0][:subs] end test 'should not crash if role on passthrough is enclosed in quotes' do %W( ['role']#{BACKSLASH}++This++++++++++++ ['role']#{BACKSLASH}+++++++++This++++++++++++ ).each do |input| para = block_from_string input assert_includes para.content, %() end end test 'should allow inline double plus passthrough to be escaped using backslash' do para = block_from_string("you need to replace `int a = n#{BACKSLASH}++;` with `int a = ++n;`!") result = para.apply_subs para.source assert_equal 'you need to replace int a = n++; with int a = ++n;!', result end test 'should allow inline double plus passthrough with attributes to be escaped using backslash' do para = block_from_string("=[attrs]#{BACKSLASH}#{BACKSLASH}++text++") result = para.apply_subs para.source assert_equal '=[attrs]++text++', result end test 'collect multi-line inline double dollar passthroughs' do para = block_from_string("$$\n{code}\n$$") result = para.extract_passthroughs(para.source) passthroughs = para.instance_variable_get :@passthroughs assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result assert_equal 1, passthroughs.size assert_equal "\n{code}\n", passthroughs[0][:text] assert_equal [:specialcharacters], passthroughs[0][:subs] end test 'collect multi-line inline double plus passthroughs' do para = block_from_string("++\n{code}\n++") result = para.extract_passthroughs(para.source) passthroughs = para.instance_variable_get :@passthroughs assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result assert_equal 1, passthroughs.size assert_equal "\n{code}\n", passthroughs[0][:text] assert_equal [:specialcharacters], passthroughs[0][:subs] end test 'collect passthroughs from inline pass macro' do para = block_from_string(%Q{pass:specialcharacters,quotes[['code'\\]]}) result = para.extract_passthroughs(para.source) passthroughs = para.instance_variable_get :@passthroughs assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result assert_equal 1, passthroughs.size assert_equal %q{['code']}, passthroughs[0][:text] assert_equal [:specialcharacters, :quotes], passthroughs[0][:subs] end test 'collect multi-line passthroughs from inline pass macro' do para = block_from_string(%Q{pass:specialcharacters,quotes[['more\ncode'\\]]}) result = para.extract_passthroughs(para.source) passthroughs = para.instance_variable_get :@passthroughs assert_equal Asciidoctor::Substitutors::PASS_START 
+ '0' + Asciidoctor::Substitutors::PASS_END, result assert_equal 1, passthroughs.size assert_equal %Q{['more\ncode']}, passthroughs[0][:text] assert_equal [:specialcharacters, :quotes], passthroughs[0][:subs] end test 'should find and replace placeholder duplicated by substitution' do input = %q(+first passthrough+ followed by link:$$http://example.com/__u_no_format_me__$$[] with passthrough) result = convert_inline_string input assert_equal 'first passthrough followed by http://example.com/__u_no_format_me__ with passthrough', result end test 'resolves sub shorthands on inline pass macro' do para = block_from_string 'pass:q,a[*<{backend}>*]' result = para.extract_passthroughs para.source passthroughs = para.instance_variable_get :@passthroughs assert_equal 1, passthroughs.size assert_equal [:quotes, :attributes], passthroughs[0][:subs] result = para.restore_passthroughs result assert_equal '', result end test 'inline pass macro supports incremental subs' do para = block_from_string 'pass:n,-a[<{backend}>]' result = para.extract_passthroughs para.source passthroughs = para.instance_variable_get :@passthroughs assert_equal 1, passthroughs.size result = para.restore_passthroughs result assert_equal '<{backend}>', result end test 'should not recognize pass macro with invalid substitution list' do [',', '42', 'a,'].each do |subs| para = block_from_string %(pass:#{subs}[foobar]) result = para.extract_passthroughs para.source assert_equal %(pass:#{subs}[foobar]), result end end test 'should allow content of inline pass macro to be empty' do para = block_from_string 'pass:[]' result = para.extract_passthroughs para.source passthroughs = para.instance_variable_get :@passthroughs assert_equal 1, passthroughs.size assert_equal '', para.restore_passthroughs(result) end # NOTE placeholder is surrounded by text to prevent reader from stripping trailing boundary char (unique to test scenario) test 'restore inline passthroughs without subs' do para = block_from_string("some #{Asciidoctor::Substitutors::PASS_START}" + '0' + "#{Asciidoctor::Substitutors::PASS_END} to study") para.extract_passthroughs '' passthroughs = para.instance_variable_get :@passthroughs passthroughs[0] = { text: 'inline code', subs: [] } result = para.restore_passthroughs(para.source) assert_equal "some inline code to study", result end # NOTE placeholder is surrounded by text to prevent reader from stripping trailing boundary char (unique to test scenario) test 'restore inline passthroughs with subs' do para = block_from_string("some #{Asciidoctor::Substitutors::PASS_START}" + '0' + "#{Asciidoctor::Substitutors::PASS_END} to study in the #{Asciidoctor::Substitutors::PASS_START}" + '1' + "#{Asciidoctor::Substitutors::PASS_END} programming language") para.extract_passthroughs '' passthroughs = para.instance_variable_get :@passthroughs passthroughs[0] = { text: '{code}', subs: [:specialcharacters] } passthroughs[1] = { text: '{language}', subs: [:specialcharacters] } result = para.restore_passthroughs(para.source) assert_equal 'some <code>{code}</code> to study in the {language} programming language', result end test 'should restore nested passthroughs' do result = convert_inline_string %q(+Sometimes you feel pass:q[`mono`].+ Sometimes you +$$don't$$+.) assert_equal %q(Sometimes you feel mono. Sometimes you don't.), result end test 'should not fail to restore remaining passthroughs after processing inline passthrough with macro substitution' do input = 'pass:m[.] pass:[.]' assert_equal '. 
.', (convert_inline_string input) end test 'should honor role on double plus passthrough' do result = convert_inline_string 'Print the version using [var]++{asciidoctor-version}++.' assert_equal 'Print the version using {asciidoctor-version}.', result end test 'complex inline passthrough macro' do text_to_escape = %q{[(] <'basic form'> <'logical operator'> <'basic form'> [)]} para = block_from_string %($$#{text_to_escape}$$) para.extract_passthroughs(para.source) passthroughs = para.instance_variable_get :@passthroughs assert_equal 1, passthroughs.size assert_equal text_to_escape, passthroughs[0][:text] text_to_escape_escaped = %q{[(\] <'basic form'> <'logical operator'> <'basic form'> [)\]} para = block_from_string %(pass:specialcharacters[#{text_to_escape_escaped}]) para.extract_passthroughs(para.source) passthroughs = para.instance_variable_get :@passthroughs assert_equal 1, passthroughs.size assert_equal text_to_escape, passthroughs[0][:text] end test 'inline pass macro with a composite sub' do para = block_from_string %(pass:verbatim[<{backend}>]) assert_equal '<{backend}>', para.content end context 'Math macros' do test 'should passthrough text in asciimath macro and surround with AsciiMath delimiters' do using_memory_logger do |logger| input = 'asciimath:[x/x={(1,if x!=0),(text{undefined},if x=0):}]' para = block_from_string input, attributes: { 'attribute-missing' => 'warn' } assert_equal '\$x/x={(1,if x!=0),(text{undefined},if x=0):}\$', para.content assert logger.empty? end end test 'should not recognize asciimath macro with no content' do input = 'asciimath:[]' para = block_from_string input assert_equal 'asciimath:[]', para.content end test 'should perform specialcharacters subs on asciimath macro content in html backend by default' do input = 'asciimath:[a < b]' para = block_from_string input assert_equal '\$a < b\$', para.content end test 'should convert contents of asciimath macro to MathML in DocBook output if asciimath gem is available' do asciimath_available = !(Asciidoctor::Helpers.require_library 'asciimath', true, :ignore).nil? input = 'asciimath:[a < b]' expected = 'a<b' using_memory_logger do |logger| para = block_from_string input, backend: :docbook actual = para.content if asciimath_available assert_equal expected, actual assert_equal :loaded, para.document.converter.instance_variable_get(:@asciimath_status) else assert_message logger, :WARN, 'optional gem \'asciimath\' is not available. Functionality disabled.' assert_equal :unavailable, para.document.converter.instance_variable_get(:@asciimath_status) end end end test 'should not perform specialcharacters subs on asciimath macro content in Docbook output if asciimath gem not available' do asciimath_available = !(Asciidoctor::Helpers.require_library 'asciimath', true, :ignore).nil? 
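# NOTE the guard above probes for the optional asciimath gem without raising; the code
# below then forces the converter's asciimath status to :unavailable (temporarily hiding
# ::AsciiMath when the gem is loaded) so the fallback conversion path is exercised.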
input = 'asciimath:[a < b]' para = block_from_string input, backend: :docbook para.document.converter.instance_variable_set :@asciimath_status, :unavailable if asciimath_available old_asciimath = ::AsciiMath Object.send :remove_const, 'AsciiMath' end assert_equal '', para.content ::AsciiMath = old_asciimath if asciimath_available end test 'should honor explicit subslist on asciimath macro' do input = 'asciimath:attributes[{expr}]' para = block_from_string input, attributes: { 'expr' => 'x != 0' } assert_equal '\$x != 0\$', para.content end test 'should passthrough text in latexmath macro and surround with LaTeX math delimiters' do input = 'latexmath:[C = \alpha + \beta Y^{\gamma} + \epsilon]' para = block_from_string input assert_equal '\(C = \alpha + \beta Y^{\gamma} + \epsilon\)', para.content end test 'should strip legacy LaTeX math delimiters around latexmath content if present' do input = 'latexmath:[$C = \alpha + \beta Y^{\gamma} + \epsilon$]' para = block_from_string input assert_equal '\(C = \alpha + \beta Y^{\gamma} + \epsilon\)', para.content end test 'should not recognize latexmath macro with no content' do input = 'latexmath:[]' para = block_from_string input assert_equal 'latexmath:[]', para.content end test 'should unescape escaped square bracket in equation' do input = 'latexmath:[\sqrt[3\]{x}]' para = block_from_string input assert_equal '\(\sqrt[3]{x}\)', para.content end test 'should perform specialcharacters subs on latexmath macro in html backend by default' do input = 'latexmath:[a < b]' para = block_from_string input assert_equal '\(a < b\)', para.content end test 'should not perform specialcharacters subs on latexmath macro content in docbook backend by default' do input = 'latexmath:[a < b]' para = block_from_string input, backend: :docbook assert_equal '', para.content end test 'should honor explicit subslist on latexmath macro' do input = 'latexmath:attributes[{expr}]' para = block_from_string input, attributes: { 'expr' => '\sqrt{4} = 2' } assert_equal '\(\sqrt{4} = 2\)', para.content end test 'should passthrough math macro inside another passthrough' do input = 'the text `asciimath:[x = y]` should be passed through as +literal+ text' para = block_from_string input, attributes: { 'compat-mode' => '' } assert_equal 'the text asciimath:[x = y] should be passed through as literal text', para.content input = 'the text [x-]`asciimath:[x = y]` should be passed through as `literal` text' para = block_from_string input assert_equal 'the text asciimath:[x = y] should be passed through as literal text', para.content input = 'the text `+asciimath:[x = y]+` should be passed through as `literal` text' para = block_from_string input assert_equal 'the text asciimath:[x = y] should be passed through as literal text', para.content end test 'should support constrained passthrough in middle of monospace span' do input = 'a `foo +bar+ baz` kind of thing' para = block_from_string input assert_equal 'a foo bar baz kind of thing', para.content end test 'should support constrained passthrough in monospace span preceded by escaped boxed attrlist with transitional role' do input = %(#{BACKSLASH}[x-]`foo +bar+ baz`) para = block_from_string input assert_equal '[x-]foo bar baz', para.content end test 'should treat monospace phrase with escaped boxed attrlist with transitional role as monospace' do input = %(#{BACKSLASH}[x-]`*foo* +bar+ baz`) para = block_from_string input assert_equal '[x-]foo bar baz', para.content end test 'should ignore escaped attrlist with transitional role on 
monospace phrase if not proceeded by [' do input = %(#{BACKSLASH}x-]`*foo* +bar+ baz`) para = block_from_string input assert_equal %(#{BACKSLASH}x-]foo bar baz), para.content end test 'should not process passthrough inside transitional literal monospace span' do input = 'a [x-]`foo +bar+ baz` kind of thing' para = block_from_string input assert_equal 'a foo +bar+ baz kind of thing', para.content end test 'should support constrained passthrough in monospace phrase with attrlist' do input = '[.role]`foo +bar+ baz`' para = block_from_string input assert_equal 'foo bar baz', para.content end test 'should support attrlist on a literal monospace phrase' do input = '[.baz]`+foo--bar+`' para = block_from_string input assert_equal 'foo--bar', para.content end test 'should not process an escaped passthrough macro inside a monospaced phrase' do input = 'use the `\pass:c[]` macro' para = block_from_string input assert_equal 'use the pass:c[] macro', para.content end test 'should not process an escaped passthrough macro inside a monospaced phrase with attributes' do input = 'use the [syntax]`\pass:c[]` macro' para = block_from_string input assert_equal 'use the pass:c[] macro', para.content end test 'should honor an escaped single plus passthrough inside a monospaced phrase' do input = 'use `\+{author}+` to show an attribute reference' para = block_from_string input, attributes: { 'author' => 'Dan' } assert_equal 'use +Dan+ to show an attribute reference', para.content end test 'should not recognize stem macro with no content' do input = 'stem:[]' para = block_from_string input assert_equal input, para.content end test 'should passthrough text in stem macro and surround with AsciiMath delimiters if stem attribute is asciimath, empty, or not set' do [ {}, { 'stem' => '' }, { 'stem' => 'asciimath' }, { 'stem' => 'bogus' }, ].each do |attributes| using_memory_logger do |logger| input = 'stem:[x/x={(1,if x!=0),(text{undefined},if x=0):}]' para = block_from_string input, attributes: (attributes.merge 'attribute-missing' => 'warn') assert_equal '\$x/x={(1,if x!=0),(text{undefined},if x=0):}\$', para.content assert logger.empty? 
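# NOTE every attribute set in this loop (stem unset, empty, 'asciimath', or an
# unrecognized value) is expected to fall back to AsciiMath handling, so the stem
# content is wrapped in \$ ... \$ delimiters and no missing-attribute warning is logged.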
end end end test 'should passthrough text in stem macro and surround with LaTeX math delimiters if stem attribute is latexmath, latex, or tex' do [ { 'stem' => 'latexmath' }, { 'stem' => 'latex' }, { 'stem' => 'tex' }, ].each do |attributes| input = 'stem:[C = \alpha + \beta Y^{\gamma} + \epsilon]' para = block_from_string input, attributes: attributes assert_equal '\(C = \alpha + \beta Y^{\gamma} + \epsilon\)', para.content end end test 'should apply substitutions specified on stem macro' do ['stem:c,a[sqrt(x) <=> {solve-for-x}]', 'stem:n,-r[sqrt(x) <=> {solve-for-x}]'].each do |input| para = block_from_string input, attributes: { 'stem' => 'asciimath', 'solve-for-x' => '13' } assert_equal '\$sqrt(x) <=> 13\$', para.content end end test 'should replace passthroughs inside stem expression' do [ ['stem:[+1+]', '\$1\$'], ['stem:[+\infty-(+\infty)]', '\$\infty-(\infty)\$'], ['stem:[+++\infty-(+\infty)++]', '\$+\infty-(+\infty)\$'], ].each do |input, expected| para = block_from_string input, attributes: { 'stem' => '', } assert_equal expected, para.content end end test 'should allow passthrough inside stem expression to be escaped' do [ ['stem:[\+] and stem:[+]', '\$+\$ and \$+\$'], ['stem:[\+1+]', '\$+1+\$'], ].each do |input, expected| para = block_from_string input, attributes: { 'stem' => '', } assert_equal expected, para.content end end test 'should not recognize stem macro with invalid substitution list' do [',', '42', 'a,'].each do |subs| input = %(stem:#{subs}[x^2]) para = block_from_string input, attributes: { 'stem' => 'asciimath' } assert_equal %(stem:#{subs}[x^2]), para.content end end end end context 'Replacements' do test 'unescapes XML entities' do para = block_from_string '< " ∴ " " >' assert_equal '< " ∴ " " >', para.apply_subs(para.source) end test 'replaces arrows' do para = block_from_string '<- -> <= => \<- \-> \<= \=>' assert_equal '← → ⇐ ⇒ <- -> <= =>', para.apply_subs(para.source) end test 'replaces dashes' do input = <<~'EOS' -- foo foo--bar foo\--bar foo -- bar foo \-- bar stuff in between -- foo stuff in between foo -- stuff in between foo -- EOS expected = <<~'EOS'.chop  — foo foo—​bar foo--bar foo — bar foo -- bar stuff in between — foo stuff in between foo — stuff in between foo —  EOS para = block_from_string input assert_equal expected, para.sub_replacements(para.source) end test 'replaces dashes between multibyte word characters' do para = block_from_string %(富--巴) expected = '富—​巴' assert_equal expected, para.sub_replacements(para.source) end test 'replaces marks' do para = block_from_string '(C) (R) (TM) \(C) \(R) \(TM)' assert_equal '© ® ™ (C) (R) (TM)', para.sub_replacements(para.source) end test 'preserves entity references' do input = '& © ✔ 😀 • 😀' result = convert_inline_string input assert_equal input, result end test 'only preserves named entities with two or more letters' do input = '& &a; >' result = convert_inline_string input assert_equal '& &a; >', result end test 'replaces punctuation' do para = block_from_string %(John's Hideout is the Whites`' place... 
foo\\'bar) assert_equal "John’s Hideout is the Whites’ place…​ foo'bar", para.sub_replacements(para.source) end test 'should replace right single quote marks' do given = [ %(`'Twas the night), %(a `'57 Chevy!), %(the whites`' place), %(the whites`'.), %(the whites`'--where the wild things are), %(the whites`'\nhave), %(It's Mary`'s little lamb.), %(consecutive single quotes '' are not modified), %(he is 6' tall), %(\\`') ] expected = [ %(’Twas the night), %(a ’57 Chevy!), %(the whites’ place), %(the whites’.), %(the whites’--where the wild things are), %(the whites’\nhave), %(It’s Mary’s little lamb.), %(consecutive single quotes '' are not modified), %(he is 6' tall), %(`') ] given.size.times do |i| para = block_from_string given[i] assert_equal expected[i], para.sub_replacements(para.source) end end end context 'Post replacements' do test 'line break inserted after line with line break character' do para = block_from_string("First line +\nSecond line") result = para.apply_subs para.lines, (para.expand_subs :post_replacements) assert_equal 'First line
<br>', result.first end test 'line break inserted after line wrap with hardbreaks enabled' do para = block_from_string("First line\nSecond line", attributes: { 'hardbreaks' => '' }) result = para.apply_subs para.lines, (para.expand_subs :post_replacements) assert_equal 'First line<br>', result.first end test 'line break character stripped from end of line with hardbreaks enabled' do para = block_from_string("First line +\nSecond line", attributes: { 'hardbreaks' => '' }) result = para.apply_subs para.lines, (para.expand_subs :post_replacements) assert_equal 'First line<br>
    ', result.first end test 'line break not inserted for single line with hardbreaks enabled' do para = block_from_string('First line', attributes: { 'hardbreaks' => '' }) result = para.apply_subs para.lines, (para.expand_subs :post_replacements) assert_equal 'First line', result.first end end context 'Resolve subs' do test 'should resolve subs for block' do doc = empty_document parse: true block = Asciidoctor::Block.new doc, :paragraph block.attributes['subs'] = 'quotes,normal' block.commit_subs assert_equal [:quotes, :specialcharacters, :attributes, :replacements, :macros, :post_replacements], block.subs end test 'should resolve specialcharacters sub as highlight for source block when source highlighter is coderay' do doc = empty_document attributes: { 'source-highlighter' => 'coderay' }, parse: true block = Asciidoctor::Block.new doc, :listing, content_model: :verbatim block.style = 'source' block.attributes['subs'] = 'specialcharacters' block.attributes['language'] = 'ruby' block.commit_subs assert_equal [:highlight], block.subs end test 'should resolve specialcharacters sub as highlight for source block when source highlighter is pygments', if: ENV['PYGMENTS_VERSION'] do doc = empty_document attributes: { 'source-highlighter' => 'pygments' }, parse: true block = Asciidoctor::Block.new doc, :listing, content_model: :verbatim block.style = 'source' block.attributes['subs'] = 'specialcharacters' block.attributes['language'] = 'ruby' block.commit_subs assert_equal [:highlight], block.subs end test 'should not replace specialcharacters sub with highlight for source block when source highlighter is not set' do doc = empty_document parse: true block = Asciidoctor::Block.new doc, :listing, content_model: :verbatim block.style = 'source' block.attributes['subs'] = 'specialcharacters' block.attributes['language'] = 'ruby' block.commit_subs assert_equal [:specialcharacters], block.subs end test 'should not use subs if subs option passed to block constructor is nil' do doc = empty_document parse: true block = Asciidoctor::Block.new doc, :paragraph, source: '*bold* _italic_', subs: nil, attributes: { 'subs' => 'quotes' } assert_empty block.subs block.commit_subs assert_empty block.subs end test 'should not use subs if subs option passed to block constructor is empty array' do doc = empty_document parse: true block = Asciidoctor::Block.new doc, :paragraph, source: '*bold* _italic_', subs: [], attributes: { 'subs' => 'quotes' } assert_empty block.subs block.commit_subs assert_empty block.subs end test 'should use subs from subs option passed to block constructor' do doc = empty_document parse: true block = Asciidoctor::Block.new doc, :paragraph, source: '*bold* _italic_', subs: [:specialcharacters], attributes: { 'subs' => 'quotes' } assert_equal [:specialcharacters], block.subs block.commit_subs assert_equal [:specialcharacters], block.subs end test 'should use subs from subs attribute if subs option is not passed to block constructor' do doc = empty_document parse: true block = Asciidoctor::Block.new doc, :paragraph, source: '*bold* _italic_', attributes: { 'subs' => 'quotes' } assert_empty block.subs # in this case, we have to call commit_subs to resolve the subs block.commit_subs assert_equal [:quotes], block.subs end test 'should use subs from subs attribute if subs option passed to block constructor is :default' do doc = empty_document parse: true block = Asciidoctor::Block.new doc, :paragraph, source: '*bold* _italic_', subs: :default, attributes: { 'subs' => 'quotes' } assert_equal 
[:quotes], block.subs block.commit_subs assert_equal [:quotes], block.subs end test 'should use built-in subs if subs option passed to block constructor is :default and subs attribute is absent' do doc = empty_document parse: true block = Asciidoctor::Block.new doc, :paragraph, source: '*bold* _italic_', subs: :default assert_equal [:specialcharacters, :quotes, :attributes, :replacements, :macros, :post_replacements], block.subs block.commit_subs assert_equal [:specialcharacters, :quotes, :attributes, :replacements, :macros, :post_replacements], block.subs end end end asciidoctor-2.0.20/test/syntax_highlighter_test.rb000066400000000000000000001267461443135032600223660ustar00rootroot00000000000000# frozen_string_literal: true require_relative 'test_helper' context 'Syntax Highlighter' do test 'should set syntax_highlighter property on document if source highlighter is set and basebackend is html' do input = <<~'EOS' :source-highlighter: coderay [source, ruby] ---- puts 'Hello, World!' ---- EOS doc = document_from_string input, safe: :safe, parse: true assert doc.basebackend? 'html' refute_nil doc.syntax_highlighter assert_kind_of Asciidoctor::SyntaxHighlighter, doc.syntax_highlighter end test 'should not set syntax_highlighter property on document if source highlighter is set and basebackend is not html' do input = <<~'EOS' :source-highlighter: coderay [source, ruby] ---- puts 'Hello, World!' ---- EOS doc = document_from_string input, safe: :safe, backend: 'docbook', parse: true refute doc.basebackend? 'html' assert_nil doc.syntax_highlighter end test 'should not set syntax_highlighter property on document if source highlighter is not set' do input = <<~'EOS' [source, ruby] ---- puts 'Hello, World!' ---- EOS doc = document_from_string input, safe: :safe, parse: true assert_nil doc.syntax_highlighter end test 'should not set syntax_highlighter property on document if syntax highlighter cannot be resolved' do input = <<~'EOS' :source-highlighter: unknown [source, ruby] ---- puts 'Hello, World!' ---- EOS doc = document_from_string input, safe: :safe, parse: true assert_nil doc.syntax_highlighter end test 'should not allow document to enable syntax highlighter if safe mode is at least SERVER' do input = ':source-highlighter: coderay' doc = document_from_string input, safe: Asciidoctor::SafeMode::SERVER, parse: true assert_nil doc.attributes['source-highlighter'] assert_nil doc.syntax_highlighter end test 'should not invoke highlight method on syntax highlighter if highlight? is false' do Class.new Asciidoctor::SyntaxHighlighter::Base do register_for 'unavailable' def format node, language, opts %(
    <pre class="highlight"><code class="language-#{language}" data-lang="#{language}">#{node.content}</code></pre>
    ) end def highlight? false end end input = <<~'EOS' [source,ruby] ---- puts 'Hello, World!' ---- EOS doc = document_from_string input, attributes: { 'source-highlighter' => 'unavailable' } output = doc.convert assert_css 'pre.highlight > code.language-ruby', output, 1 source_block = (doc.find_by {|candidate| candidate.style == 'source' })[0] assert_raises NotImplementedError do doc.syntax_highlighter.highlight source_block, source_block.source, (source_block.attr 'language'), {} end end test 'should be able to register syntax highlighter from syntax highlighter class itself' do syntax_highlighter = Class.new Asciidoctor::SyntaxHighlighter::Base do def format node, language, opts %(
    #{node.content}
    ) end def highlight? false end end syntax_highlighter.register_for 'foobar' assert_equal syntax_highlighter, (Asciidoctor::SyntaxHighlighter.for 'foobar') end test 'should be able to register syntax highlighter using symbol' do syntax_highlighter = Class.new Asciidoctor::SyntaxHighlighter::Base do register_for :foobaz def format node, language, opts %(
    #{node.content}
    ) end def highlight? false end end assert_equal syntax_highlighter, (Asciidoctor::SyntaxHighlighter.for 'foobaz') end test 'should set language on output of source block when source-highlighter attribute is not set' do input = <<~'EOS' [source, ruby] ---- puts 'Hello, World!' ---- EOS output = convert_string input, safe: Asciidoctor::SafeMode::SAFE assert_css 'pre.highlight', output, 1 assert_css 'pre.highlight > code.language-ruby', output, 1 assert_css 'pre.highlight > code.language-ruby[data-lang="ruby"]', output, 1 end test 'should set language on output of source block when source-highlighter attribute is not recognized' do input = <<~'EOS' :source-highlighter: unknown [source, ruby] ---- puts 'Hello, World!' ---- EOS output = convert_string input, safe: Asciidoctor::SafeMode::SAFE assert_css 'pre.highlight', output, 1 assert_css 'pre.highlight > code.language-ruby', output, 1 assert_css 'pre.highlight > code.language-ruby[data-lang="ruby"]', output, 1 end test 'should highlight source if source highlighter is set even if language is not set' do input = <<~'EOS' :source-highlighter: coderay [source] ---- [numbers] one two three ---- EOS output = convert_string input, safe: :safe assert_css 'pre.CodeRay.highlight', output, 1 assert_includes output, '' end test 'should not crash if source block has no lines and source highlighter is set' do input = <<~'EOS' :source-highlighter: coderay [source,text] ---- ---- EOS output = convert_string_to_embedded input, safe: :safe assert_css 'pre.CodeRay', output, 1 assert_css 'pre.CodeRay > code', output, 1 assert_css 'pre.CodeRay > code:empty', output, 1 end test 'should highlight source inside AsciiDoc table cell if source-highlighter attribute is set' do input = <<~'EOS' :source-highlighter: coderay |=== a| [source, ruby] ---- require 'coderay' html = CodeRay.scan("puts 'Hello, world!'", :ruby).div line_numbers: :table ---- |=== EOS output = convert_string_to_embedded input, safe: :safe assert_xpath '/table//pre[@class="CodeRay highlight"]/code[@data-lang="ruby"]//span[@class = "constant"][text() = "CodeRay"]', output, 1 end test 'should set starting line number in DocBook output if linenums option is enabled and start attribute is set' do input = <<~'EOS' [source%linenums,java,start=3] ---- public class HelloWorld { public static void main(String[] args) { out.println("Hello, World!"); } } ---- EOS output = convert_string_to_embedded input, backend: :docbook, safe: Asciidoctor::SafeMode::SAFE assert_css 'programlisting[startinglinenumber]', output, 1 assert_css 'programlisting[startinglinenumber="3"]', output, 1 end test 'should read source language from source-language document attribute if not specified on source block' do input = <<~'EOS' :source-highlighter: coderay :source-language: ruby [source] ---- require 'coderay' html = CodeRay.scan("puts 'Hello, world!'", :ruby).div line_numbers: :table ---- EOS output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE, linkcss_default: true assert_xpath '//pre[@class="CodeRay highlight"]/code[@data-lang="ruby"]//span[@class = "constant"][text() = "CodeRay"]', output, 1 end test 'should rename document attribute named language to source-language when compat-mode is enabled' do input = <<~'EOS' :language: ruby {source-language} EOS assert_equal 'ruby', (convert_inline_string input, attributes: { 'compat-mode' => '' }) input = <<~'EOS' :language: ruby {source-language} EOS assert_equal '{source-language}', (convert_inline_string input) end context 'CodeRay' do test 'should 
highlight source if source-highlighter attribute is set' do input = <<~'EOS' :source-highlighter: coderay [source, ruby] ---- require 'coderay' html = CodeRay.scan("puts 'Hello, world!'", :ruby).div line_numbers: :table ---- EOS output = convert_string input, safe: Asciidoctor::SafeMode::SAFE, linkcss_default: true assert_xpath '//pre[@class="CodeRay highlight"]/code[@data-lang="ruby"]//span[@class = "constant"][text() = "CodeRay"]', output, 1 assert_match(/\.CodeRay *\{/, output) style_node = xmlnodes_at_xpath '//style[contains(text(), ".CodeRay")]', output, 1 refute_nil style_node assert_equal 'head', style_node.parent.name end test 'should not fail if source language is invalid' do input = <<~'EOS' :source-highlighter: coderay [source, n/a] ---- PRINT 'yo' ---- EOS output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE assert_css 'code[data-lang="n/a"]', output, 1 end test 'should number lines if third positional attribute is set' do input = <<~'EOS' :source-highlighter: coderay [source,ruby,linenums] ---- puts 'Hello, World!' ---- EOS output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE assert_xpath '//td[@class="line-numbers"]', output, 1 end test 'should number lines if linenums option is set on source block' do input = <<~'EOS' :source-highlighter: coderay [source%linenums,ruby] ---- puts 'Hello, World!' ---- EOS output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE assert_xpath '//td[@class="line-numbers"]', output, 1 end test 'should number lines of source block if source-linenums-option document attribute is set' do input = <<~'EOS' :source-highlighter: coderay :source-linenums-option: [source,ruby] ---- puts 'Hello, World!' ---- EOS output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE assert_xpath '//td[@class="line-numbers"]', output, 1 end test 'should set starting line number in HTML output if linenums option is enabled and start attribute is set' do input = <<~'EOS' :source-highlighter: coderay :coderay-linenums-mode: inline [source%linenums,ruby,start=10] ---- puts 'Hello, World!' 
---- EOS output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE assert_xpath '//span[@class="line-numbers"]', output, 1 assert_xpath '//span[@class="line-numbers"][text()="10"]', output, 1 end test 'should highlight lines specified in highlight attribute if linenums is set and source-highlighter is coderay' do %w(highlight="1,4-6" highlight="4-6,1" highlight="5-6,1,4,5" highlight=1;4..6 highlight=1;4..;!7).each do |highlight_attr| input = <<~EOS :source-highlighter: coderay [source%linenums,java,#{highlight_attr}] ---- import static java.lang.System.out; public class HelloWorld { public static void main(String[] args) { out.println("Hello, World!"); } } ---- EOS output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE assert_css 'strong.highlighted', output, 4 assert_xpath '//strong[@class="highlighted"][text()="1"]', output, 1 assert_xpath '//strong[@class="highlighted"][text()="2"]', output, 0 assert_xpath '//strong[@class="highlighted"][text()="3"]', output, 0 assert_xpath '//strong[@class="highlighted"][text()="4"]', output, 1 assert_xpath '//strong[@class="highlighted"][text()="5"]', output, 1 assert_xpath '//strong[@class="highlighted"][text()="6"]', output, 1 assert_xpath '//strong[@class="highlighted"][text()="7"]', output, 0 end end test 'should replace callout marks but not highlight them if source-highlighter attribute is coderay' do input = <<~'EOS' :source-highlighter: coderay [source, ruby] ---- require 'coderay' # <1> html = CodeRay.scan("puts 'Hello, world!'", :ruby).div line_numbers: :table # <2> puts html # <3> <4> exit 0 # <5><6> ---- <1> Load library <2> Highlight source <3> Print to stdout <4> Redirect to a file to capture output <5> Exit program <6> Reports success EOS output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE assert_match(/coderay<\/span>.* # \(1\)<\/b>$/, output) assert_match(/puts 'Hello, world!'<\/span>.* # \(2\)<\/b>$/, output) assert_match(/puts html.* # \(3\)<\/b> \(4\)<\/b>$/, output) assert_match(/exit.* # \(5\)<\/b> \(6\)<\/b><\/code>/, output) end test 'should support autonumbered callout marks if source-highlighter attribute is coderay' do input = <<~'EOS' :source-highlighter: coderay [source, ruby] ---- require 'coderay' # <.><.> html = CodeRay.scan("puts 'Hello, world!'", :ruby).div line_numbers: :table # <.> puts html # <.> ---- <.> Load library <.> Gem must be installed <.> Highlight source <.> Print to stdout EOS output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE assert_match(/coderay<\/span>.* # \(1\)<\/b> \(2\)<\/b>$/, output) assert_match(/puts 'Hello, world!'<\/span>.* # \(3\)<\/b>$/, output) assert_match(/puts html.* # \(4\)<\/b><\/code>/, output) assert_css '.colist ol', output, 1 assert_css '.colist ol li', output, 4 end test 'should restore callout marks to correct lines if source highlighter is coderay and table line numbering is enabled' do input = <<~'EOS' :source-highlighter: coderay :coderay-linenums-mode: table [source, ruby, numbered] ---- require 'coderay' # <1> html = CodeRay.scan("puts 'Hello, world!'", :ruby).div line_numbers: :table # <2> puts html # <3> <4> exit 0 # <5><6> ---- <1> Load library <2> Highlight source <3> Print to stdout <4> Redirect to a file to capture output <5> Exit program <6> Reports success EOS output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE assert_match(/coderay<\/span>.* # \(1\)<\/b>$/, output) assert_match(/puts 'Hello, world!'<\/span>.* # \(2\)<\/b>$/, output) 
assert_match(/puts html.* # \(3\)<\/b> \(4\)<\/b>$/, output) # NOTE notice there's a newline before the closing tag assert_match(/exit.* # \(5\)<\/b> \(6\)<\/b>\n<\/pre>/, output) end test 'should restore isolated callout mark on last line of source when source highlighter is coderay' do input = <<~'EOS' :source-highlighter: coderay [source,ruby,linenums] ---- require 'app' launch_app # <1> ---- <1> Profit. EOS output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE # NOTE notice there's a newline before the closing tag assert_match(/\n# \(1\)<\/b>\n<\/pre>/, output) end test 'should preserve space before callout on final line' do inputs = [] inputs << <<~'EOS' [source,yaml] ---- a: 'a' key: 'value' #<1> ---- <1> key-value pair EOS inputs << <<~'EOS' [source,ruby] ---- puts 'hi' puts 'value' #<1> ---- <1> print to stdout EOS inputs << <<~'EOS' [source,python] ---- print 'hi' print 'value' #<1> ---- <1> print to stdout EOS inputs.each do |input| output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE, attributes: { 'source-highlighter' => 'coderay' } output = output.gsub(/<\/?span.*?>/, '') assert_includes output, '\'value\' #(1)' end end test 'should preserve passthrough placeholders when highlighting source using coderay' do input = <<~'EOS' :source-highlighter: coderay [source,java] [subs="specialcharacters,macros,callouts"] ---- public class Printer { public static void main(String[] args) { System.pass:quotes[_out_].println("*asterisks* make text pass:quotes[*bold*]"); } } ---- EOS output = convert_string input, safe: Asciidoctor::SafeMode::SAFE assert_match(/\.out<\/em>\./, output, 1) assert_match(/\*asterisks\*/, output, 1) assert_match(/bold<\/strong>/, output, 1) refute_includes output, Asciidoctor::Substitutors::PASS_START end test 'should link to CodeRay stylesheet if source-highlighter is coderay and linkcss is set' do input = <<~'EOS' :source-highlighter: coderay [source, ruby] ---- require 'coderay' html = CodeRay.scan("puts 'Hello, world!'", :ruby).div line_numbers: :table ---- EOS output = convert_string input, safe: Asciidoctor::SafeMode::SAFE, attributes: { 'linkcss' => '' } assert_xpath '//pre[@class="CodeRay highlight"]/code[@data-lang="ruby"]//span[@class = "constant"][text() = "CodeRay"]', output, 1 link_node = xmlnodes_at_xpath '//link[@rel="stylesheet"][@href="./coderay-asciidoctor.css"]', output, 1 refute_nil link_node assert_equal 'head', link_node.parent.name end test 'should highlight source inline if source-highlighter attribute is coderay and coderay-css is style' do input = <<~'EOS' :source-highlighter: coderay :coderay-css: style [source, ruby] ---- require 'coderay' html = CodeRay.scan("puts 'Hello, world!'", :ruby).div line_numbers: :table ---- EOS output = convert_string input, safe: Asciidoctor::SafeMode::SAFE, linkcss_default: true assert_xpath '//pre[@class="CodeRay highlight"]/code[@data-lang="ruby"]//span[@style = "color:#036;font-weight:bold"][text() = "CodeRay"]', output, 1 refute_match(/\.CodeRay \{/, output) end test 'should read stylesheet' do css = (Asciidoctor::SyntaxHighlighter.for 'coderay').read_stylesheet refute_nil css assert_includes css, 'pre.CodeRay{background:#f7f7f8}' end end context 'Highlight.js' do test 'should add data-lang as last attribute on code tag when source-highlighter is highlight.js' do input = <<~'EOS' :source-highlighter: highlight.js [source,ruby] ---- puts 'Hello, World!' 
      ----
      EOS

      output = convert_string_to_embedded input, safe: :safe
      assert_includes output, ''
    end

    test 'should include remote highlight.js assets if source-highlighter attribute is highlight.js' do
      input = <<~'EOS'
      :source-highlighter: highlight.js

      [source,html]
      ----
      <p>Highlight me!</p>
    ---- EOS output = convert_string input, safe: Asciidoctor::SafeMode::SAFE assert_css 'pre.highlightjs.highlight', output, 1 assert_css 'pre.highlightjs.highlight > code.language-html.hljs[data-lang="html"]', output, 1 assert_includes output, '<p>Highlight me!</p>' assert_css 'head > link[href*="highlight.js"]', output, 1 assert_css '#footer ~ script[src*="highlight.min.js"]', output, 1 assert_xpath '//script[contains(text(), "hljs.highlightBlock(el)")]', output, 1 end test 'should add language-none class to source block when source-highlighter is highlight.js and language is not set' do input = <<~'EOS' :source-highlighter: highlight.js [source] ---- [numbers] one two three ---- EOS output = convert_string input, safe: :safe assert_css 'code.language-none', output, 1 end test 'should load additional languages specified by highlightjs-languages' do input = <<~'EOS' :source-highlighter: highlight.js :highlightjs-languages: yaml, scilab [source,yaml] ---- key: value ---- EOS output = convert_string input, safe: Asciidoctor::SafeMode::SAFE assert_css '#footer ~ script[src*="languages/yaml.min.js"]', output, 1 assert_css '#footer ~ script[src*="languages/scilab.min.js"]', output, 1 end end context 'Prettify' do test 'should add language classes to child code element when source-highlighter is prettify' do input = <<~'EOS' [source,ruby] ---- puts "foo" ---- EOS output = convert_string_to_embedded input, attributes: { 'source-highlighter' => 'prettify' } assert_css 'pre[class="prettyprint highlight"]', output, 1 assert_css 'pre > code[data-lang="ruby"]', output, 1 end test 'should set linenums start if linenums are enabled and start attribute is set when source-highlighter is prettify' do input = <<~'EOS' [source%linenums,ruby,start=5] ---- puts "foo" ---- EOS output = convert_string_to_embedded input, attributes: { 'source-highlighter' => 'prettify' } assert_css 'pre[class="prettyprint highlight linenums:5"]', output, 1 assert_css 'pre > code[data-lang="ruby"]', output, 1 end end context 'HTML Pipeline' do test 'should set lang attribute on pre when source-highlighter is html-pipeline' do input = <<~'EOS' [source,ruby] ---- filters = [ HTML::Pipeline::AsciiDocFilter, HTML::Pipeline::SanitizationFilter, HTML::Pipeline::SyntaxHighlightFilter ] puts HTML::Pipeline.new(filters, {}).call(input)[:output] ---- EOS output = convert_string input, attributes: { 'source-highlighter' => 'html-pipeline' } assert_css 'pre[lang="ruby"]', output, 1 assert_css 'pre[lang="ruby"] > code', output, 1 assert_css 'pre[class]', output, 0 assert_css 'code[class]', output, 0 end end context 'Rouge' do test 'should syntax highlight source if source-highlighter attribute is set' do input = <<~'EOS' :source-highlighter: rouge [source,ruby] ---- require 'rouge' html = Rouge::Formatters::HTML.format(Rouge::Lexers::Ruby.lex('puts "Hello, world!"')) ---- EOS output = convert_string input, safe: :safe, linkcss_default: true assert_xpath '//pre[@class="rouge highlight"]/code[@data-lang="ruby"]/span[@class="no"][text()="Rouge"]', output, 2 assert_includes output, 'pre.rouge .no {' style_node = xmlnodes_at_xpath '//style[contains(text(), "pre.rouge")]', output, 1 refute_nil style_node assert_equal 'head', style_node.parent.name end test 'should highlight source using a mixed lexer (HTML + JavaScript)' do input = <<~'EOS' [,html] ---- ---- EOS output = convert_string_to_embedded input, safe: :safe, attributes: { 'source-highlighter' => 'rouge' } assert_css 'pre.rouge > code[data-lang="html"]', output, 1 end test 'should enable 
start_inline for PHP by default' do input = <<~'EOS' [,php] ---- echo " 'rouge' } assert_css 'pre.rouge > code[data-lang="php"]', output, 1 assert_include 'echo', output end test 'should not enable start_inline for PHP if disabled using cgi-style option on language' do input = <<~'EOS' [,php?start_inline=0] ---- echo " 'rouge' } assert_css 'pre.rouge > code[data-lang="php"]', output, 1 refute_include 'echo', output assert_include '<?php', output end test 'should not enable start_inline for PHP if mixed option is set' do input = <<~'EOS' [%mixed,php] ---- echo " 'rouge' } assert_css 'pre.rouge > code[data-lang="php"]', output, 1 refute_include 'echo', output assert_include '<?php', output end test 'should preserve cgi-style options on language when setting start_inline option for PHP', if: (Rouge.version >= '2.1.0') do input = <<~'EOS' [,php?funcnamehighlighting=0] ---- cal_days_in_month(CAL_GREGORIAN, 6, 2019) ---- EOS output = convert_string_to_embedded input, safe: :safe, attributes: { 'source-highlighter' => 'rouge' } assert_css 'pre.rouge > code[data-lang="php"]', output, 1 # if class is "nb", then the funcnamehighlighting option is not honored refute_include 'cal_days_in_month', output assert_include '2019', output end test 'should not crash if source-highlighter attribute is set and source block does not define a language' do input = <<~'EOS' :source-highlighter: rouge [source] ---- require 'rouge' html = Rouge::Formatters::HTML.format(Rouge::Lexers::Ruby.lex('puts "Hello, world!"')) ---- EOS output = convert_string_to_embedded input, safe: :safe assert_css 'pre > code:not([data-lang])', output, 1 end test 'should default to plain text lexer if lexer cannot be resolved for language' do input = <<~'EOS' :source-highlighter: rouge [source,lolcode] ---- CAN HAS STDIO? PLZ OPEN FILE "LOLCATS.TXT"? KTHXBYE ---- EOS output = convert_string_to_embedded input, safe: :safe assert_css 'code[data-lang=lolcode]', output, 1 assert_css 'code span', output, 0 assert_xpath %(//code[text()='CAN HAS STDIO?\nPLZ OPEN FILE "LOLCATS.TXT"?\nKTHXBYE']), output, 1 end test 'should honor cgi-style options on language', if: (Rouge.version >= '2.1.0') do input = <<~'EOS' :source-highlighter: rouge [source,console?prompt=$>] ---- $> asciidoctor --version ---- EOS output = convert_string_to_embedded input, safe: :safe assert_css 'code[data-lang=console]', output, 1 assert_css 'code span.gp', output, 1 end test 'should set starting line number to 1 by default in HTML output if linenums option is enabled' do input = <<~'EOS' [source%linenums,ruby] ---- puts 'Hello, World!' puts 'Goodbye, World!' ---- EOS output = convert_string_to_embedded input, attributes: { 'source-highlighter' => 'rouge' } assert_css 'table.linenotable', output, 1 assert_css 'table.linenotable td.linenos', output, 1 assert_css 'table.linenotable td.linenos pre.lineno', output, 1 assert_css 'table.linenotable td.code', output, 1 assert_css 'table.linenotable td.code pre:not([class])', output, 1 assert_xpath %(//pre[@class="lineno"][text()="1\n2\n"]), output, 1 end test 'should set starting line number in HTML output if linenums option is enabled and start attribute is set' do input = <<~'EOS' [source%linenums,ruby,start=9] ---- puts 'Hello, World!' puts 'Goodbye, World!' 
---- EOS output = convert_string_to_embedded input, attributes: { 'source-highlighter' => 'rouge' } assert_css 'table.linenotable', output, 1 assert_css 'table.linenotable td.linenos', output, 1 assert_css 'table.linenotable td.linenos pre.lineno', output, 1 assert_css 'table.linenotable td.code', output, 1 assert_css 'table.linenotable td.code pre:not([class])', output, 1 assert_xpath %(//pre[@class="lineno"][text()=" 9\n10\n"]), output, 1 end test 'should restore callout marks to correct lines' do ['', '%linenums'].each do |opts| input = <<~EOS :source-highlighter: rouge [source#{opts},ruby] ---- require 'rouge' # <1> html = Rouge::Formatters::HTML.format(Rouge::Lexers::Ruby.lex('puts "Hello, world!"')) # <2> puts html # <3> <4> exit 0 # <5><6> ---- <1> Load library <2> Highlight source <3> Print to stdout <4> Redirect to a file to capture output <5> Exit program <6> Reports success EOS output = convert_string_to_embedded input, safe: :safe assert_match(/'rouge'<\/span>.* # \(1\)<\/b>$/, output) assert_match(/'puts "Hello, world!"'<\/span>.* # \(2\)<\/b>$/, output) assert_match(/html<\/span>.* # \(3\)<\/b> \(4\)<\/b>$/, output) # NOTE notice there's a newline before the closing tag when linenums are enabled assert_match(/0<\/span>.* # \(5\)<\/b> \(6\)<\/b>#{opts == '%linenums' ? ?\n : '
    '}<\/pre>/, output) end end test 'should line highlight specified lines when last line is not highlighted' do ['', '%linenums'].each do |opts| input = <<~EOS :source-highlighter: rouge [source#{opts},ruby,highlight=1] ---- puts 'Hello, world!' puts 'Goodbye, world!' ---- EOS # NOTE notice the newline in inside the closing
    of the highlight span expected = <<~EOS.chop puts 'Hello, world!' puts 'Goodbye, world!'#{opts == '%linenums' ? ?\n : '
    '} EOS output = convert_string_to_embedded input, safe: :safe assert_includes output, expected end end test 'should line highlight specified lines when last line is highlighted' do ['', '%linenums'].each do |opts| input = <<~EOS :source-highlighter: rouge [source#{opts},ruby,highlight=2] ---- puts 'Hello, world!' puts 'Goodbye, world!' ---- EOS # NOTE notice the newline in inside the closing
    of the highlight span expected = <<~EOS.chop puts 'Hello, world!' puts 'Goodbye, world!' #{opts == '%linenums' ? '' : '
    '} EOS output = convert_string_to_embedded input, safe: :safe assert_includes output, expected end end test 'should line highlight specified lines relative to start value' do input = <<~EOS :source-highlighter: rouge [source%linenums,ruby,start=5,highlight=6] ---- get { render "Hello, World!" } ---- EOS expected = <<~EOS.chop get { render "Hello, World!" } EOS output = convert_string_to_embedded input, safe: :safe assert_includes output, expected end test 'should ignore start attribute when the value is 0' do input = <<~EOS :source-highlighter: rouge [source%linenums,ruby,start=0,highlight=6] ---- get { render "Hello, World!" } ---- EOS expected = <<~EOS.chop get { render "Hello, World!" } EOS output = convert_string_to_embedded input, safe: :safe assert_includes output, expected end test 'should not line highlight when the start attribute is greater than highlight' do input = <<~EOS :source-highlighter: rouge [source%linenums,ruby,start=7,highlight=6] ---- get { render "Hello, World!" } ---- EOS expected = <<~EOS.chop get { render "Hello, World!" } EOS output = convert_string_to_embedded input, safe: :safe assert_includes output, expected end test 'should restore callout marks to correct lines if line numbering and line highlighting are enabled' do [1, 2].each do |highlight| input = <<~EOS :source-highlighter: rouge [source%linenums,ruby,highlight=#{highlight}] ---- require 'rouge' # <1> exit 0 # <2> ---- <1> Load library <2> Exit program EOS output = convert_string_to_embedded input, safe: :safe assert_match(/'rouge'<\/span>.* # \(1\)<\/b>$/, output) # NOTE notice there's a newline before the closing tag assert_match(/0<\/span>.* # \(2\)<\/b>\n#{highlight == 2 ? '' : ''}<\/pre>/, output) end end test 'should gracefully fallback to default style if specified style not recognized' do input = <<~'EOS' :source-highlighter: rouge :rouge-style: unknown [source,ruby] ---- puts 'Hello, world!' ---- EOS output = convert_string input, safe: :safe, linkcss_default: true assert_css 'pre.rouge', output, 1 expected_css = (Asciidoctor::SyntaxHighlighter.for 'rouge').read_stylesheet 'github' assert_includes output, expected_css end test 'should restore isolated callout mark on last line of source' do input = <<~'EOS' :source-highlighter: rouge [source%linenums,ruby] ---- require 'app' launch_app # <1> ---- <1> Profit. EOS output = convert_string_to_embedded input, safe: :safe # NOTE notice there's a newline before the closing tag, but not before the closing tag assert_match(/\n# \(1\)<\/b>\n<\/pre><\/td>/, output) end test 'should number all lines when isolated callout mark is on last line of source and starting line number is set' do input = <<~'EOS' :source-highlighter: rouge [source%linenums,ruby,start=5] ---- require 'app' launch_app # <1> ---- <1> Profit. EOS output = convert_string_to_embedded input, safe: :safe assert_xpath %(//pre[@class="lineno"][text()="5\n6\n7\n8\n"]), output, 1 # NOTE notice there's a newline before the closing tag, but not before the closing tag assert_match(/\n# \(1\)<\/b>\n<\/pre><\/td>/, output) end test 'should preserve guard in front of callout if icons are not enabled' do input = <<~'EOS' [,ruby] ---- puts 'Hello, World!' 
# <1> puts 'Goodbye, World ;(' # <2> ---- EOS result = convert_string_to_embedded input assert_include ' # (1)', result assert_include ' # (2)', result end test 'should preserve guard around callout if icons are not enabled' do input = <<~'EOS' ---- ---- EOS result = convert_string_to_embedded input assert_include ' <!--(1)-->', result assert_include ' <!--(2)-->', result end test 'should read stylesheet for specified style' do css = (Asciidoctor::SyntaxHighlighter.for 'rouge').read_stylesheet 'monokai' refute_nil css assert_includes css, 'pre.rouge {' assert_includes css, 'background-color: #49483e;' end test 'should not fail to load rouge if the Asciidoctor module is included into the global namespace', unless: jruby_9_1_windows? do result = run_command(asciidoctor_cmd, '-r', (fixture_path 'include-asciidoctor.rb'), '-s', '-o', '-', '-a', 'source-highlighter=rouge', (fixture_path 'source-block.adoc'), use_bundler: true) {|out| out.read } assert_xpath '//pre[@class="rouge highlight"]', result, 1 end end context 'Pygments', if: ENV['PYGMENTS_VERSION'] do test 'should syntax highlight source if source-highlighter attribute is set' do input = <<~'EOS' :source-highlighter: pygments :pygments-style: monokai [source,python] ---- from pygments import highlight from pygments.lexers import PythonLexer from pygments.formatters import HtmlFormatter source = 'print "Hello World"' print(highlight(source, PythonLexer(), HtmlFormatter())) ---- EOS output = convert_string input, safe: :safe, linkcss_default: true assert_xpath '//pre[@class="pygments highlight"]/code[@data-lang="python"]/span[@class="tok-kn"][text()="import"]', output, 3 assert_includes output, 'pre.pygments ' style_node = xmlnodes_at_xpath '//style[contains(text(), "pre.pygments")]', output, 1 refute_nil style_node assert_equal 'head', style_node.parent.name end test 'should embed stylesheet for pygments style' do input = <<~'EOS' :source-highlighter: pygments :pygments-style: monokai [source,python] ---- from pygments import highlight from pygments.lexers import PythonLexer from pygments.formatters import HtmlFormatter source = 'print "Hello World"' print(highlight(source, PythonLexer(), HtmlFormatter())) ---- EOS output = convert_string input, safe: :safe, linkcss_default: false assert_css 'style', output, 2 style_nodes = xmlnodes_at_xpath '//style[contains(text(), "pre.pygments")]', output assert_equal 2, style_nodes.size pygments_styles = style_nodes[1].to_s assert_match %r/^pre\.pygments *\{/, pygments_styles refute_match %r/^pre *\{/, pygments_styles end test 'should gracefully fallback to default style if specified style not recognized' do input = <<~'EOS' :source-highlighter: pygments :pygments-style: unknown [source,python] ---- from pygments import highlight from pygments.lexers import PythonLexer from pygments.formatters import HtmlFormatter source = 'print "Hello World"' print(highlight(source, PythonLexer(), HtmlFormatter())) ---- EOS output = convert_string input, safe: :safe, linkcss_default: true assert_css 'pre.pygments', output, 1 assert_includes output, 'pre.pygments ' assert_includes output, '.tok-c { color: #408080;' end test 'should number lines if linenums option is set on source block' do input = <<~'EOS' :source-highlighter: pygments [source%linenums,ruby] ---- puts 'Hello, World!' puts 'Goodbye, World!' 
---- EOS output = convert_string_to_embedded input, safe: Asciidoctor::SafeMode::SAFE assert_css 'table.linenotable', output, 1 assert_css 'table.linenotable td.linenos', output, 1 assert_css 'table.linenotable td.linenos .linenodiv', output, 1 assert_css 'table.linenotable td.linenos .linenodiv pre:not([class])', output, 1 assert_css 'table.linenotable td.code', output, 1 assert_css 'table.linenotable td.code pre:not([class])', output, 1 # NOTE new versions of Pygments wrap the numbers in span linenos_node = xmlnodes_at_xpath %(//*[@class="linenodiv"]/pre), output, 1 linenos = linenos_node.content.gsub %r(\d+), '\1' assert_equal %(1\n2), linenos end test 'should restore callout marks to correct lines if table line numbering is enabled' do input = <<~'EOS' :source-highlighter: pygments :pygments-linenums-mode: table [source%linenums,ruby] ---- from pygments import highlight # <1> from pygments.lexers import PythonLexer from pygments.formatters import HtmlFormatter code = 'print "Hello World"' print(highlight(code, PythonLexer(), HtmlFormatter())) # <2><3> ---- <1> Load library <2> Highlight source <3> Print to stdout EOS output = convert_string_to_embedded input, safe: :safe assert_match(/highlight<\/span> # \(1\)<\/b>$/, output) # NOTE notice there's a newline before the closing tag assert_match(/\(\)\)\).*<\/span> # \(2\)<\/b> \(3\)<\/b>$/, output) end test 'should restore isolated callout mark on last line of source' do input = <<~'EOS' :source-highlighter: pygments [source,ruby,linenums] ---- require 'app' launch_app # <1> ---- <1> Profit. EOS output = convert_string_to_embedded input, safe: :safe # NOTE notice there's a newline before the closing tag, but not before the closing tag assert_match(/\n# \(1\)<\/b>\n<\/pre><\/td>/, output) end test 'should not hardcode inline styles on lineno div and pre elements when linenums are enabled in table mode' do input = <<~'EOS' :source-highlighter: pygments :pygments-css: inline [source%linenums,ruby] ---- puts 'Hello, World!' ---- EOS output = convert_string_to_embedded input, safe: :safe assert_css 'td.linenos', output, 1 assert_css 'div.linenodiv:not([style])', output, 1 assert_includes output, '
    '
          assert_css 'pre:not([style])', output, 2
        end
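
    # A minimal standalone sketch of what the Pygments assertions above exercise,
    # assuming the asciidoctor and pygments.rb gems are available; the snippet and
    # variable names below are illustrative only, not taken from this suite:
    #
    #   require 'asciidoctor'
    #
    #   source = <<~'ADOC'
    #   :source-highlighter: pygments
    #   :pygments-css: inline
    #
    #   [source%linenums,ruby]
    #   ----
    #   puts 'Hello, World!'
    #   ----
    #   ADOC
    #
    #   # convert without the standalone document wrapper, as the tests do
    #   puts (Asciidoctor.convert source, safe: :safe, standalone: false)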
    
        test 'should add lineno spans with class and trim trailing space when linenums are enabled and source-highlighter is pygments' do
          %w(class style).each do |css_mode|
            input = <<~EOS
            :source-highlighter: pygments
            :pygments-css: #{css_mode}
            :pygments-linenums-mode: inline
    
            [source%linenums,ruby]
            ----
            puts 'Hello, World!'
            puts 'Hello, World!'
            puts 'Hello, World!'
            puts 'Hello, World!'
            puts 'Hello, World!'
            puts 'Hello, World!'
            puts 'Hello, World!'
            puts 'Hello, World!'
            puts 'Hello, World!'
            exit 0
            ----
            EOS
    
            output = convert_string_to_embedded input, safe: :safe
            assert_css 'table.linenotable', output, 0
            assert_css 'pre', output, 1
            assert_includes output, ' 1'
            assert_includes output, '10'
          end
        end
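
    # A hedged sketch of the same inline line-numbering setup, driven through API
    # attributes rather than document header entries, assuming the asciidoctor and
    # pygments.rb gems are available; the snippet below is illustrative only:
    #
    #   require 'asciidoctor'
    #
    #   source = <<~'ADOC'
    #   [source%linenums,ruby]
    #   ----
    #   puts 'Hello, World!'
    #   exit 0
    #   ----
    #   ADOC
    #
    #   html = Asciidoctor.convert source, safe: :safe, standalone: false,
    #     attributes: { 'source-highlighter' => 'pygments', 'pygments-linenums-mode' => 'inline' }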
    
        test 'should line highlight specified lines' do
          input = <<~'EOS'
          :source-highlighter: pygments
    
          [source,ruby,highlight=1..2]
          ----
          puts 'Hello, world!'
          puts 'Goodbye, world!'
          ----
          EOS
      # NOTE notice the newline is inside the closing </span> of the highlight span
          expected = <<~'EOS'.chop
          
    puts 'Hello, world!'
          puts 'Goodbye, world!'
          
    EOS output = convert_string_to_embedded input, safe: :safe assert_includes output, expected end end end asciidoctor-2.0.20/test/tables_test.rb000066400000000000000000002256141443135032600177260ustar00rootroot00000000000000# frozen_string_literal: true require_relative 'test_helper' context 'Tables' do context 'PSV' do test 'converts simple psv table' do input = <<~'EOS' |======= |A |B |C |a |b |c |1 |2 |3 |======= EOS cells = [%w(A B C), %w(a b c), %w(1 2 3)] doc = document_from_string input, standalone: false table = doc.blocks[0] assert_equal 100, table.columns.map {|col| col.attributes['colpcwidth'] }.reduce(:+) output = doc.convert assert_css 'table', output, 1 assert_css 'table.tableblock.frame-all.grid-all.stretch', output, 1 assert_css 'table > colgroup > col[style*="width: 33.3333%"]', output, 2 assert_css 'table > colgroup > col:last-of-type[style*="width: 33.3334%"]', output, 1 assert_css 'table tr', output, 3 assert_css 'table > tbody > tr', output, 3 assert_css 'table td', output, 9 assert_css 'table > tbody > tr > td.tableblock.halign-left.valign-top > p.tableblock', output, 9 cells.each_with_index do |row, rowi| assert_css "table > tbody > tr:nth-child(#{rowi + 1}) > td", output, row.size assert_css "table > tbody > tr:nth-child(#{rowi + 1}) > td > p", output, row.size row.each_with_index do |cell, celli| assert_xpath "(//tr)[#{rowi + 1}]/td[#{celli + 1}]/p[text()='#{cell}']", output, 1 end end end test 'should add direction CSS class if float attribute is set on table' do input = <<~'EOS' [float=left] |======= |A |B |C |a |b |c |1 |2 |3 |======= EOS output = convert_string_to_embedded input assert_css 'table.left', output, 1 end test 'should set stripes class if stripes option is set' do input = <<~'EOS' [stripes=odd] |======= |A |B |C |a |b |c |1 |2 |3 |======= EOS output = convert_string_to_embedded input assert_css 'table.stripes-odd', output, 1 end test 'outputs a caption on simple psv table' do input = <<~'EOS' .Simple psv table |======= |A |B |C |a |b |c |1 |2 |3 |======= EOS output = convert_string_to_embedded input assert_xpath '/table/caption[@class="title"][text()="Table 1. Simple psv table"]', output, 1 assert_xpath '/table/caption/following-sibling::colgroup', output, 1 end test 'only increments table counter for tables that have a title' do input = <<~'EOS' .First numbered table |======= |1 |2 |3 |======= |======= |4 |5 |6 |======= .Second numbered table |======= |7 |8 |9 |======= EOS output = convert_string_to_embedded input assert_css 'table:root', output, 3 assert_xpath '(/table)[1]/caption', output, 1 assert_xpath '(/table)[1]/caption[text()="Table 1. First numbered table"]', output, 1 assert_xpath '(/table)[2]/caption', output, 0 assert_xpath '(/table)[3]/caption', output, 1 assert_xpath '(/table)[3]/caption[text()="Table 2. Second numbered table"]', output, 1 end test 'uses explicit caption in front of title in place of default caption and number' do input = <<~'EOS' [caption="All the Data. "] .Simple psv table |======= |A |B |C |a |b |c |1 |2 |3 |======= EOS output = convert_string_to_embedded input assert_xpath '/table/caption[@class="title"][text()="All the Data. 
Simple psv table"]', output, 1 assert_xpath '/table/caption/following-sibling::colgroup', output, 1 end test 'disables caption when caption attribute on table is empty' do input = <<~'EOS' [caption=] .Simple psv table |======= |A |B |C |a |b |c |1 |2 |3 |======= EOS output = convert_string_to_embedded input assert_xpath '/table/caption[@class="title"][text()="Simple psv table"]', output, 1 assert_xpath '/table/caption/following-sibling::colgroup', output, 1 end test 'disables caption when caption attribute on table is empty string' do input = <<~'EOS' [caption=""] .Simple psv table |======= |A |B |C |a |b |c |1 |2 |3 |======= EOS output = convert_string_to_embedded input assert_xpath '/table/caption[@class="title"][text()="Simple psv table"]', output, 1 assert_xpath '/table/caption/following-sibling::colgroup', output, 1 end test 'disables caption on table when table-caption document attribute is unset' do input = <<~'EOS' :!table-caption: .Simple psv table |======= |A |B |C |a |b |c |1 |2 |3 |======= EOS output = convert_string_to_embedded input assert_xpath '/table/caption[@class="title"][text()="Simple psv table"]', output, 1 assert_xpath '/table/caption/following-sibling::colgroup', output, 1 end test 'ignores escaped separators' do input = <<~'EOS' |=== |A \| here| a \| there |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > tbody > tr', output, 1 assert_css 'table > tbody > tr > td', output, 2 assert_xpath '/table/tbody/tr/td[1]/p[text()="A | here"]', output, 1 assert_xpath '/table/tbody/tr/td[2]/p[text()="a | there"]', output, 1 end test 'preserves escaped delimiters at the end of the line' do input = <<~'EOS' [%header,cols="1,1"] |=== |A |B\| |A1 |B1\| |A2 |B2\| |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > thead > tr', output, 1 assert_css 'table > thead > tr:nth-child(1) > th', output, 2 assert_xpath '/table/thead/tr[1]/th[2][text()="B|"]', output, 1 assert_css 'table > tbody > tr', output, 2 assert_css 'table > tbody > tr:nth-child(1) > td', output, 2 assert_xpath '/table/tbody/tr[1]/td[2]/p[text()="B1|"]', output, 1 assert_css 'table > tbody > tr:nth-child(2) > td', output, 2 assert_xpath '/table/tbody/tr[2]/td[2]/p[text()="B2|"]', output, 1 end test 'should treat trailing pipe as an empty cell' do input = <<~'EOS' |=== |A1 | |B1 |B2 |C1 |C2 |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > tbody > tr', output, 3 assert_xpath '/table/tbody/tr[1]/td', output, 2 assert_xpath '/table/tbody/tr[1]/td[1]/p[text()="A1"]', output, 1 assert_xpath '/table/tbody/tr[1]/td[2]/p', output, 0 assert_xpath '/table/tbody/tr[2]/td[1]/p[text()="B1"]', output, 1 end test 'should auto recover with warning if missing leading separator on first cell' do input = <<~'EOS' |=== A | here| a | there | x | y | z | end |=== EOS using_memory_logger do |logger| output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > tbody > tr', output, 2 assert_css 'table > tbody > tr > td', output, 8 assert_xpath '/table/tbody/tr[1]/td[1]/p[text()="A"]', output, 1 assert_xpath '/table/tbody/tr[1]/td[2]/p[text()="here"]', output, 1 assert_xpath '/table/tbody/tr[1]/td[3]/p[text()="a"]', output, 1 assert_xpath '/table/tbody/tr[1]/td[4]/p[text()="there"]', output, 1 assert_message logger, :ERROR, ': 
line 2: table missing leading separator; recovering automatically', Hash end end test 'performs normal substitutions on cell content' do input = <<~'EOS' :show_title: Cool new show |=== |{show_title} |Coming soon... |=== EOS output = convert_string_to_embedded input assert_xpath '//tbody/tr/td[1]/p[text()="Cool new show"]', output, 1 assert_xpath %(//tbody/tr/td[2]/p[text()='Coming soon#{decode_char 8230}#{decode_char 8203}']), output, 1 end test 'should only substitute specialchars for literal table cells' do input = <<~'EOS' |=== l|one *two* three |=== EOS output = convert_string_to_embedded input result = xmlnodes_at_xpath('/table//pre', output, 1) assert_equal %(
    one\n*two*\nthree\n<four>
    ), result.to_s end test 'should preserve leading spaces but not leading newlines or trailing spaces in literal table cells' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS [cols=2*] |=== l| one two three | normal |=== EOS output = convert_string_to_embedded input result = xmlnodes_at_xpath('/table//pre', output, 1) assert_equal %(
      one\n  two\nthree
    ), result.to_s end test 'should ignore v table cell style' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS [cols=2*] |=== v| one two three | normal |=== EOS output = convert_string_to_embedded input result = xmlnodes_at_xpath('/table//p[@class="tableblock"]', output, 1) assert_equal %(

    one\n two\nthree

    ), result.to_s end test 'table and column width not assigned when autowidth option is specified' do input = <<~'EOS' [options="autowidth"] |======= |A |B |C |a |b |c |1 |2 |3 |======= EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table.fit-content', output, 1 assert_css 'table[style*="width"]', output, 0 assert_css 'table colgroup col', output, 3 assert_css 'table colgroup col[style*="width"]', output, 0 end test 'does not assign column width for autowidth columns in HTML output' do input = <<~'EOS' [cols="15%,3*~"] |======= |A |B |C |D |a |b |c |d |1 |2 |3 |4 |======= EOS doc = document_from_string input table_row0 = doc.blocks[0].rows.body[0] assert_equal 15, table_row0[0].attributes['width'] assert_equal 15, table_row0[0].attributes['colpcwidth'] refute_equal '', table_row0[0].attributes['autowidth-option'] expected_pcwidths = { 1 => 28.3333, 2 => 28.3333, 3 => 28.3334 } (1..3).each do |i| assert_equal 28.3333, table_row0[i].attributes['width'] assert_equal expected_pcwidths[i], table_row0[i].attributes['colpcwidth'] assert_equal '', table_row0[i].attributes['autowidth-option'] end output = doc.convert standalone: false assert_css 'table', output, 1 assert_css 'table colgroup col', output, 4 assert_css 'table colgroup col[style]', output, 1 assert_css 'table colgroup col[style*="width: 15%"]', output, 1 end test 'can assign autowidth to all columns even when table has a width' do input = <<~'EOS' [cols="4*~",width=50%] |======= |A |B |C |D |a |b |c |d |1 |2 |3 |4 |======= EOS doc = document_from_string input table_row0 = doc.blocks[0].rows.body[0] (0..3).each do |i| assert_equal 25, table_row0[i].attributes['width'] assert_equal 25, table_row0[i].attributes['colpcwidth'] assert_equal '', table_row0[i].attributes['autowidth-option'] end output = doc.convert standalone: false assert_css 'table', output, 1 assert_css 'table[style*="width: 50%"]', output, 1 assert_css 'table colgroup col', output, 4 assert_css 'table colgroup col[style]', output, 0 end test 'equally distributes remaining column width to autowidth columns in DocBook output' do input = <<~'EOS' [cols="15%,3*~"] |======= |A |B |C |D |a |b |c |d |1 |2 |3 |4 |======= EOS output = convert_string_to_embedded input, backend: 'docbook5' assert_css 'tgroup[cols="4"]', output, 1 assert_css 'tgroup colspec', output, 4 assert_css 'tgroup colspec[colwidth]', output, 4 assert_css 'tgroup colspec[colwidth="15*"]', output, 1 assert_css 'tgroup colspec[colwidth="28.3333*"]', output, 2 assert_css 'tgroup colspec[colwidth="28.3334*"]', output, 1 end test 'should compute column widths based on pagewidth when width is set on table in DocBook output' do input = <<~'EOS' :pagewidth: 500 [width=50%] |======= |A |B |C |D |a |b |c |d |1 |2 |3 |4 |======= EOS output = convert_string_to_embedded input, backend: 'docbook5' assert_css 'tgroup[cols="4"]', output, 1 assert_css 'tgroup colspec', output, 4 assert_css 'tgroup colspec[colwidth]', output, 4 assert_css 'tgroup colspec[colwidth="62.5*"]', output, 4 end test 'explicit table width is used even when autowidth option is specified' do input = <<~'EOS' [%autowidth,width=75%] |======= |A |B |C |a |b |c |1 |2 |3 |======= EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table[style*="width"]', output, 1 assert_css 'table colgroup col', output, 3 assert_css 'table colgroup col[style*="width"]', output, 0 end test 'first row sets number of columns when not specified' do input = <<~'EOS' |=== |first |second |third |fourth |1 
|2 |3 |4 |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 4 assert_css 'table > tbody > tr', output, 2 assert_css 'table > tbody > tr:nth-child(1) > td', output, 4 assert_css 'table > tbody > tr:nth-child(2) > td', output, 4 end test 'colspec attribute using asterisk syntax sets number of columns' do input = <<~'EOS' [cols="3*"] |=== |A |B |C |a |b |c |1 |2 |3 |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > tbody > tr', output, 3 end test 'table with explicit column count can have multiple rows on a single line' do input = <<~'EOS' [cols="3*"] |=== |one |two |1 |2 |a |b |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 3 assert_css 'table > tbody > tr', output, 2 end test 'table with explicit deprecated colspec syntax can have multiple rows on a single line' do input = <<~'EOS' [cols="3"] |=== |one |two |1 |2 |a |b |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 3 assert_css 'table > tbody > tr', output, 2 end test 'columns are added for empty records in colspec attribute' do input = <<~'EOS' [cols="<,"] |=== |one |two |1 |2 |a |b |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > tbody > tr', output, 3 end test 'cols may be separated by semi-colon instead of comma' do input = <<~'EOS' [cols="1s;3m"] |=== | strong | mono |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'col[style="width: 25%;"]', output, 1 assert_css 'col[style="width: 75%;"]', output, 1 assert_xpath '(//td)[1]//strong', output, 1 assert_xpath '(//td)[2]//code', output, 1 end test 'cols attribute may include spaces' do input = <<~'EOS' [cols=" 1, 1 "] |=== |one |two |1 |2 |a |b |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'col[style="width: 50%;"]', output, 2 assert_css 'table > tbody > tr', output, 3 end test 'blank cols attribute should be ignored' do input = <<~'EOS' [cols=" "] |=== |one |two |1 |2 |a |b |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'col[style="width: 50%;"]', output, 2 assert_css 'table > tbody > tr', output, 3 end test 'empty cols attribute should be ignored' do input = <<~'EOS' [cols=""] |=== |one |two |1 |2 |a |b |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'col[style="width: 50%;"]', output, 2 assert_css 'table > tbody > tr', output, 3 end test 'table with header and footer' do input = <<~'EOS' [options="header,footer"] |=== |Item |Quantity |Item 1 |1 |Item 2 |2 |Item 3 |3 |Total |6 |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > thead', output, 1 assert_css 'table > thead > tr', output, 1 assert_css 'table > thead > tr > th', output, 2 assert_css 'table > tfoot', output, 1 assert_css 'table > tfoot > tr', output, 1 assert_css 'table > tfoot > tr > td', output, 2 assert_css 'table > tbody', output, 1 assert_css 'table > tbody > tr', output, 3 table_section_names 
= (xmlnodes_at_css 'table > *', output).map(&:node_name).select {|n| n.start_with? 't' } assert_equal %w(thead tbody tfoot), table_section_names end test 'table with header and footer docbook' do input = <<~'EOS' .Table with header, body and footer [options="header,footer"] |=== |Item |Quantity |Item 1 |1 |Item 2 |2 |Item 3 |3 |Total |6 |=== EOS output = convert_string_to_embedded input, backend: 'docbook' assert_css 'table', output, 1 assert_css 'table > title', output, 1 assert_css 'table > tgroup', output, 1 assert_css 'table > tgroup[cols="2"]', output, 1 assert_css 'table > tgroup[cols="2"] > colspec', output, 2 assert_css 'table > tgroup[cols="2"] > colspec[colwidth="50*"]', output, 2 assert_css 'table > tgroup > thead', output, 1 assert_css 'table > tgroup > thead > row', output, 1 assert_css 'table > tgroup > thead > row > entry', output, 2 assert_css 'table > tgroup > thead > row > entry > simpara', output, 0 assert_css 'table > tgroup > tfoot', output, 1 assert_css 'table > tgroup > tfoot > row', output, 1 assert_css 'table > tgroup > tfoot > row > entry', output, 2 assert_css 'table > tgroup > tfoot > row > entry > simpara', output, 2 assert_css 'table > tgroup > tbody', output, 1 assert_css 'table > tgroup > tbody > row', output, 3 assert_css 'table > tgroup > tbody > row', output, 3 table_section_names = (xmlnodes_at_css 'table > tgroup > *', output).map(&:node_name).select {|n| n.start_with? 't' } assert_equal %w(thead tbody tfoot), table_section_names end test 'should set horizontal and vertical alignment when converting to DocBook' do input = <<~'EOS' |=== |A ^.^|B >|C |A1 ^.^|B1 >|C1 |=== EOS output = convert_string input, backend: 'docbook' assert_css 'informaltable', output, 1 assert_css 'informaltable thead > row > entry[align="left"][valign="top"]', output, 1 assert_css 'informaltable thead > row > entry[align="center"][valign="middle"]', output, 1 assert_css 'informaltable thead > row > entry[align="right"][valign="top"]', output, 1 assert_css 'informaltable tbody > row > entry[align="left"][valign="top"]', output, 1 assert_css 'informaltable tbody > row > entry[align="center"][valign="middle"]', output, 1 assert_css 'informaltable tbody > row > entry[align="right"][valign="top"]', output, 1 end test 'should preserve frame value ends when converting to HTML' do input = <<~'EOS' [frame=ends] |=== |A |B |C |=== EOS output = convert_string_to_embedded input assert_css 'table.frame-ends', output, 1 end test 'should normalize frame value topbot as ends when converting to HTML' do input = <<~'EOS' [frame=topbot] |=== |A |B |C |=== EOS output = convert_string_to_embedded input assert_css 'table.frame-ends', output, 1 end test 'should preserve frame value topbot when converting to DocBook' do input = <<~'EOS' [frame=topbot] |=== |A |B |C |=== EOS output = convert_string_to_embedded input, backend: 'docbook' assert_css 'informaltable[frame="topbot"]', output, 1 end test 'should convert frame value ends to topbot when converting to DocBook' do input = <<~'EOS' [frame=ends] |=== |A |B |C |=== EOS output = convert_string_to_embedded input, backend: 'docbook' assert_css 'informaltable[frame="topbot"]', output, 1 end test 'table with landscape orientation in DocBook' do ['orientation=landscape', '%rotate'].each do |attrs| input = <<~EOS [#{attrs}] |=== |Column A | Column B | Column C |=== EOS output = convert_string_to_embedded input, backend: 'docbook' assert_css 'informaltable', output, 1 assert_css 'informaltable[orient="land"]', output, 1 end end test 'table with implicit 
header row' do input = <<~'EOS' |=== |Column 1 |Column 2 |Data A1 |Data B1 |Data A2 |Data B2 |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > thead', output, 1 assert_css 'table > thead > tr', output, 1 assert_css 'table > thead > tr > th', output, 2 assert_css 'table > tbody', output, 1 assert_css 'table > tbody > tr', output, 2 end test 'table with implicit header row only' do input = <<~'EOS' |=== |Column 1 |Column 2 |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > thead', output, 1 assert_css 'table > thead > tr', output, 1 assert_css 'table > thead > tr > th', output, 2 assert_css 'table > tbody', output, 0 end test 'table with implicit header row when other options set' do input = <<~'EOS' [%autowidth] |=== |Column 1 |Column 2 |Data A1 |Data B1 |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table[style*="width"]', output, 0 assert_css 'table > colgroup > col', output, 2 assert_css 'table > thead', output, 1 assert_css 'table > thead > tr', output, 1 assert_css 'table > thead > tr > th', output, 2 assert_css 'table > tbody', output, 1 assert_css 'table > tbody > tr', output, 1 end test 'no implicit header row if second line not blank' do input = <<~'EOS' |=== |Column 1 |Column 2 |Data A1 |Data B1 |Data A2 |Data B2 |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > thead', output, 0 assert_css 'table > tbody', output, 1 assert_css 'table > tbody > tr', output, 3 end test 'no implicit header row if cell in first line spans multiple lines' do input = <<~'EOS' [cols=2*] |=== |A1 A1 continued|B1 |A2 |B2 |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > thead', output, 0 assert_css 'table > tbody', output, 1 assert_css 'table > tbody > tr', output, 2 assert_xpath '(//td)[1]/p', output, 2 end test 'should format first cell as literal if there is no implicit header row and column has l style' do input = <<~'EOS' [cols="1l,1"] |=== |literal |normal |=== EOS output = convert_string_to_embedded input assert_css 'tbody pre', output, 1 assert_css 'tbody p.tableblock', output, 1 end test 'should format first cell as AsciiDoc if there is no implicit header row and column has a style' do input = <<~'EOS' [cols="1a,1"] |=== | * list | normal |=== EOS output = convert_string_to_embedded input assert_css 'tbody .ulist', output, 1 assert_css 'tbody p.tableblock', output, 1 end test 'should interpret leading indent if first cell is AsciiDoc and there is no implicit header row' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS [cols="1a,1"] |=== | literal | normal |=== EOS output = convert_string_to_embedded input assert_css 'tbody pre', output, 1 assert_css 'tbody p.tableblock', output, 1 end test 'should format first cell as AsciiDoc if there is no implicit header row and cell has a style' do input = <<~'EOS' |=== a| * list | normal |=== EOS output = convert_string_to_embedded input assert_css 'tbody .ulist', output, 1 assert_css 'tbody p.tableblock', output, 1 end test 'no implicit header row if AsciiDoc cell in first line spans multiple lines' do input = <<~'EOS' [cols=2*] |=== a|contains AsciiDoc content * a * 
b * c a|contains no AsciiDoc content just text |A2 |B2 |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > thead', output, 0 assert_css 'table > tbody', output, 1 assert_css 'table > tbody > tr', output, 2 assert_xpath '(//td)[1]//ul', output, 1 end test 'no implicit header row if first line blank' do input = <<~'EOS' |=== |Column 1 |Column 2 |Data A1 |Data B1 |Data A2 |Data B2 |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > thead', output, 0 assert_css 'table > tbody', output, 1 assert_css 'table > tbody > tr', output, 3 end test 'no implicit header row if noheader option is specified' do input = <<~'EOS' [%noheader] |=== |Column 1 |Column 2 |Data A1 |Data B1 |Data A2 |Data B2 |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > thead', output, 0 assert_css 'table > tbody', output, 1 assert_css 'table > tbody > tr', output, 3 end test 'styles not applied to header cells' do input = <<~'EOS' [cols="1h,1s,1e",options="header,footer"] |=== |Name |Occupation| Website |Octocat |Social coding| https://github.com |Name |Occupation| Website |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > thead > tr > th', output, 3 assert_css 'table > thead > tr > th > *', output, 0 assert_css 'table > tfoot > tr > th', output, 1 assert_css 'table > tfoot > tr > td', output, 2 assert_css 'table > tfoot > tr > td > p > strong', output, 1 assert_css 'table > tfoot > tr > td > p > em', output, 1 assert_css 'table > tbody > tr > th', output, 1 assert_css 'table > tbody > tr > td', output, 2 assert_css 'table > tbody > tr > td > p.header', output, 0 assert_css 'table > tbody > tr > td > p > strong', output, 1 assert_css 'table > tbody > tr > td > p > em > a', output, 1 end test 'should apply text formatting to cells in implicit header row when column has a style' do input = <<~'EOS' [cols="2*a"] |=== | _foo_ | *bar* | * list item | paragraph |=== EOS output = convert_string_to_embedded input assert_xpath '(//thead/tr/th)[1]/em[text()="foo"]', output, 1 assert_xpath '(//thead/tr/th)[2]/strong[text()="bar"]', output, 1 assert_css 'tbody .ulist', output, 1 assert_css 'tbody .paragraph', output, 1 end test 'should apply style and text formatting to cells in first row if no implicit header' do input = <<~'EOS' [cols="s,e"] |=== | _strong_ | *emphasis* | strong | emphasis |=== EOS output = convert_string_to_embedded input assert_xpath '((//tbody/tr)[1]/td)[1]//strong/em[text()="strong"]', output, 1 assert_xpath '((//tbody/tr)[1]/td)[2]//em/strong[text()="emphasis"]', output, 1 assert_xpath '((//tbody/tr)[2]/td)[1]//strong[text()="strong"]', output, 1 assert_xpath '((//tbody/tr)[2]/td)[2]//em[text()="emphasis"]', output, 1 end test 'vertical table headers use th element instead of header class' do input = <<~'EOS' [cols="1h,1s,1e"] |=== |Name |Occupation| Website |Octocat |Social coding| https://github.com |Name |Occupation| Website |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > tbody > tr > th', output, 3 assert_css 'table > tbody > tr > td', output, 6 assert_css 'table > tbody > tr .header', output, 0 assert_css 'table > tbody > tr > td > p > strong', output, 3 assert_css 'table > tbody > tr > td > p > em', output, 3 assert_css 
'table > tbody > tr > td > p > em > a', output, 1 end test 'supports horizontal and vertical source data with blank lines and table header' do input = <<~'EOS' .Horizontal and vertical source data [width="80%",cols="3,^2,^2,10",options="header"] |=== |Date |Duration |Avg HR |Notes |22-Aug-08 |10:24 | 157 | Worked out MSHR (max sustainable heart rate) by going hard for this interval. |22-Aug-08 |23:03 | 152 | Back-to-back with previous interval. |24-Aug-08 |40:00 | 145 | Moderately hard interspersed with 3x 3min intervals (2 min hard + 1 min really hard taking the HR up to 160). I am getting in shape! |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table[style*="width: 80%"]', output, 1 assert_xpath '/table/caption[@class="title"][text()="Table 1. Horizontal and vertical source data"]', output, 1 assert_css 'table > colgroup > col', output, 4 assert_css 'table > colgroup > col:nth-child(1)[style*="width: 17.647%"]', output, 1 assert_css 'table > colgroup > col:nth-child(2)[style*="width: 11.7647%"]', output, 1 assert_css 'table > colgroup > col:nth-child(3)[style*="width: 11.7647%"]', output, 1 assert_css 'table > colgroup > col:nth-child(4)[style*="width: 58.8236%"]', output, 1 assert_css 'table > thead', output, 1 assert_css 'table > thead > tr', output, 1 assert_css 'table > thead > tr > th', output, 4 assert_css 'table > tbody > tr', output, 3 assert_css 'table > tbody > tr:nth-child(1) > td', output, 4 assert_css 'table > tbody > tr:nth-child(2) > td', output, 4 assert_css 'table > tbody > tr:nth-child(3) > td', output, 4 assert_xpath "/table/tbody/tr[1]/td[4]/p[text()='Worked out MSHR (max sustainable heart rate) by going hard\nfor this interval.']", output, 1 assert_css 'table > tbody > tr:nth-child(3) > td:nth-child(4) > p', output, 2 assert_xpath '/table/tbody/tr[3]/td[4]/p[2][text()="I am getting in shape!"]', output, 1 end test 'percentages as column widths' do input = <<~'EOS' [cols="<.^10%,<90%"] |=== |column A |column B |=== EOS output = convert_string_to_embedded input assert_xpath '/table/colgroup/col', output, 2 assert_xpath '(/table/colgroup/col)[1][@style="width: 10%;"]', output, 1 assert_xpath '(/table/colgroup/col)[2][@style="width: 90%;"]', output, 1 end test 'spans, alignments and styles' do input = <<~'EOS' [cols="e,m,^,>s",width="25%"] |=== |1 >s|2 |3 |4 ^|5 2.2+^.^|6 .3+<.>m|7 ^|8 d|9 2+>|10 |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col[style*="width: 25%"]', output, 4 assert_css 'table > tbody > tr', output, 4 assert_css 'table > tbody > tr > td', output, 10 assert_css 'table > tbody > tr:nth-child(1) > td', output, 4 assert_css 'table > tbody > tr:nth-child(2) > td', output, 3 assert_css 'table > tbody > tr:nth-child(3) > td', output, 1 assert_css 'table > tbody > tr:nth-child(4) > td', output, 2 assert_css 'table > tbody > tr:nth-child(1) > td:nth-child(1).halign-left.valign-top p em', output, 1 assert_css 'table > tbody > tr:nth-child(1) > td:nth-child(2).halign-right.valign-top p strong', output, 1 assert_css 'table > tbody > tr:nth-child(1) > td:nth-child(3).halign-center.valign-top p', output, 1 assert_css 'table > tbody > tr:nth-child(1) > td:nth-child(3).halign-center.valign-top p *', output, 0 assert_css 'table > tbody > tr:nth-child(1) > td:nth-child(4).halign-right.valign-top p strong', output, 1 assert_css 'table > tbody > tr:nth-child(2) > td:nth-child(1).halign-center.valign-top p em', output, 1 assert_css 'table > tbody > 
tr:nth-child(2) > td:nth-child(2).halign-center.valign-middle[colspan="2"][rowspan="2"] p code', output, 1 assert_css 'table > tbody > tr:nth-child(2) > td:nth-child(3).halign-left.valign-bottom[rowspan="3"] p code', output, 1 assert_css 'table > tbody > tr:nth-child(3) > td:nth-child(1).halign-center.valign-top p em', output, 1 assert_css 'table > tbody > tr:nth-child(4) > td:nth-child(1).halign-left.valign-top p', output, 1 assert_css 'table > tbody > tr:nth-child(4) > td:nth-child(1).halign-left.valign-top p em', output, 0 assert_css 'table > tbody > tr:nth-child(4) > td:nth-child(2).halign-right.valign-top[colspan="2"] p code', output, 1 end test 'sets up columns correctly if first row has cell that spans columns' do input = <<~'EOS' |=== 2+^|AAA |CCC |AAA |BBB |CCC |AAA |BBB |CCC |=== EOS output = convert_string_to_embedded input assert_css 'table > tbody > tr:nth-child(1) > td', output, 2 assert_css 'table > tbody > tr:nth-child(1) > td:nth-child(1)[colspan="2"]', output, 1 assert_css 'table > tbody > tr:nth-child(1) > td:nth-child(2):not([colspan])', output, 1 assert_css 'table > tbody > tr:nth-child(2) > td:not([colspan])', output, 3 assert_css 'table > tbody > tr:nth-child(3) > td:not([colspan])', output, 3 end test 'supports repeating cells' do input = <<~'EOS' |=== 3*|A |1 3*|2 |b |c |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 3 assert_css 'table > tbody > tr', output, 3 assert_css 'table > tbody > tr:nth-child(1) > td', output, 3 assert_css 'table > tbody > tr:nth-child(2) > td', output, 3 assert_css 'table > tbody > tr:nth-child(3) > td', output, 3 assert_xpath '/table/tbody/tr[1]/td[1]/p[text()="A"]', output, 1 assert_xpath '/table/tbody/tr[1]/td[2]/p[text()="A"]', output, 1 assert_xpath '/table/tbody/tr[1]/td[3]/p[text()="A"]', output, 1 assert_xpath '/table/tbody/tr[2]/td[1]/p[text()="1"]', output, 1 assert_xpath '/table/tbody/tr[2]/td[2]/p[text()="2"]', output, 1 assert_xpath '/table/tbody/tr[2]/td[3]/p[text()="2"]', output, 1 assert_xpath '/table/tbody/tr[3]/td[1]/p[text()="2"]', output, 1 assert_xpath '/table/tbody/tr[3]/td[2]/p[text()="b"]', output, 1 assert_xpath '/table/tbody/tr[3]/td[3]/p[text()="c"]', output, 1 end test 'calculates colnames correctly when using implicit column count and single cell with colspan' do input = <<~'EOS' |=== 2+|Two Columns |One Column |One Column |=== EOS output = convert_string_to_embedded input, backend: 'docbook' assert_xpath '//colspec', output, 2 assert_xpath '(//colspec)[1][@colname="col_1"]', output, 1 assert_xpath '(//colspec)[2][@colname="col_2"]', output, 1 assert_xpath '//row', output, 2 assert_xpath '(//row)[1]/entry', output, 1 assert_xpath '(//row)[1]/entry[@namest="col_1"][@nameend="col_2"]', output, 1 end test 'calculates colnames correctly when using implicit column count and cells with mixed colspans' do input = <<~'EOS' |=== 2+|Two Columns | One Column |One Column |One Column |One Column |=== EOS output = convert_string_to_embedded input, backend: 'docbook' assert_xpath '//colspec', output, 3 assert_xpath '(//colspec)[1][@colname="col_1"]', output, 1 assert_xpath '(//colspec)[2][@colname="col_2"]', output, 1 assert_xpath '(//colspec)[3][@colname="col_3"]', output, 1 assert_xpath '//row', output, 2 assert_xpath '(//row)[1]/entry', output, 2 assert_xpath '(//row)[1]/entry[@namest="col_1"][@nameend="col_2"]', output, 1 assert_xpath '(//row)[2]/entry[@namest]', output, 0 assert_xpath '(//row)[2]/entry[@nameend]', output, 0 end test 'assigns 
unique column names for table with implicit column count and colspans in first row' do input = <<~'EOS' |=== | 2+| Node 0 2+| Node 1 | Host processes | Core 0 | Core 1 | Core 4 | Core 5 | Guest processes | Core 2 | Core 3 | Core 6 | Core 7 |=== EOS output = convert_string_to_embedded input, backend: 'docbook' assert_xpath '//colspec', output, 5 (1..5).each do |n| assert_xpath %((//colspec)[#{n}][@colname="col_#{n}"]), output, 1 end assert_xpath '(//row)[1]/entry', output, 3 assert_xpath '((//row)[1]/entry)[1][@namest]', output, 0 assert_xpath '((//row)[1]/entry)[1][@namend]', output, 0 assert_xpath '((//row)[1]/entry)[2][@namest="col_2"][@nameend="col_3"]', output, 1 assert_xpath '((//row)[1]/entry)[3][@namest="col_4"][@nameend="col_5"]', output, 1 end test 'ignores cell with colspan that exceeds colspec' do input = <<~'EOS' [cols=2*] |=== 3+|A |B a|C more C |=== EOS using_memory_logger do |logger| output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table *', output, 0 assert_message logger, :ERROR, ': line 5: dropping cell because it exceeds specified number of columns', Hash end end test 'paragraph and literal repeated content' do input = <<~'EOS' [cols=",^l"] |=== |Paragraphs |Literal 3*|The discussion about what is good, what is beautiful, what is noble, what is pure, and what is true could always go on. Why is that important? Why would I like to do that? Because that's the only conversation worth having. And whether it goes on or not after I die, I don't know. But, I do know that it is the conversation I want to have while I am still alive. Which means that to me the offer of certainty, the offer of complete security, the offer of an impermeable faith that can't give way is an offer of something not worth having. I want to live my life taking the risk all the time that I don't know anything like enough yet... that I haven't understood enough... that I can't know enough... that I am always hungrily operating on the margins of a potentially great harvest of future knowledge and wisdom. I wouldn't have it any other way. 
|=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > thead', output, 1 assert_css 'table > thead > tr', output, 1 assert_css 'table > thead > tr > th', output, 2 assert_css 'table > tbody', output, 1 assert_css 'table > tbody > tr', output, 1 assert_css 'table > tbody > tr > td', output, 2 assert_css 'table > tbody > tr > td:nth-child(1).halign-left.valign-top > p.tableblock', output, 7 assert_css 'table > tbody > tr > td:nth-child(2).halign-center.valign-top > div.literal > pre', output, 1 literal = xmlnodes_at_css 'table > tbody > tr > td:nth-child(2).halign-center.valign-top > div.literal > pre', output, 1 assert_equal 26, literal.text.lines.size end test 'should not split paragraph at line containing only {blank} that is directly adjacent to non-blank lines' do input = <<~'EOS' |=== |paragraph {blank} still one paragraph {blank} still one paragraph |=== EOS result = convert_string_to_embedded input assert_css 'p.tableblock', result, 1 end test 'should strip trailing newlines when splitting paragraphs' do input = <<~'EOS' |=== |first wrapped paragraph second paragraph third paragraph |=== EOS result = convert_string_to_embedded input assert_xpath %((//p[@class="tableblock"])[1][text()="first wrapped\nparagraph"]), result, 1 assert_xpath %((//p[@class="tableblock"])[2][text()="second paragraph"]), result, 1 assert_xpath %((//p[@class="tableblock"])[3][text()="third paragraph"]), result, 1 end test 'basic AsciiDoc cell' do input = <<~'EOS' |=== a|-- NOTE: content content -- |=== EOS result = convert_string_to_embedded input assert_css 'table.tableblock', result, 1 assert_css 'table.tableblock td.tableblock', result, 1 assert_css 'table.tableblock td.tableblock .openblock', result, 1 assert_css 'table.tableblock td.tableblock .openblock .admonitionblock', result, 1 assert_css 'table.tableblock td.tableblock .openblock .paragraph', result, 1 end test 'AsciiDoc table cell should be wrapped in div with class "content"' do input = <<~'EOS' |=== a|AsciiDoc table cell |=== EOS result = convert_string_to_embedded input assert_css 'table.tableblock td.tableblock > div.content', result, 1 assert_css 'table.tableblock td.tableblock > div.content > div.paragraph', result, 1 end test 'doctype can be set in AsciiDoc table cell' do input = <<~'EOS' |=== a| :doctype: inline content |=== EOS result = convert_string_to_embedded input assert_css 'table.tableblock', result, 1 assert_css 'table.tableblock .paragraph', result, 0 end test 'should reset doctype to default in AsciiDoc table cell' do input = <<~'EOS' = Book Title :doctype: book == Chapter 1 |=== a| = AsciiDoc Table Cell doctype={doctype} {backend-html5-doctype-article} {backend-html5-doctype-book} |=== EOS result = convert_string_to_embedded input, attributes: { 'attribute-missing' => 'skip' } assert_includes result, 'doctype=article' refute_includes result, '{backend-html5-doctype-article}' assert_includes result, '{backend-html5-doctype-book}' end test 'should update doctype-related attributes in AsciiDoc table cell when doctype is set' do input = <<~'EOS' = Document Title :doctype: article == Chapter 1 |=== a| = AsciiDoc Table Cell :doctype: book doctype={doctype} {backend-html5-doctype-book} {backend-html5-doctype-article} |=== EOS result = convert_string_to_embedded input, attributes: { 'attribute-missing' => 'skip' } assert_includes result, 'doctype=book' refute_includes result, '{backend-html5-doctype-book}' assert_includes result, 
'{backend-html5-doctype-article}' end test 'should not allow AsciiDoc table cell to set a document attribute that was hard set by the API' do input = <<~'EOS' |=== a| :icons: NOTE: This admonition does not have a font-based icon. |=== EOS result = convert_string_to_embedded input, safe: :safe, attributes: { 'icons' => 'font' } assert_css 'td.icon .title', result, 0 assert_css 'td.icon i.icon-note', result, 1 end test 'should not allow AsciiDoc table cell to set a document attribute that was hard unset by the API' do input = <<~'EOS' |=== a| :icons: font NOTE: This admonition does not have a font-based icon. |=== EOS result = convert_string_to_embedded input, safe: :safe, attributes: { 'icons' => nil } assert_css 'td.icon .title', result, 1 assert_css 'td.icon i.icon-note', result, 0 assert_xpath '//td[@class="icon"]/*[@class="title"][text()="Note"]', result, 1 end test 'should keep attribute unset in AsciiDoc table cell if unset in parent document' do input = <<~'EOS' :!sectids: :!table-caption: == Outer Heading .Outer Table |=== a| == Inner Heading .Inner Table !=== ! table cell !=== |=== EOS result = convert_string_to_embedded input assert_xpath 'h2[id]', result, 0 assert_xpath '//caption[text()="Outer Table"]', result, 1 assert_xpath '//caption[text()="Inner Table"]', result, 1 end test 'should allow attribute unset in parent document to be set in AsciiDoc table cell' do input = <<~'EOS' :!sectids: == No ID |=== a| == No ID :sectids: == Has ID |=== EOS result = convert_string_to_embedded input headings = xmlnodes_at_css 'h2', result assert_equal 3, headings.size assert_nil headings[0].attr :id assert_nil headings[1].attr :id assert_equal '_has_id', (headings[2].attr :id) end test 'should not allow locked attribute unset in parent document to be set in AsciiDoc table cell' do input = <<~'EOS' == No ID |=== a| == No ID :sectids: == Has ID |=== EOS result = convert_string_to_embedded input, attributes: { 'sectids' => nil } headings = xmlnodes_at_css 'h2', result assert_equal 3, headings.size headings.each {|heading| assert_nil heading.attr :id } end test 'showtitle can be enabled in AsciiDoc table cell if unset in parent document' do %w(showtitle notitle).each do |name| input = <<~EOS = Document Title :#{name == 'showtitle' ? '!' : ''}#{name}: |=== a| = Nested Document Title :#{name == 'showtitle' ? '' : '!'}#{name}: content |=== EOS result = convert_string_to_embedded input assert_css 'h1', result, 1 assert_css '.tableblock h1', result, 1 end end test 'showtitle can be enabled in AsciiDoc table cell if unset by API' do %w(showtitle notitle).each do |name| input = <<~EOS = Document Title |=== a| = Nested Document Title :#{name == 'showtitle' ? '' : '!'}#{name}: content |=== EOS result = convert_string_to_embedded input, attributes: { name => (name == 'showtitle' ? nil : '') } assert_css 'h1', result, 1 assert_css '.tableblock h1', result, 1 end end test 'showtitle can be disabled in AsciiDoc table cell if set in parent document' do %w(showtitle notitle).each do |name| input = <<~EOS = Document Title :#{name == 'showtitle' ? '' : '!'}#{name}: |=== a| = Nested Document Title :#{name == 'showtitle' ? '!' : ''}#{name}: content |=== EOS result = convert_string_to_embedded input assert_css 'h1', result, 1 assert_css '.tableblock h1', result, 0 end end test 'showtitle can be disabled in AsciiDoc table cell if set by API' do %w(showtitle notitle).each do |name| input = <<~EOS = Document Title |=== a| = Nested Document Title :#{name == 'showtitle' ? '!' 
: ''}#{name}: content |=== EOS result = convert_string_to_embedded input, attributes: { name => (name == 'showtitle' ? '' : nil) } assert_css 'h1', result, 1 assert_css '.tableblock h1', result, 0 end end test 'AsciiDoc content' do input = <<~'EOS' [cols="1e,1,5a"] |=== |Name |Backends |Description |badges |xhtml11, html5 | Link badges ('XHTML 1.1' and 'CSS') in document footers. [NOTE] ==== The path names of images, icons and scripts are relative path names to the output document not the source document. ==== |[[X97]] docinfo, docinfo1, docinfo2 |All backends | These three attributes control which document information files will be included in the the header of the output file: docinfo:: Include `-docinfo.` docinfo1:: Include `docinfo.` docinfo2:: Include `docinfo.` and `-docinfo.` Where `` is the file name (sans extension) of the AsciiDoc input file and `` is `.html` for HTML outputs or `.xml` for DocBook outputs. If the input file is the standard input then the output file name is used. |=== EOS doc = document_from_string input, sourcemap: true table = doc.blocks.first refute_nil table tbody = table.rows.body assert_equal 2, tbody.size body_cell_1_2 = tbody[0][1] assert_equal 5, body_cell_1_2.lineno body_cell_1_3 = tbody[0][2] refute_nil body_cell_1_3.inner_document assert body_cell_1_3.inner_document.nested? assert_equal doc, body_cell_1_3.inner_document.parent_document assert_equal doc.converter, body_cell_1_3.inner_document.converter assert_equal 5, body_cell_1_3.lineno assert_equal 6, body_cell_1_3.inner_document.lineno note = (body_cell_1_3.inner_document.find_by context: :admonition)[0] assert_equal 9, note.lineno output = doc.convert standalone: false # NOTE JRuby matches the table inside the admonition block if the class is not specified on the table assert_css 'table.tableblock > tbody > tr', output, 2 assert_css 'table.tableblock > tbody > tr:nth-child(1) > td:nth-child(3) div.admonitionblock', output, 1 assert_css 'table.tableblock > tbody > tr:nth-child(2) > td:nth-child(3) div.dlist', output, 1 end test 'should preserve leading indentation in contents of AsciiDoc table cell if contents starts with newline' do # NOTE cannot use single-quoted heredoc because of https://github.com/jruby/jruby/issues/4260 input = <<~EOS |=== a| $ command a| paragraph |=== EOS doc = document_from_string input, sourcemap: true table = doc.blocks[0] tbody = table.rows.body assert_equal 1, table.lineno assert_equal 2, tbody[0][0].lineno assert_equal 3, tbody[0][0].inner_document.lineno assert_equal 4, tbody[1][0].lineno output = doc.convert standalone: false assert_css 'td', output, 2 assert_xpath '(//td)[1]//*[@class="literalblock"]', output, 1 assert_xpath '(//td)[2]//*[@class="paragraph"]', output, 1 assert_xpath '(//pre)[1][text()="$ command"]', output, 1 assert_xpath '(//p)[1][text()="paragraph"]', output, 1 end test 'preprocessor directive on first line of an AsciiDoc table cell should be processed' do input = <<~'EOS' |=== a|include::fixtures/include-file.adoc[] |=== EOS output = convert_string_to_embedded input, safe: :safe, base_dir: testdir assert_match(/included content/, output) end test 'cross reference link in an AsciiDoc table cell should resolve to reference in main document' do input = <<~'EOS' == Some |=== a|See <<_more>> |=== == More content EOS result = convert_string input assert_xpath '//a[@href="#_more"]', result, 1 assert_xpath '//a[@href="#_more"][text()="More"]', result, 1 end test 'should discover anchor at start of cell and register it as a reference' do input = <<~'EOS' 
      The highest peak in the Front Range is <<grays-peak>>, which tops <<mount-evans>> by just a few feet.

      [cols="1s,1"]
      |===
      |[[mount-evans,Mount Evans]]Mount Evans
      |14,271 feet

      h|[[grays-peak,Grays Peak]] Grays Peak
      |14,278 feet
      |===
      EOS
      doc = document_from_string input
      refs = doc.catalog[:refs]
      assert refs.key?('mount-evans')
      assert refs.key?('grays-peak')
      output = doc.convert standalone: false
      assert_xpath '(//p)[1]/a[@href="#grays-peak"][text()="Grays Peak"]', output, 1
      assert_xpath '(//p)[1]/a[@href="#mount-evans"][text()="Mount Evans"]', output, 1
      assert_xpath '(//table/tbody/tr)[1]//td//a[@id="mount-evans"]', output, 1
      assert_xpath '(//table/tbody/tr)[2]//th//a[@id="grays-peak"]', output, 1
    end

    test 'should catalog anchor at start of cell in implicit header row when column has a style' do
      input = <<~'EOS'
      [cols=1a]
      |===
      |[[foo,Foo]]* not AsciiDoc

      | AsciiDoc
      |===
      EOS
      doc = document_from_string input
      refs = doc.catalog[:refs]
      assert refs.key?('foo')
    end

    test 'should catalog anchor at start of cell in explicit header row when column has a style' do
      input = <<~'EOS'
      [%header,cols=1a]
      |===
      |[[foo,Foo]]* not AsciiDoc
      | AsciiDoc
      |===
      EOS
      doc = document_from_string input
      refs = doc.catalog[:refs]
      assert refs.key?('foo')
    end

    test 'should catalog anchor at start of cell in first row' do
      input = <<~'EOS'
      |===
      |[[foo,Foo]]foo
      | bar
      |===
      EOS
      doc = document_from_string input
      refs = doc.catalog[:refs]
      assert refs.key?('foo')
    end

    test 'footnotes should not be shared between an AsciiDoc table cell and the main document' do
      input = <<~'EOS'
      |===
      a|AsciiDoc footnote:[A lightweight markup language.]
      |===
      EOS
      result = convert_string input
      assert_css '#_footnotedef_1', result, 1
    end

    test 'callout numbers should be globally unique, including AsciiDoc table cells' do
      input = <<~'EOS'
      = Document Title

      == Section 1

      |===
      a|
      [source, yaml]
      ----
      key: value <1>
      ----
      <1> First callout
      |===

      == Section 2

      |===
      a|
      [source, yaml]
      ----
      key: value <1>
      ----
      <1> Second callout
      |===

      == Section 3

      [source, yaml]
      ----
      key: value <1>
      ----
      <1> Third callout
      EOS
      result = convert_string_to_embedded input, backend: 'docbook'
      conums = xmlnodes_at_xpath '//co', result
      assert_equal 3, conums.size
      ['CO1-1', 'CO2-1', 'CO3-1'].each_with_index do |conum, idx|
        assert_equal conum, conums[idx].attribute('xml:id').value
      end
      callouts = xmlnodes_at_xpath '//callout', result
      assert_equal 3, callouts.size
      ['CO1-1', 'CO2-1', 'CO3-1'].each_with_index do |callout, idx|
        assert_equal callout, callouts[idx].attribute('arearefs').value
      end
    end

    test 'compat mode can be activated in AsciiDoc table cell' do
      input = <<~'EOS'
      |===
      a|
      :compat-mode:

      The word 'italic' is emphasized.
      |===
      EOS
      result = convert_string_to_embedded input
      assert_xpath '//em[text()="italic"]', result, 1
    end

    test 'compat mode in AsciiDoc table cell inherits from parent document' do
      input = <<~'EOS'
      :compat-mode:

      The word 'italic' is emphasized.

      [cols=1*]
      |===
      |The word 'oblique' is emphasized.
      a|
      The word 'slanted' is emphasized.
      |===

      The word 'askew' is emphasized.
      EOS
      result = convert_string_to_embedded input
      assert_xpath '//em[text()="italic"]', result, 1
      assert_xpath '//em[text()="oblique"]', result, 1
      assert_xpath '//em[text()="slanted"]', result, 1
      assert_xpath '//em[text()="askew"]', result, 1
    end

    test 'compat mode in AsciiDoc table cell can be unset if set in parent document' do
      input = <<~'EOS'
      :compat-mode:

      The word 'italic' is emphasized.

      [cols=1*]
      |===
      |The word 'oblique' is emphasized.
      a|
      :!compat-mode:

      The word 'slanted' is not emphasized.
      |===

      The word 'askew' is emphasized.
EOS result = convert_string_to_embedded input assert_xpath '//em[text()="italic"]', result, 1 assert_xpath '//em[text()="oblique"]', result, 1 assert_xpath '//em[text()="slanted"]', result, 0 assert_xpath '//em[text()="askew"]', result, 1 end test 'nested table' do input = <<~'EOS' [cols="1,2a"] |=== |Normal cell |Cell with nested table [cols="2,1"] !=== !Nested table cell 1 !Nested table cell 2 !=== |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 2 assert_css 'table table', output, 1 assert_css 'table > tbody > tr > td:nth-child(2) table', output, 1 assert_css 'table > tbody > tr > td:nth-child(2) table > tbody > tr > td', output, 2 end test 'can set format of nested table to psv' do input = <<~'EOS' [cols="2*"] |=== |normal cell a| [format=psv] !=== !nested cell !=== |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 2 assert_css 'table table', output, 1 assert_css 'table > tbody > tr > td:nth-child(2) table', output, 1 assert_css 'table > tbody > tr > td:nth-child(2) table > tbody > tr > td', output, 1 end test 'AsciiDoc table cell should inherit to_dir option from parent document' do doc = document_from_string <<~'EOS', parse: true, to_dir: testdir |=== a| AsciiDoc table cell |=== EOS nested_doc = (doc.blocks[0].find_by context: :document, traverse_documents: true)[0] assert nested_doc.nested? assert_equal doc.options[:to_dir], nested_doc.options[:to_dir] end test 'AsciiDoc table cell should not inherit toc setting from parent document' do input = <<~'EOS' = Document Title :toc: == Section |=== a| == Section in Nested Document content |=== EOS output = convert_string input assert_css '.toc', output, 1 assert_css 'table .toc', output, 0 end test 'should be able to enable toc in an AsciiDoc table cell' do input = <<~'EOS' = Document Title == Section A |=== a| = Subdocument Title :toc: == Subdocument Section A content |=== EOS output = convert_string input assert_css '.toc', output, 1 assert_css 'table .toc', output, 1 end test 'should be able to enable toc in an AsciiDoc table cell even if hard unset by API' do input = <<~'EOS' = Document Title == Section A |=== a| = Subdocument Title :toc: == Subdocument Section A content |=== EOS output = convert_string input, attributes: { 'toc' => nil } assert_css '.toc', output, 1 assert_css 'table .toc', output, 1 end test 'should be able to enable toc in both outer document and in an AsciiDoc table cell' do input = <<~'EOS' = Document Title :toc: == Section A |=== a| = Subdocument Title :toc: macro [#table-cell-toc] toc::[] == Subdocument Section A content |=== EOS output = convert_string input assert_css '.toc', output, 2 assert_css '#toc', output, 1 assert_css 'table .toc', output, 1 assert_css 'table #table-cell-toc', output, 1 end test 'document in an AsciiDoc table cell should not see doctitle of parent' do input = <<~'EOS' = Document Title [cols="1a"] |=== |AsciiDoc content |=== EOS output = convert_string input assert_css 'table', output, 1 assert_css 'table > tbody > tr > td', output, 1 assert_css 'table > tbody > tr > td #preamble', output, 0 assert_css 'table > tbody > tr > td .paragraph', output, 1 end test 'cell background color' do input = <<~'EOS' [cols="1e,1", options="header"] |=== |{set:cellbgcolor:green}green |{set:cellbgcolor!} plain |{set:cellbgcolor:red}red |{set:cellbgcolor!} plain |=== EOS output = convert_string_to_embedded input assert_xpath '(/table/thead/tr/th)[1][@style="background-color: green;"]', output, 1 assert_xpath 
        '(/table/thead/tr/th)[2][@style="background-color: green;"]', output, 0
      assert_xpath '(/table/tbody/tr/td)[1][@style="background-color: red;"]', output, 1
      assert_xpath '(/table/tbody/tr/td)[2][@style="background-color: green;"]', output, 0
    end

    test 'should warn if table block is not terminated' do
      input = <<~'EOS'
      outside

      |===
      |
      inside

      still inside

      eof
      EOS
      using_memory_logger do |logger|
        output = convert_string_to_embedded input
        assert_xpath '/table', output, 1
        assert_message logger, :WARN, '<stdin>: line 3: unterminated table block', Hash
      end
    end

    test 'should show correct line number in warning about unterminated block inside AsciiDoc table cell' do
      input = <<~'EOS'
      outside

      * list item
      +
      |===
      |cell
      a|inside

      ====
      unterminated example block
      |===

      eof
      EOS
      using_memory_logger do |logger|
        output = convert_string_to_embedded input
        assert_xpath '//ul//table', output, 1
        assert_message logger, :WARN, '<stdin>: line 9: unterminated example block', Hash
      end
    end

    test 'custom separator for an AsciiDoc table cell' do
      input = <<~'EOS'
      [cols=2,separator=!]
      |===
      !Pipe output to vim
      a!
      ----
      asciidoctor -o - -s test.adoc | view -
      ----
      |===
      EOS
      output = convert_string_to_embedded input
      assert_css 'table', output, 1
      assert_css 'table > colgroup > col', output, 2
      assert_css 'table > tbody > tr', output, 1
      assert_css 'table > tbody > tr:nth-child(1) > td', output, 2
      assert_css 'table > tbody > tr:nth-child(1) > td:nth-child(1) p', output, 1
      assert_css 'table > tbody > tr:nth-child(1) > td:nth-child(2) .listingblock', output, 1
    end

    test 'table with breakable option docbook 5' do
      input = <<~'EOS'
      .Table with breakable
      [%breakable]
      |===
      |Item |Quantity

      |Item 1
      |1
      |===
      EOS
      output = convert_string_to_embedded input, backend: 'docbook5'
      # the breakable option should be expressed as a dbfo keep-together processing instruction
      assert_includes output, '<?dbfo keep-together="auto"?>'
    end

    test 'table with unbreakable option docbook 5' do
      input = <<~'EOS'
      .Table with unbreakable
      [%unbreakable]
      |===
      |Item |Quantity

      |Item 1
      |1
      |===
      EOS
      output = convert_string_to_embedded input, backend: 'docbook5'
      assert_includes output, '<?dbfo keep-together="always"?>'
    end

    test 'no implicit header row if cell in first line is quoted and spans multiple lines' do
      input = <<~'EOS'
      [cols=2*l]
      ,===
      "A1

      A1 continued",B1
      A2,B2
      ,===
      EOS
      output = convert_string_to_embedded input
      assert_css 'table', output, 1
      assert_css 'table > colgroup > col', output, 2
      assert_css 'table > thead', output, 0
      assert_css 'table > tbody', output, 1
      assert_css 'table > tbody > tr', output, 2
      assert_xpath %((//td)[1]//pre[text()="A1\n\nA1 continued"]), output, 1
    end
  end

  context 'DSV' do
    test 'converts simple dsv table' do
      input = <<~'EOS'
      [width="75%",format="dsv"]
      |===
      root:x:0:0:root:/root:/bin/bash
      bin:x:1:1:bin:/bin:/sbin/nologin
      mysql:x:27:27:MySQL\:Server:/var/lib/mysql:/bin/bash
      gdm:x:42:42::/var/lib/gdm:/sbin/nologin
      sshd:x:74:74:Privilege-separated SSH:/var/empty/sshd:/sbin/nologin
      nobody:x:99:99:Nobody:/:/sbin/nologin
      |===
      EOS
      doc = document_from_string input, standalone: false
      table = doc.blocks[0]
      assert_equal 100, table.columns.map {|col| col.attributes['colpcwidth'] }.reduce(:+)
      output = doc.convert
      assert_css 'table', output, 1
      assert_css 'table > colgroup > col[style*="width: 14.2857%"]', output, 6
      assert_css 'table > colgroup > col:last-of-type[style*="width: 14.2858%"]', output, 1
      assert_css 'table > tbody > tr', output, 6
      assert_xpath '//tr[4]/td[5]/p/text()', output, 0
      assert_xpath '//tr[3]/td[5]/p[text()="MySQL:Server"]', output, 1
    end
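
    # NOTE a minimal illustrative sketch (not exhaustive) of the backslash escape for the
    # DSV cell separator relied on by the MySQL\:Server field above; it assumes the
    # convert_string_to_embedded, assert_css, and assert_xpath helpers from test_helper
    test 'backslash should escape cell separator in dsv data (sketch)' do
      input = <<~'EOS'
      :===
      a\:1:b
      :===
      EOS
      output = convert_string_to_embedded input
      # the escaped colon stays inside the first cell instead of starting a new one
      assert_css 'table > tbody > tr', output, 1
      assert_xpath '/table/tbody/tr[1]/td', output, 2
      assert_xpath '/table/tbody/tr[1]/td[1]/p[text()="a:1"]', output, 1
      assert_xpath '/table/tbody/tr[1]/td[2]/p[text()="b"]', output, 1
    end
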
    test 'dsv format shorthand' do
      input = <<~'EOS'
      :===
      a:b:c
      1:2:3
      :===
      EOS
      output = convert_string_to_embedded input
      assert_css 'table', output, 1
      assert_css 'table > colgroup > col', output, 3
      assert_css 'table > tbody > tr', output, 2
      assert_css 'table > tbody > tr:nth-child(1) > td', output, 3
      assert_css 'table > tbody > tr:nth-child(2) > td', output, 3
    end

    test 'single cell in DSV table should only produce single row' do
      input = <<~'EOS'
      :===
      single cell
      :===
      EOS
      output = convert_string_to_embedded input
      assert_css 'table td', output, 1
    end

    test 'should treat trailing colon as an empty cell' do
      input = <<~'EOS'
      :===
      A1:
      B1:B2
      C1:C2
      :===
      EOS
      output = convert_string_to_embedded input
      assert_css 'table', output, 1
      assert_css 'table > colgroup > col', output, 2
      assert_css 'table > tbody > tr', output, 3
      assert_xpath '/table/tbody/tr[1]/td', output, 2
      assert_xpath '/table/tbody/tr[1]/td[1]/p[text()="A1"]', output, 1
      assert_xpath '/table/tbody/tr[1]/td[2]/p', output, 0
      assert_xpath '/table/tbody/tr[2]/td[1]/p[text()="B1"]', output, 1
    end
  end

  context 'CSV' do
    test 'should treat trailing comma as an empty cell' do
      input = <<~'EOS'
      ,===
      A1,
      B1,B2
      C1,C2
      ,===
      EOS
      output = convert_string_to_embedded input
      assert_css 'table', output, 1
      assert_css 'table > colgroup > col', output, 2
      assert_css 'table > tbody > tr', output, 3
      assert_xpath '/table/tbody/tr[1]/td', output, 2
      assert_xpath '/table/tbody/tr[1]/td[1]/p[text()="A1"]', output, 1
      assert_xpath '/table/tbody/tr[1]/td[2]/p', output, 0
      assert_xpath '/table/tbody/tr[2]/td[1]/p[text()="B1"]', output, 1
    end

    test 'should log error but not crash if cell data has unclosed quote' do
      input = <<~'EOS'
      ,===
      a,b
      c,"
      ,===
      EOS
      using_memory_logger do |logger|
        output = convert_string_to_embedded input
        assert_css 'table', output, 1
        assert_css 'table td', output, 4
        assert_xpath '(/table/td)[4]/p', output, 0
        assert_message logger, :ERROR, '<stdin>: line 3: unclosed quote in CSV data; setting cell to empty', Hash
      end
    end

    test 'should preserve newlines in quoted CSV values' do
      input = <<~'EOS'
      [cols="1,1,1l"]
      ,===
      "A
      B
      C","one

      two

      three","do

      re

      me"
      ,===
      EOS
      output = convert_string_to_embedded input
      assert_css 'table', output, 1
      assert_css 'table > colgroup > col', output, 3
      assert_css 'table > tbody > tr', output, 1
      assert_xpath '/table/tbody/tr[1]/td', output, 3
      assert_xpath %(/table/tbody/tr[1]/td[1]/p[text()="A\nB\nC"]), output, 1
      assert_xpath '/table/tbody/tr[1]/td[2]/p', output, 3
      assert_xpath '/table/tbody/tr[1]/td[2]/p[1][text()="one"]', output, 1
      assert_xpath '/table/tbody/tr[1]/td[2]/p[2][text()="two"]', output, 1
      assert_xpath '/table/tbody/tr[1]/td[2]/p[3][text()="three"]', output, 1
      assert_xpath %(/table/tbody/tr[1]/td[3]//pre[text()="do\n\nre\n\nme"]), output, 1
    end

    test 'should not drop trailing empty cell in TSV data when loaded from an include file' do
      input = <<~'EOS'
      [%header,format=tsv]
      |===
      include::fixtures/data.tsv[]
      |===
      EOS
      output = convert_string_to_embedded input, safe: :safe, base_dir: ASCIIDOCTOR_TEST_DIR
      assert_css 'table > tbody > tr', output, 3
      assert_css 'table > tbody > tr:nth-child(1) > td', output, 3
      assert_css 'table > tbody > tr:nth-child(2) > td', output, 3
      assert_css 'table > tbody > tr:nth-child(3) > td', output, 3
      assert_css 'table > tbody > tr:nth-child(2) > td:nth-child(3):empty', output, 1
    end

    test 'mixed unquoted records and quoted records with escaped quotes, commas, and wrapped lines' do
      input = <<~'EOS'
[format="csv",options="header"]
|===
Year,Make,Model,Description,Price
1997,Ford,E350,"ac, abs, moon",3000.00
1999,Chevy,"Venture ""Extended Edition""","",4900.00
1999,Chevy,"Venture ""Extended Edition, Very Large""",,5000.00
1996,Jeep,Grand Cherokee,"MUST SELL!
air, moon roof, loaded",4799.00 2000,Toyota,Tundra,"""This one's gonna to blow you're socks off,"" per the sticker",10000.00 2000,Toyota,Tundra,"Check it, ""this one's gonna to blow you're socks off"", per the sticker",10000.00 |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col[style*="width: 20%"]', output, 5 assert_css 'table > thead > tr', output, 1 assert_css 'table > tbody > tr', output, 6 assert_xpath '((//tbody/tr)[1]/td)[4]/p[text()="ac, abs, moon"]', output, 1 assert_xpath %(((//tbody/tr)[2]/td)[3]/p[text()='Venture "Extended Edition"']), output, 1 assert_xpath %(((//tbody/tr)[4]/td)[4]/p[text()="MUST SELL!\nair, moon roof, loaded"]), output, 1 assert_xpath %(((//tbody/tr)[5]/td)[4]/p[text()='"This one#{decode_char 8217}s gonna to blow you#{decode_char 8217}re socks off," per the sticker']), output, 1 assert_xpath %(((//tbody/tr)[6]/td)[4]/p[text()='Check it, "this one#{decode_char 8217}s gonna to blow you#{decode_char 8217}re socks off", per the sticker']), output, 1 end test 'should allow quotes around a CSV value to be on their own lines' do input = <<~'EOS' [cols=2*] ,=== " A "," B " ,=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > tbody > tr', output, 1 assert_xpath '/table/tbody/tr[1]/td', output, 2 assert_xpath '/table/tbody/tr[1]/td[1]/p[text()="A"]', output, 1 assert_xpath '/table/tbody/tr[1]/td[2]/p[text()="B"]', output, 1 end test 'csv format shorthand' do input = <<~'EOS' ,=== a,b,c 1,2,3 ,=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 3 assert_css 'table > tbody > tr', output, 2 assert_css 'table > tbody > tr:nth-child(1) > td', output, 3 assert_css 'table > tbody > tr:nth-child(2) > td', output, 3 end test 'tsv as format' do input = <<~EOS [format=tsv] ,=== a\tb\tc 1\t2\t3 ,=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 3 assert_css 'table > tbody > tr', output, 2 assert_css 'table > tbody > tr:nth-child(1) > td', output, 3 assert_css 'table > tbody > tr:nth-child(2) > td', output, 3 end test 'custom csv separator' do input = <<~'EOS' [format=csv,separator=;] |=== a;b;c 1;2;3 |=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 3 assert_css 'table > tbody > tr', output, 2 assert_css 'table > tbody > tr:nth-child(1) > td', output, 3 assert_css 'table > tbody > tr:nth-child(2) > td', output, 3 end test 'tab as separator' do input = <<~EOS [separator=\\t] ,=== a\tb\tc 1\t2\t3 ,=== EOS output = convert_string_to_embedded input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 3 assert_css 'table > tbody > tr', output, 2 assert_css 'table > tbody > tr:nth-child(1) > td', output, 3 assert_css 'table > tbody > tr:nth-child(2) > td', output, 3 end test 'single cell in CSV table should only produce single row' do input = <<~'EOS' ,=== single cell ,=== EOS output = convert_string_to_embedded input assert_css 'table td', output, 1 end test 'cell formatted with AsciiDoc style' do input = <<~'EOS' [cols="1,1,1a",separator=;] ,=== element;description;example thematic break,a visible break; also known as a horizontal rule;--- ,=== EOS output = convert_string_to_embedded input assert_css 'table tbody hr', output, 1 end test 'should strip whitespace around contents of 
AsciiDoc cell' do input = <<~'EOS' [cols="1,1,1a",separator=;] ,=== element;description;example paragraph;contiguous lines of words and phrases;" one sentence, one line " ,=== EOS output = convert_string_to_embedded input assert_xpath '/table/tbody//*[@class="paragraph"]/p[text()="one sentence, one line"]', output, 1 end end end asciidoctor-2.0.20/test/test_helper.rb000066400000000000000000000352371443135032600177330ustar00rootroot00000000000000# frozen_string_literal: true ASCIIDOCTOR_TEST_DIR = File.absolute_path __dir__ ASCIIDOCTOR_LIB_DIR = ENV['ASCIIDOCTOR_LIB_DIR'] || (File.join ASCIIDOCTOR_TEST_DIR, '../lib') require 'simplecov' if ENV['COVERAGE'] == 'true' require File.join ASCIIDOCTOR_LIB_DIR, 'asciidoctor' Dir.chdir Asciidoctor::ROOT_DIR require 'nokogiri' # NOTE rouge has all sorts of warnings we don't want to see, so silence them proc do old_verbose, $VERBOSE = $VERBOSE, nil require 'rouge' $VERBOSE = old_verbose end.call require 'socket' require 'tempfile' require 'tmpdir' autoload :FileUtils, 'fileutils' autoload :Pathname, 'pathname' RE_XMLNS_ATTRIBUTE = / xmlns="[^"]+"/ RE_DOCTYPE = /\s* "<" # # Returns the decoded String that corresponds to the numeric character reference def decode_char number [number].pack 'U1' end def haml_template_class (defined? Haml::Template) ? Haml::Template : Tilt::HamlTemplate end def invoke_cli_with_filenames argv = [], filenames = [], &block filepaths = [] filenames.each do |filename| if filenames.nil? || (Pathname.new filename).absolute? filepaths << filename else filepaths << (fixture_path filename) end end invoker = Asciidoctor::Cli::Invoker.new argv + filepaths invoker.invoke!(&block) invoker end def invoke_cli_to_buffer argv = [], filename = 'sample.adoc', &block invoke_cli argv, filename, [StringIO.new, StringIO.new], &block end def invoke_cli argv = [], filename = 'sample.adoc', buffers = nil, &block if filename.nil? || filename == '-' || (Pathname.new filename).absolute? 
filepath = filename else filepath = fixture_path filename end invoker = Asciidoctor::Cli::Invoker.new argv + [filepath] invoker.redirect_streams(*buffers) if buffers invoker.invoke!(&block) invoker end def redirect_streams old_stdout, $stdout = $stdout, StringIO.new old_stderr, $stderr = $stderr, StringIO.new old_logger = Asciidoctor::LoggerManager.logger old_logger_level = old_logger.level new_logger = (Asciidoctor::LoggerManager.logger = Asciidoctor::Logger.new $stderr) new_logger.level = old_logger_level yield $stdout, $stderr ensure $stdout, $stderr = old_stdout, old_stderr Asciidoctor::LoggerManager.logger = old_logger end def resolve_localhost Socket.ip_address_list.find(&:ipv4?).ip_address end def using_memory_logger level = nil old_logger = Asciidoctor::LoggerManager.logger memory_logger = Asciidoctor::MemoryLogger.new memory_logger.level = level if level begin Asciidoctor::LoggerManager.logger = memory_logger yield memory_logger ensure Asciidoctor::LoggerManager.logger = old_logger end end def in_verbose_mode begin old_logger_level, Asciidoctor::LoggerManager.logger.level = Asciidoctor::LoggerManager.logger.level, Logger::Severity::DEBUG yield ensure Asciidoctor::LoggerManager.logger.level = old_logger_level end end def asciidoctor_cmd ruby_args = nil [Gem.ruby, *ruby_args, (File.join bindir, 'asciidoctor')] end # NOTE run_command fails on JRuby 9.1 for Windows with the following error: # Java::JavaLang::ClassCastException at org.jruby.util.ShellLauncher.getModifiedEnv(ShellLauncher.java:271) def run_command cmd, *args, &block if Array === cmd args.unshift(*cmd) cmd = args.shift end kw_args = Hash === args[-1] ? args.pop : {} env = kw_args[:env] (env ||= {})['RUBYOPT'] = nil unless kw_args[:use_bundler] # JRuby 9.1 on Windows doesn't support popen options; therefore, test cannot capture / assert on stderr opts = jruby_9_1_windows? ? {} : { err: [:child, :out] } if env # NOTE while JRuby 9.2.10.0 implements support for unsetenv_others, it doesn't work in child #if jruby? && (Gem::Version.new JRUBY_VERSION) < (Gem::Version.new '9.2.10.0') if jruby? begin env = (old_env = ENV.to_h).merge env env.each {|key, val| env.delete key if val.nil? } if env.value? nil ENV.replace env popen [cmd, *args, opts], &block ensure ENV.replace old_env end elsif env.value? nil env = env.each_with_object ENV.to_h do |(key, val), accum| val.nil? ? (accum.delete key) : (accum[key] = val) end popen [env, cmd, *args, (opts.merge unsetenv_others: true)], &block else popen [env, cmd, *args, opts], &block end else popen [cmd, *args, opts], &block end end def popen args, &block # When block is passed to IO.popen, JRuby for Windows does not return value of block as return value if jruby? && windows? result = nil IO.popen args do |io| result = yield io end result else IO.popen args, &block end end def using_test_webserver host = resolve_localhost, port = 9876 base_dir = testdir server = TCPServer.new host, port server_thread = Thread.start do Thread.current[:requests] = requests = [] while (session = server.accept) requests << (request = session.gets) if /^GET (\S+) HTTP\/1\.1$/ =~ request.chomp resource = (resource = $1) == '' ? '.' 
: resource else session.print %(HTTP/1.1 405 Method Not Allowed\r\nContent-Type: text/plain\r\n\r\n) session.print %(405 - Method not allowed\n) session.close next end if resource == '/name/asciidoctor' session.print %(HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n\r\n) session.print %({"name": "asciidoctor"}\n) elsif File.file?(resource_file = (File.join base_dir, resource)) mimetype = if (ext = File.extname(resource_file)[1..-1]) ext == 'adoc' ? 'text/plain' : %(image/#{ext}) else 'text/plain' end session.print %(HTTP/1.1 200 OK\r\nContent-Type: #{mimetype}\r\n\r\n) File.open resource_file, Asciidoctor::FILE_READ_MODE do |fd| session.write fd.read 256 until fd.eof? end else session.print %(HTTP/1.1 404 File Not Found\r\nContent-Type: text/plain\r\n\r\n) session.print %(404 - Resource not found.\n) end session.close end end begin yield %(http://#{host}:#{port}), server_thread ensure server_thread.exit server_thread.value server.close end end end ### # # Context goodness provided by @citrusbyte's contest. # See https://github.com/citrusbyte/contest # ### # Contest adds +teardown+, +test+ and +context+ as class methods, and the # instance methods +setup+ and +teardown+ now iterate on the corresponding # blocks. Note that all setup and teardown blocks must be defined with the # block syntax. Adding setup or teardown instance methods defeats the purpose # of this library. class Minitest::Test class << self def setup &block define_method :setup do super(&block) instance_eval(&block) end end def teardown &block define_method :teardown do instance_eval(&block) super(&block) end end def context name, opts = {}, &block if opts.key? :if return unless opts[:if] elsif opts.key? :unless return if opts[:unless] end subclass = Class.new self remove_tests subclass subclass.class_eval(&block) if block_given? const_set (context_name name), subclass end def test name, opts = {}, &block if opts.key? :if return unless opts[:if] elsif opts.key? :unless return if opts[:unless] end define_method (test_name name), &block end def remove_tests subclass subclass.public_instance_methods.each do |m| subclass.send :undef_method, m if m.to_s.start_with? 
'test_' end end alias should test alias describe context private def context_name name %(Test#{(sanitize_name name).gsub(/(^| )(\w)/) { $2.upcase }}).to_sym end def test_name name %(test_#{(sanitize_name name).gsub %r/\s+/, '_'}).to_sym end def sanitize_name name (name.gsub %r/\W+/, ' ').strip end end end def context name, &block Minitest::Test.context name, &block end asciidoctor-2.0.20/test/text_test.rb000066400000000000000000000250471443135032600174360ustar00rootroot00000000000000# frozen_string_literal: true require_relative 'test_helper' context "Text" do test "proper encoding to handle utf8 characters in document using html backend" do output = example_document(:encoding).convert assert_xpath '//p', output, 4 assert_xpath '//a', output, 1 end test "proper encoding to handle utf8 characters in embedded document using html backend" do output = example_document(:encoding, standalone: false).convert assert_xpath '//p', output, 4 assert_xpath '//a', output, 1 end test 'proper encoding to handle utf8 characters in document using docbook backend' do output = example_document(:encoding, attributes: { 'backend' => 'docbook', 'xmlns' => '' }).convert assert_xpath '//xmlns:simpara', output, 4 assert_xpath '//xmlns:link', output, 1 end test 'proper encoding to handle utf8 characters in embedded document using docbook backend' do output = example_document(:encoding, standalone: false, attributes: { 'backend' => 'docbook' }).convert assert_xpath '//simpara', output, 4 assert_xpath '//link', output, 1 end # NOTE this test ensures we have the encoding line on block templates too test 'proper encoding to handle utf8 characters in arbitrary block' do input = [] input << "[verse]\n" input += (File.readlines (sample_doc_path :encoding), mode: Asciidoctor::FILE_READ_MODE) doc = empty_document reader = Asciidoctor::PreprocessorReader.new doc, input, nil, normalize: true block = Asciidoctor::Parser.next_block(reader, doc) assert_xpath '//pre', block.convert.gsub(/^\s*\n/, ''), 1 end test 'proper encoding to handle utf8 characters from included file' do input = 'include::fixtures/encoding.adoc[tags=romé]' doc = empty_safe_document base_dir: testdir reader = Asciidoctor::PreprocessorReader.new doc, input, nil, normalize: true block = Asciidoctor::Parser.next_block(reader, doc) output = block.convert assert_css '.paragraph', output, 1 end test 'escaped text markup' do assert_match(/All your <em>inline<\/em> markup belongs to <strong>us<\/strong>!/, convert_string('All your inline markup belongs to us!')) end test "line breaks" do assert_xpath "//br", convert_string("Well this is +\njust fine and dandy, isn't it?"), 1 end test 'single- and double-quoted text' do output = convert_string_to_embedded(%q(``Where?,'' she said, flipping through her copy of `The New Yorker.'), attributes: { 'compat-mode' => '' }) assert_match(/“Where\?,”/, output) assert_match(/‘The New Yorker.’/, output) output = convert_string_to_embedded(%q("`Where?,`" she said, flipping through her copy of '`The New Yorker.`')) assert_match(/“Where\?,”/, output) assert_match(/‘The New Yorker.’/, output) end test 'multiple double-quoted text on a single line' do assert_equal '“Our business is constantly changing” or “We need faster time to market.”', convert_inline_string(%q(``Our business is constantly changing'' or ``We need faster time to market.''), attributes: { 'compat-mode' => '' }) assert_equal '“Our business is constantly changing” or “We need faster time to market.”', convert_inline_string(%q("`Our business is constantly changing`" or 
"`We need faster time to market.`")) end test 'horizontal rule' do input = <<~'EOS' This line is separated by a horizontal rule... ''' ...from this line. EOS output = convert_string_to_embedded input assert_xpath "//hr", output, 1 assert_xpath "/*[@class='paragraph']", output, 2 assert_xpath "(/*[@class='paragraph'])[1]/following-sibling::hr", output, 1 assert_xpath "/hr/following-sibling::*[@class='paragraph']", output, 1 end test 'markdown horizontal rules' do variants = [ '---', '- - -', '***', '* * *', '___', '_ _ _' ] offsets = [ '', ' ', ' ', ' ' ] variants.each do |variant| offsets.each do |offset| input = <<~EOS This line is separated by a horizontal rule... #{offset}#{variant} ...from this line. EOS output = convert_string_to_embedded input assert_xpath "//hr", output, 1 assert_xpath "/*[@class='paragraph']", output, 2 assert_xpath "(/*[@class='paragraph'])[1]/following-sibling::hr", output, 1 assert_xpath "/hr/following-sibling::*[@class='paragraph']", output, 1 end end end test 'markdown horizontal rules negative case' do bad_variants = [ '- - - -', '* * * *', '_ _ _ _' ] good_offsets = [ '', ' ', ' ', ' ' ] bad_variants.each do |variant| good_offsets.each do |offset| input = <<~EOS This line is separated by something that is not a horizontal rule... #{offset}#{variant} ...from this line. EOS output = convert_string_to_embedded input assert_xpath '//hr', output, 0 end end good_variants = [ '- - -', '* * *', '_ _ _' ] bad_offsets = [ "\t", ' ' ] good_variants.each do |variant| bad_offsets.each do |offset| input = <<~EOS This line is separated by something that is not a horizontal rule... #{offset}#{variant} ...from this line. EOS output = convert_string_to_embedded input assert_xpath '//hr', output, 0 end end end test "emphasized text using underscore characters" do assert_xpath "//em", convert_string("An _emphatic_ no") end test 'emphasized text with single quote using apostrophe characters' do rsquo = decode_char 8217 assert_xpath %(//em[text()="Johnny#{rsquo}s"]), convert_string(%q(It's 'Johnny's' phone), attributes: { 'compat-mode' => '' }) assert_xpath %(//p[text()="It#{rsquo}s 'Johnny#{rsquo}s' phone"]), convert_string(%q(It's 'Johnny's' phone)) end test 'emphasized text with escaped single quote using apostrophe characters' do assert_xpath %(//em[text()="Johnny's"]), convert_string(%q(It's 'Johnny\\'s' phone), attributes: { 'compat-mode' => '' }) assert_xpath %(//p[text()="It's 'Johnny's' phone"]), convert_string(%q(It\\'s 'Johnny\\'s' phone)) end test "escaped single quote is restored as single quote" do assert_xpath "//p[contains(text(), \"Let's do it!\")]", convert_string("Let\\'s do it!") end test 'unescape escaped single quote emphasis in compat mode only' do assert_xpath %(//p[text()="A 'single quoted string' example"]), convert_string_to_embedded(%(A \\'single quoted string' example), attributes: { 'compat-mode' => '' }) assert_xpath %(//p[text()="'single quoted string'"]), convert_string_to_embedded(%(\\'single quoted string'), attributes: { 'compat-mode' => '' }) assert_xpath %(//p[text()="A \\'single quoted string' example"]), convert_string_to_embedded(%(A \\'single quoted string' example)) assert_xpath %(//p[text()="\\'single quoted string'"]), convert_string_to_embedded(%(\\'single quoted string')) end test "emphasized text at end of line" do assert_xpath "//em", convert_string("This library is _awesome_") end test "emphasized text at beginning of line" do assert_xpath "//em", convert_string("_drop_ it") end test "emphasized text across line" do assert_xpath 
"//em", convert_string("_check it_") end test "unquoted text" do refute_match(/#/, convert_string("An #unquoted# word")) end test 'backticks and straight quotes in text' do backslash = '\\' assert_equal %q(run foo dog), convert_inline_string(%q(run `foo` 'dog'), attributes: { 'compat-mode' => '' }) assert_equal %q(run foo 'dog'), convert_inline_string(%q(run `foo` 'dog')) assert_equal %q(run `foo` 'dog'), convert_inline_string(%(run #{backslash}`foo` 'dog')) assert_equal %q(run ‘foo` 'dog’), convert_inline_string(%q(run '`foo` 'dog`')) assert_equal %q(run '`foo` 'dog`'), convert_inline_string(%(run #{backslash}'`foo` 'dog#{backslash}`')) end test 'plus characters inside single plus passthrough' do assert_xpath '//p[text()="+"]', convert_string_to_embedded('+++') assert_xpath '//p[text()="+="]', convert_string_to_embedded('++=+') end test 'plus passthrough escapes entity reference' do assert_match(/&#44;/, convert_string_to_embedded('+,+')) assert_match(/one&#44;two/, convert_string_to_embedded('one++,++two')) end context "basic styling" do setup do @output = convert_string("A *BOLD* word. An _italic_ word. A `mono` word. ^superscript!^ and some ~subscript~.") end test "strong" do assert_xpath "//strong", @output, 1 end test "italic" do assert_xpath "//em", @output, 1 end test "monospaced" do assert_xpath "//code", @output, 1 end test "superscript" do assert_xpath "//sup", @output, 1 end test "subscript" do assert_xpath "//sub", @output, 1 end test "passthrough" do assert_xpath "//code", convert_string("This is +passed through+."), 0 assert_xpath "//code", convert_string("This is +passed through and monospaced+.", attributes: { 'compat-mode' => '' }), 1 end test "nested styles" do output = convert_string("Winning *big _time_* in the +city *boyeeee*+.", attributes: { 'compat-mode' => '' }) assert_xpath "//strong/em", output assert_xpath "//code/strong", output output = convert_string("Winning *big _time_* in the `city *boyeeee*`.") assert_xpath "//strong/em", output assert_xpath "//code/strong", output end test 'unconstrained quotes' do output = convert_string('**B**__I__++M++[role]++M++', attributes: { 'compat-mode' => '' }) assert_xpath '//strong', output, 1 assert_xpath '//em', output, 1 assert_xpath '//code[not(@class)]', output, 1 assert_xpath '//code[@class="role"]', output, 1 output = convert_string('**B**__I__``M``[role]``M``') assert_xpath '//strong', output, 1 assert_xpath '//em', output, 1 assert_xpath '//code[not(@class)]', output, 1 assert_xpath '//code[@class="role"]', output, 1 end end test 'should format Asian characters as words' do assert_xpath '//strong', (convert_string_to_embedded 'bold *要* bold') assert_xpath '//strong', (convert_string_to_embedded 'bold *素* bold') assert_xpath '//strong', (convert_string_to_embedded 'bold *要素* bold') end end