asciidoctor-1.5.5/.gitignore

pkg/
/Gemfile.lock
/.bundle
/*.gem
/*.html
/.idea/
/.ruby-gemset
/.ruby-version
/.yardoc/
/rdoc/
/shippable/

asciidoctor-1.5.5/.simplecov

SimpleCov.start do
  load_profile 'test_frameworks'
  coverage_dir ENV['COVERAGE_REPORTS'] || 'tmp/coverage'
  if ENV['SHIPPABLE']
    require 'simplecov-csv'
    formatter SimpleCov::Formatter::CSVFormatter
  else
    #formatter SimpleCov::Formatter::MultiFormatter[SimpleCov::Formatter::HTMLFormatter, SimpleCov::Formatter::CSVFormatter]
    formatter SimpleCov::Formatter::HTMLFormatter
  end
end

asciidoctor-1.5.5/.travis.yml

sudo: false
git:
  depth: 1
language: ruby
rvm:
  - 2.3.1
  - 2.2.5
  - 2.1.10
  - 2.0.0
  - 1.9.3
  - 1.8.7
  - jruby-9.0.5.0
  - jruby-9.1.2.0
  - jruby-19mode # based on jruby-1.7.19
  - jruby-18mode # based on jruby-1.7.19
  #- rbx-3.60 # NOTE not working currently
script: bundle exec rake coverage test:all
notifications:
  email: false
  #irc: 'irc.freenode.org#asciidoctor'

asciidoctor-1.5.5/.yardopts

--exclude opal_ext
--hide-api private
--plugin tomdoc
--title "Asciidoctor API Documentation"
--output-dir rdoc

asciidoctor-1.5.5/CHANGELOG.adoc

= Asciidoctor Changelog
:uri-asciidoctor: http://asciidoctor.org
:uri-asciidoc: {uri-asciidoctor}/docs/what-is-asciidoc
:uri-repo: https://github.com/asciidoctor/asciidoctor
:icons: font
:star: icon:star[role=red]
ifndef::icons[]
:star: ★
endif::[]

{uri-asciidoctor}[Asciidoctor] is a _fast_, open source text processor and publishing toolchain for converting {uri-asciidoc}[AsciiDoc] content into HTML5, DocBook 5 (or 4.5) and other formats.

This document provides a high-level view of the changes introduced in Asciidoctor by release.
For a detailed view of what has changed, refer to the {uri-repo}/commits/master[commit history] on GitHub.
// tag::compact[] == 1.5.5 (2016-10-05) - @mojavelinux Enhancements:: * Add preference to limit the maximum size of an attribute value (#1861) * Honor SOURCE_DATE_EPOCH environment variable to accomodate reproducible builds (@JojoBoulix) (#1721) * Add reversed attribute to ordered list if reversed option is enabled (#1830) * Add support for additional docinfo locations (e.g., :header) * Configure default stylesheet to break monospace word if exceeds length of line; add roles to prevent breaks (#1814) * Introduce translation file for built-in labels (@ciampix) * Provide translations for built-in labels (@JmyL - kr, @ciampix - it, @ivannov - bg, @maxandersen - da, @radcortez - pt, @eddumelendez - es, @leathersole - jp, @aslakknutsen - no, @shahryareiv - fa, @AlexanderZobkov - ru, @dongwq - zh, @rmpestano - pt_BR, @ncomet - fr, @lgvz - fi, @patoi - hu, @BojanStipic - sr, @fwilhe - de, @rahmanusta - tr, @abelsromero - ca, @aboullaite - ar, @roelvs - nl) * Translate README to Chinese (@diguage) * Translate README to Japanese (@Mizuho32) Improvements:: * Style nested emphasized phrases properly when using default stylesheet (#1691) * Honor explicit table width even when autowidth option is set (#1843) * Only explicit noheader option on table should disable implicit table header (#1849) * Support docbook orient="land" attribute on tables (#1815) * Add alias named list to retrieve parent List of ListItem * Update push_include method to support chaining (#1836) * Enable font smoothing on Firefox on OSX (#1837) * Support combined use of sectanchors and sectlinks in HTML5 output (#1806) * fix API docs for find_by * Upgrade to Font Awesome 4.6.3 (@allenan, @mogztter) (#1723) * README: add install instructions for Alpine Linux * README: Switch yum commands to dnf in README * README: Mention Mint as a Debian distro that packages Asciidoctor * README: Add caution advising against using gem update to update a system-managed gem (@oddhack) * README: sync French version with English version (@flashcode) * Add missing endline after title element when converting open block to HTML * Move list_marker_keyword method from AbstractNode to AbstractBlock * Rename definition list to description list internally Compliance:: * Support 6-digit decimal char refs, 5-digit hexidecimal char refs (#1824) * Compatibility fixes for Opal * Check for number using Integer instead of Fixnum class for compatibility with Ruby 2.4 Bug fixes:: * Use method_defined? instead of respond_to? 
to check if method is already defined when patching (#1838) * Fix invalid conditional in HTML5 converter when handling of SVG * Processor#parse_content helper no longer shares attribute list between blocks (#1651) * Fix infinite loop if unordered list marker is immediately followed by a dot (#1679) * Don't break SVG source when cleaning if svg start tag name is immediately followed by endline (#1676) * Prevent template converter from crashing if .rb file found in template directory (#1827) * Fix crash when generating section ID when both idprefix & idseparator are blank (#1821) * Use stronger CSS rule for general text color in Pygments stylesheet (#1802) * Don't duplicate forward slash for path relative to root (#1822) Infrastructure:: * Build gem properly in the absense of a git workspace, make compatible with JRuby (#1779) * Run tests in CI using latest versions of Ruby, including Ruby 2.3 (@ferdinandrosario) Distribution Packages:: * http://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] * http://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] * http://packages.ubuntu.com/saucy/asciidoctor[Ubuntu (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?q=milestone%3Av1.5.5[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v1.5.5[git tag] | https://github.com/asciidoctor/asciidoctor/compare/v1.5.4...v1.5.5[full diff] == 1.5.4 (2016-01-03) - @mojavelinux Enhancements:: * translate README into French (@anthonny, @mogztter, @gscheibel, @mgreau) (#1630) * allow linkstyle in manpage output to be configured (#1610) Improvements:: * upgrade to MathJax 2.6.0 and disable loading messages * upgrade to Font Awesome 4.5.0 * disable toc if document has no sections (#1633) * convert inline asciimath to MathML (using asciimath gem) in DocBook converter (#1622) * add attribute to control build reproducibility (@bk2204) (#1453) * recognize \file:/// as a file root in Opal browser env (#1561) * honor icon attribute on admonition block when font-based icons are enabled (@robertpanzer) (#1593) * resolve custom icon relative to iconsdir; add file extension if absent (#1634) * allow asciidoctor cli to resolve library path when invoked without leading ./ Compliance:: * allow special section to be nested at any depth (#1591) * ensure colpcwidth values add up to 100%; increase precision of values to 4 decimal places (#1647) * ignore blank cols attribute on table (#1647) * support shorthand syntax for block attributes on document title (#1650) Bug fixes:: * don't include default toc in AsciiDoc table cell; don't pass toc location attributes to nested document (#1582) * guard against nil dlist list item in find_by (#1618) * don't swallow trailing line when include file is not readable (#1602) * change xlink namespace to xl in DocBook 5 output to prevent parse error (#1597) * make callouts globally unique within document, including AsciiDoc table cells (#1626) * initialize Slim-related attributes regardless of when Slim was loaded (@terceiro) (#1576) * differentiate literal backslash from escape sequence in manpage output (@ds26gte) (#1604) * don't mistake line beginning with \. 
for troff macro in manpage output (@ds26gte) (#1589) * escape leading dots so user content doesn't trigger troff macros in manpage output (@ds26gte) (#1631) * use \c after .URL macro to remove extraneous space in manpage output (@ds26gte) (#1590) * fix missing endline after .URL macro in manpage output (#1613) * properly handle spacing around .URL/.MTO macro in manpage output (@ds26gte) (#1641) * don't swallow doctitle attribute followed by block title (#1587) * change strategy for splitting names of author; fixes bug in Opal/Asciidoctor.js * don't fail if library is loaded more than once Infrastructure:: * remove trailing endlines in project source code * update contributing guidelines * explicitly test ifeval scenario raised in issue #1585 * remove backreference substitution hack for Opal/Asciidoctor.js * fix assignment of default Hash value for Opal/Asciidoctor.js * add JRuby 9.0.4.0 and Ruby 2.3.0 to the Travis CI build matrix Distribution Packages:: * http://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] * http://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] * http://packages.ubuntu.com/saucy/asciidoctor[Ubuntu (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?q=milestone%3Av1.5.4[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v1.5.4[git tag] | https://github.com/asciidoctor/asciidoctor/compare/v1.5.3...v1.5.4[full diff] == 1.5.3 (2015-10-31) - @mojavelinux Enhancements:: * add support for interactive & inline SVGs (#1301, #1224) * add built-in manpage backend (@davidgamba) (#651) * create Mallard backend; asciidoctor/asciidoctor-mallard (@bk2204) (#425) * add AsciiMath to MathML converter to support AsciiMath in DocBook converter (@pepijnve) (#954) * allow text of selected lines to be highlighted in source block by Pygments or CodeRay (#1429) * use value of `docinfo` attribute to control docinfo behavior (#1510) * add `docinfosubs` attribute to control which substitutions are performed on docinfo files (@mogztter) (#405) * drop ability to specify multiple attributes with a single `-a` flag when using the CLI (@mogztter) (#405) * make subtitle separator chars for document title configurable (@rmannibucau) (#1350) * make XrefInlineRx regexp more permissive (Mathieu Boespflug) (#844) Improvements:: * load JavaScript and CSS at bottom of HTML document (@mogztter) (#1238) * list available backends in help text (@plaindocs) (#1271) * properly expand tabs in literal text (#1170, #841) * add `source-indent` as document attribute (@mogztter) (#1169) * upgrade MathJax to 2.5.3 (#1329) * upgrade Font Awesome to 4.4.0 (@mogztter) (#1465) * upgrade highlight.js to 8.6 (now 8.9.1) (#1390) * don't abort if syntax highlighter isn't available (#1253) * insert docinfo footer below footer div (#1503) * insert toc at default location in embeddable HTML (#1443) * replace _ and - in generated alt text for inline images * restore attributes to header attributes after parse (#1255) * allow docdate and doctime to be overridden (#1495) * add CSS class `.center` for center block alignment (#1456) * recognize U+2022 (bullet) as alternative marker for unordered lists (@mogztter) (#1177) * allow videos to work for local files by prepending asset-uri-scheme (Chris) (#1320) * always assign playlist param when loop option is enabled for YouTube video * parse isolated version in revision line (@bk2204) (#790) * autoload Tilt when template converter is instantiated 
(#1313) * don't overwrite existing id entry in references table (#1256) * use outfilesuffix attribute defined in header when resolving outfile (#1412) * make AsciiDoc safe mode option on Slim engine match document (#1347) * honor htmlsyntax attribute when backend is html/html5 (#1530) * tighten spacing of wrapped lines in TOC (#1542) * tune padding around table cells in horizontal dlist (#1418) * load Droid Sans Mono 700 in default stylesheet * set line height of table cells used for syntax highlighting * set font-family of kbd; refine styling (#1423) * extract condition into `quote_lines?` method (@mogztter) * extract inline code into `read_paragraph` method (@mogztter) * parent of block in ListItem should be ListItem (#1359) * add helper methods to List and ListItem (#1551) * add method `AbstractNode#add_role` and `AbstractNode#remove_role` (@robertpanzer) (#1366) * introduce helper methods for sniffing URIs (#1422) * add helper to calculate basename without file extension * document `-I` and `-r` options in the manual page (@bk2204) * fix `+--help+` output text for `-I` (@bk2204) * don't require open-uri-cached if already loaded * do not attempt to scan pattern of non-existent directory in template converter * prevent CodeRay from bolding every 10th line number Compliance:: * use `` for footnote reference in text instead of `` (#1523) * fix alignment of wrapped text in footnote (#1524) * include full stop after footnote number in embeddable HTML * show manpage title & name section in embeddable HTML (#1179) * resolve missing attribute in ifeval to empty string (#1387) * support unbreakable & breakable options on table (rockyallen) (#1140) Bug fixes:: * don't truncate exception stack in `Asciidoctor.load` (#1248) * don't fail to save cause of Java exception (@robertpanzer) (#1458) * fix precision error in timings report (#1342) * resolve regexp for inline macro lazily (#1336) * block argument to `find_by` should filter results (#1393) * strip comment lines in indented text of dlist item (#1537) * preserve escaped delimiter at end of line in a table (#1306) * correctly calculate colnames for implicit columns (#1556) * don't crash if colspan exceeds colspec (#1460) * account for empty records in colspec (#1375) * ignore empty cols attribute on table * use `.inspect` to print MathJax delimiters (again) (#1198) * use while loop instead of begin/while loop to address bug in Asciidoctor.js (#1408) * force encoding of attribute values passed from cli (#1191) * don't copy css if stylesheet or stylesdir is a URI (#1400) * fix invalid color value in default CodeRay theme * built-in writer no longer fails if output is nil (#1544) * custom template engine options should take precedence * fallback to require with a non-relative path to support Debian package (@mogztter) * pass opts to recursive invocations of `PathResolver#system_path` * fix and test external links in docbook backend * use format symbol `:html` instead of `:html5` for Slim to fix warnings * fix documentation for inline_macro and block_macro (Andrea Bedini) * fix grammar in warning messages regarding thread_safe gem Infrastructure:: * migrate opal_ext from core to Asciidoctor.js (#1517) * add Ruby 2.2 to CI build; only specify minor Ruby versions * enable containerized builds on Travis CI * add config to run CI build on AppVeyor * exclude benchmark folder from gem (#1522) Distribution Packages:: * http://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora 
(rubygem-asciidoctor)] * http://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] * http://packages.ubuntu.com/saucy/asciidoctor[Ubuntu (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?q=milestone%3Av1.5.3[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v1.5.3[git tag] | https://github.com/asciidoctor/asciidoctor/compare/v1.5.2...v1.5.3[full diff] // end::compact[] == 1.5.2 (2014-11-27) - @mojavelinux Enhancements:: * add docinfo extension (@mogztter) (#1162) * allow docinfo to be in separate directory from content, specified by `docinfodir` attribute (@mogztter) (#511) * enable TeX equation auto-numbering if `eqnums` attribute is set (@jxxcarlson) (#1110) Improvements:: * recognize `--` as valid line comment for callout numbers; make line comment configurable (#1068) * upgrade highlight.js to version 8.4 (#1216) * upgrade Font Awesome to version 4.2.0 (@clojens) (#1201) * define JAVASCRIPT_PLATFORM constant to simplify conditional logic in the JavaScript environment (#897) * provide access to destination directory, outfile and outdir via Document object (#1203) * print encoding information in version report produced by `asciidoctor -v` (#1210) * add intrinsic attribute named `cpp` with value `C++` (#1208) * preserve URI targets passed to `stylesheet` and related attributes (#1192) * allow numeric characters in block attribute name (#1103) * support custom YouTube playlists (#1105) * make start number for unique id generation configurable (#1148) * normalize and force UTF-8 encoding of docinfo content (#831) * allow subs and default_subs to be specified in Block constructor (#749) * enhance error message when reading binary input files (@mogztter) (#1158) * add `append` method as alias to `<<` method on AbstractBlock (#1085) * assign value of `preface-title` as title of preface node (#1090) * fix spacing around checkbox in checklist (#1138) * automatically load Slim's include plugin when using slim templates (@jirutka) (#1151) * mixin Slim helpers into execution scope of slim templates (@jirutka) (#1143) * improve DocBook output for manpage doctype (@bk2204) (#1134, #1142) Compliance:: * substitute attribute entry value in attributes defined outside of header (#1130) * allow empty cell to appear at end of table row (#1106) * only produce one row for table in CSV or DSV format with a single cell (#1180) Bug fixes:: * add explicit to_s call to generate delimiter settings for MathJax config (#1198) * fix includes that reference absolute Windows paths (#1144) * apply DSL to extension block in a way compatible with Opal Distribution Packages:: * http://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] * http://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] * http://packages.ubuntu.com/saucy/asciidoctor[Ubuntu (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?q=milestone%3Av1.5.2[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v1.5.2[git tag] | https://github.com/asciidoctor/asciidoctor/compare/v1.5.1...v1.5.2[full diff] == 1.5.1 (2014-09-29) - @mojavelinux Bug fixes:: * recognize tag directives inside comments within XML files for including tagged regions * restore passthroughs inside footnotes when more than one footnote appears on the same line * -S flag in cli recognizes safe mode name as lowercase string * do not match # in character reference when looking for marked text * add namespace to lang 
attribute in DocBook 5 backend * restore missing space before conum on last line of listing when highlighting with Pygments * place conums on correct lines when line numbers are enabled when highlighting with Pygments * don't expand mailto links in print styles Improvements:: * implement File.read in Node (JavaScript) environment * assign sectnumlevels and toclevels values to maxdepth attribute on AsciiDoc processing instructions in DocBook output * add test for usage of image block macro with data URI * use badges from shields.io in README Distribution Packages:: * http://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] * http://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] * http://packages.ubuntu.com/saucy/asciidoctor[Ubuntu (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?q=milestone%3Av1.5.1[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v1.5.1[git tag] | https://github.com/asciidoctor/asciidoctor/compare/v1.5.0...v1.5.1[full diff] == 1.5.0 (2014-08-12) - @mojavelinux Performance:: * 10% increase in speed compared to 0.1.4 * rewrite built-in converters in Ruby instead of ERB Enhancements:: * {star} introduce new curved quote syntax (pass:["`double quotes`"], pass:['`single quotes`']) if compat-mode attribute not set (#1046) * {star} add single curved quote replacement for pass:[`'] (#715) * {star} use backtick (`) for monospaced text if compat-mode attribute not set (#714, #718) * {star} use single and double plus (pass:[+], pass:[++]) for inline passthrough if compat-mode attribute not set (#714, #718) * {star} disable single quotes as formatting marks for emphasized text if compat-mode attribute not set (#717) * {star} enable compat-mode by default if document has atx-style doctitle * {star} output phrase surrounded by # as marked text (i.e., ) (#225) * {star} add MathJax integration and corresponding blocks and macros (#492, #760) * {star} switch to open source fonts (Open Sans, Noto Serif and Droid Sans Mono) in default stylesheet, major refinements to theme (#879) * {star} embed remote images when data-uri and allow-uri-read attributes are set (#612) * {star} support leveloffset on include directive and honor relative leveloffset values (#530) * {star} switch default docbook backend to docbook5 (@bk2204) (#554) * {star} added hide-uri-scheme attribute to hide uri scheme in automatic links (#800) * {star} allow substitutions to be incrementally added & removed (#522) * {star} add compatibility with Opal, add shim compat library, use compatibility regexp, require libraries properly (@mogztter) (#679, #836, #846) * {star} output XHTML when backend is xhtml or xhtml5 (#494) * {star} add shorthand subs and specialchars as an alias for specialcharacters (#579) * {star} deprecate toc2 attribute in favor of position and placement values on toc attribute (e.g., toc=left) (#706) * {star} add source map (file and line number) information to blocks (#861) * {star} write to file by default if input is file (#907) * {star} add -r and -I flags from ruby command to asciidoctor command for loading additional libraries (#574) * support backslash (\) as line continuation character in the value of an attribute entry (#1022) * disable subs on pass block by default (#737) * add basic support for resolving xref target from reftext (#589) * add time range anchor to video element (#886) * match implicit URLs that use the file scheme (#853) * added 
sectnumlevels to control depth of section numbering (#549) * add hardbreaks option to block (#630) * sub attributes in manname (e.g., pass:[{docname}]) * warn on reference to missing attribute if attribute-missing is "warn" * only enable toc macro if toc is enabled and toc-placement attribute has the value macro (#706) * add sectnums attribute as alternative alias to numbered attribute (#684) Improvements:: * {star} don't select lines that contain a tag directive when including tagged lines, make tag regexp more strict (#1027) * {star} use https scheme for assets by default * {star} upgrade to Font Awesome 4.1 (@mogztter) (#752) * {star} improve print styles, add print styles for book doctype (@leif81) (#997, #952) * {star} add proper grid and frame styles for tables (@leif81) (#569) * {star} use glyphs for checkboxes when not using font icons (#878) * {star} prefer source-language attribute over language attribute for defining default source language (#888) * {star} pass document as first argument to process method on Preprocessor * don't parse link attributes when linkattrs is set unless text contains equal sign * detect bare links, mark with bare class; don't repeat URL of bare link in print styles * allow Treeprocessor#process method to replace tree (#1035) * add AbstractNode#find_by method to locate nodes in tree (#862) * add API for parsing title and subtitle (#1000) * add use_fallback option to doctitle, document method * constrain subscript & superscript markup (#564, #936) * match cell specs when cell separator is customized (#985) * use stylesheet to set default table width (#975) * display nested elements correctly in toc (@kenfinnigan) (#967) * add support for id attribute on links (@mogztter) (#935) * add support for title attribute on links (@aslakknutsen) * add -t flag to cli to control output of timing information (@mogztter) (#909) * rewrite converter API (#778) * rewrite extensions to support extension instances for AsciidoctorJ (#804) * integrate thread_safe gem (#638) * allow inline macro extensions that define a custom regexp to be matched (#792) * make Reader#push_include work with default file, path and dir (@bk2204) (#743) * honor custom outfilesuffix and introduce relfileprefix (#801) * add author and copyright to meta in HTML5 backend (#838) * output attribution in front of citetitle for quote and verse blocks * recognize float style with shorthand syntax outside block (#818) * honor background color in syntax highlighting themes (#813) * print runtime environment in version output, support -v as version flag (#785) * unwrap preamble if standalone (#533) * drop leading & trailing blank lines in verbatim & raw content (#724) * remove trailing endlines from source data (#727) * add flag to cli to suppress warnings (#557) * emit warning if tag(s) not found in include file (#639) * use element for vertical table headers instead of header class (@davidgamba) (#738) * share select references between AsciiDoc-style cell & main document (#729) * number chapters sequentially, always (#685) * add vbar attribute, make brvbar resolve properly (#643) * add implicit user-home attribute that resolves to user's home directory (#629) * enable sidebar toc for small screens (#628) * add square brackets around button in HTML output (#631) * make language hover text work for all languages in listing block * set background color on toc2 to cover scrolling content (@neher) * make document parsing a discrete step, make Reader accessible as property on Document * allow custom converter to 
set backend info such as outfilesuffix and htmlsyntax * report an informative error message when a converter cannot be resolved (@mogztter) * add conum class to b element when icons are disabled, make conum CSS selector more specific * expose Document object to extension point IncludeProcessor (@aslakknutsen) * style audioblock title, simplify rules for block titles * alias :name_attributes to :positional_attributes in extension DSL * upgrade to highlight.js 7.4 (and later 8.0) (@mogztter) (#756) Compliance:: * only include xmlns in docbook45 backend if xmlns attribute is specified (#929) * add xmlns attribute for xhtml output (@bk2204) * warn if table without a body is converted to DocBook (#961) * wrap around admonition inside example block in DocBook 4.5 (#931) * use if block image doesn't have a title (#927) * fix invalid docbook when adding role to formatted text (#956) * move all compliance flags to Compliance module (#624) * add compliance setting to control use of shorthand property syntax (#789) * wrap top-level content inside preamble in DocBook backend when doctype is book (#971) * escape special chars in image alt text (#972) * set starting number in ordered list for docbook (@megathaum) (#925) * match word characters in regular expressions as defined by Unicode (#892) * put source language class names on child code element of pre element (#921) * ignore case of attribute in conditional directives (#903) * allow attribute entry to reset / reseed counter (#870) * allow doctype to be set in AsciiDoc table cell (#863) * match URL macro following entity (@jmbruel) (#819) * handle BOM when normalizing source (#824) * don't output revhistory if revdate is not set (#802) * perform normal subs on verse content (#799) * automatically wrap part intro content in partintro block, emit warning if part is invalid (#768) * force encoding of docinfo content to UTF-8 (#773) * add scaling & alignment attributes to block image in DocBook backend (#763) * add support for pass:[anchor:[\]] macro (#531) * substitute anchor and xref macros in footnotes (#676) * remove all string mutation operations for compatibility with Opal (#735) * honor reftext defined in embedded section title anchor (#697) * allow spaces in reftext defined in block anchor (#695) * use reftext of section or block in text of xref link (#693) * number sections in appendix using appendix number (#683) * unescape escaped square closing bracket in footnote text (#677) * support quoted index terms that may contain commas (#597) * don't assign role attribute if quoted text has no roles (#647) * disallow quoted values in block and inline anchors * add % to scaledwidth if no units given * ignore block attribute with unquoted value None * preserve entity references with 5 digits Bug Fixes:: * resolve relative paths relative to base_dir in unsafe mode (#690) * properly handle nested passthroughs (#1034) * don't clobber outfilesuffix attribute if locked (#1024) * correctly calculate columns if colspan used in first row of table (#924) * pass theme to Pygments when pygments-css=style (#919) * fallback to text lexer when using pygments for source highlighting (#987) * only make special section if style is specified (#917) * an unresolved footnote ref should not crash processor (#876) * rescue failure to resolve ::Dir.home (#896) * recognize Windows UNC path as absolute and preserve it (#806) * adjust file glob to account for backslash in Windows paths (#805) * don't match e-mail address inside URL (#866) * test include directive resolves file 
with space in name (#798) * return nil from Reader#push_include and Reader#pop_include methods (#745) * fixed broken passthroughs caused by source highlighting (#720) * copy custom stylesheet if linkcss is set (#300) * honor list continuations for indented, nested list items (#664) * fix syntax errors in converters (@jljouannic) * fix iconfont-remote setting * fix syntax error (target -> node.target) in Docbook 5 converter (@jf647) * output and style HTML for toc macro correctly Infrastructure:: * add Ruby 2.1 to list of supported platforms * reenable rbx in Travis build * switch tests to minitest (@ktdreyer) * update RPM for Fedora Rawhide (@ktdreyer) * refactor unit tests so they work in RubyMine (@cmoulliard) * add preliminary benchmark files to repository (#1021) * clean out old fixtures from test suite (#960) * add initial Cucumber test infrastructure (#731) * use gem tasks from Bundler in Rakefile (#654) * build gemspec files using git ls-tree (#653) * use in-process web server for URI tests * update manpage to reflect updates in 1.5.0 * rework README (@mogztter) (#651) Distribution Packages:: * http://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] * http://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] * http://packages.ubuntu.com/saucy/asciidoctor[Ubuntu (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?milestone=8&state=closed[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v1.5.0[git tag] | https://github.com/asciidoctor/asciidoctor/compare/v0.1.4...v1.5.0[full diff] == 0.1.4 (2013-09-05) - @mojavelinux Performance:: * 15% increase in speed compared to 0.1.3 Enhancements:: * updated xref inline macro to support inter-document references (#417) * added extension API for document processing (#79) * added include directive processor extension (#100) * added id and role shorthand for formatted (quoted) text (#517) * added shorthand syntax for specifying block options (#481) * added support for checklists in unordered list (#200) * added support for inline style for unordered lists (#620) * added DocBook 5 backend (#411) * added docinfo option for footer (#486) * added Pygments as source highlighter option (pygments) (#538) * added icon inline macro (#529) * recognize implicit table header row (#387) * uri can be used in inline image (#470) * add float attribute to inline image (#616) * allow role to be specified on text enclosed in backticks (#419) * added XML comment-style callouts for use in XML listings (#582) * made callout bullets non-selectable in HTML output (#478) * pre-wrap literal blocks, added nowrap option to listing blocks (#303) * skip (retain) missing attribute references by default (#523) * added attribute-missing attribute to control how a missing attribute is handled (#495) * added attribute-undefined attribute to control how an undefined attribute is handled (#495) * permit !name syntax for undefining attribute (#498) * ignore front matter used by static site generators if skip-front-matter attribute is set (#502) * sanitize contents of HTML title element in html5 backend (#504) * support toc position for toc2 (#467) * cli accepts multiple files as input (@lordofthejars) (#227) * added Markdown-style horizontal rules and pass Markdown tests (#455) * added float clearing classes (.clearfix, .float-group) (#602) * don't disable syntax highlighting when explicit subs is used on listing block * asciidoctor package now 
available in Debian Sid and Ubuntu Saucy (@avtobiff) (#216) Compliance:: * embed CSS by default, copy stylesheet when linkcss is set unless copycss! is set (#428) * refactor reader to track include stack (#572) * made include directive resolve relative to current file (#572) * track include stack to enforce maximum depth (#581) * fixed greedy comment blocks and paragraphs (#546) * enable toc and numbered by default in DocBook backend (#540) * ignore comment lines when matching labeled list item (#524) * correctly parse footnotes that contain a URL (#506) * parse manpage metadata, output manpage-specific HTML, set docname and outfilesuffix (#488, #489) * recognize preprocessor directives on first line of AsciiDoc table cell (#453) * include directive can retrieve data from uri if allow-uri-read attribute is set (#445) * support escaping attribute list that precedes formatted (quoted) text (#421) * made improvements to list processing (#472, #469, #364) * support percentage for column widths (#465) * substitute attributes in docinfo files (#403) * numbering no longer increments on unnumbered sections (#393) * fixed false detection of list item with hyphen marker * skip include directives when processing comment blocks * added xmlns to root element in docbook45 backend, set noxmlns attribute to disable * added a Compliance module to control compliance-related behavior * added linkattrs feature to AsciiDoc compatibility file (#441) * added level-5 heading to AsciiDoc compatibility file (#388) * added new XML-based callouts to AsciiDoc compatibility file * added absolute and uri image target matching to AsciiDoc compatibility file * added float attribute on inline image macro to AsciiDoc compatibility file * removed linkcss in AsciiDoc compatibility file * fixed fenced code entry in compatibility file Bug Fixes:: * lowercase attribute names passed to API (#508) * numbered can still be toggled even when enabled in API (#393) * allow JRuby Map as attributes (#396) * don't attempt to highlight callouts when using CodeRay and Pygments (#534) * correctly calculate line length in Ruby 1.8 (#167) * write to specified outfile even when input is stdin (#500) * only split quote attribution on first comma in Markdown blockquotes (#389) * don't attempt to print render times when doc is not rendered * don't recognize line with four backticks as a fenced code block (#611) Improvements:: * upgraded Font Awesome to 3.2.1 (#451) * improved the built-in CodeRay theme to match Asciidoctor styles * link to CodeRay stylesheet if linkcss is set (#381) * style the video block (title & margin) (#590) * added Groovy, Clojure, Python and YAML to floating language hint * only process callouts for blocks in which callouts are found * added content_model to AbstractBlock, rename buffer to lines * use Untitled as document title in rendered output if document has no title * rename include-depth attribute to max-include-depth, set 64 as default value (#591) * the tag attribute can be used on the include directive to identify a single tagged region * output multiple authors in HTML backend (#399) * allow multiple template directories to be specified, document in usage and manpage (#437) * added option to cli to specify template engine (#406) * added support for external video hosting services in video block macro (@xcoulon) (#587) * strip leading separator(s) on section id if idprefix is blank (#551) * customized styling of toc placed inside body content (#507) * consolidate toc attribute so toc with or without toc-position 
can make sidebar toc (#618) * properly style floating images (inline & block) (#460) * add float attribute to inline images (#616) * use ul list for TOC in HTML5 backend (#431) * support multiple terms per labeled list item in model (#532) * added role?, has_role?, option? and roles methods to AbstractNode (#423, 474) * added captioned_title method to AbstractBlock * honor showtitle attribute as alternate to notitle! (#457) * strip leading indent from literal paragraph blocks assigned the style normal * only process lines in AsciiDoc files * emit message that tilt gem is required to use custom backends if missing (#433) * use attributes for version and last updated messages in footer (#596) * added a basic template cache (#438) * include line info in several of the warnings (for lists and tables) * print warning/error messages using warn (#556) * lines are not preprocessed when peeking ahead for section underline * introduced Cursor object to track line info * fixed table valign classes, no underline on image link * removed dependency on pending library, lock Nokogiri version to 1.5.10 * removed require rubygems line in asciidoctor.rb, add to cli if RUBY_VERSION < 1.9 * added tests for custom backends * added test that shorthand doesn't clobber explicit options (#481) * removed unnecessary monospace class from literal and listing blocks Distribution Packages:: * http://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] * http://packages.debian.org/sid/asciidoctor[Debian (asciidoctor)] * http://packages.ubuntu.com/saucy/asciidoctor[Ubuntu (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?milestone=7&state=closed[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v0.1.4[git tag] | https://github.com/asciidoctor/asciidoctor/compare/v0.1.3...v0.1.4[full diff] == 0.1.3 (2013-05-30) - @mojavelinux Performance:: * 10% increase in speed compared to 0.1.2 Enhancements:: * added support for inline rendering by setting doctype to inline (#328) * added support for using font-based icons (#115) * honor haml/slim/jade-style shorthand for id and role attributes (#313) * support Markdown-style headings as section titles (#373) * support Markdown-style quote blocks * added section level 5 (maps to h6 element in the html5 backend) (#334) * added btn inline macro (#259) * added menu inline menu to identify a menu selection (@bleathem) (#173) * added kbd inline macro to identify a key or key combination (@bleathem) (#172) * support alternative quote forms (#196) * added indent attribute to verbatim blocks (#365) * added prettify source-highlighter (#202) * link section titles (#122) * introduce shorthand syntax for table format (#350) * parse attributes in link when use-link-attrs attribute is set (#214) * support preamble toc-placement (#295) * exclude attribute div if quote has no attribution (#309) * support attributes passed to API as string or string array (#289) * allow safe mode to be set using string, symbol or int in API (#290) * make level 0 section titles more prominent in TOC (#369) Compliance:: * ~ 99.5% compliance with AsciiDoc * drop line if target of include directive is blank (#376) * resolve attribute references in target of include directive (#367) * added irc scheme to link detection (#314) * toc should honor numbered attribute (#341) * added toc2 layout to default stylesheet (#285) * consecutive terms in labeled list share same entry (#315) * support 
set:name:value attribute syntax (#228) * block title not allowed above document title (#175) * assign caption even if no title (#321) * horizontal dlist layout in docbook backend (#298) * set doctitle attribute (#337) * allow any backend to be specified in cli (@lightguard) (#320) * support for abstract and partintro (#297) Bug Fixes:: * fixed file path resolution on Windows (#330) * fixed bad variable name that was causing crash, add test for it (#335) * set proper encoding on input data (#308) * don't leak doctitle into nested document (#382) * handle author(s) defined using attributes (#301) Improvements:: * added tests for all special sections (#80) * added test for attributes defined as string or string array (@lightguard) (#291) Distribution Packages:: * http://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] * https://apps.fedoraproject.org/packages/rubygem-asciidoctor[Fedora (rubygem-asciidoctor)] http://asciidoctor.org/news/2013/05/31/asciidoctor-0-1-3-released[release notes] | https://github.com/asciidoctor/asciidoctor/issues?milestone=4&state=closed[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v0.1.3[git tag] | https://github.com/asciidoctor/asciidoctor/compare/v0.1.2...v0.1.3[full diff] == 0.1.2 (2013-04-25) - @mojavelinux Performance:: * 28% increase in speed compared to 0.1.1, 32% increase compared to 0.1.0 Enhancements:: * new website at http://asciidoctor.org * added a default stylesheet (#76) * added viewport meta tag for mobile browsers (#238) * set attributes based on safe mode (#244) * added admonition name as style class (#265) * removed hardcoded CSS, no one likes hardcoded CSS (#165) * support multiple authors in document header (#223) * include footnotes block in embedded document (#206) * allow comma delimiter in include attribute values (#226) * support including tagged lines (#226) * added line selection to include directive (#226) * Asciidoctor#render APIs return Document when document is written to file Compliance:: * added AsciiDoc compatibility file to make AsciiDoc behave like Asciidoctor (#257) * restore alpha-based xml entities (#211) * implement video and audio block macros (#155) * implement toc block macro (#269) * correctly handle multi-part books (#222) * complete masquerade functionality for blocks & paragraphs (#187) * support explicit subs on blocks (#220) * use code element instead of tt (#260) * honor toc2 attribute (#221) * implement leveloffset feature (#212) * include docinfo files in header when safe mode < SERVER (#116) * support email links and mailto inline macros (#213) * question must be wrapped in simpara (#231) * allow round bracket in link (#218) Bug Fixes:: * trailing comma shouldn't be included in link (#280) * warn if file in include directive doesn't exist (#262) * negative case for inline ifndef should only affect current line (#241) * don't compact nested documents (#217) * nest revision info inside revision element (#236) Distribution Packages:: * http://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] http://asciidoctor.org/news/2013/04/25/asciidoctor-0-1-2-released[release notes] | https://github.com/asciidoctor/asciidoctor/issues?milestone=3&state=closed[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v0.1.2[git tag] | https://github.com/asciidoctor/asciidoctor/compare/v0.1.1...v0.1.2[full diff] == 0.1.1 (2013-02-26) - @erebor Performance:: * 15% increase in speed compared to 0.1.0 Enhancements:: * migrated repository to asciidoctor organization on GitHub (#77) 
* include document title when header/footer disabled and notitle attribute is unset (#103) * honor GitHub-flavored Markdown fenced code blocks (#118) * added :doctype and :backend keys to options hash in API (#163) * added :to_dir option to the Asciidoctor#render API * added option :header_only to stop parsing after reading the header * preliminary line number tracking * auto-select backend sub-folder containing custom templates * rubygem-asciidoctor package now available in Fedora (#92) Compliance:: * refactor reader, process attribute entries and conditional blocks while parsing (#143) * support limited value comparison functionality of ifeval (#83) * added support for multiple attributes in ifdef and ifndef directives * don't attempt to embed image with uri reference when data-uri is set (#157) * accomodate trailing dot in author name (#156) * don't hardcode language attribute in html backend (#185) * removed language from DocBook root node (#188) * fixed revinfo line swallowing attribute entry * auto-generate caption for listing blocks if listing-caption attribute is set * support nested includes * support literal and listing paragraphs * support em dash shorthand at the end of a line * added ftp support to link inline macro * added support for the page break block macro Bug Fixes:: * pass through image with uri reference when data-uri is set (#157) * print message for failed arg (#152) * normalize whitespace at the end of lines (improved) * properly load custom templates and required libraries Improvements:: * parse document header in distinct parsing step * moved hardcoded english captions to attributes Distribution Packages:: * http://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)] https://github.com/asciidoctor/asciidoctor/issues?milestone=1&state=closed[issues resolved] | https://github.com/asciidoctor/asciidoctor/releases/tag/v0.1.1[git tag] | https://github.com/asciidoctor/asciidoctor/compare/v0.1.0...v0.1.1[full diff] == 0.1.0 (2013-02-04) - @erebor Enhancements:: * introduced Asciidoctor API (Asciidoctor#load and Asciidoctor#render methods) (#34) * added SERVER safe mode level (minimum recommended security for serverside usage) (#93) * added the asciidoctor commandline interface (cli) * added asciidoctor-safe command, enables safe mode by default * added man page for the asciidoctor command * use blockquote tag for quote block content (#124) * added hardbreaks option to preserve line breaks in paragraph text (#119) * :header_footer option defaults to false when using the API, unless rendering to file * added idseparator attribute to customized separator used in generated section ids * do not number special sections (differs from AsciiDoc) Compliance:: * use callout icons if icons are enabled, unless safe mode is SECURE * added support for name=value@ attribute syntax passed via cli (#97) * attr refs no longer case sensitive (#109) * fixed several cases of incorrect list handling * don't allow links to consume endlines or surrounding angled brackets * recognize single quote in author name * support horizontal labeled list style * added support for the d cell style * added support for bibliography anchors * added support for special sections (e.g., appendix) * added support for index term inline macros * added support for footnote and footnoteref inline macros * added auto-generated numbered captions for figures, tables and examples * added counter inline macros * added support for floating (discrete) section titles Bug Fixes:: * fixed UTF-8 encoding issue by adding magic 
encoding line to ERB templates (#144)
 * resolved Windows compatibility issues
 * clean CRLF from end of lines (#125)
 * enabled warnings when running tests, fixed warnings (#69)

Improvements::

 * renamed iconstype attribute to icontype

Distribution Packages::

 * http://rubygems.org/gems/asciidoctor[RubyGem (asciidoctor)]

https://github.com/asciidoctor/asciidoctor/issues?milestone=12&state=closed[issues resolved] |
https://github.com/asciidoctor/asciidoctor/releases/tag/v0.1.0[git tag] |
https://github.com/asciidoctor/asciidoctor/compare/v0.0.9...v0.1.0[full diff]

== Older releases (pre-0.0.1)

For information about older releases, refer to the https://github.com/asciidoctor/asciidoctor/tags[commit history] on GitHub.

asciidoctor-1.5.5/CONTRIBUTING.adoc

= Contributing
// settings:
:idprefix:
:idseparator: -
:source-language: ruby
:language: {source-language}
ifdef::env-github,env-browser[:outfilesuffix: .adoc]
// URIs:
:uri-repo: https://github.com/asciidoctor/asciidoctor
:uri-help-base: https://help.github.com/articles
:uri-issues: {uri-repo}/issues
:uri-fork-help: {uri-help-base}/fork-a-repo
:uri-branch-help: {uri-fork-help}#create-branches
:uri-pr-help: {uri-help-base}/using-pull-requests
:uri-gist: https://gist.github.com

== License Agreement

By contributing changes to this repository, you agree to license your contributions under the MIT License (see the LICENSE.adoc file).
This ensures your contributions have the same license as the project and that the community is free to use your contributions.
You also assert that you are the original author of the work that you are contributing unless otherwise stated.

== Submitting an Issue

We use the {uri-issues}[issue tracker on GitHub] associated with this project to track bugs and features.
Before submitting a bug report or feature request, check to make sure it hasn't already been submitted.

When submitting a bug report, please include a {uri-gist}[Gist] that includes any details that may help reproduce the bug, including your gem version, Ruby version, and operating system.

Most importantly, since Asciidoctor is a text processor, reproducing most bugs requires that we have some snippet of text on which Asciidoctor exhibits the bad behavior.

An ideal bug report would include a pull request with failing specs.

== Submitting a Pull Request

. {uri-fork-help}[Fork the repository].
. Run `bundle` to install development dependencies.
* If the `bundle` command is not available, run `gem install bundler` to install it.
. {uri-branch-help}[Create a topic branch] (preferably using the pattern `issue-XYZ`, where `XYZ` is the issue number).
. Add tests for your unimplemented feature or bug fix. (See <<writing-and-executing-tests,Writing and Executing Tests>>.)
. Run `bundle exec rake` to run the tests. If your tests pass, return to step 4.
. Implement your feature or bug fix.
. Run `bundle exec rake` to run the tests. If your tests fail, return to step 6.
. Add documentation for your feature or bug fix.
. If your changes are not 100% documented, go back to step 8.
. Add, commit, and push your changes.
. {uri-pr-help}[Submit a pull request].

For ideas about how to use pull requests, see the post http://blog.quickpeople.co.uk/2013/07/10/useful-github-patterns[Useful GitHub Patterns].
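Taken together, these steps boil down to a short command sequence.
The listing below is only a sketch of one possible session; the fork URL, the branch name `issue-1234`, and the commit message are placeholders, not references to a real issue.

----
$ git clone git@github.com:YOUR-USERNAME/asciidoctor.git && cd asciidoctor
$ bundle
$ git checkout -b issue-1234
# write a failing test, then implement the feature or bug fix
$ bundle exec rake
$ git add . && git commit -m 'Fix behavior described in #1234'
$ git push origin issue-1234
# then open a pull request against asciidoctor/asciidoctor on GitHub
----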
=== Background Knowledge

As Asciidoctor is built using Ruby, some basic knowledge of Ruby, RubyGems and Minitest is beneficial.
The following resources provide a good starting point for contributors who may not be completely comfortable with these tools:

* https://www.ruby-lang.org/en/documentation/quickstart/[Ruby in 20 minutes]
* https://www.ruby-lang.org/en/documentation/ruby-from-other-languages/[Ruby from other languages]
* http://guides.rubygems.org/rubygems-basics/[RubyGems basics]
* http://guides.rubygems.org/what-is-a-gem/[What is a Gem?]
* http://blog.teamtreehouse.com/short-introduction-minitest[How to use Minitest]
* http://www.rubyinside.com/a-minitestspec-tutorial-elegant-spec-style-testing-that-comes-with-ruby-5354.html[Minitest spec tutorial]
* https://github.com/seattlerb/minitest/blob/master/README.rdoc[Minitest Project Documentation]

While these resources don't cover everything you'll need, they serve as a good starting point for beginners.

=== Writing and Executing Tests

Tests live inside the test directory, in files whose names end in _test.rb.
For instance, tests for the different types of blocks can be found in the file test/blocks_test.rb.

Within a test file, individual test cases are organized inside contexts.
A context is a type of logical container that groups related tests together.
Each test case follows the same structure:

[source]
----
test 'description of test' do
  # test logic
end
----

At the moment, the tests are quite primitive.
Here's how a typical test operates:

. Defines sample AsciiDoc source
. Renders the document to HTML or DocBook
. Uses XPath and CSS expressions to verify expected output

Here's how we might test the open block syntax:

[source]
----
test 'should render content bounded by two consecutive hyphens as an open block' do
  input = <<-EOS
--
This is an open block.
--
  EOS

  result = render_embedded_string input

  assert_css '.openblock', result, 1
  assert_css '.openblock p', result, 1
  assert_xpath '/div[@class="openblock"]//p[text()="This is an open block."]', result, 1
end
----

As you can see, several helpers are used to facilitate the test scenario.
The `render_embedded_string` method invokes Asciidoctor's render method with the header and footer option disabled.
This method is ideal for unit-level tests.
If you need to test the whole document, use `render_string` instead.
The `assert_css` and `assert_xpath` assertion methods take a CSS or XPath selector, respectively, the rendered result and the number of expected matches.
You can also use built-in assertions in Ruby's test library.

To run all the tests, simply execute `rake`:

 $ rake

NOTE: The tests should only take a few seconds to run using Ruby 2.1.

If you want to run a single test file, you can use `ruby`:

 $ ruby test/blocks_test.rb

To test a single test case, first add the string "wip" to the beginning of the description.
For example:

[source]
----
test 'wip should render ...' do
  ...
end
----

Then, run `ruby` again, but this time pass a selector argument so it finds matching tests:

 $ ruby test/blocks_test.rb -n /wip/

You can also turn on verbose mode if you want to see more output:

 $ ruby test/blocks_test.rb -n /wip/ -v

Once you are done with your test, make sure to remove `wip` from the description and run all the tests again using `rake`.

We plan on switching to a more elegant testing framework in the future, such as RSpec or Cucumber, in order to make the tests more clear and robust.

=== Running Asciidoctor in Development Mode

Asciidoctor is designed so that you can run the script directly out of the cloned repository.
Simply execute the `asciidoctor` command directly (referencing it either by relative or absolute path).
There's no need to install it using the `gem` command first.

For example, to convert the README file, switch to the root of the project and run:

 $ ./bin/asciidoctor README.adoc

IMPORTANT: You'll need to make sure you reference the correct relative path to the `asciidoctor` command.

If you want to be able to execute the `asciidoctor` command from any directory without worrying about the relative (or absolute) path, you can set up the following Bash alias:

 alias asciidoctor-dev="/path/to/asciidoctor/bin/asciidoctor"

Now you can execute the `asciidoctor` command from any folder as follows:

 $ asciidoctor-dev README.adoc

== Supporting Additional Ruby Versions

If you would like this library to support another Ruby version, you may volunteer to be a maintainer.
Being a maintainer entails making sure all tests run and pass on that implementation.
When something breaks on your implementation, you will be expected to provide patches in a timely fashion.
If critical issues for a particular implementation exist at the time of a major release, support for that Ruby version may be dropped.

asciidoctor-1.5.5/Gemfile

source 'https://rubygems.org'

# Look in asciidoctor.gemspec for runtime and development dependencies
gemspec

# enable this group to use Guard for continuous testing
# after removing comments, run `bundle install` then `guard`
#group :guardtest do
#  gem 'guard'
#  gem 'guard-test'
#  gem 'libnotify'
#  gem 'listen', :github => 'guard/listen'
#end

group :ci do
  gem 'simplecov', '~> 0.9.1'
  if ENV['SHIPPABLE']
    gem 'simplecov-csv', '~> 0.1.3'
    gem 'ci_reporter', '~> 2.0.0'
    gem 'ci_reporter_minitest', '~> 1.0.0'
    #gem 'ci_reporter_cucumber', '~> 1.0.0'
  end
end

asciidoctor-1.5.5/Guardfile

# use `guard start -n f` to disable notifications
# or set the environment variable GUARD_NOTIFY=false
notification :libnotify,
  :display_message => true,
  :timeout => 5, # in seconds
  :append => false,
  :transient => true,
  :urgency => :critical

guard :test do
  watch(%r{^lib/(.+)\.rb$}) do |m|
    "test/#{m[1]}_test.rb"
  end
  watch(%r{^test.+_test\.rb$})
  watch('test/test_helper.rb') do
    "test"
  end
end

asciidoctor-1.5.5/LICENSE.adoc

.The MIT License
....
Copyright (C) 2012-2016 Dan Allen, Ryan Waldron and the Asciidoctor Project

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
....
asciidoctor-1.5.5/README-fr.adoc000066400000000000000000000476751277513741400162550ustar00rootroot00000000000000= Asciidoctor Dan Allen ; Sarah White ; Ryan Waldron // FIXME use build system to expand includes statically so document renders properly on GitHub ifeval::[{safe-mode-level} < 20] include::_settings-README.adoc[] endif::[] ifeval::[{safe-mode-level} >= 20] // settings: :page-layout: base :idprefix: :idseparator: - :source-language: ruby :language: {source-language} ifdef::env-github[:status:] // URIs: :uri-org: https://github.com/asciidoctor :uri-repo: {uri-org}/asciidoctor :uri-asciidoctorj: {uri-org}/asciidoctorj :uri-asciidoctorjs: {uri-org}/asciidoctor.js :uri-project: http://asciidoctor.org ifdef::env-site[:uri-project: link:] :uri-docs: {uri-project}/docs :uri-news: {uri-project}/news :uri-manpage: {uri-project}/man/asciidoctor :uri-issues: {uri-repo}/issues :uri-contributors: {uri-repo}/graphs/contributors :uri-rel-file-base: link: :uri-rel-tree-base: link: ifdef::env-site[] :uri-rel-file-base: {uri-repo}/blob/master/ :uri-rel-tree-base: {uri-repo}/tree/master/ endif::[] :uri-changelog: {uri-rel-file-base}CHANGELOG.adoc :uri-contribute: {uri-rel-file-base}CONTRIBUTING.adoc :uri-license: {uri-rel-file-base}LICENSE.adoc :uri-tests: {uri-rel-tree-base}test :uri-discuss: http://discuss.asciidoctor.org :uri-irc: irc://irc.freenode.org/#asciidoctor :uri-rubygem: https://rubygems.org/gems/asciidoctor :uri-what-is-asciidoc: {uri-docs}/what-is-asciidoc :uri-user-manual: {uri-docs}/user-manual :uri-install-docker: https://github.com/asciidoctor/docker-asciidoctor //:uri-install-doc: {uri-docs}/install-toolchain :uri-install-osx-doc: {uri-docs}/install-asciidoctor-macosx :uri-render-doc: {uri-docs}/render-documents :uri-themes-doc: {uri-docs}/produce-custom-themes-using-asciidoctor-stylesheet-factory :uri-gitscm-repo: https://github.com/git/git-scm.com :uri-prototype: {uri-gitscm-repo}/commits/master/lib/asciidoc.rb :uri-freesoftware: https://www.gnu.org/philosophy/free-sw.html :uri-foundation: http://foundation.zurb.com :uri-tilt: https://github.com/rtomayko/tilt :uri-ruby: https://ruby-lang.org // images: :image-uri-screenshot: https://raw.githubusercontent.com/asciidoctor/asciidoctor/master/screenshot.png endif::[] {uri-project}/[Asciidoctor] est un processeur de texte _rapide_ et une chaîne de publication pour convertir du contenu {uri-what-is-asciidoc}[AsciiDoc] en HTML5, DocBook 5 (ou 4.5) et d'autres formats. Asciidoctor est écrit en Ruby, packagé sous forme de RubyGem et publié sur {uri-rubygem}[RubyGems.org]. La gemme est aussi incluse dans plusieurs distributions Linux, dont Fedora, Debian et Ubuntu. Asciidoctor est open source, {uri-repo}[hébergé sur GitHub] et distribué sous {uri-license}[licence MIT]. ifndef::env-site[] .Ce document est traduit dans les langues suivantes : * {uri-rel-file-base}README.adoc[Anglais] * {uri-rel-file-base}README-zh_CN.adoc[Chinois] * {uri-rel-file-base}README-jp.adoc[Japonais] endif::[] .Documentation clé [.compact] * {uri-docs}/what-is-asciidoc[Qu'est ce qu'AsciiDoc ?] * {uri-docs}/asciidoc-writers-guide[Guide pour Rédacteur AsciiDoc] * {uri-docs}/asciidoc-syntax-quick-reference[Syntaxe de Référence AsciiDoc] * {uri-docs}/user-manual[Manuel Utilisateur Asciidoctor] .Asciidoctor est disponible partout où Ruby est disponible **** Vous pouvez exécuter Asciidoctor dans la JVM en utilisant JRuby. Pour invoquer l'API Asciidoctor directement depuis Java ou d'autres langages de la JVM, utilisez {uri-asciidoctorj}[AsciidoctorJ]. 
Des plugins basés sur AsciidoctorJ permettent d'intégrer le processeur Asciidoctor avec Apache Maven, Gradle ou Javadoc. Asciidoctor s'exécute également au sein de JavaScript. Nous utilisons http://opalrb.org[Opal] pour transcrire le code source Ruby en JavaScript afin de produire {uri-asciidoctorjs}[Asciidoctor.js], une version pleinement fonctionnelle d'Asciidoctor qui s'intègre dans tout environnement JavaScript, comme un navigateur web ou Node.js. Asciidoctor.js est utilisé pour faire fonctionner les extensions AsciiDoc Preview pour Chrome, Atom, Brackets et autres outils web. **** ifdef::status[] .*Santé du projet* image:https://img.shields.io/travis/asciidoctor/asciidoctor/master.svg[Build Status (Travis CI), link=https://travis-ci.org/asciidoctor/asciidoctor] image:https://ci.appveyor.com/api/projects/status/ifplu67oxvgn6ceq/branch/master?svg=true&passingText=green%20bar&failingText=%23fail&pendingText=building%2E%2E%2E[Build Status (AppVeyor), link=https://ci.appveyor.com/project/asciidoctor/asciidoctor] //image:https://img.shields.io/coveralls/asciidoctor/asciidoctor/master.svg[Coverage Status, link=https://coveralls.io/r/asciidoctor/asciidoctor] image:https://codeclimate.com/github/asciidoctor/asciidoctor/badges/gpa.svg[Code Climate, link="https://codeclimate.com/github/asciidoctor/asciidoctor"] image:https://inch-ci.org/github/asciidoctor/asciidoctor.svg?branch=master[Inline docs, link="https://inch-ci.org/github/asciidoctor/asciidoctor"] endif::[] == En un mot Asciidoctor lit du contenu écrit en texte brut, comme présenté dans la partie gauche de l'image ci-dessous, et le convertit en HTML5, comme présenté dans la partie droite. Asciidoctor applique une feuille de style par défaut au document HTML5 afin de fournir une expérience de lecture agréable, clé en main. image::{image-uri-screenshot}[Prévisualisation d'une source AsciiDoc et le rendu HTML correspondant] == Le traitement d'AsciiDoc Asciidoctor lit et analyse la syntaxe du texte écrit en AsciiDoc afin de créer une représentation, sous forme d'arbre, à partir de laquelle des templates sont appliqués pour produire de l'HTML5, du DocBook 5 (ou 4.5). Vous avez la possibilité d'écrire votre propre convertisseur ou de fournir des templates supportant {uri-tilt}[Tilt] pour personnaliser le résultat généré ou pour produire des formats alternatifs. NOTE: Asciidoctor est un remplaçant du processeur AsciiDoc original écrit en Python (`asciidoc.py`). La suite de tests Asciidoctor possède {uri-tests}[> 1,600 tests] pour garantir la compatibilité avec la syntaxe AsciiDoc. En plus de la syntaxe AsciiDoc standard, Asciidoctor reconnaît des balises additionnelles ainsi que des options de formatage, comme les polices d'icônes (par exemple `+icon:fire[]+`) et des éléments d'interface (par exemple `+button:[Enregistrer]+`). Asciidoctor offre aussi un thème moderne et « responsive » basé sur {uri-foundation}[Foundation] pour styliser le document HTML5 généré. == Prérequis Asciidoctor fonctionne sur Linux, OS X (Mac), Windows et requiert une des implémentations suivantes : * MRI (Ruby 1.8.7, 1.9.3, 2.0, 2.1, 2.2 & 2.3) * JRuby (1.7 dans les modes Ruby 1.8 et 1.9, 9000) * Rubinius 2.2.x * Opal (JavaScript) Votre aide est appréciée pour tester Asciidoctor sur l'une de ces plateformes. Référez-vous au paragraphe <> si vous souhaitez vous impliquer dans ce projet. 
[CAUTION] ==== Si vous utilisez un environnement Windows dans une autre langue que l'anglais, vous pourriez tomber sur l'erreur `Encoding::UndefinedConversionError` lors du lancement d'Asciidoctor. Pour corriger ce problème, nous recommandons de changer la page de code en UTF-8 dans votre console : chcp 65001 Après ce changement, tous les maux de tête liés à l'Unicode seront derrière vous. Si vous utilisez un environnement de développement comme Eclipse, assurez-vous de définir l'encodage en UTF-8. Asciidoctor fonctionne mieux lorsque vous utilisez UTF-8 partout. ==== == Installation Asciidoctor peut être installé en utilisant la commande (a) `gem install`, (b) Bundler ou (c) les gestionnaires de paquets pour les distributions Linux populaires. TIP: L'avantage d'utiliser le gestionnaire de paquets pour installer la gemme est que l'installation englobe celle des librairies Ruby et RubyGems si elles ne sont pas déjà installés. L'inconvénient est que le paquet n'est pas forcément mis à jour immédiatement après la mise à disposition de la gemme. Si vous avez besoin de la dernière version, vous devez passer par la commande `gem`. === (a) Installation de la gemme Ouvrir un terminal et taper (en excluant le `$`) : $ gem install asciidoctor Si vous souhaitez installer une version pre-release (c'est-à-dire, une « release candidate »), utilisez : $ gem install asciidoctor --pre .Mettre à jour votre installation [TIP] ==== Si vous avez une précédente version d'Asciidoctor installée, vous pouvez la mettre à jour en utilisant : $ gem update asciidoctor Si vous installez une nouvelle version de la gemme en utilisant `gem install` au lieu de `gem update`, vous aurez plusieurs versions d'installées. Si c'est le cas, utilisez la commande gem suivante pour supprimer la vieille version : $ gem cleanup asciidoctor ==== === (b) Bundler . Créez un fichier Gemfile à la racine de votre projet (ou du répertoire courant) . Ajoutez la gemme `asciidoctor` dans votre fichier Gemfile comme ci-dessous : + [source] ---- source 'https://rubygems.org' gem 'asciidoctor' # ou spécifier la version explicitement # gem 'asciidoctor', '1.5.4' ---- . Sauvegardez le fichier Gemfile . Ouvrez un terminal et installez la gemme en utilisant : $ bundle Pour mettre à jour la gemme, spécifiez la nouvelle version dans le fichier Gemfile et exécutez `bundle` à nouveau. Utiliser `bundle update` *n*'est *pas* recommandé car les autres gemmes seront également mises à jour, ce qui n'est pas forcément le résultat voulu. === (c) Gestionnaire de paquets Linux ==== DNF (Fedora 21 ou supérieure) Pour installer la gemme sur Fedora 21 ou supérieure en utilisant dnf, ouvrez un terminal et tapez : $ sudo dnf install -y asciidoctor Pour mettre à jour la gemme, utilisez : $ sudo dnf update -y asciidoctor TIP: Votre système peut être configuré pour mettre à jour automatiquement les paquets rpm, auquel cas aucune action de votre part ne sera nécessaire pour mettre à jour la gemme. ==== apt-get (Debian, Ubuntu, Mint) Pour installer la gemme sur Debian, Ubuntu ou Mint, ouvrez un terminal et tapez : $ sudo apt-get install -y asciidoctor Pour mettre à jour la gemme, utilisez : $ sudo apt-get upgrade -y asciidoctor TIP: Votre système peut être configuré pour mettre à jour automatiquement les paquets deb, auquel cas aucune action de votre part ne sera nécessaire pour mettre à jour la gemme. La version d'Asciidoctor installé par le gestionnaire de paquets (apt-get) peut ne pas correspondre à la dernière version d'Asciidoctor. 
Consultez le dépôt de paquets de votre distribution pour trouver quelle version est disponible par version de distribution. * https://packages.debian.org/search?keywords=asciidoctor&searchon=names&exact=1&suite=all§ion=all[Paquet asciidoctor par version de Debian] * http://packages.ubuntu.com/search?keywords=asciidoctor&searchon=names&exact=1&suite=all§ion=all[Paquet asciidoctor par version d'Ubuntu] * https://community.linuxmint.com/software/view/asciidoctor[Paquet asciidoctor par version de Mint] [CAUTION] ==== Il est déconseillé d'utiliser la commande `gem update` pour mettre à jour la gemme gérée par le gestionnaire de paquets. Le faire mettrait la système dans un état incohérent car le gestionnaire de paquets ne pourrait plus gérer les fichiers (qui sont installés dans /usr/local). En résumé, les gemmes du système doivent être gérées seulement par le gestionnaire de paquets. Si vous souhaitez utiliser une version d'Asciidoctor qui est plus récente que celle installée par votre gestionnaire de paquets, vous devriez utiliser http://rvm.io[RVM] pour installer Ruby dans votre répertoire personnel (dans votre espace utilisateur). Vous pouvez alors utiliser la commande `gem` pour installer ou mettre à jour la gemme Asciidoctor. En utilisant RVM, les gemmes sont installées dans un emplacement isolé du système. ==== ==== apk (Alpine Linux) Pour installer la gemme sur Alpine Linux, ouvrez un terminal et tapez : $ sudo apk add asciidoctor Pour mettre à jour la gemme, utilisez : $ sudo apk add -u asciidoctor TIP: Votre système peut être configuré pour mettre à jour automatiquement les paquets apk, auquel cas aucune action de votre part ne sera nécessaire pour mettre à jour la gemme. === Autres options d'installation * {uri-install-docker}[Installation d'Asciidoctor avec Docker] * {uri-install-osx-doc}[Installation d'Asciidoctor sur Mac OS X] // pour l'instant, l'entrée suivante est juste une répétition de l'information dans ce README //* {uri-install-doc}[Installation de l'outillage Asciidoctor] == Utilisation Si la gemme Asciidoctor s'est installée correctement, la ligne de commande (CLI) `asciidoctor` sera disponible dans votre PATH. Pour vérifier sa disponibilité, exécutez la commande suivante dans votre terminal : $ asciidoctor --version Vous devriez voir les informations concernant la version d'Asciidoctor et celle de votre environnement Ruby s'afficher dans le terminal. [.output] .... Asciidoctor 1.5.4 [http://asciidoctor.org] Runtime Environment (ruby 2.2.2p95 [x86_64-linux]) (lc:UTF-8 fs:UTF-8 in:- ex:UTF-8) .... Asciidoctor fournit aussi une API. Cette API permet une intégration avec d'autres logiciels Ruby, comme Rails, Sinatra et GitHub, ainsi que d'autres langages comme Java (via {uri-asciidoctorj}[AsciidoctorJ]) ou JavaScript (via {uri-asciidoctorjs}[Asciidoctor.js]). === Interface de Ligne de Commande (CLI) La commande `asciidoctor` vous permet d'invoquer Asciidoctor à partir de la ligne de commande (c'est-à-dire, un terminal). La commande suivante convertit le fichier README.adoc en HTML et sauvegarde le résultat dans le fichier README.html dans le même répertoire. Le nom du fichier HTML généré est tiré de celui du fichier source, l'extension a été changée pour `.html`. 
$ asciidoctor README.adoc Vous pouvez contrôler le processeur Asciidoctor en ajoutant plusieurs paramètres, vous pouvez en apprendre plus sur ces derniers en utilisant la commande : $ asciidoctor --help Par exemple, pour écrire le fichier dans un répertoire différent, utilisez : $ asciidoctor -D output README.adoc La {uri-manpage}[page man] `asciidoctor` fournit une référence complète sur l'interface de ligne de commande. Référez-vous aux ressources suivantes pour en apprendre davantage sur la façon d'utiliser la commande `asciidoctor`. * {uri-render-doc}[Comment convertir un document ?] * {uri-themes-doc}[Comment utiliser la fabrique de feuilles de style Asciidoctor pour produire des thèmes personnalisés ?] === API Ruby Pour utiliser Asciidoctor dans votre application, vous avez tout d'abord besoin de faire un « require » sur la gemme : [source] require 'asciidoctor' Vous pouvez ensuite convertir un fichier AsciiDoc en fichier HTML en utilisant : [source] Asciidoctor.convert_file 'README.adoc', to_file: true, safe: :safe WARNING: Quand vous utilisez Asciidoctor via l'API, le mode de sûreté par défaut est `:secure`. Dans le mode « secure », plusieurs fonctionnalités centrales sont désactivées, comme la directive `include`. Si vous souhaitez activer ces fonctionnalités, vous aurez besoin de définir explicitement le mode de sûreté avec une la valeur `:server` (recommandée) ou `:safe`. Vous pouvez aussi convertir une chaîne de texte en fragment HTML (pour une insertion dans une page HTML) en utilisant : [source] ---- content = '_Zen_ in the art of writing http://asciidoctor.org[AsciiDoc].' Asciidoctor.convert content, safe: :safe ---- Si vous voulez le document HTML complet, activez l'option `header_footer` comme ci-dessous : [source] ---- content = '_Zen_ in the art of writing http://asciidoctor.org[AsciiDoc].' html = Asciidoctor.convert content, header_footer: true, safe: :safe ---- Si vous avez besoin d'accéder au document analysé, vous pouvez séparer la conversion en deux étapes distinctes : [source] ---- content = '_Zen_ in the art of writing http://asciidoctor.org[AsciiDoc].' document = Asciidoctor.load content, header_footer: true, safe: :safe puts document.doctitle html = document.convert ---- Gardez en tête que si vous n'aimez pas le contenu généré par Asciidoctor, _vous pouvez le changer !_ Asciidoctor supporte des convertisseurs personnalisés qui peuvent prendre en charge la conversion depuis le document analysé jusqu'au contenu généré. Une façon simple de personnaliser les morceaux de contenu générés est d'utiliser le convertisseur de template. Le convertisseur de template vous permet, en utilisant un template supporté par {uri-tilt}[Tilt], de prendre en charge la conversion de n'importe quel élément dans le document. Vous l'aurez compris, vous _pouvez_ complètement prendre le contrôle sur le contenu généré. Pour plus d'informations sur comment utiliser l'API ou personnaliser le contenu généré, référez-vous au {uri-user-manual}[manuel utilisateur]. == Contributions Dans l'esprit du {uri-freesoftware}[logiciel libre], _tout le monde_ est encouragé à aider en vue d'améliorer le projet. Si vous découvrez des erreurs ou des oublis dans le code source, la documentation, ou le contenu du site web, s'il vous plaît n'hésitez pas à ouvrir un ticket ou une « pull request » avec un correctif. Les contributeurs et contributrices sont toujours les bienvenus ! 
Voici quelques façons de contribuer : * en utilisant les versions prerelease (alpha, beta ou preview), * en rapportant des anomalies, * en suggérant de nouvelles fonctionnalités, * en écrivant ou éditant la documentation, * en écrivant des spécifications, * en écrivant du code -- _Aucun patch n'est trop petit_ ** corriger une coquille, ** ajouter des commentaires, ** nettoyer des espaces inutiles, ** écrire des tests ! * en refactorant le code, * en corrigeant des {uri-issues}[anomalies], * en effectuant des relectures des patches. Le guide du {uri-contribute}[parfait Contributeur] fournit des informations sur comment créer, styliser et soumettre des tickets, des demandes de fonctionnalités, du code et de la documentation pour le projet Asciidoctor. == Être aidé Le projet Asciidoctor est développé pour vous aider à écrire et publier du contenu. Mais nous ne pouvons pas le faire sans avoir vos avis ! Nous vous encourageons à poser vos questions et discuter de n'importe quels aspects du projet sur la liste de discussion, Twitter ou dans le salon de discussion. Mailing list:: {uri-discuss} Twitter (Chat):: hashtag #asciidoctor ou la mention @asciidoctor Gitter (Chat):: image:https://badges.gitter.im/Join%20In.svg[Gitter, link=https://gitter.im/asciidoctor/asciidoctor] //// IRC (Chat):: {uri-irc}[#asciidoctor] sur FreeNode IRC //// ifdef::env-github[] De plus amples informations et documentations sur Asciidoctor peuvent être trouvées sur le site web du projet. {uri-project}/[Home] | {uri-news}[News] | {uri-docs}[Docs] endif::[] L'organisation Asciidoctor sur GitHub héberge le code source du projet, le gestionnaire de tickets ainsi que des sous-projets. Dépôt des sources (git):: {uri-repo} Gestionnaire de tickets:: {uri-issues} L'organisation Asciidoctor sur GitHub:: {uri-org} == Copyright et licence Copyright (C) 2012-2016 Dan Allen, Ryan Waldron et le projet Asciidoctor. Une utilisation libre de ce logiciel est autorisée sous les termes de la licence MIT. Consultez le fichier {uri-license}[LICENSE] pour plus de détails. == Auteurs *Asciidoctor* est mené par https://github.com/mojavelinux[Dan Allen] et https://github.com/graphitefriction[Sarah White] et reçoit de nombreuses contributions de la part de la {uri-contributors}[géniale communauté] Asciidoctor. Le projet a été initié en 2012 par https://github.com/erebor[Ryan Waldron] et est basé sur {uri-prototype}[un prototype] écrit par https://github.com/nickh[Nick Hengeveld]. *AsciiDoc* a été démarré par Stuart Rackham et a reçu de nombreuses contributions de la part de la communauté AsciiDoc. == Changelog ifeval::[{safe-mode-level} < 20] include::CHANGELOG.adoc[tags=compact;parse,leveloffset=+1] endif::[] Référez-vous au fichier {uri-changelog}[CHANGELOG] pour une liste complète des changements des versions précédentes. 
asciidoctor-1.5.5/README-jp.adoc000066400000000000000000000502761277513741400162460ustar00rootroot00000000000000= Asciidoctor Dan Allen ; Sarah White ; Ryan Waldron // settings: :page-layout: base :idprefix: :idseparator: - :source-language: ruby :language: {source-language} ifdef::env-github[:status:] // URIs: :uri-org: https://github.com/asciidoctor :uri-repo: {uri-org}/asciidoctor :uri-asciidoctorj: {uri-org}/asciidoctorj :uri-asciidoctorjs: {uri-org}/asciidoctor.js :uri-project: http://asciidoctor.org ifdef::env-site[:uri-project: link:] :uri-docs: {uri-project}/docs :uri-news: {uri-project}/news :uri-manpage: {uri-project}/man/asciidoctor :uri-issues: {uri-repo}/issues :uri-contributors: {uri-repo}/graphs/contributors :uri-rel-file-base: link: :uri-rel-tree-base: link: ifdef::env-site[] :uri-rel-file-base: {uri-repo}/blob/master/ :uri-rel-tree-base: {uri-repo}/tree/master/ endif::[] :uri-changelog: {uri-rel-file-base}CHANGELOG.adoc :uri-contribute: {uri-rel-file-base}CONTRIBUTING.adoc :uri-license: {uri-rel-file-base}LICENSE.adoc :uri-tests: {uri-rel-tree-base}test :uri-discuss: http://discuss.asciidoctor.org :uri-irc: irc://irc.freenode.org/#asciidoctor :uri-rubygem: https://rubygems.org/gems/asciidoctor :uri-what-is-asciidoc: {uri-docs}/what-is-asciidoc :uri-user-manual: {uri-docs}/user-manual :uri-install-docker: https://github.com/asciidoctor/docker-asciidoctor //:uri-install-doc: {uri-docs}/install-toolchain :uri-install-osx-doc: {uri-docs}/install-asciidoctor-macosx :uri-render-doc: {uri-docs}/render-documents :uri-themes-doc: {uri-docs}/produce-custom-themes-using-asciidoctor-stylesheet-factory :uri-gitscm-repo: https://github.com/git/git-scm.com :uri-prototype: {uri-gitscm-repo}/commits/master/lib/asciidoc.rb :uri-freesoftware: https://www.gnu.org/philosophy/free-sw.html :uri-foundation: http://foundation.zurb.com :uri-tilt: https://github.com/rtomayko/tilt :uri-ruby: https://ruby-lang.org // images: :image-uri-screenshot: https://raw.githubusercontent.com/asciidoctor/asciidoctor/master/screenshot.png {uri-project}/[Asciidoctor]は _高速な_ テキストプロセッサで {uri-what-is-asciidoc}[Asciidoc] をHTML5, DocBook 5(4.5)や他のフォーマットに変換するツールチェインを配布しています. AsciidoctorはRubyで書かれており, RubyGemとしてパッケージされ, {uri-rubygem}[RubyGems.org] で配布されています. gemはいくつかのLinuxディストリビューション, Fedora, Debian, Ubuntuにも含まれています. Asciidoctorはオープンソース {uri-repo}[hosted on Github] で {uri-license}[the MIT licence]のもとに配布されます. ifndef::env-site[] .Translations of the document are available in the following languages: * {uri-rel-file-base}README-zh_CN.adoc[汉语] * {uri-rel-file-base}README.adoc[English] * {uri-rel-file-base}README-fr.adoc[Français] endif::[] .Key documentation [.compact] * {uri-docs}/what-is-asciidoc[What is Asciidoc?] * {uri-docs}/asciidoc-writers-guide[AsciiDoc Writer's Guide] * {uri-docs}/asciidoc-syntax-quick-reference[AsciiDoc Syntax Reference] * {uri-docs}/user-manual[Asciidoctor User Manual] .Rubyの行く先, Asciidoctorの追うところ **** AsciidoctorはJRubyを用いてJVM上でも実行できます. Javaや他のJVM言語からAsciidoctor APIを直接呼び出すには, {uri-asciidoctorj}[AsciidoctorJ] を使ってください. AsciidoctorJに基づいた, AsciidoctorプロセッサをApache Maven, GradleやJavadocに統合するプラグインがあります. AsciidoctorはJavaScriptでも実行可能です. {uri-asciidoctorjs}[Asciidoctor.js], WebブラウザやNode.jsのようなJavaScript環境で動くAsciidoctorの完全機能版, を生成するために, RubyのソースをJavaScriptにトランスパイルするのに http://opalrb.org[Opal]を使います. Asciidoctor.jsはChrome, Atom, Brackets や他のウェブベースのツールの拡張機能としてAsciiDocのプレビューのために使われます. 
**** ifdef::status[] .*Project health* image:https://img.shields.io/travis/asciidoctor/asciidoctor/master.svg[Build Status (Travis CI), link=https://travis-ci.org/asciidoctor/asciidoctor] image:https://ci.appveyor.com/api/projects/status/ifplu67oxvgn6ceq/branch/master?svg=true&passingText=green%20bar&failingText=%23fail&pendingText=building%2E%2E%2E[Build Status (AppVeyor), link=https://ci.appveyor.com/project/asciidoctor/asciidoctor] //image:https://img.shields.io/coveralls/asciidoctor/asciidoctor/master.svg[Coverage Status, link=https://coveralls.io/r/asciidoctor/asciidoctor] image:https://codeclimate.com/github/asciidoctor/asciidoctor/badges/gpa.svg[Code Climate, link="https://codeclimate.com/github/asciidoctor/asciidoctor"] image:https://inch-ci.org/github/asciidoctor/asciidoctor.svg?branch=master[Inline docs, link="https://inch-ci.org/github/asciidoctor/asciidoctor"] endif::[] == The Big Picture Asciidoctorは下図の左側のパネルに示されるように, 平文で書かれた内容を読み, 右のパネルに描かれるようにHTML5に変換します. Asciidoctorは枠にとらわれない快適なエクスペリエンスのためにデフォルトスタイルシートをHTML5時メントに適用します. image::{image-uri-screenshot}[Preview of AsciiDoc source and corresponding rendered HTML] == AsciiDoc Processing AsciidoctorはAsciiDoc文法で書かれたテキストを読み込み解釈し, それからHTML5, DocBook5(4.5)やman(ual)を出力するために内蔵コンバータセットにパースツリーを渡します. 生成された出力をカスタマイズ, あるいは追加のフォーマットをつくるためにあなた自身のコンバータを使うことや {uri-tilt}[Tilt]-supported テンプレートを読み込むオプションがあります. NOTE: AsciidoctorはオリジナルのAsciiDoc Pythonプロセッサ(`asciidoc.py`)の完全互換です. Asciidoctorテストスイートは {uri-tests}[> 1,600 tests] をAsciiDoc文法との互換性を保証するために有しています. クラシックなAsciiDoc文法に加えて, Asciidoctorは追加のマークアップとフォントベースのicons(例えば, `+icon:fire[]+`)などのフォーマッティングオプションとUIエレメント(`+button:[Save]+`)を 受け付けます. AsciidoctorはHTML5出力をスタイルするため, モダンで, {uri-foundation}[Foundation] に基づいたレスポンシブテーマをも提供します. == Requirements AsciidoctorはLinux, OS X (Mac)とWindowsで動き, 下記の {uri-ruby}[Ruby]実装の一つを必要とします. * MRI (Ruby 1.8.7, 1.9.3, 2.0, 2.1, 2.2 & 2.3) * JRuby (1.7 in Ruby 1.8 and 1.9 modes, 9000) * Rubinius 2.2.x * Opal (JavaScript) [CAUTION] ==== もし非英語環境のWindowsを使っているなら, Asciidoctorを起動した時に`Encoding::UndefinedConversionError`に遭遇するでしょう. これを解決するには使っているコンソールの有効なコードページをUTF-8: chcp 65001 に変更することを推奨します. 一度この変更をすると, Unicode関連の頭痛の種は消えるでしょう. もしEclipseのようなIDEを使っているなら, 同様にエンコーディングをUTF-8にするのを忘れないでください. AsciidoctorはUTF-8が使われているところで最高の働きを見せます. ==== == Installation Asciidoctorは (a) `gem install` コマンド, (b) Bundler あるいは (c) 有名Linuxディストリビューションのパッケージマネージャ を用いてインストールされます. TIP: Linuxパッケージマネージャを用いてインストールすることの利点は, もしRubyやRubyGemsライブラリがまだインストールされていなかったら, それらを処理してくれることです. 欠点はgemのリリース直後にはすぐには有効にならないことです. もし最新バージョンを使いたければ, 必ず `gem` コマンドを使いましょう. === (a) gem install ターミナルを開, 入力しましょう (先頭の`$`は除く): $ gem install asciidoctor もし, 先行リリースバージョン(例えばリリース候補版)をインストールしたければ $ gem install asciidoctor --pre .アップグレード [TIP] ==== もしAsciidoctorの以前のバージョンあインストール済みであれば, 以下によってアップデートできます: $ gem update asciidoctor もし gem update の代わりに `gem install` を使ってgemを新バージョンにした場合, 複数バージョンばインストールされるでしょう. そのときは, 以下のgemコマンドで古いバージョンを削除しましょう: $ gem cleanup asciidoctor ==== === (b) Bundler . プロジェクトフォルダーのルート(かカレントディレクトリ)にGemfileを作成 . `asciidoctor` gemをGemfileに以下のように追加: + [source] ---- source 'https://rubygems.org' gem 'asciidoctor' # or specify the version explicitly # gem 'asciidoctor', '1.5.4' ---- . Gemfileを保存 . ターミナルを開き, gemをインストール: $ bundle gemをアップグレードするには, Gemfileで新バージョンを指定し, `bundle` を再び実行してください. `bundle update` は他のgemもアップデートするため推奨されて *いない* ので, 思わぬ結果になるかも知れません. 
=== (c) Linux package managers ==== DNF (Fedora 21 or greater) dnfを使いFedora21かそれ以上にインストールするには, ターミナルを開き, 以下を入力してください: $ sudo dnf install -y asciidoctor gemをアップグレードするには: $ sudo dnf update -y asciidoctor TIP: お使いのシステムは自動的にrpmパッケージをアップデートするよう設定されているかも知れません.その場合, gemのアップデートのためにあなたがすべきことはありません. ==== apt-get (Debian, Ubuntu, Mint) Debian, UbuntuまたはMintにインストールするには, ターミナルを開き, 以下を入力してください: $ sudo apt-get install -y asciidoctor gemをアップグレードするには: $ sudo apt-get upgrade -y asciidoctor TIP: お使いのシステムは自動的にdebパッケージをアップデートするよう設定されているかも知れません.その場合, gemのアップデートのためにあなたがすべきことはありません. パッケージマネージャ(apt-get)によってインストールされたバージョンのAsciidoctorは最新リリースのAsciidoctorではないかもしれません. ディストリビューションのリリース毎に, どのバージョンがパッケージされているかはパッケージリポジトリを調べてください. * https://packages.debian.org/search?keywords=asciidoctor&searchon=names&exact=1&suite=all§ion=all[asciidoctor package by Debian release] * http://packages.ubuntu.com/search?keywords=asciidoctor&searchon=names&exact=1&suite=all§ion=all[asciidoctor package by Ubuntu release] * https://community.linuxmint.com/software/view/asciidoctor[asciidoctor package by Mint release] [CAUTION] ==== パッケージマネージャによって管理されているgemをアップデートするのに `gem udpate` コマンドを使うなといわれるでしょう. そのようなことをするのは, パッケージマネージャがファイル(/usr/local下にインストールされた)を追跡できなくなるためにシステムが不安定な状態にするためです. 単純に, システムgemはパッケージマネージャによってのみ管理されるべきです. もし, パッケージマネージャによってインストールされたのより新しいバージョンのAsciidoctorを使いたければ, http://rvm.io[RVM] や https://github.com/rbenv/rbenv[rbenv]を使ってRubyをホームディレクトリ(すなわち, ユーザースペース)にインストールするべきです. それから, 安心して `gem` コマンドをAsciidoctorのアップデート, インストールのために使うことができます. RVMやrbenvを使っているなら, gemはシステムからは孤立した場所にインストールされます. ==== ==== apk (Alpine Linux) Alpine Linuxにgemをインストールするには, ターミナルを開き, 以下を入力してください: $ sudo apk add asciidoctor gemをアップグレードするには: $ sudo apk add -u asciidoctor TIP: お使いのシステムは自動的にapkパッケージをアップデートするよう設定されているかも知れません.その場合, gemのアップデートのためにあなたがすべきことはありません. === Other installation options * {uri-install-docker}[Installing Asciidoctor using Docker] * {uri-install-osx-doc}[Installing Asciidoctor on Mac OS X] // at the moment, the following entry is just a reiteration of the information in this README //* {uri-install-doc}[Installing the Asciidoctor toolchain] == Usage Asciidoctorのインストールに成功すれば, `asciidoctor` コマンドラインインターフェース(CLI)がPATH中で有効になります. 確認のために, 以下をターミナルで実行しましょう: $ asciidoctor --version AsciidoctorのバージョンとRuby環境についての情報がターミナルに出力されたのを見ることができるはずです. [.output] .... Asciidoctor 1.5.4 [http://asciidoctor.org] Runtime Environment (ruby 2.2.2p95 [x86_64-linux]) (lc:UTF-8 fs:UTF-8 in:- ex:UTF-8) .... AsciidoctorはAPIを提供します. APIは他のRubyソフトウェア, Rails, SinatraとGitHub, そして他の言語, Java (via {uri-asciidoctorj}[AsciidoctorJ] )とJavaScript (via {uri-asciidoctorjs}[Asciidoctor.js])との統合を意図しています. === Command line interface (CLI) `asciidoctorjs` コマンドはAsciidoctorをコマンドライン(つまりターミナル)から起動することを可能にします. 次のコマンドはファイルREADME.adocをHTMLに変換し, 結果を同じディレクトリのREADME.htmlに保存します. 生成されたHTMLファイルの名前はソースファイル依存し, その拡張子を `.html` に変えます. $ asciidoctor README.adoc Asciidoctorプロセッサに様々なフラグやスイッチを与えることで制御できます.それは以下を用いて調べることができます: $ asciidoctor --help 例えば, ファイルを異なるディレクトリに書き出すには: $ asciidoctor -D output README.adoc `asciidoctor` {uri-manpage}[man page] はコマンドライン・インタフェースの完全なリファレンスを提供します. `asciidoctor` コマンドの使い方についてもっと学ぶには以下を参照してください. * {uri-render-doc}[How do I convert a document?] * {uri-themes-doc}[How do I use the Asciidoctor stylesheet factory to produce custom themes?] 
=== Ruby API Asciidoctorをアプリケーションの中で使うには, まずgemをrequireする必要があります: [source] require 'asciidoctor' それから, AsciiDocソースファイルをHTMLファイルに変換できます: [source] Asciidoctor.convert_file 'README.adoc', to_file: true, safe: :safe WARNING: AsciidoctorをAPI経由で使っている時, デフォルトのセーフモードは `:secure` です. セキュアモードでは, `include` ディレクティブを含むいくつかのコア機能は無効化されています. もしこれらの機能を有効化したい場合, 明示的にセーフモードを `:server` (推奨)か `:safe` にする必要があります. AsciiDoc文字列を埋め込みHTML(HTMLページヘの挿入)へ変換することもできます: [source] ---- content = '_Zen_ in the art of writing http://asciidoctor.org[AsciiDoc].' Asciidoctor.convert content, safe: :safe ---- もし完全なHTMLドキュメントを求めるのであれば, `header_footer` オプションを以下の通り有効にしてください: [source] ---- content = '_Zen_ in the art of writing http://asciidoctor.org[AsciiDoc].' html = Asciidoctor.convert content, header_footer: true, safe: :safe ---- パースされたドキュメントにアクセスしたいのなら, 変換を個々のステップに分割することが出来ます: [source] ---- content = '_Zen_ in the art of writing http://asciidoctor.org[AsciiDoc].' document = Asciidoctor.load content, header_footer: true, safe: :safe puts document.doctitle html = document.convert ---- Asciidoctorの生成する出力が気に入らないのであれば, _あなたはそれを変更できる_ ことを忘れないでください! Asciidoctorはパースされたドキュメントを生成された出力に変換する処理を扱うカスタムコンバーターをサポートしています. 断片的な出力をカスタマイズする簡単な方法の一つはテンプレートコンバーターを使うことです. テンプレートコンバーターによって, ドキュメント中のあらゆるノードの変換を扱うために {uri-tilt}[Tilt]-supportedテンプレートファイルを使うことができます. そのようにすれば, 出力を100%制御することが _できます_ . APIの使い方や出力のカスタマイズ方法についてのより詳しい情報は {uri-user-manual}[user manual] を参照してください. == Contributing {uri-freesoftware}[free software] の精神においては, _everyone_ がこのプロジェクトを改良するのをたすけることが勧められている. もしエラーや手抜かりをソースコード, ドキュメント, あるいはウェブサイトに見つけたのなら, 恥じることなく修正と共にpull requestの開設やissueの送信をしてください. New contributors are always welcome! *あなた* にもできることがあります: * 先行バージョン(alpha, beta or preview)の使用 * バグレポート * 新機能提案 * ドキュメントの執筆 * 仕様の執筆 * コーディング -- _パッチでも, 足りなすぎるなんてことはありません_ ** typoの修正 ** コメントの追加 ** 一貫性のないホワイトスペースの除去 ** テストの記述! * リファクタリング * {uri-issues}[issues] の修正 * パッチの批評 {uri-contribute}[Contributing] ガイドはどうやってスタイルをつくるか, issueを送るか, 機能リクエスト, コーディング, ドキュメンテーションをAsciidoctor Projectにするかの情報を提供しています. == Getting Help Asciidoctorプロジェクトはあなたが簡単に著作を書いて, 配布するのをたすけるため開発されています. しかしあなたのフィードバックなしにはできません! ディスカッションリストで, Twitterで, チャットルームで, 質問し, プロジェクトのあらゆる側面について話し合うようお勧めします. Discussion list (Nabble):: {uri-discuss} Twitter:: #asciidoctor hashtag or @asciidoctor mention Chat (Gitter):: image:https://badges.gitter.im/Join%20In.svg[Gitter, link=https://gitter.im/asciidoctor/asciidoctor] ifdef::env-github[] Further information and documentation about Asciidoctor can be found on the project's website. {uri-project}/[Home] | {uri-news}[News] | {uri-docs}[Docs] endif::[] GitHub上のAsciidoctorはプロジェクトのソースコード, イシュートラッカー, サブプロジェクトを管理しています. Source repository (git):: {uri-repo} Issue tracker:: {uri-issues} Asciidoctor organization on GitHub:: {uri-org} == Copyright and Licensing Copyright (C) 2012-2016 Dan Allen, Ryan Waldron and the Asciidoctor Project. Free use of this software is granted under the terms of the MIT License. See the {uri-license}[LICENSE] file for details. == Authors *Asciidoctor* is led by https://github.com/mojavelinux[Dan Allen] and https://github.com/graphitefriction[Sarah White] and has received contributions from {uri-contributors}[many other individuals] in Asciidoctor's awesome community. The project was initiated in 2012 by https://github.com/erebor[Ryan Waldron] and based on {uri-prototype}[a prototype] written by https://github.com/nickh[Nick Hengeveld]. *AsciiDoc* was started by Stuart Rackham and has received contributions from many other individuals in the AsciiDoc community. 
asciidoctor-1.5.5/README-zh_CN.adoc000066400000000000000000000411701277513741400166270ustar00rootroot00000000000000= Asciidoctor Dan Allen ; Sarah White ; Ryan Waldron // settings: :page-layout: base :idprefix: :idseparator: - :source-language: ruby :language: {source-language} ifdef::env-github[:status:] // URIs: :uri-org: https://github.com/asciidoctor :uri-repo: {uri-org}/asciidoctor :uri-asciidoctorj: {uri-org}/asciidoctorj :uri-asciidoctorjs: {uri-org}/asciidoctor.js :uri-project: http://asciidoctor.org ifdef::env-site[:uri-project: link:] :uri-docs: {uri-project}/docs :uri-news: {uri-project}/news :uri-manpage: {uri-project}/man/asciidoctor :uri-issues: {uri-repo}/issues :uri-contributors: {uri-repo}/graphs/contributors :uri-rel-file-base: link: :uri-rel-tree-base: link: ifdef::env-site[] :uri-rel-file-base: {uri-repo}/blob/master/ :uri-rel-tree-base: {uri-repo}/tree/master/ endif::[] :uri-changelog: {uri-rel-file-base}CHANGELOG.adoc :uri-contribute: {uri-rel-file-base}CONTRIBUTING.adoc :uri-license: {uri-rel-file-base}LICENSE.adoc :uri-tests: {uri-rel-tree-base}test :uri-discuss: http://discuss.asciidoctor.org :uri-irc: irc://irc.freenode.org/#asciidoctor :uri-rubygem: https://rubygems.org/gems/asciidoctor :uri-what-is-asciidoc: {uri-docs}/what-is-asciidoc :uri-user-manual: {uri-docs}/user-manual :uri-install-docker: https://github.com/asciidoctor/docker-asciidoctor //:uri-install-doc: {uri-docs}/install-toolchain :uri-install-osx-doc: {uri-docs}/install-asciidoctor-macosx :uri-render-doc: {uri-docs}/render-documents :uri-themes-doc: {uri-docs}/produce-custom-themes-using-asciidoctor-stylesheet-factory :uri-gitscm-repo: https://github.com/git/git-scm.com :uri-prototype: {uri-gitscm-repo}/commits/master/lib/asciidoc.rb :uri-freesoftware: https://www.gnu.org/philosophy/free-sw.html :uri-foundation: http://foundation.zurb.com :uri-tilt: https://github.com/rtomayko/tilt :uri-ruby: https://ruby-lang.org // images: :image-uri-screenshot: https://raw.githubusercontent.com/asciidoctor/asciidoctor/master/screenshot.png {uri-project}/[Asciidoctor] 是一个 _快速_ 文本处理器和发布工具链,它可以将 {uri-what-is-asciidoc}[AsciiDoc] 文档转化成 HTML5、 DocBook 5 (或 4.5) 以及其他格式。 Asciidoctor 由 Ruby 编写,打包成 RubyGem,然后发布到 {uri-rubygem}[RubyGems.org] 上。 这个 gem 还被包含道几个 Linux 发行版中,其中包括 Fedora、Debian 和 Ubuntu。 Asciidoctor 是开源的,{uri-repo}[代码托管在 GitHub],并且是以 {uri-license}[MIT 协议]授权。 .该文档有如下语言的翻译版: * {uri-rel-file-base}README.adoc[English] * {uri-rel-file-base}README-fr.adoc[Français] .关键文档 [.compact] * {uri-docs}/what-is-asciidoc[什么是 Asciidoctor?] 
* {uri-docs}/asciidoc-writers-guide[AsciiDoc 作家指南] * {uri-docs}/asciidoc-syntax-quick-reference[AsciiDoc 语法快速参考] * {uri-docs}/user-manual[Asciidoctor 用户手册] .Ruby 所至, Asciidoctor 相随 **** 使用 JRuby 可以让 Asciidoctor 运行在 Java 虚拟机上。 使用 {uri-asciidoctorj}[AsciidoctorJ] 就可以让 Java 或者其他 Java 虚拟机语言直接调用 Asciidoctor API。 基于 AsciidoctorJ 有好多好多插件可用,这些插件可以将 Asciidoctor 整合到 Apache Maven,Gradle 或 Javadoc 构建中。 Asciidoctor 也可以运行在 JavaScript 上。 我们可以使用 http://opalrb.org[Opal] 将 Ruby 源码编译成 JavaScript 并生成 {uri-asciidoctorjs}[Asciidoctor.js],这是一个全功能版的 Asciidoctor,可以运行在任意的 JavaScript 环境中,比如 Web 浏览器 或 Node.js。 Asciidoctor.js 被用于 AsciiDoc 预览,支持 Chrome 扩展,Atom,Brackets 或其他基于 Web 的工具。 **** ifdef::badges[] .*Project health* image:https://img.shields.io/travis/asciidoctor/asciidoctor/master.svg[Build Status (Travis CI), link=https://travis-ci.org/asciidoctor/asciidoctor] image:https://ci.appveyor.com/api/projects/status/ifplu67oxvgn6ceq/branch/master?svg=true&passingText=green%20bar&failingText=%23fail&pendingText=building%2E%2E%2E[Build Status (AppVeyor), link=https://ci.appveyor.com/project/asciidoctor/asciidoctor] //image:https://img.shields.io/coveralls/asciidoctor/asciidoctor/master.svg[Coverage Status, link=https://coveralls.io/r/asciidoctor/asciidoctor] image:https://codeclimate.com/github/asciidoctor/asciidoctor/badges/gpa.svg[Code Climate, link="https://codeclimate.com/github/asciidoctor/asciidoctor"] image:https://inch-ci.org/github/asciidoctor/asciidoctor.svg?branch=master[Inline docs, link="https://inch-ci.org/github/asciidoctor/asciidoctor"] endif::[] [#the-big-picture] == 全局概况 Asciidoctor 以纯文本格式读取内容,见下图左边的面板,将它转换成 HTML5 呈现在右侧面板中。 Asciidoctor 将默认的样式表应用到 HTML5 文档上,提供一个愉快的开箱即用的体验。 image::{image-uri-screenshot}[AsciiDoc 源文预览和相应的 HTML 渲染] [#asciidoc-processing] == AsciiDoc Processing Asciidoctor 读取并处理以 AsciiDoc 语法写作的文件,然后然后将解析出来的解析树交给内置的转化器生成 HTML5,DocBook 5 (或 4.5) 或帮助手册页面输出。 你可以选择使用你自己的转化器或者加载 {uri-tilt}[Tilt] - 支持通过模板来自定义输出或产生附加的格式。 NOTE: Asciidoctor是为了直接替换原 AsciiDoc Python 处理器(`asciidoc.py`)。 Asciidoctor 测试套件含有 {uri-tests}[> 1,600 测试用例] 来确保和 AsciiDoc 语法的兼容性。 除了经典的 AsciiDoc 语法,Asciidoctor 还添加额外的标记和格式设置选项,例如 font-based 图标(例如: `+icon:fire[]+`)和 UI 元素(例如: `+button:[Save]+`)。 Asciidoctor 还提供了一个基于 {uri-foundation}[Foundation] 的现代的、响应式主题来美化 HTML5 输出。 [#requirements] == 要求 Asciidoctor 可以在 Linux,OSX (Mac) 和 Windows,并且需要下面其中一个 {uri-ruby}[Ruby] 实现: * MRI (Ruby 1.8.7, 1.9.3, 2.0, 2.1, 2.2 & 2.3) * JRuby (1.7 in Ruby 1.8 and 1.9 modes, 9000) * Rubinius 2.2.x * Opal (JavaScript) 我们欢迎你来帮助在这些以及其他平台测试 Asciidoctor。 参考 <<{idprefix}contributing,Contributing>> 来学习如何参与进来。 [CAUTION] ==== 如果你使用一个非英语的 Windows 环境,当调用 Asciidoctor 时,可能会碰到 `Encoding::UndefinedConversionError` 错误。 为了解决这个问题,我们建议将控制台的编码更改为 UTF-8: chcp 65001 一旦你做了这个改变,所有的编码问题,都将迎刃而解。 只要你在任何地方都是 UTF-8,Asciidoctor 总会工作地很好。 ==== [#installation] == 安装 Asciidoctor 可以通过三种方式安装:(a) 使用 `gem install` 命令;(b) 使用 Bundler;(c) 流行的 Linux 发行版的包管理器 TIP: 使用 Linux 包管理器安装的好处是如果 Ruby 和 RubyGems 库没有在你的机器上安装,它会一并安装上去。 不利的是在 gem 发布之后,这类安装包并不是立即可用。 如果你需要最新版,你应该总是优先使用 `gem` 命令安装。 [#a-gem-install] === (a) gem 安装 打开一个终端输入如下命令(不含开头的 `$`): $ gem install asciidoctor 如果想安装一个预览版(比如:候选发布版),请使用: $ gem install asciidoctor --pre .升级 [TIP] ==== 如果你安装有一个旧版本的 Asciidoctor,你可以使用下面的命令来升级: $ gem update asciidoctor 如果使用 `gem install` 命令来安装一个新版本的 gem 来代替升级,则会安装多个版本。 如果是这种情况,使用下面的 gem 命令来移除旧版本: $ gem cleanup asciidoctor ==== [#b-bundler] === (b) Bundler . 在项目的根目录(或者当前路径),创建一个 `Gemfile` 文件; . 
在这个文件中添加 `asciidoctor` gem 如下: + [source] ---- source 'https://rubygems.org' gem 'asciidoctor' # 或者明确指明版本 # gem 'asciidoctor', '1.5.4' ---- . 保存 `Gemfile` 文件 . 打开终端,使用如下命令安装 gem: $ bundle 要升级 gem 的话,在 `Gemfile` 文件中,指明新版本,然后再次运行 `bundle` 即可。 *不推荐* 直接使用 `bundle update` 命令,因为它还会升级其他 gem,也许会造成不可预料的结果。 [#c-linux-package-managers] === (c) Linux 包管理 [#dnf-fedora-21-or-greater] ==== DNF (Fedora 21 或更高版本) 在 Fedora 21 或更高版本中安装这个 gem,可以使用 dnf。打开终端并输入如下命令: $ sudo dnf install -y asciidoctor 升级则使用: $ sudo dnf update -y asciidoctor TIP: 如果你的 Fedora 系统配置的是自动升级包,在这种情况下,不需要你亲自动手升级。 [#apt-get-debian-ubuntu-mint] ==== apt-get (Debian, Ubuntu, Mint) 在 Debian,Ubuntu 或 Mint 中安装这个 gem,请打开终端并输入如下命令: $ sudo apt-get install -y asciidoctor 升级则使用: $ sudo apt-get upgrade -y asciidoctor TIP: 如果你的 Debian 或 Ubuntu 系统配置的是自动升级包,在这种情况下,不需要你亲自动手升级。 使用包管理器( apt-get )安装的 Asciidoctor 的版本也许不是最新发布版。 请查看发行版的包库,来确定每个发行版是打包的哪个版本。 * https://packages.debian.org/search?keywords=asciidoctor&searchon=names&exact=1&suite=all§ion=all[Debian 发行版中的 asciidoctor] * http://packages.ubuntu.com/search?keywords=asciidoctor&searchon=names&exact=1&suite=all§ion=all[Ubuntu 发行版中的 asciidoctor] * https://community.linuxmint.com/software/view/asciidoctor[Mint 发行版中的 asciidoctor] [CAUTION] ==== 我们建议不要使用 `gem update` 来升级包管理的 gem。 这样做会使系统进入不一致的状态,包管理工具将不再跟踪相关文件(通常安装在 /usr/local 下。) 简单地说,系统的 gem 只能由包管理器进行管理。 如果你想使用一个比包管理器安装的更新版本的 Asciidoctor,你应该使用 http://rvm.io[RVM] 在你的用户家目录(比如:用户空间)下安装 Ruby。 然后,你就可以放心地使用 `gem` 命令来安装或者更新 Asciidoctor gem。 当使用 RVM 时,gem 将被安装到与系统隔离的位置。 ==== [#apk-alpine-linux] ==== apk (Alpine Linux) 在 Alpine Linux 中安装这个 gem,请打开终端并输入如下命令: $ sudo apk add asciidoctor 升级则使用: $ sudo apk add -u asciidoctor TIP: 如果你的 Alpine Linux 系统配置的是自动升级包,在这种情况下,不需要你亲自动手升级。 [#other-installation-options] === 其他安装选项 * {uri-install-docker}[使用 Docker 安装 Asciidoctor ] * {uri-install-osx-doc}[在 Mac OS X 安装 Asciidoctor ] [#usage] == 使用 如果成功安装 Asciidoctor,则在可执行程序路径中,`asciidoctor` 就可用了。 为了验证它的可用性,你可以在终端中执行如下命令: $ asciidoctor --version 你应该看到关于 Asciidoctor 和 Ruby 环境信息将打印到你的终端上。 [.output] .... Asciidoctor 1.5.4 [http://asciidoctor.org] Runtime Environment (ruby 2.2.2p95 [x86_64-linux]) (lc:UTF-8 fs:UTF-8 in:- ex:UTF-8) .... Asciidoctor 还提供了一套 API。 这套 API 是为了整合其他的 Ruby 软件,例如 Rails、Sinatra、Github,甚至其他语言,比如 Java (通过 {uri-asciidoctorj}[AsciidoctorJ]) 和 JavaScript (通过 {uri-asciidoctorjs}[Asciidoctor.js])。 [#command-line-interface-cli] === 命令行(CLI) `asciidoctor` 命令可以让你通过命令行(比如:终端)来调用 Asciidoctor。 下面的命令将 README.adoc 文件转化为 HTML,并且保存到同一目录下的 README.html 文件中。 生成的 HTML 文件名源自源文件名,只是将其扩展名改为了 `.html`。 $ asciidoctor README.adoc 您可以通过添加各种标志和开关控制 Asciidoctor 处理器,通过下面的命令你可以学习它的更多用法: $ asciidoctor --help 比如,将文件写入到不同路径里,使用如下命令: $ asciidoctor -D output README.adoc `asciidoctor` {uri-manpage}[帮助页面] 提供了这个命令的完整参考。 点击下面的资源,学习更多关于 `asciidoctor` 命令的用法。 * {uri-render-doc}[如何转化文档?] * {uri-themes-doc}[如何使用 Asciidoctor 样式工厂来创建自定义主题?] [#ruby-api] === Ruby API 为了在你应用中使用 Asciidoctor,首先需要引入这个 gem: [source] require 'asciidoctor' 然后,你可以通过下面的代码将 AsciiDoc 源文件转化成一个 HTML 文件: [source] Asciidoctor.convert_file 'README.adoc', to_file: true, safe: :safe WARNING: 当你通过 API 使用 Asciidoctor 时,默认的安全模式是 `:secure`。 在 secure 模式下,很多核心特性将不可用,包括 `include` 特性。 如果你想启用这些特性,你需要明确设置安全模式为 `:server` (推荐)或 `:safe`。 你也可以将 AsciiDoc 字符串转化我内嵌的 HTML (为了插入到一个 HTML 页面),用法如下: [source] ---- content = '_Zen_ in the art of writing http://asciidoctor.org[AsciiDoc].' Asciidoctor.convert content, safe: :safe ---- 如果你想得到完整的 HTML 文档,只需要启用 `header_footer` 选项即可。如下: [source] ---- content = '_Zen_ in the art of writing http://asciidoctor.org[AsciiDoc].' 
html = Asciidoctor.convert content, header_footer: true, safe: :safe ---- 如果你想访问已经处理过的文档,可以将转化过程拆分成离散的几步: [source] ---- content = '_Zen_ in the art of writing http://asciidoctor.org[AsciiDoc].' document = Asciidoctor.load content, header_footer: true, safe: :safe puts document.doctitle html = document.convert ---- 请注意:如果你不喜欢 Asciidoctor 输出结果,_你完全可以改变它。_ Asciidoctor 支持自定义转化器,它可以操作从待处理文件到生成文档整个环节。 一个简单的、细微地自定义输出的方式是使用模板转化器。 模板转化器运行你提供一个 {uri-tilt}[Tilt] 模板,这样通过模板文件来操作转化出的文档的每个节点。 这样,你就 _可以_ 百分之百地控制你的输出。 关于更多关于 API 或自定义输出信息,请参考 {uri-user-manual}[用户帮助手册]。 [#contributing] == 贡献 自由软件的精神鼓励 _每个人_ 来帮助改善这个项目。 如果你在源码、文档或网站内容中发现错误或漏洞,请不要犹豫,提交一个议题或者推送一个修复请求。 随时欢迎新的贡献者! 这里有几种 *你* 可以做出贡献的方式: * 使用预发布版本(alpha, beta 或 preview) * 报告 Bug * 提议新功能 * 编写文档 * 编写规范 * 编写 -- _任何补丁都不小。_ ** 修正错别字 ** 添加评论 ** 清理多余空白 ** 编写测试! * 重构代码 * 修复 {uri-issues}[议题] * 审查补丁 {uri-contribute}[贡献] 指南提供了如何提供贡献,包括如何创建、修饰和提交问题、特性、需求、代码和文档给 Asciidoctor 项目。 [#getting-help] == 获得帮助 开发 Asciidoctor 项目是未来了帮助你更容易地书写和发布你的内容。 但是,如果没有反馈,我们将寸步难行。 我们鼓励你在讨论组、Twitter或聊天室里,提问为题,讨论项目的方方面面, 讨论组 (Nabble):: {uri-discuss} Twitter:: #asciidoctor 井号或 @asciidoctor 提醒 聊天 (Gitter):: image:https://badges.gitter.im/Join%20In.svg[Gitter, link=https://gitter.im/asciidoctor/asciidoctor] ifdef::env-github[] Further information and documentation about Asciidoctor can be found on the project's website. {uri-project}/[Home] | {uri-news}[News] | {uri-docs}[Docs] endif::[] Asciidoctor 组织在 Github 托管代码、议案跟踪和相关子项目。 代码库 (git):: {uri-repo} 议案跟踪:: {uri-issues} 在 GitHub 的 Asciidoctor 组织:: {uri-org} [#copyright-and-licensing] == 版权和协议 Copyright (C) 2012-2016 Dan Allen, Ryan Waldron and the Asciidoctor Project. 这个软件的免费使用是在MIT许可条款授予的。 请看 {uri-license}[版权声明] 文件来获取更多详细信息。 [#authors] == 作者 *Asciidoctor* 由 https://github.com/mojavelinux[Dan Allen] 和 https://github.com/graphitefriction[Sarah White] 领导,并从 Asciidoctor 社区的 {uri-contributors}[很多其他独立开发者] 上收到了很多贡献。 项目最初由 https://github.com/erebor[Ryan Waldron] 于 2012年基于 https://github.com/nickh[Nick Hengeveld] 的 {uri-prototype}[原型] 创建。 *AsciiDoc* 由 Stuart Rackham 启动,并从 AsciiDoc 社区的其他独立开发者上收到很多贡献。 == Changelog 请看 {uri-changelog}[CHANGELOG]。 asciidoctor-1.5.5/README.adoc000066400000000000000000000717061277513741400156400ustar00rootroot00000000000000= Asciidoctor Dan Allen ; Sarah White ; Ryan Waldron v1.5.5, 2016-10-05 // settings: :page-layout: base :idprefix: :idseparator: - :source-language: ruby :language: {source-language} ifdef::env-github[:status:] // URIs: :uri-org: https://github.com/asciidoctor :uri-repo: {uri-org}/asciidoctor :uri-asciidoctorj: {uri-org}/asciidoctorj :uri-asciidoctorjs: {uri-org}/asciidoctor.js :uri-project: http://asciidoctor.org ifdef::env-site[:uri-project: link:] :uri-docs: {uri-project}/docs :uri-news: {uri-project}/news :uri-manpage: {uri-project}/man/asciidoctor :uri-issues: {uri-repo}/issues :uri-contributors: {uri-repo}/graphs/contributors :uri-rel-file-base: link: :uri-rel-tree-base: link: ifdef::env-site[] :uri-rel-file-base: {uri-repo}/blob/master/ :uri-rel-tree-base: {uri-repo}/tree/master/ endif::[] :uri-changelog: {uri-rel-file-base}CHANGELOG.adoc :uri-contribute: {uri-rel-file-base}CONTRIBUTING.adoc :uri-license: {uri-rel-file-base}LICENSE.adoc :uri-tests: {uri-rel-tree-base}test :uri-discuss: http://discuss.asciidoctor.org :uri-irc: irc://irc.freenode.org/#asciidoctor :uri-rubygem: https://rubygems.org/gems/asciidoctor :uri-what-is-asciidoc: {uri-docs}/what-is-asciidoc :uri-user-manual: {uri-docs}/user-manual :uri-install-docker: https://github.com/asciidoctor/docker-asciidoctor //:uri-install-doc: 
{uri-docs}/install-toolchain :uri-install-osx-doc: {uri-docs}/install-asciidoctor-macosx :uri-render-doc: {uri-docs}/render-documents :uri-themes-doc: {uri-docs}/produce-custom-themes-using-asciidoctor-stylesheet-factory :uri-gitscm-repo: https://github.com/git/git-scm.com :uri-prototype: {uri-gitscm-repo}/commits/master/lib/asciidoc.rb :uri-freesoftware: https://www.gnu.org/philosophy/free-sw.html :uri-foundation: http://foundation.zurb.com :uri-tilt: https://github.com/rtomayko/tilt :uri-ruby: https://ruby-lang.org // images: :image-uri-screenshot: https://raw.githubusercontent.com/asciidoctor/asciidoctor/master/screenshot.png {uri-project}/[Asciidoctor] is a _fast_ text processor and publishing toolchain for converting {uri-what-is-asciidoc}[AsciiDoc] content to HTML5, DocBook 5 (or 4.5) and other formats. Asciidoctor is written in Ruby, packaged as a RubyGem and published to {uri-rubygem}[RubyGems.org]. The gem is also included in several Linux distributions, including Fedora, Debian and Ubuntu. Asciidoctor is open source, {uri-repo}[hosted on GitHub] and released under {uri-license}[the MIT license]. ifndef::env-site[] .Translations of this document are available in the following languages: * {uri-rel-file-base}README-zh_CN.adoc[汉语] * {uri-rel-file-base}README-fr.adoc[Français] * {uri-rel-file-base}README-jp.adoc[日本語] endif::[] .Key documentation [.compact] * {uri-docs}/what-is-asciidoc[What is Asciidoc?] * {uri-docs}/asciidoc-writers-guide[AsciiDoc Writer's Guide] * {uri-docs}/asciidoc-syntax-quick-reference[AsciiDoc Syntax Reference] * {uri-docs}/user-manual[Asciidoctor User Manual] .Where Ruby goes, Asciidoctor follows **** You can run Asciidoctor on the JVM using JRuby. To invoke the Asciidoctor API directly from Java and other JVM languages, use {uri-asciidoctorj}[AsciidoctorJ]. There are plugins available, based on AsciidoctorJ, that integrate the Asciidoctor processor into Apache Maven, Gradle or Javadoc builds. Asciidoctor also runs in JavaScript. We use http://opalrb.org[Opal] to transcompile the Ruby source to JavaScript to produce {uri-asciidoctorjs}[Asciidoctor.js], a fully-functional version of Asciidoctor that works in any JavaScript environment, such as a web browser or Node.js. Asciidoctor.js is used to power the AsciiDoc preview extensions for Chrome, Atom, Brackets and other web-based tooling. **** ifdef::status[] .*Project health* image:https://img.shields.io/travis/asciidoctor/asciidoctor/master.svg[Build Status (Travis CI), link=https://travis-ci.org/asciidoctor/asciidoctor] image:https://ci.appveyor.com/api/projects/status/ifplu67oxvgn6ceq/branch/master?svg=true&passingText=green%20bar&failingText=%23fail&pendingText=building%2E%2E%2E[Build Status (AppVeyor), link=https://ci.appveyor.com/project/asciidoctor/asciidoctor] //image:https://img.shields.io/coveralls/asciidoctor/asciidoctor/master.svg[Coverage Status, link=https://coveralls.io/r/asciidoctor/asciidoctor] image:https://codeclimate.com/github/asciidoctor/asciidoctor/badges/gpa.svg[Code Climate, link="https://codeclimate.com/github/asciidoctor/asciidoctor"] image:https://inch-ci.org/github/asciidoctor/asciidoctor.svg?branch=master[Inline docs, link="https://inch-ci.org/github/asciidoctor/asciidoctor"] endif::[] == The Big Picture Asciidoctor reads content written in plain text, as shown in the panel on the left in the image below, and converts it to HTML5, as shown rendered in the right panel. Asciidoctor applies a default stylesheet to the HTML5 document to provide a pleasant out-of-the-box experience. 
image::{image-uri-screenshot}[Preview of AsciiDoc source and corresponding rendered HTML] == AsciiDoc Processing Asciidoctor reads and parses text written in the AsciiDoc syntax, then feeds the parse tree to a set of built-in converters to produce HTML5, DocBook 5 (or 4.5) or man(ual) page output. You have the option of using your own converter or loading {uri-tilt}[Tilt]-supported templates to customize the generated output or produce additional formats. NOTE: Asciidoctor is a drop-in replacement for the original AsciiDoc Python processor (`asciidoc.py`). The Asciidoctor test suite has {uri-tests}[> 1,600 tests] to ensure compatibility with the AsciiDoc syntax. In addition to the classic AsciiDoc syntax, Asciidoctor recognizes additional markup and formatting options, such as font-based icons (e.g., `+icon:fire[]+`) and UI elements (e.g., `+button:[Save]+`). Asciidoctor also offers a modern, responsive theme based on {uri-foundation}[Foundation] to style the HTML5 output. == Requirements Asciidoctor works on Linux, OS X (Mac) and Windows and requires one of the following implementations of {uri-ruby}[Ruby]: * MRI (Ruby 1.8.7, 1.9.3, 2.0, 2.1, 2.2 & 2.3) * JRuby (1.7 in Ruby 1.8 and 1.9 modes, 9000) * Rubinius 2.2.x * Opal (JavaScript) We welcome your help testing Asciidoctor on these and other platforms. Refer to the <> section to learn how to get involved. [CAUTION] ==== If you're using a non-English Windows environment, you may bump into an `Encoding::UndefinedConversionError` when invoking Asciidoctor. To solve this issue, we recommend changing the active code page in your console to UTF-8: chcp 65001 Once you make this change, all your Unicode headaches will be behind you. If you're using an IDE like Eclipse, make sure you set the encoding to UTF-8 there as well. Asciidoctor works best when you use UTF-8 everywhere. ==== == Installation Asciidoctor can be installed using (a) the `gem install` command, (b) Bundler or (c) package managers for popular Linux distributions. TIP: The benefit of using a Linux package manager to install the gem is that it handles installing Ruby and the RubyGems library if those packages are not already installed on your machine. The drawback is that the package may not be available immediately after the release of the gem. If you need the latest version, you can always fallback to using the `gem` command. === (a) gem install Open a terminal and type (excluding the leading `$`): $ gem install asciidoctor If you want to install a pre-release version (e.g., a release candidate), use: $ gem install asciidoctor --pre .Upgrading your installation [TIP] ==== If you have an earlier version of Asciidoctor installed, you can update it using: $ gem update asciidoctor If you install a new version of the gem using `gem install` instead of gem update, you'll have multiple versions installed. If that's the case, use the following gem command to remove the old versions: $ gem cleanup asciidoctor ==== === (b) Bundler . Create a Gemfile in the root folder of your project (or the current directory) . Add the `asciidoctor` gem to your Gemfile as follows: + [source] ---- source 'https://rubygems.org' gem 'asciidoctor' # or specify the version explicitly # gem 'asciidoctor', '1.5.5' ---- . Save the Gemfile . Open a terminal and install the gem using: $ bundle To upgrade the gem, specify the new version in the Gemfile and run `bundle` again. Using `bundle update` is *not* recommended as it will also update other gems, which may not be the desired result. 
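Once the gem is installed through Bundler, you can invoke the version pinned in your Gemfile by prefixing the command with `bundle exec`.
(This is standard Bundler behavior rather than anything specific to Asciidoctor.)
For example:

 $ bundle exec asciidoctor README.adoc

Using `bundle exec` ensures the `asciidoctor` executable resolves to the version declared in your Gemfile rather than whichever version happens to appear first on your PATH.
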
=== (c) Linux package managers ==== DNF (Fedora 21 or greater) To install the gem on Fedora 21 or greater using dnf, open a terminal and type: $ sudo dnf install -y asciidoctor To upgrade the gem, use: $ sudo dnf update -y asciidoctor TIP: Your system may be configured to automatically update rpm packages, in which case no action is required by you to update the gem. ==== apt-get (Debian, Ubuntu, Mint) To install the gem on Debian, Ubuntu or Mint, open a terminal and type: $ sudo apt-get install -y asciidoctor To upgrade the gem, use: $ sudo apt-get upgrade -y asciidoctor TIP: Your system may be configured to automatically update deb packages, in which case no action is required by you to update the gem. The version of Asciidoctor installed by the package manager (apt-get) may not match the latest release of Asciidoctor. Consult the package repository for your distribution to find out which version is packaged per distribution release. * https://packages.debian.org/search?keywords=asciidoctor&searchon=names&exact=1&suite=all§ion=all[asciidoctor package by Debian release] * http://packages.ubuntu.com/search?keywords=asciidoctor&searchon=names&exact=1&suite=all§ion=all[asciidoctor package by Ubuntu release] * https://community.linuxmint.com/software/view/asciidoctor[asciidoctor package by Mint release] [CAUTION] ==== You're advised against using the `gem update` command to update a gem managed by the package manager. Doing so puts the system into an inconsistent state as the package manager can no longer track the files (which get installed under /usr/local). Simply put, system gems should only be managed by the package manager. If you want to use a version of Asciidoctor that is newer than what is installed by the package manager, you should use http://rvm.io[RVM] to install Ruby in your home directory (i.e., user space). Then, you can safely use the `gem` command to install or update the Asciidoctor gem. When using RVM, gems are installed in a location isolated from the system. ==== ==== apk (Alpine Linux) To install the gem on Alpine Linux, open a terminal and type: $ sudo apk add asciidoctor To upgrade the gem, use: $ sudo apk add -u asciidoctor TIP: Your system may be configured to automatically update apk packages, in which case no action is required by you to update the gem. === Other installation options * {uri-install-docker}[Installing Asciidoctor using Docker] * {uri-install-osx-doc}[Installing Asciidoctor on Mac OS X] // at the moment, the following entry is just a reiteration of the information in this README //* {uri-install-doc}[Installing the Asciidoctor toolchain] == Usage If the Asciidoctor gem installed successfully, the `asciidoctor` command line interface (CLI) will be available on your PATH. To verify it's available, run the following in your terminal: $ asciidoctor --version You should see information about the Asciidoctor version and your Ruby environment printed in the terminal. [.output] .... Asciidoctor 1.5.5 [http://asciidoctor.org] Runtime Environment (ruby 2.2.2p95 [x86_64-linux]) (lc:UTF-8 fs:UTF-8 in:- ex:UTF-8) .... Asciidoctor also provides an API. The API is intended for integration with other Ruby software, such as Rails, Sinatra and GitHub, and other languages, such as Java (via {uri-asciidoctorj}[AsciidoctorJ]) and JavaScript (via {uri-asciidoctorjs}[Asciidoctor.js]). === Command line interface (CLI) The `asciidoctor` command allows you to invoke Asciidoctor from the command line (i.e., a terminal). 
The following command converts the file README.adoc to HTML and saves the result to the file README.html in the same directory. The name of the generated HTML file is derived from the source file by changing its file extension to `.html`. $ asciidoctor README.adoc You can control the Asciidoctor processor by adding various flags and switches, which you can learn about using: $ asciidoctor --help For instance, to write the file to a different directory, use: $ asciidoctor -D output README.adoc The `asciidoctor` {uri-manpage}[man page] provides a complete reference of the command line interface. Refer to the following resources to learn more about how to use the `asciidoctor` command. * {uri-render-doc}[How do I convert a document?] * {uri-themes-doc}[How do I use the Asciidoctor stylesheet factory to produce custom themes?] === Ruby API To use Asciidoctor in your application, you first need to require the gem: [source] require 'asciidoctor' You can then convert an AsciiDoc source file to an HTML file using: [source] Asciidoctor.convert_file 'README.adoc', to_file: true, safe: :safe WARNING: When using Asciidoctor via the API, the default safe mode is `:secure`. In secure mode, several core features are disabled, including the `include` directive. If you want to enable these features, you'll need to explicitly set the safe mode to `:server` (recommended) or `:safe`. You can also convert an AsciiDoc string to embeddable HTML (for inserting in an HTML page) using: [source] ---- content = '_Zen_ in the art of writing http://asciidoctor.org[AsciiDoc].' Asciidoctor.convert content, safe: :safe ---- If you want the full HTML document, enable the `header_footer` option as follows: [source] ---- content = '_Zen_ in the art of writing http://asciidoctor.org[AsciiDoc].' html = Asciidoctor.convert content, header_footer: true, safe: :safe ---- If you need access to the parsed document, you can split the conversion into discrete steps: [source] ---- content = '_Zen_ in the art of writing http://asciidoctor.org[AsciiDoc].' document = Asciidoctor.load content, header_footer: true, safe: :safe puts document.doctitle html = document.convert ---- Keep in mind that if you don't like the output Asciidoctor produces, _you can change it!_ Asciidoctor supports custom converters that can handle converting from the parsed document to the generated output. One easy way to customize the output piecemeal is by using the template converter. The template converter allows you to supply a {uri-tilt}[Tilt]-supported template file to handle converting any node in the document. However you go about it, you _can_ have 100% control over the output. For more information about how to use the API or to customize the output, refer to the {uri-user-manual}[user manual]. == Contributing In the spirit of {uri-freesoftware}[free software], _everyone_ is encouraged to help improve this project. If you discover errors or omissions in the source code, documentation, or website content, please don't hesitate to submit an issue or open a pull request with a fix. New contributors are always welcome! Here are some ways *you* can contribute: * by using prerelease (alpha, beta or preview) versions * by reporting bugs * by suggesting new features * by writing or editing documentation * by writing specifications * by writing code -- _No patch is too small._ ** fix typos ** add comments ** clean up inconsistent whitespace ** write tests! 
* by refactoring code * by fixing {uri-issues}[issues] * by reviewing patches The {uri-contribute}[Contributing] guide provides information on how to create, style, and submit issues, feature requests, code, and documentation to the Asciidoctor Project. == Getting Help The Asciidoctor project is developed to help you easily write and publish your content. But we can't do it without your feedback! We encourage you to ask questions and discuss any aspects of the project on the discussion list, on Twitter or in the chat room. Discussion list (Nabble):: {uri-discuss} Twitter:: #asciidoctor hashtag or @asciidoctor mention Chat (Gitter):: image:https://badges.gitter.im/Join%20In.svg[Gitter, link=https://gitter.im/asciidoctor/asciidoctor] //// Chat (IRC):: {uri-irc}[#asciidoctor] on FreeNode IRC //// ifdef::env-github[] Further information and documentation about Asciidoctor can be found on the project's website. {uri-project}/[Home] | {uri-news}[News] | {uri-docs}[Docs] endif::[] The Asciidoctor organization on GitHub hosts the project's source code, issue tracker, and sub-projects. Source repository (git):: {uri-repo} Issue tracker:: {uri-issues} Asciidoctor organization on GitHub:: {uri-org} == Copyright and Licensing Copyright (C) 2012-2016 Dan Allen, Ryan Waldron and the Asciidoctor Project. Free use of this software is granted under the terms of the MIT License. See the {uri-license}[LICENSE] file for details. == Authors *Asciidoctor* is led by https://github.com/mojavelinux[Dan Allen] and https://github.com/graphitefriction[Sarah White] and has received contributions from {uri-contributors}[many other individuals] in Asciidoctor's awesome community. The project was initiated in 2012 by https://github.com/erebor[Ryan Waldron] and based on {uri-prototype}[a prototype] written by https://github.com/nickh[Nick Hengeveld]. *AsciiDoc* was started by Stuart Rackham and has received contributions from many other individuals in the AsciiDoc community. 
== Changelog == 1.5.5 (2016-10-05) - @mojavelinux Enhancements:: * Add preference to limit the maximum size of an attribute value (#1861) * Honor SOURCE_DATE_EPOCH environment variable to accomodate reproducible builds (@JojoBoulix) (#1721) * Add reversed attribute to ordered list if reversed option is enabled (#1830) * Add support for additional docinfo locations (e.g., :header) * Configure default stylesheet to break monospace word if exceeds length of line; add roles to prevent breaks (#1814) * Introduce translation file for built-in labels (@ciampix) * Provide translations for built-in labels (@JmyL - kr, @ciampix - it, @ivannov - bg, @maxandersen - da, @radcortez - pt, @eddumelendez - es, @leathersole - jp, @aslakknutsen - no, @shahryareiv - fa, @AlexanderZobkov - ru, @dongwq - zh, @rmpestano - pt_BR, @ncomet - fr, @lgvz - fi, @patoi - hu, @BojanStipic - sr, @fwilhe - de, @rahmanusta - tr, @abelsromero - ca, @aboullaite - ar, @roelvs - nl) * Translate README to Chinese (@diguage) * Translate README to Japanese (@Mizuho32) Improvements:: * Style nested emphasized phrases properly when using default stylesheet (#1691) * Honor explicit table width even when autowidth option is set (#1843) * Only explicit noheader option on table should disable implicit table header (#1849) * Support docbook orient="land" attribute on tables (#1815) * Add alias named list to retrieve parent List of ListItem * Update push_include method to support chaining (#1836) * Enable font smoothing on Firefox on OSX (#1837) * Support combined use of sectanchors and sectlinks in HTML5 output (#1806) * fix API docs for find_by * Upgrade to Font Awesome 4.6.3 (@allenan, @mogztter) (#1723) * README: add install instructions for Alpine Linux * README: Switch yum commands to dnf in README * README: Mention Mint as a Debian distro that packages Asciidoctor * README: Add caution advising against using gem update to update a system-managed gem (@oddhack) * README: sync French version with English version (@flashcode) * Add missing endline after title element when converting open block to HTML * Move list_marker_keyword method from AbstractNode to AbstractBlock * Rename definition list to description list internally Compliance:: * Support 6-digit decimal char refs, 5-digit hexidecimal char refs (#1824) * Compatibility fixes for Opal * Check for number using Integer instead of Fixnum class for compatibility with Ruby 2.4 Bug fixes:: * Use method_defined? instead of respond_to? 
to check if method is already defined when patching (#1838) * Fix invalid conditional in HTML5 converter when handling of SVG * Processor#parse_content helper no longer shares attribute list between blocks (#1651) * Fix infinite loop if unordered list marker is immediately followed by a dot (#1679) * Don't break SVG source when cleaning if svg start tag name is immediately followed by endline (#1676) * Prevent template converter from crashing if .rb file found in template directory (#1827) * Fix crash when generating section ID when both idprefix & idseparator are blank (#1821) * Use stronger CSS rule for general text color in Pygments stylesheet (#1802) * Don't duplicate forward slash for path relative to root (#1822) Infrastructure:: * Build gem properly in the absense of a git workspace, make compatible with JRuby (#1779) * Run tests in CI using latest versions of Ruby, including Ruby 2.3 (@ferdinandrosario) == 1.5.4 (2016-01-03) - @mojavelinux Enhancements:: * translate README into French (@anthonny, @mogztter, @gscheibel, @mgreau) (#1630) * allow linkstyle in manpage output to be configured (#1610) Improvements:: * upgrade to MathJax 2.6.0 and disable loading messages * upgrade to Font Awesome 4.5.0 * disable toc if document has no sections (#1633) * convert inline asciimath to MathML (using asciimath gem) in DocBook converter (#1622) * add attribute to control build reproducibility (@bk2204) (#1453) * recognize \file:/// as a file root in Opal browser env (#1561) * honor icon attribute on admonition block when font-based icons are enabled (@robertpanzer) (#1593) * resolve custom icon relative to iconsdir; add file extension if absent (#1634) * allow asciidoctor cli to resolve library path when invoked without leading ./ Compliance:: * allow special section to be nested at any depth (#1591) * ensure colpcwidth values add up to 100%; increase precision of values to 4 decimal places (#1647) * ignore blank cols attribute on table (#1647) * support shorthand syntax for block attributes on document title (#1650) Bug fixes:: * don't include default toc in AsciiDoc table cell; don't pass toc location attributes to nested document (#1582) * guard against nil dlist list item in find_by (#1618) * don't swallow trailing line when include file is not readable (#1602) * change xlink namespace to xl in DocBook 5 output to prevent parse error (#1597) * make callouts globally unique within document, including AsciiDoc table cells (#1626) * initialize Slim-related attributes regardless of when Slim was loaded (@terceiro) (#1576) * differentiate literal backslash from escape sequence in manpage output (@ds26gte) (#1604) * don't mistake line beginning with \. 
for troff macro in manpage output (@ds26gte) (#1589) * escape leading dots so user content doesn't trigger troff macros in manpage output (@ds26gte) (#1631) * use \c after .URL macro to remove extraneous space in manpage output (@ds26gte) (#1590) * fix missing endline after .URL macro in manpage output (#1613) * properly handle spacing around .URL/.MTO macro in manpage output (@ds26gte) (#1641) * don't swallow doctitle attribute followed by block title (#1587) * change strategy for splitting names of author; fixes bug in Opal/Asciidoctor.js * don't fail if library is loaded more than once Infrastructure:: * remove trailing endlines in project source code * update contributing guidelines * explicitly test ifeval scenario raised in issue #1585 * remove backreference substitution hack for Opal/Asciidoctor.js * fix assignment of default Hash value for Opal/Asciidoctor.js * add JRuby 9.0.4.0 and Ruby 2.3.0 to the Travis CI build matrix == 1.5.3 (2015-10-31) - @mojavelinux Enhancements:: * add support for interactive & inline SVGs (#1301, #1224) * add built-in manpage backend (@davidgamba) (#651) * create Mallard backend; asciidoctor/asciidoctor-mallard (@bk2204) (#425) * add AsciiMath to MathML converter to support AsciiMath in DocBook converter (@pepijnve) (#954) * allow text of selected lines to be highlighted in source block by Pygments or CodeRay (#1429) * use value of `docinfo` attribute to control docinfo behavior (#1510) * add `docinfosubs` attribute to control which substitutions are performed on docinfo files (@mogztter) (#405) * drop ability to specify multiple attributes with a single `-a` flag when using the CLI (@mogztter) (#405) * make subtitle separator chars for document title configurable (@rmannibucau) (#1350) * make XrefInlineRx regexp more permissive (Mathieu Boespflug) (#844) Improvements:: * load JavaScript and CSS at bottom of HTML document (@mogztter) (#1238) * list available backends in help text (@plaindocs) (#1271) * properly expand tabs in literal text (#1170, #841) * add `source-indent` as document attribute (@mogztter) (#1169) * upgrade MathJax to 2.5.3 (#1329) * upgrade Font Awesome to 4.4.0 (@mogztter) (#1465) * upgrade highlight.js to 8.6 (now 8.9.1) (#1390) * don't abort if syntax highlighter isn't available (#1253) * insert docinfo footer below footer div (#1503) * insert toc at default location in embeddable HTML (#1443) * replace _ and - in generated alt text for inline images * restore attributes to header attributes after parse (#1255) * allow docdate and doctime to be overridden (#1495) * add CSS class `.center` for center block alignment (#1456) * recognize U+2022 (bullet) as alternative marker for unordered lists (@mogztter) (#1177) * allow videos to work for local files by prepending asset-uri-scheme (Chris) (#1320) * always assign playlist param when loop option is enabled for YouTube video * parse isolated version in revision line (@bk2204) (#790) * autoload Tilt when template converter is instantiated (#1313) * don't overwrite existing id entry in references table (#1256) * use outfilesuffix attribute defined in header when resolving outfile (#1412) * make AsciiDoc safe mode option on Slim engine match document (#1347) * honor htmlsyntax attribute when backend is html/html5 (#1530) * tighten spacing of wrapped lines in TOC (#1542) * tune padding around table cells in horizontal dlist (#1418) * load Droid Sans Mono 700 in default stylesheet * set line height of table cells used for syntax highlighting * set font-family of kbd; refine styling (#1423) 
* extract condition into `quote_lines?` method (@mogztter) * extract inline code into `read_paragraph` method (@mogztter) * parent of block in ListItem should be ListItem (#1359) * add helper methods to List and ListItem (#1551) * add method `AbstractNode#add_role` and `AbstractNode#remove_role` (@robertpanzer) (#1366) * introduce helper methods for sniffing URIs (#1422) * add helper to calculate basename without file extension * document `-I` and `-r` options in the manual page (@bk2204) * fix `+--help+` output text for `-I` (@bk2204) * don't require open-uri-cached if already loaded * do not attempt to scan pattern of non-existent directory in template converter * prevent CodeRay from bolding every 10th line number Compliance:: * use `` for footnote reference in text instead of `` (#1523) * fix alignment of wrapped text in footnote (#1524) * include full stop after footnote number in embeddable HTML * show manpage title & name section in embeddable HTML (#1179) * resolve missing attribute in ifeval to empty string (#1387) * support unbreakable & breakable options on table (rockyallen) (#1140) Bug fixes:: * don't truncate exception stack in `Asciidoctor.load` (#1248) * don't fail to save cause of Java exception (@robertpanzer) (#1458) * fix precision error in timings report (#1342) * resolve regexp for inline macro lazily (#1336) * block argument to `find_by` should filter results (#1393) * strip comment lines in indented text of dlist item (#1537) * preserve escaped delimiter at end of line in a table (#1306) * correctly calculate colnames for implicit columns (#1556) * don't crash if colspan exceeds colspec (#1460) * account for empty records in colspec (#1375) * ignore empty cols attribute on table * use `.inspect` to print MathJax delimiters (again) (#1198) * use while loop instead of begin/while loop to address bug in Asciidoctor.js (#1408) * force encoding of attribute values passed from cli (#1191) * don't copy css if stylesheet or stylesdir is a URI (#1400) * fix invalid color value in default CodeRay theme * built-in writer no longer fails if output is nil (#1544) * custom template engine options should take precedence * fallback to require with a non-relative path to support Debian package (@mogztter) * pass opts to recursive invocations of `PathResolver#system_path` * fix and test external links in docbook backend * use format symbol `:html` instead of `:html5` for Slim to fix warnings * fix documentation for inline_macro and block_macro (Andrea Bedini) * fix grammar in warning messages regarding thread_safe gem Infrastructure:: * migrate opal_ext from core to Asciidoctor.js (#1517) * add Ruby 2.2 to CI build; only specify minor Ruby versions * enable containerized builds on Travis CI * add config to run CI build on AppVeyor * exclude benchmark folder from gem (#1522) Refer to the {uri-changelog}[CHANGELOG] for a complete list of changes in older releases. 
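As a follow-up to the custom converter support mentioned in the Ruby API section above, here is a minimal sketch of a converter registered for a custom backend. It relies only on the documented converter API (`include Asciidoctor::Converter` plus `register_for`); the backend name `shout`, the class name, and the tiny set of node types handled are made up for illustration and are not part of the project.

[source]
----
require 'asciidoctor'

# A tiny, illustrative converter: it only emits the document shell and
# paragraphs; every other node type falls through to a placeholder comment.
class ShoutConverter
  include Asciidoctor::Converter
  register_for 'shout'

  def convert node, transform = nil, opts = {}
    case (transform ||= node.node_name)
    when 'document', 'embedded'
      node.content
    when 'paragraph'
      %(<p>#{node.content.upcase}</p>)
    else
      %(<!-- #{transform} not handled by this sketch -->)
    end
  end
end

puts Asciidoctor.convert 'Hello from a custom converter.', backend: 'shout', safe: :safe
----

A real converter would handle the full set of node types (sections, lists, tables, inline nodes, and so on), but the registration and dispatch pattern stays the same.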
asciidoctor-1.5.5/Rakefile000066400000000000000000000102061277513741400155040ustar00rootroot00000000000000require File.expand_path '../lib/asciidoctor/version', __FILE__ def prepare_test_env # rather than hardcoding gc settings in test task, # could use https://gist.github.com/benders/788695 ENV['RUBY_GC_MALLOC_LIMIT'] = 128_000_000.to_s ENV['RUBY_GC_OLDMALLOC_LIMIT'] = 128_000_000.to_s if RUBY_VERSION >= '2.1' ENV['RUBY_GC_HEAP_INIT_SLOTS'] = 800_000.to_s ENV['RUBY_GC_HEAP_FREE_SLOTS'] = 800_000.to_s ENV['RUBY_GC_HEAP_GROWTH_MAX_SLOTS'] = 250_000.to_s ENV['RUBY_GC_HEAP_GROWTH_FACTOR'] = 1.25.to_s else ENV['RUBY_FREE_MIN'] = 800_000.to_s end end begin require 'rake/testtask' Rake::TestTask.new(:test) do |test| prepare_test_env puts %(LANG: #{ENV['LANG']}) if ENV.key? 'TRAVIS_BUILD_ID' test.libs << 'test' test.pattern = 'test/**/*_test.rb' test.verbose = true test.warning = true end task :default => :test rescue LoadError end =begin # Run tests with Encoding.default_external set to US-ASCII begin Rake::TestTask.new(:test_us_ascii) do |test| prepare_test_env puts "LANG: #{ENV['LANG']}" test.libs << 'test' test.pattern = 'test/**/*_test.rb' test.ruby_opts << '-EUS-ASCII' if RUBY_VERSION >= '1.9' test.verbose = true test.warning = true end rescue LoadError end =end begin require 'cucumber/rake/task' Cucumber::Rake::Task.new(:features) do |t| end rescue LoadError end def ci_setup_tasks tasks = [] begin require 'ci/reporter/rake/minitest' tasks << 'ci:setup:minitest' # FIXME reporter for Cucumber tests not activating #require 'ci/reporter/rake/cucumber' #tasks << 'ci:setup:cucumber' rescue LoadError end if ENV['SHIPPABLE'] && RUBY_VERSION >= '1.9.3' tasks end desc 'Activates coverage and JUnit-style XML reports for tests' task :coverage => ci_setup_tasks do # exclude coverage run for Ruby 1.8.7 or (disabled) if running on Travis CI ENV['COVERAGE'] = 'true' if RUBY_VERSION >= '1.9.3' # && (ENV['SHIPPABLE'] || !ENV['TRAVIS_BUILD_ID']) ENV['CI_REPORTS'] = 'shippable/testresults' ENV['COVERAGE_REPORTS'] = 'shippable/codecoverage' end namespace :test do desc 'Run unit and feature tests' task :all => [:test,:features] end =begin begin require 'rdoc/task' RDoc::Task.new do |rdoc| rdoc.rdoc_dir = 'rdoc' rdoc.title = "Asciidoctor #{Asciidoctor::VERSION}" rdoc.markup = 'tomdoc' if rdoc.respond_to?(:markup) rdoc.rdoc_files.include('LICENSE.adoc', 'lib/**/*.rb') end rescue LoadError end =end begin require 'yard' require 'yard-tomdoc' require './lib/asciidoctor' require './lib/asciidoctor/extensions' # Prevent YARD from breaking command statements in literal paragraphs class CommandBlockPostprocessor < Asciidoctor::Extensions::Postprocessor def process document, output output.gsub(/
<pre>\$ (.+?)<\/pre>/m, '<pre class="command">$ \1</pre>
') end end Asciidoctor::Extensions.register do postprocessor CommandBlockPostprocessor end # register .adoc extension for AsciiDoc markup helper YARD::Templates::Helpers::MarkupHelper::MARKUP_EXTENSIONS[:asciidoc] = %w(adoc) YARD::Rake::YardocTask.new do |yard| yard.files = %w( lib/**/*.rb - CHANGELOG.adoc LICENSE.adoc ) # --no-highlight enabled to prevent verbatim blocks in AsciiDoc that begin with $ from being dropped # need to patch htmlify method to not attempt to syntax highlight blocks (or fix what's wrong) yard.options = (IO.readlines '.yardopts').map {|l| l.chomp.delete('"').split ' ', 2 }.flatten end rescue LoadError end begin require 'bundler/gem_tasks' # Enhance the release task to create an explicit commit for the release #Rake::Task[:release].enhance [:commit_release] # NOTE you don't need to push after updating version and committing locally # WARNING no longer works; it's now necessary to get master in a state ready for tagging task :commit_release do Bundler::GemHelper.new.send(:guard_clean) sh "git commit --allow-empty -a -m 'Release #{Asciidoctor::VERSION}'" end rescue LoadError end desc 'Open an irb session preloaded with this library' task :console do sh 'bundle console', :verbose => false end asciidoctor-1.5.5/_settings-README.adoc000066400000000000000000000035351277513741400176300ustar00rootroot00000000000000// settings: :page-layout: base :idprefix: :idseparator: - :source-language: ruby :language: {source-language} ifdef::env-github[:badges:] // URIs: :uri-org: https://github.com/asciidoctor :uri-repo: {uri-org}/asciidoctor :uri-asciidoctorj: {uri-org}/asciidoctorj :uri-asciidoctorjs: {uri-org}/asciidoctor.js :uri-project: http://asciidoctor.org ifdef::env-site[:uri-project: link:] :uri-docs: {uri-project}/docs :uri-news: {uri-project}/news :uri-manpage: {uri-project}/man/asciidoctor :uri-issues: {uri-repo}/issues :uri-contributors: {uri-repo}/graphs/contributors :uri-rel-file-base: link: :uri-rel-tree-base: link: ifdef::env-site[] :uri-rel-file-base: {uri-repo}/blob/master/ :uri-rel-tree-base: {uri-repo}/tree/master/ endif::[] :uri-changelog: {uri-rel-file-base}CHANGELOG.adoc :uri-contribute: {uri-rel-file-base}CONTRIBUTING.adoc :uri-license: {uri-rel-file-base}LICENSE.adoc :uri-tests: {uri-rel-tree-base}test :uri-discuss: http://discuss.asciidoctor.org :uri-irc: irc://irc.freenode.org/#asciidoctor :uri-rubygem: https://rubygems.org/gems/asciidoctor :uri-what-is-asciidoc: {uri-docs}/what-is-asciidoc :uri-user-manual: {uri-docs}/user-manual :uri-install-docker: https://github.com/asciidoctor/docker-asciidoctor //:uri-install-doc: {uri-docs}/install-toolchain :uri-install-osx-doc: {uri-docs}/install-asciidoctor-macosx :uri-render-doc: {uri-docs}/render-documents :uri-themes-doc: {uri-docs}/produce-custom-themes-using-asciidoctor-stylesheet-factory :uri-gitscm-repo: https://github.com/git/git-scm.com :uri-prototype: {uri-gitscm-repo}/commits/master/lib/asciidoc.rb :uri-freesoftware: https://www.gnu.org/philosophy/free-sw.html :uri-foundation: http://foundation.zurb.com :uri-tilt: https://github.com/rtomayko/tilt :uri-ruby: https://ruby-lang.org // images: :image-uri-screenshot: https://raw.githubusercontent.com/asciidoctor/asciidoctor/master/screenshot.png asciidoctor-1.5.5/appveyor.yml000066400000000000000000000014751277513741400164370ustar00rootroot00000000000000version: '{build}' skip_tags: true # AppVeyor automatically skips the build if the commit contains [ci skip] or [skip ci] #skip_commits: # message: /\[ci skip\]/ clone_depth: 10 environment: matrix: # 
there's a problem loading nokogiri 1.5.11 in Ruby 2.x on Windows #- ruby_version: '21' #- ruby_version: '21-x64' #- ruby_version: '200' #- ruby_version: '200-x64' - ruby_version: '193' install: # Take default Ruby out of path - SET PATH=%PATH:C:\Ruby193\bin;=% # Add Ruby to path from build matrix - SET PATH=C:\Ruby%ruby_version%\bin;%PATH% - echo %PATH% - ruby --version - gem --version - gem install bundler --quiet --no-ri --no-rdoc - bundler --version - bundle build_script: - bundle exec rake build test_script: - bundle exec rake test:all artifacts: - path: pkg\*.gem asciidoctor-1.5.5/asciidoctor.gemspec000066400000000000000000000047201277513741400177130ustar00rootroot00000000000000# -*- encoding: utf-8 -*- require File.expand_path '../lib/asciidoctor/version', __FILE__ require 'open3' unless defined? Open3 Gem::Specification.new do |s| s.name = 'asciidoctor' s.version = Asciidoctor::VERSION s.summary = 'An implementation of the AsciiDoc text processor and publishing toolchain in Ruby' s.description = 'A fast, open source text processor and publishing toolchain, written in Ruby, for converting AsciiDoc content to HTML5, DocBook 5 (or 4.5) and other formats.' s.authors = ['Dan Allen', 'Sarah White', 'Ryan Waldron', 'Jason Porter', 'Nick Hengeveld', 'Jeremy McAnally'] s.email = ['dan.j.allen@gmail.com'] s.homepage = 'http://asciidoctor.org' s.license = 'MIT' files = begin (result = Open3.popen3('git ls-files -z') {|_, out| out.read }.split %(\0)).empty? ? Dir['**/*'] : result rescue Dir['**/*'] end s.files = files.grep(/^(?:(?:data|lib|man)\/.+|Gemfile|Rakefile|(?:CHANGELOG|CONTRIBUTING|LICENSE|README(?:-\w+)?)\.adoc|#{s.name}\.gemspec)$/) s.executables = files.grep(/^bin\//).map {|f| File.basename f } s.test_files = files.grep(/^(?:test\/.*_test\.rb|features\/.*\.(?:feature|rb))$/) s.require_paths = ['lib'] s.has_rdoc = true s.rdoc_options = ['--charset=UTF-8'] s.extra_rdoc_files = ['CHANGELOG.adoc', 'CONTRIBUTING.adoc', 'LICENSE.adoc'] # asciimath is needed for testing AsciiMath in DocBook backend s.add_development_dependency 'asciimath', '~> 1.0.2' # coderay is needed for testing syntax highlighting s.add_development_dependency 'coderay', '~> 1.1.0' s.add_development_dependency 'cucumber', '~> 1.3.1' # erubis is needed for testing use of alternative eRuby impls s.add_development_dependency 'erubis', '~> 2.7.0' # haml is needed for testing custom templates s.add_development_dependency 'haml', '~> 4.0.0' s.add_development_dependency 'nokogiri', '~> 1.5.10' s.add_development_dependency 'rake', '~> 10.0.0' s.add_development_dependency 'rspec-expectations', '~> 2.14.0' # slim is needed for testing custom templates s.add_development_dependency 'slim', '~> 2.0.0' s.add_development_dependency 'thread_safe', '~> 0.3.4' # tilt is needed for testing custom templates s.add_development_dependency 'tilt', '~> 2.0.0' s.add_development_dependency 'yard', '~> 0.8.7' s.add_development_dependency 'yard-tomdoc', '~> 0.7.0' s.add_development_dependency 'minitest', '~> 5.3.0' s.add_development_dependency 'racc', '~> 1.4.10' if RUBY_VERSION == '2.1.0' && RUBY_ENGINE == 'rbx' end asciidoctor-1.5.5/benchmark/000077500000000000000000000000001277513741400157725ustar00rootroot00000000000000asciidoctor-1.5.5/benchmark/.gitignore000066400000000000000000000000671277513741400177650ustar00rootroot00000000000000/sample-data/userguide.adoc /sample-data/customers.csv asciidoctor-1.5.5/benchmark/.ruby-gemset000066400000000000000000000000221277513741400202300ustar00rootroot00000000000000asciidoctor-bench 
asciidoctor-1.5.5/benchmark/.ruby-version000066400000000000000000000000041277513741400204310ustar00rootroot000000000000002.1 asciidoctor-1.5.5/benchmark/benchmark.rb000077500000000000000000000115071277513741400202600ustar00rootroot00000000000000#!/usr/bin/env ruby =begin Use this script to monitor changes in performance when making code changes to Asciidoctor. $ ruby benchmark.rb The most common benchmark is the userguide-loop. It will download the AsciiDoc User Guide automatically the first time, then convert it in memory. Running it 10 times provides a good picture. $ ruby benchmark.rb userguide-loop 10 Only worry about the relative change to the numbers before and after the code change. Absolute times are highly dependent on the capabilities of the machine the the version of Ruby. To get the best results under MRI, tune Ruby using environment variables as follows: .Ruby < 2.1 $ RUBY_GC_MALLOC_LIMIT=90000000 RUBY_FREE_MIN=650000 ruby benchmark.rb userguide-loop 10 .Ruby >= 2.1 $ RUBY_GC_MALLOC_LIMIT=128000000 RUBY_GC_OLDMALLOC_LIMIT=128000000 RUBY_GC_HEAP_INIT_SLOTS=800000 RUBY_GC_HEAP_FREE_SLOTS=800000 RUBY_GC_HEAP_GROWTH_MAX_SLOTS=250000 RUBY_GC_HEAP_GROWTH_FACTOR=2 ruby benchmark.rb userguide-loop 10 Asciidoctor starts with ~ 12,500 objects, adds ~ 300,000 each run, so tune RUBY_GC_HEAP_* accordingly See http://globaldev.co.uk/2014/05/ruby-2-1-in-detail/#gc-tuning-environment-variables Execute Ruby using the `--disable=gems` flag to speed up the initial load time, as shown below: $ ruby --disable=gems ... =end require 'benchmark' include Benchmark bench = ARGV[0] $repeat = ARGV[1].to_i || 10000 if bench.nil? raise 'You must specify a benchmark to run.' end def fetch_userguide require 'open-uri' userguide_uri = 'https://raw.githubusercontent.com/asciidoc/asciidoc/d43faae38c4a8bf366dcba545971da99f2b2d625/doc/asciidoc.txt' customers_uri = 'https://raw.githubusercontent.com/asciidoc/asciidoc/d43faae38c4a8bf366dcba545971da99f2b2d625/doc/customers.csv' userguide_content = open(userguide_uri) {|fd2| fd2.read } customers_content = open(customers_uri) {|fd2| fd2.read } File.open('sample-data/userguide.adoc', 'w') {|fd1| fd1.write userguide_content } File.open('sample-data/customers.csv', 'w') {|fd1| fd1.write customers_content } end case bench =begin # benchmark template when 'name' sample = 'value' Benchmark.bmbm(12) {|bm| bm.report('operation a') { $repeat.times { call_a_on sample } } bm.report('operation b') { $repeat.times { call_b_on sample } } } =end when 'userguide' require '../lib/asciidoctor.rb' Asciidoctor::Compliance.markdown_syntax = false Asciidoctor::Compliance.shorthand_property_syntax = false if Asciidoctor::VERSION > '0.1.4' sample_file = ENV['BENCH_TEST_FILE'] || 'sample-data/userguide.adoc' backend = ENV['BENCH_BACKEND'] || 'html5' fetch_userguide if sample_file == 'sample-data/userguide.adoc' && !(File.exist? 
sample_file) result = Benchmark.bmbm {|bm| bm.report(%(Convert #{sample_file} (x#{$repeat}))) { $repeat.times { Asciidoctor.render_file sample_file, :backend => backend, :safe => Asciidoctor::SafeMode::SAFE, :eruby => 'erubis', :header_footer => true, :to_file => false, :attributes => {'linkcss' => '', 'toc' => nil, 'numbered' => nil, 'icons' => nil, 'compat-mode' => ''} } } } # prints average for real run puts %(>avg: #{result.first.real / $repeat}) when 'userguide-loop' require '../lib/asciidoctor.rb' GC.start Asciidoctor::Compliance.markdown_syntax = false Asciidoctor::Compliance.shorthand_property_syntax = false if Asciidoctor::VERSION > '0.1.4' sample_file = ENV['BENCH_TEST_FILE'] || 'sample-data/userguide.adoc' backend = ENV['BENCH_BACKEND'] || 'html5' fetch_userguide if sample_file == 'sample-data/userguide.adoc' && !(File.exist? sample_file) best = nil 2.times.each do outer_start = Time.now (1..$repeat).each do inner_start = Time.now Asciidoctor.render_file sample_file, :backend => backend, :safe => Asciidoctor::SafeMode::SAFE, :eruby => 'erubis', :header_footer => true, :to_file => false, :attributes => {'linkcss' => '', 'toc' => nil, 'numbered' => nil, 'icons' => nil, 'compat-mode' => ''} puts (elapsed = Time.now - inner_start) best = (best ? [best, elapsed].min : elapsed) end puts %(Run Total: #{Time.now - outer_start}) end puts %(Best Time: #{best}) when 'mdbasics-loop' require '../lib/asciidoctor.rb' GC.start sample_file = ENV['BENCH_TEST_FILE'] || 'sample-data/mdbasics.adoc' backend = ENV['BENCH_BACKEND'] || 'html5' best = nil 2.times do outer_start = Time.now (1..$repeat).each do inner_start = Time.now Asciidoctor.render_file sample_file, :backend => backend, :safe => Asciidoctor::SafeMode::SAFE, :header_footer => false, :to_file => false, :attributes => {'linkcss' => '', 'idprefix' => '', 'idseparator' => '-', 'showtitle' => ''} puts (elapsed = Time.now - inner_start) best = (best ? [best, elapsed].min : elapsed) end puts %(Run Total: #{Time.now - outer_start}) end puts %(Best Time: #{best}) end asciidoctor-1.5.5/benchmark/sample-data/000077500000000000000000000000001277513741400201625ustar00rootroot00000000000000asciidoctor-1.5.5/benchmark/sample-data/mdbasics.adoc000066400000000000000000000172371277513741400226110ustar00rootroot00000000000000// converted to AsciiDoc from https://github.com/gettalong/kramdown/blob/master/benchmark/mdbasics.text # Markdown: Basics John Gruber :s: link:/projects/markdown/syntax :d: link:/projects/markdown/dingus :src: link:/projects/markdown/basics.text ++++ ++++ ## Getting the Gist of Markdown's Formatting Syntax This page offers a brief overview of what it's like to use Markdown. The {s}[syntax page] provides complete, detailed documentation for every feature, but Markdown should be very easy to pick up simply by looking at a few examples of it in action. The examples on this page are written in a before/after style, showing example syntax and the HTML output produced by Markdown. It's also helpful to simply try Markdown out; the {d}[Dingus] is a web application that allows you type your own Markdown-formatted text and translate it to XHTML. NOTE: This document is itself written using Markdown; you can {src}[see the source for it by adding \'.text' to the URL]. ### Paragraphs, Headers, Blockquotes A paragraph is simply one or more consecutive lines of text, separated by one or more blank lines. (A blank line is any line that looks like a blank line -- a line containing nothing spaces or tabs is considered blank.) 
Normal paragraphs should not be indented with spaces or tabs. Markdown offers two styles of headers: _Setext_ and _atx_. Setext-style headers for +

+ and +

+ are created by "underlining" with equal signs (+=+) and hyphens (+-+), respectively. To create an atx-style header, you put 1-6 hash marks (+#+) at the beginning of the line -- the number of hashes equals the resulting HTML header level. Blockquotes are indicated using email-style \'+>+' angle brackets. .Markdown: [listing] .... A First Level Header ==================== A Second Level Header --------------------- Now is the time for all good men to come to the aid of their country. This is just a regular paragraph. The quick brown fox jumped over the lazy dog's back. ### Header 3 > This is a blockquote. > > This is the second paragraph in the blockquote. > > ## This is an H2 in a blockquote .... .Output: ....

A First Level Header

A Second Level Header

Now is the time for all good men to come to the aid of their country. This is just a regular paragraph.

The quick brown fox jumped over the lazy dog's back.

Header 3

This is a blockquote.

This is the second paragraph in the blockquote.

This is an H2 in a blockquote

.... ### Phrase Emphasis Markdown uses asterisks and underscores to indicate spans of emphasis. .Markdown: ---- Some of these words *are emphasized*. Some of these words _are emphasized also_. Use two asterisks for **strong emphasis**. Or, if you prefer, __use two underscores instead__. ---- .Output: ....

Some of these words are emphasized. Some of these words are emphasized also.

Use two asterisks for strong emphasis. Or, if you prefer, use two underscores instead.

.... ### Lists Unordered (bulleted) lists use asterisks, pluses, and hyphens (+*+, +++, and +-+) as list markers. These three markers are interchangable; this: ---- * Candy. * Gum. * Booze. ---- this: ---- + Candy. + Gum. + Booze. ---- and this: ---- - Candy. - Gum. - Booze. ---- all produce the same output: ....
  • Candy.
  • Gum.
  • Booze.
.... Ordered (numbered) lists use regular numbers, followed by periods, as list markers: ---- 1. Red 2. Green 3. Blue ---- .Output: ....
  1. Red
  2. Green
  3. Blue
.... If you put blank lines between items, you'll get +

+ tags for the list item text. You can create multi-paragraph list items by indenting the paragraphs by 4 spaces or 1 tab: ---- * A list item. With multiple paragraphs. * Another item in the list. ---- .Output: ....

  • A list item.

    With multiple paragraphs.

  • Another item in the list.

.... ### Links Markdown supports two styles for creating links: _inline_ and _reference_. With both styles, you use square brackets to delimit the text you want to turn into a link. Inline-style links use parentheses immediately after the link text. For example: ---- This is an [example link](http://example.com/). ---- .Output: ....

This is an example link.

.... Optionally, you may include a title attribute in the parentheses: ---- This is an [example link](http://example.com/ "With a Title"). ---- .Output: ....

This is an example link.

.... Reference-style links allow you to refer to your links by names, which you define elsewhere in your document: ---- I get 10 times more traffic from [Google][1] than from [Yahoo][2] or [MSN][3]. [1]: http://google.com/ "Google" [2]: http://search.yahoo.com/ "Yahoo Search" [3]: http://search.msn.com/ "MSN Search" ---- .Output: ....

I get 10 times more traffic from Google than from Yahoo or MSN.

.... The title attribute is optional. Link names may contain letters, numbers and spaces, but are _not_ case sensitive: ---- I start my morning with a cup of coffee and [The New York Times][NY Times]. [ny times]: http://www.nytimes.com/ ---- .Output: ....

I start my morning with a cup of coffee and The New York Times.

.... ### Images Image syntax is very much like link syntax. .Inline (titles are optional): ---- ![alt text](/path/to/img.jpg "Title") ---- .Reference-style: ---- ![alt text][id] [id]: /path/to/img.jpg "Title" ---- Both of the above examples produce the same output: .... alt text .... ### Code In a regular paragraph, you can create code span by wrapping text in backtick quotes. Any ampersands (+&+) and angle brackets (+<+ or +>+) will automatically be translated into HTML entities. This makes it easy to use Markdown to write about HTML example code: ---- I strongly recommend against using any `` tags. I wish SmartyPants used named entities like `—` instead of decimal-encoded entites like `—`. ---- .Output: ....

I strongly recommend against using any <blink> tags.

I wish SmartyPants used named entities like &mdash; instead of decimal-encoded entites like &#8212;.

.... To specify an entire block of pre-formatted code, indent every line of the block by 4 spaces or 1 tab. Just like with code spans, +&+, +<+, and +>+ characters will be escaped automatically. .Markdown: ---- If you want your page to validate under XHTML 1.0 Strict, you've got to put paragraph tags in your blockquotes:

For example.

---- .Output: ....

If you want your page to validate under XHTML 1.0 Strict, you've got to put paragraph tags in your blockquotes:

<blockquote>
    <p>For example.</p>
</blockquote>
.... asciidoctor-1.5.5/bin/000077500000000000000000000000001277513741400146105ustar00rootroot00000000000000asciidoctor-1.5.5/bin/asciidoctor000077500000000000000000000004721277513741400170440ustar00rootroot00000000000000#!/usr/bin/env ruby require 'rubygems' unless defined? Gem if File.exist?(asciidoctor = (File.expand_path '../../lib/asciidoctor', __FILE__)) require asciidoctor else require 'asciidoctor' end require 'asciidoctor/cli' invoker = Asciidoctor::Cli::Invoker.new ARGV GC.start invoker.invoke! exit invoker.code asciidoctor-1.5.5/bin/asciidoctor-safe000077500000000000000000000005141277513741400177550ustar00rootroot00000000000000#!/usr/bin/env ruby require 'rubygems' unless defined? Gem if File.exist?(asciidoctor = (File.expand_path '../../lib/asciidoctor', __FILE__)) require asciidoctor else require 'asciidoctor' end require 'asciidoctor/cli' invoker = Asciidoctor::Cli::Invoker.new(ARGV + ['-S', 'safe']) GC.start invoker.invoke! exit invoker.code asciidoctor-1.5.5/compat/000077500000000000000000000000001277513741400153235ustar00rootroot00000000000000asciidoctor-1.5.5/compat/asciidoc.conf000066400000000000000000000360041277513741400177530ustar00rootroot00000000000000# This file is an AsciiDoc configuration file that makes # AsciiDoc conform with Asciidoctor's fixes and customizations. # # Place this file in the same directory as your AsciiDoc document and the # AsciiDoc processor (asciidoc) will automatically use it. [miscellaneous] newline=\n [attributes] # make html5 the default html backend backend-alias-html=html5 asterisk=* backtick=` brvbar=¦ caret=^ # plus introduced in AsciiDoc 8.6.9 plus=+ blank= tilde=~ cpp=C++ user-home={eval:os.path.expanduser('~')} vbar=| # NOTE use -a no-inline-literal to set compat-mode to default when using AsciiDoc Python ifndef::no-inline-literal[] compat-mode=legacy endif::[] [replacements] # right single quote (?[\S].*?)(?: +\1)?$ sect1=^(==|##) +(?P[\S].*?)(?: +\1)?$ sect2=^(===|###) +(?P<title>[\S].*?)(?: +\1)?$ sect3=^(====|####) +(?P<title>[\S].*?)(?: +\1)?$ sect4=^(=====|#####) +(?P<title>[\S].*?)(?: +\1)?$ sect5=^(======|######) +(?P<title>[\S].*?)(?: +\1)?$ # Disable subs on pass block by default [blockdef-pass] subs=none # enables fenced code blocks # FIXME I haven't sorted out yet how to do syntax highlighting [blockdef-fenced-code] delimiter=^```(?:\w+(?:,numbered)?)?$ ifdef::language[] style=source template::[source-filter-style] endif::language[] ifndef::language[] template=listingblock subs=verbatim posattrs=style endif::language[] # enables blockquotes to be defined using two double quotes [blockdef-air-quote] template::[blockdef-quote] delimiter=^""$ # markdown-style blockquote (paragraph only) # FIXME does not strip leading > on subsequent lines [paradef-markdown-quote] delimiter=(?s)>\s*(?P<text>\S.*) style=quote quote-style=template="quoteparagraph",posattrs=("style","attribution","citetitle") # fix regex for callout list to require number; also makes markdown-style blockquote work [listdef-callout] posattrs=style delimiter=^<?(?P<index>\d+>) +(?P<text>.+)$ type=callout tags=callout style=arabic # enables literal block to be used as source block [blockdef-literal] template::[source-filter-style] # enables source block when source-highlighter is not defined ifndef::source-highlighter[] [source-filter-style] source-style=template="listingblock",subs=("specialcharacters","callouts"),posattrs=("style","language","src_numbered","src_tab") [paradef-default] template::[source-filter-style] [paradef-literal] 
template::[source-filter-style] [blockdef-open] template::[source-filter-style] [blockdef-listing] template::[source-filter-style] endif::source-highlighter[] [tabledef-csv] template::[tabledef-default] delimiter=^,={3,}$ format=csv [tabledef-dsv] template::[tabledef-default] delimiter=^:={3,}$ format=dsv [macros] ifdef::no-inline-literal[] (?su)\\?\+\+(?P<passtext>.*?)\+\+=pass[specialcharacters] (?su)(?<![+\w])(\\?\+(?P<passtext>\S|\S.*?\S)\+)(?![+\w])=pass[specialcharacters] endif::no-inline-literal[] # additional callout match behind line comments #(?://|#|;;) ?\((?P<index>\d+)\)=callout # additional callout match for XML [\\]?<!--(?P<index>\d+)-->=callout # --- or *** or ___ or - - - or * * * or _ _ _ (in addition to the built-in ''') ^ {0,3}([-\*_])( *)\1\2\1$=#ruler # btn:[Save] (?su)(?<!\w)\\?btn:\[(?P<attrlist>(?:\\\]|[^\]])+?)\]=button # kbd:[F11] or kbd:[Ctrl+T] or kbd:[Ctrl,T] (?su)(?<!\w)\\?kbd:\[(?P<attrlist>(?:\\\]|[^\]])+?)\]=keyboard # menu:Search[] or menu:File[New...] or menu:View[Page Style, No Style] # TODO implement menu:View[Page Style > No Style] syntax (?su)(?<!\w)[\\]?(?P<name>menu):(?P<target>\w|\w.*?\S)?\[(?P<attrlist>.*?)\]= ifdef::basebackend-html[] [sect5] <div class="sect5{style? {style}}{role? {role}}"> <h6{id? id="{id}"}>{title}</h6> | </div> [button-inlinemacro] <b class="button">{1}</b> [keyboard-inlinemacro] {set2:keys:{eval:re.split(r'(?<!\+ |.\+)\+', '{1}')}} {2%}{eval:len({keys}) == 1}<kbd>{1}</kbd> {2%}{eval:len({keys}) == 2}<kbd class="combo"><kbd>{eval:{keys}[0].strip()}</kbd>+<kbd>{eval:{keys}[1].strip()}</kbd></kbd> {2%}{eval:len({keys}) == 3}<kbd class="combo"><kbd>{eval:{keys}[0].strip()}</kbd>+<kbd>{eval:{keys}[1].strip()}</kbd>+<kbd>{eval:{keys}[2].strip()}</kbd></kbd> {2#}{3%}<kbd class="combo"><kbd>{1}</kbd>+<kbd>{2}</kbd></kbd> {3#}<kbd class="combo"><kbd>{1}</kbd>+<kbd>{2}</kbd>+<kbd>{3}</kbd></kbd> [menu-inlinemacro] {1%}<span class="menu">{target}</span> {1#}{2%}<span class="menuseq"><span class="menu">{target}</span> ▸ <span class="menuitem">{1}</span></span> {2#}{3%}<span class="menuseq"><span class="menu">{target}</span> ▸ <span class="submenu">{1}</span> ▸ <span class="menuitem">{2}</span></span> {3#}<span class="menuseq"><span class="menu">{target}</span> ▸ <span class="submenu">{1}</span> ▸ <span class="submenu">{2}</span> ▸ <span class="menuitem">{3}</span></span> [literal-inlinemacro] <code>{passtext}</code> [tags] emphasis=<em{1? class="{1}"}>|</em> strong=<strong{1? class="{1}"}>|</strong> monospaced=<code{1? class="{1}"}>|</code> superscript=<sup{1? class="{1}"}>|</sup> subscript=<sub{1? class="{1}"}>|</sub> mark={1=<mark>}{1?<span class="{1}">}|{1?</span>}{1=</mark>} [monospacedwords] <code>{words}</code> ifdef::linkattrs[] [http-inlinemacro] <a href="{name}:{target}"{id? id="{id}"}{role? class="{role}"}{window? target="{window}"}>{1={name}:{target}}</a> [https-inlinemacro] <a href="{name}:{target}"{id? id="{id}"}{role? class="{role}"}{window? target="{window}"}>{1={name}:{target}}</a> [ftp-inlinemacro] <a href="{name}:{target}"{id? id="{id}"}{role? class="{role}"}{window? target="{window}"}>{1={name}:{target}}</a> [file-inlinemacro] <a href="{name}:{target}"{id? id="{id}"}{role? class="{role}"}{window? target="{window}"}>{1={name}:{target}}</a> [irc-inlinemacro] <a href="{name}:{target}"{id? id="{id}"}{role? class="{role}"}{window? target="{window}"}>{1={name}:{target}}</a> [mailto-inlinemacro] <a href="mailto:{target}"{id? id="{id}"}{role? class="{role}"}{window? 
target="{window}"}>{1={target}}</a> [link-inlinemacro] <a href="{target}"{id? id="{id}"}{role? class="{role}"}{window? target="{window}"}>{1={target}}</a> endif::linkattrs[] [listtags-numbered] list=<div class="olist{style? {style}}{compact-option? compact}{role? {role}}"{id? id="{id}"}>{title?<div class="title">{title}</div>}<ol class="{style}"{style@loweralpha: type="a"}{style@lowerroman: type="i"}{style@upperalpha: type="A"}{style@upperroman: type="I"}{start? start="{start}"}>|</ol></div> [tabletags-monospaced] paragraph=<p class="tableblock"><code>|</code></p> [sect0] <h1{id? id="{id}"} class="sect0">{title}</h1> | # support for document title in embedded documents ifeval::[not config.header_footer] [preamble] <h1>{title={doctitle}}</h1>{set:title-rendered:} <div id="preamble"> <div class="sectionbody"> | </div> {toc,toc2#}{toc-placement$preamble:}{template:toc} </div> [sect1] {title-rendered%}<h1>{doctitle}</h1> <div class="sect1{style? {style}}{role? {role}}"> <h2{id? id="{id}"}>{numbered?{sectnum} }{title}</h2> <div class="sectionbody"> | </div> </div> endif::[] # override to add the admonition name to the class attribute of the outer element [admonitionblock] <div class="admonitionblock {name}{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}> <table><tr> <td class="icon"> {data-uri%}{icons#}<img src="{icon={iconsdir}/{name}.png}" alt="{caption}"> {data-uri#}{icons#}<img alt="{caption}" src="data:image/png;base64, {data-uri#}{icons#}{sys:"{python}" -u -c "import base64,sys; base64.encode(sys.stdin,sys.stdout)" < "{eval:os.path.join(r"{indir={outdir}}",r"{icon={iconsdir}/{name}.png}")}"}"> {icons%}<div class="title">{caption}</div> </td> <td class="content"> <div class="title">{title}</div> | </td> </tr></table> </div> # modified so that: # a. imagesdir is only prepended if target is not a uri or absolute path (relative path only) # b. automatic alt text is calculated from basename of target without extension # note that the escaped_target attribute must be set in order to use a uri in the conditional attribute reference [image-inlinemacro] <span class="image{role? {role}}"{float? style="float: {float}"}>{set2:escaped_target:{eval:'{target}'.replace(':','\:')}} <a class="image" href="{link}"> {data-uri%}<img src="{target@^(/|https?\://).*:{escaped_target}:{imagesdir?{imagesdir}}{imagesdir?/}{escaped_target}}" alt="{alt={eval:os.path.splitext(os.path.basename('{target}'))[0]}}"{width? width="{width}"}{height? height="{height}"}{title? title="{title}"}> {data-uri#}<img alt="{alt={target}}"{width? width="{width}"}{height? height="{height}"}{title? title="{title}"} {data-uri#}{sys:"{python}" -u -c "import mimetypes,base64,sys; print 'src=\"data:'+mimetypes.guess_type(r'{target}')[0]+';base64,'; base64.encode(sys.stdin,sys.stdout)" < "{eval:os.path.join(r"{indir={outdir}}",r"{imagesdir=}",r"{target}")}"}"> {link#}</a> </span> # modified so that: # a. imagesdir is only prepended if target is not a uri or absolute path (relative path only) # b. automatic alt text is calculated from basename of target without extension # note that the escaped_target attribute must be set in order to use a uri in the conditional attribute reference [image-blockmacro] <div class="imageblock{style? {style}}{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}{align? style="text-align:{align};"}{float? 
style="float: {float}"}> <div class="content">{set2:escaped_target:{eval:'{target}'.replace(':','\:')}} <a class="image" href="{link}"> {data-uri%}<img src="{target@^(/|https?\://).*:{escaped_target}:{imagesdir?{imagesdir}}{imagesdir?/}{escaped_target}}" alt="{alt={eval:os.path.splitext(os.path.basename('{target}'))[0]}}"{width? width="{width}"}{height? height="{height}"}> {data-uri#}<img alt="{alt={target}}"{width? width="{width}"}{height? height="{height}"} {data-uri#}{sys:"{python}" -u -c "import mimetypes,base64,sys; print 'src=\"data:'+mimetypes.guess_type(r'{target}')[0]+';base64,'; base64.encode(sys.stdin,sys.stdout)" < "{eval:os.path.join(r"{indir={outdir}}",r"{imagesdir=}",r"{target}")}"}"> {link#}</a> </div> <div class="title">{caption={figure-caption} {counter:figure-number}. }{title}</div> </div> # a common template for emitting the attribute for a quote or verse block # don't output attribution div if attribution or citetitle are both empty [attribution] {attribution,citetitle#}<div class="attribution"> — {attribution}{citetitle?<br>} <cite>{citetitle}</cite> {attribution,citetitle#}</div> # override to use blockquote element for content and cite element for cite title [quoteblock] <div class="quoteblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}> <div class="title">{title}</div> <blockquote> | </blockquote> template::[attribution] </div> # override to use cite element for cite title [verseblock] <div class="verseblock{role? {role}}{unbreakable-option? unbreakable}"{id? id="{id}"}> <div class="title">{title}</div> <pre class="content"> | </pre> template::[attribution] </div> # override tabletags to support cellbgcolor [tabletags-default] headdata=<th class="tableblock halign-{halign=left} valign-{valign=top}"{colspan@1:: colspan="{colspan}"}{rowspan@1:: rowspan="{rowspan}"}{cellbgcolor? style="background-color:{cellbgcolor};"}>|</th> bodydata=<td class="tableblock halign-{halign=left} valign-{valign=top}"{colspan@1:: colspan="{colspan}"}{rowspan@1:: rowspan="{rowspan}"}{cellbgcolor? style="background-color:{cellbgcolor};"}>|</td> # override header cells to use th [tabletags-header] bodydata=<th class="tableblock halign-{halign=left} valign-{valign=top}" {colspan@1::colspan="{colspan}" }{rowspan@1::rowspan="{rowspan}" }>|</th> paragraph=<p class="tableblock">|</p> [toc] <div id="toc"> <div id="toctitle">{toc-title}</div> ifdef::toc2[] <script type="text/javascript"> document.body.className += ' toc2'; document.getElementById('toc').className = 'toc2'; </script> endif::toc2[] <noscript><p><b>JavaScript must be enabled in your browser to display the table of contents.</b></p></noscript> </div> endif::basebackend-html[] # Override docinfo to support subtitle ifdef::basebackend-docbook[] [sect5] <section{id? id="{id}"}{role? role="{role}"}{reftext? 
xreflabel="{reftext}"}> <title>{title} | [tags] monospaced=| subscript=| superscript=| [button-inlinemacro] {1} [keyboard-inlinemacro] {set2:keys:{eval:re.split(r'(?{1} {2%}{eval:len({keys}) == 2}{eval:{keys}[0].strip()}{eval:{keys}[1].strip()} {2%}{eval:len({keys}) == 3}{eval:{keys}[0].strip()}{eval:{keys}[1].strip()}{eval:{keys}[2].strip()} {2#}{3%}{1}{2} {3#}{1}{2}{3} [menu-inlinemacro] {1%}{target} {1#}{2%}{target} {1} {2#}{3%}{target} {1} {2} {3#}{target} {1} {2} {3} # override tabletags to support cellbgcolor [tabletags-default] headdata={cellbgcolor?}| bodydata={cellbgcolor?}| [docinfo] ifndef::notitle[] {set2:subtitle_offset:{eval:'{doctitle}'.rfind(': ')}} {eval:{subtitle_offset} != -1}{eval:'{doctitle}'[0:{subtitle_offset}]} {eval:{subtitle_offset} != -1}{eval:'{doctitle}'[{subtitle_offset} + 2:]} {eval:{subtitle_offset} < 0}{doctitle} endif::notitle[] {revdate} # To ensure valid articleinfo/bookinfo when there is no AsciiDoc header. {doctitle%}{revdate%}{docdate} {authored#} {firstname} {middlename} {lastname} {email} {authored#} {authorinitials} {revnumber?{revnumber}}{revdate}{authorinitials?{authorinitials}}{revremark?{revremark}} {docinfo1,docinfo2#}{include:{docdir}/docinfo.xml} {docinfo,docinfo2#}{include:{docdir}/{docname}-docinfo.xml} {orgname} endif::basebackend-docbook[] asciidoctor-1.5.5/compat/font-awesome-3-compat.css000066400000000000000000000141331277513741400220640ustar00rootroot00000000000000.fa-envelope-alt:before { content: "\f003"; } .fa-star-empty:before { content: "\f006"; } .fa-ok:before { content: "\f00c"; } .fa-remove:before { content: "\f00d"; } .fa-zoom-in:before { content: "\f00e"; } .fa-zoom-out:before { content: "\f010"; } .fa-off:before { content: "\f011"; } .fa-trash:before { content: "\f014"; } .fa-file-alt:before { content: "\f016"; } .fa-time:before { content: "\f017"; } .fa-download-alt:before { content: "\f019"; } .fa-download-alt:before { content: "\f01a"; } .fa-upload-alt:before { content: "\f01b"; } .fa-play-sign:before { content: "\f01d"; } .fa-indent-right-left:before { content: "\f03b"; } .fa-indent-right:before { content: "\f03c"; } .fa-facetime-video:before { content: "\f03d"; } .fa-picture:before { content: "\f03e"; } .fa-edit-sign-o:before { content: "\f044"; } .fa-share-alt-square-o:before { content: "\f045"; } .fa-ok-sign-o:before { content: "\f046"; } .fa-move:before { content: "\f047"; } .fa-plus-sign:before { content: "\f055"; } .fa-minus-sign:before { content: "\f056"; } .fa-remove-sign:before { content: "\f057"; } .fa-ok-sign:before { content: "\f058"; } .fa-question-sign:before { content: "\f059"; } .fa-info-sign:before { content: "\f05a"; } .fa-screenshot:before { content: "\f05b"; } .fa-remove-circle:before { content: "\f05c"; } .fa-ok-circle:before { content: "\f05d"; } .fa-ban-circle:before { content: "\f05e"; } .fa-share-alt:before { content: "\f064"; } .fa-resize-full:before { content: "\f065"; } .fa-resize-small:before { content: "\f066"; } .fa-exclamation-sign:before { content: "\f06a"; } .fa-eye-open:before { content: "\f06e"; } .fa-eye-open-close:before { content: "\f070"; } .fa-warning-sign:before { content: "\f071"; } .fa-folder-close:before { content: "\f07b"; } .fa-folder-close-close-altpen:before { content: "\f07c"; } .fa-move-v:before { content: "\f07d"; } .fa-move-h:before { content: "\f07e"; } .fa-bar-chart:before { content: "\f080"; } .fa-twitter-sign:before { content: "\f081"; } .fa-facebook-sign:before { content: "\f082"; } .fa-thumbs-up-alt:before { content: "\f087"; } .fa-thumbs-down-alt:before { 
content: "\f088"; } .fa-heart-empty:before { content: "\f08a"; } .fa-signout:before { content: "\f08b"; } .fa-linkedin-sign:before { content: "\f08c"; } .fa-pushpin:before { content: "\f08d"; } .fa-signin:before { content: "\f090"; } .fa-github-sign:before { content: "\f092"; } .fa-upload-alt:before { content: "\f093"; } .fa-lemon:before { content: "\f094"; } .fa-ok-empty:before { content: "\f096"; } .fa-bookmark-empty:before { content: "\f097"; } .fa-phone-sign:before { content: "\f098"; } .fa-hdd:before { content: "\f0a0"; } .fa-bell-alt:before { content: "\f0f3"; } .fa-hand-right:before { content: "\f0a4"; } .fa-hand-left:before { content: "\f0a5"; } .fa-hand-up:before { content: "\f0a6"; } .fa-hand-down:before { content: "\f0a7"; } .fa-circle-arrow-left:before { content: "\f0a8"; } .fa-circle-arrow-right:before { content: "\f0a9"; } .fa-circle-arrow-up:before { content: "\f0aa"; } .fa-circle-arrow-down:before { content: "\f0ab"; } .fa-fullscreen:before { content: "\f0b2"; } .fa-group:before { content: "\f0c0"; } .fa-beaker:before { content: "\f0c3"; } .fa-paper-clip:before { content: "\f0c6"; } .fa-sign-blank:before { content: "\f0c8"; } .fa-pinterest-sign:before { content: "\f0d3"; } .fa-google-plus-sign:before { content: "\f0d4"; } .fa-comment-alt:before { content: "\f0e5"; } .fa-comments-alt:before { content: "\f0e6"; } .fa-lightbulb:before { content: "\f0eb"; } .fa-bell-alt:before { content: "\f0a2"; } .fa-food:before { content: "\f0f5"; } .fa-file-text-alt:before { content: "\f0f6"; } .fa-building:before { content: "\f0f7"; } .fa-hospital:before { content: "\f0f8"; } .fa-h-sign:before { content: "\f0fd"; } .fa-plus-sign-alt:before { content: "\f0fe"; } .fa-double-angle-left:before { content: "\f100"; } .fa-double-angle-right:before { content: "\f101"; } .fa-double-angle-up:before { content: "\f102"; } .fa-double-angle-down:before { content: "\f103"; } .fa-circle-blank:before { content: "\f10c"; } .fa-folder-close-close-alt:before { content: "\f114"; } .fa-folder-close-close-altpen-o:before { content: "\f115"; } .fa-smile:before { content: "\f118"; } .fa-frown:before { content: "\f119"; } .fa-meh:before { content: "\f11a"; } .fa-keyboard:before { content: "\f11c"; } .fa-flag-alt:before { content: "\f11d"; } .fa-microphone-off:before { content: "\f131"; } .fa-calendar-empty:before { content: "\f133"; } .fa-chevron-sign-left:before { content: "\f137"; } .fa-chevron-sign-right:before { content: "\f138"; } .fa-chevron-sign-up:before { content: "\f139"; } .fa-chevron-sign-down:before { content: "\f13a"; } .fa-ellipsis-horizontal:before { content: "\f141"; } .fa-ellipsis-vertical:before { content: "\f142"; } .fa-rss-sign:before { content: "\f143"; } .fa-play-sign:before { content: "\f144"; } .fa-minus-sign-alt:before { content: "\f146"; } .fa-ok-minus:before { content: "\f147"; } .fa-ok-sign:before { content: "\f14a"; } .fa-edit-sign:before { content: "\f14b"; } .fa-external-link-sign:before { content: "\f14c"; } .fa-share-alt-square:before { content: "\f14d"; } .fa-collapse:before { content: "\f150"; } .fa-collapse-top:before { content: "\f151"; } .fa-resize-full:before { content: "\f152"; } .fa-cnyle:before, .fa-cny:before { content: "\f158"; } .fa-sort-by-alphabet:before { content: "\f15d"; } .fa-sort-by-alphabet-alt:before { content: "\f15e"; } .fa-sort-by-attributes:before { content: "\f160"; } .fa-sort-by-attributes-alt:before { content: "\f161"; } .fa-sort-by-order:before { content: "\f162"; } .fa-sort-by-order-alt:before { content: "\f163"; } .fa-youtube-sign:before { content: 
"\f166"; } .fa-xing-sign:before { content: "\f169"; } .fa-stackexchange:before { content: "\f16c"; } .fa-bitbucket-sign:before { content: "\f172"; } .fa-tumblr-sign:before { content: "\f174"; } .fa-sun:before { content: "\f185"; } .fa-moon:before { content: "\f186"; } .fa-expand-alt:before { content: "\f196"; } asciidoctor-1.5.5/data/000077500000000000000000000000001277513741400147515ustar00rootroot00000000000000asciidoctor-1.5.5/data/locale/000077500000000000000000000000001277513741400162105ustar00rootroot00000000000000asciidoctor-1.5.5/data/locale/attributes.adoc000066400000000000000000000316761277513741400212430ustar00rootroot00000000000000// This file provides translations for all built-in attributes in Asciidoctor that output localized content. // See http://asciidoctor.org/docs/user-manual/#customizing-built-in-labels to learn how to use it. // // NOTE: Please use a line comment in front of the listing-caption and preface-title entries. // These attributes are optional and not set by default. // // IMPORTANT: Do not add any blank lines. // // Arabic translation, courtesy of Aboullaite Mohammed ifeval::["{lang}" == "ar"] :appendix-caption: ملحق :caution-caption: تنبيه :example-caption: مثال :figure-caption: الشكل :important-caption: مهم :last-update-label: اخر تحديث //:listing-caption: قائمة :manname-title: اسم :note-caption: ملاحظة //:preface-title: تمهيد :table-caption: جدول :tip-caption: تلميح :toc-title: فهرس :untitled-label: بدون عنوان :version-label: نسخة :warning-caption: تحذير endif::[] // // Bulgarian translation, courtesy of Ivan St. Ivanov ifeval::["{lang}" == "bg"] :appendix-caption: Приложение :caution-caption: Внимание :example-caption: Пример :figure-caption: Фигура :important-caption: Важно :last-update-label: Последно обновен //:listing-caption: Листинг :manname-title: ИМЕ :note-caption: Забележка //:preface-title: Предговор :table-caption: Таблица :tip-caption: Подсказка :toc-title: Съдържание :untitled-label: Без заглавие :version-label: Версия :warning-caption: Внимание endif::[] // // Catalan translation, courtesy of Abel Salgado Romero and Alex Soto ifeval::["{lang}" == "ca"] :appendix-caption: Apendix :caution-caption: Atenció :example-caption: Exemple :figure-caption: Figura :important-caption: Important :last-update-label: Última actualització //:listing-caption: Llista :manname-title: NOM :note-caption: Nota //:preface-title: Prefaci :table-caption: Taula :tip-caption: Suggeriment :toc-title: Índex :untitled-label: Sense títol :version-label: Versió :warning-caption: Advertència endif::[] // // Danish translation, courtesy of Max Rydahl Andersen ifeval::["{lang}" == "da"] :appendix-caption: Appendix :caution-caption: Forsigtig :example-caption: Eksempel :figure-caption: Figur :important-caption: Vigtig :last-update-label: Sidst opdateret :listing-caption: List :manname-title: NAVN :note-caption: Notat //:preface-title: :table-caption: Tabel :tip-caption: Tips :toc-title: Indholdsfortegnelse :untitled-label: Unavngivet :version-label: Version :warning-caption: Advarsel endif::[] // // German translation, courtesy of Florian Wilhelm ifeval::["{lang}" == "de"] :appendix-caption: Anhang :caution-caption: Achtung :example-caption: Beispiel :figure-caption: Abbildung :important-caption: Wichtig :last-update-label: Zuletzt aktualisiert //:listing-caption: Listing :manname-title: BEZEICHNUNG :note-caption: Anmerkung //:preface-title: Vorwort :table-caption: Tabelle :tip-caption: Hinweis :toc-title: Inhalt :untitled-label: Ohne Titel :version-label: Version 
:warning-caption: Warnung endif::[] // // Spanish translation, courtesy of Eddú Meléndez ifeval::["{lang}" == "es"] :appendix-caption: Apéndice :caution-caption: Precaución :example-caption: Ejemplo :figure-caption: Figura :important-caption: Importante :last-update-label: Ultima actualización //:listing-caption: Lista :manname-title: NOMBRE :note-caption: Nota //:preface-title: Prefacio :table-caption: Tabla :tip-caption: Sugerencia :toc-title: Tabla de Contenido :untitled-label: Sin título :version-label: Versión :warning-caption: Aviso endif::[] // // Persian (Farsi) translation, courtesy of Shahryar Eivazzadeh ifeval::["{lang}" == "fa"] :appendix-caption: پیوست :caution-caption: گوشزد :example-caption: نمونه :figure-caption: نمودار :important-caption: مهم :last-update-label: آخرین به روز رسانی //:listing-caption: فهرست :manname-title: نام :note-caption: یادداشت //:preface-title: پیشگفتار :table-caption: جدول :tip-caption: نکته :toc-title: فهرست مطالب :untitled-label: بی‌نام :version-label: نگارش :warning-caption: هشدار endif::[] // // Finnish translation by Tero Hänninen ifeval::["{lang}" == "fi"] :appendix-caption: Liitteet :caution-caption: Huom :example-caption: Esimerkki :figure-caption: Kuvio :important-caption: Tärkeää :last-update-label: Viimeksi päivitetty //:listing-caption: Listaus :manname-title: NIMI :note-caption: Huomio //:preface-title: Esipuhe :table-caption: Taulukko :tip-caption: Vinkki :toc-title: Sisällysluettelo :untitled-label: Nimetön :version-label: Versio :warning-caption: Varoitus endif::[] // // French translation, courtesy of Nicolas Comet ifeval::["{lang}" == "fr"] :appendix-caption: Appendice :caution-caption: Avertissement :example-caption: Exemple :figure-caption: Figure :important-caption: Important :last-update-label: Dernière mise à jour //:listing-caption: Liste :manname-title: NOM :note-caption: Note //:preface-title: Préface :table-caption: Tableau :tip-caption: Astuce :toc-title: Table des matières :untitled-label: Sans titre :version-label: Version :warning-caption: Attention endif::[] // // Hungarian translation, courtesy of István Pató ifeval::["{lang}" == "hu"] :appendix-caption: függelék :caution-caption: Figyelmeztetés :example-caption: Példa :figure-caption: Ábra :important-caption: Fontos :last-update-label: Utolsó frissítés //:listing-caption: Lista :manname-title: NÉV :note-caption: Megjegyzés //:preface-title: Előszó :table-caption: Táblázat :tip-caption: Tipp :toc-title: Tartalomjegyzék :untitled-label: Névtelen :version-label: Verzió :warning-caption: Figyelem endif::[] // // Italian translation, courtesy of Marco Ciampa ifeval::["{lang}" == "it"] :appendix-caption: Appendice :caution-caption: Attenzione :chapter-label: Capitolo :example-caption: Esempio :figure-caption: Figura :important-caption: Importante :last-update-label: Ultimo aggiornamento //:listing-caption: Elenco :manname-title: NOME :note-caption: Nota //:preface-title: Prefazione :table-caption: Tabella :tip-caption: Suggerimento :toc-title: Indice :untitled-label: Senza titolo :version-label: Versione :warning-caption: Attenzione endif::[] // // Japanese translation, courtesy of Takayuki Konishi ifeval::["{lang}" == "ja"] :appendix-caption: 付録 :caution-caption: 注意 :example-caption: 例 :figure-caption: 図 :important-caption: 重要 :last-update-label: 最終更新 //:listing-caption: リスト :manname-title: 名前 :note-caption: 注記 //:preface-title: まえがき :table-caption: 表 :tip-caption: ヒント :toc-title: 目次 :untitled-label: 無題 :version-label: バージョン :warning-caption: 警告 endif::[] // // Korean 
translation, courtesy of Sungsik Nam ifeval::["{lang}" == "kr"] :appendix-caption: 부록 :caution-caption: 주의 :example-caption: 예시 :figure-caption: 그림 :important-caption: 중요 :last-update-label: 마지막 업데이트 //:listing-caption: 목록 :manname-title: 이름 :note-caption: 노트 //:preface-title: 머리말 :table-caption: 표 :tip-caption: 힌트 :toc-title: 차례 :untitled-label: 익명 :version-label: 버전 :warning-caption: 경고 endif::[] // // Dutch translation, courtesy of Roel Van Steenberghe ifeval::["{lang}" == "nl"] :appendix-caption: Bijlage :caution-caption: Opgelet :example-caption: Voorbeeld :figure-caption: Figuur :important-caption: Belangrijk :last-update-label: Laatste aanpassing //:listing-caption: Lijst :manname-title: NAAM :note-caption: Noot //:preface-title: Inleiding :table-caption: Tabel :tip-caption: Tip :toc-title: Ínhoudsopgave :untitled-label: Naamloos :version-label: Versie :warning-caption: Waarschuwing endif::[] // // Norwegian, courtesy of Aslak Knutsen ifeval::["{lang}" == "no"] :appendix-caption: Vedlegg :caution-caption: Forsiktig :example-caption: Eksempel :figure-caption: Figur :important-caption: Viktig :last-update-label: Sist oppdatert //:listing-caption: :manname-title: NAVN :note-caption: Notat //:preface-title: :table-caption: Tabell :tip-caption: Tips :toc-title: Innholdsfortegnelse :untitled-label: Navnløs :version-label: Versjon :warning-caption: Advarsel endif::[] // // Portuguese translation, courtesy of Roberto Cortez ifeval::["{lang}" == "pt"] :appendix-caption: Apêndice :caution-caption: Atenção :example-caption: Exemplo :figure-caption: Figura :important-caption: Importante :last-update-label: Última actualização //:listing-caption: Listagem :manname-title: NOME :note-caption: Nota //:preface-title: Prefácio :table-caption: Tabela :tip-caption: Sugestão :toc-title: Índice :untitled-label: Sem título :version-label: Versão :warning-caption: Aviso endif::[] // // Brazilian Portuguese translation, courtesy of Rafael Pestano ifeval::["{lang}" == "pt_BR"] :appendix-caption: Apêndice :caution-caption: Cuidado :example-caption: Exemplo :figure-caption: Figura :important-caption: Importante :last-update-label: Última atualização //:listing-caption: Listagem :manname-title: NOME :note-caption: Nota //:preface-title: Prefácio :table-caption: Tabela :tip-caption: Dica :toc-title: Índice :untitled-label: Sem título :version-label: Versão :warning-caption: Aviso endif::[] // // Russian translation, courtesy of Alexander Zobkov ifeval::["{lang}" == "ru"] :appendix-caption: Приложение :caution-caption: Внимание :example-caption: Пример :figure-caption: Рисунок :important-caption: Важно :last-update-label: Последний раз обновлено //:listing-caption: Листинг :manname-title: НАЗВАНИЕ :note-caption: Примечание //:preface-title: Предисловие :table-caption: Таблица :tip-caption: Подсказка :toc-title: Содержание :untitled-label: Без названия :version-label: Версия :warning-caption: Предупреждение endif::[] // // Serbian Cyrillic translation, courtesy of Bojan Stipic ifeval::["{lang}" == "sr"] :appendix-caption: Додатак :caution-caption: Опрез //:chapter-label: Поглавље :example-caption: Пример :figure-caption: Слика :important-caption: Важно :last-update-label: Последње ажурирано //:listing-caption: Списак :manname-title: НАЗИВ :note-caption: Белешка //:preface-title: Предговор :table-caption: Табела :tip-caption: Савет :toc-title: Садржај :untitled-label: Без назива :version-label: Верзија :warning-caption: Упозорење endif::[] // // Serbian Latin translation, courtesy of Bojan Stipic ifeval::["{lang}" 
== "sr_Latn"] :appendix-caption: Dodatak :caution-caption: Oprez //:chapter-label: Poglavlje :example-caption: Primer :figure-caption: Slika :important-caption: Važno :last-update-label: Poslednje ažurirano //:listing-caption: Spisak :manname-title: NAZIV :note-caption: Beleška //:preface-title: Predgovor :table-caption: Tabela :tip-caption: Savet :toc-title: Sadržaj :untitled-label: Bez naziva :version-label: Verzija :warning-caption: Upozorenje endif::[] // // Turkish translation, courtesy of Rahman Usta ifeval::["{lang}" == "tr"] :appendix-caption: Ek bölüm :caution-caption: Dikkat :example-caption: Örnek :figure-caption: Görsel :important-caption: Önemli :last-update-label: Son güncelleme //:listing-caption: Listeleme :manname-title: İSİM :note-caption: Not //:preface-title: Ön söz :table-caption: Tablo :tip-caption: İpucu :toc-title: İçindekiler :untitled-label: İsimsiz :version-label: Versiyon :warning-caption: Uyarı endif::[] // // Simplified Chinese translation, courtesy of John Dong ifeval::["{lang}" == "zh_CN"] :appendix-caption: 附录 :caution-caption: 注意 :example-caption: 示例 :figure-caption: 图表 :important-caption: 重要 :last-update-label: 最后更新 //:listing-caption: 列表 :manname-title: 名称 :note-caption: 笔记 //:preface-title: 序言 :table-caption: 表格 :tip-caption: 提示 :toc-title: 目录 :untitled-label: 暂无标题 :version-label: 版本 :warning-caption: 警告 endif::[] // // Traditional Chinese translation, courtesy of John Dong ifeval::["{lang}" == "zh_TW"] :appendix-caption: 附錄 :caution-caption: 注意 :example-caption: 示例 :figure-caption: 圖表 :important-caption: 重要 :last-update-label: 最後更新 //:listing-caption: 列表 :manname-title: 名稱 :note-caption: 筆記 //:preface-title: 序言 :table-caption: 表格 :tip-caption: 提示 :toc-title: 目錄 :untitled-label: 暫無標題 :version-label: 版本 :warning-caption: 警告 endif::[] asciidoctor-1.5.5/data/stylesheets/000077500000000000000000000000001277513741400173255ustar00rootroot00000000000000asciidoctor-1.5.5/data/stylesheets/asciidoctor-default.css000066400000000000000000000721231277513741400237710ustar00rootroot00000000000000/* Asciidoctor default stylesheet | MIT License | http://asciidoctor.org */ /* Remove comment around @import statement below when using as a custom stylesheet */ /*@import "https://fonts.googleapis.com/css?family=Open+Sans:300,300italic,400,400italic,600,600italic%7CNoto+Serif:400,400italic,700,700italic%7CDroid+Sans+Mono:400,700";*/ article,aside,details,figcaption,figure,footer,header,hgroup,main,nav,section,summary{display:block} audio,canvas,video{display:inline-block} audio:not([controls]){display:none;height:0} [hidden],template{display:none} script{display:none!important} html{font-family:sans-serif;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%} a{background:transparent} a:focus{outline:thin dotted} a:active,a:hover{outline:0} h1{font-size:2em;margin:.67em 0} abbr[title]{border-bottom:1px dotted} b,strong{font-weight:bold} dfn{font-style:italic} hr{-moz-box-sizing:content-box;box-sizing:content-box;height:0} mark{background:#ff0;color:#000} code,kbd,pre,samp{font-family:monospace;font-size:1em} pre{white-space:pre-wrap} q{quotes:"\201C" "\201D" "\2018" "\2019"} small{font-size:80%} sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline} sup{top:-.5em} sub{bottom:-.25em} img{border:0} svg:not(:root){overflow:hidden} figure{margin:0} fieldset{border:1px solid silver;margin:0 2px;padding:.35em .625em .75em} legend{border:0;padding:0} button,input,select,textarea{font-family:inherit;font-size:100%;margin:0} button,input{line-height:normal} 
button,select{text-transform:none} button,html input[type="button"],input[type="reset"],input[type="submit"]{-webkit-appearance:button;cursor:pointer} button[disabled],html input[disabled]{cursor:default} input[type="checkbox"],input[type="radio"]{box-sizing:border-box;padding:0} input[type="search"]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box} input[type="search"]::-webkit-search-cancel-button,input[type="search"]::-webkit-search-decoration{-webkit-appearance:none} button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0} textarea{overflow:auto;vertical-align:top} table{border-collapse:collapse;border-spacing:0} *,*:before,*:after{-moz-box-sizing:border-box;-webkit-box-sizing:border-box;box-sizing:border-box} html,body{font-size:100%} body{background:#fff;color:rgba(0,0,0,.8);padding:0;margin:0;font-family:"Noto Serif","DejaVu Serif",serif;font-weight:400;font-style:normal;line-height:1;position:relative;cursor:auto;tab-size:4;-moz-osx-font-smoothing:grayscale;-webkit-font-smoothing:antialiased} a:hover{cursor:pointer} img,object,embed{max-width:100%;height:auto} object,embed{height:100%} img{-ms-interpolation-mode:bicubic} .left{float:left!important} .right{float:right!important} .text-left{text-align:left!important} .text-right{text-align:right!important} .text-center{text-align:center!important} .text-justify{text-align:justify!important} .hide{display:none} img,object,svg{display:inline-block;vertical-align:middle} textarea{height:auto;min-height:50px} select{width:100%} .center{margin-left:auto;margin-right:auto} .spread{width:100%} p.lead,.paragraph.lead>p,#preamble>.sectionbody>.paragraph:first-of-type p{font-size:1.21875em;line-height:1.6} .subheader,.admonitionblock td.content>.title,.audioblock>.title,.exampleblock>.title,.imageblock>.title,.listingblock>.title,.literalblock>.title,.stemblock>.title,.openblock>.title,.paragraph>.title,.quoteblock>.title,table.tableblock>.title,.verseblock>.title,.videoblock>.title,.dlist>.title,.olist>.title,.ulist>.title,.qlist>.title,.hdlist>.title{line-height:1.45;color:#7a2518;font-weight:400;margin-top:0;margin-bottom:.25em} div,dl,dt,dd,ul,ol,li,h1,h2,h3,#toctitle,.sidebarblock>.content>.title,h4,h5,h6,pre,form,p,blockquote,th,td{margin:0;padding:0;direction:ltr} a{color:#2156a5;text-decoration:underline;line-height:inherit} a:hover,a:focus{color:#1d4b8f} a img{border:none} p{font-family:inherit;font-weight:400;font-size:1em;line-height:1.6;margin-bottom:1.25em;text-rendering:optimizeLegibility} p aside{font-size:.875em;line-height:1.35;font-style:italic} h1,h2,h3,#toctitle,.sidebarblock>.content>.title,h4,h5,h6{font-family:"Open Sans","DejaVu Sans",sans-serif;font-weight:300;font-style:normal;color:#ba3925;text-rendering:optimizeLegibility;margin-top:1em;margin-bottom:.5em;line-height:1.0125em} h1 small,h2 small,h3 small,#toctitle small,.sidebarblock>.content>.title small,h4 small,h5 small,h6 small{font-size:60%;color:#e99b8f;line-height:0} h1{font-size:2.125em} h2{font-size:1.6875em} h3,#toctitle,.sidebarblock>.content>.title{font-size:1.375em} h4,h5{font-size:1.125em} h6{font-size:1em} hr{border:solid #ddddd8;border-width:1px 0 0;clear:both;margin:1.25em 0 1.1875em;height:0} em,i{font-style:italic;line-height:inherit} strong,b{font-weight:bold;line-height:inherit} small{font-size:60%;line-height:inherit} code{font-family:"Droid Sans Mono","DejaVu Sans Mono",monospace;font-weight:400;color:rgba(0,0,0,.9)} 
ul,ol,dl{font-size:1em;line-height:1.6;margin-bottom:1.25em;list-style-position:outside;font-family:inherit} ul,ol,ul.no-bullet,ol.no-bullet{margin-left:1.5em} ul li ul,ul li ol{margin-left:1.25em;margin-bottom:0;font-size:1em} ul.square li ul,ul.circle li ul,ul.disc li ul{list-style:inherit} ul.square{list-style-type:square} ul.circle{list-style-type:circle} ul.disc{list-style-type:disc} ul.no-bullet{list-style:none} ol li ul,ol li ol{margin-left:1.25em;margin-bottom:0} dl dt{margin-bottom:.3125em;font-weight:bold} dl dd{margin-bottom:1.25em} abbr,acronym{text-transform:uppercase;font-size:90%;color:rgba(0,0,0,.8);border-bottom:1px dotted #ddd;cursor:help} abbr{text-transform:none} blockquote{margin:0 0 1.25em;padding:.5625em 1.25em 0 1.1875em;border-left:1px solid #ddd} blockquote cite{display:block;font-size:.9375em;color:rgba(0,0,0,.6)} blockquote cite:before{content:"\2014 \0020"} blockquote cite a,blockquote cite a:visited{color:rgba(0,0,0,.6)} blockquote,blockquote p{line-height:1.6;color:rgba(0,0,0,.85)} @media only screen and (min-width:768px){h1,h2,h3,#toctitle,.sidebarblock>.content>.title,h4,h5,h6{line-height:1.2} h1{font-size:2.75em} h2{font-size:2.3125em} h3,#toctitle,.sidebarblock>.content>.title{font-size:1.6875em} h4{font-size:1.4375em}} table{background:#fff;margin-bottom:1.25em;border:solid 1px #dedede} table thead,table tfoot{background:#f7f8f7;font-weight:bold} table thead tr th,table thead tr td,table tfoot tr th,table tfoot tr td{padding:.5em .625em .625em;font-size:inherit;color:rgba(0,0,0,.8);text-align:left} table tr th,table tr td{padding:.5625em .625em;font-size:inherit;color:rgba(0,0,0,.8)} table tr.even,table tr.alt,table tr:nth-of-type(even){background:#f8f8f7} table thead tr th,table tfoot tr th,table tbody tr td,table tr td,table tfoot tr td{display:table-cell;line-height:1.6} h1,h2,h3,#toctitle,.sidebarblock>.content>.title,h4,h5,h6{line-height:1.2;word-spacing:-.05em} h1 strong,h2 strong,h3 strong,#toctitle strong,.sidebarblock>.content>.title strong,h4 strong,h5 strong,h6 strong{font-weight:400} .clearfix:before,.clearfix:after,.float-group:before,.float-group:after{content:" ";display:table} .clearfix:after,.float-group:after{clear:both} *:not(pre)>code{font-size:.9375em;font-style:normal!important;letter-spacing:0;padding:.1em .5ex;word-spacing:-.15em;background-color:#f7f7f8;-webkit-border-radius:4px;border-radius:4px;line-height:1.45;text-rendering:optimizeSpeed;word-wrap:break-word} *:not(pre)>code.nobreak{word-wrap:normal} *:not(pre)>code.nowrap{white-space:nowrap} pre,pre>code{line-height:1.45;color:rgba(0,0,0,.9);font-family:"Droid Sans Mono","DejaVu Sans Mono",monospace;font-weight:400;text-rendering:optimizeSpeed} em em{font-style:normal} strong strong{font-weight:400} .keyseq{color:rgba(51,51,51,.8)} kbd{font-family:"Droid Sans Mono","DejaVu Sans Mono",monospace;display:inline-block;color:rgba(0,0,0,.8);font-size:.65em;line-height:1.45;background-color:#f7f7f7;border:1px solid #ccc;-webkit-border-radius:3px;border-radius:3px;-webkit-box-shadow:0 1px 0 rgba(0,0,0,.2),0 0 0 .1em white inset;box-shadow:0 1px 0 rgba(0,0,0,.2),0 0 0 .1em #fff inset;margin:0 .15em;padding:.2em .5em;vertical-align:middle;position:relative;top:-.1em;white-space:nowrap} .keyseq kbd:first-child{margin-left:0} .keyseq kbd:last-child{margin-right:0} .menuseq,.menu{color:rgba(0,0,0,.8)} b.button:before,b.button:after{position:relative;top:-1px;font-weight:400} b.button:before{content:"[";padding:0 3px 0 2px} b.button:after{content:"]";padding:0 2px 0 3px} p 
a>code:hover{color:rgba(0,0,0,.9)} #header,#content,#footnotes,#footer{width:100%;margin-left:auto;margin-right:auto;margin-top:0;margin-bottom:0;max-width:62.5em;*zoom:1;position:relative;padding-left:.9375em;padding-right:.9375em} #header:before,#header:after,#content:before,#content:after,#footnotes:before,#footnotes:after,#footer:before,#footer:after{content:" ";display:table} #header:after,#content:after,#footnotes:after,#footer:after{clear:both} #content{margin-top:1.25em} #content:before{content:none} #header>h1:first-child{color:rgba(0,0,0,.85);margin-top:2.25rem;margin-bottom:0} #header>h1:first-child+#toc{margin-top:8px;border-top:1px solid #ddddd8} #header>h1:only-child,body.toc2 #header>h1:nth-last-child(2){border-bottom:1px solid #ddddd8;padding-bottom:8px} #header .details{border-bottom:1px solid #ddddd8;line-height:1.45;padding-top:.25em;padding-bottom:.25em;padding-left:.25em;color:rgba(0,0,0,.6);display:-ms-flexbox;display:-webkit-flex;display:flex;-ms-flex-flow:row wrap;-webkit-flex-flow:row wrap;flex-flow:row wrap} #header .details span:first-child{margin-left:-.125em} #header .details span.email a{color:rgba(0,0,0,.85)} #header .details br{display:none} #header .details br+span:before{content:"\00a0\2013\00a0"} #header .details br+span.author:before{content:"\00a0\22c5\00a0";color:rgba(0,0,0,.85)} #header .details br+span#revremark:before{content:"\00a0|\00a0"} #header #revnumber{text-transform:capitalize} #header #revnumber:after{content:"\00a0"} #content>h1:first-child:not([class]){color:rgba(0,0,0,.85);border-bottom:1px solid #ddddd8;padding-bottom:8px;margin-top:0;padding-top:1rem;margin-bottom:1.25rem} #toc{border-bottom:1px solid #efefed;padding-bottom:.5em} #toc>ul{margin-left:.125em} #toc ul.sectlevel0>li>a{font-style:italic} #toc ul.sectlevel0 ul.sectlevel1{margin:.5em 0} #toc ul{font-family:"Open Sans","DejaVu Sans",sans-serif;list-style-type:none} #toc li{line-height:1.3334;margin-top:.3334em} #toc a{text-decoration:none} #toc a:active{text-decoration:underline} #toctitle{color:#7a2518;font-size:1.2em} @media only screen and (min-width:768px){#toctitle{font-size:1.375em} body.toc2{padding-left:15em;padding-right:0} #toc.toc2{margin-top:0!important;background-color:#f8f8f7;position:fixed;width:15em;left:0;top:0;border-right:1px solid #efefed;border-top-width:0!important;border-bottom-width:0!important;z-index:1000;padding:1.25em 1em;height:100%;overflow:auto} #toc.toc2 #toctitle{margin-top:0;margin-bottom:.8rem;font-size:1.2em} #toc.toc2>ul{font-size:.9em;margin-bottom:0} #toc.toc2 ul ul{margin-left:0;padding-left:1em} #toc.toc2 ul.sectlevel0 ul.sectlevel1{padding-left:0;margin-top:.5em;margin-bottom:.5em} body.toc2.toc-right{padding-left:0;padding-right:15em} body.toc2.toc-right #toc.toc2{border-right-width:0;border-left:1px solid #efefed;left:auto;right:0}} @media only screen and (min-width:1280px){body.toc2{padding-left:20em;padding-right:0} #toc.toc2{width:20em} #toc.toc2 #toctitle{font-size:1.375em} #toc.toc2>ul{font-size:.95em} #toc.toc2 ul ul{padding-left:1.25em} body.toc2.toc-right{padding-left:0;padding-right:20em}} #content #toc{border-style:solid;border-width:1px;border-color:#e0e0dc;margin-bottom:1.25em;padding:1.25em;background:#f8f8f7;-webkit-border-radius:4px;border-radius:4px} #content #toc>:first-child{margin-top:0} #content #toc>:last-child{margin-bottom:0} #footer{max-width:100%;background-color:rgba(0,0,0,.8);padding:1.25em} #footer-text{color:rgba(255,255,255,.8);line-height:1.44} .sect1{padding-bottom:.625em} @media only screen and 
(min-width:768px){.sect1{padding-bottom:1.25em}} .sect1+.sect1{border-top:1px solid #efefed} #content h1>a.anchor,h2>a.anchor,h3>a.anchor,#toctitle>a.anchor,.sidebarblock>.content>.title>a.anchor,h4>a.anchor,h5>a.anchor,h6>a.anchor{position:absolute;z-index:1001;width:1.5ex;margin-left:-1.5ex;display:block;text-decoration:none!important;visibility:hidden;text-align:center;font-weight:400} #content h1>a.anchor:before,h2>a.anchor:before,h3>a.anchor:before,#toctitle>a.anchor:before,.sidebarblock>.content>.title>a.anchor:before,h4>a.anchor:before,h5>a.anchor:before,h6>a.anchor:before{content:"\00A7";font-size:.85em;display:block;padding-top:.1em} #content h1:hover>a.anchor,#content h1>a.anchor:hover,h2:hover>a.anchor,h2>a.anchor:hover,h3:hover>a.anchor,#toctitle:hover>a.anchor,.sidebarblock>.content>.title:hover>a.anchor,h3>a.anchor:hover,#toctitle>a.anchor:hover,.sidebarblock>.content>.title>a.anchor:hover,h4:hover>a.anchor,h4>a.anchor:hover,h5:hover>a.anchor,h5>a.anchor:hover,h6:hover>a.anchor,h6>a.anchor:hover{visibility:visible} #content h1>a.link,h2>a.link,h3>a.link,#toctitle>a.link,.sidebarblock>.content>.title>a.link,h4>a.link,h5>a.link,h6>a.link{color:#ba3925;text-decoration:none} #content h1>a.link:hover,h2>a.link:hover,h3>a.link:hover,#toctitle>a.link:hover,.sidebarblock>.content>.title>a.link:hover,h4>a.link:hover,h5>a.link:hover,h6>a.link:hover{color:#a53221} .audioblock,.imageblock,.literalblock,.listingblock,.stemblock,.videoblock{margin-bottom:1.25em} .admonitionblock td.content>.title,.audioblock>.title,.exampleblock>.title,.imageblock>.title,.listingblock>.title,.literalblock>.title,.stemblock>.title,.openblock>.title,.paragraph>.title,.quoteblock>.title,table.tableblock>.title,.verseblock>.title,.videoblock>.title,.dlist>.title,.olist>.title,.ulist>.title,.qlist>.title,.hdlist>.title{text-rendering:optimizeLegibility;text-align:left;font-family:"Noto Serif","DejaVu Serif",serif;font-size:1rem;font-style:italic} table.tableblock>caption.title{white-space:nowrap;overflow:visible;max-width:0} .paragraph.lead>p,#preamble>.sectionbody>.paragraph:first-of-type p{color:rgba(0,0,0,.85)} table.tableblock #preamble>.sectionbody>.paragraph:first-of-type p{font-size:inherit} .admonitionblock>table{border-collapse:separate;border:0;background:none;width:100%} .admonitionblock>table td.icon{text-align:center;width:80px} .admonitionblock>table td.icon img{max-width:none} .admonitionblock>table td.icon .title{font-weight:bold;font-family:"Open Sans","DejaVu Sans",sans-serif;text-transform:uppercase} .admonitionblock>table td.content{padding-left:1.125em;padding-right:1.25em;border-left:1px solid #ddddd8;color:rgba(0,0,0,.6)} .admonitionblock>table td.content>:last-child>:last-child{margin-bottom:0} .exampleblock>.content{border-style:solid;border-width:1px;border-color:#e6e6e6;margin-bottom:1.25em;padding:1.25em;background:#fff;-webkit-border-radius:4px;border-radius:4px} .exampleblock>.content>:first-child{margin-top:0} .exampleblock>.content>:last-child{margin-bottom:0} .sidebarblock{border-style:solid;border-width:1px;border-color:#e0e0dc;margin-bottom:1.25em;padding:1.25em;background:#f8f8f7;-webkit-border-radius:4px;border-radius:4px} .sidebarblock>:first-child{margin-top:0} .sidebarblock>:last-child{margin-bottom:0} .sidebarblock>.content>.title{color:#7a2518;margin-top:0;text-align:center} .exampleblock>.content>:last-child>:last-child,.exampleblock>.content .olist>ol>li:last-child>:last-child,.exampleblock>.content .ulist>ul>li:last-child>:last-child,.exampleblock>.content 
.qlist>ol>li:last-child>:last-child,.sidebarblock>.content>:last-child>:last-child,.sidebarblock>.content .olist>ol>li:last-child>:last-child,.sidebarblock>.content .ulist>ul>li:last-child>:last-child,.sidebarblock>.content .qlist>ol>li:last-child>:last-child{margin-bottom:0} .literalblock pre,.listingblock pre:not(.highlight),.listingblock pre[class="highlight"],.listingblock pre[class^="highlight "],.listingblock pre.CodeRay,.listingblock pre.prettyprint{background:#f7f7f8} .sidebarblock .literalblock pre,.sidebarblock .listingblock pre:not(.highlight),.sidebarblock .listingblock pre[class="highlight"],.sidebarblock .listingblock pre[class^="highlight "],.sidebarblock .listingblock pre.CodeRay,.sidebarblock .listingblock pre.prettyprint{background:#f2f1f1} .literalblock pre,.literalblock pre[class],.listingblock pre,.listingblock pre[class]{-webkit-border-radius:4px;border-radius:4px;word-wrap:break-word;padding:1em;font-size:.8125em} .literalblock pre.nowrap,.literalblock pre[class].nowrap,.listingblock pre.nowrap,.listingblock pre[class].nowrap{overflow-x:auto;white-space:pre;word-wrap:normal} @media only screen and (min-width:768px){.literalblock pre,.literalblock pre[class],.listingblock pre,.listingblock pre[class]{font-size:.90625em}} @media only screen and (min-width:1280px){.literalblock pre,.literalblock pre[class],.listingblock pre,.listingblock pre[class]{font-size:1em}} .literalblock.output pre{color:#f7f7f8;background-color:rgba(0,0,0,.9)} .listingblock pre.highlightjs{padding:0} .listingblock pre.highlightjs>code{padding:1em;-webkit-border-radius:4px;border-radius:4px} .listingblock pre.prettyprint{border-width:0} .listingblock>.content{position:relative} .listingblock code[data-lang]:before{display:none;content:attr(data-lang);position:absolute;font-size:.75em;top:.425rem;right:.5rem;line-height:1;text-transform:uppercase;color:#999} .listingblock:hover code[data-lang]:before{display:block} .listingblock.terminal pre .command:before{content:attr(data-prompt);padding-right:.5em;color:#999} .listingblock.terminal pre .command:not([data-prompt]):before{content:"$"} table.pyhltable{border-collapse:separate;border:0;margin-bottom:0;background:none} table.pyhltable td{vertical-align:top;padding-top:0;padding-bottom:0;line-height:1.45} table.pyhltable td.code{padding-left:.75em;padding-right:0} pre.pygments .lineno,table.pyhltable td:not(.code){color:#999;padding-left:0;padding-right:.5em;border-right:1px solid #ddddd8} pre.pygments .lineno{display:inline-block;margin-right:.25em} table.pyhltable .linenodiv{background:none!important;padding-right:0!important} .quoteblock{margin:0 1em 1.25em 1.5em;display:table} .quoteblock>.title{margin-left:-1.5em;margin-bottom:.75em} .quoteblock blockquote,.quoteblock blockquote p{color:rgba(0,0,0,.85);font-size:1.15rem;line-height:1.75;word-spacing:.1em;letter-spacing:0;font-style:italic;text-align:justify} .quoteblock blockquote{margin:0;padding:0;border:0} .quoteblock blockquote:before{content:"\201c";float:left;font-size:2.75em;font-weight:bold;line-height:.6em;margin-left:-.6em;color:#7a2518;text-shadow:0 1px 2px rgba(0,0,0,.1)} .quoteblock blockquote>.paragraph:last-child p{margin-bottom:0} .quoteblock .attribution{margin-top:.5em;margin-right:.5ex;text-align:right} .quoteblock .quoteblock{margin-left:0;margin-right:0;padding:.5em 0;border-left:3px solid rgba(0,0,0,.6)} .quoteblock .quoteblock blockquote{padding:0 0 0 .75em} .quoteblock .quoteblock blockquote:before{display:none} .verseblock{margin:0 1em 1.25em 1em} .verseblock 
pre{font-family:"Open Sans","DejaVu Sans",sans;font-size:1.15rem;color:rgba(0,0,0,.85);font-weight:300;text-rendering:optimizeLegibility} .verseblock pre strong{font-weight:400} .verseblock .attribution{margin-top:1.25rem;margin-left:.5ex} .quoteblock .attribution,.verseblock .attribution{font-size:.9375em;line-height:1.45;font-style:italic} .quoteblock .attribution br,.verseblock .attribution br{display:none} .quoteblock .attribution cite,.verseblock .attribution cite{display:block;letter-spacing:-.025em;color:rgba(0,0,0,.6)} .quoteblock.abstract{margin:0 0 1.25em 0;display:block} .quoteblock.abstract blockquote,.quoteblock.abstract blockquote p{text-align:left;word-spacing:0} .quoteblock.abstract blockquote:before,.quoteblock.abstract blockquote p:first-of-type:before{display:none} table.tableblock{max-width:100%;border-collapse:separate} table.tableblock td>.paragraph:last-child p>p:last-child,table.tableblock th>p:last-child,table.tableblock td>p:last-child{margin-bottom:0} table.tableblock,th.tableblock,td.tableblock{border:0 solid #dedede} table.grid-all th.tableblock,table.grid-all td.tableblock{border-width:0 1px 1px 0} table.grid-all tfoot>tr>th.tableblock,table.grid-all tfoot>tr>td.tableblock{border-width:1px 1px 0 0} table.grid-cols th.tableblock,table.grid-cols td.tableblock{border-width:0 1px 0 0} table.grid-all *>tr>.tableblock:last-child,table.grid-cols *>tr>.tableblock:last-child{border-right-width:0} table.grid-rows th.tableblock,table.grid-rows td.tableblock{border-width:0 0 1px 0} table.grid-all tbody>tr:last-child>th.tableblock,table.grid-all tbody>tr:last-child>td.tableblock,table.grid-all thead:last-child>tr>th.tableblock,table.grid-rows tbody>tr:last-child>th.tableblock,table.grid-rows tbody>tr:last-child>td.tableblock,table.grid-rows thead:last-child>tr>th.tableblock{border-bottom-width:0} table.grid-rows tfoot>tr>th.tableblock,table.grid-rows tfoot>tr>td.tableblock{border-width:1px 0 0 0} table.frame-all{border-width:1px} table.frame-sides{border-width:0 1px} table.frame-topbot{border-width:1px 0} th.halign-left,td.halign-left{text-align:left} th.halign-right,td.halign-right{text-align:right} th.halign-center,td.halign-center{text-align:center} th.valign-top,td.valign-top{vertical-align:top} th.valign-bottom,td.valign-bottom{vertical-align:bottom} th.valign-middle,td.valign-middle{vertical-align:middle} table thead th,table tfoot th{font-weight:bold} tbody tr th{display:table-cell;line-height:1.6;background:#f7f8f7} tbody tr th,tbody tr th p,tfoot tr th,tfoot tr th p{color:rgba(0,0,0,.8);font-weight:bold} p.tableblock>code:only-child{background:none;padding:0} p.tableblock{font-size:1em} td>div.verse{white-space:pre} ol{margin-left:1.75em} ul li ol{margin-left:1.5em} dl dd{margin-left:1.125em} dl dd:last-child,dl dd:last-child>:last-child{margin-bottom:0} ol>li p,ul>li p,ul dd,ol dd,.olist .olist,.ulist .ulist,.ulist .olist,.olist .ulist{margin-bottom:.625em} ul.unstyled,ol.unnumbered,ul.checklist,ul.none{list-style-type:none} ul.unstyled,ol.unnumbered,ul.checklist{margin-left:.625em} ul.checklist li>p:first-child>.fa-square-o:first-child,ul.checklist li>p:first-child>.fa-check-square-o:first-child{width:1em;font-size:.85em} ul.checklist li>p:first-child>input[type="checkbox"]:first-child{width:1em;position:relative;top:1px} ul.inline{margin:0 auto .625em auto;margin-left:-1.375em;margin-right:0;padding:0;list-style:none;overflow:hidden} ul.inline>li{list-style:none;float:left;margin-left:1.375em;display:block} ul.inline>li>*{display:block} .unstyled dl 
dt{font-weight:400;font-style:normal} ol.arabic{list-style-type:decimal} ol.decimal{list-style-type:decimal-leading-zero} ol.loweralpha{list-style-type:lower-alpha} ol.upperalpha{list-style-type:upper-alpha} ol.lowerroman{list-style-type:lower-roman} ol.upperroman{list-style-type:upper-roman} ol.lowergreek{list-style-type:lower-greek} .hdlist>table,.colist>table{border:0;background:none} .hdlist>table>tbody>tr,.colist>table>tbody>tr{background:none} td.hdlist1,td.hdlist2{vertical-align:top;padding:0 .625em} td.hdlist1{font-weight:bold;padding-bottom:1.25em} .literalblock+.colist,.listingblock+.colist{margin-top:-.5em} .colist>table tr>td:first-of-type{padding:0 .75em;line-height:1} .colist>table tr>td:last-of-type{padding:.25em 0} .thumb,.th{line-height:0;display:inline-block;border:solid 4px #fff;-webkit-box-shadow:0 0 0 1px #ddd;box-shadow:0 0 0 1px #ddd} .imageblock.left,.imageblock[style*="float: left"]{margin:.25em .625em 1.25em 0} .imageblock.right,.imageblock[style*="float: right"]{margin:.25em 0 1.25em .625em} .imageblock>.title{margin-bottom:0} .imageblock.thumb,.imageblock.th{border-width:6px} .imageblock.thumb>.title,.imageblock.th>.title{padding:0 .125em} .image.left,.image.right{margin-top:.25em;margin-bottom:.25em;display:inline-block;line-height:0} .image.left{margin-right:.625em} .image.right{margin-left:.625em} a.image{text-decoration:none;display:inline-block} a.image object{pointer-events:none} sup.footnote,sup.footnoteref{font-size:.875em;position:static;vertical-align:super} sup.footnote a,sup.footnoteref a{text-decoration:none} sup.footnote a:active,sup.footnoteref a:active{text-decoration:underline} #footnotes{padding-top:.75em;padding-bottom:.75em;margin-bottom:.625em} #footnotes hr{width:20%;min-width:6.25em;margin:-.25em 0 .75em 0;border-width:1px 0 0 0} #footnotes .footnote{padding:0 .375em 0 .225em;line-height:1.3334;font-size:.875em;margin-left:1.2em;text-indent:-1.05em;margin-bottom:.2em} #footnotes .footnote a:first-of-type{font-weight:bold;text-decoration:none} #footnotes .footnote:last-of-type{margin-bottom:0} #content #footnotes{margin-top:-.625em;margin-bottom:0;padding:.75em 0} .gist .file-data>table{border:0;background:#fff;width:100%;margin-bottom:0} .gist .file-data>table td.line-data{width:99%} div.unbreakable{page-break-inside:avoid} .big{font-size:larger} .small{font-size:smaller} .underline{text-decoration:underline} .overline{text-decoration:overline} .line-through{text-decoration:line-through} .aqua{color:#00bfbf} .aqua-background{background-color:#00fafa} .black{color:#000} .black-background{background-color:#000} .blue{color:#0000bf} .blue-background{background-color:#0000fa} .fuchsia{color:#bf00bf} .fuchsia-background{background-color:#fa00fa} .gray{color:#606060} .gray-background{background-color:#7d7d7d} .green{color:#006000} .green-background{background-color:#007d00} .lime{color:#00bf00} .lime-background{background-color:#00fa00} .maroon{color:#600000} .maroon-background{background-color:#7d0000} .navy{color:#000060} .navy-background{background-color:#00007d} .olive{color:#606000} .olive-background{background-color:#7d7d00} .purple{color:#600060} .purple-background{background-color:#7d007d} .red{color:#bf0000} .red-background{background-color:#fa0000} .silver{color:#909090} .silver-background{background-color:#bcbcbc} .teal{color:#006060} .teal-background{background-color:#007d7d} .white{color:#bfbfbf} .white-background{background-color:#fafafa} .yellow{color:#bfbf00} .yellow-background{background-color:#fafa00} 
span.icon>.fa{cursor:default} .admonitionblock td.icon [class^="fa icon-"]{font-size:2.5em;text-shadow:1px 1px 2px rgba(0,0,0,.5);cursor:default} .admonitionblock td.icon .icon-note:before{content:"\f05a";color:#19407c} .admonitionblock td.icon .icon-tip:before{content:"\f0eb";text-shadow:1px 1px 2px rgba(155,155,0,.8);color:#111} .admonitionblock td.icon .icon-warning:before{content:"\f071";color:#bf6900} .admonitionblock td.icon .icon-caution:before{content:"\f06d";color:#bf3400} .admonitionblock td.icon .icon-important:before{content:"\f06a";color:#bf0000} .conum[data-value]{display:inline-block;color:#fff!important;background-color:rgba(0,0,0,.8);-webkit-border-radius:100px;border-radius:100px;text-align:center;font-size:.75em;width:1.67em;height:1.67em;line-height:1.67em;font-family:"Open Sans","DejaVu Sans",sans-serif;font-style:normal;font-weight:bold} .conum[data-value] *{color:#fff!important} .conum[data-value]+b{display:none} .conum[data-value]:after{content:attr(data-value)} pre .conum[data-value]{position:relative;top:-.125em} b.conum *{color:inherit!important} .conum:not([data-value]):empty{display:none} dt,th.tableblock,td.content,div.footnote{text-rendering:optimizeLegibility} h1,h2,p,td.content,span.alt{letter-spacing:-.01em} p strong,td.content strong,div.footnote strong{letter-spacing:-.005em} p,blockquote,dt,td.content,span.alt{font-size:1.0625rem} p{margin-bottom:1.25rem} .sidebarblock p,.sidebarblock dt,.sidebarblock td.content,p.tableblock{font-size:1em} .exampleblock>.content{background-color:#fffef7;border-color:#e0e0dc;-webkit-box-shadow:0 1px 4px #e0e0dc;box-shadow:0 1px 4px #e0e0dc} .print-only{display:none!important} @media print{@page{margin:1.25cm .75cm} *{-webkit-box-shadow:none!important;box-shadow:none!important;text-shadow:none!important} a{color:inherit!important;text-decoration:underline!important} a.bare,a[href^="#"],a[href^="mailto:"]{text-decoration:none!important} a[href^="http:"]:not(.bare):after,a[href^="https:"]:not(.bare):after{content:"(" attr(href) ")";display:inline-block;font-size:.875em;padding-left:.25em} abbr[title]:after{content:" (" attr(title) ")"} pre,blockquote,tr,img,object,svg{page-break-inside:avoid} thead{display:table-header-group} svg{max-width:100%} p,blockquote,dt,td.content{font-size:1em;orphans:3;widows:3} h2,h3,#toctitle,.sidebarblock>.content>.title{page-break-after:avoid} #toc,.sidebarblock,.exampleblock>.content{background:none!important} #toc{border-bottom:1px solid #ddddd8!important;padding-bottom:0!important} .sect1{padding-bottom:0!important} .sect1+.sect1{border:0!important} #header>h1:first-child{margin-top:1.25rem} body.book #header{text-align:center} body.book #header>h1:first-child{border:0!important;margin:2.5em 0 1em 0} body.book #header .details{border:0!important;display:block;padding:0!important} body.book #header .details span:first-child{margin-left:0!important} body.book #header .details br{display:block} body.book #header .details br+span:before{content:none!important} body.book #toc{border:0!important;text-align:left!important;padding:0!important;margin:0!important} body.book #toc,body.book #preamble,body.book h1.sect0,body.book .sect1>h2{page-break-before:always} .listingblock code[data-lang]:before{display:block} #footer{background:none!important;padding:0 .9375em} #footer-text{color:rgba(0,0,0,.6)!important;font-size:.9em} .hide-on-print{display:none!important} .print-only{display:block!important} .hide-for-print{display:none!important} .show-for-print{display:inherit!important}} 
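The stylesheet above is asciidoctor-default.css, the default theme the gem embeds into standalone HTML5 output. As a hedged sketch only (not code taken from this repository), a custom copy of it could be applied through the Ruby API by overriding the stylesheet and stylesdir attributes; the file name my-theme.css and the directory styles below are hypothetical placeholders:

# Illustrative sketch, not part of the gem: convert a document against a
# custom stylesheet instead of the bundled asciidoctor-default.css.
# 'my-theme.css' and 'styles' are made-up names used only for illustration.
require 'asciidoctor'

Asciidoctor.convert_file 'sample.adoc',
    :safe       => :safe,
    :attributes => { 'stylesheet' => 'my-theme.css', 'stylesdir' => 'styles' }

If the default stylesheet is reused this way, the comment at its top notes that the @import statement for the web fonts should be uncommented.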
asciidoctor-1.5.5/data/stylesheets/coderay-asciidoctor.css000066400000000000000000000070171277513741400237730ustar00rootroot00000000000000/* Stylesheet for CodeRay to match GitHub theme | MIT License | http://foundation.zurb.com */ /*pre.CodeRay {background-color:#f7f7f8;}*/ .CodeRay .line-numbers{border-right:1px solid #d8d8d8;padding:0 0.5em 0 .25em} .CodeRay span.line-numbers{display:inline-block;margin-right:.5em;color:rgba(0,0,0,.3)} .CodeRay .line-numbers strong{color:rgba(0,0,0,.4)} table.CodeRay{border-collapse:separate;border-spacing:0;margin-bottom:0;border:0;background:none} table.CodeRay td{vertical-align: top;line-height:1.45} table.CodeRay td.line-numbers{text-align:right} table.CodeRay td.line-numbers>pre{padding:0;color:rgba(0,0,0,.3)} table.CodeRay td.code{padding:0 0 0 .5em} table.CodeRay td.code>pre{padding:0} .CodeRay .debug{color:#fff !important;background:#000080 !important} .CodeRay .annotation{color:#007} .CodeRay .attribute-name{color:#000080} .CodeRay .attribute-value{color:#700} .CodeRay .binary{color:#509} .CodeRay .comment{color:#998;font-style:italic} .CodeRay .char{color:#04d} .CodeRay .char .content{color:#04d} .CodeRay .char .delimiter{color:#039} .CodeRay .class{color:#458;font-weight:bold} .CodeRay .complex{color:#a08} .CodeRay .constant,.CodeRay .predefined-constant{color:#008080} .CodeRay .color{color:#099} .CodeRay .class-variable{color:#369} .CodeRay .decorator{color:#b0b} .CodeRay .definition{color:#099} .CodeRay .delimiter{color:#000} .CodeRay .doc{color:#970} .CodeRay .doctype{color:#34b} .CodeRay .doc-string{color:#d42} .CodeRay .escape{color:#666} .CodeRay .entity{color:#800} .CodeRay .error{color:#808} .CodeRay .exception{color:inherit} .CodeRay .filename{color:#099} .CodeRay .function{color:#900;font-weight:bold} .CodeRay .global-variable{color:#008080} .CodeRay .hex{color:#058} .CodeRay .integer,.CodeRay .float{color:#099} .CodeRay .include{color:#555} .CodeRay .inline{color:#000} .CodeRay .inline .inline{background:#ccc} .CodeRay .inline .inline .inline{background:#bbb} .CodeRay .inline .inline-delimiter{color:#d14} .CodeRay .inline-delimiter{color:#d14} .CodeRay .important{color:#555;font-weight:bold} .CodeRay .interpreted{color:#b2b} .CodeRay .instance-variable{color:#008080} .CodeRay .label{color:#970} .CodeRay .local-variable{color:#963} .CodeRay .octal{color:#40e} .CodeRay .predefined{color:#369} .CodeRay .preprocessor{color:#579} .CodeRay .pseudo-class{color:#555} .CodeRay .directive{font-weight:bold} .CodeRay .type{font-weight:bold} .CodeRay .predefined-type{color:inherit} .CodeRay .reserved,.CodeRay .keyword {color:#000;font-weight:bold} .CodeRay .key{color:#808} .CodeRay .key .delimiter{color:#606} .CodeRay .key .char{color:#80f} .CodeRay .value{color:#088} .CodeRay .regexp .delimiter{color:#808} .CodeRay .regexp .content{color:#808} .CodeRay .regexp .modifier{color:#808} .CodeRay .regexp .char{color:#d14} .CodeRay .regexp .function{color:#404;font-weight:bold} .CodeRay .string{color:#d20} .CodeRay .string .string .string{background:#ffd0d0} .CodeRay .string .content{color:#d14} .CodeRay .string .char{color:#d14} .CodeRay .string .delimiter{color:#d14} .CodeRay .shell{color:#d14} .CodeRay .shell .delimiter{color:#d14} .CodeRay .symbol{color:#990073} .CodeRay .symbol .content{color:#a60} .CodeRay .symbol .delimiter{color:#630} .CodeRay .tag{color:#008080} .CodeRay .tag-special{color:#d70} .CodeRay .variable{color:#036} .CodeRay .insert{background:#afa} .CodeRay .delete{background:#faa} .CodeRay .change{color:#aaf;background:#007} 
.CodeRay .head{color:#f8f;background:#505} .CodeRay .insert .insert{color:#080} .CodeRay .delete .delete{color:#800} .CodeRay .change .change{color:#66f} .CodeRay .head .head{color:#f4f} asciidoctor-1.5.5/features/000077500000000000000000000000001277513741400156565ustar00rootroot00000000000000asciidoctor-1.5.5/features/open_block.feature000066400000000000000000000036011277513741400213460ustar00rootroot00000000000000# language: en Feature: Open Blocks In order to group content in a generic container As a writer I want to be able to wrap content in an open block Scenario: Render an open block that contains a paragraph to HTML Given the AsciiDoc source """ -- A paragraph in an open block. -- """ When it is converted to html Then the result should match the HTML source """

<div class="openblock">
<div class="content">
<div class="paragraph">
<p>A paragraph in an open block.</p>
</div>
</div>
</div>
""" Scenario: Render an open block that contains a paragraph to DocBook Given the AsciiDoc source """ -- A paragraph in an open block. -- """ When it is converted to docbook Then the result should match the XML source """ A paragraph in an open block. """ Scenario: Render an open block that contains a paragraph to HTML (alt) Given the AsciiDoc source """ -- A paragraph in an open block. -- """ When it is converted to html Then the result should match the HTML structure """ .openblock .content .paragraph p A paragraph in an open block. """ Scenario: Render an open block that contains a paragraph to DocBook (alt) Given the AsciiDoc source """ -- A paragraph in an open block. -- """ When it is converted to docbook Then the result should match the XML structure """ simpara A paragraph in an open block. """ Scenario: Render an open block that contains a list to HTML Given the AsciiDoc source """ -- * one * two * three -- """ When it is converted to html Then the result should match the HTML structure """ .openblock .content .ulist ul li: p one li: p two li: p three """ asciidoctor-1.5.5/features/pass_block.feature000066400000000000000000000024021277513741400213510ustar00rootroot00000000000000# language: en Feature: Open Blocks In order to pass content through unprocessed As a writer I want to be able to mark passthrough content using a pass block Scenario: Render a pass block without performing substitutions by default to HTML Given the AsciiDoc source """ :name: value ++++

{name}

image:tiger.png[] ++++ """ When it is converted to html Then the result should match the HTML source """

{name}

image:tiger.png[] """ Scenario: Render a pass block without performing substitutions by default to DocBook Given the AsciiDoc source """ :name: value ++++ {name} image:tiger.png[] ++++ """ When it is converted to docbook Then the result should match the XML source """ {name} image:tiger.png[] """ Scenario: Render a pass block performing explicit substitutions to HTML Given the AsciiDoc source """ :name: value [subs="attributes,macros"] ++++

{name}

image:tiger.png[] ++++ """ When it is converted to html Then the result should match the HTML source """

value

tiger """ asciidoctor-1.5.5/features/step_definitions.rb000066400000000000000000000026351277513741400215570ustar00rootroot00000000000000# encoding: UTF-8 ASCIIDOCTOR_PROJECT_DIR = File.dirname File.dirname(__FILE__) Dir.chdir ASCIIDOCTOR_PROJECT_DIR if RUBY_VERSION < '1.9' require 'rubygems' end require 'simplecov' if ENV['COVERAGE'] == 'true' require File.join(ASCIIDOCTOR_PROJECT_DIR, 'lib', 'asciidoctor') require 'rspec/expectations' require 'tilt' require 'slim' Given /the AsciiDoc source/ do |source| @source = source end When /it is converted to html/ do @output = Asciidoctor.convert @source #File.open('/tmp/test.adoc', 'w') {|f| f.write @source } #@output = %x{asciidoc -f compat/asciidoc.conf -o - -s /tmp/test.adoc | XMLLINT_INDENT='' xmllint --format - | tail -n +2}.rstrip ##@output = %x{asciidoc -f compat/asciidoc.conf -o - -s /tmp/test.adoc} end When /it is converted to docbook/ do @output = Asciidoctor.convert @source, :backend => :docbook end Then /the result should match the (HTML|XML) source/ do |format, expect| @output.should == expect end Then /the result should match the (HTML|XML) structure/ do |format, expect| case format when 'HTML' options = {:format => :html5} when 'XML' options = {:format => :xhtml} else options = {} end slim_friendly_output = @output.lines.entries.map {|line| if line.start_with? '<' line else %(|#{line}) end }.join Slim::Template.new(options) { slim_friendly_output }.render.should == Slim::Template.new(options) { expect }.render end asciidoctor-1.5.5/features/text_formatting.feature000066400000000000000000000031611277513741400224520ustar00rootroot00000000000000# language: en Feature: Text Formatting In order to apply formatting to the text As a writer I want to be able to markup inline text with formatting characters Scenario: Convert text that contains superscript and subscript characters Given the AsciiDoc source """ _v_~rocket~ is the value ^3^He is the isotope log~4~x^n^ is the expression M^me^ White is the address the 10^th^ point has coordinate (x~10~, y~10~) """ When it is converted to html Then the result should match the HTML source """

vrocket is the value 3He is the isotope log4xn is the expression Mme White is the address the 10th point has coordinate (x10, y10)

""" Scenario: Convert text that has ex-inline literal formatting Given the AsciiDoc source """ Use [x-]`{asciidoctor-version}` to print the version of Asciidoctor. """ When it is converted to html Then the result should match the HTML source """

Use {asciidoctor-version} to print the version of Asciidoctor.

""" Scenario: Convert text that has ex-inline monospaced formatting Given the AsciiDoc source """ :encoding: UTF-8 The document is assumed to be encoded as [x-]+{encoding}+. """ When it is converted to html Then the result should match the HTML source """

The document is assumed to be encoded as UTF-8.

""" asciidoctor-1.5.5/features/xref.feature000066400000000000000000000051271277513741400202040ustar00rootroot00000000000000# language: en Feature: Cross References In order to create links to other sections As a writer I want to be able to use a cross reference macro Scenario: Create a cross reference from an AsciiDoc cell to a section Given the AsciiDoc source """ |=== a|See <<_install>> |=== == Install Instructions go here. """ When it is converted to html Then the result should match the HTML structure """ table.tableblock.frame-all.grid-all.spread colgroup col style='width: 100%;' tbody tr td.tableblock.halign-left.valign-top div .paragraph: p 'See a href='#_install' Install .sect1 h2#_install Install .sectionbody .paragraph: p Instructions go here. """ Scenario: Create a cross reference using the target section title Given the AsciiDoc source """ == Section One content == Section Two refer to <
> """ When it is converted to html Then the result should match the HTML structure """ .sect1 h2#_section_one Section One .sectionbody: .paragraph: p content .sect1 h2#_section_two Section Two .sectionbody: .paragraph: p 'refer to a href='#_section_one' Section One """ Scenario: Create a cross reference using the target reftext Given the AsciiDoc source """ [reftext="the first section"] == Section One content == Section Two refer to <> """ When it is converted to html Then the result should match the HTML structure """ .sect1 h2#_section_one Section One .sectionbody: .paragraph: p content .sect1 h2#_section_two Section Two .sectionbody: .paragraph: p 'refer to a href='#_section_one' the first section """ Scenario: Create a cross reference using the formatted target title Given the AsciiDoc source """ == Section *One* content == Section Two refer to <
> """ When it is converted to html Then the result should match the HTML structure """ .sect1 h2#_section_strong_one_strong 'Section strong One .sectionbody: .paragraph: p content .sect1 h2#_section_two Section Two .sectionbody: .paragraph: p 'refer to a href='#_section_strong_one_strong' 'Section strong One """ asciidoctor-1.5.5/lib/000077500000000000000000000000001277513741400146065ustar00rootroot00000000000000asciidoctor-1.5.5/lib/asciidoctor.rb000066400000000000000000001512401277513741400174410ustar00rootroot00000000000000# encoding: UTF-8 RUBY_ENGINE = 'unknown' unless defined? RUBY_ENGINE RUBY_ENGINE_OPAL = (RUBY_ENGINE == 'opal') RUBY_ENGINE_JRUBY = (RUBY_ENGINE == 'jruby') RUBY_MIN_VERSION_1_9 = (RUBY_VERSION >= '1.9') RUBY_MIN_VERSION_2 = (RUBY_VERSION >= '2') require 'set' # NOTE RUBY_ENGINE == 'opal' conditional blocks are filtered by the Opal preprocessor if RUBY_ENGINE == 'opal' # NOTE asciidoctor/opal_ext is supplied by the Asciidoctor.js build require 'asciidoctor/opal_ext' else autoload :Base64, 'base64' autoload :OpenURI, 'open-uri' autoload :StringScanner, 'strscan' end # ideally we should use require_relative instead of modifying the LOAD_PATH $:.unshift File.dirname __FILE__ # Public: Methods for parsing AsciiDoc input files and converting documents # using eRuby templates. # # AsciiDoc documents comprise a header followed by zero or more sections. # Sections are composed of blocks of content. For example: # # = Doc Title # # == Section 1 # # This is a paragraph block in the first section. # # == Section 2 # # This section has a paragraph block and an olist block. # # . Item 1 # . Item 2 # # Examples: # # Use built-in converter: # # Asciidoctor.convert_file 'sample.adoc' # # Use custom (Tilt-supported) templates: # # Asciidoctor.convert_file 'sample.adoc', :template_dir => 'path/to/templates' # module Asciidoctor # alias the RUBY_ENGINE constant inside the Asciidoctor namespace RUBY_ENGINE = ::RUBY_ENGINE module SafeMode # A safe mode level that disables any of the security features enforced # by Asciidoctor (Ruby is still subject to its own restrictions). UNSAFE = 0; # A safe mode level that closely parallels safe mode in AsciiDoc. This value # prevents access to files which reside outside of the parent directory of # the source file and disables any macro other than the include::[] macro. SAFE = 1; # A safe mode level that disallows the document from setting attributes # that would affect the conversion of the document, in addition to all the # security features of SafeMode::SAFE. For instance, this level disallows # changing the backend or the source-highlighter using an attribute defined # in the source document. This is the most fundamental level of security # for server-side deployments (hence the name). SERVER = 10; # A safe mode level that disallows the document from attempting to read # files from the file system and including the contents of them into the # document, in additional to all the security features of SafeMode::SERVER. # For instance, this level disallows use of the include::[] macro and the # embedding of binary content (data uri), stylesheets and JavaScripts # referenced by the document.(Asciidoctor and trusted extensions may still # be allowed to embed trusted content into the document). # # Since Asciidoctor is aiming for wide adoption, this level is the default # and is recommended for server-side deployments. 
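    # (Hedged editorial illustration, not part of the original source: a safe
    # mode level is selected per conversion through the :safe API option, for
    # example
    #
    #   Asciidoctor.convert_file 'sample.adoc', :safe => Asciidoctor::SafeMode::SECURE
    #
    # where :safe => :secure is an equivalent spelling.)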
SECURE = 20; # A planned safe mode level that disallows the use of passthrough macros and # prevents the document from setting any known attributes, in addition to all # the security features of SafeMode::SECURE. # # Please note that this level is not currently implemented (and therefore not # enforced)! #PARANOID = 100; end # Flags to control compliance with the behavior of AsciiDoc module Compliance @keys = ::Set.new class << self attr :keys end # Defines a new compliance key and assigns an initial value. def self.define key, value instance_variable_set %(@#{key}), value class << self; self; end.send :attr_accessor, key @keys << key nil end # AsciiDoc terminates paragraphs adjacent to # block content (delimiter or block attribute list) # This option allows this behavior to be modified # TODO what about literal paragraph? # Compliance value: true define :block_terminates_paragraph, true # AsciiDoc does not treat paragraphs labeled with a verbatim style # (literal, listing, source, verse) as verbatim # This options allows this behavior to be modified # Compliance value: false define :strict_verbatim_paragraphs, true # NOT CURRENTLY USED # AsciiDoc allows start and end delimiters around # a block to be different lengths # Enabling this option requires matching lengths # Compliance value: false #define :congruent_block_delimiters, true # AsciiDoc supports both single-line and underlined # section titles. # This option disables the underlined variant. # Compliance value: true define :underline_style_section_titles, true # Asciidoctor will unwrap the content in a preamble # if the document has a title and no sections. # Compliance value: false define :unwrap_standalone_preamble, true # AsciiDoc drops lines that contain references to missing attributes. # This behavior is not intuitive to most writers # Compliance value: 'drop-line' define :attribute_missing, 'skip' # AsciiDoc drops lines that contain an attribute unassignemnt. # This behavior may need to be tuned depending on the circumstances. # Compliance value: 'drop-line' define :attribute_undefined, 'drop-line' # Asciidoctor will allow the id, role and options to be set # on blocks using a shorthand syntax (e.g., #idname.rolename%optionname) # Compliance value: false define :shorthand_property_syntax, true # Asciidoctor will start counting at the following number # when creating a unique id when there is a conflict # Compliance value: 2 define :unique_id_start_index, 2 # Asciidoctor will recognize commonly-used Markdown syntax # to the degree it does not interfere with existing # AsciiDoc syntax and behavior. 
# Compliance value: false define :markdown_syntax, true end # The absolute root path of the Asciidoctor RubyGem ROOT_PATH = ::File.dirname ::File.dirname ::File.expand_path __FILE__ # The absolute lib path of the Asciidoctor RubyGem LIB_PATH = ::File.join ROOT_PATH, 'lib' # The absolute data path of the Asciidoctor RubyGem DATA_PATH = ::File.join ROOT_PATH, 'data' # The user's home directory, as best we can determine it # NOTE not using infix rescue for performance reasons, see: https://github.com/jruby/jruby/issues/1816 begin USER_HOME = ::Dir.home rescue USER_HOME = ::ENV['HOME'] || ::Dir.pwd end # Flag to indicate whether encoding can be coerced to UTF-8 # _All_ input data must be force encoded to UTF-8 if Encoding.default_external is *not* UTF-8 # Addresses failures performing string operations that are reported as "invalid byte sequence in US-ASCII" # Ruby 1.8 doesn't seem to experience this problem (perhaps because it isn't validating the encodings) COERCE_ENCODING = !::RUBY_ENGINE_OPAL && ::RUBY_MIN_VERSION_1_9 # Flag to indicate whether encoding of external strings needs to be forced to UTF-8 FORCE_ENCODING = COERCE_ENCODING && ::Encoding.default_external != ::Encoding::UTF_8 # Byte arrays for UTF-* Byte Order Marks # hex escape sequence used for Ruby 1.8 compatibility BOM_BYTES_UTF_8 = "\xef\xbb\xbf".bytes.to_a BOM_BYTES_UTF_16LE = "\xff\xfe".bytes.to_a BOM_BYTES_UTF_16BE = "\xfe\xff".bytes.to_a # Flag to indicate that line length should be calculated using a unicode mode hint FORCE_UNICODE_LINE_LENGTH = !::RUBY_MIN_VERSION_1_9 # Flag to indicate whether gsub can use a Hash to map matches to replacements SUPPORTS_GSUB_RESULT_HASH = ::RUBY_MIN_VERSION_1_9 && !::RUBY_ENGINE_OPAL # The endline character used for output; stored in constant table as an optimization EOL = "\n" # The null character to use for splitting attribute values NULL = "\0" # String for matching tab character TAB = "\t" # The default document type # Can influence markup generated by the converters DEFAULT_DOCTYPE = 'article' # The backend determines the format of the converted output, default to html5 DEFAULT_BACKEND = 'html5' DEFAULT_STYLESHEET_KEYS = ['', 'DEFAULT'].to_set DEFAULT_STYLESHEET_NAME = 'asciidoctor.css' # Pointers to the preferred version for a given backend. BACKEND_ALIASES = { 'html' => 'html5', 'docbook' => 'docbook5' } # Default page widths for calculating absolute widths DEFAULT_PAGE_WIDTHS = { 'docbook' => 425 } # Default extensions for the respective base backends DEFAULT_EXTENSIONS = { 'html' => '.html', 'docbook' => '.xml', 'pdf' => '.pdf', 'epub' => '.epub', 'asciidoc' => '.adoc' } # Set of file extensions recognized as AsciiDoc documents (stored as a truth hash) ASCIIDOC_EXTENSIONS = { '.asciidoc' => true, '.adoc' => true, '.ad' => true, '.asc' => true, # TODO .txt should be deprecated '.txt' => true } SECTION_LEVELS = { '=' => 0, '-' => 1, '~' => 2, '^' => 3, '+' => 4 } ADMONITION_STYLES = ['NOTE', 'TIP', 'IMPORTANT', 'WARNING', 'CAUTION'].to_set PARAGRAPH_STYLES = ['comment', 'example', 'literal', 'listing', 'normal', 'pass', 'quote', 'sidebar', 'source', 'verse', 'abstract', 'partintro'].to_set VERBATIM_STYLES = ['literal', 'listing', 'source', 'verse'].to_set DELIMITED_BLOCKS = { '--' => [:open, ['comment', 'example', 'literal', 'listing', 'pass', 'quote', 'sidebar', 'source', 'verse', 'admonition', 'abstract', 'partintro'].to_set], '----' => [:listing, ['literal', 'source'].to_set], '....' 
=> [:literal, ['listing', 'source'].to_set], '====' => [:example, ['admonition'].to_set], '****' => [:sidebar, ::Set.new], '____' => [:quote, ['verse'].to_set], '""' => [:quote, ['verse'].to_set], '++++' => [:pass, ['stem', 'latexmath', 'asciimath'].to_set], '|===' => [:table, ::Set.new], ',===' => [:table, ::Set.new], ':===' => [:table, ::Set.new], '!===' => [:table, ::Set.new], '////' => [:comment, ::Set.new], '```' => [:fenced_code, ::Set.new] } DELIMITED_BLOCK_LEADERS = DELIMITED_BLOCKS.keys.map {|key| key[0..1] }.to_set LAYOUT_BREAK_LINES = { '\'' => :thematic_break, '-' => :thematic_break, '*' => :thematic_break, '_' => :thematic_break, '<' => :page_break } #LIST_CONTEXTS = [:ulist, :olist, :dlist, :colist] NESTABLE_LIST_CONTEXTS = [:ulist, :olist, :dlist] # TODO validate use of explicit style name above ordered list (this list is for selecting an implicit style) ORDERED_LIST_STYLES = [:arabic, :loweralpha, :lowerroman, :upperalpha, :upperroman] #, :lowergreek] ORDERED_LIST_KEYWORDS = { 'loweralpha' => 'a', 'lowerroman' => 'i', 'upperalpha' => 'A', 'upperroman' => 'I' #'lowergreek' => 'a' #'arabic' => '1' #'decimal' => '1' } LIST_CONTINUATION = '+' # NOTE AsciiDoc Python recognizes both a preceding TAB and a space LINE_BREAK = ' +' LINE_CONTINUATION = ' \\' LINE_CONTINUATION_LEGACY = ' +' BLOCK_MATH_DELIMITERS = { :asciimath => ['\\$', '\\$'], :latexmath => ['\\[', '\\]'], } INLINE_MATH_DELIMITERS = { :asciimath => ['\\$', '\\$'], :latexmath => ['\\(', '\\)'], } # attributes which be changed within the content of the document (but not # header) because it has semantic meaning; ex. sectnums FLEXIBLE_ATTRIBUTES = %w(sectnums) # A collection of regular expressions used by the parser. # # NOTE: The following pattern, which appears frequently, captures the # contents between square brackets, ignoring escaped closing brackets # (closing brackets prefixed with a backslash '\' character) # # Pattern: (?:\[((?:\\\]|[^\]])*?)\]) # Matches: [enclosed text here] or [enclosed [text\] here] # #(pseudo)module Rx ## Regular expression character classes (to ensure regexp compatibility between Ruby and JavaScript) ## CC stands for "character class", CG stands for "character class group" # NOTE \w matches only the ASCII word characters, whereas [[:word:]] or \p{Word} matches any character in the Unicode word category. # character classes for the Regexp engine(s) in JavaScript if RUBY_ENGINE == 'opal' CC_ALPHA = 'a-zA-Z' CG_ALPHA = '[a-zA-Z]' CC_ALNUM = 'a-zA-Z0-9' CG_ALNUM = '[a-zA-Z0-9]' CG_BLANK = '[ \\t]' CC_EOL = '(?=\\n|$)' CG_GRAPH = '[\\x21-\\x7E]' # non-blank character CC_ALL = '[\s\S]' # any character, including newlines (alternatively, [^]) CC_WORD = 'a-zA-Z0-9_' CG_WORD = '[a-zA-Z0-9_]' # character classes for the Regexp engine in Ruby >= 2 (Ruby 1.9 supports \p{} but has problems w/ encoding) elsif ::RUBY_MIN_VERSION_2 CC_ALPHA = CG_ALPHA = '\p{Alpha}' CC_ALNUM = CG_ALNUM = '\p{Alnum}' CC_ALL = '.' CG_BLANK = '\p{Blank}' CC_EOL = '$' CG_GRAPH = '\p{Graph}' CC_WORD = CG_WORD = '\p{Word}' # character classes for the Regexp engine in Ruby < 2 else CC_ALPHA = '[:alpha:]' CG_ALPHA = '[[:alpha:]]' CC_ALL = '.' CC_ALNUM = '[:alnum:]' CG_ALNUM = '[[:alnum:]]' CG_BLANK = '[[:blank:]]' CC_EOL = '$' CG_GRAPH = '[[:graph:]]' # non-blank character if ::RUBY_MIN_VERSION_1_9 CC_WORD = '[:word:]' CG_WORD = '[[:word:]]' else # NOTE Ruby 1.8 cannot match word characters beyond the ASCII range; if you need this feature, upgrade! 
CC_WORD = '[:alnum:]_' CG_WORD = '[[:alnum:]_]' end end ## Document header # Matches the author info line immediately following the document title. # # Examples # # Doc Writer # Mary_Sue Brontë # AuthorInfoLineRx = /^(#{CG_WORD}[#{CC_WORD}\-'.]*)(?: +(#{CG_WORD}[#{CC_WORD}\-'.]*))?(?: +(#{CG_WORD}[#{CC_WORD}\-'.]*))?(?: +<([^>]+)>)?$/ # Matches the revision info line, which appears immediately following # the author info line beneath the document title. # # Examples # # v1.0 # 2013-01-01 # v1.0, 2013-01-01: Ring in the new year release # 1.0, Jan 01, 2013 # RevisionInfoLineRx = /^(?:\D*(.*?),)?(?:\s*(?!:)(.*?))(?:\s*(?!^):\s*(.*))?$/ # Matches the title and volnum in the manpage doctype. # # Examples # # = asciidoctor ( 1 ) # ManpageTitleVolnumRx = /^(.*)\((.*)\)$/ # Matches the name and purpose in the manpage doctype. # # Examples # # asciidoctor - converts AsciiDoc source files to HTML, DocBook and other formats # ManpageNamePurposeRx = /^(.*?)#{CG_BLANK}+-#{CG_BLANK}+(.*)$/ ## Preprocessor directives # Matches a conditional preprocessor directive (e.g., ifdef, ifndef, ifeval and endif). # # Examples # # ifdef::basebackend-html[] # ifndef::theme[] # ifeval::["{asciidoctor-version}" >= "0.1.0"] # ifdef::asciidoctor[Asciidoctor!] # endif::theme[] # endif::basebackend-html[] # endif::[] # ConditionalDirectiveRx = /^\\?(ifdef|ifndef|ifeval|endif)::(\S*?(?:([,\+])\S+?)?)\[(.+)?\]$/ # Matches a restricted (read as safe) eval expression. # # Examples # # "{asciidoctor-version}" >= "0.1.0" # EvalExpressionRx = /^(\S.*?)#{CG_BLANK}*(==|!=|<=|>=|<|>)#{CG_BLANK}*(\S.*)$/ # Matches an include preprocessor directive. # # Examples # # include::chapter1.ad[] # include::example.txt[lines=1;2;5..10] # IncludeDirectiveRx = /^\\?include::([^\[]+)\[(.*?)\]$/ # Matches a trailing tag directive in an include file. # # Examples # # // tag::try-catch[] # try { # someMethod(); # catch (Exception e) { # log(e); # } # // end::try-catch[] TagDirectiveRx = /\b(?:tag|end)::\S+\[\]$/ ## Attribute entries and references # Matches a document attribute entry. # # Examples # # :foo: bar # :First Name: Dan # :sectnums!: # :!toc: # :long-entry: Attribute value lines ending in ' +' # are joined together as a single value, # collapsing the line breaks and indentation to # a single space. # AttributeEntryRx = /^:(!?\w.*?):(?:#{CG_BLANK}+(.*))?$/ # Matches invalid characters in an attribute name. InvalidAttributeNameCharsRx = /[^\w\-]/ # Matches the pass inline macro allowed in value of attribute assignment. # # Examples # # pass:[text] # AttributeEntryPassMacroRx = /^pass:([a-z,]*)\[(.*)\]$/ # Matches an inline attribute reference. # # Examples # # {foo} # {counter:pcount:1} # {set:foo:bar} # {set:name!} # AttributeReferenceRx = /(\\)?\{((set|counter2?):.+?|\w+(?:[\-]\w+)*)(\\)?\}/ ## Paragraphs and delimited blocks # Matches an anchor (i.e., id + optional reference text) on a line above a block. # # Examples # # [[idname]] # [[idname,Reference Text]] # BlockAnchorRx = /^\[\[(?:|([#{CC_ALPHA}:_][#{CC_WORD}:.-]*)(?:,#{CG_BLANK}*(\S.*))?)\]\]$/ # Matches an attribute list above a block element. # # Examples # # # strictly positional # [quote, Adam Smith, Wealth of Nations] # # # name/value pairs # [NOTE, caption="Good to know"] # # # as attribute reference # [{lead}] # BlockAttributeListRx = /^\[(|#{CG_BLANK}*[#{CC_WORD}\{,.#"'%].*)\]$/ # A combined pattern that matches either a block anchor or a block attribute list. 
# # TODO this one gets hit a lot, should be optimized as much as possible BlockAttributeLineRx = /^\[(|#{CG_BLANK}*[#{CC_WORD}\{,.#"'%].*|\[(?:|[#{CC_ALPHA}:_][#{CC_WORD}:.-]*(?:,#{CG_BLANK}*\S.*)?)\])\]$/ # Matches a title above a block. # # Examples # # .Title goes here # BlockTitleRx = /^\.([^\s.].*)$/ # Matches an admonition label at the start of a paragraph. # # Examples # # NOTE: Just a little note. # TIP: Don't forget! # AdmonitionParagraphRx = /^(#{ADMONITION_STYLES.to_a * '|'}):#{CG_BLANK}/ # Matches a literal paragraph, which is a line of text preceded by at least one space. # # Examples # # Foo # Foo LiteralParagraphRx = /^(#{CG_BLANK}+.*)$/ # Matches a comment block. # # Examples # # //// # This is a block comment. # It can span one or more lines. # //// CommentBlockRx = %r{^/{4,}$} # Matches a comment line. # # Examples # # // an then whatever # CommentLineRx = %r{^//(?:[^/]|$)} ## Section titles # Matches a single-line (Atx-style) section title. # # Examples # # == Foo # # ^ a level 1 (h2) section title # # == Foo == # # ^ also a level 1 (h2) section title # # match[1] is the delimiter, whose length determines the level # match[2] is the title itself # match[3] is an inline anchor, which becomes the section id AtxSectionRx = /^((?:=|#){1,6})#{CG_BLANK}+(\S.*?)(?:#{CG_BLANK}+\1)?$/ # Matches the restricted section name for a two-line (Setext-style) section title. # The name cannot begin with a dot and has at least one alphanumeric character. SetextSectionTitleRx = /^((?=.*#{CG_WORD}+.*)[^.].*?)$/ # Matches the underline in a two-line (Setext-style) section title. # # Examples # # ====== || ------ || ~~~~~~ || ^^^^^^ || ++++++ # SetextSectionLineRx = /^(?:=|-|~|\^|\+)+$/ # Matches an anchor (i.e., id + optional reference text) inside a section title. # # Examples # # Section Title [[idname]] # Section Title [[idname,Reference Text]] # InlineSectionAnchorRx = /^(.*?)#{CG_BLANK}+(\\)?\[\[([#{CC_ALPHA}:_][#{CC_WORD}:.-]*)(?:,#{CG_BLANK}*(\S.*?))?\]\]$/ # Matches invalid characters in a section id. InvalidSectionIdCharsRx = /&(?:[a-zA-Z]{2,}|#\d{2,6}|#x[a-fA-F0-9]{2,5});|[^#{CC_WORD}]+?/ # Matches the block style used to designate a section title as a floating title. # # Examples # # [float] # = Floating Title # FloatingTitleStyleRx = /^(?:float|discrete)\b/ ## Lists # Detects the start of any list item. # # NOTE we only have to check as far as the blank character because we know it means non-whitespace follows. AnyListRx = /^(?:#{CG_BLANK}*(?:-|([*.\u2022])\1{0,4}|\d+\.|[a-zA-Z]\.|[IVXivx]+\))#{CG_BLANK}|#{CG_BLANK}*.*?(?::{2,4}|;;)(?:$|#{CG_BLANK})|#{CG_BLANK})/ # Matches an unordered list item (one level for hyphens, up to 5 levels for asterisks). # # Examples # # * Foo # - Foo # # NOTE we know trailing (.*) will match at least one character because we strip trailing spaces # NOTE I want to use (-|([*\u2022])\2{0,4}) but breaks the parser since it relies on fixed match positions UnorderedListRx = /^#{CG_BLANK}*(-|\*{1,5}|\u2022{1,5})#{CG_BLANK}+(.*)$/ # Matches an ordered list item (explicit numbering or up to 5 consecutive dots). # # Examples # # . Foo # .. Foo # 1. Foo (arabic, default) # a. Foo (loweralpha) # A. Foo (upperalpha) # i. Foo (lowerroman) # I. 
Foo (upperroman) # # NOTE leading space match is not always necessary, but is used for list reader # NOTE we know trailing (.*) will match at least one character because we strip trailing spaces OrderedListRx = /^#{CG_BLANK}*(\.{1,5}|\d+\.|[a-zA-Z]\.|[IVXivx]+\))#{CG_BLANK}+(.*)$/ # Matches the ordinals for each type of ordered list. OrderedListMarkerRxMap = { :arabic => /\d+[.>]/, :loweralpha => /[a-z]\./, :lowerroman => /[ivx]+\)/, :upperalpha => /[A-Z]\./, :upperroman => /[IVX]+\)/ #:lowergreek => /[a-z]\]/ } # Matches a description list entry. # # Examples # # foo:: # foo::: # foo:::: # foo;; # # # the term can be followed by a description on the same line... # # foo:: That which precedes 'bar' (see also, <>) # # # ...or on a separate line (optionally indented) # # foo:: # That which precedes 'bar' (see also, <>) # # # the term or description may be an attribute reference # # {foo_term}:: {foo_def} # # NOTE negative match for comment line is intentional since that isn't handled when looking for next list item # TODO check for line comment when scanning lines instead of in regex # DescriptionListRx = /^(?!\/\/)#{CG_BLANK}*(.*?)(:{2,4}|;;)(?:#{CG_BLANK}+(.*))?$/ # Matches a sibling description list item (which does not include the type in the key). DescriptionListSiblingRx = { # (?:.*?[^:])? - a non-capturing group which grabs longest sequence of characters that doesn't end w/ colon '::' => /^(?!\/\/)#{CG_BLANK}*((?:.*[^:])?)(::)(?:#{CG_BLANK}+(.*))?$/, ':::' => /^(?!\/\/)#{CG_BLANK}*((?:.*[^:])?)(:::)(?:#{CG_BLANK}+(.*))?$/, '::::' => /^(?!\/\/)#{CG_BLANK}*((?:.*[^:])?)(::::)(?:#{CG_BLANK}+(.*))?$/, ';;' => /^(?!\/\/)#{CG_BLANK}*(.*)(;;)(?:#{CG_BLANK}+(.*))?$/ } # Matches a callout list item. # # Examples # # <1> Foo # # NOTE we know trailing (.*) will match at least one character because we strip trailing spaces CalloutListRx = /^#{CG_BLANK}+(.*)$/ # Matches a callout reference inside literal text. # # Examples # <1> (optionally prefixed by //, #, -- or ;; line comment chars) # <1> <2> (multiple callouts on one line) # (for XML-based languages) # # NOTE extract regexps are applied line-by-line, so we can use $ as end-of-line char CalloutExtractRx = /(?:(?:\/\/|#|--|;;) ?)?(\\)?(?=(?: ?\\?)*$)/ CalloutExtractRxt = '(\\\\)?<()(\\d+)>(?=(?: ?\\\\?<\\d+>)*$)' # NOTE special characters have not been replaced when scanning CalloutQuickScanRx = /\\?(?=(?: ?\\?)*#{CC_EOL})/ # NOTE special characters have already been replaced when converting to an SGML format CalloutSourceRx = /(?:(?:\/\/|#|--|;;) ?)?(\\)?<!?(--|)(\d+)\2>(?=(?: ?\\?<!?\2\d+\2>)*#{CC_EOL})/ CalloutSourceRxt = "(\\\\)?<()(\\d+)>(?=(?: ?\\\\?<\\d+>)*#{CC_EOL})" # A Hash of regexps for lists used for dynamic access. ListRxMap = { :ulist => UnorderedListRx, :olist => OrderedListRx, :dlist => DescriptionListRx, :colist => CalloutListRx } ## Tables # Parses the column spec (i.e., colspec) for a table. # # Examples # # 1*h,2*,^3e # ColumnSpecRx = /^(?:(\d+)\*)?([<^>](?:\.[<^>]?)?|(?:[<^>]?\.)?[<^>])?(\d+%?)?([a-z])?$/ # Parses the start and end of a cell spec (i.e., cellspec) for a table. # # Examples # # 2.3+<.>m # # FIXME use step-wise scan (or treetop) rather than this mega-regexp CellSpecStartRx = /^#{CG_BLANK}*(?:(\d+(?:\.\d*)?|(?:\d*\.)?\d+)([*+]))?([<^>](?:\.[<^>]?)?|(?:[<^>]?\.)?[<^>])?([a-z])?$/ CellSpecEndRx = /#{CG_BLANK}+(?:(\d+(?:\.\d*)?|(?:\d*\.)?\d+)([*+]))?([<^>](?:\.[<^>]?)?|(?:[<^>]?\.)?[<^>])?([a-z])?$/ # Block macros # Matches the general block macro pattern. 
# # Examples # # gist::123456[] # #-- # NOTE we've relaxed the match for target to accomodate the short format (e.g., name::[attrlist]) GenericBlockMacroRx = /^(#{CG_WORD}+)::(\S*?)\[((?:\\\]|[^\]])*?)\]$/ # Matches an image, video or audio block macro. # # Examples # # image::filename.png[Caption] # video::http://youtube.com/12345[Cats vs Dogs] # MediaBlockMacroRx = /^(image|video|audio)::(\S+?)\[((?:\\\]|[^\]])*?)\]$/ # Matches the TOC block macro. # # Examples # # toc::[] # toc::[levels=2] # TocBlockMacroRx = /^toc::\[(.*?)\]$/ ## Inline macros # Matches an anchor (i.e., id + optional reference text) in the flow of text. # # Examples # # [[idname]] # [[idname,Reference Text]] # anchor:idname[] # anchor:idname[Reference Text] # InlineAnchorRx = /\\?(?:\[\[([#{CC_ALPHA}:_][#{CC_WORD}:.-]*)(?:,#{CG_BLANK}*(\S.*?))?\]\]|anchor:(\S+)\[(.*?[^\\])?\])/ # Matches a bibliography anchor anywhere inline. # # Examples # # [[[Foo]]] # InlineBiblioAnchorRx = /\\?\[\[\[([#{CC_WORD}:][#{CC_WORD}:.-]*?)\]\]\]/ # Matches an inline e-mail address. # # doc.writer@example.com # EmailInlineMacroRx = /([\\>:\/])?#{CG_WORD}[#{CC_WORD}.%+-]*@#{CG_ALNUM}[#{CC_ALNUM}.-]*\.#{CG_ALPHA}{2,4}\b/ # Matches an inline footnote macro, which is allowed to span multiple lines. # # Examples # footnote:[text] # footnoteref:[id,text] # footnoteref:[id] # FootnoteInlineMacroRx = /\\?(footnote(?:ref)?):\[(#{CC_ALL}*?[^\\])\]/m # Matches an image or icon inline macro. # # Examples # # image:filename.png[Alt Text] # image:http://example.com/images/filename.png[Alt Text] # image:filename.png[More [Alt\] Text] (alt text becomes "More [Alt] Text") # icon:github[large] # ImageInlineMacroRx = /\\?(?:image|icon):([^:\[][^\[]*)\[((?:\\\]|[^\]])*?)\]/ # Matches an indexterm inline macro, which may span multiple lines. # # Examples # # indexterm:[Tigers,Big cats] # (((Tigers,Big cats))) # indexterm2:[Tigers] # ((Tigers)) # IndextermInlineMacroRx = /\\?(?:(indexterm2?):\[(#{CC_ALL}*?[^\\])\]|\(\((#{CC_ALL}+?)\)\)(?!\)))/m # Matches either the kbd or btn inline macro. # # Examples # # kbd:[F3] # kbd:[Ctrl+Shift+T] # kbd:[Ctrl+\]] # kbd:[Ctrl,T] # btn:[Save] # KbdBtnInlineMacroRx = /\\?(?:kbd|btn):\[((?:\\\]|[^\]])+?)\]/ # Matches the delimiter used for kbd value. # # Examples # # Ctrl + Alt+T # Ctrl,T # KbdDelimiterRx = /(?:\+|,)(?=#{CG_BLANK}*[^\1])/ # Matches an implicit link and some of the link inline macro. # # Examples # # http://github.com # http://github.com[GitHub] # # FIXME revisit! the main issue is we need different rules for implicit vs explicit LinkInlineRx = %r{(^|link:|<|[\s>\(\)\[\];])(\\?(?:https?|file|ftp|irc)://[^\s\[\]<]*[^\s.,\[\]<])(?:\[((?:\\\]|[^\]])*?)\])?} # Match a link or e-mail inline macro. # # Examples # # link:path[label] # mailto:doc.writer@example.com[] # LinkInlineMacroRx = /\\?(?:link|mailto):([^\s\[]+)(?:\[((?:\\\]|[^\]])*?)\])/ # Matches a stem (and alternatives, asciimath and latexmath) inline macro, which may span multiple lines. # # Examples # # stem:[x != 0] # asciimath:[x != 0] # latexmath:[\sqrt{4} = 2] # StemInlineMacroRx = /\\?(stem|(?:latex|ascii)math):([a-z,]*)\[(#{CC_ALL}*?[^\\])\]/m # Matches a menu inline macro. # # Examples # # menu:File[New...] # menu:View[Page Style > No Style] # menu:View[Page Style, No Style] # MenuInlineMacroRx = /\\?menu:(#{CG_WORD}|#{CG_WORD}.*?\S)\[#{CG_BLANK}*(.+?)?\]/ # Matches an implicit menu inline macro. # # Examples # # "File > New..." 
# MenuInlineRx = /\\?"(#{CG_WORD}[^"]*?#{CG_BLANK}*>#{CG_BLANK}*[^" \t][^"]*)"/ # Matches an inline passthrough value, which may span multiple lines. # # Examples # # +text+ # `text` (compat) # # NOTE we always capture the attributes so we know when to use compatible (i.e., legacy) behavior PassInlineRx = { false => ['+', '`', /(^|[^#{CC_WORD};:])(?:\[([^\]]+?)\])?(\\?(\+|`)(\S|\S#{CC_ALL}*?\S)\4)(?!#{CG_WORD})/m], true => ['`', nil, /(^|[^`#{CC_WORD}])(?:\[([^\]]+?)\])?(\\?(`)([^`\s]|[^`\s]#{CC_ALL}*?\S)\4)(?![`#{CC_WORD}])/m] } # Matches several variants of the passthrough inline macro, which may span multiple lines. # # Examples # # +++text+++ # $$text$$ # pass:quotes[text] # PassInlineMacroRx = /(?:(?:(\\?)\[([^\]]+?)\])?(\\{0,2})(\+{2,3}|\${2})(#{CC_ALL}*?)\4|(\\?)pass:([a-z,]*)\[(#{CC_ALL}*?[^\\])\])/m # Matches an xref (i.e., cross-reference) inline macro, which may span multiple lines. # # Examples # # <> # xref:id[reftext] # # NOTE special characters have already been escaped, hence the entity references XrefInlineMacroRx = /\\?(?:<<([#{CC_WORD}":.\/]#{CC_ALL}*?)>>|xref:([#{CC_WORD}":.\/]#{CC_ALL}*?)\[(#{CC_ALL}*?)\])/m ## Layout # Matches a trailing + preceded by at least one space character, # which forces a hard line break (
tag in HTML output). # # Examples # # Humpty Dumpty sat on a wall, + # Humpty Dumpty had a great fall. # if RUBY_ENGINE == 'opal' # NOTE JavaScript only treats ^ and $ as line boundaries in multiline regexp; . won't match newlines LineBreakRx = /^(.*)[ \t]\+$/m else LineBreakRx = /^(.*)[[:blank:]]\+$/ end # Matches an AsciiDoc horizontal rule or AsciiDoc page break. # # Examples # # ''' (horizontal rule) # <<< (page break) # LayoutBreakLineRx = /^('|<){3,}$/ # Matches an AsciiDoc or Markdown horizontal rule or AsciiDoc page break. # # Examples # # ''' or ' ' ' (horizontal rule) # --- or - - - (horizontal rule) # *** or * * * (horizontal rule) # <<< (page break) # LayoutBreakLinePlusRx = /^(?:'|<){3,}$|^ {0,3}([-\*_])( *)\1\2\1$/ ## General # Matches a blank line. # # NOTE allows for empty space in line as it could be left by the template engine BlankLineRx = /^#{CG_BLANK}*\n/ # Matches a comma or semi-colon delimiter. # # Examples # # one,two # three;four # DataDelimiterRx = /,|;/ # Matches a single-line of text enclosed in double quotes, capturing the quote char and text. # # Examples # # "Who goes there?" # DoubleQuotedRx = /^("|)(.*)\1$/ # Matches multiple lines of text enclosed in double quotes, capturing the quote char and text. # # Examples # # "I am a run-on sentence and I like # to take up multiple lines and I # still want to be matched." # DoubleQuotedMultiRx = /^("|)(#{CC_ALL}*)\1$/m # Matches one or more consecutive digits at the end of a line. # # Examples # # docbook45 # html5 # TrailingDigitsRx = /\d+$/ # Matches a space escaped by a backslash. # # Examples # # one\ two\ three # EscapedSpaceRx = /\\(#{CG_BLANK})/ # Matches a space delimiter that's not escaped. # # Examples # # one two three four # SpaceDelimiterRx = /([^\\])#{CG_BLANK}+/ # Matches a + or - modifier in a subs list # SubModifierSniffRx = /[+-]/ # Matches any character with multibyte support explicitly enabled (length of multibyte char = 1) # # NOTE If necessary to hide use of the language modifier (u) from JavaScript, use (Regexp.new '.', false, 'u') # UnicodeCharScanRx = unless RUBY_ENGINE == 'opal' FORCE_UNICODE_LINE_LENGTH ? /./u : nil end # Detects strings that resemble URIs. # # Examples # http://domain # https://domain # file:///path # data:info # # not c:/sample.adoc or c:\sample.adoc # UriSniffRx = %r{^#{CG_ALPHA}[#{CC_ALNUM}.+-]+:/{0,2}} # Detects the end of an implicit URI in the text # # Examples # # (http://google.com) # >http://google.com< # (See http://google.com): # UriTerminator = /[);:]$/ # Detects XML tags XmlSanitizeRx = /<[^>]+>/ # Unused # Detects any fenced block delimiter, including: # listing, literal, example, sidebar, quote, passthrough, table and fenced code # Does not match open blocks or air quotes # TIP position the most common blocks towards the front of the pattern #BlockDelimiterRx = %r{^(?:(?:-|\.|=|\*|_|\+|/){4,}|[\|,;!]={3,}|(?:`|~){3,}.*)$} # Matches an escaped single quote within a word # # Examples # # Here\'s Johnny! 
# #EscapedSingleQuoteRx = /(#{CG_WORD})\\'(#{CG_WORD})/ # an alternative if our backend generates single-quoted html/xml attributes #EscapedSingleQuoteRx = /(#{CG_WORD}|=)\\'(#{CG_WORD})/ # Matches whitespace at the beginning of the line #LeadingSpacesRx = /^(#{CG_BLANK}*)/ # Matches parent directory references at the beginning of a path #LeadingParentDirsRx = /^(?:\.\.\/)*/ #StripLineWise = /\A(?:\s*\n)?(#{CC_ALL}*?)\s*\z/m #end INTRINSIC_ATTRIBUTES = { 'startsb' => '[', 'endsb' => ']', 'vbar' => '|', 'caret' => '^', 'asterisk' => '*', 'tilde' => '~', 'plus' => '+', 'backslash' => '\\', 'backtick' => '`', 'blank' => '', 'empty' => '', 'sp' => ' ', 'two-colons' => '::', 'two-semicolons' => ';;', 'nbsp' => ' ', 'deg' => '°', 'zwsp' => '​', 'quot' => '"', 'apos' => ''', 'lsquo' => '‘', 'rsquo' => '’', 'ldquo' => '“', 'rdquo' => '”', 'wj' => '⁠', 'brvbar' => '¦', 'cpp' => 'C++', 'amp' => '&', 'lt' => '<', 'gt' => '>' } # unconstrained quotes:: can appear anywhere # constrained quotes:: must be bordered by non-word characters # NOTE these substitutions are processed in the order they appear here and # the order in which they are replaced is important quote_subs = [ # **strong** [:strong, :unconstrained, /\\?(?:\[([^\]]+?)\])?\*\*(#{CC_ALL}+?)\*\*/m], # *strong* [:strong, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+?)\])?\*(\S|\S#{CC_ALL}*?\S)\*(?!#{CG_WORD})/m], # "`double-quoted`" [:double, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+?)\])?"`(\S|\S#{CC_ALL}*?\S)`"(?!#{CG_WORD})/m], # '`single-quoted`' [:single, :constrained, /(^|[^#{CC_WORD};:`}])(?:\[([^\]]+?)\])?'`(\S|\S#{CC_ALL}*?\S)`'(?!#{CG_WORD})/m], # ``monospaced`` [:monospaced, :unconstrained, /\\?(?:\[([^\]]+?)\])?``(#{CC_ALL}+?)``/m], # `monospaced` [:monospaced, :constrained, /(^|[^#{CC_WORD};:"'`}])(?:\[([^\]]+?)\])?`(\S|\S#{CC_ALL}*?\S)`(?![#{CC_WORD}"'`])/m], # __emphasis__ [:emphasis, :unconstrained, /\\?(?:\[([^\]]+?)\])?__(#{CC_ALL}+?)__/m], # _emphasis_ [:emphasis, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+?)\])?_(\S|\S#{CC_ALL}*?\S)_(?!#{CG_WORD})/m], # ##mark## (referred to in AsciiDoc Python as unquoted) [:mark, :unconstrained, /\\?(?:\[([^\]]+?)\])?##(#{CC_ALL}+?)##/m], # #mark# (referred to in AsciiDoc Python as unquoted) [:mark, :constrained, /(^|[^#{CC_WORD}&;:}])(?:\[([^\]]+?)\])?#(\S|\S#{CC_ALL}*?\S)#(?!#{CG_WORD})/m], # ^superscript^ [:superscript, :unconstrained, /\\?(?:\[([^\]]+?)\])?\^(\S+?)\^/], # ~subscript~ [:subscript, :unconstrained, /\\?(?:\[([^\]]+?)\])?~(\S+?)~/] ] compat_quote_subs = quote_subs.dup # ``quoted'' compat_quote_subs[2] = [:double, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+?)\])?``(\S|\S#{CC_ALL}*?\S)''(?!#{CG_WORD})/m] # `quoted' compat_quote_subs[3] = [:single, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+?)\])?`(\S|\S#{CC_ALL}*?\S)'(?!#{CG_WORD})/m] # ++monospaced++ compat_quote_subs[4] = [:monospaced, :unconstrained, /\\?(?:\[([^\]]+?)\])?\+\+(#{CC_ALL}+?)\+\+/m] # +monospaced+ compat_quote_subs[5] = [:monospaced, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+?)\])?\+(\S|\S#{CC_ALL}*?\S)\+(?!#{CG_WORD})/m] # #unquoted# #compat_quote_subs[8] = [:unquoted, *compat_quote_subs[8][1..-1]] # ##unquoted## #compat_quote_subs[9] = [:unquoted, *compat_quote_subs[9][1..-1]] # 'emphasis' compat_quote_subs.insert 3, [:emphasis, :constrained, /(^|[^#{CC_WORD};:}])(?:\[([^\]]+?)\])?'(\S|\S#{CC_ALL}*?\S)'(?!#{CG_WORD})/m] QUOTE_SUBS = { false => quote_subs, true => compat_quote_subs } quote_subs = nil compat_quote_subs = nil # NOTE in Ruby 1.8.7, [^\\] does not match start of line, # 
so we need to match it explicitly # order is significant REPLACEMENTS = [ # (C) [/\\?\(C\)/, '©', :none], # (R) [/\\?\(R\)/, '®', :none], # (TM) [/\\?\(TM\)/, '™', :none], # foo -- bar # FIXME this drops the endline if it appears at end of line [/(^|\n| |\\)--( |\n|$)/, ' — ', :none], # foo--bar [/(#{CG_WORD})\\?--(?=#{CG_WORD})/, '—​', :leading], # ellipsis [/\\?\.\.\./, '…​', :leading], # right single quote [/\\?`'/, '’', :none], # apostrophe (inside a word) [/(#{CG_ALNUM})\\?'(?=#{CG_ALPHA})/, '’', :leading], # right arrow -> [/\\?->/, '→', :none], # right double arrow => [/\\?=>/, '⇒', :none], # left arrow <- [/\\?<-/, '←', :none], # left double arrow <= [/\\?<=/, '⇐', :none], # restore entities [/\\?(&)amp;((?:[a-zA-Z]{2,}|#\d{2,6}|#x[a-fA-F0-9]{2,5});)/, '', :bounding] ] class << self # Public: Parse the AsciiDoc source input into a {Document} # # Accepts input as an IO (or StringIO), String or String Array object. If the # input is a File, information about the file is stored in attributes on the # Document object. # # input - the AsciiDoc source as a IO, String or Array. # options - a String, Array or Hash of options to control processing (default: {}) # String and Array values are converted into a Hash. # See {Document#initialize} for details about these options. # # Returns the Document def load input, options = {} options = options.dup if (timings = options[:timings]) timings.start :read end attributes = options[:attributes] = if !(attrs = options[:attributes]) {} elsif ::Hash === attrs || (::RUBY_ENGINE_JRUBY && ::Java::JavaUtil::Map === attrs) attrs.dup elsif ::Array === attrs attrs.inject({}) do |accum, entry| k, v = entry.split '=', 2 accum[k] = v || '' accum end elsif ::String === attrs # convert non-escaped spaces into null character, so we split on the # correct spaces chars, and restore escaped spaces capture_1 = '\1' attrs = attrs.gsub(SpaceDelimiterRx, %(#{capture_1}#{NULL})).gsub(EscapedSpaceRx, capture_1) attrs.split(NULL).inject({}) do |accum, entry| k, v = entry.split '=', 2 accum[k] = v || '' accum end elsif (attrs.respond_to? :keys) && (attrs.respond_to? :[]) # convert it to a Hash as we know it original_attrs = attrs attrs = {} original_attrs.keys.each do |key| attrs[key] = original_attrs[key] end attrs else raise ::ArgumentError, %(illegal type for attributes option: #{attrs.class.ancestors}) end lines = nil if ::File === input # TODO cli checks if input path can be read and is file, but might want to add check to API input_path = ::File.expand_path input.path # See https://reproducible-builds.org/specs/source-date-epoch/ input_mtime = ::ENV['SOURCE_DATE_EPOCH'] ? (::Time.at ::ENV['SOURCE_DATE_EPOCH'].to_i).utc : input.mtime lines = input.readlines # hold off on setting infile and indir until we get a better sense of their purpose attributes['docfile'] = input_path attributes['docdir'] = ::File.dirname input_path attributes['docname'] = Helpers.basename input_path, true docdate = (attributes['docdate'] ||= input_mtime.strftime('%Y-%m-%d')) doctime = (attributes['doctime'] ||= input_mtime.strftime('%H:%M:%S %Z')) attributes['docdatetime'] = %(#{docdate} #{doctime}) elsif input.respond_to? 
:readlines # NOTE tty, pipes & sockets can't be rewound, but can't be sniffed easily either # just fail the rewind operation silently to handle all cases begin input.rewind rescue end lines = input.readlines elsif ::String === input lines = input.lines.entries elsif ::Array === input lines = input.dup else raise ::ArgumentError, %(unsupported input type: #{input.class}) end if timings timings.record :read timings.start :parse end if options[:parse] == false doc = Document.new lines, options else doc = (Document.new lines, options).parse end timings.record :parse if timings doc rescue => ex begin context = %(asciidoctor: FAILED: #{attributes['docfile'] || ''}: Failed to load AsciiDoc document) if ex.respond_to? :exception # The original message must be explicitely preserved when wrapping a Ruby exception wrapped_ex = ex.exception %(#{context} - #{ex.message}) # JRuby automatically sets backtrace, but not MRI wrapped_ex.set_backtrace ex.backtrace else # Likely a Java exception class wrapped_ex = ex.class.new context, ex wrapped_ex.stack_trace = ex.stack_trace end rescue wrapped_ex = ex end raise wrapped_ex end # Public: Parse the contents of the AsciiDoc source file into an Asciidoctor::Document # # Accepts input as an IO, String or String Array object. If the # input is a File, information about the file is stored in # attributes on the Document. # # input - the String AsciiDoc source filename # options - a String, Array or Hash of options to control processing (default: {}) # String and Array values are converted into a Hash. # See Asciidoctor::Document#initialize for details about options. # # Returns the Asciidoctor::Document def load_file filename, options = {} self.load ::File.new(filename || ''), options end # Public: Parse the AsciiDoc source input into an Asciidoctor::Document and # convert it to the specified backend format. # # Accepts input as an IO, String or String Array object. If the # input is a File, information about the file is stored in # attributes on the Document. # # If the :in_place option is true, and the input is a File, the output is # written to a file adjacent to the input file, having an extension that # corresponds to the backend format. Otherwise, if the :to_file option is # specified, the file is written to that file. If :to_file is not an absolute # path, it is resolved relative to :to_dir, if given, otherwise the # Document#base_dir. If the target directory does not exist, it will not be # created unless the :mkdirs option is set to true. If the file cannot be # written because the target directory does not exist, or because it falls # outside of the Document#base_dir in safe mode, an IOError is raised. # # If the output is going to be written to a file, the header and footer are # included unless specified otherwise (writing to a file implies creating a # standalone document). Otherwise, the header and footer are not included by # default and the converted result is returned. # # input - the String AsciiDoc source filename # options - a String, Array or Hash of options to control processing (default: {}) # String and Array values are converted into a Hash. # See Asciidoctor::Document#initialize for details about options. 
# # Returns the Document object if the converted String is written to a # file, otherwise the converted String def convert input, options = {} options = options.dup options.delete(:parse) to_file = options.delete(:to_file) to_dir = options.delete(:to_dir) mkdirs = options.delete(:mkdirs) || false timings = options[:timings] case to_file when true, nil write_to_same_dir = !to_dir && ::File === input stream_output = false write_to_target = to_dir to_file = nil when false write_to_same_dir = false stream_output = false write_to_target = false to_file = nil when '/dev/null' return self.load input, options else write_to_same_dir = false stream_output = to_file.respond_to? :write write_to_target = stream_output ? false : to_file end unless options.key? :header_footer options[:header_footer] = true if write_to_same_dir || write_to_target end # NOTE at least make intended target directory available, if there is one if write_to_same_dir input_path = ::File.expand_path input.path options[:to_dir] = (outdir = ::File.dirname input_path) elsif write_to_target if to_dir if to_file options[:to_dir] = ::File.dirname ::File.expand_path(::File.join to_dir, to_file) else options[:to_dir] = ::File.expand_path to_dir end elsif to_file options[:to_dir] = ::File.dirname ::File.expand_path to_file end else options[:to_dir] = nil end doc = self.load input, options if write_to_same_dir outfile = ::File.join outdir, %(#{doc.attributes['docname']}#{doc.outfilesuffix}) if outfile == input_path raise ::IOError, %(input file and output file cannot be the same: #{outfile}) end elsif write_to_target working_dir = options.has_key?(:base_dir) ? ::File.expand_path(options[:base_dir]) : ::File.expand_path(::Dir.pwd) # QUESTION should the jail be the working_dir or doc.base_dir??? jail = doc.safe >= SafeMode::SAFE ? working_dir : nil if to_dir outdir = doc.normalize_system_path(to_dir, working_dir, jail, :target_name => 'to_dir', :recover => false) if to_file outfile = doc.normalize_system_path(to_file, outdir, nil, :target_name => 'to_dir', :recover => false) # reestablish outdir as the final target directory (in the case to_file had directory segments) outdir = ::File.dirname outfile else outfile = ::File.join outdir, %(#{doc.attributes['docname']}#{doc.outfilesuffix}) end elsif to_file outfile = doc.normalize_system_path(to_file, working_dir, jail, :target_name => 'to_dir', :recover => false) # establish outdir as the final target directory (in the case to_file had directory segments) outdir = ::File.dirname outfile end unless ::File.directory? outdir if mkdirs Helpers.mkdir_p outdir else # NOTE we intentionally refer to the directory as it was passed to the API raise ::IOError, %(target directory does not exist: #{to_dir}) end end else outfile = to_file outdir = nil end timings.start :convert if timings opts = outfile && !stream_output ? { 'outfile' => outfile, 'outdir' => outdir } : {} output = doc.convert opts timings.record :convert if timings if outfile timings.start :write if timings doc.write output, outfile timings.record :write if timings # NOTE document cannot control this behavior if safe >= SafeMode::SERVER # NOTE skip if stylesdir is a URI if !stream_output && doc.safe < SafeMode::SECURE && (doc.attr? 'linkcss') && (doc.attr? 'copycss') && (doc.attr? 'basebackend-html') && !((stylesdir = (doc.attr 'stylesdir')) && (Helpers.uriish? stylesdir)) copy_asciidoctor_stylesheet = false copy_user_stylesheet = false if (stylesheet = (doc.attr 'stylesheet')) if DEFAULT_STYLESHEET_KEYS.include? 
stylesheet copy_asciidoctor_stylesheet = true elsif !(Helpers.uriish? stylesheet) copy_user_stylesheet = true end end copy_coderay_stylesheet = (doc.attr? 'source-highlighter', 'coderay') && (doc.attr 'coderay-css', 'class') == 'class' copy_pygments_stylesheet = (doc.attr? 'source-highlighter', 'pygments') && (doc.attr 'pygments-css', 'class') == 'class' if copy_asciidoctor_stylesheet || copy_user_stylesheet || copy_coderay_stylesheet || copy_pygments_stylesheet stylesoutdir = doc.normalize_system_path(stylesdir, outdir, doc.safe >= SafeMode::SAFE ? outdir : nil) Helpers.mkdir_p stylesoutdir if mkdirs if copy_asciidoctor_stylesheet Stylesheets.instance.write_primary_stylesheet stylesoutdir # FIXME should Stylesheets also handle the user stylesheet? elsif copy_user_stylesheet if (stylesheet_src = (doc.attr 'copycss')).empty? stylesheet_src = doc.normalize_system_path stylesheet else # NOTE in this case, copycss is a source location (but cannot be a URI) stylesheet_src = doc.normalize_system_path stylesheet_src end stylesheet_dst = doc.normalize_system_path stylesheet, stylesoutdir, (doc.safe >= SafeMode::SAFE ? outdir : nil) unless stylesheet_src == stylesheet_dst || (stylesheet_content = doc.read_asset stylesheet_src).nil? ::File.open(stylesheet_dst, 'w') {|f| f.write stylesheet_content } end end if copy_coderay_stylesheet Stylesheets.instance.write_coderay_stylesheet stylesoutdir elsif copy_pygments_stylesheet Stylesheets.instance.write_pygments_stylesheet stylesoutdir, (doc.attr 'pygments-style') end end end doc else output end end # Alias render to convert to maintain backwards compatibility alias :render :convert # Public: Parse the contents of the AsciiDoc source file into an # Asciidoctor::Document and convert it to the specified backend format. # # input - the String AsciiDoc source filename # options - a String, Array or Hash of options to control processing (default: {}) # String and Array values are converted into a Hash. # See Asciidoctor::Document#initialize for details about options. 
# # Returns the Document object if the converted String is written to a # file, otherwise the converted String def convert_file filename, options = {} self.convert ::File.new(filename || ''), options end # Alias render_file to convert_file to maintain backwards compatibility alias :render_file :convert_file end if RUBY_ENGINE == 'opal' require 'asciidoctor/version' require 'asciidoctor/timings' else autoload :VERSION, 'asciidoctor/version' autoload :Timings, 'asciidoctor/timings' end end # core extensions require 'asciidoctor/core_ext' # modules require 'asciidoctor/helpers' require 'asciidoctor/substitutors' # abstract classes require 'asciidoctor/abstract_node' require 'asciidoctor/abstract_block' # concrete classes require 'asciidoctor/attribute_list' require 'asciidoctor/block' require 'asciidoctor/callouts' require 'asciidoctor/converter' require 'asciidoctor/converter/html5' if RUBY_ENGINE_OPAL require 'asciidoctor/document' require 'asciidoctor/inline' require 'asciidoctor/list' require 'asciidoctor/parser' require 'asciidoctor/path_resolver' require 'asciidoctor/reader' require 'asciidoctor/section' require 'asciidoctor/stylesheets' require 'asciidoctor/table' asciidoctor-1.5.5/lib/asciidoctor/000077500000000000000000000000001277513741400171115ustar00rootroot00000000000000asciidoctor-1.5.5/lib/asciidoctor/abstract_block.rb000066400000000000000000000304661277513741400224240ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor class AbstractBlock < AbstractNode # Public: The types of content that this block can accomodate attr_accessor :content_model # Public: Substitutions to be applied to content in this block attr_reader :subs # Public: Get the Array of Asciidoctor::AbstractBlock sub-blocks for this block attr_reader :blocks # Public: Set the Integer level of this Section or the Section level in which this Block resides attr_accessor :level # Public: Set the String block title. attr_writer :title # Public: Get/Set the String style (block type qualifier) for this block. attr_accessor :style # Public: Get/Set the caption for this block attr_accessor :caption # Public: Gets/Sets the location in the AsciiDoc source where this block begins attr_accessor :source_location def initialize parent, context, opts = {} super @content_model = :compound @subs = [] @default_subs = nil @blocks = [] @id = nil @title = nil @caption = nil @style = nil @level = if context == :document 0 elsif parent && context != :section parent.level end @next_section_index = 0 @next_section_number = 1 @source_location = nil end def block? true end def inline? false end # Public: Update the context of this block. # # This method changes the context of this block. It also # updates the node name accordingly. def context=(context) @context = context @node_name = context.to_s end # Public: Get the converted String content for this Block. If the block # has child blocks, the content method should cause them to be # converted and returned as content that can be included in the # parent block's template. def convert @document.playback_attributes @attributes converter.convert self end # Alias render to convert to maintain backwards compatibility alias :render :convert # Public: Get the converted result of the child blocks by converting the # children appropriate to content model that this block supports. def content @blocks.map {|b| b.convert } * EOL end # Public: Get the source file where this block started def file @source_location ? 
@source_location.file : nil end # Public: Get the source line number where this block started def lineno @source_location ? @source_location.lineno : nil end # Public: A convenience method that checks whether the specified # substitution is enabled for this block. # # name - The Symbol substitution name # # Returns A Boolean indicating whether the specified substitution is # enabled for this block def sub? name @subs.include? name end # Public: A convenience method that indicates whether the title instance # variable is blank (nil or empty) def title? !@title.nil_or_empty? end # Public: Get the String title of this Block with title substitions applied # # The following substitutions are applied to block and section titles: # # :specialcharacters, :quotes, :replacements, :macros, :attributes and :post_replacements # # Examples # # block.title = "Foo 3^ # {two-colons} Bar(1)" # block.title # => "Foo 3^ # :: Bar(1)" # # Returns the String title of this Block def title # prevent substitutions from being applied multiple times if defined?(@subbed_title) @subbed_title elsif @title @subbed_title = apply_title_subs(@title) else @title end end # Public: Convenience method that returns the interpreted title of the Block # with the caption prepended. # # Concatenates the value of this Block's caption instance variable and the # return value of this Block's title method. No space is added between the # two values. If the Block does not have a caption, the interpreted title is # returned. # # Returns the String title prefixed with the caption, or just the title if no # caption is set def captioned_title %(#{@caption}#{title}) end # Public: Determine whether this Block contains block content # # Returns A Boolean indicating whether this Block has block content def blocks? !@blocks.empty? end # Public: Append a content block to this block's list of blocks. # # block - The new child block. # # Examples # # block = Block.new(parent, :preamble, :content_model => :compound) # # block << Block.new(block, :paragraph, :source => 'p1') # block << Block.new(block, :paragraph, :source => 'p2') # block.blocks? # # => true # block.blocks.size # # => 2 # # Returns The parent Block def << block # parent assignment pending refactor #block.parent = self @blocks << block self end # NOTE append alias required for adapting to a Java API alias :append :<< # Public: Get the Array of child Section objects # # Only applies to Document and Section instances # # Examples # # doc << (sect1 = Section.new doc, 1, false) # sect1.title = 'Section 1' # para1 = Block.new sect1, :paragraph, :source => 'Paragraph 1' # para2 = Block.new sect1, :paragraph, :source => 'Paragraph 2' # sect1 << para1 << para2 # sect1 << (sect1_1 = Section.new sect1, 2, false) # sect1_1.title = 'Section 1.1' # sect1_1 << (Block.new sect1_1, :paragraph, :source => 'Paragraph 3') # sect1.blocks? # # => true # sect1.blocks.size # # => 3 # sect1.sections.size # # => 1 # # Returns an [Array] of Section objects def sections @blocks.select {|block| block.context == :section } end # Public: Check whether this block has any child Section objects. # # Only applies to Document and Section instances # # Returns A [Boolean] to indicate whether this block has child Section objects def sections? @next_section_index > 0 end # stage the Enumerable mixin until we're sure we've got it right =begin include ::Enumerable # Public: Yield the block on this block node and all its descendant # block node children to satisfy the Enumerable contract. 
# # Returns nothing def each &block # yucky, dlist is a special case if @context == :dlist @blocks.flatten.each &block else #yield self.header if @context == :document && header? @blocks.each &block end end #-- # TODO is there a way to make this lazy? def each_recursive &block block = lambda {|node| node } unless block_given? results = [] self.each do |node| results << block.call(node) results.concat(node.each_recursive(&block)) if ::Enumerable === node end block_given? ? results : results.to_enum end =end # Public: Query for all descendant block-level nodes in the document tree # that match the specified selector (context, style, id, and/or role). If a # Ruby block is given, it's used as an additional filter. If no selector or # Ruby block is supplied, all block-level nodes in the tree are returned. # # Examples # # doc.find_by context: :section # #=> Asciidoctor::Section@14459860 { level: 0, title: "Hello, AsciiDoc!", blocks: 0 } # #=> Asciidoctor::Section@14505460 { level: 1, title: "First Section", blocks: 1 } # # doc.find_by(context: :section) {|section| section.level == 1 } # #=> Asciidoctor::Section@14505460 { level: 1, title: "First Section", blocks: 1 } # # doc.find_by context: :listing, style: 'source' # #=> Asciidoctor::Block@13136720 { context: :listing, content_model: :verbatim, style: "source", lines: 1 } # # Returns An Array of block-level nodes that match the filter or an empty Array if no matches are found #-- # TODO support jQuery-style selector (e.g., image.thumb) def find_by selector = {}, &block result = [] if ((any_context = !(context_selector = selector[:context])) || context_selector == @context) && (!(style_selector = selector[:style]) || style_selector == @style) && (!(role_selector = selector[:role]) || (has_role? role_selector)) && (!(id_selector = selector[:id]) || id_selector == @id) if id_selector if block_given? return (yield self) ? [self] : result else return [self] end elsif block_given? result << self if (yield self) else result << self end end # process document header as a section if present if @context == :document && (any_context || context_selector == :section) && header? result.concat(@header.find_by selector, &block) end unless context_selector == :document # optimization # yuck, dlist is a special case if @context == :dlist if any_context || context_selector != :section # optimization @blocks.flatten.each do |li| # NOTE the list item of a dlist can be nil, so we have to check result.concat(li.find_by selector, &block) if li end end elsif @blocks.each do |b| next if (context_selector == :section && b.context != :section) # optimization result.concat(b.find_by selector, &block) end end end result end alias :query :find_by # Public: Remove a substitution from this block # # sub - The Symbol substitution name # # Returns nothing def remove_sub sub @subs.delete sub nil end # Public: Generate a caption and assign it to this block if one # is not already assigned. # # If the block has a title and a caption prefix is available # for this block, then build a caption from this information, # assign it a number and store it to the caption attribute on # the block. # # If an explicit caption has been specified on this block, then # do nothing. # # key - The prefix of the caption and counter attribute names. # If not provided, the name of the context for this block # is used. (default: nil). # # Returns nothing def assign_caption(caption = nil, key = nil) return unless title? 
|| !@caption if caption @caption = caption else if (value = @document.attributes['caption']) @caption = value elsif title? key ||= @context.to_s caption_key = "#{key}-caption" if (caption_title = @document.attributes[caption_key]) caption_num = @document.counter_increment("#{key}-number", self) @caption = "#{caption_title} #{caption_num}. " end end end nil end # Public: Retrieve the list marker keyword for the specified list type. # # For use in the HTML type attribute. # # list_type - the type of list; default to the @style if not specified # # Returns the single-character [String] keyword that represents the marker for the specified list type def list_marker_keyword list_type = nil ORDERED_LIST_KEYWORDS[list_type || @style] end # Internal: Assign the next index (0-based) to this section # # Assign the next index of this section within the parent # Block (in document order) # # Returns nothing def assign_index(section) section.index = @next_section_index @next_section_index += 1 if section.sectname == 'appendix' appendix_number = @document.counter 'appendix-number', 'A' section.number = appendix_number if section.numbered if (caption = @document.attr 'appendix-caption', '').empty? section.caption = %(#{appendix_number}. ) else section.caption = %(#{caption} #{appendix_number}: ) end elsif section.numbered # chapters in a book doctype should be sequential even when divided into parts if (section.level == 1 || (section.level == 0 && section.special)) && @document.doctype == 'book' section.number = @document.counter('chapter-number', 1) else section.number = @next_section_number @next_section_number += 1 end end end # Internal: Reassign the section indexes # # Walk the descendents of the current Document or Section # and reassign the section 0-based index value to each Section # as it appears in document order. # # IMPORTANT You must invoke this method on a node after removing # child sections or else the internal counters will be off. # # Returns nothing def reindex_sections @next_section_index = 0 @next_section_number = 0 @blocks.each {|block| if block.context == :section assign_index(block) block.reindex_sections end } end end end asciidoctor-1.5.5/lib/asciidoctor/abstract_node.rb000066400000000000000000000531161277513741400222540ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor # Public: An abstract base class that provides state and methods for managing a # node of AsciiDoc content. The state and methods on this class are comment to # all content segments in an AsciiDoc document. class AbstractNode include Substitutors # Public: Get the element which is the parent of this node attr_reader :parent # Public: Get the Asciidoctor::Document to which this node belongs attr_reader :document # Public: Get the Symbol context for this node attr_reader :context # Public: Get the String name of this node attr_reader :node_name # Public: Get/Set the id of this node attr_accessor :id # Public: Get the Hash of attributes for this node attr_reader :attributes def initialize parent, context, opts = {} # document is a special case, should refer to itself if context == :document @document = parent else if parent @parent = parent @document = parent.document else @parent = nil @document = nil end end @context = context @node_name = context.to_s # QUESTION are we correct in duplicating the attributes (seems to be just as fast) @attributes = (opts.key? :attributes) ? 
opts[:attributes].dup : {} @passthroughs = {} end # Public: Associate this Block with a new parent Block # # parent - The Block to set as the parent of this Block # # Returns nothing def parent=(parent) @parent = parent @document = parent.document nil end # Public: Returns whether this {AbstractNode} is an instance of {Inline} # # Returns [Boolean] def inline? # :nocov: raise ::NotImplementedError # :nocov: end # Public: Returns whether this {AbstractNode} is an instance of {Block} # # Returns [Boolean] def block? # :nocov: raise ::NotImplementedError # :nocov: end # Public: Get the value of the specified attribute # # Get the value for the specified attribute. First look in the attributes on # this node and return the value of the attribute if found. Otherwise, if # this node is a child of the Document node, look in the attributes of the # Document node and return the value of the attribute if found. Otherwise, # return the default value, which defaults to nil. # # name - the String or Symbol name of the attribute to lookup # default_value - the Object value to return if the attribute is not found (default: nil) # inherit - a Boolean indicating whether to check for the attribute on the # AsciiDoctor::Document if not found on this node (default: false) # # return the value of the attribute or the default value if the attribute # is not found in the attributes of this node or the document node def attr(name, default_value = nil, inherit = true) name = name.to_s if ::Symbol === name inherit = false if self == @document if inherit @attributes[name] || @document.attributes[name] || default_value else @attributes[name] || default_value end end # Public: Check if the attribute is defined, optionally performing a # comparison of its value if expected is not nil # # Check if the attribute is defined. First look in the attributes on this # node. If not found, and this node is a child of the Document node, look in # the attributes of the Document node. If the attribute is found and a # comparison value is specified (not nil), return whether the two values match. # Otherwise, return whether the attribute was found. # # name - the String or Symbol name of the attribute to lookup # expect - the expected Object value of the attribute (default: nil) # inherit - a Boolean indicating whether to check for the attribute on the # AsciiDoctor::Document if not found on this node (default: false) # # return a Boolean indicating whether the attribute exists and, if a # comparison value is specified, whether the value of the attribute matches # the comparison value def attr?(name, expect = nil, inherit = true) name = name.to_s if ::Symbol === name inherit = false if self == @document if expect.nil? @attributes.has_key?(name) || (inherit && @document.attributes.has_key?(name)) elsif inherit expect == (@attributes[name] || @document.attributes[name]) else expect == @attributes[name] end end # Public: Assign the value to the attribute name for the current node. # # name - The String attribute name to assign # value - The Object value to assign to the attribute # overwrite - A Boolean indicating whether to assign the attribute # if currently present in the attributes Hash (default: true) # # Returns a [Boolean] indicating whether the assignment was performed def set_attr name, value, overwrite = true if overwrite == false && (@attributes.key? name) false else @attributes[name] = value true end end # TODO document me def set_option(name) if @attributes.has_key? 
'options' @attributes['options'] = "#{@attributes['options']},#{name}" else @attributes['options'] = name end @attributes["#{name}-option"] = '' end # Public: A convenience method to check if the specified option attribute is # enabled on the current node. # # Check if the option is enabled. This method simply checks to see if the # %name%-option attribute is defined on the current node. # # name - the String or Symbol name of the option # # return a Boolean indicating whether the option has been specified def option?(name) @attributes.has_key? %(#{name}-option) end # Public: Update the attributes of this node with the new values in # the attributes argument. # # If an attribute already exists with the same key, it's value will # be overridden. # # attributes - A Hash of attributes to assign to this node. # # Returns nothing def update_attributes(attributes) @attributes.update(attributes) nil end # Public: Get the Asciidoctor::Converter instance being used to convert the # current Asciidoctor::Document. def converter @document.converter end # Public: A convenience method that checks if the role attribute is specified def role?(expect = nil) if expect expect == (@attributes['role'] || @document.attributes['role']) else @attributes.has_key?('role') || @document.attributes.has_key?('role') end end # Public: A convenience method that returns the value of the role attribute def role @attributes['role'] || @document.attributes['role'] end # Public: A convenience method that checks if the specified role is present # in the list of roles on this node def has_role?(name) if (val = (@attributes['role'] || @document.attributes['role'])) val.split(' ').include?(name) else false end end # Public: A convenience method that returns the role names as an Array def roles if (val = (@attributes['role'] || @document.attributes['role'])) val.split(' ') else [] end end # Public: A convenience method that adds the given role directly to this node def add_role(name) unless (roles = (@attributes['role'] || '').split(' ')).include? name @attributes['role'] = roles.push(name) * ' ' end end # Public: A convenience method that removes the given role directly from this node def remove_role(name) if (roles = (@attributes['role'] || '').split(' ')).include? name roles.delete name @attributes['role'] = roles * ' ' end end # Public: A convenience method that checks if the reftext attribute is specified def reftext? @attributes.has_key?('reftext') || @document.attributes.has_key?('reftext') end # Public: A convenience method that returns the value of the reftext attribute def reftext @attributes['reftext'] || @document.attributes['reftext'] end # Public: Construct a reference or data URI to an icon image for the # specified icon name. # # If the 'icon' attribute is set on this block, the name is ignored and the # value of this attribute is used as the target image path. Otherwise, # construct a target image path by concatenating the value of the 'iconsdir' # attribute, the icon name and the value of the 'icontype' attribute # (defaulting to 'png'). # # The target image path is then passed through the #image_uri() method. If # the 'data-uri' attribute is set on the document, the image will be # safely converted to a data URI. # # The return value of this method can be safely used in an image tag. # # name - The String name of the icon # # Returns A String reference or data URI for an icon image def icon_uri name if attr? 'icon' # QUESTION should we add extension if resolved value is an absolute URI? 
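# When the block carries an explicit icon attribute, that value (rather than the
# name passed to this method) is resolved through image_uri against iconsdir, and
# the icontype extension (png by default) is appended only when the resolved value
# has no extension of its own; e.g., an illustrative icon=smiley gains a .png
# suffix, while icon=smiley.svg is left untouched.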
if ::File.extname(uri = (image_uri attr('icon'), 'iconsdir')).empty? %(#{uri}.#{@document.attr 'icontype', 'png'}) else uri end else image_uri %(#{name}.#{@document.attr 'icontype', 'png'}), 'iconsdir' end end # Public: Construct a URI reference to the target media. # # If the target media is a URI reference, then leave it untouched. # # The target media is resolved relative to the directory retrieved from the # specified attribute key, if provided. # # The return value can be safely used in a media tag (img, audio, video). # # target - A String reference to the target media # asset_dir_key - The String attribute key used to lookup the directory where # the media is located (default: 'imagesdir') # # Returns A String reference for the target media def media_uri(target, asset_dir_key = 'imagesdir') normalize_web_path target, (asset_dir_key ? @document.attr(asset_dir_key) : nil) end # Public: Construct a URI reference or data URI to the target image. # # If the target image is a URI reference, then leave it untouched. # # The target image is resolved relative to the directory retrieved from the # specified attribute key, if provided. # # If the 'data-uri' attribute is set on the document, and the safe mode level # is less than SafeMode::SECURE, the image will be safely converted to a data URI # by reading it from the same directory. If neither of these conditions # are satisfied, a relative path (i.e., URL) will be returned. # # The return value of this method can be safely used in an image tag. # # target_image - A String path to the target image # asset_dir_key - The String attribute key used to lookup the directory where # the image is located (default: 'imagesdir') # # Returns A String reference or data URI for the target image def image_uri(target_image, asset_dir_key = 'imagesdir') if (doc = @document).safe < SafeMode::SECURE && doc.attr?('data-uri') if (Helpers.uriish? target_image) || (asset_dir_key && (images_base = doc.attr(asset_dir_key)) && (Helpers.uriish? images_base) && (target_image = normalize_web_path(target_image, images_base, false))) if doc.attr?('allow-uri-read') generate_data_uri_from_uri target_image, doc.attr?('cache-uri') else target_image end else generate_data_uri target_image, asset_dir_key end else normalize_web_path target_image, (asset_dir_key ? doc.attr(asset_dir_key) : nil) end end # Public: Generate a data URI that can be used to embed an image in the output document # # First, and foremost, the target image path is cleaned if the document safe mode level # is set to at least SafeMode::SAFE (a condition which is true by default) to prevent access # to ancestor paths in the filesystem. The image data is then read and converted to # Base64. Finally, a data URI is built which can be used in an image tag. # # target_image - A String path to the target image # asset_dir_key - The String attribute key used to lookup the directory where # the image is located (default: nil) # # Returns A String data URI containing the content of the target image def generate_data_uri(target_image, asset_dir_key = nil) ext = ::File.extname target_image # QUESTION what if ext is empty? mimetype = (ext == '.svg' ? 'image/svg+xml' : %(image/#{ext[1..-1]})) if asset_dir_key image_path = normalize_system_path(target_image, @document.attr(asset_dir_key), nil, :target_name => 'image') else image_path = normalize_system_path(target_image) end unless ::File.readable? 
image_path warn %(asciidoctor: WARNING: image to embed not found or not readable: #{image_path}) # must enclose string following return in " for Opal return "data:#{mimetype}:base64," # uncomment to return 1 pixel white dot instead #return 'data:image/gif;base64,R0lGODlhAQABAAAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw==' end bindata = nil if ::IO.respond_to? :binread bindata = ::IO.binread(image_path) else bindata = ::File.open(image_path, 'rb') {|file| file.read } end # NOTE base64 is autoloaded by reference to ::Base64 %(data:#{mimetype};base64,#{::Base64.encode64(bindata).delete EOL}) end # Public: Read the image data from the specified URI and generate a data URI # # The image data is read from the URI and converted to Base64. A data URI is # constructed from the content_type header and Base64 data and returned, # which can then be used in an image tag. # # image_uri - The URI from which to read the image data. Can be http://, https:// or ftp:// # cache_uri - A Boolean to control caching. When true, the open-uri-cached library # is used to cache the image for subsequent reads. (default: false) # # Returns A data URI string built from Base64 encoded data read from the URI # and the mime type specified in the Content Type header. def generate_data_uri_from_uri image_uri, cache_uri = false if cache_uri # caching requires the open-uri-cached gem to be installed # processing will be automatically aborted if these libraries can't be opened Helpers.require_library 'open-uri/cached', 'open-uri-cached' elsif !::RUBY_ENGINE_OPAL # autoload open-uri ::OpenURI end begin mimetype = nil bindata = open(image_uri, 'rb') {|file| mimetype = file.content_type file.read } # NOTE base64 is autoloaded by reference to ::Base64 %(data:#{mimetype};base64,#{::Base64.encode64(bindata).delete EOL}) rescue warn %(asciidoctor: WARNING: could not retrieve image data from URI: #{image_uri}) image_uri # uncomment to return empty data (however, mimetype needs to be resolved) #%(data:#{mimetype}:base64,) # uncomment to return 1 pixel white dot instead #'data:image/gif;base64,R0lGODlhAQABAAAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw==' end end # Public: Resolve the URI or system path to the specified target, then read and return its contents # # The URI or system path of the target is first resolved. If the resolved path is a URI, read the # contents from the URI if the allow-uri-read attribute is set, enabling caching if the cache-uri # attribute is also set. If the resolved path is not a URI, read the contents of the file from the # file system. If the normalize option is set, the data will be normalized. # # target - The URI or local path from which to read the data. # opts - a Hash of options to control processing (default: {}) # * :label the String label of the target to use in warning messages (default: 'asset') # * :normalize a Boolean that indicates whether the data should be normalized (default: false) # * :start the String relative base path to use when resolving the target (default: nil) # * :warn_on_failure a Boolean that indicates whether warnings are issued if the target cannot be read (default: true) # Returns the contents of the resolved target or nil if the resolved target cannot be read # -- # TODO refactor other methods in this class to use this method were possible (repurposing if necessary) def read_contents target, opts = {} doc = @document if (Helpers.uriish? target) || ((start = opts[:start]) && (Helpers.uriish? start) && (target = (@path_resolver ||= PathResolver.new).web_path target, start)) if doc.attr? 
'allow-uri-read' Helpers.require_library 'open-uri/cached', 'open-uri-cached' if doc.attr? 'cache-uri' begin data = ::OpenURI.open_uri(target) {|fd| fd.read } data = (Helpers.normalize_lines_from_string data) * EOL if opts[:normalize] rescue warn %(asciidoctor: WARNING: could not retrieve contents of #{opts[:label] || 'asset'} at URI: #{target}) if opts.fetch :warn_on_failure, true data = nil end else warn %(asciidoctor: WARNING: cannot retrieve contents of #{opts[:label] || 'asset'} at URI: #{target} (allow-uri-read attribute not enabled)) if opts.fetch :warn_on_failure, true data = nil end else target = normalize_system_path target, opts[:start], nil, :target_name => (opts[:label] || 'asset') data = read_asset target, :normalize => opts[:normalize], :warn_on_failure => (opts.fetch :warn_on_failure, true) end data end # Public: Read the contents of the file at the specified path. # This method assumes that the path is safe to read. It checks # that the file is readable before attempting to read it. # # path - the String path from which to read the contents # opts - a Hash of options to control processing (default: {}) # * :warn_on_failure a Boolean that controls whether a warning # is issued if the file cannot be read (default: false) # * :normalize a Boolean that controls whether the lines # are normalized and coerced to UTF-8 (default: false) # # Returns the [String] content of the file at the specified path, or nil # if the file does not exist. def read_asset(path, opts = {}) # remap opts for backwards compatibility opts = { :warn_on_failure => (opts != false) } unless ::Hash === opts if ::File.readable? path if opts[:normalize] Helpers.normalize_lines_from_string(::IO.read(path)) * EOL else # QUESTION should we chomp or rstrip content? ::IO.read(path) end else warn %(asciidoctor: WARNING: file does not exist or cannot be read: #{path}) if opts[:warn_on_failure] nil end end # Public: Normalize the web page using the PathResolver. # # See {PathResolver#web_path} for details. # # target - the String target path # start - the String start (i.e, parent) path (optional, default: nil) # preserve_uri_target - a Boolean indicating whether target should be preserved if contains a URI (default: true) # # Returns the resolved [String] path def normalize_web_path(target, start = nil, preserve_uri_target = true) if preserve_uri_target && (Helpers.uriish? target) target else (@path_resolver ||= PathResolver.new).web_path target, start end end # Public: Resolve and normalize a secure path from the target and start paths # using the PathResolver. # # See {PathResolver#system_path} for details. # # The most important functionality in this method is to prevent resolving a # path outside of the jail (which defaults to the directory of the source # file, stored in the base_dir instance variable on Document) if the document # safe level is set to SafeMode::SAFE or greater (a condition which is true # by default). # # target - the String target path # start - the String start (i.e., parent) path # jail - the String jail path to confine the resolved path # opts - an optional Hash of options to control processing (default: {}): # * :recover is used to control whether the processor should auto-recover # when an illegal path is encountered # * :target_name is used in messages to refer to the path being resolved # # raises a SecurityError if a jail is specified and the resolved path is # outside the jail. 
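# Examples (illustrative paths; outcomes assume the jail imposed at SafeMode::SAFE or above)
#
#   normalize_system_path 'images/tiger.png'
#   # => the path resolved beneath the document's base_dir, e.g. '/docs/project/images/tiger.png'
#
#   normalize_system_path '../../../etc/passwd'
#   # => the escape out of the jail is refused (recovered with a warning, or a
#   #    SecurityError is raised when the :recover option is false)
#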
# # Returns the [String] path resolved from the start and target paths, with any # parent references resolved and self references removed. If a jail is provided, # this path will be guaranteed to be contained within the jail. def normalize_system_path target, start = nil, jail = nil, opts = {} path_resolver = (@path_resolver ||= PathResolver.new) if (doc = @document).safe < SafeMode::SAFE if start start = ::File.join doc.base_dir, start unless path_resolver.is_root? start else start = doc.base_dir end else start = doc.base_dir unless start jail = doc.base_dir unless jail end path_resolver.system_path target, start, jail, opts end # Public: Normalize the asset file or directory to a concrete and rinsed path # # Delegates to normalize_system_path, with the start path set to the value of # the base_dir instance variable on the Document object. def normalize_asset_path(asset_ref, asset_name = 'path', autocorrect = true) normalize_system_path(asset_ref, @document.base_dir, nil, :target_name => asset_name, :recover => autocorrect) end # Public: Calculate the relative path to this absolute filename from the Document#base_dir #def relative_path(filename) # (@path_resolver ||= PathResolver.new).relative_path filename, @document.base_dir #end # Public: Check whether the specified String is a URI by # matching it against the Asciidoctor::UriSniffRx regex. # # @deprecated Use Helpers.uriish? instead def is_uri? str Helpers.uriish? str end end end asciidoctor-1.5.5/lib/asciidoctor/attribute_list.rb000066400000000000000000000134421277513741400225000ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor # Public: Handles parsing AsciiDoc attribute lists into a Hash of key/value # pairs. By default, attributes must each be separated by a comma and quotes # may be used around the value. If a key is not detected, the value is assigned # to a 1-based positional key, The positional attributes can be "rekeyed" when # given a posattrs array either during parsing or after the fact. # # Examples # # attrlist = Asciidoctor::AttributeList.new('astyle') # # attrlist.parse # => {0 => 'astyle'} # # attrlist.rekey(['style']) # => {'style' => 'astyle'} # # attrlist = Asciidoctor::AttributeList.new('quote, Famous Person, Famous Book (2001)') # # attrlist.parse(['style', 'attribution', 'citetitle']) # => {'style' => 'quote', 'attribution' => 'Famous Person', 'citetitle' => 'Famous Book (2001)'} # class AttributeList # FIXME Opal not inheriting constants from parent scope # NOTE can't use ::RUBY_ENGINE_OPAL here either if ::RUBY_ENGINE == 'opal' CG_BLANK = '[ \\t]' CC_WORD = 'a-zA-Z0-9_' CG_WORD = '[a-zA-Z0-9_]' end # Public: Regular expressions for detecting the boundary of a value BoundaryRxs = { '"' => /.*?[^\\](?=")/, '\'' => /.*?[^\\](?=')/, ',' => /.*?(?=#{CG_BLANK}*(,|$))/ } # Public: Regular expressions for unescaping quoted characters EscapedQuoteRxs = { '"' => /\\"/, '\'' => /\\'/ } # Public: A regular expression for an attribute name (approx. 
name token from XML) # TODO named attributes cannot contain dash characters NameRx = /#{CG_WORD}[#{CC_WORD}\-.]*/ BlankRx = /#{CG_BLANK}+/ # Public: Regular expressions for skipping blanks and delimiters SkipRxs = { :blank => BlankRx, ',' => /#{CG_BLANK}*(,|$)/ } def initialize source, block = nil, delimiter = ',' @scanner = ::StringScanner.new source @block = block @delimiter = delimiter @delimiter_skip_pattern = SkipRxs[delimiter] @delimiter_boundary_pattern = BoundaryRxs[delimiter] @attributes = nil end def parse_into attributes, posattrs = [] attributes.update(parse posattrs) end def parse posattrs = [] # return if already parsed return @attributes if @attributes @attributes = {} # QUESTION do we want to store the attribute list as the zero-index attribute? #attributes[0] = @scanner.string index = 0 while parse_attribute index, posattrs break if @scanner.eos? skip_delimiter index += 1 end @attributes end def rekey posattrs AttributeList.rekey @attributes, posattrs end def self.rekey attributes, pos_attrs pos_attrs.each_with_index do |key, index| next unless key pos = index + 1 if (val = attributes[pos]) # QUESTION should we delete the positional key? attributes[key] = val end end attributes end def parse_attribute index = 0, pos_attrs = [] single_quoted_value = false skip_blank # example: "quote" if (first = @scanner.peek(1)) == '"' name = parse_attribute_value @scanner.get_byte value = nil # example: 'quote' elsif first == '\'' name = parse_attribute_value @scanner.get_byte value = nil single_quoted_value = true else name = scan_name skipped = 0 c = nil if @scanner.eos? return false unless name else skipped = skip_blank || 0 c = @scanner.get_byte end # example: quote if !c || c == @delimiter value = nil # example: Sherlock Holmes || =foo= elsif c != '=' || !name name = %(#{name}#{' ' * skipped}#{c}#{scan_to_delimiter}) value = nil else skip_blank if @scanner.peek(1) # example: foo="bar" || foo="ba\"zaar" if (c = @scanner.get_byte) == '"' value = parse_attribute_value c # example: foo='bar' || foo='ba\'zaar' || foo='ba"zaar' elsif c == '\'' value = parse_attribute_value c single_quoted_value = true # example: foo=, elsif c == @delimiter value = nil # example: foo=bar (all spaces ignored) else value = %(#{c}#{scan_to_delimiter}) return true if value == 'None' end end end end if value # example: options="opt1,opt2,opt3" # opts is an alias for options case name when 'options', 'opts' name = 'options' value.tr(' ', '').split(',').each {|opt| @attributes[%(#{opt}-option)] = '' } @attributes[name] = value when 'title' @attributes[name] = value else @attributes[name] = single_quoted_value && !value.empty? && @block ? (@block.apply_normal_subs value) : value end else resolved_name = single_quoted_value && !name.empty? && @block ? (@block.apply_normal_subs name) : name if (pos_name = pos_attrs[index]) @attributes[pos_name] = resolved_name end # QUESTION should we always assign the positional key? @attributes[index + 1] = resolved_name # QUESTION should we assign the resolved name as an attribute? 
#@attributes[resolved_name] = nil end true end def parse_attribute_value quote # empty quoted value if @scanner.peek(1) == quote @scanner.get_byte return '' end if (value = scan_to_quote quote) @scanner.get_byte value.gsub EscapedQuoteRxs[quote], quote else %(#{quote}#{scan_to_delimiter}) end end def skip_blank @scanner.skip BlankRx end def skip_delimiter @scanner.skip @delimiter_skip_pattern end def scan_name @scanner.scan NameRx end def scan_to_delimiter @scanner.scan @delimiter_boundary_pattern end def scan_to_quote quote @scanner.scan BoundaryRxs[quote] end end end asciidoctor-1.5.5/lib/asciidoctor/block.rb000066400000000000000000000127141277513741400205350ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor # Public: Methods for managing blocks of Asciidoc content in a section. # # Examples # # block = Asciidoctor::Block.new(parent, :paragraph, :source => '_This_ is a ') # block.content # => "This is a <test>" class Block < AbstractBlock (DEFAULT_CONTENT_MODEL = { # TODO should probably fill in all known blocks :audio => :empty, :image => :empty, :listing => :verbatim, :literal => :verbatim, :stem => :raw, :open => :compound, :page_break => :empty, :pass => :raw, :thematic_break => :empty, :video => :empty }).default = :simple # Public: Create alias for context to be consistent w/ AsciiDoc alias :blockname :context # Public: Get/Set the original Array content for this block, if applicable attr_accessor :lines # Public: Initialize an Asciidoctor::Block object. # # parent - The parent AbstractBlock with a compound content model to which this Block will be appended. # context - The Symbol context name for the type of content (e.g., :paragraph). # opts - a Hash of options to customize block initialization: (default: {}) # * :content_model indicates whether blocks can be nested in this Block (:compound), otherwise # how the lines should be processed (:simple, :verbatim, :raw, :empty). (default: :simple) # * :attributes a Hash of attributes (key/value pairs) to assign to this Block. (default: {}) # * :source a String or Array of raw source for this Block. (default: nil) # # IMPORTANT: If you don't specify the `:subs` option, you must explicitly call # the `lock_in_subs` method to resolve and assign the substitutions to this # block (which are resolved from the `subs` attribute, if specified, or the # default substitutions based on this block's context). If you want to use the # default subs for a block, pass the option `:subs => :default`. You can # override the default subs using the `:default_subs` option. #-- # QUESTION should we store source_data as lines for blocks that have compound content models? def initialize parent, context, opts = {} super @content_model = opts[:content_model] || DEFAULT_CONTENT_MODEL[context] if opts.key? 
:subs # FIXME feels funky; we have to be defensive to get lock_in_subs to honor override # FIXME does not resolve substitution groups inside Array (e.g., [:normal]) if (subs = opts[:subs]) # e.g., :subs => :defult # subs attribute is honored; falls back to opts[:default_subs], then built-in defaults based on context if subs == :default @default_subs = opts[:default_subs] # e.g., :subs => [:quotes] # subs attribute is not honored elsif ::Array === subs @default_subs = subs.dup @attributes.delete 'subs' # e.g., :subs => :normal or :subs => 'normal' # subs attribute is not honored else @default_subs = nil # interpolation is the fastest way to dup subs as a string @attributes['subs'] = %(#{subs}) end # resolve the subs eagerly only if subs option is specified lock_in_subs # e.g., :subs => nil else @subs = [] # prevent subs from being resolved @default_subs = [] @attributes.delete 'subs' end # defer subs resolution; subs attribute is honored else @subs = [] # QUESTION should we honor :default_subs option (i.e., @default_subs = opts[:default_subs])? @default_subs = nil end if (raw_source = opts[:source]).nil_or_empty? @lines = [] elsif ::String === raw_source @lines = Helpers.normalize_lines_from_string raw_source else @lines = raw_source.dup end end # Public: Get the converted result of the child blocks by converting the # children appropriate to content model that this block supports. # # Examples # # doc = Asciidoctor::Document.new # block = Asciidoctor::Block.new(doc, :paragraph, # :source => '_This_ is what happens when you a stranger in the !') # block.content # => "This is what happens when you <meet> a stranger in the <alps>!" def content case @content_model when :compound super when :simple apply_subs(@lines * EOL, @subs) when :verbatim, :raw #((apply_subs @lines.join(EOL), @subs).sub StripLineWiseRx, '\1') # QUESTION could we use strip here instead of popping empty lines? # maybe apply_subs can know how to strip whitespace? result = apply_subs @lines, @subs if result.size < 2 result[0] else result.shift while (first = result[0]) && first.rstrip.empty? result.pop while (last = result[-1]) && last.rstrip.empty? result * EOL end else warn %(Unknown content model '#{@content_model}' for block: #{to_s}) unless @content_model == :empty nil end end # Public: Returns the preprocessed source of this block # # Returns the a String containing the lines joined together or nil if there # are no lines def source @lines * EOL end def to_s content_summary = @content_model == :compound ? %(blocks: #{@blocks.size}) : %(lines: #{@lines.size}) %(#<#{self.class}@#{object_id} {context: #{@context.inspect}, content_model: #{@content_model.inspect}, style: #{@style.inspect}, #{content_summary}}>) end end end asciidoctor-1.5.5/lib/asciidoctor/callouts.rb000066400000000000000000000061001277513741400212610ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor # Public: Maintains a catalog of callouts and their associations. class Callouts def initialize @lists = [] @list_index = 0 next_list end # Public: Register a new callout for the given list item ordinal. # # Generates a unique id for this callout based on the index of the next callout # list in the document and the index of this callout since the end of the last # callout list. 
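# The generated id takes the form CO<list>-<callout> (for example, CO1-2 identifies
# the second callout belonging to the first callout list), as built by
# generate_callout_id below.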
# # li_ordinal - the Integer ordinal (1-based) of the list item to which this # callout is to be associated # # Examples # # callouts = Asciidoctor::Callouts.new # callouts.register(1) # # => "CO1-1" # callouts.next_list # callouts.register(2) # # => "CO2-1" # # Returns The unique String id of this callout def register li_ordinal current_list << { :ordinal => li_ordinal.to_i, :id => (id = generate_next_callout_id) } @co_index += 1 id end # Public: Get the next callout index in the document # # Reads the next callout index in the document and advances the pointer. # This method is used during conversion to retrieve the unique id of the # callout that was generated during parsing. # # Returns The unique String id of the next callout in the document def read_next_id id = nil list = current_list if @co_index <= list.size id = list[@co_index - 1][:id] end @co_index += 1 id end # Public: Get a space-separated list of callout ids for the specified list item # # li_ordinal - the Integer ordinal (1-based) of the list item for which to # retrieve the callouts # # Returns A space-separated String of callout ids associated with the specified list item def callout_ids li_ordinal current_list.map {|element| element[:ordinal] == li_ordinal ? %(#{element[:id]} ) : nil }.join.chop end # Public: The current list for which callouts are being collected # # Returns The Array of callouts at the position of the list index pointer def current_list @lists[@list_index - 1] end # Public: Advance to the next callout list in the document # # Returns nothing def next_list @list_index += 1 if @lists.size < @list_index @lists << [] end @co_index = 1 nil end # Public: Rewind the list index pointer, intended to be used when switching # from the parsing to conversion phase. # # Returns nothing def rewind @list_index = 1 @co_index = 1 nil end # Internal: Generate a unique id for the callout based on the internal indexes # # Returns A unique String id for this callout def generate_next_callout_id generate_callout_id @list_index, @co_index end # Internal: Generate a unique id for the callout at the specified position # # list_index - The 1-based Integer index of the callout list within the document # co_index - The 1-based Integer index of the callout since the end of the last callout list # # Returns A unique String id for a callout def generate_callout_id list_index, co_index %(CO#{list_index}-#{co_index}) end end end asciidoctor-1.5.5/lib/asciidoctor/cli.rb000066400000000000000000000001271277513741400202050ustar00rootroot00000000000000require 'optparse' require 'asciidoctor/cli/options' require 'asciidoctor/cli/invoker' asciidoctor-1.5.5/lib/asciidoctor/cli/000077500000000000000000000000001277513741400176605ustar00rootroot00000000000000asciidoctor-1.5.5/lib/asciidoctor/cli/invoker.rb000066400000000000000000000074741277513741400216760ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor module Cli # Public Invocation class for starting Asciidoctor via CLI class Invoker attr_reader :options attr_reader :documents attr_reader :code def initialize(*options) @documents = [] @out = nil @err = nil @code = 0 options = options.flatten case (first_option = options[0]) when Options @options = first_option when ::Hash @options = Options.new options else if ::Integer === (result = Options.parse! options) @code = result @options = nil else @options = result end end end def invoke! 
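# Convert each input (files or stdin) according to the parsed options, collecting
# the resulting Document objects in @documents and recording a non-zero exit code
# in @code if an error or signal interrupts processing.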
old_verbose = -1 return unless @options old_verbose = $VERBOSE case @options[:verbose] when 0 $VERBOSE = nil when 1 $VERBOSE = false when 2 $VERBOSE = true end opts = {} infiles = [] outfile = nil tofile = nil @options.map do |key, val| case key when :input_files infiles = val when :output_file outfile = val when :destination_dir opts[:to_dir] = val if val when :attributes # NOTE processor will dup attributes internally opts[:attributes] = val when :trace # currently, nothing else opts[key] = val unless val.nil? end end if infiles.size == 1 && infiles[0] == '-' # allows use of block to supply stdin, particularly useful for tests inputs = [block_given? ? yield : STDIN] else inputs = infiles.map {|infile| ::File.new infile, 'r'} end # NOTE if infile is stdin, default to outfile as stout if outfile == '-' || (!outfile && infiles.size == 1 && infiles[0] == '-') tofile = (@out || $stdout) elsif outfile tofile = outfile opts[:mkdirs] = true else # automatically calculate outfile based on infile unless to_dir is set tofile = nil opts[:mkdirs] = true end show_timings = @options[:timings] inputs.each do |input| # NOTE processor will dup options and attributes internally input_opts = tofile.nil? ? opts : opts.merge(:to_file => tofile) if show_timings timings = Timings.new @documents << ::Asciidoctor.convert(input, input_opts.merge(:timings => timings)) timings.print_report((@err || $stderr), ((input.respond_to? :path) ? input.path : '-')) else @documents << ::Asciidoctor.convert(input, input_opts) end end rescue ::Exception => e if ::SignalException === e @code = e.signo # add extra endline if Ctrl+C is used (@err || $stderr).puts if ::Interrupt === e else @code = (e.respond_to? :status) ? e.status : 1 if @options[:trace] raise e else err = (@err || $stderr) if ::RuntimeError === e err.puts %(#{e.message} (#{e.class})) else err.puts e.message end err.puts ' Use --trace for backtrace' end end nil ensure $VERBOSE = old_verbose unless old_verbose == -1 end def document @documents[0] end def redirect_streams(out, err = nil) @out = out @err = err end def read_output @out ? @out.string : '' end def read_error @err ? @err.string : '' end def reset_streams @out = nil @err = nil end end end end asciidoctor-1.5.5/lib/asciidoctor/cli/options.rb000066400000000000000000000255321277513741400217070ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor module Cli # Public: List of options that can be specified on the command line class Options < ::Hash def initialize(options = {}) self[:attributes] = options[:attributes] || {} self[:input_files] = options[:input_files] || nil self[:output_file] = options[:output_file] || nil self[:safe] = options[:safe] || SafeMode::UNSAFE self[:header_footer] = options[:header_footer] || true self[:template_dirs] = options[:template_dirs] || nil self[:template_engine] = options[:template_engine] || nil if options[:doctype] self[:attributes]['doctype'] = options[:doctype] end if options[:backend] self[:attributes]['backend'] = options[:backend] end self[:eruby] = options[:eruby] || nil self[:verbose] = options[:verbose] || 1 self[:load_paths] = options[:load_paths] || nil self[:requires] = options[:requires] || nil self[:base_dir] = options[:base_dir] self[:destination_dir] = options[:destination_dir] || nil self[:trace] = false self[:timings] = false end def self.parse!(args) Options.new.parse! args end def parse!(args) opts_parser = ::OptionParser.new do |opts| opts.banner = <<-EOS Usage: asciidoctor [OPTION]... FILE... 
Translate the AsciiDoc source FILE or FILE(s) into the backend output format (e.g., HTML 5, DocBook 4.5, etc.) By default, the output is written to a file with the basename of the source file and the appropriate extension. Example: asciidoctor -b html5 source.asciidoc EOS opts.on('-b', '--backend BACKEND', 'set output format backend: [html5, xhtml5, docbook5, docbook45, manpage] (default: html5)', 'additional backends are supported via extensions (e.g., pdf, latex)') do |backend| self[:attributes]['backend'] = backend end opts.on('-d', '--doctype DOCTYPE', ['article', 'book', 'manpage', 'inline'], 'document type to use when converting document: [article, book, manpage, inline] (default: article)') do |doc_type| self[:attributes]['doctype'] = doc_type end opts.on('-o', '--out-file FILE', 'output file (default: based on path of input file); use - to output to STDOUT') do |output_file| self[:output_file] = output_file end opts.on('--safe', 'set safe mode level to safe (default: unsafe)', 'enables include macros, but restricts access to ancestor paths of source file', 'provided for compatibility with the asciidoc command') do self[:safe] = SafeMode::SAFE end opts.on('-S', '--safe-mode SAFE_MODE', (safe_mode_names = SafeMode.constants.map(&:to_s).map(&:downcase)), %(set safe mode level explicitly: [#{safe_mode_names * ', '}] (default: unsafe)), 'disables potentially dangerous macros in source files, such as include::[]') do |safe_mode| self[:safe] = SafeMode.const_get safe_mode.upcase end opts.on('-s', '--no-header-footer', 'suppress output of header and footer (default: false)') do self[:header_footer] = false end opts.on('-n', '--section-numbers', 'auto-number section titles in the HTML backend; disabled by default') do self[:attributes]['sectnums'] = '' end opts.on('-e', '--eruby ERUBY', ['erb', 'erubis'], 'specify eRuby implementation to use when rendering custom ERB templates: [erb, erubis] (default: erb)') do |eruby| self[:eruby] = eruby end opts.on('-C', '--compact', 'compact the output by removing blank lines. (No longer in use)') do end opts.on('-a', '--attribute key[=value]', 'a document attribute to set in the form of key, key! or key=value pair', 'unless @ is appended to the value, this attributes takes precedence over attributes', 'defined in the source document') do |attr| key, val = attr.split '=', 2 val = val ? (FORCE_ENCODING ? (val.force_encoding ::Encoding::UTF_8) : val) : '' # move leading ! to end for internal processing #if !val && key.start_with?('!') # key = "#{key[1..-1]}!" #end self[:attributes][key] = val end opts.on('-T', '--template-dir DIR', 'a directory containing custom converter templates that override the built-in converter (requires tilt gem)', 'may be specified multiple times') do |template_dir| if self[:template_dirs].nil? 
self[:template_dirs] = [template_dir] elsif ::Array === self[:template_dirs] self[:template_dirs].push template_dir else self[:template_dirs] = [self[:template_dirs], template_dir] end end opts.on('-E', '--template-engine NAME', 'template engine to use for the custom converter templates (loads gem on demand)') do |template_engine| self[:template_engine] = template_engine end opts.on('-B', '--base-dir DIR', 'base directory containing the document and resources (default: directory of source file)') do |base_dir| self[:base_dir] = base_dir end opts.on('-D', '--destination-dir DIR', 'destination output directory (default: directory of source file)') do |dest_dir| self[:destination_dir] = dest_dir end opts.on('-IDIRECTORY', '--load-path DIRECTORY', 'add a directory to the $LOAD_PATH', 'may be specified more than once') do |path| (self[:load_paths] ||= []).concat(path.split ::File::PATH_SEPARATOR) end opts.on('-rLIBRARY', '--require LIBRARY', 'require the specified library before executing the processor (using require)', 'may be specified more than once') do |path| (self[:requires] ||= []).concat(path.split ',') end opts.on('-q', '--quiet', 'suppress warnings (default: false)') do |verbose| self[:verbose] = 0 end opts.on('--trace', 'include backtrace information on errors (default: false)') do |trace| self[:trace] = true end opts.on('-v', '--verbose', 'enable verbose mode (default: false)') do |verbose| self[:verbose] = 2 end opts.on('-t', '--timings', 'enable timings mode (default: false)') do |timing| self[:timings] = true end opts.on_tail('-h', '--help', 'show this message') do $stdout.puts opts return 0 end opts.on_tail('-V', '--version', 'display the version and runtime environment (or -v if no other flags or arguments)') do return print_version $stdout end end infiles = [] opts_parser.parse! args if args.empty? if self[:verbose] == 2 return print_version $stdout else $stderr.puts opts_parser return 1 end end # shave off the file to process so that options errors appear correctly if args.size == 1 && args[0] == '-' infiles.push args.pop elsif args.each do |file| if file == '-' || (file.start_with? '-') # warn, but don't panic; we may have enough to proceed, so we won't force a failure $stderr.puts "asciidoctor: WARNING: extra arguments detected (unparsed arguments: #{args.map{|a| "'#{a}'"} * ', '}) or incorrect usage of stdin" else if ::File.readable? file matches = [file] else # Tilt backslashes in Windows paths the Ruby-friendly way if ::File::ALT_SEPARATOR == '\\' && (file.include? '\\') file = file.tr '\\', '/' end if (matches = ::Dir.glob file).empty? $stderr.puts %(asciidoctor: FAILED: input file #{file} missing or cannot be read) return 1 end end infiles.concat matches end end end infiles.each do |file| unless file == '-' || (::File.file? file) if ::File.readable? file $stderr.puts %(asciidoctor: FAILED: input path #{file} is a #{(::File.stat file).ftype}, not a file) else $stderr.puts %(asciidoctor: FAILED: input file #{file} missing or cannot be read) end return 1 end end self[:input_files] = infiles self.delete(:attributes) if self[:attributes].empty? if self[:template_dirs] begin require 'tilt' unless defined? ::Tilt rescue ::LoadError raise $! 
if self[:trace] $stderr.puts 'asciidoctor: FAILED: \'tilt\' could not be loaded' $stderr.puts ' You must have the tilt gem installed (gem install tilt) to use custom backend templates' $stderr.puts ' Use --trace for backtrace' return 1 rescue ::SystemExit # not permitted here end end if (load_paths = self[:load_paths]) (self[:load_paths] = load_paths.uniq).reverse_each do |path| $:.unshift File.expand_path(path) end end if (requires = self[:requires]) (self[:requires] = requires.uniq).each do |path| begin require path rescue ::LoadError raise $! if self[:trace] $stderr.puts %(asciidoctor: FAILED: '#{path}' could not be loaded) $stderr.puts ' Use --trace for backtrace' return 1 rescue ::SystemExit # not permitted here end end end self rescue ::OptionParser::MissingArgument $stderr.puts %(asciidoctor: option #{$!.message}) $stdout.puts opts_parser return 1 rescue ::OptionParser::InvalidOption, ::OptionParser::InvalidArgument $stderr.puts %(asciidoctor: #{$!.message}) $stdout.puts opts_parser return 1 end def print_version os = $stdout os.puts %(Asciidoctor #{::Asciidoctor::VERSION} [http://asciidoctor.org]) if RUBY_VERSION >= '1.9.3' encoding_info = {'lc' => 'locale', 'fs' => 'filesystem', 'in' => 'internal', 'ex' => 'external'}.map do |k,v| %(#{k}:#{::Encoding.find(v) || '-'}) end os.puts %(Runtime Environment (#{RUBY_DESCRIPTION}) (#{encoding_info * ' '})) else os.puts %(Runtime Environment (#{RUBY_DESCRIPTION})) end 0 end end end end asciidoctor-1.5.5/lib/asciidoctor/converter.rb000066400000000000000000000153401277513741400214500ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor # A base module for defining converters that can be used to convert {AbstractNode} # objects in a parsed AsciiDoc document to a backend format such as HTML or # DocBook. # # Implementing a converter involves: # # * including this module in a {Converter} implementation class # * overriding the {Converter#convert} method # * optionally associating the converter with one or more backends using # the {#register_for} DSL method imported by the {Config Converter::Config} module # # Examples # # class TextConverter # include Asciidoctor::Converter # register_for 'text' # def initialize backend, opts # super # outfilesuffix '.txt' # end # def convert node, transform = nil # case (transform ||= node.node_name) # when 'document' # node.content # when 'section' # [node.title, node.content] * "\n\n" # when 'paragraph' # node.content.tr("\n", ' ') << "\n" # else # if transform.start_with? 'inline_' # node.text # else # %(<#{transform}>\n) # end # end # end # end # # puts Asciidoctor.convert_file 'sample.adoc', backend: :text module Converter # A module that provides the {#register_for} method for statically # registering a converter with the default {Factory Converter::Factory} instance. module Config # Public: Statically registers the current {Converter} class with the default # {Factory Converter::Factory} to handle conversion to the specified backends. # # This method also defines the converts? method on the class which returns whether # the class is registered to convert a specified backend. # # backends - A String Array of backends with which to associate this {Converter} class. # # Returns nothing def register_for *backends Factory.register self, backends metaclass = class << self; self; end if backends == ['*'] metaclass.send :define_method, :converts? do |name| true end else metaclass.send :define_method, :converts? do |name| backends.include? 
name end end nil end end module BackendInfo def backend_info @backend_info ||= setup_backend_info end def setup_backend_info raise ::ArgumentError, %(Cannot determine backend for converter: #{self.class}) unless @backend base = @backend.sub TrailingDigitsRx, '' if (ext = DEFAULT_EXTENSIONS[base]) type = ext[1..-1] else # QUESTION should we be forcing the basebackend to html if unknown? base = 'html' ext = '.html' type = 'html' syntax = 'html' end { 'basebackend' => base, 'outfilesuffix' => ext, 'filetype' => type, 'htmlsyntax' => syntax } end def filetype value = nil if value backend_info['filetype'] = value else backend_info['filetype'] end end def basebackend value = nil if value backend_info['basebackend'] = value else backend_info['basebackend'] end end def outfilesuffix value = nil if value backend_info['outfilesuffix'] = value else backend_info['outfilesuffix'] end end def htmlsyntax value = nil if value backend_info['htmlsyntax'] = value else backend_info['htmlsyntax'] end end end class << self # Mixes the {Config Converter::Config} module into any class that includes the {Converter} module. # # converter - The Class that includes the {Converter} module # # Returns nothing def included converter converter.extend Config end end include Config include BackendInfo # Public: Creates a new instance of Converter # # backend - The String backend format to which this converter converts. # opts - An options Hash (optional, default: {}) # # Returns a new instance of [Converter] def initialize backend, opts = {} @backend = backend setup_backend_info end =begin # Public: Invoked when this converter is added to the chain of converters in a {CompositeConverter}. # # owner - The CompositeConverter instance # # Returns nothing def composed owner end =end # Public: Converts an {AbstractNode} using the specified transform along # with additional options. If a transform is not specified, implementations # typically derive one from the {AbstractNode#node_name} property. # # Implementations are free to decide how to carry out the conversion. In # the case of the built-in converters, the tranform value is used to # dispatch to a handler method. The {TemplateConverter} uses the value of # the transform to select a template to render. # # node - The concrete instance of AbstractNode to convert # transform - An optional String transform that hints at which transformation # should be applied to this node. If a transform is not specified, # the transform is typically derived from the value of the # node's node_name property. (optional, default: nil) # opts - An optional Hash of options that provide additional hints about # how to convert the node. (optional, default: {}) # # Returns the [String] result def convert node, transform = nil, opts = {} raise ::NotImplementedError end # Alias for backward compatibility. alias :convert_with_options :convert end # A module that can be used to mix the {#write} method into a {Converter} # implementation to allow the converter to control how the output is written # to disk. module Writer # Public: Writes the output to the specified target file name or stream. # # output - The output String to write # target - The String file name or stream object to which the output should # be written. # # Returns nothing def write output, target if target.respond_to? 
:write target.write output.chomp # ensure there's a trailing endline to be nice to terminals target.write EOL else ::File.open(target, 'w') {|f| f.write output } end nil end end module VoidWriter include Writer # Public: Does not write output def write output, target end end end require 'asciidoctor/converter/base' require 'asciidoctor/converter/factory' asciidoctor-1.5.5/lib/asciidoctor/converter/000077500000000000000000000000001277513741400211205ustar00rootroot00000000000000asciidoctor-1.5.5/lib/asciidoctor/converter/base.rb000066400000000000000000000032611277513741400223610ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor module Converter; end # required for Opal # An abstract base class for defining converters that can be used to convert # {AbstractNode} objects in a parsed AsciiDoc document to a backend format # such as HTML or DocBook. # # Concrete subclasses must implement the {#convert} method and, optionally, # the {#convert_with_options} method. class Converter::Base include Converter end # An abstract base class for built-in {Converter} classes. class Converter::BuiltIn def initialize backend, opts = {} end # Public: Converts the specified {AbstractNode} using the specified # transform and optionally additional options (when not empty). # # CAUTION: Method that handles the specified transform *may not* accept the # second argument with additional options, in which case an {ArgumentError} # is raised if the given +opts+ Hash is not nil. The additional options are # used in template-based backends to access convert helper methods such as # outline. # # See {Converter#convert} for more details. # # Returns the [String] result of conversion def convert node, transform = nil, opts = {} transform ||= node.node_name opts.empty? ? (send transform, node) : (send transform, node, opts) end alias :handles? :respond_to? # Public: Returns the converted content of the {AbstractNode}. # # Returns the converted [String] content of the {AbstractNode}. def content node node.content end alias :pass :content # Public: Skips conversion of the {AbstractNode}. # # Returns [NilClass] def skip node nil end end end asciidoctor-1.5.5/lib/asciidoctor/converter/composite.rb000066400000000000000000000040161277513741400234500ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor # A {Converter} implementation that delegates to the chain of {Converter} # objects passed to the constructor. Selects the first {Converter} that # identifies itself as the handler for a given transform. class Converter::CompositeConverter < Converter::Base # Get the Array of Converter objects in the chain attr_reader :converters def initialize backend, *converters @backend = backend (@converters = converters.flatten.compact).each do |converter| converter.composed self if converter.respond_to? :composed end @converter_map = {} end # Public: Delegates to the first converter that identifies itself as the # handler for the given transform. The optional Hash is passed as the last # option to the delegate's convert method. # # node - the AbstractNode to convert # transform - the optional String transform, or the name of the node if no # transform is specified. (default: nil) # opts - an optional Hash that is passed to the delegate's convert method. (default: {}) # # Returns the String result returned from the delegate's convert method def convert node, transform = nil, opts = {} transform ||= node.node_name (converter_for transform).convert node, transform, opts end # Alias for backward compatibility. 
alias :convert_with_options :convert # Public: Retrieve the converter for the specified transform. # # Returns the matching [Converter] object def converter_for transform @converter_map[transform] ||= (find_converter transform) end # Internal: Find the converter for the specified transform. # Raise an exception if no converter is found. # # Returns the matching [Converter] object def find_converter transform @converters.each do |candidate| return candidate if candidate.handles? transform end raise %(Could not find a converter to handle transform: #{transform}) end end end asciidoctor-1.5.5/lib/asciidoctor/converter/docbook45.rb000066400000000000000000000065761277513741400232540ustar00rootroot00000000000000# encoding: UTF-8 require 'asciidoctor/converter/docbook5' module Asciidoctor # A built-in {Converter} implementation that generates DocBook 4.5 output # consistent with the docbook45 backend from AsciiDoc Python. class Converter::DocBook45Converter < Converter::DocBook5Converter def admonition node # address a bug in the DocBook 4.5 DTD if node.parent.context == :example %( #{super} ) else super end end def olist node result = [] num_attribute = node.style ? %( numeration="#{node.style}") : nil start_attribute = (node.attr? 'start') ? %( override="#{node.attr 'start'}") : nil result << %() result << %(#{node.title}) if node.title? node.items.each_with_index do |item, idx| result << (idx == 0 ? %() : '') result << %(#{item.text}) result << item.content if item.blocks? result << '' end result << %() result * EOL end def inline_anchor node case node.type when :ref %() when :xref if (path = node.attributes['path']) # QUESTION should we use refid as fallback text instead? (like the html5 backend?) %(#{node.text || path}) else linkend = node.attributes['fragment'] || node.target (text = node.text) ? %(#{text}) : %() end when :link %(#{node.text}) when :bibref target = node.target %([#{target}]) end end def author_element doc, index = nil firstname_key = index ? %(firstname_#{index}) : 'firstname' middlename_key = index ? %(middlename_#{index}) : 'middlename' lastname_key = index ? %(lastname_#{index}) : 'lastname' email_key = index ? %(email_#{index}) : 'email' result = [] result << '' result << %(#{doc.attr firstname_key}) if doc.attr? firstname_key result << %(#{doc.attr middlename_key}) if doc.attr? middlename_key result << %(#{doc.attr lastname_key}) if doc.attr? lastname_key result << %(#{doc.attr email_key}) if doc.attr? email_key result << '' result * EOL end def common_attributes id, role = nil, reftext = nil res = id ? %( id="#{id}") : '' res = %(#{res} role="#{role}") if role res = %(#{res} xreflabel="#{reftext}") if reftext res end def doctype_declaration root_tag_name %() end def document_info_element doc, info_tag_prefix super doc, info_tag_prefix, true end def lang_attribute_name 'lang' end def document_ns_attributes doc if (ns = doc.attr 'xmlns') ns.empty? ? ' xmlns="http://docbook.org/ns/docbook"' : %( xmlns="#{ns}") else nil end end end end asciidoctor-1.5.5/lib/asciidoctor/converter/docbook5.rb000066400000000000000000000663541277513741400231700ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor # A built-in {Converter} implementation that generates DocBook 5 output # similar to the docbook45 backend from AsciiDoc Python, but migrated to the # DocBook 5 specification. 
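# Example (an illustrative invocation, following the converter example shown
# earlier in this library; the exact output location depends on the options passed):
#
#   Asciidoctor.convert_file 'sample.adoc', backend: 'docbook5'
#   # => writes the DocBook 5 output to sample.xml alongside the source file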
class Converter::DocBook5Converter < Converter::BuiltIn def document node result = [] if (root_tag_name = node.doctype) == 'manpage' root_tag_name = 'refentry' end result << '' if (doctype_line = doctype_declaration root_tag_name) result << doctype_line end if node.attr? 'toc' if node.attr? 'toclevels' result << %() else result << '' end end if node.attr? 'sectnums' if node.attr? 'sectnumlevels' result << %() else result << '' end end lang_attribute = (node.attr? 'nolang') ? nil : %( #{lang_attribute_name}="#{node.attr 'lang', 'en'}") result << %(<#{root_tag_name}#{document_ns_attributes node}#{lang_attribute}>) result << (document_info_element node, root_tag_name) result << node.content if node.blocks? unless (footer_docinfo = node.docinfo :footer).empty? result << footer_docinfo end result << %() result * EOL end alias :embedded :content def section node doctype = node.document.doctype if node.special if (tag_name = node.sectname).start_with? 'sect' # a normal child section of a special section tag_name = 'section' end else tag_name = doctype == 'book' && node.level <= 1 ? (node.level == 0 ? 'part' : 'chapter') : 'section' end if doctype == 'manpage' if tag_name == 'section' tag_name = 'refsection' elsif tag_name == 'synopsis' tag_name = 'refsynopsisdiv' end end %(<#{tag_name}#{common_attributes node.id, node.role, node.reftext}> #{node.title} #{node.content} ) end def admonition node %(<#{tag_name = node.attr 'name'}#{common_attributes node.id, node.role, node.reftext}> #{title_tag node}#{resolve_content node} ) end alias :audio :skip def colist node result = [] result << %() result << %(#{node.title}) if node.title? node.items.each do |item| result << %() result << %(#{item.text}) result << item.content if item.blocks? result << '' end result << %() result * EOL end (DLIST_TAGS = { 'labeled' => { :list => 'variablelist', :entry => 'varlistentry', :term => 'term', :item => 'listitem' }, 'qanda' => { :list => 'qandaset', :entry => 'qandaentry', :label => 'question', :term => 'simpara', :item => 'answer' }, 'glossary' => { :list => nil, :entry => 'glossentry', :term => 'glossterm', :item => 'glossdef' } }).default = { # default value == DLIST['labeled'], expanded for Opal :list => 'variablelist', :entry => 'varlistentry', :term => 'term', :item => 'listitem' } def dlist node result = [] if node.style == 'horizontal' result << %(<#{tag_name = node.title? ? 'table' : 'informaltable'}#{common_attributes node.id, node.role, node.reftext} tabstyle="horizontal" frame="none" colsep="0" rowsep="0"> #{title_tag node} ) node.items.each do |terms, dd| result << %( ) [*terms].each do |dt| result << %(#{dt.text}) end result << %( ) unless dd.nil? result << %(#{dd.text}) if dd.text? result << dd.content if dd.blocks? end result << %( ) end result << %( ) else tags = DLIST_TAGS[node.style] list_tag = tags[:list] entry_tag = tags[:entry] label_tag = tags[:label] term_tag = tags[:term] item_tag = tags[:item] if list_tag result << %(<#{list_tag}#{common_attributes node.id, node.role, node.reftext}>) result << %(#{node.title}) if node.title? end node.items.each do |terms, dd| result << %(<#{entry_tag}>) result << %(<#{label_tag}>) if label_tag [*terms].each do |dt| result << %(<#{term_tag}>#{dt.text}) end result << %() if label_tag result << %(<#{item_tag}>) unless dd.nil? result << %(#{dd.text}) if dd.text? result << dd.content if dd.blocks? end result << %() result << %() end result << %() if list_tag end result * EOL end def example node if node.title? 
%( #{node.title} #{resolve_content node} ) else %( #{resolve_content node} ) end end def floating_title node %(#{node.title}) end def image node width_attribute = (node.attr? 'width') ? %( contentwidth="#{node.attr 'width'}") : nil depth_attribute = (node.attr? 'height') ? %( contentdepth="#{node.attr 'height'}") : nil # FIXME if scaledwidth is set, we should remove width & depth # See http://www.docbook.org/tdg/en/html/imagedata.html#d0e92271 for details swidth_attribute = (node.attr? 'scaledwidth') ? %( width="#{node.attr 'scaledwidth'}" scalefit="1") : nil scale_attribute = (node.attr? 'scale') ? %( scale="#{node.attr 'scale'}") : nil align_attribute = (node.attr? 'align') ? %( align="#{node.attr 'align'}") : nil mediaobject = %( #{node.attr 'alt'} ) if node.title? %( #{node.title} #{mediaobject} ) else %( #{mediaobject} ) end end def listing node informal = !node.title? listing_attributes = (common_attributes node.id, node.role, node.reftext) if node.style == 'source' && (node.attr? 'language') numbering = (node.attr? 'linenums') ? 'numbered' : 'unnumbered' listing_content = %(#{node.content}) else listing_content = %(#{node.content}) end if informal listing_content else %( #{node.title} #{listing_content} ) end end def literal node if node.title? %( #{node.title} #{node.content} ) else %(#{node.content}) end end def stem node if (idx = node.subs.index :specialcharacters) node.subs.delete :specialcharacters end equation = node.content node.subs.insert idx, :specialcharacters if idx if node.style == 'asciimath' if ((defined? ::AsciiMath) || ((defined? @asciimath_available) ? @asciimath_available : (@asciimath_available = Helpers.require_library 'asciimath', true, :warn))) # NOTE fop requires jeuclid to process raw mathml equation_data = (::AsciiMath.parse equation).to_mathml 'mml:', 'xmlns:mml' => 'http://www.w3.org/1998/Math/MathML' else equation_data = %() end else # unhandled math; pass source to alt and required mathphrase element; dblatex will process alt as LaTeX math equation_data = %( ) end if node.title? %( #{node.title} #{equation_data} ) else # WARNING dblatex displays the element inline instead of block as documented (except w/ mathml) %( #{equation_data} ) end end def olist node result = [] num_attribute = node.style ? %( numeration="#{node.style}") : nil start_attribute = (node.attr? 'start') ? %( startingnumber="#{node.attr 'start'}") : nil result << %() result << %(#{node.title}) if node.title? node.items.each do |item| result << '' result << %(#{item.text}) result << item.content if item.blocks? result << '' end result << %() result * EOL end def open node case node.style when 'abstract' if node.parent == node.document && node.document.attr?('doctype', 'book') warn 'asciidoctor: WARNING: abstract block cannot be used in a document without a title when doctype is book. Excluding block content.' '' else %( #{title_tag node}#{resolve_content node} ) end when 'partintro' unless node.level == 0 && node.parent.context == :section && node.document.doctype == 'book' warn 'asciidoctor: ERROR: partintro block can only be used when doctype is book and it\'s a child of a part section. Excluding block content.' '' else %( #{title_tag node}#{resolve_content node} ) end else node.content end end def page_break node '' end def paragraph node if node.title? 
%( #{node.title} #{node.content} ) else %(#{node.content}) end end def preamble node if node.document.doctype == 'book' %( #{title_tag node, false}#{node.content} ) else node.content end end def quote node result = [] result << %() result << %(#{node.title}) if node.title? if (node.attr? 'attribution') || (node.attr? 'citetitle') result << '' if node.attr? 'attribution' result << (node.attr 'attribution') end if node.attr? 'citetitle' result << %(#{node.attr 'citetitle'}) end result << '' end result << (resolve_content node) result << '' result * EOL end def thematic_break node '' end def sidebar node %( #{title_tag node}#{resolve_content node} ) end TABLE_PI_NAMES = ['dbhtml', 'dbfo', 'dblatex'] TABLE_SECTIONS = [:head, :foot, :body] def table node has_body = false result = [] pgwide_attribute = (node.option? 'pgwide') ? ' pgwide="1"' : nil result << %(<#{tag_name = node.title? ? 'table' : 'informaltable'}#{common_attributes node.id, node.role, node.reftext}#{pgwide_attribute} frame="#{node.attr 'frame', 'all'}" rowsep="#{['none', 'cols'].include?(node.attr 'grid') ? 0 : 1}" colsep="#{['none', 'rows'].include?(node.attr 'grid') ? 0 : 1}"#{(node.attr? 'orientation', 'landscape', nil) ? ' orient="land"' : nil}>) if (node.option? 'unbreakable') result << '' elsif (node.option? 'breakable') result << '' end result << %(#{node.title}) if tag_name == 'table' col_width_key = if (width = (node.attr? 'width') ? (node.attr 'width') : nil) TABLE_PI_NAMES.each do |pi_name| result << %() end 'colabswidth' else 'colpcwidth' end result << %() node.columns.each do |col| result << %() end TABLE_SECTIONS.select {|tblsec| !node.rows[tblsec].empty? }.each do |tblsec| has_body = true if tblsec == :body result << %() node.rows[tblsec].each do |row| result << '' row.each do |cell| halign_attribute = (cell.attr? 'halign') ? %( align="#{cell.attr 'halign'}") : nil valign_attribute = (cell.attr? 'valign') ? %( valign="#{cell.attr 'valign'}") : nil colspan_attribute = cell.colspan ? %( namest="col_#{colnum = cell.column.attr 'colnumber'}" nameend="col_#{colnum + cell.colspan - 1}") : nil rowspan_attribute = cell.rowspan ? %( morerows="#{cell.rowspan - 1}") : nil # NOTE may not have whitespace (e.g., line breaks) as a direct descendant according to DocBook rules entry_start = %() cell_content = if tblsec == :head cell.text else case cell.style when :asciidoc cell.content when :verse %(#{cell.text}) when :literal %(#{cell.text}) when :header cell.content.map {|text| %(#{text}) }.join else cell.content.map {|text| %(#{text}) }.join end end entry_end = (node.document.attr? 'cellbgcolor') ? %() : '' result << %(#{entry_start}#{cell_content}#{entry_end}) end result << '' end result << %() end result << '' result << %() warn 'asciidoctor: WARNING: tables must have at least one body row' unless has_body result * EOL end alias :toc :skip def ulist node result = [] if node.style == 'bibliography' result << %() result << %(#{node.title}) if node.title? node.items.each do |item| result << '' result << %(#{item.text}) result << item.content if item.blocks? result << '' end result << '' else mark_type = (checklist = node.option? 'checklist') ? 'none' : node.style mark_attribute = mark_type ? %( mark="#{mark_type}") : nil result << %() result << %(#{node.title}) if node.title? node.items.each do |item| text_marker = if checklist && (item.attr? 'checkbox') (item.attr? 'checked') ? '✓ ' : '❏ ' else nil end result << '' result << %(#{text_marker}#{item.text}) result << item.content if item.blocks? 
result << '' end result << '' end result * EOL end def verse node result = [] result << %() result << %(#{node.title}) if node.title? if (node.attr? 'attribution') || (node.attr? 'citetitle') result << '' if node.attr? 'attribution' result << (node.attr 'attribution') end if node.attr? 'citetitle' result << %(#{node.attr 'citetitle'}) end result << '' end result << %(#{node.content}) result << '' result * EOL end alias :video :skip def inline_anchor node case node.type when :ref %() when :xref if (path = node.attributes['path']) # QUESTION should we use refid as fallback text instead? (like the html5 backend?) %(#{node.text || path}) else linkend = node.attributes['fragment'] || node.target (text = node.text) ? %(#{text}) : %() end when :link %(#{node.text}) when :bibref target = node.target %([#{target}]) else warn %(asciidoctor: WARNING: unknown anchor type: #{node.type.inspect}) end end def inline_break node %(#{node.text}) end def inline_button node %(#{node.text}) end def inline_callout node %() end def inline_footnote node if node.type == :xref %() else %(#{node.text}) end end def inline_image node width_attribute = (node.attr? 'width') ? %( contentwidth="#{node.attr 'width'}") : nil depth_attribute = (node.attr? 'height') ? %( contentdepth="#{node.attr 'height'}") : nil %( #{node.attr 'alt'} ) end def inline_indexterm node if node.type == :visible %(#{node.text}#{node.text}) else terms = node.attr 'terms' result = [] if (numterms = terms.size) > 2 result << %( #{terms[0]}#{terms[1]}#{terms[2]} ) end if numterms > 1 result << %( #{terms[-2]}#{terms[-1]} ) end result << %( #{terms[-1]} ) result * EOL end end def inline_kbd node if (keys = node.attr 'keys').size == 1 %(#{keys[0]}) else %(#{keys.map {|key| "#{key}" }.join}) end end def inline_menu node menu = node.attr 'menu' if !(submenus = node.attr 'submenus').empty? submenu_path = submenus.map {|submenu| %(#{submenu} ) }.join.chop %(#{menu} #{submenu_path} #{node.attr 'menuitem'}) elsif (menuitem = node.attr 'menuitem') %(#{menu} #{menuitem}) else %(#{menu}) end end (QUOTE_TAGS = { :emphasis => ['', '', true], :strong => ['', '', true], :monospaced => ['', '', false], :superscript => ['', '', false], :subscript => ['', '', false], :double => ['“', '”', true], :single => ['‘', '’', true], :mark => ['', '', false] }).default = [nil, nil, true] def inline_quoted node if (type = node.type) == :asciimath if ((defined? ::AsciiMath) || ((defined? @asciimath_available) ? @asciimath_available : (@asciimath_available = Helpers.require_library 'asciimath', true, :warn))) # NOTE fop requires jeuclid to process raw mathml %(#{(::AsciiMath.parse node.text).to_mathml 'mml:', 'xmlns:mml' => 'http://www.w3.org/1998/Math/MathML'}) else %() end elsif type == :latexmath # unhandled math; pass source to alt and required mathphrase element; dblatex will process alt as LaTeX math %() else open, close, supports_phrase = QUOTE_TAGS[type] text = node.text if (role = node.role) if supports_phrase quoted_text = %(#{open}#{text}#{close}) else quoted_text = %(#{open.chop} role="#{role}">#{text}#{close}) end else quoted_text = %(#{open}#{text}#{close}) end node.id ? %(#{quoted_text}) : quoted_text end end def author_element doc, index = nil firstname_key = index ? %(firstname_#{index}) : 'firstname' middlename_key = index ? %(middlename_#{index}) : 'middlename' lastname_key = index ? %(lastname_#{index}) : 'lastname' email_key = index ? %(email_#{index}) : 'email' result = [] result << '' result << '' result << %(#{doc.attr firstname_key}) if doc.attr? 
firstname_key result << %(#{doc.attr middlename_key}) if doc.attr? middlename_key result << %(#{doc.attr lastname_key}) if doc.attr? lastname_key result << '' result << %(#{doc.attr email_key}) if doc.attr? email_key result << '' result * EOL end def common_attributes id, role = nil, reftext = nil res = id ? %( xml:id="#{id}") : '' res = %(#{res} role="#{role}") if role res = %(#{res} xreflabel="#{reftext}") if reftext res end def doctype_declaration root_tag_name nil end def document_info_element doc, info_tag_prefix, use_info_tag_prefix = false info_tag_prefix = '' unless use_info_tag_prefix result = [] result << %(<#{info_tag_prefix}info>) result << document_title_tags(doc.doctitle :partition => true, :use_fallback => true) unless doc.notitle if (date = (doc.attr? 'revdate') ? (doc.attr 'revdate') : ((doc.attr? 'reproducible') ? nil : (doc.attr 'docdate'))) result << %(#{date}) end if doc.has_header? if doc.attr? 'author' if (authorcount = (doc.attr 'authorcount').to_i) < 2 result << (author_element doc) result << %(#{doc.attr 'authorinitials'}) if doc.attr? 'authorinitials' else result << '' authorcount.times do |index| result << (author_element doc, index + 1) end result << '' end end if (doc.attr? 'revdate') && ((doc.attr? 'revnumber') || (doc.attr? 'revremark')) result << %( ) result << %(#{doc.attr 'revnumber'}) if doc.attr? 'revnumber' result << %(#{doc.attr 'revdate'}) if doc.attr? 'revdate' result << %(#{doc.attr 'authorinitials'}) if doc.attr? 'authorinitials' result << %(#{doc.attr 'revremark'}) if doc.attr? 'revremark' result << %( ) end unless (head_docinfo = doc.docinfo).empty? result << head_docinfo end result << %(#{doc.attr 'orgname'}) if doc.attr? 'orgname' end result << %() if doc.doctype == 'manpage' result << '' result << %(#{doc.attr 'mantitle'}) if doc.attr? 'mantitle' result << %(#{doc.attr 'manvolnum'}) if doc.attr? 'manvolnum' result << '' result << '' result << %(#{doc.attr 'manname'}) if doc.attr? 'manname' result << %(#{doc.attr 'manpurpose'}) if doc.attr? 'manpurpose' result << '' end result * EOL end def document_ns_attributes doc ' xmlns="http://docbook.org/ns/docbook" xmlns:xl="http://www.w3.org/1999/xlink" version="5.0"' end def lang_attribute_name 'xml:lang' end def document_title_tags title if title.subtitle? %(#{title.main} #{title.subtitle}) else %(#{title}) end end # FIXME this should be handled through a template mechanism def resolve_content node node.content_model == :compound ? node.content : %(#{node.content}) end def title_tag node, optional = true !optional || node.title? ? %(#{node.title}\n) : nil end end end asciidoctor-1.5.5/lib/asciidoctor/converter/factory.rb000066400000000000000000000225421277513741400231210ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor module Converter # A factory for instantiating converters that are used to convert a # {Document} (i.e., a parsed AsciiDoc tree structure) or {AbstractNode} to # a backend format such as HTML or DocBook. {Factory Converter::Factory} is # the primary entry point for creating, registering and accessing # converters. # # {Converter} objects are instantiated by passing a String backend name # and, optionally, an options Hash to the {Factory#create} method. The # backend can be thought of as an intent to convert a document to a # specified format. For example: # # converter = Asciidoctor::Converter::Factory.create 'html5', :htmlsyntax => 'xml' # # Converter objects are thread safe. They only survive the lifetime of a single conversion. 
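#
# A custom converter class can also be registered with the factory and then
# resolved by backend name. The following is a minimal sketch; the
# TextConverter class and the 'text' backend name are illustrative and are
# not part of this codebase:
#
#   class TextConverter
#     def initialize backend, opts = {}
#       @backend = backend
#     end
#
#     # render a node using only its resolved content
#     def convert node, transform = nil
#       node.content
#     end
#   end
#
#   Asciidoctor::Converter::Factory.register TextConverter, ['text']
#   converter = Asciidoctor::Converter::Factory.create 'text'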
# # A singleton instance of {Factory Converter::Factory} can be accessed # using the {Factory.default} method. This instance maintains the global # registry of statically registered converters. The registry includes # built-in converters for {Html5Converter HTML 5}, {DocBook5Converter # DocBook 5} and {DocBook45Converter DocBook 4.5}, as well as any custom # converters that have been discovered or explicitly registered. # # If the {https://rubygems.org/gems/thread_safe thread_safe} gem is # installed, access to the default factory is guaranteed to be thread safe. # Otherwise, a warning is issued to the user. class Factory @__default__ = nil class << self # Public: Retrieves a singleton instance of {Factory Converter::Factory}. # # If the thread_safe gem is installed, the registry of converters is # initialized as a ThreadSafe::Cache. Otherwise, a warning is issued and # the registry of converters is initialized using a normal Hash. # # initialize_singleton - A Boolean to indicate whether the singleton should # be initialized if it has not already been created. # If false, and a singleton has not been previously # initialized, a fresh instance is returned. # # Returns the default [Factory] singleton instance def default initialize_singleton = true return @__default__ || new unless initialize_singleton # FIXME this assignment is not thread_safe, may need to use a ::Threadsafe helper here @__default__ ||= begin require 'thread_safe'.to_s unless defined? ::ThreadSafe new ::ThreadSafe::Cache.new rescue ::LoadError warn 'asciidoctor: WARNING: gem \'thread_safe\' is not installed. This gem is recommended when registering custom converters.' new end end # Public: Register a custom converter in the global converter factory to # handle conversion to the specified backends. If the backend value is an # asterisk, the converter is used to handle any backend that does not have # an explicit converter. # # converter - The Converter class to register # backends - A String Array of backend names that this converter should # be registered to handle (optional, default: ['*']) # # Returns nothing def register converter, backends = ['*'] default.register converter, backends end # Public: Lookup the custom converter for the specified backend in the # global factory. # # This method does not resolve the built-in converters. # # backend - The String backend name # # Returns the [Converter] class registered to convert the specified backend # or nil if no match is found def resolve backend default.resolve backend end # Public: Lookup the converter for the specified backend in the global # factory and instantiate it, forwarding the Hash of options to the # constructor of the converter class. # # If the custom converter is not found, an attempt will be made to find # and instantiate a built-in converter. # # # backend - The String backend name # opts - A Hash of options to pass to the converter # # Returns an instance of [Converter] for converting the specified backend or # nil if no match is found. def create backend, opts = {} default.create backend, opts end # Public: Retrieve the global Hash of custom Converter classes keyed by backend. # # Returns the global [Hash] of custom Converter classes def converters default.converters end # Public: Unregister all Converter classes in the global factory.
# # Returns nothing def unregister_all default.unregister_all end end # Public: Get the Hash of Converter classes keyed by backend name attr_reader :converters def initialize converters = nil @converters = converters || {} @star_converter = nil end # Public: Register a custom converter with this factory to handle conversion # to the specified backends. If the backend value is an asterisk, the # converter is used to handle any backend that does not have an explicit # converter. # # converter - The Converter class to register # backends - A String Array of backend names that this converter should # be registered to handle (optional, default: ['*']) # # Returns nothing def register converter, backends = ['*'] backends.each do |backend| @converters[backend] = converter if backend == '*' @star_converter = converter end end nil end # Public: Lookup the custom converter registered with this factory to handle # the specified backend. # # backend - The String backend name # # Returns the [Converter] class registered to convert the specified backend # or nil if no match is found def resolve backend @converters && (@converters[backend] || @star_converter) end # Public: Unregister all Converter classes that are registered with this # factory. # # Returns nothing def unregister_all @converters.clear @star_converter = nil end # Public: Create a new Converter object that can be used to convert the # {AbstractNode} (typically a {Document}) to the specified String backend. # This method accepts an optional Hash of options that are passed to the # converter's constructor. # # If a custom Converter is found to convert the specified backend, it is # instantiated (if necessary) and returned immediately. If a custom # Converter is not found, an attempt is made to resolve a built-in # converter. If the `:template_dirs` key is found in the Hash passed as the # second argument, a {CompositeConverter} is created that delegates to a # {TemplateConverter} and, if resolved, the built-in converter. If the # `:template_dirs` key is not found, the built-in converter is returned # or nil if no converter is resolved. # # backend - the String backend name # opts - an optional Hash of options that get passed on to the converter's # constructor. If the :template_dirs key is found in the options # Hash, this method returns a {CompositeConverter} that delegates # to a {TemplateConverter}. (optional, default: {}) # # Returns the [Converter] object def create backend, opts = {} if (converter = resolve backend) return ::Class === converter ? (converter.new backend, opts) : converter end base_converter = case backend when 'html5' unless defined? ::Asciidoctor::Converter::Html5Converter require 'asciidoctor/converter/html5'.to_s end Html5Converter.new backend, opts when 'docbook5' unless defined? ::Asciidoctor::Converter::DocBook5Converter require 'asciidoctor/converter/docbook5'.to_s end DocBook5Converter.new backend, opts when 'docbook45' unless defined? ::Asciidoctor::Converter::DocBook45Converter require 'asciidoctor/converter/docbook45'.to_s end DocBook45Converter.new backend, opts when 'manpage' unless defined? ::Asciidoctor::Converter::ManPageConverter require 'asciidoctor/converter/manpage'.to_s end ManPageConverter.new backend, opts end return base_converter unless opts.key? :template_dirs unless defined? ::Asciidoctor::Converter::TemplateConverter require 'asciidoctor/converter/template'.to_s end unless defined? 
::Asciidoctor::Converter::CompositeConverter require 'asciidoctor/converter/composite'.to_s end template_converter = TemplateConverter.new backend, opts[:template_dirs], opts # QUESTION should we omit the composite converter if built_in_converter is nil? CompositeConverter.new backend, template_converter, base_converter end end end end asciidoctor-1.5.5/lib/asciidoctor/converter/html5.rb000066400000000000000000001370121277513741400225020ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor # A built-in {Converter} implementation that generates HTML 5 output # consistent with the html5 backend from AsciiDoc Python. class Converter::Html5Converter < Converter::BuiltIn (QUOTE_TAGS = { :emphasis => ['', '', true], :strong => ['', '', true], :monospaced => ['', '', true], :superscript => ['', '', true], :subscript => ['', '', true], :double => ['“', '”', false], :single => ['‘', '’', false], :mark => ['', '', true], :asciimath => ['\\$', '\\$', false], :latexmath => ['\\(', '\\)', false] # Opal can't resolve these constants when referenced here #:asciimath => INLINE_MATH_DELIMITERS[:asciimath] + [false], #:latexmath => INLINE_MATH_DELIMITERS[:latexmath] + [false] }).default = [nil, nil, nil] SvgPreambleRx = /\A.*?(?=]*>/ DimensionAttributeRx = /\s(?:width|height|style)=(["']).*?\1/ def initialize backend, opts = {} @xml_mode = opts[:htmlsyntax] == 'xml' @void_element_slash = @xml_mode ? '/' : nil @stylesheets = Stylesheets.instance end def document node result = [] slash = @void_element_slash br = %() unless (asset_uri_scheme = (node.attr 'asset-uri-scheme', 'https')).empty? asset_uri_scheme = %(#{asset_uri_scheme}:) end cdn_base = %(#{asset_uri_scheme}//cdnjs.cloudflare.com/ajax/libs) linkcss = node.safe >= SafeMode::SECURE || (node.attr? 'linkcss') result << '' lang_attribute = (node.attr? 'nolang') ? nil : %( lang="#{node.attr 'lang', 'en'}") result << %() result << %( ) result << %() if node.attr? 'app-name' result << %() if node.attr? 'description' result << %() if node.attr? 'keywords' result << %() if node.attr? 'authors' result << %() if node.attr? 'copyright' result << %(#{node.doctitle :sanitize => true, :use_fallback => true}) if DEFAULT_STYLESHEET_KEYS.include?(node.attr 'stylesheet') if (webfonts = node.attr 'webfonts') result << %() end if linkcss result << %() else result << @stylesheets.embed_primary_stylesheet end elsif node.attr? 'stylesheet' if linkcss result << %() else result << %() end end if node.attr? 'icons', 'font' if node.attr? 'iconfont-remote' result << %() else iconfont_stylesheet = %(#{node.attr 'iconfont-name', 'font-awesome'}.css) result << %() end end case (highlighter = node.attr 'source-highlighter') when 'coderay' if (node.attr 'coderay-css', 'class') == 'class' if linkcss result << %() else result << @stylesheets.embed_coderay_stylesheet end end when 'pygments' if (node.attr 'pygments-css', 'class') == 'class' pygments_style = node.attr 'pygments-style' if linkcss result << %() else result << (@stylesheets.embed_pygments_stylesheet pygments_style) end end end unless (docinfo_content = node.docinfo).empty? result << docinfo_content end result << '' body_attrs = [] body_attrs << %(id="#{node.id}") if node.id if (sectioned = node.sections?) && (node.attr? 'toc-class') && (node.attr? 'toc') && (node.attr? 
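#
# A minimal sketch of driving this converter through the top-level API (the
# sample input string is illustrative; Asciidoctor.convert and the :backend
# and :header_footer options are part of the public API):
#
#   require 'asciidoctor'
#
#   # converts just the document body to HTML 5 (no header or footer)
#   html = Asciidoctor.convert '*Hello*, AsciiDoc!', :backend => 'html5'
#
#   # set :header_footer => true to emit a standalone page via the document method
#   page = Asciidoctor.convert '*Hello*, AsciiDoc!', :backend => 'html5', :header_footer => true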
'toc-placement', 'auto') body_attrs << %(class="#{node.doctype} #{node.attr 'toc-class'} toc-#{node.attr 'toc-position', 'header'}") else body_attrs << %(class="#{node.doctype}") end body_attrs << %(style="max-width: #{node.attr 'max-width'};") if node.attr? 'max-width' result << %() unless node.noheader result << '' end result << %(
#{node.content}
) if node.footnotes? && !(node.attr? 'nofootnotes') result << %(
) node.footnotes.each do |footnote| result << %(
#{footnote.index}. #{footnote.text}
) end result << '
' end unless node.nofooter result << '' end unless (docinfo_content = node.docinfo :footer).empty? result << docinfo_content end # Load Javascript at the end of body for performance # See http://www.html5rocks.com/en/tutorials/speed/script-loading/ case highlighter when 'highlightjs', 'highlight.js' highlightjs_path = node.attr 'highlightjsdir', %(#{cdn_base}/highlight.js/8.9.1) result << %() result << %( ) when 'prettify' prettify_path = node.attr 'prettifydir', %(#{cdn_base}/prettify/r298) result << %() result << %( ) end if node.attr? 'stem' eqnums_val = node.attr 'eqnums', 'none' eqnums_val = 'AMS' if eqnums_val.empty? eqnums_opt = %( equationNumbers: { autoNumber: "#{eqnums_val}" } ) # IMPORTANT inspect calls on delimiter arrays are intentional for JavaScript compat (emulates JSON.stringify) result << %( ) end result << '' result << '' result * EOL end def embedded node result = [] if node.doctype == 'manpage' # QUESTION should notitle control the manual page title? unless node.notitle id_attr = node.id ? %( id="#{node.id}") : nil result << %(#{node.doctitle} Manual Page) end # QUESTION should this h2 have an auto-generated id? result << %(

#{node.attr 'manname-title'}

#{node.attr 'manname'} - #{node.attr 'manpurpose'}

) else if node.has_header? && !node.notitle id_attr = node.id ? %( id="#{node.id}") : nil result << %(#{node.header.title}) end end if node.sections? && (node.attr? 'toc') && (toc_p = node.attr 'toc-placement') != 'macro' && toc_p != 'preamble' result << %(
#{node.attr 'toc-title'}
#{outline node}
) end result << node.content if node.footnotes? && !(node.attr? 'nofootnotes') result << %(
) node.footnotes.each do |footnote| result << %(
#{footnote.index}. #{footnote.text}
) end result << '
' end result * EOL end def outline node, opts = {} return unless node.sections? sectnumlevels = opts[:sectnumlevels] || (node.document.attr 'sectnumlevels', 3).to_i toclevels = opts[:toclevels] || (node.document.attr 'toclevels', 2).to_i result = [] sections = node.sections # FIXME the level for special sections should be set correctly in the model # slevel will only be 0 if we have a book doctype with parts slevel = (first_section = sections[0]).level slevel = 1 if slevel == 0 && first_section.special result << %(
    ) sections.each do |section| section_num = (section.numbered && !section.caption && section.level <= sectnumlevels) ? %(#{section.sectnum} ) : nil if section.level < toclevels && (child_toc_level = outline section, :toclevels => toclevels, :secnumlevels => sectnumlevels) result << %(
  • #{section_num}#{section.captioned_title}) result << child_toc_level result << '
  • ' else result << %(
  • #{section_num}#{section.captioned_title}
  • ) end end result << '
' result * EOL end def section node slevel = node.level # QUESTION should the check for slevel be done in section? slevel = 1 if slevel == 0 && node.special htag = %(h#{slevel + 1}) id_attr = anchor = link_start = link_end = nil if node.id id_attr = %( id="#{id = node.id}") if (doc = node.document).attr? 'sectanchors' anchor = %() # possible idea - anchor icons GitHub-style #if doc.attr? 'icons', 'font' # anchor = %() #else # anchor = %() #end end if doc.attr? 'sectlinks' link_start = %() link_end = '' end end if slevel == 0 %(#{anchor}#{link_start}#{node.title}#{link_end} #{node.content}) else class_attr = (role = node.role) ? %( class="sect#{slevel} #{role}") : %( class="sect#{slevel}") sectnum = if node.numbered && !node.caption && slevel <= (node.document.attr 'sectnumlevels', 3).to_i %(#{node.sectnum} ) end %( <#{htag}#{id_attr}>#{anchor}#{link_start}#{sectnum}#{node.captioned_title}#{link_end} #{slevel == 1 ? %[
\n#{node.content}\n
] : node.content} ) end end def admonition node id_attr = node.id ? %( id="#{node.id}") : nil name = node.attr 'name' title_element = node.title? ? %(
#{node.title}
\n) : nil caption = if node.document.attr? 'icons' if (node.document.attr? 'icons', 'font') && !(node.attr? 'icon') %() else %(#{node.caption}) end else %(
#{node.caption}
) end %(
#{caption} #{title_element}#{node.content}
) end def audio node xml = @xml_mode id_attribute = node.id ? %( id="#{node.id}") : nil classes = ['audioblock', node.role].compact class_attribute = %( class="#{classes * ' '}") title_element = node.title? ? %(
#{node.captioned_title}
\n) : nil %( #{title_element}
) end def colist node result = [] id_attribute = node.id ? %( id="#{node.id}") : nil classes = ['colist', node.style, node.role].compact class_attribute = %( class="#{classes * ' '}") result << %() result << %(
#{node.title}
) if node.title? if node.document.attr? 'icons' result << '' font_icons = node.document.attr? 'icons', 'font' node.items.each_with_index do |item, i| num = i + 1 num_element = if font_icons %(#{num}) else %(#{num}) end result << %() end result << '
#{num_element} #{item.text}
' else result << '
    ' node.items.each do |item| result << %(
  1. #{item.text}

  2. ) end result << '
' end result << '' result * EOL end def dlist node result = [] id_attribute = node.id ? %( id="#{node.id}") : nil classes = case node.style when 'qanda' ['qlist', 'qanda', node.role] when 'horizontal' ['hdlist', node.role] else ['dlist', node.style, node.role] end.compact class_attribute = %( class="#{classes * ' '}") result << %() result << %(
#{node.title}
) if node.title? case node.style when 'qanda' result << '
    ' node.items.each do |terms, dd| result << '
  1. ' [*terms].each do |dt| result << %(

    #{dt.text}

    ) end if dd result << %(

    #{dd.text}

    ) if dd.text? result << dd.content if dd.blocks? end result << '
  2. ' end result << '
' when 'horizontal' slash = @void_element_slash result << '' if (node.attr? 'labelwidth') || (node.attr? 'itemwidth') result << '' col_style_attribute = (node.attr? 'labelwidth') ? %( style="width: #{(node.attr 'labelwidth').chomp '%'}%;") : nil result << %() col_style_attribute = (node.attr? 'itemwidth') ? %( style="width: #{(node.attr 'itemwidth').chomp '%'}%;") : nil result << %() result << '' end node.items.each do |terms, dd| result << '' result << %(' result << '' result << '' end result << '
) terms_array = [*terms] last_term = terms_array[-1] terms_array.each do |dt| result << dt.text result << %() if dt != last_term end result << '' if dd result << %(

#{dd.text}

) if dd.text? result << dd.content if dd.blocks? end result << '
' else result << '
' dt_style_attribute = node.style ? nil : ' class="hdlist1"' node.items.each do |terms, dd| [*terms].each do |dt| result << %(#{dt.text}) end if dd result << '
' result << %(

#{dd.text}

) if dd.text? result << dd.content if dd.blocks? result << '
' end end result << '
' end result << '' result * EOL end def example node id_attribute = node.id ? %( id="#{node.id}") : nil title_element = node.title? ? %(
#{node.captioned_title}
\n) : nil %( #{title_element}
#{node.content}
) end def floating_title node tag_name = %(h#{node.level + 1}) id_attribute = node.id ? %( id="#{node.id}") : nil classes = [node.style, node.role].compact %(<#{tag_name}#{id_attribute} class="#{classes * ' '}">#{node.title}) end def image node target = node.attr 'target' width_attr = (node.attr? 'width') ? %( width="#{node.attr 'width'}") : nil height_attr = (node.attr? 'height') ? %( height="#{node.attr 'height'}") : nil if ((node.attr? 'format', 'svg', false) || (target.include? '.svg')) && node.document.safe < SafeMode::SECURE && ((svg = (node.option? 'inline')) || (obj = (node.option? 'interactive'))) if svg img = (read_svg_contents node, target) || %(#{node.attr 'alt'}) elsif obj fallback = (node.attr? 'fallback') ? %(#{node.attr 'alt'}) : %(#{node.attr 'alt'}) img = %(#{fallback}) end end img ||= %(#{node.attr 'alt'}) if (link = node.attr 'link') img = %(#{img}) end id_attr = node.id ? %( id="#{node.id}") : nil classes = ['imageblock', node.role].compact class_attr = %( class="#{classes * ' '}") styles = [] styles << %(text-align: #{node.attr 'align'}) if node.attr? 'align' styles << %(float: #{node.attr 'float'}) if node.attr? 'float' style_attr = styles.empty? ? nil : %( style="#{styles * ';'}") title_el = node.title? ? %(\n
#{node.captioned_title}
) : nil %(
#{img}
#{title_el} ) end def listing node nowrap = !(node.document.attr? 'prewrap') || (node.option? 'nowrap') if node.style == 'source' if (language = node.attr 'language', nil, false) code_attrs = %( data-lang="#{language}") else code_attrs = nil end case node.document.attr 'source-highlighter' when 'coderay' pre_class = %( class="CodeRay highlight#{nowrap ? ' nowrap' : nil}") when 'pygments' pre_class = %( class="pygments highlight#{nowrap ? ' nowrap' : nil}") when 'highlightjs', 'highlight.js' pre_class = %( class="highlightjs highlight#{nowrap ? ' nowrap' : nil}") code_attrs = %( class="language-#{language}"#{code_attrs}) if language when 'prettify' pre_class = %( class="prettyprint highlight#{nowrap ? ' nowrap' : nil}#{(node.attr? 'linenums') ? ' linenums' : nil}") code_attrs = %( class="language-#{language}"#{code_attrs}) if language when 'html-pipeline' pre_class = language ? %( lang="#{language}") : nil code_attrs = nil else pre_class = %( class="highlight#{nowrap ? ' nowrap' : nil}") code_attrs = %( class="language-#{language}"#{code_attrs}) if language end pre_start = %() pre_end = '
' else pre_start = %() pre_end = '' end id_attribute = node.id ? %( id="#{node.id}") : nil title_element = node.title? ? %(
#{node.captioned_title}
\n) : nil %( #{title_element}
#{pre_start}#{node.content}#{pre_end}
) end def literal node id_attribute = node.id ? %( id="#{node.id}") : nil title_element = node.title? ? %(
#{node.title}
\n) : nil nowrap = !(node.document.attr? 'prewrap') || (node.option? 'nowrap') %( #{title_element}
#{node.content}
) end def stem node id_attribute = node.id ? %( id="#{node.id}") : nil title_element = node.title? ? %(
#{node.title}
\n) : nil open, close = BLOCK_MATH_DELIMITERS[node.style.to_sym] unless ((equation = node.content).start_with? open) && (equation.end_with? close) equation = %(#{open}#{equation}#{close}) end %( #{title_element}
#{equation}
) end def olist node result = [] id_attribute = node.id ? %( id="#{node.id}") : nil classes = ['olist', node.style, node.role].compact class_attribute = %( class="#{classes * ' '}") result << %() result << %(
#{node.title}
) if node.title? type_attribute = (keyword = node.list_marker_keyword) ? %( type="#{keyword}") : nil start_attribute = (node.attr? 'start') ? %( start="#{node.attr 'start'}") : nil reversed_attribute = (node.option? 'reversed') ? (append_boolean_attribute 'reversed', @xml_mode) : nil result << %(
    ) node.items.each do |item| result << '
  1. ' result << %(

    #{item.text}

    ) result << item.content if item.blocks? result << '
  2. ' end result << '
' result << '' result * EOL end def open node if (style = node.style) == 'abstract' if node.parent == node.document && node.document.doctype == 'book' warn 'asciidoctor: WARNING: abstract block cannot be used in a document without a title when doctype is book. Excluding block content.' '' else id_attr = node.id ? %( id="#{node.id}") : nil title_el = node.title? ? %(
#{node.title}
\n) : nil %( #{title_el}
#{node.content}
) end elsif style == 'partintro' && (node.level > 0 || node.parent.context != :section || node.document.doctype != 'book') warn 'asciidoctor: ERROR: partintro block can only be used when doctype is book and it\'s a child of a book part. Excluding block content.' '' else id_attr = node.id ? %( id="#{node.id}") : nil title_el = node.title? ? %(
#{node.title}
\n) : nil %( #{title_el}
#{node.content}
) end end def page_break node '
' end def paragraph node class_attribute = node.role ? %(class="paragraph #{node.role}") : 'class="paragraph"' attributes = node.id ? %(id="#{node.id}" #{class_attribute}) : class_attribute if node.title? %(
#{node.title}

#{node.content}

) else %(

#{node.content}

) end end def preamble node if (doc = node.document).attr?('toc-placement', 'preamble') && doc.sections? && (doc.attr? 'toc') toc = %(
#{doc.attr 'toc-title'}
#{outline doc}
) else toc = nil end %(
#{node.content}
#{toc}
) end def quote node id_attribute = node.id ? %( id="#{node.id}") : nil classes = ['quoteblock', node.role].compact class_attribute = %( class="#{classes * ' '}") title_element = node.title? ? %(\n
#{node.title}
) : nil attribution = (node.attr? 'attribution') ? (node.attr 'attribution') : nil citetitle = (node.attr? 'citetitle') ? (node.attr 'citetitle') : nil if attribution || citetitle cite_element = citetitle ? %(#{citetitle}) : nil attribution_text = attribution ? %(— #{attribution}#{citetitle ? "\n" : nil}) : nil attribution_element = %(\n
\n#{attribution_text}#{cite_element}\n
) else attribution_element = nil end %(#{title_element}
#{node.content}
#{attribution_element} ) end def thematic_break node %() end def sidebar node id_attribute = node.id ? %( id="#{node.id}") : nil title_element = node.title? ? %(
#{node.title}
\n) : nil %(
#{title_element}#{node.content}
) end def table node result = [] id_attribute = node.id ? %( id="#{node.id}") : nil classes = ['tableblock', %(frame-#{node.attr 'frame', 'all'}), %(grid-#{node.attr 'grid', 'all'})] styles = [] unless (node.option? 'autowidth') && !(node.attr? 'width', nil, false) if node.attr? 'tablepcwidth', 100 classes << 'spread' else styles << %(width: #{node.attr 'tablepcwidth'}%;) end end if (role = node.role) classes << role end class_attribute = %( class="#{classes * ' '}") styles << %(float: #{node.attr 'float'};) if node.attr? 'float' style_attribute = styles.empty? ? nil : %( style="#{styles * ' '}") result << %() result << %(#{node.captioned_title}) if node.title? if (node.attr 'rowcount') > 0 slash = @void_element_slash result << '' if node.option? 'autowidth' tag = %() node.columns.size.times do result << tag end else node.columns.each do |col| result << %() end end result << '' [:head, :foot, :body].select {|tsec| !node.rows[tsec].empty? }.each do |tsec| result << %() node.rows[tsec].each do |row| result << '' row.each do |cell| if tsec == :head cell_content = cell.text else case cell.style when :asciidoc cell_content = %(
#{cell.content}
) when :verse cell_content = %(
#{cell.text}
) when :literal cell_content = %(
#{cell.text}
) else cell_content = '' cell.content.each do |text| cell_content = %(#{cell_content}

#{text}

) end end end cell_tag_name = (tsec == :head || cell.style == :header ? 'th' : 'td') cell_class_attribute = %( class="tableblock halign-#{cell.attr 'halign'} valign-#{cell.attr 'valign'}") cell_colspan_attribute = cell.colspan ? %( colspan="#{cell.colspan}") : nil cell_rowspan_attribute = cell.rowspan ? %( rowspan="#{cell.rowspan}") : nil cell_style_attribute = (node.document.attr? 'cellbgcolor') ? %( style="background-color: #{node.document.attr 'cellbgcolor'};") : nil result << %(<#{cell_tag_name}#{cell_class_attribute}#{cell_colspan_attribute}#{cell_rowspan_attribute}#{cell_style_attribute}>#{cell_content}) end result << '' end result << %(
) end end result << '' result * EOL end def toc node unless (doc = node.document).attr?('toc-placement', 'macro') && doc.sections? && (doc.attr? 'toc') return '' end if node.id id_attr = %( id="#{node.id}") title_id_attr = %( id="#{node.id}title") else id_attr = ' id="toc"' title_id_attr = ' id="toctitle"' end title = node.title? ? node.title : (doc.attr 'toc-title') levels = (node.attr? 'levels') ? (node.attr 'levels').to_i : nil role = node.role? ? node.role : (doc.attr 'toc-class', 'toc') %( #{title} #{outline doc, :toclevels => levels} ) end def ulist node result = [] id_attribute = node.id ? %( id="#{node.id}") : nil div_classes = ['ulist', node.style, node.role].compact marker_checked = nil marker_unchecked = nil if (checklist = node.option? 'checklist') div_classes.insert 1, 'checklist' ul_class_attribute = ' class="checklist"' if node.option? 'interactive' if @xml_mode marker_checked = ' ' marker_unchecked = ' ' else marker_checked = ' ' marker_unchecked = ' ' end else if node.document.attr? 'icons', 'font' marker_checked = ' ' marker_unchecked = ' ' else marker_checked = '✓ ' marker_unchecked = '❏ ' end end else ul_class_attribute = node.style ? %( class="#{node.style}") : nil end result << %() result << %(
#{node.title}
) if node.title? result << %() node.items.each do |item| result << '
  • ' if checklist && (item.attr? 'checkbox') result << %(

    #{(item.attr? 'checked') ? marker_checked : marker_unchecked}#{item.text}

    ) else result << %(

    #{item.text}

    ) end result << item.content if item.blocks? result << '
  • ' end result << '' result << '' result * EOL end def verse node id_attribute = node.id ? %( id="#{node.id}") : nil classes = ['verseblock', node.role].compact class_attribute = %( class="#{classes * ' '}") title_element = node.title? ? %(\n
    #{node.title}
    ) : nil attribution = (node.attr? 'attribution') ? (node.attr 'attribution') : nil citetitle = (node.attr? 'citetitle') ? (node.attr 'citetitle') : nil if attribution || citetitle cite_element = citetitle ? %(#{citetitle}) : nil attribution_text = attribution ? %(— #{attribution}#{citetitle ? "\n" : nil}) : nil attribution_element = %(\n
    \n#{attribution_text}#{cite_element}\n
    ) else attribution_element = nil end %(#{title_element}
    #{node.content}
    #{attribution_element} ) end def video node xml = @xml_mode id_attribute = node.id ? %( id="#{node.id}") : nil classes = ['videoblock', node.role].compact class_attribute = %( class="#{classes * ' '}") title_element = node.title? ? %(\n
    #{node.captioned_title}
    ) : nil width_attribute = (node.attr? 'width') ? %( width="#{node.attr 'width'}") : nil height_attribute = (node.attr? 'height') ? %( height="#{node.attr 'height'}") : nil case node.attr 'poster' when 'vimeo' unless (asset_uri_scheme = (node.document.attr 'asset-uri-scheme', 'https')).empty? asset_uri_scheme = %(#{asset_uri_scheme}:) end start_anchor = (node.attr? 'start', nil, false) ? %(#at=#{node.attr 'start'}) : nil delimiter = '?' autoplay_param = (node.option? 'autoplay') ? %(#{delimiter}autoplay=1) : nil delimiter = '&' if autoplay_param loop_param = (node.option? 'loop') ? %(#{delimiter}loop=1) : nil %(#{title_element}
    ) when 'youtube' unless (asset_uri_scheme = (node.document.attr 'asset-uri-scheme', 'https')).empty? asset_uri_scheme = %(#{asset_uri_scheme}:) end rel_param_val = (node.option? 'related') ? 1 : 0 # NOTE start and end must be seconds (t parameter allows XmYs where X is minutes and Y is seconds) start_param = (node.attr? 'start', nil, false) ? %(&start=#{node.attr 'start'}) : nil end_param = (node.attr? 'end', nil, false) ? %(&end=#{node.attr 'end'}) : nil autoplay_param = (node.option? 'autoplay') ? '&autoplay=1' : nil loop_param = (node.option? 'loop') ? '&loop=1' : nil controls_param = (node.option? 'nocontrols') ? '&controls=0' : nil # cover both ways of controlling fullscreen option if node.option? 'nofullscreen' fs_param = '&fs=0' fs_attribute = nil else fs_param = nil fs_attribute = append_boolean_attribute 'allowfullscreen', xml end modest_param = (node.option? 'modest') ? '&modestbranding=1' : nil theme_param = (node.attr? 'theme', nil, false) ? %(&theme=#{node.attr 'theme'}) : nil hl_param = (node.attr? 'lang') ? %(&hl=#{node.attr 'lang'}) : nil # parse video_id/list_id syntax where list_id (i.e., playlist) is optional target, list = (node.attr 'target').split '/', 2 if (list ||= (node.attr 'list', nil, false)) list_param = %(&list=#{list}) else # parse dynamic playlist syntax: video_id1,video_id2,... target, playlist = target.split ',', 2 if (playlist ||= (node.attr 'playlist', nil, false)) # INFO playlist bar doesn't appear in Firefox unless showinfo=1 and modestbranding=1 list_param = %(&playlist=#{playlist}) else # NOTE for loop to work, playlist must be specified; use VIDEO_ID if there's no explicit playlist list_param = loop_param ? %(&playlist=#{target}) : nil end end %(#{title_element}
    ) else poster_attribute = %(#{poster = node.attr 'poster'}).empty? ? nil : %( poster="#{node.media_uri poster}") start_t = node.attr 'start', nil, false end_t = node.attr 'end', nil, false time_anchor = (start_t || end_t) ? %(#t=#{start_t}#{end_t ? ',' : nil}#{end_t}) : nil %(#{title_element}
    ) end end def inline_anchor node target = node.target case node.type when :xref refid = node.attributes['refid'] || target # NOTE we lookup text in converter because DocBook doesn't need this logic text = node.text || (node.document.references[:ids][refid] || %([#{refid}])) # FIXME shouldn't target be refid? logic seems confused here %(#{text}) when :ref %() when :link attrs = [] attrs << %( id="#{node.id}") if node.id if (role = node.role) attrs << %( class="#{role}") end attrs << %( title="#{node.attr 'title'}") if node.attr? 'title', nil, false attrs << %( target="#{node.attr 'window'}") if node.attr? 'window', nil, false %(#{node.text}) when :bibref %([#{target}]) else warn %(asciidoctor: WARNING: unknown anchor type: #{node.type.inspect}) end end def inline_break node %(#{node.text}) end def inline_button node %(#{node.text}) end def inline_callout node if node.document.attr? 'icons', 'font' %((#{node.text})) elsif node.document.attr? 'icons' src = node.icon_uri("callouts/#{node.text}") %(#{node.text}) else %((#{node.text})) end end def inline_footnote node if (index = node.attr 'index') if node.type == :xref %([#{index}]) else id_attr = node.id ? %( id="_footnote_#{node.id}") : nil %([#{index}]) end elsif node.type == :xref %([#{node.text}]) end end def inline_image node if (type = node.type) == 'icon' && (node.document.attr? 'icons', 'font') class_attr_val = %(fa fa-#{node.target}) {'size' => 'fa-', 'rotate' => 'fa-rotate-', 'flip' => 'fa-flip-'}.each do |key, prefix| class_attr_val = %(#{class_attr_val} #{prefix}#{node.attr key}) if node.attr? key end title_attr = (node.attr? 'title') ? %( title="#{node.attr 'title'}") : nil img = %() elsif type == 'icon' && !(node.document.attr? 'icons') img = %([#{node.attr 'alt'}]) else target = node.target attrs = ['width', 'height', 'title'].map {|name| (node.attr? name) ? %( #{name}="#{node.attr name}") : nil }.join if type != 'icon' && ((node.attr? 'format', 'svg', false) || (target.include? '.svg')) && node.document.safe < SafeMode::SECURE && ((svg = (node.option? 'inline')) || (obj = (node.option? 'interactive'))) if svg img = (read_svg_contents node, target) || %(#{node.attr 'alt'}) elsif obj fallback = (node.attr? 'fallback') ? %(#{node.attr 'alt'}) : %(#{node.attr 'alt'}) img = %(#{fallback}) end end img ||= %(#{node.attr 'alt'}) end if node.attr? 'link' window_attr = (node.attr? 'window') ? %( target="#{node.attr 'window'}") : nil img = %(#{img}) end class_attr_val = (role = node.role) ? %(#{type} #{role}) : type style_attr = (node.attr? 'float') ? %( style="float: #{node.attr 'float'}") : nil %(#{img}) end def inline_indexterm node node.type == :visible ? node.text : '' end def inline_kbd node if (keys = node.attr 'keys').size == 1 %(#{keys[0]}) else key_combo = keys.map {|key| %(#{key}+) }.join.chop %(#{key_combo}) end end def inline_menu node menu = node.attr 'menu' if !(submenus = node.attr 'submenus').empty? submenu_path = submenus.map {|submenu| %(#{submenu} ▸ ) }.join.chop %(#{menu} ▸ #{submenu_path} #{node.attr 'menuitem'}) elsif (menuitem = node.attr 'menuitem') %(#{menu} ▸ #{menuitem}) else %(#{menu}) end end def inline_quoted node open, close, is_tag = QUOTE_TAGS[node.type] if (role = node.role) if is_tag quoted_text = %(#{open.chop} class="#{role}">#{node.text}#{close}) else quoted_text = %(#{open}#{node.text}#{close}) end else quoted_text = %(#{open}#{node.text}#{close}) end node.id ? %(#{quoted_text}) : quoted_text end def append_boolean_attribute name, xml xml ? 
%( #{name}="#{name}") : %( #{name}) end def read_svg_contents node, target if (svg = node.read_contents target, :start => (node.document.attr 'imagesdir'), :normalize => true, :label => 'SVG') svg = svg.sub SvgPreambleRx, '' unless svg.start_with? ') end end svg = %(#{new_start_tag}#{svg[old_start_tag.length..-1]}) if new_start_tag end svg end end end asciidoctor-1.5.5/lib/asciidoctor/converter/manpage.rb000066400000000000000000000507441277513741400230670ustar00rootroot00000000000000module Asciidoctor # A built-in {Converter} implementation that generates the man page (troff) format. # # The output follows the groff man page definition while also trying to be # consistent with the output produced by the a2x tool from AsciiDoc Python. # # See http://www.gnu.org/software/groff/manual/html_node/Man-usage.html#Man-usage class Converter::ManPageConverter < Converter::BuiltIn LF = %(\n) TAB = %(\t) WHITESPACE = %(#{LF}#{TAB} ) ET = ' ' * 8 ESC = %(\u001b) # troff leader marker ESC_BS = %(#{ESC}\\) # escaped backslash (indicates troff formatting sequence) ESC_FS = %(#{ESC}.) # escaped full stop (indicates troff macro) LiteralBackslashRx = /(?:\A|[^#{ESC}])\\/ LeadingPeriodRx = /^\./ EscapedMacroRx = /^(?:#{ESC}\\c\n)?#{ESC}\.((?:URL|MTO) ".*?" ".*?" )( |[^\s]*)(.*?)(?: *#{ESC}\\c)?$/ MockBoundaryRx = /<\/?BOUNDARY>/ EmDashCharRefRx = /—(?:;​)?/ EllipsisCharRefRx = /…(?:​)?/ # Converts HTML entity references back to their original form, escapes # special man characters and strips trailing whitespace. # # It's crucial that text only ever pass through manify once. # # str - the String to convert # opts - an Hash of options to control processing (default: {}) # * :preserve_space a Boolean that indicates whether to preserve spaces (only expanding tabs) if true # or to collapse all adjacent whitespace to a single space if false (default: true) # * :append_newline a Boolean that indicates whether to append an endline to the result (default: false) def manify str, opts = {} str = ((opts.fetch :preserve_space, true) ? (str.gsub TAB, ET) : (str.tr_s WHITESPACE, ' ')). gsub(LiteralBackslashRx, '\&(rs'). # literal backslash (not a troff escape sequence) gsub(LeadingPeriodRx, '\\\&.'). # leading . is used in troff for macro call or other formatting; replace with \&. # drop orphaned \c escape lines, unescape troff macro, quote adjacent character, isolate macro line gsub(EscapedMacroRx) { (rest = $3.lstrip).empty? ? %(.#$1"#$2") : %(.#$1"#$2"#{LF}#{rest}) }. gsub('-', '\-'). gsub('<', '<'). gsub('>', '>'). gsub(' ', '\~'). # non-breaking space gsub('©', '\(co'). # copyright sign gsub('®', '\(rg'). # registered sign gsub('™', '\(tm'). # trademark sign gsub(' ', ' '). # thin space gsub('–', '\(en'). # en dash gsub(EmDashCharRefRx, '\(em'). # em dash gsub('‘', '\(oq'). # left single quotation mark gsub('’', '\(cq'). # right single quotation mark gsub('“', '\(lq'). # left double quotation mark gsub('”', '\(rq'). # right double quotation mark gsub(EllipsisCharRefRx, '...'). # horizontal ellipsis gsub('←', '\(<-'). # leftwards arrow gsub('→', '\(->'). # rightwards arrow gsub('⇐', '\(lA'). # leftwards double arrow gsub('⇒', '\(rA'). # rightwards double arrow gsub('​', '\:'). # zero width space gsub('\'', '\(aq'). # apostrophe-quote gsub(MockBoundaryRx, ''). # mock boundary gsub(ESC_BS, '\\'). # unescape troff backslash (NOTE update if more escapes are added) rstrip # strip trailing space opts[:append_newline] ? 
%(#{str}#{LF}) : str end def skip_with_warning node, name = nil warn %(asciidoctor: WARNING: converter missing for #{name || node.node_name} node in manpage backend) nil end def document node unless node.attr? 'mantitle' raise 'asciidoctor: ERROR: doctype must be set to manpage when using manpage backend' end mantitle = node.attr 'mantitle' manvolnum = node.attr 'manvolnum', '1' manname = node.attr 'manname', mantitle docdate = (node.attr? 'reproducible') ? nil : (node.attr 'docdate') # NOTE the first line enables the table (tbl) preprocessor, necessary for non-Linux systems result = [%('\\" t .\\" Title: #{mantitle} .\\" Author: #{(node.attr? 'authors') ? (node.attr 'authors') : '[see the "AUTHORS" section]'} .\\" Generator: Asciidoctor #{node.attr 'asciidoctor-version'})] result << %(.\\" Date: #{docdate}) if docdate result << %(.\\" Manual: #{(manual = node.attr 'manmanual') || '\ \&'} .\\" Source: #{(source = node.attr 'mansource') || '\ \&'} .\\" Language: English .\\") # TODO add document-level setting to disable capitalization of manname result << %(.TH "#{manify manname.upcase}" "#{manvolnum}" "#{docdate}" "#{source ? (manify source) : '\ \&'}" "#{manual ? (manify manual) : '\ \&'}") # define portability settings # see http://bugs.debian.org/507673 # see http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html result << '.ie \n(.g .ds Aq \(aq' result << '.el .ds Aq \'' # set sentence_space_size to 0 to prevent extra space between sentences separated by a newline # the alternative is to add \& at the end of the line result << '.ss \n[.ss] 0' # disable hyphenation result << '.nh' # disable justification (adjust text to left margin only) result << '.ad l' # define URL macro for portability # see http://web.archive.org/web/20060102165607/http://people.debian.org/~branden/talks/wtfm/wtfm.pdf # # Use: .URL "http://www.debian.org" "Debian" "." # # * First argument: the URL # * Second argument: text to be hyperlinked # * Third (optional) argument: text that needs to immediately trail # the hyperlink without intervening whitespace result << '.de URL \\\\$2 \(laURL: \\\\$1 \(ra\\\\$3 .. .if \n[.g] .mso www.tmac' result << %(.LINKSTYLE #{node.attr 'man-linkstyle', 'blue R < >'}) unless node.noheader if node.attr? 'manpurpose' result << %(.SH "#{node.attr 'manname-title'}" #{manify mantitle} \\- #{manify node.attr 'manpurpose'}) end end result << node.content # QUESTION should NOTES come after AUTHOR(S)? if node.footnotes? && !(node.attr? 'nofootnotes') result << '.SH "NOTES"' result.concat(node.footnotes.map {|fn| %(#{fn.index}. #{fn.text}) }) end # FIXME detect single author and use appropriate heading; itemize the authors if multiple if node.attr? 'authors' result << %(.SH "AUTHOR(S)" .sp \\fB#{node.attr 'authors'}\\fP .RS 4 Author(s). .RE) end result * LF end # NOTE embedded doesn't really make sense in the manpage backend def embedded node result = [node.content] if node.footnotes? && !(node.attr? 'nofootnotes') result << '.SH "NOTES"' result.concat(node.footnotes.map {|fn| %(#{fn.index}. #{fn.text}) }) end # QUESTION should we add an AUTHOR(S) section? result * LF end def section node slevel = node.level # QUESTION should the check for slevel be done in section? slevel = 1 if slevel == 0 && node.special result = [] if slevel > 1 macro = 'SS' # QUESTION why captioned title? why not for slevel == 1? 
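#
# A minimal sketch of selecting the man page converter through the top-level
# API (the file name is illustrative). The source document must use the
# manpage doctype; otherwise the document method above raises an error
# because the mantitle attribute is missing:
#
#   require 'asciidoctor'
#
#   Asciidoctor.convert_file 'progname.adoc', :backend => 'manpage', :doctype => 'manpage'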
stitle = node.captioned_title else macro = 'SH' stitle = node.title.upcase end result << %(.#{macro} "#{manify stitle}" #{node.content}) result * LF end def admonition node result = [] result << %(.if n \\{\\ .sp .\\} .RS 4 .it 1 an-trap .nr an-no-space-flag 1 .nr an-break-flag 1 .br .ps +1 .B #{node.caption}#{node.title? ? "\\fP #{manify node.title}" : nil} .ps -1 .br #{resolve_content node} .sp .5v .RE) result * LF end alias :audio :skip_with_warning def colist node result = [] result << %(.sp .B #{manify node.title} .br) if node.title? result << '.TS tab(:); r lw(\n(.lu*75u/100u).' node.items.each_with_index do |item, index| result << %(\\fB(#{index + 1})\\fP\\h'-2n':T{ #{manify item.text} T}) end result << '.TE' result * LF end # TODO implement title for dlist # TODO implement horizontal (if it makes sense) def dlist node result = [] counter = 0 node.items.each do |terms, dd| counter += 1 case node.style when 'qanda' result << %(.sp #{counter}. #{manify([*terms].map {|dt| dt.text }.join ' ')} .RS 4) else result << %(.sp #{manify([*terms].map {|dt| dt.text }.join ', ')} .RS 4) end if dd result << (manify dd.text) if dd.text? result << dd.content if dd.blocks? end result << '.RE' end result * LF end def example node result = [] result << %(.sp .B #{manify node.captioned_title} .br) if node.title? result << %(.RS 4 #{resolve_content node} .RE) result * LF end def floating_title node %(.SS "#{manify node.title}") end alias :image :skip_with_warning def listing node result = [] result << %(.sp .B #{manify node.captioned_title} .br) if node.title? result << %(.sp .if n \\{\\ .RS 4 .\\} .nf #{manify node.content} .fi .if n \\{\\ .RE .\\}) result * LF end def literal node result = [] result << %(.sp .B #{manify node.title} .br) if node.title? result << %(.sp .if n \\{\\ .RS 4 .\\} .nf #{manify node.content} .fi .if n \\{\\ .RE .\\}) result * LF end def olist node result = [] result << %(.sp .B #{manify node.title} .br) if node.title? node.items.each_with_index do |item, idx| result << %(.sp .RS 4 .ie n \\{\\ \\h'-04' #{idx + 1}.\\h'+01'\\c .\\} .el \\{\\ .sp -1 .IP " #{idx + 1}." 4.2 .\\} #{manify item.text}) result << item.content if item.blocks? result << '.RE' end result * LF end def open node case node.style when 'abstract', 'partintro' resolve_content node else node.content end end # TODO use Page Control https://www.gnu.org/software/groff/manual/html_node/Page-Control.html#Page-Control alias :page_break :skip def paragraph node if node.title? %(.sp .B #{manify node.title} .br #{manify node.content}) else %(.sp #{manify node.content}) end end alias :preamble :content def quote node result = [] if node.title? result << %(.sp .in +.3i .B #{manify node.title} .br .in) end attribution_line = (node.attr? 'citetitle') ? %(#{node.attr 'citetitle'} ) : nil attribution_line = (node.attr? 'attribution') ? %[#{attribution_line}\\(em #{node.attr 'attribution'}] : nil result << %(.in +.3i .ll -.3i .nf #{resolve_content node} .fi .br .in .ll) if attribution_line result << %(.in +.5i .ll -.5i #{attribution_line} .in .ll) end result * LF end alias :sidebar :skip_with_warning def stem node title_element = node.title? ? %(.sp .B #{manify node.title} .br) : nil open, close = BLOCK_MATH_DELIMITERS[node.style.to_sym] unless ((equation = node.content).start_with? open) && (equation.end_with? 
close) equation = %(#{open}#{equation}#{close}) end %(#{title_element}#{equation}) end # FIXME: The reason this method is so complicated is because we are not # receiving empty(marked) cells when there are colspans or rowspans. This # method has to create a map of all cells and in the case of rowspans # create empty cells as placeholders of the span. # To fix this, asciidoctor needs to provide an API to tell the user if a # given cell is being used as a colspan or rowspan. def table node result = [] if node.title? result << %(.sp .it 1 an-trap .nr an-no-space-flag 1 .nr an-break-flag 1 .br .B #{manify node.captioned_title}) end result << '.TS allbox tab(:);' row_header = [] row_text = [] row_index = 0 [:head, :body, :foot].each do |tsec| node.rows[tsec].each do |row| row_header[row_index] ||= [] row_text[row_index] ||= [] # result << LF # l left-adjusted # r right-adjusted # c centered-adjusted # n numerical align # a alphabetic align # s spanned # ^ vertically spanned remaining_cells = row.size row.each_with_index do |cell, cell_index| remaining_cells -= 1 row_header[row_index][cell_index] ||= [] # Add an empty cell if this is a rowspan cell if row_header[row_index][cell_index] == ['^t'] row_text[row_index] << %(T{#{LF}.sp#{LF}T}:) end row_text[row_index] << %(T{#{LF}.sp#{LF}) cell_halign = (cell.attr 'halign', 'left')[0..0] if tsec == :head if row_header[row_index].empty? || row_header[row_index][cell_index].empty? row_header[row_index][cell_index] << %(#{cell_halign}tB) else row_header[row_index][cell_index + 1] ||= [] row_header[row_index][cell_index + 1] << %(#{cell_halign}tB) end row_text[row_index] << %(#{cell.text}#{LF}) elsif tsec == :body if row_header[row_index].empty? || row_header[row_index][cell_index].empty? row_header[row_index][cell_index] << %(#{cell_halign}t) else row_header[row_index][cell_index + 1] ||= [] row_header[row_index][cell_index + 1] << %(#{cell_halign}t) end case cell.style when :asciidoc cell_content = cell.content when :verse, :literal cell_content = cell.text else cell_content = cell.content.join end row_text[row_index] << %(#{cell_content}#{LF}) elsif tsec == :foot if row_header[row_index].empty? || row_header[row_index][cell_index].empty? row_header[row_index][cell_index] << %(#{cell_halign}tB) else row_header[row_index][cell_index + 1] ||= [] row_header[row_index][cell_index + 1] << %(#{cell_halign}tB) end row_text[row_index] << %(#{cell.text}#{LF}) end if cell.colspan && cell.colspan > 1 (cell.colspan - 1).times do |i| if row_header[row_index].empty? || row_header[row_index][cell_index].empty? row_header[row_index][cell_index + i] << 'st' else row_header[row_index][cell_index + 1 + i] ||= [] row_header[row_index][cell_index + 1 + i] << 'st' end end end if cell.rowspan && cell.rowspan > 1 (cell.rowspan - 1).times do |i| row_header[row_index + 1 + i] ||= [] if row_header[row_index + 1 + i].empty? || row_header[row_index + 1 + i][cell_index].empty? 
row_header[row_index + 1 + i][cell_index] ||= [] row_header[row_index + 1 + i][cell_index] << '^t' else row_header[row_index + 1 + i][cell_index + 1] ||= [] row_header[row_index + 1 + i][cell_index + 1] << '^t' end end end if remaining_cells >= 1 row_text[row_index] << 'T}:' else row_text[row_index] << %(T}#{LF}) end end row_index += 1 end end #row_header.each do |row| # result << LF # row.each_with_index do |cell, i| # result << (cell.join ' ') # result << ' ' if row.size > i + 1 # end #end # FIXME temporary fix to get basic table to display result << LF result << row_header.first.map {|r| 'lt'}.join(' ') result << %(.#{LF}) row_text.each do |row| result << row.join end result << %(.TE#{LF}.sp) result.join end def thematic_break node '.sp .ce \l\'\n(.lu*25u/100u\(ap\'' end alias :toc :skip def ulist node result = [] result << %(.sp .B #{manify node.title} .br) if node.title? node.items.map {|item| result << %[.sp .RS 4 .ie n \\{\\ \\h'-04'\\(bu\\h'+03'\\c .\\} .el \\{\\ .sp -1 .IP \\(bu 2.3 .\\} #{manify item.text}] result << item.content if item.blocks? result << '.RE' } result * LF end # FIXME git uses [verse] for the synopsis; detect this special case def verse node result = [] if node.title? result << %(.sp .B #{manify node.title} .br) end attribution_line = (node.attr? 'citetitle') ? %(#{node.attr 'citetitle'} ) : nil attribution_line = (node.attr? 'attribution') ? %[#{attribution_line}\\(em #{node.attr 'attribution'}] : nil result << %(.sp .nf #{manify node.content} .fi .br) if attribution_line result << %(.in +.5i .ll -.5i #{attribution_line} .in .ll) end result * LF end def video node start_param = (node.attr? 'start', nil, false) ? %(&start=#{node.attr 'start'}) : nil end_param = (node.attr? 'end', nil, false) ? %(&end=#{node.attr 'end'}) : nil %(.sp #{manify node.captioned_title} (video) <#{node.media_uri(node.attr 'target')}#{start_param}#{end_param}>) end def inline_anchor node target = node.target case node.type when :link if (text = node.text) == target text = nil else text = text.gsub '"', %[#{ESC_BS}(dq] end if target.start_with? 'mailto:' macro = 'MTO' target = target[7..-1].sub '@', %[#{ESC_BS}(at] else macro = 'URL' end %(#{ESC_BS}c#{LF}#{ESC_FS}#{macro} "#{target}" "#{text}" ) when :xref refid = (node.attr 'refid') || target node.text || (node.document.references[:ids][refid] || %([#{refid}])) when :ref, :bibref # These are anchor points, which shouldn't be visual '' else warn %(asciidoctor: WARNING: unknown anchor type: #{node.type.inspect}) end end def inline_break node %(#{node.text} .br) end def inline_button node %(#{ESC_BS}fB[#{ESC_BS}0#{node.text}#{ESC_BS}0]#{ESC_BS}fP) end def inline_callout node %(#{ESC_BS}fB(#{node.text})#{ESC_BS}fP) end # TODO supposedly groff has footnotes, but we're in search of an example def inline_footnote node if (index = node.attr 'index') %([#{index}]) elsif node.type == :xref %([#{node.text}]) end end def inline_image node # NOTE alt should always be set alt_text = (node.attr? 'alt') ? (node.attr 'alt') : node.target (node.attr? 'link') ? %([#{alt_text}] <#{node.attr 'link'}>) : %([#{alt_text}]) end def inline_indexterm node node.type == :visible ? node.text : '' end def inline_kbd node if (keys = node.attr 'keys').size == 1 keys[0] else keys.join %(#{ESC_BS}0+#{ESC_BS}0) end end def inline_menu node caret = %[#{ESC_BS}0#{ESC_BS}(fc#{ESC_BS}0] menu = node.attr 'menu' if !(submenus = node.attr 'submenus').empty? 
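# Illustrative note: a menu reference such as menu:File[Save As] renders the
# path in italics (\fI...\fP), using the \(fc special character padded with
# \0 spaces as the separator between the menu, any submenus, and the menu item.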
submenu_path = submenus.map {|item| %(#{ESC_BS}fI#{item}#{ESC_BS}fP) }.join caret %(#{ESC_BS}fI#{menu}#{ESC_BS}fP#{caret}#{submenu_path}#{caret}#{ESC_BS}fI#{node.attr 'menuitem'}#{ESC_BS}fP) elsif (menuitem = node.attr 'menuitem') %(#{ESC_BS}fI#{menu}#{caret}#{menuitem}#{ESC_BS}fP) else %(#{ESC_BS}fI#{menu}#{ESC_BS}fP) end end # NOTE use fake element to prevent creating artificial word boundaries def inline_quoted node case node.type when :emphasis %(#{ESC_BS}fI#{node.text}#{ESC_BS}fP) when :strong %(#{ESC_BS}fB#{node.text}#{ESC_BS}fP) when :monospaced %(#{ESC_BS}f[CR]#{node.text}#{ESC_BS}fP) when :single %[#{ESC_BS}(oq#{node.text}#{ESC_BS}(cq] when :double %[#{ESC_BS}(lq#{node.text}#{ESC_BS}(rq] else node.text end end def resolve_content node node.content_model == :compound ? node.content : %(.sp#{LF}#{manify node.content}) end end end asciidoctor-1.5.5/lib/asciidoctor/converter/template.rb000066400000000000000000000264251277513741400232710ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor # A {Converter} implementation that uses templates composed in template # languages supported by {https://github.com/rtomayko/tilt Tilt} to convert # {AbstractNode} objects from a parsed AsciiDoc document tree to the backend # format. # # The converter scans the provided directories for template files that are # supported by Tilt. If an engine name (e.g., "slim") is specified in the # options Hash passed to the constructor, the scan is limited to template # files that have a matching extension (e.g., ".slim"). The scanner trims any # extensions from the basename of the file and uses the resulting name as the # key under which to store the template. When the {Converter#convert} method # is invoked, the transform argument is used to select the template from this # table and use it to convert the node. # # For example, the template file "path/to/templates/paragraph.html.slim" will # be registered as the "paragraph" transform. The template would then be used # to convert a paragraph {Block} object from the parsed AsciiDoc tree to an # HTML backend format (e.g., "html5"). # # As an optimization, scan results and templates are cached for the lifetime # of the Ruby process. If the {https://rubygems.org/gems/thread_safe # thread_safe} gem is installed, these caches are guaranteed to be thread # safe. If this gem is not present, they are not and a warning is issued. class Converter::TemplateConverter < Converter::Base DEFAULT_ENGINE_OPTIONS = { :erb => { :trim => '<' }, # TODO line 466 of haml/compiler.rb sorts the attributes; file an issue to make this configurable # NOTE AsciiDoc syntax expects HTML/XML output to use double quotes around attribute values :haml => { :format => :xhtml, :attr_wrapper => '"', :ugly => true, :escape_attrs => false }, :slim => { :disable_escape => true, :sort_attrs => false, :pretty => false } } # QUESTION are we handling how we load the thread_safe support correctly? begin require 'thread_safe' unless defined? ::ThreadSafe @caches = { :scans => ::ThreadSafe::Cache.new, :templates => ::ThreadSafe::Cache.new } rescue ::LoadError @caches = { :scans => {}, :templates => {} } # FIXME perhaps only warn if the cache option is enabled (meaning not disabled)? warn 'asciidoctor: WARNING: gem \'thread_safe\' is not installed. This gem is recommended when using custom backend templates.' 
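# NOTE with the plain Hash fallback, the process-wide scan/template caches are
# not protected for concurrent use. Illustrative example: callers can bypass
# the shared cache entirely by passing :template_cache => false through the API, e.g.
#
#   Asciidoctor.convert_file 'doc.adoc', :template_dir => 'templates', :template_cache => false
#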
end def self.caches @caches end def self.clear_caches @caches[:scans].clear if @caches[:scans] @caches[:templates].clear if @caches[:templates] end def initialize backend, template_dirs, opts = {} Helpers.require_library 'tilt' unless defined? ::Tilt @backend = backend @templates = {} @template_dirs = template_dirs @eruby = opts[:eruby] @safe = opts[:safe] @engine = opts[:template_engine] @engine_options = DEFAULT_ENGINE_OPTIONS.inject({}) do |accum, (engine, default_opts)| accum[engine] = default_opts.dup accum end if opts[:htmlsyntax] == 'html' @engine_options[:haml][:format] = :html5 @engine_options[:slim][:format] = :html end if (overrides = opts[:template_engine_options]) overrides.each do |engine, override_opts| (@engine_options[engine] ||= {}).update override_opts end end case opts[:template_cache] when true @caches = self.class.caches when ::Hash @caches = opts[:template_cache] else @caches = {} # the empty Hash effectively disables caching end scan #create_handlers end =begin # Public: Called when this converter is added to a composite converter. def composed parent # TODO set the backend info determined during the scan end =end # Internal: Scans the template directories specified in the constructor for Tilt-supported # templates, loads the templates and stores the in a Hash that is accessible via the # {TemplateConverter#templates} method. # # Returns nothing def scan path_resolver = PathResolver.new backend = @backend engine = @engine @template_dirs.each do |template_dir| # FIXME need to think about safe mode restrictions here next unless ::File.directory?(template_dir = (path_resolver.system_path template_dir, nil)) # NOTE last matching template wins for template name if no engine is given file_pattern = '*' if engine file_pattern = %(*.#{engine}) # example: templates/haml if ::File.directory?(engine_dir = (::File.join template_dir, engine)) template_dir = engine_dir end end # example: templates/html5 or templates/haml/html5 if ::File.directory?(backend_dir = (::File.join template_dir, backend)) template_dir = backend_dir end pattern = ::File.join template_dir, file_pattern if (scan_cache = @caches[:scans]) template_cache = @caches[:templates] unless (templates = scan_cache[pattern]) templates = (scan_cache[pattern] = (scan_dir template_dir, pattern, template_cache)) end templates.each do |name, template| @templates[name] = template_cache[template.file] = template end else @templates.update scan_dir(template_dir, pattern, @caches[:templates]) end nil end end =begin # Internal: Creates convert methods (e.g., inline_anchor) that delegate to the discovered templates. # # Returns nothing def create_handlers @templates.each do |name, template| create_handler name, template end nil end # Internal: Creates a convert method for the specified name that delegates to the specified template. # # Returns nothing def create_handler name, template metaclass = class << self; self; end if name == 'document' metaclass.send :define_method, name do |node| (template.render node).strip end else metaclass.send :define_method, name do |node| (template.render node).chomp end end end =end # Public: Convert an {AbstractNode} to the backend format using the named template. # # Looks for a template that matches the value of the # {AbstractNode#node_name} property if a template name is not specified. # # node - the AbstractNode to convert # template_name - the String name of the template to use, or the value of # the node_name property on the node if a template name is # not specified. 
(optional, default: nil) # opts - an optional Hash that is passed as local variables to the # template. (optional, default: {}) # # Returns the [String] result from rendering the template def convert node, template_name = nil, opts = {} template_name ||= node.node_name unless (template = @templates[template_name]) raise %(Could not find a custom template to handle transform: #{template_name}) end # Slim doesn't include helpers in the template's execution scope (like HAML), so do it ourselves node.extend ::Slim::Helpers if (defined? ::Slim::Helpers) && (::Slim::Template === template) # NOTE opts become locals in the template if template_name == 'document' (template.render node, opts).strip else (template.render node, opts).chomp end end # Public: Checks whether there is a Tilt template registered with the specified name. # # name - the String template name # # Returns a [Boolean] that indicates whether a Tilt template is registered for the # specified template name. def handles? name @templates.key? name end # Public: Retrieves the templates that this converter manages. # # Returns a [Hash] of Tilt template objects keyed by template name. def templates @templates.dup.freeze end # Public: Registers a Tilt template with this converter. # # name - the String template name # template - the Tilt template object to register # # Returns the Tilt template object def register name, template @templates[name] = if (template_cache = @caches[:templates]) template_cache[template.file] = template else template end #create_handler name, template end # Internal: Scan the specified directory for template files matching pattern and instantiate # a Tilt template for each matched file. # # Returns the scan result as a [Hash] def scan_dir template_dir, pattern, template_cache = nil result = {} eruby_loaded = nil # Grab the files in the top level of the directory (do not recurse) ::Dir.glob(pattern).select {|match| ::File.file? match }.each do |file| if (basename = ::File.basename file) == 'helpers.rb' || (path_segments = basename.split '.').size < 2 next end # TODO we could derive the basebackend from the minor extension of the template file #name, *rest, ext_name = *path_segments # this form only works in Ruby >= 1.9 name = path_segments[0] if name == 'block_ruler' name = 'thematic_break' elsif name.start_with? 'block_' name = name[6..-1] end template_class = ::Tilt extra_engine_options = {} case (ext_name = path_segments[-1]) when 'slim' # slim doesn't get loaded by Tilt, so we have to load it explicitly Helpers.require_library 'slim' unless defined? ::Slim # align safe mode of AsciiDoc embedded in Slim template with safe mode of current document (@engine_options[:slim][:asciidoc] ||= {})[:safe] ||= @safe if @safe && ::Slim::VERSION >= '3.0' # load include plugin when using Slim >= 2.1 require 'slim/include' unless (defined? ::Slim::Include) || ::Slim::VERSION < '2.1' when 'erb' template_class, extra_engine_options = (eruby_loaded ||= load_eruby(@eruby)) when 'rb' next else next unless ::Tilt.registered? 
ext_name end unless template_cache && (template = template_cache[file]) template = template_class.new file, 1, (@engine_options[ext_name.to_sym] || {}).merge(extra_engine_options) end result[name] = template end if ::File.file?(helpers = (::File.join template_dir, 'helpers.rb')) require helpers end result end # Internal: Load the eRuby implementation # # name - the String name of the eRuby implementation # # Returns an [Array] containing the Tilt template Class for the eRuby implementation # and a Hash of additional options to pass to the initializer def load_eruby name if !name || name == 'erb' require 'erb' unless defined? ::ERB [::Tilt::ERBTemplate, {}] elsif name == 'erubis' Helpers.require_library 'erubis' unless defined? ::Erubis::FastEruby [::Tilt::ErubisTemplate, { :engine_class => ::Erubis::FastEruby }] else raise ::ArgumentError, %(Unknown ERB implementation: #{name}) end end end end asciidoctor-1.5.5/lib/asciidoctor/core_ext.rb000066400000000000000000000004551277513741400212520ustar00rootroot00000000000000require 'asciidoctor/core_ext/nil_or_empty' if RUBY_MIN_VERSION_1_9 require 'asciidoctor/core_ext/string/limit' elsif RUBY_ENGINE != 'opal' require 'asciidoctor/core_ext/1.8.7/string/chr' require 'asciidoctor/core_ext/1.8.7/string/limit' require 'asciidoctor/core_ext/1.8.7/symbol/length' end asciidoctor-1.5.5/lib/asciidoctor/core_ext/000077500000000000000000000000001277513741400207215ustar00rootroot00000000000000asciidoctor-1.5.5/lib/asciidoctor/core_ext/1.8.7/000077500000000000000000000000001277513741400213745ustar00rootroot00000000000000asciidoctor-1.5.5/lib/asciidoctor/core_ext/1.8.7/string/000077500000000000000000000000001277513741400227025ustar00rootroot00000000000000asciidoctor-1.5.5/lib/asciidoctor/core_ext/1.8.7/string/chr.rb000066400000000000000000000001761277513741400240070ustar00rootroot00000000000000# Educate Ruby 1.8.7 about the String#chr method. class String def chr self[0..0] end unless method_defined? :chr end asciidoctor-1.5.5/lib/asciidoctor/core_ext/1.8.7/string/limit.rb000066400000000000000000000014631277513741400243510ustar00rootroot00000000000000if RUBY_ENGINE_JRUBY class String # Safely truncate the string to the specified number of bytes. # If a multibyte char gets split, the dangling fragment is removed. def limit size return self unless size < bytesize result = (unpack %(a#{size}))[0] begin result.unpack 'U*' rescue ArgumentError result.chop! retry end result end unless method_defined? :limit end else class String # Safely truncate the string to the specified number of bytes. # If a multibyte char gets split, the dangling fragment is removed. def limit size return self unless size < bytesize result = (unpack %(a#{size}))[0] result.chop! until result.empty? || /.$/u =~ result result end unless method_defined? :limit end end asciidoctor-1.5.5/lib/asciidoctor/core_ext/1.8.7/symbol/000077500000000000000000000000001277513741400227015ustar00rootroot00000000000000asciidoctor-1.5.5/lib/asciidoctor/core_ext/1.8.7/symbol/length.rb000066400000000000000000000002101277513741400245000ustar00rootroot00000000000000# Educate Ruby 1.8.7 about the Symbol#length method. class Symbol def length to_s.length end unless method_defined? :length end asciidoctor-1.5.5/lib/asciidoctor/core_ext/nil_or_empty.rb000066400000000000000000000011611277513741400237450ustar00rootroot00000000000000# A core library extension that defines the method nil_or_empty? as an alias to # optimize checks for nil? or empty? on common object types such as NilClass, # String, Array, Hash, and Numeric. 
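#
# Examples (illustrative)
#
#   nil.nil_or_empty?     # => true
#   ''.nil_or_empty?      # => true
#   'text'.nil_or_empty?  # => false
#   [].nil_or_empty?      # => true
#   42.nil_or_empty?      # => false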
class NilClass alias :nil_or_empty? :nil? unless method_defined? :nil_or_empty? end class String alias :nil_or_empty? :empty? unless method_defined? :nil_or_empty? end class Array alias :nil_or_empty? :empty? unless method_defined? :nil_or_empty? end class Hash alias :nil_or_empty? :empty? unless method_defined? :nil_or_empty? end class Numeric alias :nil_or_empty? :nil? unless method_defined? :nil_or_empty? end asciidoctor-1.5.5/lib/asciidoctor/core_ext/string/000077500000000000000000000000001277513741400222275ustar00rootroot00000000000000asciidoctor-1.5.5/lib/asciidoctor/core_ext/string/limit.rb000066400000000000000000000007221277513741400236730ustar00rootroot00000000000000class String # Safely truncate the string to the specified number of bytes. # If a multibyte char gets split, the dangling fragment is removed. def limit size return self unless size < bytesize # NOTE JRuby 1.7 & Rubinius fail to detect invalid encoding unless encoding is forced; impact is marginal. size -= 1 until ((result = byteslice 0, size).force_encoding ::Encoding::UTF_8).valid_encoding? result end unless method_defined? :limit end asciidoctor-1.5.5/lib/asciidoctor/document.rb000066400000000000000000001231001277513741400212510ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor # Public: Methods for parsing and converting AsciiDoc documents. # # There are several strategies for getting the title of the document: # # doctitle - value of title attribute, if assigned and non-empty, # otherwise title of first section in document, if present # otherwise nil # name - an alias of doctitle # title - value of the title attribute, or nil if not present # first_section.title - title of first section in document, if present # header.title - title of section level 0 # # Keep in mind that you'll want to honor these document settings: # # notitle - The h1 heading should not be shown # noheader - The header block (h1 heading, author, revision info) should not be shown # nofooter - the footer block should not be shown class Document < AbstractBlock Footnote = ::Struct.new :index, :id, :text class AttributeEntry attr_reader :name, :value, :negate def initialize name, value, negate = nil @name = name @value = value @negate = negate.nil? ? value.nil? : negate end def save_to block_attributes (block_attributes[:attribute_entries] ||= []) << self end end # Public Parsed and stores a partitioned title (i.e., title & subtitle). class Title attr_reader :main alias :title :main attr_reader :subtitle attr_reader :combined def initialize val, opts = {} # TODO separate sanitization by type (:cdata for HTML/XML, :plain_text for non-SGML, false for none) if (@sanitized = opts[:sanitize]) && val.include?('<') val = val.gsub(XmlSanitizeRx, '').tr_s(' ', ' ').strip end if (sep = opts[:separator] || ':').empty? || !val.include?(sep = %(#{sep} )) @main = val @subtitle = nil else @main, _, @subtitle = val.rpartition sep end @combined = val end def sanitized? @sanitized end def subtitle? !!@subtitle end def to_s @combined end end # Public A read-only integer value indicating the level of security that # should be enforced while processing this document. The value must be # set in the Document constructor using the :safe option. # # A value of 0 (UNSAFE) disables any of the security features enforced # by Asciidoctor (Ruby is still subject to its own restrictions). # # A value of 1 (SAFE) closely parallels safe mode in AsciiDoc. 
In particular, # it prevents access to files which reside outside of the parent directory # of the source file and disables any macro other than the include macro. # # A value of 10 (SERVER) disallows the document from setting attributes that # would affect the conversion of the document, in addition to all the security # features of SafeMode::SAFE. For instance, this value disallows changing the # backend or the source-highlighter using an attribute defined in the source # document. This is the most fundamental level of security for server-side # deployments (hence the name). # # A value of 20 (SECURE) disallows the document from attempting to read files # from the file system and including the contents of them into the document, # in addition to all the security features of SafeMode::SECURE. In # particular, it disallows use of the include::[] macro and the embedding of # binary content (data uri), stylesheets and JavaScripts referenced by the # document. (Asciidoctor and trusted extensions may still be allowed to embed # trusted content into the document). # # Since Asciidoctor is aiming for wide adoption, 20 (SECURE) is the default # value and is recommended for server-side deployments. # # A value of 100 (PARANOID) is planned to disallow the use of passthrough # macros and prevents the document from setting any known attributes in # addition to all the security features of SafeMode::SECURE. Please note that # this level is not currently implemented (and therefore not enforced)! attr_reader :safe # Public: Get the Boolean AsciiDoc compatibility mode # # enabling this attribute activates the following syntax changes: # # * single quotes as constrained emphasis formatting marks # * single backticks parsed as inline literal, formatted as monospace # * single plus parsed as constrained, monospaced inline formatting # * double plus parsed as constrained, monospaced inline formatting # attr_reader :compat_mode # Public: Get the Boolean flag that indicates whether source map information is tracked by the parser attr_reader :sourcemap # Public: Get the Hash of document references attr_reader :references # Public: Get the Hash of document counters attr_reader :counters # Public: Get the Hash of callouts attr_reader :callouts # Public: Get the level-0 Section attr_reader :header # Public: Get the String base directory for converting this document. # # Defaults to directory of the source file. # If the source is a string, defaults to the current directory. attr_reader :base_dir # Public: Get the Hash of resolved options used to initialize this Document attr_reader :options # Public: Get the outfilesuffix defined at the end of the header. attr_reader :outfilesuffix # Public: Get a reference to the parent Document of this nested document. attr_reader :parent_document # Public: Get the Reader associated with this document attr_reader :reader # Public: Get the Converter associated with this document attr_reader :converter # Public: Get the extensions registry attr_reader :extensions # Public: Initialize a {Document} object. # # data - The AsciiDoc source data as a String or String Array. (default: nil) # options - A Hash of options to control processing (e.g., safe mode value (:safe), backend (:backend), # header/footer toggle (:header_footer), custom attributes (:attributes)). (default: {}) # # Duplication of the options Hash is handled in the enclosing API. 
# # Examples # # data = File.read filename # doc = Asciidoctor::Document.new data # puts doc.convert def initialize data = nil, options = {} super self, :document if (parent_doc = options.delete :parent) @parent_document = parent_doc options[:base_dir] ||= parent_doc.base_dir @references = parent_doc.references.inject({}) do |accum, (key,ref)| if key == :footnotes accum[:footnotes] = [] else accum[key] = ref end accum end @callouts = parent_doc.callouts # QUESTION should we support setting attribute in parent document from nested document? # NOTE we must dup or else all the assignments to the overrides clobbers the real attributes attr_overrides = parent_doc.attributes.dup ['doctype', 'compat-mode', 'toc', 'toc-placement', 'toc-position'].each do |key| attr_overrides.delete key end @attribute_overrides = attr_overrides @safe = parent_doc.safe @compat_mode = parent_doc.compat_mode @sourcemap = parent_doc.sourcemap @converter = parent_doc.converter initialize_extensions = false @extensions = parent_doc.extensions else @parent_document = nil @references = { :ids => {}, :footnotes => [], :links => [], :images => [], :indexterms => [], :includes => ::Set.new, } @callouts = Callouts.new # copy attributes map and normalize keys # attribute overrides are attributes that can only be set from the commandline # a direct assignment effectively makes the attribute a constant # a nil value or name with leading or trailing ! will result in the attribute being unassigned attr_overrides = {} (options[:attributes] || {}).each do |key, value| if key.start_with? '!' key = key[1..-1] value = nil elsif key.end_with? '!' key = key.chop value = nil end attr_overrides[key.downcase] = value end @attribute_overrides = attr_overrides # safely resolve the safe mode from const, int or string if !(safe_mode = options[:safe]) @safe = SafeMode::SECURE elsif ::Integer === safe_mode # be permissive in case API user wants to define new levels @safe = safe_mode else # NOTE: not using infix rescue for performance reasons, see https://github.com/jruby/jruby/issues/1816 begin @safe = SafeMode.const_get(safe_mode.to_s.upcase) rescue @safe = SafeMode::SECURE end end @compat_mode = attr_overrides.key? 'compat-mode' @sourcemap = options[:sourcemap] @converter = nil initialize_extensions = defined? 
::Asciidoctor::Extensions @extensions = nil # initialize furthur down end @parsed = false @header = nil @counters = {} @attributes_modified = ::Set.new @options = options @docinfo_processor_extensions = {} header_footer = (options[:header_footer] ||= false) options.freeze attrs = @attributes #attrs['encoding'] = 'UTF-8' attrs['sectids'] = '' attrs['notitle'] = '' unless header_footer attrs['toc-placement'] = 'auto' attrs['stylesheet'] = '' attrs['webfonts'] = '' attrs['copycss'] = '' if header_footer attrs['prewrap'] = '' attrs['attribute-undefined'] = Compliance.attribute_undefined attrs['attribute-missing'] = Compliance.attribute_missing attrs['iconfont-remote'] = '' # language strings # TODO load these based on language settings attrs['caution-caption'] = 'Caution' attrs['important-caption'] = 'Important' attrs['note-caption'] = 'Note' attrs['tip-caption'] = 'Tip' attrs['warning-caption'] = 'Warning' attrs['appendix-caption'] = 'Appendix' attrs['example-caption'] = 'Example' attrs['figure-caption'] = 'Figure' #attrs['listing-caption'] = 'Listing' attrs['table-caption'] = 'Table' attrs['toc-title'] = 'Table of Contents' #attrs['preface-title'] = 'Preface' attrs['manname-title'] = 'NAME' attrs['untitled-label'] = 'Untitled' attrs['version-label'] = 'Version' attrs['last-update-label'] = 'Last updated' attr_overrides['asciidoctor'] = '' attr_overrides['asciidoctor-version'] = VERSION safe_mode_name = SafeMode.constants.find {|l| SafeMode.const_get(l) == @safe }.to_s.downcase attr_overrides['safe-mode-name'] = safe_mode_name attr_overrides["safe-mode-#{safe_mode_name}"] = '' attr_overrides['safe-mode-level'] = @safe # sync the embedded attribute w/ the value of options...do not allow override attr_overrides['embedded'] = header_footer ? nil : '' # the only way to set the max-include-depth attribute is via the API; default to 64 like AsciiDoc Python attr_overrides['max-include-depth'] ||= 64 # the only way to set the allow-uri-read attribute is via the API; disabled by default attr_overrides['allow-uri-read'] ||= nil attr_overrides['user-home'] = USER_HOME # legacy support for numbered attribute attr_overrides['sectnums'] = attr_overrides.delete 'numbered' if attr_overrides.key? 'numbered' # if the base_dir option is specified, it overrides docdir as the root for relative paths # otherwise, the base_dir is the directory of the source file (docdir) or the current # directory of the input is a string if options[:base_dir] @base_dir = attr_overrides['docdir'] = ::File.expand_path(options[:base_dir]) else if attr_overrides['docdir'] @base_dir = attr_overrides['docdir'] = ::File.expand_path(attr_overrides['docdir']) else #warn 'asciidoctor: WARNING: setting base_dir is recommended when working with string documents' unless nested? 
@base_dir = attr_overrides['docdir'] = ::File.expand_path(::Dir.pwd) end end # allow common attributes backend and doctype to be set using options hash, coerce values to string if (backend_val = options[:backend]) attr_overrides['backend'] = %(#{backend_val}) end if (doctype_val = options[:doctype]) attr_overrides['doctype'] = %(#{doctype_val}) end if @safe >= SafeMode::SERVER # restrict document from setting copycss, source-highlighter and backend attr_overrides['copycss'] ||= nil attr_overrides['source-highlighter'] ||= nil attr_overrides['backend'] ||= DEFAULT_BACKEND # restrict document from seeing the docdir and trim docfile to relative path if !parent_doc && attr_overrides.key?('docfile') attr_overrides['docfile'] = attr_overrides['docfile'][(attr_overrides['docdir'].length + 1)..-1] end attr_overrides['docdir'] = '' attr_overrides['user-home'] = '.' if @safe >= SafeMode::SECURE attr_overrides['max-attribute-value-size'] = 4096 unless attr_overrides.key? 'max-attribute-value-size' # assign linkcss (preventing css embedding) unless explicitly disabled from the commandline or API # effectively the same has "has key 'linkcss' and value == nil" unless attr_overrides.fetch('linkcss', '').nil? attr_overrides['linkcss'] = '' end # restrict document from enabling icons attr_overrides['icons'] ||= nil end end # the only way to set the max-attribute-value-size attribute is via the API; disabled by default @max_attribute_value_size = (val = (attr_overrides['max-attribute-value-size'] ||= nil)) ? val.to_i.abs : nil attr_overrides.delete_if do |key, val| verdict = false # a nil value undefines the attribute if val.nil? attrs.delete(key) else # a value ending in @ indicates this attribute does not override # an attribute with the same key in the document souce if ::String === val && (val.end_with? '@') val = val.chop verdict = true end attrs[key] = val end verdict end if parent_doc # setup default doctype (backend is fixed) attrs['doctype'] ||= DEFAULT_DOCTYPE # don't need to do the extra processing within our own document # FIXME line info isn't reported correctly within include files in nested document @reader = Reader.new data, options[:cursor] # Now parse the lines in the reader into blocks # Eagerly parse (for now) since a subdocument is not a publicly accessible object Parser.parse @reader, self # should we call some sort of post-parse function? restore_attributes @parsed = true else # setup default backend and doctype if (attrs['backend'] ||= DEFAULT_BACKEND) == 'manpage' attrs['doctype'] = attr_overrides['doctype'] = 'manpage' else attrs['doctype'] ||= DEFAULT_DOCTYPE end update_backend_attributes attrs['backend'], true #attrs['indir'] = attrs['docdir'] #attrs['infile'] = attrs['docfile'] # dynamic intrinstic attribute values # See https://reproducible-builds.org/specs/source-date-epoch/ now = ::ENV['SOURCE_DATE_EPOCH'] ? 
(::Time.at ::ENV['SOURCE_DATE_EPOCH'].to_i).utc : ::Time.now localdate = (attrs['localdate'] ||= now.strftime('%Y-%m-%d')) unless (localtime = attrs['localtime']) begin localtime = attrs['localtime'] = now.strftime('%H:%M:%S %Z') rescue # Asciidoctor.js fails if timezone string has characters outside basic Latin (see asciidoctor.js#23) localtime = attrs['localtime'] = now.strftime('%H:%M:%S %z') end end attrs['localdatetime'] ||= %(#{localdate} #{localtime}) # docdate, doctime and docdatetime should default to # localdate, localtime and localdatetime if not otherwise set attrs['docdate'] ||= localdate attrs['doctime'] ||= localtime attrs['docdatetime'] ||= %(#{localdate} #{localtime}) # fallback directories attrs['stylesdir'] ||= '.' attrs['iconsdir'] ||= ::File.join(attrs.fetch('imagesdir', './images'), 'icons') if initialize_extensions if (registry = options[:extensions_registry]) if Extensions::Registry === registry || (::RUBY_ENGINE_JRUBY && ::AsciidoctorJ::Extensions::ExtensionRegistry === registry) # take it as it is else registry = Extensions::Registry.new end elsif ::Proc === (ext_block = options[:extensions]) registry = Extensions.build_registry(&ext_block) else registry = Extensions::Registry.new end @extensions = registry.activate self end @reader = PreprocessorReader.new self, data, Reader::Cursor.new(attrs['docfile'], @base_dir) end end # Public: Parse the AsciiDoc source stored in the {Reader} into an abstract syntax tree. # # If the data parameter is not nil, create a new {PreprocessorReader} and assigned it to the reader # property of this object. Otherwise, continue with the reader that was created in {#initialize}. # Pass the reader to {Parser.parse} to parse the source data into an abstract syntax tree. # # If parsing has already been performed, this method returns without performing any processing. # # data - The optional replacement AsciiDoc source data as a String or String Array. (default: nil) # # Returns this [Document] def parse data = nil if @parsed self else doc = self # create reader if data is provided (used when data is not known at the time the Document object is created) @reader = PreprocessorReader.new doc, data, Reader::Cursor.new(@attributes['docfile'], @base_dir) if data if (exts = @parent_document ? nil : @extensions) && exts.preprocessors? exts.preprocessors.each do |ext| @reader = ext.process_method[doc, @reader] || @reader end end # Now parse the lines in the reader into blocks Parser.parse @reader, doc, :header_only => !!@options[:parse_header_only] # should we call sort of post-parse function? restore_attributes if exts && exts.treeprocessors? exts.treeprocessors.each do |ext| if (result = ext.process_method[doc]) && Document === result && result != doc doc = result end end end @parsed = true doc end end # Public: Get the named counter and take the next number in the sequence. # # name - the String name of the counter # seed - the initial value as a String or Integer # # returns the next number in the sequence for the specified counter def counter(name, seed = nil) if (attr_is_seed = !(attr_val = @attributes[name]).nil_or_empty?) && @counters.key?(name) @counters[name] = nextval(attr_val) else if seed.nil? seed = nextval(attr_is_seed ? 
attr_val : 0) elsif seed.to_i.to_s == seed seed = seed.to_i end @counters[name] = seed end (@attributes[name] = @counters[name]) end # Public: Increment the specified counter and store it in the block's attributes # # counter_name - the String name of the counter attribute # block - the Block on which to save the counter # # returns the next number in the sequence for the specified counter def counter_increment(counter_name, block) val = counter(counter_name) AttributeEntry.new(counter_name, val).save_to(block.attributes) val end # Internal: Get the next value in the sequence. # # Handles both integer and character sequences. # # current - the value to increment as a String or Integer # # returns the next value in the sequence according to the current value's type def nextval(current) if ::Integer === current current + 1 else intval = current.to_i if intval.to_s != current.to_s (current[0].ord + 1).chr else intval + 1 end end end def register(type, value, force = false) case type when :ids id, reftext = [*value] reftext ||= '[' + id + ']' if force @references[:ids][id] = reftext else @references[:ids][id] ||= reftext end when :footnotes, :indexterms @references[type] << value else if @options[:catalog_assets] @references[type] << value end end end def footnotes? !@references[:footnotes].empty? end def footnotes @references[:footnotes] end def nested? !!@parent_document end def embedded? # QUESTION should this be !@options[:header_footer] ? @attributes.key? 'embedded' end def extensions? !!@extensions end # Make the raw source for the Document available. def source @reader.source if @reader end # Make the raw source lines for the Document available. def source_lines @reader.source_lines if @reader end def doctype @doctype ||= @attributes['doctype'] end def backend @backend ||= @attributes['backend'] end def basebackend? base @attributes['basebackend'] == base end # The title explicitly defined in the document attributes def title @attributes['title'] end def title=(title) @header ||= Section.new(self, 0) @header.title = title end # Public: Resolves the primary title for the document # # Searches the locations to find the first non-empty # value: # # * document-level attribute named title # * header title (known as the document title) # * title of the first section # * document-level attribute named untitled-label (if :use_fallback option is set) # # If no value can be resolved, nil is returned. # # If the :partition attribute is specified, the value is parsed into an Document::Title object. # If the :sanitize attribute is specified, XML elements are removed from the value. # # TODO separate sanitization by type (:cdata for HTML/XML, :plain_text for non-SGML, false for none) # # Returns the resolved title as a [Title] if the :partition option is passed or a [String] if not # or nil if no value can be resolved. def doctitle opts = {} if !(val = @attributes['title'].nil_or_empty?) val = title elsif (sect = first_section) && sect.title? val = sect.title elsif opts[:use_fallback] && (val = @attributes['untitled-label']) # use val set in condition else return end if (separator = opts[:partition]) Title.new val, opts.merge({ :separator => (separator == true ? 
@attributes['title-separator'] : separator) }) elsif opts[:sanitize] && val.include?('<') val.gsub(XmlSanitizeRx, '').tr_s(' ', ' ').strip else val end end alias :name :doctitle # Public: Convenience method to retrieve the document attribute 'author' # # returns the full name of the author as a String def author @attributes['author'] end # Public: Convenience method to retrieve the document attribute 'revdate' # # returns the date of last revision for the document as a String def revdate @attributes['revdate'] end def notitle !@attributes.key?('showtitle') && @attributes.key?('notitle') end def noheader @attributes.key? 'noheader' end def nofooter @attributes.key? 'nofooter' end # QUESTION move to AbstractBlock? def first_section has_header? ? @header : (@blocks || []).find {|e| e.context == :section } end def has_header? @header ? true : false end alias :header? :has_header? # Public: Append a content Block to this Document. # # If the child block is a Section, assign an index to it. # # block - The child Block to append to this parent Block # # Returns The parent Block def << block assign_index block if block.context == :section super end # Internal: called after the header has been parsed and before the content # will be parsed. #-- # QUESTION should we invoke the Treeprocessors here, passing in a phase? # QUESTION is finalize_header the right name? def finalize_header unrooted_attributes, header_valid = true clear_playback_attributes unrooted_attributes save_attributes unrooted_attributes['invalid-header'] = true unless header_valid unrooted_attributes end # Internal: Branch the attributes so that the original state can be restored # at a future time. def save_attributes # enable toc and sectnums (i.e., numbered) by default in DocBook backend # NOTE the attributes_modified should go away once we have a proper attribute storage & tracking facility if (attrs = @attributes)['basebackend'] == 'docbook' attrs['toc'] = '' unless attribute_locked?('toc') || @attributes_modified.include?('toc') attrs['sectnums'] = '' unless attribute_locked?('sectnums') || @attributes_modified.include?('sectnums') end unless attrs.key?('doctitle') || !(val = doctitle) attrs['doctitle'] = val end # css-signature cannot be updated after header attributes are processed @id = attrs['css-signature'] unless @id toc_position_val = if (toc_val = (attrs.delete('toc2') ? 'left' : attrs['toc'])) # toc-placement allows us to separate position from using fitted slot vs macro (toc_placement = attrs.fetch('toc-placement', 'macro')) && toc_placement != 'auto' ? toc_placement : attrs['toc-position'] else nil end if toc_val && (!toc_val.empty? || !toc_position_val.nil_or_empty?) default_toc_position = 'left' # TODO rename toc2 to aside-toc default_toc_class = 'toc2' if !toc_position_val.nil_or_empty? position = toc_position_val elsif !toc_val.empty? position = toc_val else position = default_toc_position end attrs['toc'] = '' attrs['toc-placement'] = 'auto' case position when 'left', '<', '<' attrs['toc-position'] = 'left' when 'right', '>', '>' attrs['toc-position'] = 'right' when 'top', '^' attrs['toc-position'] = 'top' when 'bottom', 'v' attrs['toc-position'] = 'bottom' when 'preamble', 'macro' attrs['toc-position'] = 'content' attrs['toc-placement'] = position default_toc_class = nil else attrs.delete 'toc-position' default_toc_class = nil end attrs['toc-class'] ||= default_toc_class if default_toc_class end if attrs.key? 'compat-mode' attrs['source-language'] = attrs['language'] if attrs.has_key? 
'language' @compat_mode = true else @compat_mode = false end # NOTE pin the outfilesuffix after the header is parsed @outfilesuffix = attrs['outfilesuffix'] @header_attributes = attrs.dup # unfreeze "flexible" attributes unless @parent_document FLEXIBLE_ATTRIBUTES.each do |name| # turning a flexible attribute off should be permanent # (we may need more config if that's not always the case) if @attribute_overrides.key?(name) && @attribute_overrides[name] @attribute_overrides.delete(name) end end end end # Internal: Restore the attributes to the previously saved state (attributes in header) def restore_attributes @callouts.rewind unless @parent_document # QUESTION shouldn't this be a dup in case we convert again? @attributes = @header_attributes end # Internal: Delete any attributes stored for playback def clear_playback_attributes(attributes) attributes.delete(:attribute_entries) end # Internal: Replay attribute assignments at the block level def playback_attributes(block_attributes) if block_attributes.key? :attribute_entries block_attributes[:attribute_entries].each do |entry| name = entry.name if entry.negate @attributes.delete name @compat_mode = false if name == 'compat-mode' else @attributes[name] = entry.value @compat_mode = true if name == 'compat-mode' end end end end # Public: Set the specified attribute on the document if the name is not locked # # If the attribute is locked, false is returned. Otherwise, the value is # assigned to the attribute name after first performing attribute # substitutions on the value. If the attribute name is 'backend', then the # value of backend-related attributes are updated. # # name - the String attribute name # value - the String attribute value # # returns true if the attribute was set, false if it was not set because it's locked def set_attribute(name, value) if attribute_locked?(name) false else if @max_attribute_value_size resolved_value = (apply_attribute_value_subs value).limit @max_attribute_value_size else resolved_value = apply_attribute_value_subs value end case name when 'backend' update_backend_attributes resolved_value, !!@attributes_modified.delete?('htmlsyntax') when 'doctype' update_doctype_attributes resolved_value else @attributes[name] = resolved_value end @attributes_modified << name true end end # Public: Delete the specified attribute from the document if the name is not locked # # If the attribute is locked, false is returned. Otherwise, the attribute is deleted. # # name - the String attribute name # # returns true if the attribute was deleted, false if it was not because it's locked def delete_attribute(name) if attribute_locked?(name) false else @attributes.delete(name) @attributes_modified << name true end end # Public: Determine if the attribute has been locked by being assigned in document options # # key - The attribute key to check # # Returns true if the attribute is locked, false otherwise def attribute_locked?(name) @attribute_overrides.key?(name) end # Internal: Apply substitutions to the attribute value # # If the value is an inline passthrough macro (e.g., pass:[value]), # apply the substitutions defined in to the value, or leave the value # unmodified if no substitutions are specified. If the value is not an # inline passthrough macro, apply header substitutions to the value. # # value - The String attribute value on which to perform substitutions # # Returns The String value with substitutions performed def apply_attribute_value_subs(value) if (m = AttributeEntryPassMacroRx.match(value)) if !m[1].empty? 
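# Illustrative example: an attribute entry such as
#   :footer-text: pass:a[Updated {docdate}]
# resolves the substitutions named in the macro target ('a' for attributes)
# and applies only those to the enclosed value.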
subs = resolve_pass_subs m[1] subs.empty? ? m[2] : (apply_subs m[2], subs) else m[2] end else apply_header_subs value end end # Public: Update the backend attributes to reflect a change in the selected backend # # This method also handles updating the related doctype attributes if the # doctype attribute is assigned at the time this method is called. def update_backend_attributes new_backend, force = false if force || (new_backend && new_backend != @attributes['backend']) attrs = @attributes current_backend = attrs['backend'] current_basebackend = attrs['basebackend'] current_doctype = attrs['doctype'] if new_backend.start_with? 'xhtml' attrs['htmlsyntax'] = 'xml' new_backend = new_backend[1..-1] elsif new_backend.start_with? 'html' attrs['htmlsyntax'] = 'html' unless attrs['htmlsyntax'] == 'xml' end if (resolved_name = BACKEND_ALIASES[new_backend]) new_backend = resolved_name end if current_backend attrs.delete %(backend-#{current_backend}) if current_doctype attrs.delete %(backend-#{current_backend}-doctype-#{current_doctype}) end end if current_doctype attrs[%(doctype-#{current_doctype})] = '' attrs[%(backend-#{new_backend}-doctype-#{current_doctype})] = '' end attrs['backend'] = new_backend attrs[%(backend-#{new_backend})] = '' # (re)initialize converter if Converter::BackendInfo === (@converter = create_converter) new_basebackend = @converter.basebackend attrs['outfilesuffix'] = @converter.outfilesuffix unless attribute_locked? 'outfilesuffix' new_filetype = @converter.filetype else new_basebackend = new_backend.sub TrailingDigitsRx, '' # QUESTION should we be forcing the basebackend to html if unknown? new_outfilesuffix = DEFAULT_EXTENSIONS[new_basebackend] || '.html' new_filetype = new_outfilesuffix[1..-1] attrs['outfilesuffix'] = new_outfilesuffix unless attribute_locked? 
'outfilesuffix' end if (current_filetype = attrs['filetype']) attrs.delete %(filetype-#{current_filetype}) end attrs['filetype'] = new_filetype attrs[%(filetype-#{new_filetype})] = '' if (page_width = DEFAULT_PAGE_WIDTHS[new_basebackend]) attrs['pagewidth'] = page_width else attrs.delete 'pagewidth' end if new_basebackend != current_basebackend if current_basebackend attrs.delete %(basebackend-#{current_basebackend}) if current_doctype attrs.delete %(basebackend-#{current_basebackend}-doctype-#{current_doctype}) end end attrs['basebackend'] = new_basebackend attrs[%(basebackend-#{new_basebackend})] = '' attrs[%(basebackend-#{new_basebackend}-doctype-#{current_doctype})] = '' if current_doctype end # clear cached backend value @backend = nil end end def update_doctype_attributes new_doctype if new_doctype && new_doctype != @attributes['doctype'] attrs = @attributes current_doctype = attrs['doctype'] current_backend = attrs['backend'] current_basebackend = attrs['basebackend'] if current_doctype attrs.delete %(doctype-#{current_doctype}) attrs.delete %(backend-#{current_backend}-doctype-#{current_doctype}) if current_backend attrs.delete %(basebackend-#{current_basebackend}-doctype-#{current_doctype}) if current_basebackend end attrs['doctype'] = new_doctype attrs[%(doctype-#{new_doctype})] = '' attrs[%(backend-#{current_backend}-doctype-#{new_doctype})] = '' if current_backend attrs[%(basebackend-#{current_basebackend}-doctype-#{new_doctype})] = '' if current_basebackend # clear cached doctype value @doctype = nil end end # TODO document me def create_converter converter_opts = {} converter_opts[:htmlsyntax] = @attributes['htmlsyntax'] template_dirs = if (template_dir = @options[:template_dir]) converter_opts[:template_dirs] = [template_dir] elsif (template_dirs = @options[:template_dirs]) converter_opts[:template_dirs] = template_dirs end if template_dirs converter_opts[:template_cache] = @options.fetch :template_cache, true converter_opts[:template_engine] = @options[:template_engine] converter_opts[:template_engine_options] = @options[:template_engine_options] converter_opts[:eruby] = @options[:eruby] converter_opts[:safe] = @safe end if (converter = @options[:converter]) converter_factory = Converter::Factory.new ::Hash[backend, converter] else converter_factory = Converter::Factory.default false end # QUESTION should we honor the convert_opts? # QUESTION should we pass through all options and attributes too? #converter_opts.update opts converter_factory.create backend, converter_opts end # Public: Convert the AsciiDoc document using the templates # loaded by the Converter. If a :template_dir is not specified, # or a template is missing, the converter will fall back to # using the appropriate built-in template. #-- # QUESTION should we dup @header_attributes before converting? def convert opts = {} parse unless @parsed unless @safe >= SafeMode::SERVER || opts.empty? # QUESTION should we store these on the Document object? @attributes.delete 'outfile' unless (@attributes['outfile'] = opts['outfile']) @attributes.delete 'outdir' unless (@attributes['outdir'] = opts['outdir']) end # QUESTION should we add processors that execute before conversion begins? unless @converter fail %(asciidoctor: FAILED: missing converter for backend '#{backend}'. Processing aborted.) end if doctype == 'inline' # QUESTION should we warn if @blocks.size > 0 and the first block is not a paragraph? 
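# Illustrative example: with the inline doctype, only the converted content of
# the first block is returned, e.g.
#
#   Asciidoctor.convert 'See http://asciidoctor.org[Asciidoctor].', :doctype => 'inline'
#
# yields just the phrase with the link converted, without an enclosing <p> or
# document shell.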
if (block = @blocks[0]) && block.content_model != :compound output = block.content else output = nil end else transform = ((opts.key? :header_footer) ? opts[:header_footer] : @options[:header_footer]) ? 'document' : 'embedded' output = @converter.convert self, transform end unless @parent_document if (exts = @extensions) && exts.postprocessors? exts.postprocessors.each do |ext| output = ext.process_method[self, output] end end end output end # Alias render to convert to maintain backwards compatibility alias :render :convert # Public: Write the output to the specified file # # If the converter responds to :write, delegate the work of writing the file # to that method. Otherwise, write the output the specified file. def write output, target if Writer === @converter @converter.write output, target else if target.respond_to? :write unless output.nil_or_empty? target.write output.chomp # ensure there's a trailing endline target.write EOL end else ::File.open(target, 'w') {|f| f.write output } end nil end end =begin def convert_to target, opts = {} start = ::Time.now.to_f if (monitor = opts[:monitor]) output = (r = converter opts).convert monitor[:convert] = ::Time.now.to_f - start if monitor unless target.respond_to? :write @attributes['outfile'] = target = ::File.expand_path target @attributes['outdir'] = ::File.dirname target end start = ::Time.now.to_f if monitor r.write output, target monitor[:write] = ::Time.now.to_f - start if monitor output end =end def content # NOTE per AsciiDoc-spec, remove the title before converting the body @attributes.delete('title') super end # Public: Read the docinfo file(s) for inclusion in the document template # # If the docinfo1 attribute is set, read the docinfo.ext file. If the docinfo # attribute is set, read the doc-name.docinfo.ext file. If the docinfo2 # attribute is set, read both files in that order. # # location - The Symbol location of the docinfo (e.g., :head, :footer, etc). (default: :head) # suffix - The suffix of the docinfo file(s). If not set, the extension # will be set to the outfilesuffix. (default: nil) # # returns The contents of the docinfo file(s) or empty string if no files are # found or the safe mode is secure or greater. def docinfo location = :head, suffix = nil if safe >= SafeMode::SECURE '' else qualifier = location == :head ? nil : %(-#{location}) suffix = @outfilesuffix unless suffix docinfodir = @attributes['docinfodir'] content = nil if (docinfo = @attributes['docinfo']).nil_or_empty? if @attributes.key? 'docinfo2' docinfo = ['private', 'shared'] elsif @attributes.key? 'docinfo1' docinfo = ['shared'] else docinfo = docinfo ? ['private'] : nil end else docinfo = docinfo.split(',').map(&:strip) end if docinfo docinfo_filename = %(docinfo#{qualifier}#{suffix}) unless (docinfo & ['shared', %(shared-#{location})]).empty? docinfo_path = normalize_system_path(docinfo_filename, docinfodir) # NOTE normalizing the lines is essential if we're performing substitutions if (content = read_asset(docinfo_path, :normalize => true)) if (docinfosubs ||= resolve_docinfo_subs) content = (docinfosubs == :attributes) ? sub_attributes(content) : apply_subs(content, docinfosubs) end end end unless @attributes['docname'].nil_or_empty? || (docinfo & ['private', %(private-#{location})]).empty? 
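# Illustrative example: for a source file named manual.adoc converted with the
# html backend, the private docinfo file resolved here is manual-docinfo.html
# for the :head location, or manual-docinfo-footer.html for the :footer location.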
docinfo_path = normalize_system_path(%(#{@attributes['docname']}-#{docinfo_filename}), docinfodir) # NOTE normalizing the lines is essential if we're performing substitutions if (content2 = read_asset(docinfo_path, :normalize => true)) if (docinfosubs ||= resolve_docinfo_subs) content2 = (docinfosubs == :attributes) ? sub_attributes(content2) : apply_subs(content2, docinfosubs) end content = content ? %(#{content}#{EOL}#{content2}) : content2 end end end # TODO allow document to control whether extension docinfo is contributed if @extensions && docinfo_processors?(location) contentx = @docinfo_processor_extensions[location].map {|candidate| candidate.process_method[self] }.compact * EOL content = content ? %(#{content}#{EOL}#{contentx}) : contentx end # coerce to string (in case the value is nil) %(#{content}) end end def resolve_docinfo_subs if @attributes.key? 'docinfosubs' subs = resolve_subs @attributes['docinfosubs'], :block, nil, 'docinfo' subs.empty? ? nil : subs else :attributes end end def docinfo_processors?(location = :head) if @docinfo_processor_extensions.key?(location) # false means we already performed a lookup and didn't find any @docinfo_processor_extensions[location] != false else if @extensions && @document.extensions.docinfo_processors?(location) !!(@docinfo_processor_extensions[location] = @document.extensions.docinfo_processors(location)) else @docinfo_processor_extensions[location] = false end end end def to_s %(#<#{self.class}@#{object_id} {doctype: #{doctype.inspect}, doctitle: #{(@header != nil ? @header.title : nil).inspect}, blocks: #{@blocks.size}}>) end end end asciidoctor-1.5.5/lib/asciidoctor/extensions.rb000066400000000000000000001343241277513741400216440ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor # Extensions provide a way to participate in the parsing and converting # phases of the AsciiDoc processor or extend the AsciiDoc syntax. # # The various extensions participate in AsciiDoc processing as follows: # # 1. After the source lines are normalized, {Preprocessor}s modify or replace # the source lines before parsing begins. {IncludeProcessor}s are used to # process include directives for targets which they claim to handle. # 2. The Parser parses the block-level content into an abstract syntax tree. # Custom blocks and block macros are processed by associated {BlockProcessor}s # and {BlockMacroProcessor}s, respectively. # 3. {Treeprocessor}s are run on the abstract syntax tree. # 4. Conversion of the document begins, at which point inline markup is processed # and converted. Custom inline macros are processed by associated {InlineMacroProcessor}s. # 5. {Postprocessor}s modify or replace the converted document. # 6. The output is written to the output stream. # # Extensions may be registered globally using the {Extensions.register} method # or added to a custom {Registry} instance and passed as an option to a single # Asciidoctor processor. module Extensions # Public: An abstract base class for document and syntax processors. # # This class provides access to a class-level Hash for holding default # configuration options defined using the {Processor.option} method. This # style of default configuration is specific to the native Ruby environment # and is only consulted inside the initializer. An overriding configuration # Hash can be passed to the initializer. Once the processor is initialized, # the configuration is accessed using the {Processor#config} instance variable. 
# # Instances of the Processor class provide convenience methods for creating # AST nodes, such as Block and Inline, and for parsing child content. class Processor class << self # Public: Get the static configuration for this processor class. # # Returns a configuration [Hash] def config @config ||= {} end # Public: Assigns a default value for the specified option that gets # applied to all instances of this processor. # # Examples # # option :contexts, [:open, :paragraph] # # Returns nothing def option key, default_value config[key] = default_value end # Include the DSL class for this processor into this processor class or instance. # # This method automatically detects whether to use the include or extend keyword # based on what is appropriate. # # NOTE Inspiration for this DSL design comes from https://corcoran.io/2013/09/04/simple-pattern-ruby-dsl/ # # Returns nothing def use_dsl if self.name.nil_or_empty? # NOTE contants(false) doesn't exist in Ruby 1.8.7 #include const_get :DSL if constants(false).grep :DSL include const_get :DSL if constants.grep :DSL else # NOTE contants(false) doesn't exist in Ruby 1.8.7 #extend const_get :DSL if constants(false).grep :DSL extend const_get :DSL if constants.grep :DSL end end alias :extend_dsl :use_dsl alias :include_dsl :use_dsl end # Public: Get the configuration Hash for this processor instance. attr_reader :config def initialize config = {} @config = self.class.config.merge config end def update_config config @config.update config end def process *args raise ::NotImplementedError end def create_block parent, context, source, attrs, opts = {} Block.new parent, context, { :source => source, :attributes => attrs }.merge(opts) end def create_image_block parent, attrs, opts = {} create_block parent, :image, nil, attrs, opts end def create_inline parent, context, text, opts = {} Inline.new parent, context, text, opts end # Public: Parses blocks in the content and attaches the block to the parent. # # Returns The parent node into which the blocks are parsed. #-- # QUESTION is parse_content the right method name? should we wrap in open block automatically? def parse_content parent, content, attributes = nil reader = Reader === content ? content : (Reader.new content) while reader.has_more_lines? block = Parser.next_block reader, parent, (attributes ? attributes.dup : {}) parent << block if block end parent end # TODO fill out remaining methods [ [:create_paragraph, :create_block, :paragraph], [:create_open_block, :create_block, :open], [:create_example_block, :create_block, :example], [:create_pass_block, :create_block, :pass], [:create_listing_block, :create_block, :listing], [:create_literal_block, :create_block, :literal], [:create_anchor, :create_inline, :anchor] ].each do |method_name, delegate_method_name, context| define_method method_name do |*args| send delegate_method_name, *args.dup.insert(1, context) end end end # Internal: Overlays a builder DSL for configuring the Processor instance. # Includes a method to define configuration options and another to define the # {Processor#process} method. module ProcessorDsl def option key, value config[key] = value end def process *args, &block # need to check for both block/proc and lambda # TODO need test for this! #if block_given? || (args.size == 1 && ::Proc === (block = args[0])) if block_given? @process_block = block elsif @process_block # NOTE Proc automatically expands a single array argument # ...but lambda doesn't (and we want to accept lambdas too) # TODO need a test for this! 
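# when no block is given, invoke the previously registered block with the splatted arguments, so both procs and lambdas work as process implementations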
@process_block.call(*args) else raise ::NotImplementedError end end #alias :process_with :process def process_block_given? defined? @process_block end end # Public: Preprocessors are run after the source text is split into lines and # normalized, but before parsing begins. # # Prior to invoking the preprocessor, Asciidoctor splits the source text into # lines and normalizes them. The normalize process strips trailing whitespace # from each line and leaves behind a line-feed character (i.e., "\n"). # # Asciidoctor passes a reference to the Reader and a copy of the lines Array # to the {Processor#process} method of an instance of each registered # Preprocessor. The Preprocessor modifies the Array as necessary and either # returns a reference to the same Reader or a reference to a new Reader. # # Preprocessor implementations must extend the Preprocessor class. class Preprocessor < Processor def process document, reader raise ::NotImplementedError end end Preprocessor::DSL = ProcessorDsl # Public: Treeprocessors are run on the Document after the source has been # parsed into an abstract syntax tree (AST), as represented by the Document # object and its child Node objects (e.g., Section, Block, List, ListItem). # # Asciidoctor invokes the {Processor#process} method on an instance of each # registered Treeprocessor. # # Treeprocessor implementations must extend Treeprocessor. #-- # QUESTION should the treeprocessor get invoked after parse header too? class Treeprocessor < Processor def process document raise ::NotImplementedError end end Treeprocessor::DSL = ProcessorDsl # Public: Postprocessors are run after the document is converted, but before # it is written to the output stream. # # Asciidoctor passes a reference to the converted String to the {Processor#process} # method of each registered Postprocessor. The Preprocessor modifies the # String as necessary and returns the String replacement. # # The markup format in the String is determined by the backend used to convert # the Document. The backend and be looked up using the backend method on the # Document object, as well as various backend-related document attributes. # # TIP: Postprocessors can also be used to relocate assets needed by the published # document. # # Postprocessor implementations must Postprocessor. class Postprocessor < Processor def process document, output raise ::NotImplementedError end end Postprocessor::DSL = ProcessorDsl # Public: IncludeProcessors are used to process `include::[]` # directives in the source document. # # When Asciidoctor comes across a `include::[]` directive in the # source document, it iterates through the IncludeProcessors and delegates # the work of reading the content to the first processor that identifies # itself as capable of handling that target. # # IncludeProcessor implementations must extend IncludeProcessor. #-- # TODO add file extension or regexp to shortcut handles? class IncludeProcessor < Processor def process document, reader, target, attributes raise ::NotImplementedError end def handles? target true end end IncludeProcessor::DSL = ProcessorDsl # Public: DocinfoProcessors are used to add additional content to # the header and/or footer of the generated document. # # The placement of docinfo content is controlled by the converter. # # DocinfoProcessors implementations must extend DocinfoProcessor. # If a location is not specified, the DocinfoProcessor is assumed # to add content to the header. 
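#
# For instance, a processor that contributes a meta tag to the HTML head might
# look like this (MetaDocinfoProcessor and the tag it emits are illustrative
# only, not part of the API):
#
#   class MetaDocinfoProcessor < Asciidoctor::Extensions::DocinfoProcessor
#     use_dsl
#     at_location :head
#
#     def process document
#       '<meta name="generator" content="Asciidoctor">'
#     end
#   end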
class DocinfoProcessor < Processor attr_accessor :location def initialize config = {} super config @config[:location] ||= :head end def process document raise ::NotImplementedError end end module DocinfoProcessorDsl include ProcessorDsl def at_location value option :location, value end end DocinfoProcessor::DSL = DocinfoProcessorDsl # Public: BlockProcessors are used to handle delimited blocks and paragraphs # that have a custom name. # # When Asciidoctor encounters a delimited block or paragraph with an # unrecognized name while parsing the document, it looks for a BlockProcessor # registered to handle this name and, if found, invokes its {Processor#process} # method to build a cooresponding node in the document tree. # # AsciiDoc example: # # [shout] # Get a move on. # # Recognized options: # # * :named - The name of the block (required: true) # * :contexts - The blocks contexts on which this style can be used (default: [:paragraph, :open] # * :content_model - The structure of the content supported in this block (default: :compound) # * :positional_attributes - A list of attribute names used to map positional attributes (default: nil) # * ... # # BlockProcessor implementations must extend BlockProcessor. class BlockProcessor < Processor attr_accessor :name def initialize name = nil, config = {} super config @name = name || @config[:name] # assign fallbacks case @config[:contexts] when ::NilClass @config[:contexts] ||= [:open, :paragraph].to_set when ::Symbol @config[:contexts] = [@config[:contexts]].to_set else @config[:contexts] = @config[:contexts].to_set end # QUESTION should the default content model be raw?? @config[:content_model] ||= :compound end def process parent, reader, attributes raise ::NotImplementedError end end module BlockProcessorDsl include ProcessorDsl # FIXME this isn't the prettiest thing def named value if Processor === self @name = value else option :name, value end end alias :match_name :named alias :bind_to :named def contexts *value option :contexts, value.flatten end alias :on_contexts :contexts alias :on_context :contexts def content_model value option :content_model, value end alias :parse_content_as :content_model def positional_attributes *value option :pos_attrs, value.flatten end alias :pos_attrs :positional_attributes alias :name_attributes :positional_attributes alias :name_positional_attributes :positional_attributes def default_attrs value option :default_attrs, value end alias :seed_attributes_with :default_attrs end BlockProcessor::DSL = BlockProcessorDsl class MacroProcessor < Processor attr_accessor :name def initialize name = nil, config = {} super config @name = name || @config[:name] @config[:content_model] ||= :attributes end def process parent, target, attributes raise ::NotImplementedError end end module MacroProcessorDsl include ProcessorDsl # QUESTION perhaps include a SyntaxDsl? def named value if Processor === self @name = value else option :name, value end end alias :match_name :named alias :bind_to :named def content_model value option :content_model, value end alias :parse_content_as :content_model def positional_attributes *value option :pos_attrs, value.flatten end alias :pos_attrs :positional_attributes alias :name_attributes :positional_attributes alias :name_positional_attributes :positional_attributes def default_attrs value option :default_attrs, value end alias :seed_attributes_with :default_attrs end # Public: BlockMacroProcessors are used to handle block macros that have a # custom name. 
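#
# In the AsciiDoc source, a block macro appears on a line by itself, e.g. (the
# gist macro name here is only an example):
#
#   gist::12345[]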
# # BlockMacroProcessor implementations must extend BlockMacroProcessor. class BlockMacroProcessor < MacroProcessor end BlockMacroProcessor::DSL = MacroProcessorDsl # Public: InlineMacroProcessors are used to handle block macros that have a # custom name. # # InlineMacroProcessor implementations must extend InlineMacroProcessor. #-- # TODO break this out into different pattern types # for example, FormalInlineMacro, ShortInlineMacro (no target) and other patterns # FIXME for inline passthrough, we need to have some way to specify the text as a passthrough class InlineMacroProcessor < MacroProcessor # Lookup the regexp option, resolving it first if necessary. # Once this method is called, the regexp is considered frozen. def regexp @config[:regexp] ||= (resolve_regexp @name, @config[:format]) end def resolve_regexp name, format # TODO memoize these regular expressions! if format == :short %r(\\?#{name}:\[((?:\\\]|[^\]])*?)\]) else %r(\\?#{name}:(\S+?)\[((?:\\\]|[^\]])*?)\]) end end end module InlineMacroProcessorDsl include MacroProcessorDsl def using_format value option :format, value end def match value option :regexp, value end end InlineMacroProcessor::DSL = InlineMacroProcessorDsl # Public: Extension is a proxy object for an extension implementation such as # a processor. It allows the preparation of the extension instance to be # separated from its usage to provide consistency between different # interfaces and avoid tight coupling with the extension type. # # The proxy encapsulates the extension kind (e.g., :block), its config Hash # and the extension instance. This Proxy is what gets stored in the extension # registry when activated. #-- # QUESTION call this ExtensionInfo? class Extension attr :kind attr :config attr :instance def initialize kind, instance, config @kind = kind @instance = instance @config = config end end # Public: A specialization of the Extension proxy that additionally stores a # reference to the {Processor#process} method. By storing this reference, its # possible to accomodate both concrete extension implementations and Procs. class ProcessorExtension < Extension attr :process_method def initialize kind, instance, process_method = nil super kind, instance, instance.config @process_method = process_method || (instance.method :process) end end # Public: A Group is used to register one or more extensions with the Registry. # # The Group should be subclassed and registered with the Registry either by # invoking the {Group.register} method or passing the subclass to the # {Extensions.register} method. Extensions are registered with the Registry # inside the {Group#activate} method. class Group class << self def register name = nil Extensions.register name, self end end def activate registry raise ::NotImplementedError end end # Public: The primary entry point into the extension system. # # Registry holds the extensions which have been registered and activated, has # methods for registering or defining a processor and looks up extensions # stored in the registry during parsing. class Registry # Public: Returns the {Asciidoctor::Document} on which the extensions in this registry are being used. attr_reader :document # Public: Returns the Array of {Group} classes, instances and/or Procs that have been registered. 
attr_reader :groups def initialize groups = {} @groups = groups @preprocessor_extensions = @treeprocessor_extensions = @postprocessor_extensions = @include_processor_extensions = @docinfo_processor_extensions =nil @block_extensions = @block_macro_extensions = @inline_macro_extensions = nil @document = nil end # Public: Activates all the global extension {Group}s and the extension {Group}s # associated with this registry. # # document - the {Asciidoctor::Document} on which the extensions are to be used. # # Returns the instance of this [Registry]. def activate document @document = document (Extensions.groups.values + @groups.values).each do |group| case group when ::Proc case group.arity when 0, -1 instance_exec(&group) when 1 group.call self end when ::Class group.new.activate self else group.activate self end end self end # Public: Registers a {Preprocessor} with the extension registry to process # the AsciiDoc source before parsing begins. # # The Preprocessor may be one of four types: # # * A Preprocessor subclass # * An instance of a Preprocessor subclass # * The String name of a Preprocessor subclass # * A method block (i.e., Proc) that conforms to the Preprocessor contract # # Unless the Preprocessor is passed as the method block, it must be the # first argument to this method. # # Examples # # # as a Preprocessor subclass # preprocessor FrontMatterPreprocessor # # # as an instance of a Preprocessor subclass # preprocessor FrontMatterPreprocessor.new # # # as a name of a Preprocessor subclass # preprocessor 'FrontMatterPreprocessor' # # # as a method block # preprocessor do # process |reader, lines| # ... # end # end # # Returns the [Extension] stored in the registry that proxies the # instance of this Preprocessor. def preprocessor *args, &block add_document_processor :preprocessor, args, &block end # Public: Checks whether any {Preprocessor} extensions have been registered. # # Returns a [Boolean] indicating whether any Preprocessor extensions are registered. def preprocessors? !!@preprocessor_extensions end # Public: Retrieves the {Extension} proxy objects for all # Preprocessor instances in this registry. # # Returns an [Array] of Extension proxy objects. def preprocessors @preprocessor_extensions end # Public: Registers a {Treeprocessor} with the extension registry to process # the AsciiDoc source after parsing is complete. # # The Treeprocessor may be one of four types: # # * A Treeprocessor subclass # * An instance of a Treeprocessor subclass # * The String name of a Treeprocessor subclass # * A method block (i.e., Proc) that conforms to the Treeprocessor contract # # Unless the Treeprocessor is passed as the method block, it must be the # first argument to this method. # # Examples # # # as a Treeprocessor subclass # treeprocessor ShellTreeprocessor # # # as an instance of a Treeprocessor subclass # treeprocessor ShellTreeprocessor.new # # # as a name of a Treeprocessor subclass # treeprocessor 'ShellTreeprocessor' # # # as a method block # treeprocessor do # process |document| # ... # end # end # # Returns the [Extension] stored in the registry that proxies the # instance of this Treeprocessor. def treeprocessor *args, &block add_document_processor :treeprocessor, args, &block end # Public: Checks whether any {Treeprocessor} extensions have been registered. # # Returns a [Boolean] indicating whether any Treeprocessor extensions are registered. def treeprocessors? 
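# @treeprocessor_extensions is either an Array of registered extensions or nil; !! coerces it to a Boolean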
!!@treeprocessor_extensions end # Public: Retrieves the {Extension} proxy objects for all # Treeprocessor instances in this registry. # # Returns an [Array] of Extension proxy objects. def treeprocessors @treeprocessor_extensions end # Public: Registers a {Postprocessor} with the extension registry to process # the output after conversion is complete. # # The Postprocessor may be one of four types: # # * A Postprocessor subclass # * An instance of a Postprocessor subclass # * The String name of a Postprocessor subclass # * A method block (i.e., Proc) that conforms to the Postprocessor contract # # Unless the Postprocessor is passed as the method block, it must be the # first argument to this method. # # Examples # # # as a Postprocessor subclass # postprocessor AnalyticsPostprocessor # # # as an instance of a Postprocessor subclass # postprocessor AnalyticsPostprocessor.new # # # as a name of a Postprocessor subclass # postprocessor 'AnalyticsPostprocessor' # # # as a method block # postprocessor do # process |document, output| # ... # end # end # # Returns the [Extension] stored in the registry that proxies the # instance of this Postprocessor. def postprocessor *args, &block add_document_processor :postprocessor, args, &block end # Public: Checks whether any {Postprocessor} extensions have been registered. # # Returns a [Boolean] indicating whether any Postprocessor extensions are registered. def postprocessors? !!@postprocessor_extensions end # Public: Retrieves the {Extension} proxy objects for all # Postprocessor instances in this registry. # # Returns an [Array] of Extension proxy objects. def postprocessors @postprocessor_extensions end # Public: Registers an {IncludeProcessor} with the extension registry to have # a shot at handling the include directive. # # The IncludeProcessor may be one of four types: # # * A IncludeProcessor subclass # * An instance of a IncludeProcessor subclass # * The String name of a IncludeProcessor subclass # * A method block (i.e., Proc) that conforms to the IncludeProcessor contract # # Unless the IncludeProcessor is passed as the method block, it must be the # first argument to this method. # # Examples # # # as an IncludeProcessor subclass # include_processor GitIncludeProcessor # # # as an instance of a Postprocessor subclass # include_processor GitIncludeProcessor.new # # # as a name of a Postprocessor subclass # include_processor 'GitIncludeProcessor' # # # as a method block # include_processor do # process |document, output| # ... # end # end # # Returns the [Extension] stored in the registry that proxies the # instance of this IncludeProcessor. def include_processor *args, &block add_document_processor :include_processor, args, &block end # Public: Checks whether any {IncludeProcessor} extensions have been registered. # # Returns a [Boolean] indicating whether any IncludeProcessor extensions are registered. def include_processors? !!@include_processor_extensions end # Public: Retrieves the {Extension} proxy objects for all the # IncludeProcessor instances stored in this registry. # # Returns an [Array] of Extension proxy objects. def include_processors @include_processor_extensions end # Public: Registers an {DocinfoProcessor} with the extension registry to # add additionnal docinfo to the document. 
# # The DocinfoProcessor may be one of four types: # # * A DocinfoProcessor subclass # * An instance of a DocinfoProcessor subclass # * The String name of a DocinfoProcessor subclass # * A method block (i.e., Proc) that conforms to the DocinfoProcessor contract # # Unless the DocinfoProcessor is passed as the method block, it must be the # first argument to this method. # # Examples # # # as an DocinfoProcessor subclass # docinfo_processor MetaRobotsDocinfoProcessor # # # as an instance of a DocinfoProcessor subclass with an explicit location # docinfo_processor JQueryDocinfoProcessor.new, :location => :footer # # # as a name of a DocinfoProcessor subclass # docinfo_processor 'MetaRobotsDocinfoProcessor' # # # as a method block # docinfo_processor do # process |doc| # at_location :footer # 'footer content' # end # end # # Returns the [Extension] stored in the registry that proxies the # instance of this DocinfoProcessor. def docinfo_processor *args, &block add_document_processor :docinfo_processor, args, &block end # Public: Checks whether any {DocinfoProcessor} extensions have been registered. # # location - A Symbol for selecting docinfo extensions at a given location (:head or :footer) (default: nil) # # Returns a [Boolean] indicating whether any DocinfoProcessor extensions are registered. def docinfo_processors? location = nil if @docinfo_processor_extensions if location @docinfo_processor_extensions.any? {|ext| ext.config[:location] == location } else true end else false end end # Public: Retrieves the {Extension} proxy objects for all the # DocinfoProcessor instances stored in this registry. # # location - A Symbol for selecting docinfo extensions at a given location (:head or :footer) (default: nil) # # Returns an [Array] of Extension proxy objects. def docinfo_processors location = nil if @docinfo_processor_extensions if location @docinfo_processor_extensions.select {|ext| ext.config[:location] == location } else @docinfo_processor_extensions end else nil end end # Public: Registers a {BlockProcessor} with the extension registry to # process the block content (i.e., delimited block or paragraph) in the # AsciiDoc source annotated with the specified block name (i.e., style). # # The BlockProcessor may be one of four types: # # * A BlockProcessor subclass # * An instance of a BlockProcessor subclass # * The String name of a BlockProcessor subclass # * A method block (i.e., Proc) that conforms to the BlockProcessor contract # # Unless the BlockProcessor is passed as the method block, it must be the # first argument to this method. The second argument is the name (coersed # to a Symbol) of the AsciiDoc block content (i.e., delimited block or # paragraph) that this processor is registered to handle. If a block name # is not passed as an argument, it gets read from the name property of the # BlockProcessor instance. If a name still cannot be determined, an error # is raised. # # Examples # # # as a BlockProcessor subclass # block ShoutBlock # # # as a BlockProcessor subclass with an explicit block name # block ShoutBlock, :shout # # # as an instance of a BlockProcessor subclass # block ShoutBlock.new # # # as an instance of a BlockProcessor subclass with an explicit block name # block ShoutBlock.new, :shout # # # as a name of a BlockProcessor subclass # block 'ShoutBlock' # # # as a name of a BlockProcessor subclass with an explicit block name # block 'ShoutBlock', :shout # # # as a method block # block do # named :shout # process |parent, reader, attrs| # ... 
# end # end # # # as a method block with an explicit block name # block :shout do # process |parent, reader, attrs| # ... # end # end # # Returns an instance of the [Extension] proxy object that is stored in the # registry and manages the instance of this BlockProcessor. def block *args, &block add_syntax_processor :block, args, &block end # Public: Checks whether any {BlockProcessor} extensions have been registered. # # Returns a [Boolean] indicating whether any BlockProcessor extensions are registered. def blocks? !!@block_extensions end # Public: Checks whether any {BlockProcessor} extensions are registered to # handle the specified block name appearing on the specified context. # # Returns the [Extension] proxy object for the BlockProcessor that matches # the block name and context or false if no match is found. def registered_for_block? name, context if (ext = @block_extensions[name.to_sym]) (ext.config[:contexts].include? context) ? ext : false else false end end # Public: Retrieves the {Extension} proxy object for the BlockProcessor registered # to handle block content with the name. # # name - the String or Symbol (coersed to a Symbol) macro name # # Returns the [Extension] object stored in the registry that proxies the # corresponding BlockProcessor or nil if a match is not found. def find_block_extension name @block_extensions[name.to_sym] end # Public: Registers a {BlockMacroProcessor} with the extension registry to # process a block macro with the specified name. # # The BlockMacroProcessor may be one of four types: # # * A BlockMacroProcessor subclass # * An instance of a BlockMacroProcessor subclass # * The String name of a BlockMacroProcessor subclass # * A method block (i.e., Proc) that conforms to the BlockMacroProcessor contract # # Unless the BlockMacroProcessor is passed as the method block, it must be # the first argument to this method. The second argument is the name # (coersed to a Symbol) of the AsciiDoc block macro that this processor is # registered to handle. If a block macro name is not passed as an argument, # it gets read from the name property of the BlockMacroProcessor instance. # If a name still cannot be determined, an error is raised. # # Examples # # # as a BlockMacroProcessor subclass # block_macro GistBlockMacro # # # as a BlockMacroProcessor subclass with an explicit macro name # block_macro GistBlockMacro, :gist # # # as an instance of a BlockMacroProcessor subclass # block_macro GistBlockMacro.new # # # as an instance of a BlockMacroProcessor subclass with an explicit macro name # block_macro GistBlockMacro.new, :gist # # # as a name of a BlockMacroProcessor subclass # block_macro 'GistBlockMacro' # # # as a name of a BlockMacroProcessor subclass with an explicit macro name # block_macro 'GistBlockMacro', :gist # # # as a method block # block_macro do # named :gist # process |parent, target, attrs| # ... # end # end # # # as a method block with an explicit macro name # block_macro :gist do # process |parent, target, attrs| # ... # end # end # # Returns an instance of the [Extension] proxy object that is stored in the # registry and manages the instance of this BlockMacroProcessor. def block_macro *args, &block add_syntax_processor :block_macro, args, &block end # Public: Checks whether any {BlockMacroProcessor} extensions have been registered. # # Returns a [Boolean] indicating whether any BlockMacroProcessor extensions are registered. def block_macros? 
!!@block_macro_extensions end # Public: Checks whether any {BlockMacroProcessor} extensions are registered to # handle the block macro with the specified name. # # name - the String or Symbol (coersed to a Symbol) macro name # # Returns the [Extension] proxy object for the BlockMacroProcessor that matches # the macro name or false if no match is found. #-- # TODO only allow blank target if format is :short def registered_for_block_macro? name (ext = @block_macro_extensions[name.to_sym]) ? ext : false end # Public: Retrieves the {Extension} proxy object for the BlockMacroProcessor registered # to handle a block macro with the specified name. # # name - the String or Symbol (coersed to a Symbol) macro name # # Returns the [Extension] object stored in the registry that proxies the # cooresponding BlockMacroProcessor or nil if a match is not found. def find_block_macro_extension name @block_macro_extensions[name.to_sym] end # Public: Registers a {InlineMacroProcessor} with the extension registry to # process an inline macro with the specified name. # # The InlineMacroProcessor may be one of four types: # # * An InlineMacroProcessor subclass # * An instance of an InlineMacroProcessor subclass # * The String name of an InlineMacroProcessor subclass # * A method block (i.e., Proc) that conforms to the InlineMacroProcessor contract # # Unless the InlineMacroProcessor is passed as the method block, it must be # the first argument to this method. The second argument is the name # (coersed to a Symbol) of the AsciiDoc block macro that this processor is # registered to handle. If a block macro name is not passed as an argument, # it gets read from the name property of the InlineMacroProcessor instance. # If a name still cannot be determined, an error is raised. # # Examples # # # as an InlineMacroProcessor subclass # inline_macro ChromeInlineMacro # # # as an InlineMacroProcessor subclass with an explicit macro name # inline_macro ChromeInineMacro, :chrome # # # as an instance of an InlineMacroProcessor subclass # inline_macro ChromeInlineMacro.new # # # as an instance of an InlineMacroProcessor subclass with an explicit macro name # inline_macro ChromeInlineMacro.new, :chrome # # # as a name of an InlineMacroProcessor subclass # inline_macro 'ChromeInlineMacro' # # # as a name of an InlineMacroProcessor subclass with an explicit macro name # inline_macro 'ChromeInineMacro', :chrome # # # as a method block # inline_macro do # named :chrome # process |parent, target, attrs| # ... # end # end # # # as a method block with an explicit macro name # inline_macro :chrome do # process |parent, target, attrs| # ... # end # end # # Returns an instance of the [Extension] proxy object that is stored in the # registry and manages the instance of this InlineMacroProcessor. def inline_macro *args, &block add_syntax_processor :inline_macro, args, &block end # Public: Checks whether any {InlineMacroProcessor} extensions have been registered. # # Returns a [Boolean] indicating whether any IncludeMacroProcessor extensions are registered. def inline_macros? !!@inline_macro_extensions end # Public: Checks whether any {InlineMacroProcessor} extensions are registered to # handle the inline macro with the specified name. # # name - the String or Symbol (coersed to a Symbol) macro name # # Returns the [Extension] proxy object for the InlineMacroProcessor that matches # the macro name or false if no match is found. def registered_for_inline_macro? name (ext = @inline_macro_extensions[name.to_sym]) ? 
ext : false end # Public: Retrieves the {Extension} proxy object for the InlineMacroProcessor registered # to handle an inline macro with the specified name. # # name - the String or Symbol (coersed to a Symbol) macro name # # Returns the [Extension] object stored in the registry that proxies the # cooresponding InlineMacroProcessor or nil if a match is not found. def find_inline_macro_extension name @inline_macro_extensions[name.to_sym] end # Public: Retrieves the {Extension} proxy objects for all # InlineMacroProcessor instances in this registry. # # Returns an [Array] of Extension proxy objects. def inline_macros @inline_macro_extensions.values end private def add_document_processor kind, args, &block kind_name = kind.to_s.tr '_', ' ' kind_class_symbol = kind_name.split(' ').map {|word| %(#{word.chr.upcase}#{word[1..-1]}) }.join.to_sym kind_class = Extensions.const_get kind_class_symbol kind_java_class = (defined? ::AsciidoctorJ) ? (::AsciidoctorJ::Extensions.const_get kind_class_symbol) : nil kind_store = instance_variable_get(%(@#{kind}_extensions).to_sym) || instance_variable_set(%(@#{kind}_extensions).to_sym, []) # style 1: specified as block extension = if block_given? config = resolve_args args, 1 # TODO if block arity is 0, assume block is process method processor = kind_class.new config # NOTE class << processor idiom doesn't work in Opal #class << processor # include_dsl #end # NOTE kind_class.contants(false) doesn't exist in Ruby 1.8.7 processor.extend kind_class.const_get :DSL if kind_class.constants.grep :DSL processor.instance_exec(&block) processor.freeze unless processor.process_block_given? raise ::ArgumentError.new %(No block specified to process #{kind_name} extension at #{block.source_location}) end ProcessorExtension.new kind, processor else processor, config = resolve_args args, 2 # style 2: specified as class or class name if ::Class === processor || (::String === processor && (processor = Extensions.class_for_name processor)) unless processor < kind_class || (kind_java_class && processor < kind_java_class) raise ::ArgumentError.new %(Invalid type for #{kind_name} extension: #{processor}) end processor_instance = processor.new config processor_instance.freeze ProcessorExtension.new kind, processor_instance # style 3: specified as instance elsif kind_class === processor || (kind_java_class && kind_java_class === processor) processor.update_config config processor.freeze ProcessorExtension.new kind, processor else raise ::ArgumentError.new %(Invalid arguments specified for registering #{kind_name} extension: #{args}) end end if extension.config[:position] == :>> kind_store.unshift extension else kind_store << extension end end def add_syntax_processor kind, args, &block kind_name = kind.to_s.tr '_', ' ' kind_class_basename = kind_name.split(' ').map {|word| %(#{word.chr.upcase}#{word[1..-1]}) }.join kind_class_symbol = %(#{kind_class_basename}Processor).to_sym kind_class = Extensions.const_get kind_class_symbol kind_java_class = (defined? ::AsciidoctorJ) ? (::AsciidoctorJ::Extensions.const_get kind_class_symbol) : nil kind_store = instance_variable_get(%(@#{kind}_extensions).to_sym) || instance_variable_set(%(@#{kind}_extensions).to_sym, {}) # style 1: specified as block if block_given? 
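# NOTE with this style, the positional args supply the (optional) name and config Hash; the processing logic comes from the block registered via the DSL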
name, config = resolve_args args, 2 processor = kind_class.new as_symbol(name), config # NOTE class << processor idiom doesn't work in Opal #class << processor # include_dsl #end # NOTE kind_class.contants(false) doesn't exist in Ruby 1.8.7 processor.extend kind_class.const_get :DSL if kind_class.constants.grep :DSL if block.arity == 1 yield processor else processor.instance_exec(&block) end unless (name = as_symbol processor.name) raise ::ArgumentError.new %(No name specified for #{kind_name} extension at #{block.source_location}) end unless processor.process_block_given? raise ::NoMethodError.new %(No block specified to process #{kind_name} extension at #{block.source_location}) end processor.freeze kind_store[name] = ProcessorExtension.new kind, processor else processor, name, config = resolve_args args, 3 # style 2: specified as class or class name if ::Class === processor || (::String === processor && (processor = Extensions.class_for_name processor)) unless processor < kind_class || (kind_java_class && processor < kind_java_class) raise ::ArgumentError.new %(Class specified for #{kind_name} extension does not inherit from #{kind_class}: #{processor}) end processor_instance = processor.new as_symbol(name), config unless (name = as_symbol processor_instance.name) raise ::ArgumentError.new %(No name specified for #{kind_name} extension: #{processor}) end processor.freeze kind_store[name] = ProcessorExtension.new kind, processor_instance # style 3: specified as instance elsif kind_class === processor || (kind_java_class && kind_java_class === processor) processor.update_config config # TODO need a test for this override! unless (name = name ? (processor.name = as_symbol name) : (as_symbol processor.name)) raise ::ArgumentError.new %(No name specified for #{kind_name} extension: #{processor}) end processor.freeze kind_store[name] = ProcessorExtension.new kind, processor else raise ::ArgumentError.new %(Invalid arguments specified for registering #{kind_name} extension: #{args}) end end end def resolve_args args, expect opts = ::Hash === args[-1] ? args.pop : {} return opts if expect == 1 num_args = args.size if (missing = expect - 1 - num_args) > 0 args.fill nil, num_args, missing elsif missing < 0 args.pop(-missing) end args << opts args end def as_symbol name name ? name.to_sym : nil end end class << self def generate_name %(extgrp#{next_auto_id}) end def next_auto_id @auto_id ||= -1 @auto_id += 1 end def groups @groups ||= {} end def build_registry name = nil, &block if block_given? name ||= generate_name Registry.new({ name => block }) else Registry.new end end # Public: Registers an extension Group that subsequently registers a # collection of extensions. # # Registers the extension Group specified under the given name. If a name is # not given, one is calculated by appending the next value in a 0-based # index to the string "extgrp". For instance, the first unnamed extension # group to be registered is assigned the name "extgrp0" if a name is not # specified. # # The names are not yet used, but are intended for selectively activating # extensions in the future. # # If the extension group argument is a String or a Symbol, it gets resolved # to a Class before being registered. # # name - The name under which this extension group is registered (optional, default: nil) # group - A block (Proc), a Class, a String or Symbol name of a Class or # an Object instance of a Class. 
# # Examples # # Asciidoctor::Extensions.register UmlExtensions # # Asciidoctor::Extensions.register :uml, UmlExtensions # # Asciidoctor::Extensions.register do # block_processor :plantuml, PlantUmlBlock # end # # Asciidoctor::Extensions.register :uml do # block_processor :plantuml, PlantUmlBlock # end # # Returns the [Proc, Class or Object] instance, matching the type passed to this method. def register *args, &block argc = args.length resolved_group = if block_given? block elsif !(group = args.pop) raise ::ArgumentError.new %(Extension group to register not specified) else # QUESTION should we instantiate the group class here or defer until # activation?? case group when ::Class group when ::String class_for_name group when ::Symbol class_for_name group.to_s else group end end name = args.pop || generate_name unless args.empty? raise ::ArgumentError.new %(Wrong number of arguments (#{argc} for 1..2)) end groups[name] = resolved_group end def unregister_all @groups = {} end # unused atm, but tested def resolve_class object ::Class === object ? object : (class_for_name object.to_s) end # Public: Resolves the Class object for the qualified name. # # Returns Class def class_for_name qualified_name resolved_class = ::Object qualified_name.split('::').each do |name| if name.empty? # do nothing elsif resolved_class.const_defined? name resolved_class = resolved_class.const_get name else raise %(Could not resolve class for name: #{qualified_name}) end end resolved_class end end end end asciidoctor-1.5.5/lib/asciidoctor/helpers.rb000066400000000000000000000173351277513741400211110ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor module Helpers # Internal: Require the specified library using Kernel#require. # # Attempts to load the library specified in the first argument using the # Kernel#require. Rescues the LoadError if the library is not available and # passes a message to Kernel#fail if on_failure is :abort or Kernel#warn if # on_failure is :warn to communicate to the user that processing is being # aborted or functionality is disabled, respectively. If a gem_name is # specified, the message communicates that a required gem is not installed. # # name - the String name of the library to require. # gem_name - a Boolean that indicates whether this library is provided by a RubyGem, # or the String name of the RubyGem if it differs from the library name # (default: true) # on_failure - a Symbol that indicates how to handle a load failure (:abort, :warn, :ignore) (default: :abort) # # returns The return value of Kernel#require if the library is available and can be, or was previously, loaded. # Otherwise, Kernel#fail is called with an appropriate message if on_failure is :abort. # Otherwise, Kernel#warn is called with an appropriate message and nil returned if on_failure is :warn. # Otherwise, nil is returned. def self.require_library name, gem_name = true, on_failure = :abort require name rescue ::LoadError => e if gem_name gem_name = name if gem_name == true case on_failure when :abort fail %(asciidoctor: FAILED: required gem '#{gem_name}' is not installed. Processing aborted.) when :warn warn %(asciidoctor: WARNING: optional gem '#{gem_name}' is not installed. Functionality disabled.) end else case on_failure when :abort fail %(asciidoctor: FAILED: #{e.message.chomp '.'}. Processing aborted.) when :warn warn %(asciidoctor: WARNING: #{e.message.chomp '.'}. Functionality disabled.) 
end end end # Public: Normalize the data to prepare for parsing # # Delegates to Helpers#normalize_lines_from_string if data is a String. # Delegates to Helpers#normalize_lines_array if data is a String Array. # # returns a String Array of normalized lines def self.normalize_lines data data.class == ::String ? (normalize_lines_from_string data) : (normalize_lines_array data) end # Public: Normalize the array of lines to prepare them for parsing # # Force encodes the data to UTF-8 and removes trailing whitespace from each line. # # If a BOM is present at the beginning of the data, a best attempt # is made to encode from the specified encoding to UTF-8. # # data - a String Array of lines to normalize # # returns a String Array of normalized lines def self.normalize_lines_array data return [] if data.empty? # NOTE if data encoding is UTF-*, we only need 0..1 leading_bytes = (first_line = data[0])[0..2].bytes.to_a if COERCE_ENCODING utf8 = ::Encoding::UTF_8 if (leading_2_bytes = leading_bytes[0..1]) == BOM_BYTES_UTF_16LE # Ruby messes up trailing whitespace on UTF-16LE, so take a different route return ((data.join.force_encoding ::Encoding::UTF_16LE)[1..-1].encode utf8).lines.map {|line| line.rstrip } elsif leading_2_bytes == BOM_BYTES_UTF_16BE data[0] = (first_line.force_encoding ::Encoding::UTF_16BE)[1..-1] return data.map {|line| "#{((line.force_encoding ::Encoding::UTF_16BE).encode utf8).rstrip}" } elsif leading_bytes[0..2] == BOM_BYTES_UTF_8 data[0] = (first_line.force_encoding utf8)[1..-1] end data.map {|line| line.encoding == utf8 ? line.rstrip : (line.force_encoding utf8).rstrip } else # Ruby 1.8 has no built-in re-encoding, so no point in removing the UTF-16 BOMs if leading_bytes == BOM_BYTES_UTF_8 data[0] = first_line[3..-1] end data.map {|line| line.rstrip } end end # Public: Normalize the String and split into lines to prepare them for parsing # # Force encodes the data to UTF-8 and removes trailing whitespace from each line. # Converts the data to a String Array. # # If a BOM is present at the beginning of the data, a best attempt # is made to encode from the specified encoding to UTF-8. # # data - a String of lines to normalize # # returns a String Array of normalized lines def self.normalize_lines_from_string data return [] if data.nil_or_empty? if COERCE_ENCODING utf8 = ::Encoding::UTF_8 # NOTE if data encoding is UTF-*, we only need 0..1 leading_bytes = data[0..2].bytes.to_a if (leading_2_bytes = leading_bytes[0..1]) == BOM_BYTES_UTF_16LE data = (data.force_encoding ::Encoding::UTF_16LE)[1..-1].encode utf8 elsif leading_2_bytes == BOM_BYTES_UTF_16BE data = (data.force_encoding ::Encoding::UTF_16BE)[1..-1].encode utf8 elsif leading_bytes[0..2] == BOM_BYTES_UTF_8 data = data.encoding == utf8 ? data[1..-1] : (data.force_encoding utf8)[1..-1] else data = data.force_encoding utf8 unless data.encoding == utf8 end else # Ruby 1.8 has no built-in re-encoding, so no point in removing the UTF-16 BOMs if data[0..2].bytes.to_a == BOM_BYTES_UTF_8 data = data[3..-1] end end data.each_line.map {|line| line.rstrip } end # Public: Efficiently checks whether the specified String resembles a URI # # Uses the Asciidoctor::UriSniffRx regex to check whether the String begins # with a URI prefix (e.g., http://). No validation of the URI is performed. # # str - the String to check # # returns true if the String is a URI, false if it is not def self.uriish? str (str.include? 
':') && str =~ UriSniffRx end # Public: Efficiently retrieves the URI prefix of the specified String # # Uses the Asciidoctor::UriSniffRx regex to match the URI prefix in the # specified String (e.g., http://), if present. # # str - the String to check # # returns the string URI prefix if the string is a URI, otherwise nil def self.uri_prefix str (str.include? ':') && str =~ UriSniffRx ? $& : nil end # Matches the characters in a URI to encode REGEXP_ENCODE_URI_CHARS = /[^\w\-.!~*';:@=+$,()\[\]]/ # Public: Encode a string for inclusion in a URI # # str - the string to encode # # returns an encoded version of the str def self.encode_uri(str) str.gsub(REGEXP_ENCODE_URI_CHARS) do $&.each_byte.map {|c| sprintf '%%%02X', c}.join end end # Public: Removes the file extension from filename and returns the result # # file_name - The String file name to process # # Examples # # Helpers.rootname('part1/chapter1.adoc') # # => "part1/chapter1" # # Returns the String filename with the file extension removed def self.rootname(file_name) (ext = ::File.extname(file_name)).empty? ? file_name : file_name[0...-ext.length] end # Public: Retrieves the basename of the filename, optionally removing the extension, if present # # file_name - The String file name to process # drop_extname - A Boolean flag indicating whether to drop the extension (default: false) # # Examples # # Helpers.basename('images/tiger.png', true) # # => "tiger" # # Returns the String filename with leading directories removed and, if specified, the extension removed def self.basename(file_name, drop_extname = false) if drop_extname ::File.basename file_name, (::File.extname file_name) else ::File.basename file_name end end def self.mkdir_p(dir) unless ::File.directory? dir parent_dir = ::File.dirname(dir) if !::File.directory?(parent_dir = ::File.dirname(dir)) && parent_dir != '.' mkdir_p(parent_dir) end ::Dir.mkdir(dir) end end end end asciidoctor-1.5.5/lib/asciidoctor/inline.rb000066400000000000000000000016141277513741400207160ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor # Public: Methods for managing inline elements in AsciiDoc block class Inline < AbstractNode # Public: Get the text of this inline element attr_reader :text # Public: Get the type (qualifier) of this inline element attr_reader :type # Public: Get/Set the target (e.g., uri) of this inline element attr_accessor :target def initialize(parent, context, text = nil, opts = {}) super(parent, context) @node_name = %(inline_#{context}) @text = text @id = opts[:id] @type = opts[:type] @target = opts[:target] unless (more_attributes = opts[:attributes]).nil_or_empty? update_attributes more_attributes end end def block? false end def inline? true end def convert converter.convert self end # Alias render to convert to maintain backwards compatibility alias :render :convert end end asciidoctor-1.5.5/lib/asciidoctor/list.rb000066400000000000000000000067511277513741400204220ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor # Public: Methods for managing AsciiDoc lists (ordered, unordered and description lists) class List < AbstractBlock # Public: Create alias for blocks alias :items :blocks # Public: Get the items in this list as an Array alias :content :blocks # Public: Create alias to check if this list has blocks alias :items? :blocks? def initialize parent, context super end # Check whether this list is an outline list (unordered or ordered). # # Return true if this list is an outline list. Otherwise, return false. def outline? 
@context == :ulist || @context == :olist end def convert if @context == :colist result = super @document.callouts.next_list result else super end end # Alias render to convert to maintain backwards compatibility alias :render :convert def to_s %(#<#{self.class}@#{object_id} {context: #{@context.inspect}, style: #{@style.inspect}, items: #{items.size}}>) end end # Public: Methods for managing items for AsciiDoc olists, ulist, and dlists. class ListItem < AbstractBlock # A contextual alias for the list parent node; counterpart to the items alias on List alias :list :parent # Public: Get/Set the String used to mark this list item attr_accessor :marker # Public: Initialize an Asciidoctor::ListItem object. # # parent - The parent list block for this list item # text - the String text (default nil) def initialize parent, text = nil super parent, :list_item @text = text @level = parent.level end def text? !@text.nil_or_empty? end def text apply_subs @text end # Check whether this list item has simple content (no nested blocks aside from a single outline list). # Primarily relevant for outline lists. # # Return true if the list item contains no blocks or it contains a single outline list. Otherwise, return false. def simple? @blocks.empty? || (@blocks.size == 1 && List === (blk = @blocks[0]) && blk.outline?) end # Check whether this list item has compound content (nested blocks aside from a single outline list). # Primarily relevant for outline lists. # # Return true if the list item contains blocks other than a single outline list. Otherwise, return false. def compound? !simple? end # Public: Fold the first paragraph block into the text # # Here are the rules for when a folding occurs: # # Given: this list item has at least one block # When: the first block is a paragraph that's not connected by a list continuation # Or: the first block is an indented paragraph that's adjacent (wrapped line) # Or: the first block is an indented paragraph that's not connected by a list continuation # Then: then drop the first block and fold it's content (buffer) into the list text # # Returns nothing def fold_first(continuation_connects_first_block = false, content_adjacent = false) if (first_block = @blocks[0]) && Block === first_block && ((first_block.context == :paragraph && !continuation_connects_first_block) || ((content_adjacent || !continuation_connects_first_block) && first_block.context == :literal && first_block.option?('listparagraph'))) block = blocks.shift block.lines.unshift @text unless @text.nil_or_empty? @text = block.source end nil end def to_s %(#<#{self.class}@#{object_id} {list_context: #{parent.context.inspect}, text: #{@text.inspect}, blocks: #{(@blocks || []).size}}>) end end end asciidoctor-1.5.5/lib/asciidoctor/parser.rb000066400000000000000000003237371277513741400207510ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor # Public: Methods to parse lines of AsciiDoc into an object hierarchy # representing the structure of the document. All methods are class methods and # should be invoked from the Parser class. The main entry point is ::next_block. # No Parser instances shall be discovered running around. (Any attempt to # instantiate a Parser will be futile). # # The object hierarchy created by the Parser consists of zero or more Section # and Block objects. Section objects may be nested and a Section object # contains zero or more Block objects. Block objects may be nested, but may # only contain other Block objects. 
Block objects which represent lists may # contain zero or more ListItem objects. # # Examples # # # Create a Reader for the AsciiDoc lines and retrieve the next block from it. # # Parser.next_block requires a parent, so we begin by instantiating an empty Document. # # doc = Document.new # reader = Reader.new lines # block = Parser.next_block(reader, doc) # block.class # # => Asciidoctor::Block class Parser BlockMatchData = Struct.new :context, :masq, :tip, :terminator # Regexp for replacing tab character TabRx = /\t/ # Regexp for leading tab indentation TabIndentRx = /^\t+/ StartOfBlockProc = lambda {|l| ((l.start_with? '[') && BlockAttributeLineRx =~ l) || (is_delimited_block? l) } StartOfListProc = lambda {|l| AnyListRx =~ l } StartOfBlockOrListProc = lambda {|l| (is_delimited_block? l) || ((l.start_with? '[') && BlockAttributeLineRx =~ l) || AnyListRx =~ l } NoOp = nil # Public: Make sure the Parser object doesn't get initialized. # # Raises RuntimeError if this constructor is invoked. def initialize raise 'Au contraire, mon frere. No lexer instances will be running around.' end # Public: Parses AsciiDoc source read from the Reader into the Document # # This method is the main entry-point into the Parser when parsing a full document. # It first looks for and, if found, processes the document title. It then # proceeds to iterate through the lines in the Reader, parsing the document # into nested Sections and Blocks. # # reader - the Reader holding the source lines of the document # document - the empty Document into which the lines will be parsed # options - a Hash of options to control processing # # returns the Document object def self.parse(reader, document, options = {}) block_attributes = parse_document_header(reader, document) unless options[:header_only] while reader.has_more_lines? new_section, block_attributes = next_section(reader, document, block_attributes) document << new_section if new_section end end document end # Public: Parses the document header of the AsciiDoc source read from the Reader # # Reads the AsciiDoc source from the Reader until the end of the document # header is reached. The Document object is populated with information from # the header (document title, document attributes, etc). The document # attributes are then saved to establish a save point to which to rollback # after parsing is complete. # # This method assumes that there are no blank lines at the start of the document, # which are automatically removed by the reader. # # returns the Hash of orphan block attributes captured above the header def self.parse_document_header(reader, document) # capture lines of block-level metadata and plow away comment lines that precede first block block_attributes = parse_block_metadata_lines(reader, document) # special case, block title is not allowed above document title, # carry attributes over to the document body if (has_doctitle_line = is_next_line_document_title?(reader, block_attributes)) && block_attributes.has_key?('title') return document.finalize_header block_attributes, false end # yep, document title logic in AsciiDoc is just insanity # definitely an area for spec refinement assigned_doctitle = nil unless (val = document.attributes['doctitle']).nil_or_empty? 
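# if the doctitle attribute is already set (e.g., passed in via the API or CLI), record it as the explicitly assigned document title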
document.title = assigned_doctitle = val end section_title = nil # if the first line is the document title, add a header to the document and parse the header metadata if has_doctitle_line source_location = reader.cursor if document.sourcemap document.id, _, doctitle, _, single_line = parse_section_title reader, document unless assigned_doctitle document.title = assigned_doctitle = doctitle end # default to compat-mode if document uses atx-style doctitle document.set_attribute 'compat-mode', '' unless single_line if (separator = block_attributes.delete 'separator') document.set_attribute 'title-separator', separator end document.header.source_location = source_location if source_location document.attributes['doctitle'] = section_title = doctitle # QUESTION: should the id assignment on Document be encapsulated in the Document class? if document.id block_attributes.delete 1 block_attributes.delete 'id' else if (style = block_attributes.delete 1) style_attrs = { 1 => style } parse_style_attribute style_attrs, reader block_attributes['id'] = style_attrs['id'] if style_attrs.key? 'id' end document.id = block_attributes.delete 'id' end parse_header_metadata reader, document end unless (val = document.attributes['doctitle']).nil_or_empty? || val == section_title document.title = assigned_doctitle = val end # restore doctitle attribute to original assignment document.attributes['doctitle'] = assigned_doctitle if assigned_doctitle # parse title and consume name section of manpage document parse_manpage_header(reader, document) if document.doctype == 'manpage' # NOTE block_attributes are the block-level attributes (not document attributes) that # precede the first line of content (document title, first section or first block) document.finalize_header block_attributes end # Public: Parses the manpage header of the AsciiDoc source read from the Reader # # returns Nothing def self.parse_manpage_header(reader, document) if (m = ManpageTitleVolnumRx.match(document.attributes['doctitle'])) document.attributes['mantitle'] = document.sub_attributes(m[1].rstrip.downcase) document.attributes['manvolnum'] = m[2].strip else warn %(asciidoctor: ERROR: #{reader.prev_line_info}: malformed manpage title) # provide sensible fallbacks document.attributes['mantitle'] = document.attributes['doctitle'] document.attributes['manvolnum'] = '1' end reader.skip_blank_lines if is_next_line_section?(reader, {}) name_section = initialize_section(reader, document, {}) if name_section.level == 1 name_section_buffer = reader.read_lines_until(:break_on_blank_lines => true).join(' ').tr_s(' ', ' ') if (m = ManpageNamePurposeRx.match(name_section_buffer)) document.attributes['manname'] = document.sub_attributes m[1] document.attributes['manpurpose'] = m[2] # TODO parse multiple man names if document.backend == 'manpage' document.attributes['docname'] = document.attributes['manname'] document.attributes['outfilesuffix'] = %(.#{document.attributes['manvolnum']}) end else warn %(asciidoctor: ERROR: #{reader.prev_line_info}: malformed name section body) end else warn %(asciidoctor: ERROR: #{reader.prev_line_info}: name section title must be at level 1) end else warn %(asciidoctor: ERROR: #{reader.prev_line_info}: name section expected) end end # Public: Return the next section from the Reader. # # This method process block metadata, content and subsections for this # section and returns the Section object and any orphaned attributes. 
# # If the parent is a Document and has a header (document title), then # this method will put any non-section blocks at the start of document # into a preamble Block. If there are no such blocks, the preamble is # dropped. # # Since we are reading line-by-line, there's a chance that metadata # that should be associated with the following block gets consumed. # To deal with this case, the method returns a running Hash of # "orphaned" attributes that get passed to the next Section or Block. # # reader - the source Reader # parent - the parent Section or Document of this new section # attributes - a Hash of metadata that was left orphaned from the # previous Section. # # Examples # # source # # => "= Greetings\n\nThis is my doc.\n\n== Salutations\n\nIt is awesome." # # reader = Reader.new source, nil, :normalize => true # # create empty document to parent the section # # and hold attributes extracted from header # doc = Document.new # # Parser.next_section(reader, doc).first.title # # => "Greetings" # # Parser.next_section(reader, doc).first.title # # => "Salutations" # # returns a two-element Array containing the Section and Hash of orphaned attributes def self.next_section(reader, parent, attributes = {}) preamble = false part = false intro = false # FIXME if attributes[1] is a verbatim style, then don't check for section # check if we are at the start of processing the document # NOTE we could drop a hint in the attributes to indicate # that we are at a section title (so we don't have to check) if parent.context == :document && parent.blocks.empty? && ((has_header = parent.has_header?) || attributes.delete('invalid-header') || !is_next_line_section?(reader, attributes)) doctype = parent.doctype if has_header || (doctype == 'book' && attributes[1] != 'abstract') preamble = intro = Block.new(parent, :preamble, :content_model => :compound) if doctype == 'book' && (parent.attr? 'preface-title') preamble.title = parent.attr 'preface-title' end parent << preamble end section = parent current_level = 0 if parent.attributes.has_key? 'fragment' expected_next_levels = nil # small tweak to allow subsequent level-0 sections for book doctype elsif doctype == 'book' expected_next_levels = [0, 1] else expected_next_levels = [1] end else doctype = parent.document.doctype section = initialize_section(reader, parent, attributes) # clear attributes, except for title which carries over # section title to next block of content attributes = (title = attributes['title']) ? { 'title' => title } : {} current_level = section.level if current_level == 0 && doctype == 'book' part = !section.special # subsections in preface & appendix in multipart books start at level 2 if section.special && (['preface', 'appendix'].include? section.sectname) expected_next_levels = [current_level + 2] else expected_next_levels = [current_level + 1] end else expected_next_levels = [current_level + 1] end end reader.skip_blank_lines # Parse lines belonging to this section and its subsections until we # reach the end of this section level # # 1. first look for metadata thingies (anchor, attribute list, block title line, etc) # 2. then look for a section, recurse if found # 3. then process blocks # # We have to parse all the metadata lines before continuing with the loop, # otherwise subsequent metadata lines get interpreted as block content while reader.has_more_lines? parse_block_metadata_lines(reader, section, attributes) if (next_level = is_next_line_section? 
reader, attributes) next_level += section.document.attr('leveloffset', 0).to_i if next_level > current_level || (section.context == :document && next_level == 0) if next_level == 0 && doctype != 'book' warn %(asciidoctor: ERROR: #{reader.line_info}: only book doctypes can contain level 0 sections) elsif expected_next_levels && !expected_next_levels.include?(next_level) warn %(asciidoctor: WARNING: #{reader.line_info}: section title out of sequence: ) + %(expected #{expected_next_levels.size > 1 ? 'levels' : 'level'} #{expected_next_levels * ' or '}, ) + %(got level #{next_level}) end # the attributes returned are those that are orphaned new_section, attributes = next_section(reader, section, attributes) section << new_section else if next_level == 0 && doctype != 'book' warn %(asciidoctor: ERROR: #{reader.line_info}: only book doctypes can contain level 0 sections) end # close this section (and break out of the nesting) to begin a new one break end else # just take one block or else we run the risk of overrunning section boundaries block_line_info = reader.line_info if (new_block = next_block reader, (intro || section), attributes, :parse_metadata => false) # REVIEW this may be doing too much if part if !section.blocks? # if this block wasn't marked as [partintro], emulate behavior as if it had if new_block.style != 'partintro' # emulate [partintro] paragraph if new_block.context == :paragraph new_block.context = :open new_block.style = 'partintro' # emulate [partintro] open block else intro = Block.new section, :open, :content_model => :compound intro.style = 'partintro' new_block.parent = intro section << intro end end elsif section.blocks.size == 1 first_block = section.blocks[0] # open the [partintro] open block for appending if !intro && first_block.content_model == :compound #new_block.parent = (intro = first_block) warn %(asciidoctor: ERROR: #{block_line_info}: illegal block content outside of partintro block) # rebuild [partintro] paragraph as an open block elsif first_block.content_model != :compound intro = Block.new section, :open, :content_model => :compound intro.style = 'partintro' section.blocks.shift if first_block.style == 'partintro' first_block.context = :paragraph first_block.style = nil end first_block.parent = intro intro << first_block new_block.parent = intro section << intro end end end (intro || section) << new_block attributes = {} #else # # don't clear attributes if we don't find a block because they may # # be trailing attributes that didn't get associated with a block end end reader.skip_blank_lines end if part unless section.blocks? && section.blocks[-1].context == :section warn %(asciidoctor: ERROR: #{reader.line_info}: invalid part, must have at least one section (e.g., chapter, appendix, etc.)) end # NOTE we could try to avoid creating a preamble in the first place, though # that would require reworking assumptions in next_section since the preamble # is treated like an untitled section elsif preamble # implies parent == document document = parent if preamble.blocks? 
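        # For illustration (a sketch of the unwrap behavior handled just below): when a
        # document has a header but no sections, the lone preamble is unwrapped and its
        # blocks are promoted directly onto the Document, provided
        # Compliance.unwrap_standalone_preamble is enabled and the doctype is not 'book'.
        #
        #   doc = Asciidoctor.load "= Title\n\nJust one paragraph, no sections."
        #   doc.blocks[0].context
        #   # => :paragraph  (rather than :preamble)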
# unwrap standalone preamble (i.e., no sections), if permissible if Compliance.unwrap_standalone_preamble && document.blocks.size == 1 && doctype != 'book' document.blocks.shift while (child_block = preamble.blocks.shift) child_block.parent = document document << child_block end end # drop the preamble if it has no content else document.blocks.shift end end # The attributes returned here are orphaned attributes that fall at the end # of a section that need to get transfered to the next section # see "trailing block attributes transfer to the following section" in # test/attributes_test.rb for an example [section != parent ? section : nil, attributes.dup] end # Public: Return the next Section or Block object from the Reader. # # Begins by skipping over blank lines to find the start of the next Section # or Block. Processes each line of the reader in sequence until a Section or # Block is found or the reader has no more lines. # # Uses regular expressions from the Asciidoctor module to match Section # and Block delimiters. The ensuing lines are then processed according # to the type of content. # # reader - The Reader from which to retrieve the next block # parent - The Document, Section or Block to which the next block belongs # # Returns a Section or Block object holding the parsed content of the processed lines #-- # QUESTION should next_block have an option for whether it should keep looking until # a block is found? right now it bails when it encounters a line to be skipped def self.next_block(reader, parent, attributes = {}, options = {}) # Skip ahead to the block content skipped = reader.skip_blank_lines # bail if we've reached the end of the parent block or document return unless reader.has_more_lines? # check for option to find list item text only # if skipped a line, assume a list continuation was # used and block content is acceptable if (text_only = options[:text]) && skipped > 0 options.delete(:text) text_only = false end parse_metadata = options.fetch(:parse_metadata, true) #parse_sections = options.fetch(:parse_sections, false) document = parent.document if (extensions = document.extensions) block_extensions = extensions.blocks? block_macro_extensions = extensions.block_macros? else block_extensions = block_macro_extensions = false end #parent_context = Block === parent ? parent.context : nil in_list = ListItem === parent block = nil style = nil explicit_style = nil sourcemap = document.sourcemap source_location = nil while !block && reader.has_more_lines? # if parsing metadata, read until there is no more to read if parse_metadata && parse_block_metadata_line(reader, document, attributes, options) reader.advance next #elsif parse_sections && !parent_context && is_next_line_section?(reader, attributes) # block, attributes = next_section(reader, parent, attributes) # break end # QUESTION should we introduce a parsing context object? source_location = reader.cursor if sourcemap this_line = reader.read_line delimited_block = false block_context = nil cloaked_context = nil terminator = nil # QUESTION put this inside call to rekey attributes? if attributes[1] style, explicit_style = parse_style_attribute(attributes, reader) end if (delimited_blk_match = is_delimited_block? this_line, true) delimited_block = true block_context = cloaked_context = delimited_blk_match.context terminator = delimited_blk_match.terminator if !style style = attributes['style'] = block_context.to_s elsif style != block_context.to_s if delimited_blk_match.masq.include? 
style block_context = style.to_sym elsif delimited_blk_match.masq.include?('admonition') && ADMONITION_STYLES.include?(style) block_context = :admonition elsif block_extensions && extensions.registered_for_block?(style, block_context) block_context = style.to_sym else warn %(asciidoctor: WARNING: #{reader.prev_line_info}: invalid style for #{block_context} block: #{style}) style = block_context.to_s end end end unless delimited_block # this loop only executes once; used for flow control # break once a block is found or at end of loop # returns nil if the line must be dropped # Implementation note - while(true) is twice as fast as loop while true # process lines verbatim if style && Compliance.strict_verbatim_paragraphs && VERBATIM_STYLES.include?(style) block_context = style.to_sym reader.unshift_line this_line # advance to block parsing => break end # process lines normally unless text_only first_char = Compliance.markdown_syntax ? this_line.lstrip.chr : this_line.chr # NOTE we're letting break lines (horizontal rule, page_break, etc) have attributes if (LAYOUT_BREAK_LINES.has_key? first_char) && this_line.length >= 3 && (Compliance.markdown_syntax ? LayoutBreakLinePlusRx : LayoutBreakLineRx) =~ this_line block = Block.new(parent, LAYOUT_BREAK_LINES[first_char], :content_model => :empty) break elsif this_line.end_with?(']') && (match = MediaBlockMacroRx.match(this_line)) blk_ctx = match[1].to_sym block = Block.new(parent, blk_ctx, :content_model => :empty) if blk_ctx == :image posattrs = ['alt', 'width', 'height'] elsif blk_ctx == :video posattrs = ['poster', 'width', 'height'] else posattrs = [] end # QUESTION why did we make exception for explicit style? #if style && !explicit_style if style attributes['alt'] = style if blk_ctx == :image attributes.delete 'style' style = nil end block.parse_attributes(match[3], posattrs, :unescape_input => (blk_ctx == :image), :sub_input => true, :sub_result => false, :into => attributes) target = block.sub_attributes(match[2], :attribute_missing => 'drop-line') if target.empty? # retain as unparsed if attribute-missing is skip if document.attributes.fetch('attribute-missing', Compliance.attribute_missing) == 'skip' return Block.new(parent, :paragraph, :content_model => :simple, :source => [this_line]) # otherwise, drop the line else attributes.clear return end end attributes['target'] = target # now done down below #block.title = attributes.delete('title') if attributes.has_key?('title') #if blk_ctx == :image # if attributes.has_key? 'scaledwidth' # # append % to scaledwidth if ends in number (no units present) # if (48..57).include?((attributes['scaledwidth'][-1] || 0).ord) # attributes['scaledwidth'] = %(#{attributes['scaledwidth']}%) # end # end # document.register(:images, target) # attributes['alt'] ||= Helpers.basename(target, true).tr('_-', ' ') # # QUESTION should video or audio have an auto-numbered caption? # block.assign_caption attributes.delete('caption'), 'figure' #end break # NOTE we're letting the toc macro have attributes elsif first_char == 't' && (match = TocBlockMacroRx.match(this_line)) block = Block.new(parent, :toc, :content_model => :empty) block.parse_attributes(match[1], [], :sub_result => false, :into => attributes) break elsif block_macro_extensions && (match = GenericBlockMacroRx.match(this_line)) && (extension = extensions.registered_for_block_macro?(match[1])) target = match[2] raw_attributes = match[3] if extension.config[:content_model] == :attributes unless raw_attributes.empty? 
document.parse_attributes(raw_attributes, (extension.config[:pos_attrs] || []), :sub_input => true, :sub_result => false, :into => attributes) end else attributes['text'] = raw_attributes end if (default_attrs = extension.config[:default_attrs]) default_attrs.each {|k, v| attributes[k] ||= v } end if (block = extension.process_method[parent, target, attributes.dup]) attributes.replace block.attributes else attributes.clear return end break end end # haven't found anything yet, continue if (match = CalloutListRx.match(this_line)) block = List.new(parent, :colist) attributes['style'] = 'arabic' reader.unshift_line this_line expected_index = 1 # NOTE skip the match on the first time through as we've already done it (emulates begin...while) while match || (reader.has_more_lines? && (match = CalloutListRx.match(reader.peek_line))) # might want to move this check to a validate method if match[1].to_i != expected_index # FIXME this lineno - 2 hack means we need a proper look-behind cursor warn %(asciidoctor: WARNING: #{reader.path}: line #{reader.lineno - 2}: callout list item index: expected #{expected_index} got #{match[1]}) end list_item = next_list_item(reader, block, match) expected_index += 1 if list_item block << list_item coids = document.callouts.callout_ids(block.items.size) if !coids.empty? list_item.attributes['coids'] = coids else # FIXME this lineno - 2 hack means we need a proper look-behind cursor warn %(asciidoctor: WARNING: #{reader.path}: line #{reader.lineno - 2}: no callouts refer to list item #{block.items.size}) end end match = nil end document.callouts.next_list break elsif UnorderedListRx =~ this_line reader.unshift_line this_line block = next_outline_list(reader, :ulist, parent) break elsif (match = OrderedListRx.match(this_line)) reader.unshift_line this_line block = next_outline_list(reader, :olist, parent) # TODO move this logic into next_outline_list if !attributes['style'] && !block.attributes['style'] marker = block.items[0].marker if marker.start_with? '.' # first one makes more sense, but second one is AsciiDoc-compliant #attributes['style'] = (ORDERED_LIST_STYLES[block.level - 1] || ORDERED_LIST_STYLES[0]).to_s attributes['style'] = (ORDERED_LIST_STYLES[marker.length - 1] || ORDERED_LIST_STYLES[0]).to_s else style = ORDERED_LIST_STYLES.find {|s| OrderedListMarkerRxMap[s] =~ marker } attributes['style'] = (style || ORDERED_LIST_STYLES[0]).to_s end end break elsif (match = DescriptionListRx.match(this_line)) reader.unshift_line this_line block = next_labeled_list(reader, match, parent) break elsif (style == 'float' || style == 'discrete') && is_section_title?(this_line, (Compliance.underline_style_section_titles ? reader.peek_line(true) : nil)) reader.unshift_line this_line float_id, float_reftext, float_title, float_level, _ = parse_section_title(reader, document) attributes['reftext'] = float_reftext if float_reftext float_id ||= attributes['id'] if attributes.has_key?('id') block = Block.new(parent, :floating_title, :content_model => :empty) if float_id.nil_or_empty? # FIXME remove hack of creating throwaway Section to get at the generate_id method tmp_sect = Section.new(parent) tmp_sect.title = float_title block.id = tmp_sect.generate_id else block.id = float_id end block.level = float_level block.title = float_title break # FIXME create another set for "passthrough" styles # FIXME make this more DRY! 
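          # For illustration: a floating (discrete) heading such as the following is parsed
          # by the branch above into a Block with context :floating_title rather than
          # starting a new Section.
          #
          #   [discrete]
          #   == A Heading That Is Not a Section
          #
          #   Asciidoctor.load("[discrete]\n== A Heading That Is Not a Section").blocks[0].context
          #   # => :floating_title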
elsif style && style != 'normal' if PARAGRAPH_STYLES.include?(style) block_context = style.to_sym cloaked_context = :paragraph reader.unshift_line this_line # advance to block parsing => break elsif ADMONITION_STYLES.include?(style) block_context = :admonition cloaked_context = :paragraph reader.unshift_line this_line # advance to block parsing => break elsif block_extensions && extensions.registered_for_block?(style, :paragraph) block_context = style.to_sym cloaked_context = :paragraph reader.unshift_line this_line # advance to block parsing => break else warn %(asciidoctor: WARNING: #{reader.prev_line_info}: invalid style for paragraph: #{style}) style = nil # continue to process paragraph end end break_at_list = (skipped == 0 && in_list) # a literal paragraph is contiguous lines starting at least one space if style != 'normal' && LiteralParagraphRx =~ this_line # So we need to actually include this one in the read_lines group reader.unshift_line this_line lines = read_paragraph_lines reader, break_at_list, :skip_line_comments => text_only adjust_indentation! lines block = Block.new(parent, :literal, :content_model => :verbatim, :source => lines, :attributes => attributes) # a literal gets special meaning inside of a description list # TODO this feels hacky, better way to distinguish from explicit literal block? block.set_option('listparagraph') if in_list # a paragraph is contiguous nonblank/noncontinuation lines else reader.unshift_line this_line lines = read_paragraph_lines reader, break_at_list, :skip_line_comments => true # NOTE we need this logic because we've asked the reader to skip # line comments, which may leave us w/ an empty buffer if those # were the only lines found if lines.empty? # call advance since the reader preserved the last line reader.advance return end catalog_inline_anchors(lines.join(EOL), document) first_line = lines[0] if !text_only && (admonition_match = AdmonitionParagraphRx.match(first_line)) lines[0] = admonition_match.post_match.lstrip attributes['style'] = admonition_match[1] attributes['name'] = admonition_name = admonition_match[1].downcase attributes['caption'] ||= document.attributes[%(#{admonition_name}-caption)] block = Block.new(parent, :admonition, :content_model => :simple, :source => lines, :attributes => attributes) elsif !text_only && Compliance.markdown_syntax && first_line.start_with?('> ') lines.map! {|line| if line == '>' line[1..-1] elsif line.start_with? '> ' line[2..-1] else line end } if lines[-1].start_with? '-- ' attribution, citetitle = lines.pop[3..-1].split(', ', 2) lines.pop while lines[-1].empty? else attribution, citetitle = nil end attributes['style'] = 'quote' attributes['attribution'] = attribution if attribution attributes['citetitle'] = citetitle if citetitle # NOTE will only detect headings that are floating titles (not section titles) # TODO could assume a floating title when inside a block context # FIXME Reader needs to be created w/ line info block = build_block(:quote, :compound, false, parent, Reader.new(lines), attributes) elsif !text_only && (blockquote? lines, first_line) lines[0] = first_line[1..-1] attribution, citetitle = lines.pop[3..-1].split(', ', 2) lines.pop while lines[-1].empty? 
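              # For illustration: this branch handles the air-quote shorthand, where a
              # paragraph wrapped in double quotes and followed by an attribution line
              # becomes a quote block.
              #
              #   "To be or not to be.
              #   That is the question."
              #   -- William Shakespeare, Hamlet
              #
              # yields a block with context :quote, attribution 'William Shakespeare'
              # and citetitle 'Hamlet'.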
# strip trailing quote lines[-1] = lines[-1].chop attributes['style'] = 'quote' attributes['attribution'] = attribution if attribution attributes['citetitle'] = citetitle if citetitle block = Block.new(parent, :quote, :content_model => :simple, :source => lines, :attributes => attributes) else # if [normal] is used over an indented paragraph, shift content to left margin if style == 'normal' # QUESTION do we even need to shift since whitespace is normalized by XML in this case? adjust_indentation! lines end block = Block.new(parent, :paragraph, :content_model => :simple, :source => lines, :attributes => attributes) end end # forbid loop from executing more than once break end end # either delimited block or styled paragraph if !block && block_context # abstract and partintro should be handled by open block # FIXME kind of hackish...need to sort out how to generalize this block_context = :open if block_context == :abstract || block_context == :partintro case block_context when :admonition attributes['name'] = admonition_name = style.downcase attributes['caption'] ||= document.attributes[%(#{admonition_name}-caption)] block = build_block(block_context, :compound, terminator, parent, reader, attributes) when :comment build_block(block_context, :skip, terminator, parent, reader, attributes) return when :example block = build_block(block_context, :compound, terminator, parent, reader, attributes) when :listing, :fenced_code, :source if block_context == :fenced_code style = attributes['style'] = 'source' language, linenums = this_line[3..-1].tr(' ', '').split(',', 2) if !language.nil_or_empty? attributes['language'] = language attributes['linenums'] = '' unless linenums.nil_or_empty? elsif (default_language = document.attributes['source-language']) attributes['language'] = default_language end if !attributes.key?('indent') && document.attributes.key?('source-indent') attributes['indent'] = document.attributes['source-indent'] end terminator = terminator[0..2] elsif block_context == :source AttributeList.rekey(attributes, [nil, 'language', 'linenums']) unless attributes.key? 'language' if (default_language = document.attributes['source-language']) attributes['language'] = default_language end end if !attributes.key?('indent') && document.attributes.key?('source-indent') attributes['indent'] = document.attributes['source-indent'] end end block = build_block(:listing, :verbatim, terminator, parent, reader, attributes) when :literal block = build_block(block_context, :verbatim, terminator, parent, reader, attributes) when :pass block = build_block(block_context, :raw, terminator, parent, reader, attributes) when :stem, :latexmath, :asciimath if block_context == :stem attributes['style'] = if (explicit_stem_syntax = attributes[2]) explicit_stem_syntax.include?('tex') ? 'latexmath' : 'asciimath' elsif (default_stem_syntax = document.attributes['stem']).nil_or_empty? 
'asciimath' else default_stem_syntax end end block = build_block(:stem, :raw, terminator, parent, reader, attributes) when :open, :sidebar block = build_block(block_context, :compound, terminator, parent, reader, attributes) when :table cursor = reader.cursor block_reader = Reader.new reader.read_lines_until(:terminator => terminator, :skip_line_comments => true), cursor case terminator.chr when ',' attributes['format'] = 'csv' when ':' attributes['format'] = 'dsv' end block = next_table(block_reader, parent, attributes) when :quote, :verse AttributeList.rekey(attributes, [nil, 'attribution', 'citetitle']) block = build_block(block_context, (block_context == :verse ? :verbatim : :compound), terminator, parent, reader, attributes) else if block_extensions && (extension = extensions.registered_for_block?(block_context, cloaked_context)) # TODO pass cloaked_context to extension somehow (perhaps a new instance for each cloaked_context?) if (content_model = extension.config[:content_model]) != :skip if !(pos_attrs = extension.config[:pos_attrs] || []).empty? AttributeList.rekey(attributes, [nil].concat(pos_attrs)) end if (default_attrs = extension.config[:default_attrs]) default_attrs.each {|k, v| attributes[k] ||= v } end end block = build_block block_context, content_model, terminator, parent, reader, attributes, :extension => extension unless block && content_model != :skip attributes.clear return end else # this should only happen if there's a misconfiguration raise %(Unsupported block type #{block_context} at #{reader.line_info}) end end end end # when looking for nested content, one or more line comments, comment # blocks or trailing attribute lists could leave us without a block, # so handle accordingly # REVIEW we may no longer need this nil check # FIXME we've got to clean this up, it's horrible! if block block.source_location = source_location if source_location # REVIEW seems like there is a better way to organize this wrap-up block.title = attributes['title'] unless block.title? # FIXME HACK don't hardcode logic for alt, caption and scaledwidth on images down here if block.context == :image resolved_target = attributes['target'] block.document.register(:images, resolved_target) attributes['alt'] ||= Helpers.basename(resolved_target, true).tr('_-', ' ') attributes['alt'] = block.sub_specialchars attributes['alt'] block.assign_caption attributes.delete('caption'), 'figure' if (scaledwidth = attributes['scaledwidth']) # append % to scaledwidth if ends in number (no units present) if (48..57).include?((scaledwidth[-1] || 0).ord) attributes['scaledwidth'] = %(#{scaledwidth}%) end end else block.caption ||= attributes.delete('caption') end # TODO eventualy remove the style attribute from the attributes hash #block.style = attributes.delete('style') block.style = attributes['style'] # AsciiDoc always use [id] as the reftext in HTML output, # but I'd like to do better in Asciidoctor if (block_id = (block.id ||= attributes['id'])) # TODO sub reftext document.register(:ids, [block_id, (attributes['reftext'] || (block.title? ? block.title : nil))]) end # FIXME remove the need for this update! block.attributes.update(attributes) unless attributes.empty? block.lock_in_subs #if document.attributes.has_key? :pending_attribute_entries # document.attributes.delete(:pending_attribute_entries).each do |entry| # entry.save_to block.attributes # end #end if block.sub? 
:callouts unless (catalog_callouts block.source, document) # No need to sub callouts if they aren't there block.remove_sub :callouts end end end block end def self.blockquote? lines, first_line = nil lines.size > 1 && ((first_line || lines[0]).start_with? '"') && (lines[-1].start_with? '-- ') && (lines[-2].end_with? '"') end def self.read_paragraph_lines reader, break_at_list, opts = {} opts[:break_on_blank_lines] = true opts[:break_on_list_continuation] = true opts[:preserve_last_line] = true break_condition = (break_at_list ? (Compliance.block_terminates_paragraph ? StartOfBlockOrListProc : StartOfListProc) : (Compliance.block_terminates_paragraph ? StartOfBlockProc : NoOp)) reader.read_lines_until opts, &break_condition end # Public: Determines whether this line is the start of any of the delimited blocks # # returns the match data if this line is the first line of a delimited block or nil if not def self.is_delimited_block? line, return_match_data = false # highly optimized for best performance return unless (line_len = line.length) > 1 && (DELIMITED_BLOCK_LEADERS.include? line[0..1]) # catches open block if line_len == 2 tip = line tl = 2 else # catches all other delimited blocks, including fenced code if line_len <= 4 tip = line tl = line_len else tip = line[0..3] tl = 4 end # special case for fenced code blocks # REVIEW review this logic fenced_code = false if Compliance.markdown_syntax tip_3 = (tl == 4 ? tip.chop : tip) if tip_3 == '```' if tl == 4 && tip.end_with?('`') return end tip = tip_3 tl = 3 fenced_code = true end end # short circuit if not a fenced code block return if tl == 3 && !fenced_code end if DELIMITED_BLOCKS.has_key? tip # tip is the full line when delimiter is minimum length if tl < 4 || tl == line_len if return_match_data context, masq = *DELIMITED_BLOCKS[tip] BlockMatchData.new(context, masq, tip, tip) else true end elsif %(#{tip}#{tip[-1..-1] * (line_len - tl)}) == line if return_match_data context, masq = *DELIMITED_BLOCKS[tip] BlockMatchData.new(context, masq, tip, line) else true end # only enable if/when we decide to support non-congruent block delimiters #elsif (match = BlockDelimiterRx.match(line)) # if return_match_data # context, masq = *DELIMITED_BLOCKS[tip] # BlockMatchData.new(context, masq, tip, match[0]) # else # true # end else nil end else nil end end # whether a block supports compound content should be a config setting # if terminator is false, that means the all the lines in the reader should be parsed # NOTE could invoke filter in here, before and after parsing def self.build_block(block_context, content_model, terminator, parent, reader, attributes, options = {}) if content_model == :skip || content_model == :raw skip_processing = content_model == :skip parse_as_content_model = :simple else skip_processing = false parse_as_content_model = content_model end if terminator.nil? if parse_as_content_model == :verbatim lines = reader.read_lines_until(:break_on_blank_lines => true, :break_on_list_continuation => true) else content_model = :simple if content_model == :compound lines = read_paragraph_lines reader, false, :skip_line_comments => true, :skip_processing => true # QUESTION check for empty lines after grabbing lines for simple content model? 
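        # Summary for clarity: build_block acquires its content in one of three ways,
        # depending on the terminator argument:
        #   * nil      -> read paragraph-style lines from the current position (handled above)
        #   * a String -> read lines until the matching delimiter line (handled below)
        #   * false    -> the caller has already prepared a Reader confined to the block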
end block_reader = nil elsif parse_as_content_model != :compound lines = reader.read_lines_until(:terminator => terminator, :skip_processing => skip_processing) block_reader = nil # terminator is false when reader has already been prepared elsif terminator == false lines = nil block_reader = reader else lines = nil cursor = reader.cursor block_reader = Reader.new reader.read_lines_until(:terminator => terminator, :skip_processing => skip_processing), cursor end if content_model == :skip attributes.clear # FIXME we shouldn't be mixing return types return lines end if content_model == :verbatim if (indent = attributes['indent']) adjust_indentation! lines, indent, (attributes['tabsize'] || parent.document.attributes['tabsize']) elsif (tab_size = (attributes['tabsize'] || parent.document.attributes['tabsize']).to_i) > 0 adjust_indentation! lines, nil, tab_size end end if (extension = options[:extension]) # QUESTION do we want to delete the style? attributes.delete('style') if (block = extension.process_method[parent, block_reader || (Reader.new lines), attributes.dup]) attributes.replace block.attributes # FIXME if the content model is set to compound, but we only have simple in this context, then # forcefully set the content_model to simple to prevent parsing blocks from children # TODO document this behavior!! if block.content_model == :compound && !(lines = block.lines).nil_or_empty? content_model = :compound block_reader = Reader.new lines end else # FIXME need a test to verify this returns nil at the right time return end else block = Block.new(parent, block_context, :content_model => content_model, :source => lines, :attributes => attributes) end # QUESTION should we have an explicit map or can we rely on check for *-caption attribute? if (attributes.has_key? 'title') && (block.document.attr? %(#{block.context}-caption)) block.title = attributes.delete 'title' block.assign_caption attributes.delete('caption') end if content_model == :compound # we can look for blocks until there are no more lines (and not worry # about sections) since the reader is confined within the boundaries of a # delimited block parse_blocks block_reader, block end block end # Public: Parse blocks from this reader until there are no more lines. # # This method calls Parser#next_block until there are no more lines in the # Reader. It does not consider sections because it's assumed the Reader only # has lines which are within a delimited block region. # # reader - The Reader containing the lines to process # parent - The parent Block to which to attach the parsed blocks # # Returns nothing. def self.parse_blocks(reader, parent) while reader.has_more_lines? block = Parser.next_block(reader, parent) parent << block if block end end # Internal: Parse and construct an outline list Block from the current position of the Reader # # reader - The Reader from which to retrieve the outline list # list_type - A Symbol representing the list type (:olist for ordered, :ulist for unordered) # parent - The parent Block to which this outline list belongs # # Returns the Block encapsulating the parsed outline (unordered or ordered) list def self.next_outline_list(reader, list_type, parent) list_block = List.new(parent, list_type) if parent.context == list_type list_block.level = parent.level + 1 else list_block.level = 1 end while reader.has_more_lines? 
&& (match = ListRxMap[list_type].match(reader.peek_line)) marker = resolve_list_marker(list_type, match[1]) # if we are moving to the next item, and the marker is different # determine if we are moving up or down in nesting if list_block.items? && marker != list_block.items[0].marker # assume list is nested by default, but then check to see if we are # popping out of a nested list by matching an ancestor's list marker this_item_level = list_block.level + 1 ancestor = parent while ancestor.context == list_type if marker == ancestor.items[0].marker this_item_level = ancestor.level break end ancestor = ancestor.parent end else this_item_level = list_block.level end if !list_block.items? || this_item_level == list_block.level list_item = next_list_item(reader, list_block, match) elsif this_item_level < list_block.level # leave this block break elsif this_item_level > list_block.level # If this next list level is down one from the # current Block's, append it to content of the current list item list_block.items[-1] << next_block(reader, list_block) end list_block << list_item if list_item list_item = nil reader.skip_blank_lines end list_block end # Internal: Catalog any callouts found in the text, but don't process them # # text - The String of text in which to look for callouts # document - The current document on which the callouts are stored # # Returns A Boolean indicating whether callouts were found def self.catalog_callouts(text, document) found = false if text.include? '<' text.scan(CalloutQuickScanRx) { # alias match for Ruby 1.8.7 compat m = $~ if m[0].chr != '\\' document.callouts.register(m[2]) end # we have to mark as found even if it's escaped so it can be unescaped found = true } end found end # Internal: Catalog any inline anchors found in the text, but don't process them # # text - The String text in which to look for inline anchors # document - The current document on which the references are stored # # Returns nothing def self.catalog_inline_anchors(text, document) if text.include? '[' text.scan(InlineAnchorRx) { # alias match for Ruby 1.8.7 compat m = $~ next if m[0].start_with? '\\' id = m[1] || m[3] reftext = m[2] || m[4] # enable if we want to allow double quoted values #id = id.sub(DoubleQuotedRx, '\2') #if reftext # reftext = reftext.sub(DoubleQuotedMultiRx, '\2') #end document.register(:ids, [id, reftext]) } end nil end # Internal: Parse and construct a description list Block from the current position of the Reader # # reader - The Reader from which to retrieve the labeled list # match - The Regexp match for the head of the list # parent - The parent Block to which this labeled list belongs # # Returns the Block encapsulating the parsed labeled list def self.next_labeled_list(reader, match, parent) list_block = List.new(parent, :dlist) previous_pair = nil # allows us to capture until we find a labeled item # that uses the same delimiter (::, :::, :::: or ;;) sibling_pattern = DescriptionListSiblingRx[match[2]] # NOTE skip the match on the first time through as we've already done it (emulates begin...while) while match || (reader.has_more_lines? 
&& (match = sibling_pattern.match(reader.peek_line))) term, item = next_list_item(reader, list_block, match, sibling_pattern) if previous_pair && !previous_pair[-1] previous_pair.pop previous_pair[0] << term previous_pair << item else # FIXME this misses the automatic parent assignment list_block.items << (previous_pair = [[term], item]) end match = nil end list_block end # Internal: Parse and construct the next ListItem for the current bulleted # (unordered or ordered) list Block, callout lists included, or the next # term ListItem and description ListItem pair for the labeled list Block. # # First collect and process all the lines that constitute the next list # item for the parent list (according to its type). Next, parse those lines # into blocks and associate them with the ListItem (in the case of a # labeled list, the description ListItem). Finally, fold the first block # into the item's text attribute according to rules described in ListItem. # # reader - The Reader from which to retrieve the next list item # list_block - The parent list Block of this ListItem. Also provides access to the list type. # match - The match Array which contains the marker and text (first-line) of the ListItem # sibling_trait - The list marker or the Regexp to match a sibling item # # Returns the next ListItem or ListItem pair (depending on the list type) # for the parent list Block. def self.next_list_item(reader, list_block, match, sibling_trait = nil) if (list_type = list_block.context) == :dlist list_term = ListItem.new(list_block, match[1]) list_item = ListItem.new(list_block, match[3]) has_text = !match[3].nil_or_empty? else # Create list item using first line as the text of the list item text = match[2] checkbox = false if list_type == :ulist && text.start_with?('[') if text.start_with?('[ ] ') checkbox = true checked = false text = text[3..-1].lstrip elsif text.start_with?('[x] ') || text.start_with?('[*] ') checkbox = true checked = true text = text[3..-1].lstrip end end list_item = ListItem.new(list_block, text) if checkbox # FIXME checklist never makes it into the options attribute list_block.attributes['checklist-option'] = '' list_item.attributes['checkbox'] = '' list_item.attributes['checked'] = '' if checked end sibling_trait ||= resolve_list_marker(list_type, match[1], list_block.items.size, true, reader) list_item.marker = sibling_trait has_text = true end # first skip the line with the marker / term reader.advance cursor = reader.cursor list_item_reader = Reader.new read_lines_for_list_item(reader, list_type, sibling_trait, has_text), cursor if list_item_reader.has_more_lines? comment_lines = list_item_reader.skip_line_comments subsequent_line = list_item_reader.peek_line list_item_reader.unshift_lines comment_lines unless comment_lines.empty? if !subsequent_line.nil? continuation_connects_first_block = subsequent_line.empty? # if there's no continuation connecting the first block, then # treat the lines as paragraph text (activated when has_text = false) if !continuation_connects_first_block && list_type != :dlist has_text = false end content_adjacent = !continuation_connects_first_block && !subsequent_line.empty? else continuation_connects_first_block = false content_adjacent = false end # only relevant for :dlist options = {:text => !has_text} # we can look for blocks until there are no more lines (and not worry # about sections) since the reader is confined within the boundaries of a # list while list_item_reader.has_more_lines? 
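        # For illustration: the checkbox handling earlier in this method turns items like
        # the following into a checklist; each ListItem gets a 'checkbox' attribute and,
        # when marked with x or *, a 'checked' attribute.
        #
        #   * [ ] todo item
        #   * [x] done item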
if (new_block = next_block(list_item_reader, list_item, {}, options)) list_item << new_block end end list_item.fold_first(continuation_connects_first_block, content_adjacent) end if list_type == :dlist unless list_item.text? || list_item.blocks? list_item = nil end [list_term, list_item] else list_item end end # Internal: Collect the lines belonging to the current list item, navigating # through all the rules that determine what comprises a list item. # # Grab lines until a sibling list item is found, or the block is broken by a # terminator (such as a line comment). Description lists are more greedy if # they don't have optional inline item text...they want that text # # reader - The Reader from which to retrieve the lines. # list_type - The Symbol context of the list (:ulist, :olist, :colist or :dlist) # sibling_trait - A Regexp that matches a sibling of this list item or String list marker # of the items in this list (default: nil) # has_text - Whether the list item has text defined inline (always true except for labeled lists) # # Returns an Array of lines belonging to the current list item. def self.read_lines_for_list_item(reader, list_type, sibling_trait = nil, has_text = true) buffer = [] # three states for continuation: :inactive, :active & :frozen # :frozen signifies we've detected sequential continuation lines & # continuation is not permitted until reset continuation = :inactive # if we are within a nested list, we don't throw away the list # continuation marks because they will be processed when grabbing # the lines for those nested lists within_nested_list = false # a detached continuation is a list continuation that follows a blank line # it gets associated with the outermost block detached_continuation = nil while reader.has_more_lines? this_line = reader.read_line # if we've arrived at a sibling item in this list, we've captured # the complete list item and can begin processing it # the remainder of the method determines whether we've reached # the termination of the list break if is_sibling_list_item?(this_line, list_type, sibling_trait) prev_line = buffer.empty? ? nil : buffer[-1] if prev_line == LIST_CONTINUATION if continuation == :inactive continuation = :active has_text = true buffer[-1] = '' unless within_nested_list end # dealing with adjacent list continuations (which is really a syntax error) if this_line == LIST_CONTINUATION if continuation != :frozen continuation = :frozen buffer << this_line end this_line = nil next end end # a delimited block immediately breaks the list unless preceded # by a list continuation (they are harsh like that ;0) if (match = is_delimited_block?(this_line, true)) if continuation == :active buffer << this_line # grab all the lines in the block, leaving the delimiters in place # we're being more strict here about the terminator, but I think that's a good thing buffer.concat reader.read_lines_until(:terminator => match.terminator, :read_last_line => true) continuation = :inactive else break end # technically BlockAttributeLineRx only breaks if ensuing line is not a list item # which really means BlockAttributeLineRx only breaks if it's acting as a block delimiter # FIXME to be AsciiDoc compliant, we shouldn't break if style in attribute line is "literal" (i.e., [literal]) elsif list_type == :dlist && continuation != :active && BlockAttributeLineRx =~ this_line break else if continuation == :active && !this_line.empty? 
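          # For illustration: a list continuation line (+) keeps the following block
          # attached to the current list item, which is the case this branch handles.
          #
          #   * first item
          #   +
          #   This paragraph is attached to the first item.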
# literal paragraphs have special considerations (and this is one of # two entry points into one) # if we don't process it as a whole, then a line in it that looks like a # list item will throw off the exit from it if LiteralParagraphRx =~ this_line reader.unshift_line this_line buffer.concat reader.read_lines_until( :preserve_last_line => true, :break_on_blank_lines => true, :break_on_list_continuation => true) {|line| # we may be in an indented list disguised as a literal paragraph # so we need to make sure we don't slurp up a legitimate sibling list_type == :dlist && is_sibling_list_item?(line, list_type, sibling_trait) } continuation = :inactive # let block metadata play out until we find the block elsif BlockTitleRx =~ this_line || BlockAttributeLineRx =~ this_line || AttributeEntryRx =~ this_line buffer << this_line else if nested_list_type = (within_nested_list ? [:dlist] : NESTABLE_LIST_CONTEXTS).find {|ctx| ListRxMap[ctx] =~ this_line } within_nested_list = true if nested_list_type == :dlist && $~[3].nil_or_empty? # get greedy again has_text = false end end buffer << this_line continuation = :inactive end elsif !prev_line.nil? && prev_line.empty? # advance to the next line of content if this_line.empty? reader.skip_blank_lines this_line = reader.read_line # if we hit eof or a sibling, stop reading break if this_line.nil? || is_sibling_list_item?(this_line, list_type, sibling_trait) end if this_line == LIST_CONTINUATION detached_continuation = buffer.size buffer << this_line else # has_text is only relevant for dlist, which is more greedy until it has text for an item # for all other lists, has_text is always true # in this block, we have to see whether we stay in the list if has_text # TODO any way to combine this with the check after skipping blank lines? if is_sibling_list_item?(this_line, list_type, sibling_trait) break elsif nested_list_type = NESTABLE_LIST_CONTEXTS.find {|ctx| ListRxMap[ctx] =~ this_line } buffer << this_line within_nested_list = true if nested_list_type == :dlist && $~[3].nil_or_empty? # get greedy again has_text = false end # slurp up any literal paragraph offset by blank lines # NOTE we have to check for indented list items first elsif LiteralParagraphRx =~ this_line reader.unshift_line this_line buffer.concat reader.read_lines_until( :preserve_last_line => true, :break_on_blank_lines => true, :break_on_list_continuation => true) {|line| # we may be in an indented list disguised as a literal paragraph # so we need to make sure we don't slurp up a legitimate sibling list_type == :dlist && is_sibling_list_item?(line, list_type, sibling_trait) } else break end else # only dlist in need of item text, so slurp it up! # pop the blank line so it's not interpretted as a list continuation buffer.pop unless within_nested_list buffer << this_line has_text = true end end else has_text = true if !this_line.empty? if nested_list_type = (within_nested_list ? [:dlist] : NESTABLE_LIST_CONTEXTS).find {|ctx| ListRxMap[ctx] =~ this_line } within_nested_list = true if nested_list_type == :dlist && $~[3].nil_or_empty? # get greedy again has_text = false end end buffer << this_line end end this_line = nil end reader.unshift_line this_line if this_line if detached_continuation buffer.delete_at detached_continuation end # strip trailing blank lines to prevent empty blocks buffer.pop while !buffer.empty? && buffer[-1].empty? # We do need to replace the optional trailing continuation # a blank line would have served the same purpose in the document buffer.pop if !buffer.empty? 
&& buffer[-1] == LIST_CONTINUATION #warn "BUFFER[#{list_type},#{sibling_trait}]>#{buffer * EOL}#{buffer.inspect} ["Foo", "~~~"] # # id, reftext, title, level, single = parse_section_title(reader, document) # # title # # => "Foo" # level # # => 2 # id # # => nil # single # # => false # # line1 # # => "==== Foo" # # id, reftext, title, level, single = parse_section_title(reader, document) # # title # # => "Foo" # level # # => 3 # id # # => nil # single # # => true # # returns an Array of [String, String, Integer, String, Boolean], representing the # id, reftext, title, level and line count of the Section, or nil. # #-- # NOTE for efficiency, we don't reuse methods that check for a section title def self.parse_section_title(reader, document) line1 = reader.read_line sect_id = nil sect_title = nil sect_level = -1 sect_reftext = nil single_line = true first_char = line1.chr if (first_char == '=' || (Compliance.markdown_syntax && first_char == '#')) && (match = AtxSectionRx.match(line1)) sect_level = single_line_section_level match[1] sect_title = match[2] if sect_title.end_with?(']]') && (anchor_match = InlineSectionAnchorRx.match(sect_title)) if anchor_match[2].nil? sect_title = anchor_match[1] sect_id = anchor_match[3] sect_reftext = anchor_match[4] end end elsif Compliance.underline_style_section_titles if (line2 = reader.peek_line(true)) && SECTION_LEVELS.has_key?(line2.chr) && line2 =~ SetextSectionLineRx && (name_match = SetextSectionTitleRx.match(line1)) && # chomp so that a (non-visible) endline does not impact calculation (line_length(line1) - line_length(line2)).abs <= 1 sect_title = name_match[1] if sect_title.end_with?(']]') && (anchor_match = InlineSectionAnchorRx.match(sect_title)) if anchor_match[2].nil? sect_title = anchor_match[1] sect_id = anchor_match[3] sect_reftext = anchor_match[4] end end sect_level = section_level line2 single_line = false reader.advance end end if sect_level >= 0 sect_level += document.attr('leveloffset', 0).to_i end [sect_id, sect_reftext, sect_title, sect_level, single_line] end # Public: Calculate the number of unicode characters in the line, excluding the endline # # line - the String to calculate # # returns the number of unicode characters in the line def self.line_length(line) FORCE_UNICODE_LINE_LENGTH ? line.scan(UnicodeCharScanRx).length : line.length end # Public: Consume and parse the two header lines (line 1 = author info, line 2 = revision info). # # Returns the Hash of header metadata. If a Document object is supplied, the metadata # is applied directly to the attributes of the Document. # # reader - the Reader holding the source lines of the document # document - the Document we are building (default: nil) # # Examples # # data = ["Author Name \n", "v1.0, 2012-12-21: Coincide w/ end of world.\n"] # parse_header_metadata(Reader.new data, nil, :normalize => true) # # => {'author' => 'Author Name', 'firstname' => 'Author', 'lastname' => 'Name', 'email' => 'author@example.org', # # 'revnumber' => '1.0', 'revdate' => '2012-12-21', 'revremark' => 'Coincide w/ end of world.'} def self.parse_header_metadata(reader, document = nil) # NOTE this will discard away any comment lines, but not skip blank lines process_attribute_entries(reader, document) metadata = {} implicit_author = nil implicit_authors = nil if reader.has_more_lines? && !reader.next_line_empty? author_metadata = process_authors reader.read_line unless author_metadata.empty? 
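      # For illustration (hypothetical author line): given the implicit author line
      #
      #   Doc Writer <doc.writer@example.org>
      #
      # the author_metadata Hash applied below includes entries such as
      # 'author' => 'Doc Writer', 'firstname' => 'Doc', 'lastname' => 'Writer',
      # 'authorinitials' => 'DW' and 'email' => 'doc.writer@example.org'.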
if document # apply header subs and assign to document author_metadata.each do |key, val| unless document.attributes.has_key? key document.attributes[key] = ::String === val ? (document.apply_header_subs val) : val end end implicit_author = document.attributes['author'] implicit_authors = document.attributes['authors'] end metadata = author_metadata end # NOTE this will discard any comment lines, but not skip blank lines process_attribute_entries(reader, document) rev_metadata = {} if reader.has_more_lines? && !reader.next_line_empty? rev_line = reader.read_line if (match = RevisionInfoLineRx.match(rev_line)) rev_metadata['revnumber'] = match[1].rstrip if match[1] unless (component = match[2].strip).empty? # version must begin with 'v' if date is absent if !match[1] && (component.start_with? 'v') rev_metadata['revnumber'] = component[1..-1] else rev_metadata['revdate'] = component end end rev_metadata['revremark'] = match[3].rstrip if match[3] else # throw it back reader.unshift_line rev_line end end unless rev_metadata.empty? if document # apply header subs and assign to document rev_metadata.each do |key, val| unless document.attributes.has_key? key document.attributes[key] = document.apply_header_subs(val) end end end metadata.update rev_metadata end # NOTE this will discard any comment lines, but not skip blank lines process_attribute_entries(reader, document) reader.skip_blank_lines end if document # process author attribute entries that override (or stand in for) the implicit author line author_metadata = nil if document.attributes.has_key?('author') && (author_line = document.attributes['author']) != implicit_author # do not allow multiple, process as names only author_metadata = process_authors author_line, true, false elsif document.attributes.has_key?('authors') && (author_line = document.attributes['authors']) != implicit_authors # allow multiple, process as names only author_metadata = process_authors author_line, true else authors = [] author_key = %(author_#{authors.size + 1}) while document.attributes.has_key? author_key authors << document.attributes[author_key] author_key = %(author_#{authors.size + 1}) end if authors.size == 1 # do not allow multiple, process as names only author_metadata = process_authors authors[0], true, false elsif authors.size > 1 # allow multiple, process as names only author_metadata = process_authors authors.join('; '), true end end if author_metadata document.attributes.update author_metadata # special case if !document.attributes.has_key?('email') && document.attributes.has_key?('email_1') document.attributes['email'] = document.attributes['email_1'] end end end metadata end # Internal: Parse the author line into a Hash of author metadata # # author_line - the String author line # names_only - a Boolean flag that indicates whether to process line as # names only or names with emails (default: false) # multiple - a Boolean flag that indicates whether to process multiple # semicolon-separated entries in the author line (default: true) # # returns a Hash of author metadata def self.process_authors(author_line, names_only = false, multiple = true) author_metadata = {} keys = ['author', 'authorinitials', 'firstname', 'middlename', 'lastname', 'email'] author_entries = multiple ? (author_line.split ';').map {|line| line.strip } : [author_line] author_entries.each_with_index do |author_entry, idx| next if author_entry.empty? 
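      # For illustration (hypothetical names): with multiple = true, an author line such as
      #
      #   Doc Writer <doc@example.org>; Jane Doe <jane@example.org>
      #
      # is split on ';' and each entry is processed in this loop; the second author's
      # attributes are suffixed with _2 (author_2, firstname_2, email_2, and so on).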
key_map = {} if idx == 0 keys.each do |key| key_map[key.to_sym] = key end else keys.each do |key| key_map[key.to_sym] = %(#{key}_#{idx + 1}) end end segments = nil if names_only # splitting on ' ' collapses repeating spaces uniformly # `split ' ', 3` causes odd behavior in Opal; see https://github.com/asciidoctor/asciidoctor.js/issues/159 if (segments = author_entry.split ' ').size > 3 segments = segments[0..1].push(segments[2..-1].join ' ') end elsif (match = AuthorInfoLineRx.match(author_entry)) segments = match.to_a segments.shift end unless segments.nil? author_metadata[key_map[:firstname]] = fname = segments[0].tr('_', ' ') author_metadata[key_map[:author]] = fname author_metadata[key_map[:authorinitials]] = fname[0, 1] if !segments[1].nil? && !segments[2].nil? author_metadata[key_map[:middlename]] = mname = segments[1].tr('_', ' ') author_metadata[key_map[:lastname]] = lname = segments[2].tr('_', ' ') author_metadata[key_map[:author]] = [fname, mname, lname].join ' ' author_metadata[key_map[:authorinitials]] = [fname[0, 1], mname[0, 1], lname[0, 1]].join elsif !segments[1].nil? author_metadata[key_map[:lastname]] = lname = segments[1].tr('_', ' ') author_metadata[key_map[:author]] = [fname, lname].join ' ' author_metadata[key_map[:authorinitials]] = [fname[0, 1], lname[0, 1]].join end author_metadata[key_map[:email]] = segments[3] unless names_only || segments[3].nil? else author_metadata[key_map[:author]] = author_metadata[key_map[:firstname]] = fname = author_entry.strip.tr_s(' ', ' ') author_metadata[key_map[:authorinitials]] = fname[0, 1] end author_metadata['authorcount'] = idx + 1 # only assign the _1 attributes if there are multiple authors if idx == 1 keys.each do |key| author_metadata[%(#{key}_1)] = author_metadata[key] if author_metadata.has_key? key end end if idx == 0 author_metadata['authors'] = author_metadata[key_map[:author]] else author_metadata['authors'] = %(#{author_metadata['authors']}, #{author_metadata[key_map[:author]]}) end end author_metadata end # Internal: Parse lines of metadata until a line of metadata is not found. # # This method processes sequential lines containing block metadata, ignoring # blank lines and comments. # # reader - the source reader # parent - the parent to which the lines belong # attributes - a Hash of attributes in which any metadata found will be stored (default: {}) # options - a Hash of options to control processing: (default: {}) # * :text indicates that lexer is only looking for text content # and thus the block title should not be captured # # returns the Hash of attributes including any metadata found def self.parse_block_metadata_lines(reader, parent, attributes = {}, options = {}) while parse_block_metadata_line(reader, parent, attributes, options) # discard the line just processed reader.advance reader.skip_blank_lines end attributes end # Internal: Parse the next line if it contains metadata for the following block # # This method handles lines with the following content: # # * line or block comment # * anchor # * attribute list # * block title # # Any attributes found will be inserted into the attributes argument. # If the line contains block metadata, the method returns true, otherwise false. 
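  # For example, each of these lines would be treated as block metadata for the
  # block that follows:
  #
  #   // a line comment
  #   [[install,Installation]]
  #   [source,ruby]
  #   .A Block Title
  #   :experimental: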
# # reader - the source reader # parent - the parent of the current line # attributes - a Hash of attributes in which any metadata found will be stored # options - a Hash of options to control processing: (default: {}) # * :text indicates that lexer is only looking for text content # and thus the block title should not be captured # # returns true if the line contains metadata, otherwise false def self.parse_block_metadata_line(reader, parent, attributes, options = {}) return false unless reader.has_more_lines? next_line = reader.peek_line if (commentish = next_line.start_with?('//')) && (match = CommentBlockRx.match(next_line)) terminator = match[0] reader.read_lines_until(:skip_first_line => true, :preserve_last_line => true, :terminator => terminator, :skip_processing => true) elsif commentish && CommentLineRx =~ next_line # do nothing, we'll skip it elsif !options[:text] && next_line.start_with?(':') && (match = AttributeEntryRx.match(next_line)) process_attribute_entry(reader, parent, attributes, match) elsif (in_square_brackets = next_line.start_with?('[') && next_line.end_with?(']')) && (match = BlockAnchorRx.match(next_line)) unless match[1].nil_or_empty? attributes['id'] = match[1] # AsciiDoc always uses [id] as the reftext in HTML output, # but I'd like to do better in Asciidoctor # registration is deferred until the block or section is processed attributes['reftext'] = match[2] unless match[2].nil? end elsif in_square_brackets && (match = BlockAttributeListRx.match(next_line)) parent.document.parse_attributes(match[1], [], :sub_input => true, :into => attributes) # NOTE title doesn't apply to section, but we need to stash it for the first block # TODO should issue an error if this is found above the document title elsif !options[:text] && (match = BlockTitleRx.match(next_line)) attributes['title'] = match[1] else return false end true end def self.process_attribute_entries(reader, parent, attributes = nil) reader.skip_comment_lines while process_attribute_entry(reader, parent, attributes) # discard line just processed reader.advance reader.skip_comment_lines end end def self.process_attribute_entry(reader, parent, attributes = nil, match = nil) match ||= (reader.has_more_lines? ? AttributeEntryRx.match(reader.peek_line) : nil) if match name = match[1] unless (value = match[2] || '').empty? if value.end_with?(line_continuation = LINE_CONTINUATION) || value.end_with?(line_continuation = LINE_CONTINUATION_LEGACY) value = value.chop.rstrip while reader.advance break if (next_line = reader.peek_line.strip).empty? if (keep_open = next_line.end_with? line_continuation) next_line = next_line.chop.rstrip end separator = (value.end_with? LINE_BREAK) ? EOL : ' ' value = %(#{value}#{separator}#{next_line}) break unless keep_open end end end store_attribute(name, value, (parent ? 
parent.document : nil), attributes) true else false end end # Public: Store the attribute in the document and register attribute entry if accessible # # name - the String name of the attribute to store # value - the String value of the attribute to store # doc - the Document being parsed # attrs - the attributes for the current context # # returns a 2-element array containing the attribute name and value def self.store_attribute(name, value, doc = nil, attrs = nil) # TODO move processing of attribute value to utility method if name.end_with?('!') # a nil value signals the attribute should be deleted (undefined) value = nil name = name.chop elsif name.start_with?('!') # a nil value signals the attribute should be deleted (undefined) value = nil name = name[1..-1] end name = sanitize_attribute_name(name) accessible = true if doc # alias numbered attribute to sectnums if name == 'numbered' name = 'sectnums' # support relative leveloffset values elsif name == 'leveloffset' if value case value.chr when '+' value = ((doc.attr 'leveloffset', 0).to_i + (value[1..-1] || 0).to_i).to_s when '-' value = ((doc.attr 'leveloffset', 0).to_i - (value[1..-1] || 0).to_i).to_s end end end accessible = value ? doc.set_attribute(name, value) : doc.delete_attribute(name) end if accessible && attrs # NOTE lookup resolved value (resolution occurs inside set_attribute) value = doc.attributes[name] if value Document::AttributeEntry.new(name, value).save_to(attrs) end [name, value] end # Internal: Resolve the 0-index marker for this list item # # For ordered lists, match the marker used for this list item against the # known list markers and determine which marker is the first (0-index) marker # in its number series. # # For callout lists, return <1>. # # For bulleted lists, return the marker as passed to this method. # # list_type - The Symbol context of the list # marker - The String marker for this list item # ordinal - The position of this list item in the list # validate - Whether to validate the value of the marker # # Returns the String 0-index marker for this list item def self.resolve_list_marker(list_type, marker, ordinal = 0, validate = false, reader = nil) if list_type == :olist && !marker.start_with?('.') resolve_ordered_list_marker(marker, ordinal, validate, reader) elsif list_type == :colist '<1>' else marker end end # Internal: Resolve the 0-index marker for this ordered list item # # Match the marker used for this ordered list item against the # known ordered list markers and determine which marker is # the first (0-index) marker in its number series. # # The purpose of this method is to normalize the implicit numbered markers # so that they can be compared against other list items. # # marker - The marker used for this list item # ordinal - The 0-based index of the list item (default: 0) # validate - Perform validation that the marker provided is the proper # marker in the sequence (default: false) # # Examples # # marker = 'B.' # Parser.resolve_ordered_list_marker(marker, 1, true) # # => 'A.' # # Returns the String of the first marker in this number series def self.resolve_ordered_list_marker(marker, ordinal = 0, validate = false, reader = nil) number_style = ORDERED_LIST_STYLES.find {|s| OrderedListMarkerRxMap[s] =~ marker } expected = actual = nil case number_style when :arabic if validate expected = ordinal + 1 actual = marker.to_i end marker = '1.' when :loweralpha if validate expected = ('a'[0].ord + ordinal).chr actual = marker.chomp('.') end marker = 'a.' 
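    # upperalpha markers (A., B., C., ...) are validated against the ASCII ordinal and normalized to 'A.'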
when :upperalpha if validate expected = ('A'[0].ord + ordinal).chr actual = marker.chomp('.') end marker = 'A.' when :lowerroman if validate # TODO report this in roman numerals; see https://github.com/jamesshipton/roman-numeral/blob/master/lib/roman_numeral.rb expected = ordinal + 1 actual = roman_numeral_to_int(marker.chomp(')')) end marker = 'i)' when :upperroman if validate # TODO report this in roman numerals; see https://github.com/jamesshipton/roman-numeral/blob/master/lib/roman_numeral.rb expected = ordinal + 1 actual = roman_numeral_to_int(marker.chomp(')')) end marker = 'I)' end if validate && expected != actual warn %(asciidoctor: WARNING: #{reader.line_info}: list item index: expected #{expected}, got #{actual}) end marker end # Internal: Determine whether the this line is a sibling list item # according to the list type and trait (marker) provided. # # line - The String line to check # list_type - The context of the list (:olist, :ulist, :colist, :dlist) # sibling_trait - The String marker for the list or the Regexp to match a sibling # # Returns a Boolean indicating whether this line is a sibling list item given # the criteria provided def self.is_sibling_list_item?(line, list_type, sibling_trait) if ::Regexp === sibling_trait matcher = sibling_trait expected_marker = false else matcher = ListRxMap[list_type] expected_marker = sibling_trait end if (m = matcher.match(line)) if expected_marker expected_marker == resolve_list_marker(list_type, m[1]) else true end else false end end # Internal: Parse the table contained in the provided Reader # # table_reader - a Reader containing the source lines of an AsciiDoc table # parent - the parent Block of this Asciidoctor::Table # attributes - attributes captured from above this Block # # returns an instance of Asciidoctor::Table parsed from the provided reader def self.next_table(table_reader, parent, attributes) table = Table.new(parent, attributes) if (attributes.has_key? 'title') table.title = attributes.delete 'title' table.assign_caption attributes.delete('caption') end if (attributes.key? 'cols') && !(colspecs = parse_colspecs attributes['cols']).empty? table.create_columns colspecs explicit_colspecs = true else explicit_colspecs = false end skipped = table_reader.skip_blank_lines parser_ctx = Table::ParserContext.new(table_reader, table, attributes) skip_implicit_header = (attributes.key? 'header-option') || (attributes.key? 'noheader-option') loop_idx = -1 while table_reader.has_more_lines? loop_idx += 1 line = table_reader.read_line if !skip_implicit_header && skipped == 0 && loop_idx == 0 && !(next_line = table_reader.peek_line).nil? && next_line.empty? table.has_header_option = true attributes['header-option'] = '' attributes['options'] = (attributes.key? 'options') ? %(#{attributes['options']},header) : 'header' end if parser_ctx.format == 'psv' if parser_ctx.starts_with_delimiter? line line = line[1..-1] # push an empty cell spec if boundary at start of line parser_ctx.close_open_cell else next_cellspec, line = parse_cellspec(line, :start, parser_ctx.delimiter) # if the cell spec is not null, then we're at a cell boundary if !next_cellspec.nil? parser_ctx.close_open_cell next_cellspec else # QUESTION do we not advance to next line? if so, when will we if we came into this block? end end end seen = false while !seen || !line.empty? 
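        # Scan the current line one delimiter at a time: each delimiter match closes the
        # open cell, while an escaped delimiter (psv/dsv) or an unclosed quote (csv) keeps
        # the cell buffer open so the remainder is carried over to the next source line.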
seen = true if (m = parser_ctx.match_delimiter(line)) if parser_ctx.format == 'csv' if parser_ctx.buffer_has_unclosed_quotes?(m.pre_match) # throw it back, it's too small line = parser_ctx.skip_matched_delimiter(m) next end else if m.pre_match.end_with? '\\' # skip over escaped delimiter # handle special case when end of line is reached (see issue #1306) if (line = parser_ctx.skip_matched_delimiter(m, true)).empty? parser_ctx.buffer = %(#{parser_ctx.buffer}#{EOL}) parser_ctx.keep_cell_open break end next end end if parser_ctx.format == 'psv' next_cellspec, cell_text = parse_cellspec(m.pre_match, :end) parser_ctx.push_cellspec next_cellspec parser_ctx.buffer = %(#{parser_ctx.buffer}#{cell_text}) else parser_ctx.buffer = %(#{parser_ctx.buffer}#{m.pre_match}) end if (line = m.post_match).empty? # hack to prevent dropping empty cell found at end of line (see issue #1106) seen = false end parser_ctx.close_cell else # no other delimiters to see here # suck up this line into the buffer and move on parser_ctx.buffer = %(#{parser_ctx.buffer}#{line}#{EOL}) # QUESTION make stripping endlines in csv data an option? (unwrap-option?) if parser_ctx.format == 'csv' parser_ctx.buffer = %(#{parser_ctx.buffer.rstrip} ) end line = '' if parser_ctx.format == 'psv' || (parser_ctx.format == 'csv' && parser_ctx.buffer_has_unclosed_quotes?) parser_ctx.keep_cell_open else parser_ctx.close_cell true end end end skipped = table_reader.skip_blank_lines unless parser_ctx.cell_open? unless table_reader.has_more_lines? # NOTE may have already closed cell in csv or dsv table (see previous call to parser_ctx.close_cell(true)) parser_ctx.close_cell true if parser_ctx.cell_open? end end unless (table.attributes['colcount'] ||= table.columns.size) == 0 || explicit_colspecs table.assign_column_widths end table.partition_header_footer attributes table end # Internal: Parse the column specs for this table. # # The column specs dictate the number of columns, relative # width of columns, default alignments for cells in each # column, and/or default styles or filters applied to the cells in # the column. # # Every column spec is guaranteed to have a width # # returns a Hash of attributes that specify how to format # and layout the cells in the table. def self.parse_colspecs records records = records.tr ' ', '' if records.include? ' ' # check for deprecated syntax: single number, equal column spread if records == records.to_i.to_s return ::Array.new(records.to_i) { { 'width' => 1 } } end specs = [] # NOTE -1 argument ensures we don't drop empty records records.split(',', -1).each {|record| if record.empty? specs << { 'width' => 1 } # TODO might want to use scan rather than this mega-regexp elsif (m = ColumnSpecRx.match(record)) spec = {} if m[2] # make this an operation colspec, rowspec = m[2].split '.' if !colspec.nil_or_empty? && Table::ALIGNMENTS[:h].has_key?(colspec) spec['halign'] = Table::ALIGNMENTS[:h][colspec] end if !rowspec.nil_or_empty? && Table::ALIGNMENTS[:v].has_key?(rowspec) spec['valign'] = Table::ALIGNMENTS[:v][rowspec] end end # to_i permits us to support percentage width by stripping the % # NOTE this is slightly out of compliance w/ AsciiDoc, but makes way more sense spec['width'] = (m[3] ? m[3].to_i : 1) # make this an operation if m[4] && Table::TEXT_STYLES.has_key?(m[4]) spec['style'] = Table::TEXT_STYLES[m[4]] end if m[1] 1.upto(m[1].to_i) { specs << spec.dup } else specs << spec end end } specs end # Internal: Parse the cell specs for the current cell. 
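  #
  # For example (an illustrative spec, not taken from the original source), the
  # shorthand 2.3+^.>s placed before a cell delimiter requests a colspan of 2, a
  # rowspan of 3, center horizontal alignment, bottom vertical alignment and the
  # strong (s) style.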
# # The cell specs dictate the cell's alignments, styles or filters, # colspan, rowspan and/or repeating content. # # The default spec when pos == :end is {} since we already know we're at a # delimiter. When pos == :start, we *may* be at a delimiter, nil indicates # we're not. # # returns the Hash of attributes that indicate how to layout # and style this cell in the table. def self.parse_cellspec(line, pos = :start, delimiter = nil) m = nil rest = '' case pos when :start if line.include? delimiter spec_part, rest = line.split delimiter, 2 if (m = CellSpecStartRx.match spec_part) return [{}, rest] if m[0].empty? else return [nil, line] end else return [nil, line] end when :end if (m = CellSpecEndRx.match line) # NOTE return the line stripped of trailing whitespace if no cellspec is found in this case return [{}, line.rstrip] if m[0].lstrip.empty? rest = m.pre_match else return [{}, line] end end spec = {} if m[1] colspec, rowspec = m[1].split '.' colspec = colspec.nil_or_empty? ? 1 : colspec.to_i rowspec = rowspec.nil_or_empty? ? 1 : rowspec.to_i if m[2] == '+' spec['colspan'] = colspec unless colspec == 1 spec['rowspan'] = rowspec unless rowspec == 1 elsif m[2] == '*' spec['repeatcol'] = colspec unless colspec == 1 end end if m[3] colspec, rowspec = m[3].split '.' if !colspec.nil_or_empty? && Table::ALIGNMENTS[:h].has_key?(colspec) spec['halign'] = Table::ALIGNMENTS[:h][colspec] end if !rowspec.nil_or_empty? && Table::ALIGNMENTS[:v].has_key?(rowspec) spec['valign'] = Table::ALIGNMENTS[:v][rowspec] end end if m[4] && Table::TEXT_STYLES.has_key?(m[4]) spec['style'] = Table::TEXT_STYLES[m[4]] end [spec, rest] end # Public: Parse the first positional attribute and assign named attributes # # Parse the first positional attribute to extract the style, role and id # parts, assign the values to their cooresponding attribute keys and return # both the original style attribute and the parsed value from the first # positional attribute. # # attributes - The Hash of attributes to process and update # # Examples # # puts attributes # => {1 => "abstract#intro.lead%fragment", "style" => "preamble"} # # parse_style_attribute(attributes) # => ["abstract", "preamble"] # # puts attributes # => {1 => "abstract#intro.lead", "style" => "abstract", "id" => "intro", # "role" => "lead", "options" => ["fragment"], "fragment-option" => ''} # # Returns a two-element Array of the parsed style from the # first positional attribute and the original style that was # replaced def self.parse_style_attribute(attributes, reader = nil) original_style = attributes['style'] raw_style = attributes[1] # NOTE spaces are not allowed in shorthand, so if we find one, this ain't shorthand if raw_style && !raw_style.include?(' ') && Compliance.shorthand_property_syntax type = :style collector = [] parsed = {} # QUESTION should this be a private method? (though, it's never called if shorthand isn't used) save_current = lambda { if collector.empty? if type != :style warn %(asciidoctor: WARNING:#{reader.nil? ? nil : " #{reader.prev_line_info}:"} invalid empty #{type} detected in style attribute) end else case type when :role, :option parsed[type] ||= [] parsed[type].push collector.join when :id if parsed.has_key? :id warn %(asciidoctor: WARNING:#{reader.nil? ? nil : " #{reader.prev_line_info}:"} multiple ids detected in style attribute) end parsed[type] = collector.join else parsed[type] = collector.join end collector = [] end } raw_style.each_char do |c| if c == '.' || c == '#' || c == '%' save_current.call case c when '.' 
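            # a '.' in the shorthand starts a role, just as '#' starts an id and '%' an option below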
type = :role when '#' type = :id when '%' type = :option end else collector.push c end end # small optimization if no shorthand is found if type == :style parsed_style = attributes['style'] = raw_style else save_current.call if parsed.has_key? :style parsed_style = attributes['style'] = parsed[:style] else parsed_style = nil end if parsed.has_key? :id attributes['id'] = parsed[:id] end if parsed.has_key? :role attributes['role'] = parsed[:role] * ' ' end if parsed.has_key? :option (options = parsed[:option]).each do |option| attributes[%(#{option}-option)] = '' end if (existing_opts = attributes['options']) attributes['options'] = (options + existing_opts.split(',')) * ',' else attributes['options'] = options * ',' end end end [parsed_style, original_style] else attributes['style'] = raw_style [raw_style, original_style] end end # Remove the block indentation (the leading whitespace equal to the amount of # leading whitespace of the least indented line), then replace tabs with # spaces (using proper tab expansion logic) and, finally, indent the lines by # the amount specified. # # This method preserves the relative indentation of the lines. # # lines - the Array of String lines to process (no trailing endlines) # indent - the integer number of spaces to add to the beginning # of each line; if this value is nil, the existing # space is preserved (optional, default: 0) # # Examples # # source = < [" def names", " @names.split ' '", " end"] # # puts Parser.adjust_indentation!(source.split "\n") * "\n" # # => def names # # => @names.split ' ' # # => end # # returns Nothing #-- # QUESTION should indent be called margin? def self.adjust_indentation! lines, indent = 0, tab_size = 0 return if lines.empty? # expand tabs if a tab is detected unless tab_size is nil if (tab_size = tab_size.to_i) > 0 && (lines.join.include? TAB) #if (tab_size = tab_size.to_i) > 0 && (lines.index {|line| line.include? TAB }) full_tab_space = ' ' * tab_size lines.map! do |line| next line if line.empty? # NOTE Opal has to patch this use of sub! line.sub!(TabIndentRx) {|tabs| full_tab_space * tabs.length } if line.start_with? TAB if line.include? TAB # keeps track of how many spaces were added to adjust offset in match data spaces_added = 0 # NOTE Opal has to patch this use of gsub! line.gsub!(TabRx) { # calculate how many spaces this tab represents, then replace tab with spaces if (offset = ($~.begin 0) + spaces_added) % tab_size == 0 spaces_added += (tab_size - 1) full_tab_space else unless (spaces = tab_size - offset % tab_size) == 1 spaces_added += (spaces - 1) end ' ' * spaces end } else line end end end # skip adjustment of gutter if indent is -1 return unless indent && (indent = indent.to_i) > -1 # determine width of gutter gutter_width = nil lines.each do |line| next if line.empty? # NOTE this logic assumes no whitespace-only lines if (line_indent = line.length - line.lstrip.length) == 0 gutter_width = nil break else unless gutter_width && line_indent > gutter_width gutter_width = line_indent end end end # remove gutter then apply new indent if specified # NOTE gutter_width is > 0 if not nil if indent == 0 if gutter_width lines.map! {|line| line.empty? ? line : line[gutter_width..-1] } end else padding = ' ' * indent if gutter_width lines.map! {|line| line.empty? ? line : padding + line[gutter_width..-1] } else lines.map! {|line| line.empty? ? line : padding + line } end end nil end # Public: Convert a string to a legal attribute name. 
# # name - the String name of the attribute # # Returns a String with the legal AsciiDoc attribute name. # # Examples # # sanitize_attribute_name('Foo Bar') # => 'foobar' # # sanitize_attribute_name('foo') # => 'foo' # # sanitize_attribute_name('Foo 3 #-Billy') # => 'foo3-billy' def self.sanitize_attribute_name(name) name.gsub(InvalidAttributeNameCharsRx, '').downcase end # Internal: Converts a Roman numeral to an integer value. # # value - The String Roman numeral to convert # # Returns the Integer for this Roman numeral def self.roman_numeral_to_int(value) value = value.downcase digits = { 'i' => 1, 'v' => 5, 'x' => 10 } result = 0 (0..value.length - 1).each {|i| digit = digits[value[i..i]] if i + 1 < value.length && digits[value[i+1..i+1]] > digit result -= digit else result += digit end } result end end end asciidoctor-1.5.5/lib/asciidoctor/path_resolver.rb000066400000000000000000000371621277513741400223240ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor # Public: Handles all operations for resolving, cleaning and joining paths. # This class includes operations for handling both web paths (request URIs) and # system paths. # # The main emphasis of the class is on creating clean and secure paths. Clean # paths are void of duplicate parent and current directory references in the # path name. Secure paths are paths which are restricted from accessing # directories outside of a jail root, if specified. # # Since joining two paths can result in an insecure path, this class also # handles the task of joining a parent (start) and child (target) path. # # This class makes no use of path utilities from the Ruby libraries. Instead, # it handles all aspects of path manipulation. The main benefit of # internalizing these operations is that the class is able to handle both posix # and windows paths independent of the operating system on which it runs. This # makes the class both deterministic and easier to test. 
# # Examples # # resolver = PathResolver.new # # # Web Paths # # resolver.web_path('images') # => 'images' # # resolver.web_path('./images') # => './images' # # resolver.web_path('/images') # => '/images' # # resolver.web_path('./images/../assets/images') # => './assets/images' # # resolver.web_path('/../images') # => '/images' # # resolver.web_path('images', 'assets') # => 'assets/images' # # resolver.web_path('tiger.png', '../assets/images') # => '../assets/images/tiger.png' # # # System Paths # # resolver.working_dir # => '/path/to/docs' # # resolver.system_path('images') # => '/path/to/docs/images' # # resolver.system_path('../images') # => '/path/to/images' # # resolver.system_path('/etc/images') # => '/etc/images' # # resolver.system_path('images', '/etc') # => '/etc/images' # # resolver.system_path('', '/etc/images') # => '/etc/images' # # resolver.system_path(nil, nil, '/path/to/docs') # => '/path/to/docs' # # resolver.system_path('..', nil, '/path/to/docs') # => '/path/to/docs' # # resolver.system_path('../../../css', nil, '/path/to/docs') # => '/path/to/docs/css' # # resolver.system_path('../../../css', '../../..', '/path/to/docs') # => '/path/to/docs/css' # # resolver.system_path('..', 'C:\\data\\docs\\assets', 'C:\\data\\docs') # => 'C:/data/docs' # # resolver.system_path('..\\..\\css', 'C:\\data\\docs\\assets', 'C:\\data\\docs') # => 'C:/data/docs/css' # # begin # resolver.system_path('../../../css', '../../..', '/path/to/docs', :recover => false) # rescue SecurityError => e # puts e.message # end # => 'path ../../../../../../css refers to location outside jail: /path/to/docs (disallowed in safe mode)' # # resolver.system_path('/path/to/docs/images', nil, '/path/to/docs') # => '/path/to/docs/images' # # begin # resolver.system_path('images', '/etc', '/path/to/docs') # rescue SecurityError => e # puts e.message # end # => Start path /etc is outside of jail: /path/to/docs' # class PathResolver DOT = '.' DOT_DOT = '..' DOT_SLASH = './' SLASH = '/' BACKSLASH = '\\' DOUBLE_SLASH = '//' WindowsRootRx = /^[a-zA-Z]:(?:\\|\/)/ attr_accessor :file_separator attr_accessor :working_dir # Public: Construct a new instance of PathResolver, optionally specifying the # file separator (to override the system default) and the working directory # (to override the present working directory). The working directory will be # expanded to an absolute path inside the constructor. # # file_separator - the String file separator to use for path operations # (optional, default: File::SEPARATOR) # working_dir - the String working directory (optional, default: Dir.pwd) # def initialize file_separator = nil, working_dir = nil @file_separator = file_separator ? file_separator : (::File::ALT_SEPARATOR || ::File::SEPARATOR) if working_dir @working_dir = (is_root? working_dir) ? working_dir : (::File.expand_path working_dir) else @working_dir = ::File.expand_path ::Dir.pwd end @_partition_path_sys = {} @_partition_path_web = {} end # Public: Check if the specified path is an absolute root path # This operation correctly handles both posix and windows paths. # # path - the String path to check # # returns a Boolean indicating whether the path is an absolute root path def is_root? path # Unix absolute paths and UNC paths start with slash if path.start_with? SLASH true # Windows roots can begin with drive letter elsif @file_separator == BACKSLASH && WindowsRootRx =~ path true # Absolute paths in the browser start with file:/// elsif ::RUBY_ENGINE_OPAL && ::JAVASCRIPT_PLATFORM == 'browser' && (path.start_with? 
'file:///') true else false end end # Public: Determine if the path is a UNC (root) path # # path - the String path to check # # returns a Boolean indicating whether the path is a UNC path def is_unc? path path.start_with? DOUBLE_SLASH end # Public: Determine if the path is an absolute (root) web path # # path - the String path to check # # returns a Boolean indicating whether the path is an absolute (root) web path def is_web_root? path path.start_with? SLASH end # Public: Normalize path by converting any backslashes to forward slashes # # path - the String path to normalize # # returns a String path with any backslashes replaced with forward slashes def posixfy path if path.nil_or_empty? '' elsif path.include? BACKSLASH path.tr BACKSLASH, SLASH else path end end # Public: Expand the path by resolving any parent references (..) # and cleaning self references (.). # # The result will be relative if the path is relative and # absolute if the path is absolute. The file separator used # in the expanded path is the one specified when the class # was constructed. # # path - the String path to expand # # returns a String path with any parent or self references resolved. def expand_path path path_segments, path_root, _ = partition_path path join_path path_segments, path_root end # Public: Partition the path into path segments and remove any empty segments # or segments that are self references (.). The path is converted to a posix # path before being partitioned. # # path - the String path to partition # web_path - a Boolean indicating whether the path should be handled # as a web path (optional, default: false) # # Returns a 3-item Array containing the Array of String path segments, the # path root (e.g., '/', './', 'c:/') if the path is absolute and the posix # version of the path. #-- # QUESTION is it worth it to normalize slashes? it doubles the time elapsed def partition_path path, web_path = false if (result = web_path ? @_partition_path_web[path] : @_partition_path_sys[path]) return result end posix_path = posixfy path root = if web_path # ex. /sample/path if is_web_root? posix_path SLASH # ex. ./sample/path elsif posix_path.start_with? DOT_SLASH DOT_SLASH # ex. sample/path else nil end else if is_root? posix_path # ex. //sample/path if is_unc? posix_path DOUBLE_SLASH # ex. /sample/path elsif posix_path.start_with? SLASH SLASH # ex. c:/sample/path (or file:///sample/path in browser environment) else posix_path[0..(posix_path.index SLASH)] end # ex. ./sample/path elsif posix_path.start_with? DOT_SLASH DOT_SLASH # ex. sample/path else nil end end path_segments = posix_path.split SLASH # shift twice for a UNC path if root == DOUBLE_SLASH path_segments = path_segments[2..-1] # shift twice for a file:/// path and adjust root # NOTE technically file:/// paths work without this adjustment #elsif ::RUBY_ENGINE_OPAL && ::JAVASCRIPT_PLATFORM == 'browser' && root == 'file:/' # root = 'file://' # path_segments = path_segments[2..-1] # shift once for any other root elsif root path_segments.shift end # strip out all dot entries path_segments.delete DOT # QUESTION should we chomp trailing /? (we pay a small fraction) #posix_path = posix_path.chomp '/' (web_path ? @_partition_path_web : @_partition_path_sys)[path] = [path_segments, root, posix_path] end # Public: Join the segments using the posix file separator (since Ruby knows # how to work with paths specified this way, regardless of OS). Use the root, # if specified, to construct an absolute path. Otherwise join the segments as # a relative path. 
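  #
  # For example (an illustrative sketch, not part of the original source):
  #
  #   resolver.join_path ['etc', 'images'], '/'
  #   # => '/etc/images'
  #
  #   resolver.join_path ['images', 'tiger.png']
  #   # => 'images/tiger.png'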
# # segments - a String Array of path segments # root - a String path root (optional, default: nil) # # returns a String path formed by joining the segments using the posix file # separator and prepending the root, if specified def join_path segments, root = nil if root %(#{root}#{segments * SLASH}) else segments * SLASH end end # Public: Resolve a system path from the target and start paths. If a jail # path is specified, enforce that the resolved directory is contained within # the jail path. If a jail path is not provided, the resolved path may be # any location on the system. If the resolved path is absolute, use it as is. # If the resolved path is relative, resolve it relative to the working_dir # specified in the constructor. # # target - the String target path # start - the String start (i.e., parent) path # jail - the String jail path to confine the resolved path # opts - an optional Hash of options to control processing (default: {}): # * :recover is used to control whether the processor should auto-recover # when an illegal path is encountered # * :target_name is used in messages to refer to the path being resolved # # returns a String path that joins the target path with the start path with # any parent references resolved and self references removed and enforces # that the resolved path be contained within the jail, if provided def system_path target, start, jail = nil, opts = {} if jail unless is_root? jail raise ::SecurityError, %(Jail is not an absolute path: #{jail}) end jail = posixfy jail end if target.nil_or_empty? target_segments = [] else target_segments, target_root, _ = partition_path target end if target_segments.empty? if start.nil_or_empty? return jail ? jail : @working_dir elsif is_root? start unless jail return expand_path start end else return system_path start, jail, jail, opts end end if target_root && target_root != DOT_SLASH resolved_target = join_path target_segments, target_root # if target is absolute and a sub-directory of jail, or # a jail is not in place, let it slide if !jail || (resolved_target.start_with? jail) return resolved_target end end if start.nil_or_empty? start = jail ? jail : @working_dir elsif is_root? start start = posixfy start else start = system_path start, jail, jail, opts end # both jail and start have been posixfied at this point if jail == start jail_segments, jail_root, _ = partition_path jail start_segments = jail_segments.dup elsif jail unless start.start_with? 
jail raise ::SecurityError, %(#{opts[:target_name] || 'Start path'} #{start} is outside of jail: #{jail} (disallowed in safe mode)) end start_segments, start_root, _ = partition_path start jail_segments, jail_root, _ = partition_path jail # Already checked for this condition #if start_root != jail_root # raise ::SecurityError, %(Jail root #{jail_root} does not match root of #{opts[:target_name] || 'start path'}: #{start_root}) #end else start_segments, start_root, _ = partition_path start jail_root = start_root end resolved_segments = start_segments.dup warned = false target_segments.each do |segment| if segment == DOT_DOT if jail if resolved_segments.length > jail_segments.length resolved_segments.pop elsif !(recover ||= (opts.fetch :recover, true)) raise ::SecurityError, %(#{opts[:target_name] || 'path'} #{target} refers to location outside jail: #{jail} (disallowed in safe mode)) elsif !warned warn %(asciidoctor: WARNING: #{opts[:target_name] || 'path'} has illegal reference to ancestor of jail, auto-recovering) warned = true end else resolved_segments.pop end else resolved_segments.push segment end end join_path resolved_segments, jail_root end # Public: Resolve a web path from the target and start paths. # The main function of this operation is to resolve any parent # references and remove any self references. # # The target is assumed to be a path, not a qualified URI. # That check should happen before this method is invoked. # # target - the String target path # start - the String start (i.e., parent) path # # returns a String path that joins the target path with the # start path with any parent references resolved and self # references removed def web_path target, start = nil target = posixfy target start = posixfy start uri_prefix = nil unless start.nil_or_empty? || (is_web_root? target) target = %(#{start.chomp '/'}#{SLASH}#{target}) if (uri_prefix = Helpers.uri_prefix target) target = target[uri_prefix.length..-1] end end # use this logic instead if we want to normalize target if it contains a URI #unless is_web_root? target # if preserve_uri_target && (uri_prefix = Helpers.uri_prefix target) # target = target[uri_prefix.length..-1] # elsif !start.nil_or_empty? # target = %(#{start}#{SLASH}#{target}) # if (uri_prefix = Helpers.uri_prefix target) # target = target[uri_prefix.length..-1] # end # end #end target_segments, target_root, _ = partition_path target, true resolved_segments = [] target_segments.each do |segment| if segment == DOT_DOT if resolved_segments.empty? resolved_segments << segment unless target_root && target_root != DOT_SLASH elsif resolved_segments[-1] == DOT_DOT resolved_segments << segment else resolved_segments.pop end else resolved_segments << segment # checking for empty would eliminate repeating forward slashes #resolved_segments << segment unless segment.empty? end end if uri_prefix %(#{uri_prefix}#{join_path resolved_segments, target_root}) else join_path resolved_segments, target_root end end # Public: Calculate the relative path to this absolute filename from the specified base directory # # If either the filename or the base_directory are not absolute paths, no work is done. # # filename - An absolute file name as a String # base_directory - An absolute base directory as a String # # Return the relative path String of the filename calculated from the base directory def relative_path filename, base_directory if (is_root? filename) && (is_root? 
base_directory) offset = base_directory.chomp(@file_separator).length + 1 filename[offset..-1] else filename end end end end asciidoctor-1.5.5/lib/asciidoctor/reader.rb000066400000000000000000001153211277513741400207030ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor # Public: Methods for retrieving lines from AsciiDoc source files class Reader class Cursor attr_accessor :file attr_accessor :dir attr_accessor :path attr_accessor :lineno def initialize file, dir = nil, path = nil, lineno = nil @file = file @dir = dir @path = path @lineno = lineno end def line_info %(#{path}: line #{lineno}) end alias :to_s :line_info end attr_reader :file attr_reader :dir attr_reader :path # Public: Get the 1-based offset of the current line. attr_reader :lineno # Public: Get the document source as a String Array of lines. attr_reader :source_lines # Public: Control whether lines are processed using Reader#process_line on first visit (default: true) attr_accessor :process_lines # Public: Initialize the Reader object def initialize data = nil, cursor = nil, opts = {:normalize => false} if !cursor @file = @dir = nil @path = '' @lineno = 1 # IMPORTANT lineno assignment must proceed prepare_lines call! elsif ::String === cursor @file = cursor @dir, @path = ::File.split @file @lineno = 1 # IMPORTANT lineno assignment must proceed prepare_lines call! else @file = cursor.file @dir = cursor.dir @path = cursor.path || '' if @file unless @dir # REVIEW might to look at this assignment closer @dir = ::File.dirname @file @dir = nil if @dir == '.' # right? end unless cursor.path @path = ::File.basename @file end end @lineno = cursor.lineno || 1 # IMPORTANT lineno assignment must proceed prepare_lines call! end @lines = data ? (prepare_lines data, opts) : [] @source_lines = @lines.dup @eof = @lines.empty? @look_ahead = 0 @process_lines = true @unescape_next_line = false end # Internal: Prepare the lines from the provided data # # This method strips whitespace from the end of every line of # the source data and appends a LF (i.e., Unix endline). This # whitespace substitution is very important to how Asciidoctor # works. # # Any leading or trailing blank lines are also removed. # # data - A String Array of input data to be normalized # opts - A Hash of options to control what cleansing is done # # Returns The String lines extracted from the data def prepare_lines data, opts = {} if ::String === data if opts[:normalize] Helpers.normalize_lines_from_string data else data.split EOL end else if opts[:normalize] Helpers.normalize_lines_array data else data.dup end end end # Internal: Processes a previously unvisited line # # By default, this method marks the line as processed # by incrementing the look_ahead counter and returns # the line unmodified. # # Returns The String line the Reader should make available to the next # invocation of Reader#read_line or nil if the Reader should drop the line, # advance to the next line and process it. def process_line line @look_ahead += 1 if @process_lines line end # Public: Check whether there are any lines left to read. # # If a previous call to this method resulted in a value of false, # immediately returned the cached value. Otherwise, delegate to # peek_line to determine if there is a next line available. # # Returns True if there are more lines, False if there are not. def has_more_lines? !(@eof || (@eof = peek_line.nil?)) end # Public: Peek at the next line and check if it's empty (i.e., whitespace only) # # This method Does not consume the line from the stack. 
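  #
  # A hypothetical example (not part of the original source):
  #
  #   reader = Reader.new ['', 'Foo']
  #   reader.next_line_empty?  # => true
  #   reader.read_line         # => ''
  #   reader.next_line_empty?  # => false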
# # Returns True if the there are no more lines or if the next line is empty def next_line_empty? peek_line.nil_or_empty? end # Public: Peek at the next line of source data. Processes the line, if not # already marked as processed, but does not consume it. # # This method will probe the reader for more lines. If there is a next line # that has not previously been visited, the line is passed to the # Reader#process_line method to be initialized. This call gives # sub-classess the opportunity to do preprocessing. If the return value of # the Reader#process_line is nil, the data is assumed to be changed and # Reader#peek_line is invoked again to perform further processing. # # direct - A Boolean flag to bypasses the check for more lines and immediately # returns the first element of the internal @lines Array. (default: false) # # Returns the next line of the source data as a String if there are lines remaining. # Returns nothing if there is no more data. def peek_line direct = false if direct || @look_ahead > 0 @unescape_next_line ? @lines[0][1..-1] : @lines[0] elsif @eof || @lines.empty? @eof = true @look_ahead = 0 nil else # FIXME the problem with this approach is that we aren't # retaining the modified line (hence the @unescape_next_line tweak) # perhaps we need a stack of proxy lines if !(line = process_line @lines[0]) peek_line else line end end end # Public: Peek at the next multiple lines of source data. Processes the lines, if not # already marked as processed, but does not consume them. # # This method delegates to Reader#read_line to process and collect the line, then # restores the lines to the stack before returning them. This allows the lines to # be processed and marked as such so that subsequent reads will not need to process # the lines again. # # num - The Integer number of lines to peek. # direct - A Boolean indicating whether processing should be disabled when reading lines # # Returns A String Array of the next multiple lines of source data, or an empty Array # if there are no more lines in this Reader. def peek_lines num = 1, direct = true old_look_ahead = @look_ahead result = [] num.times do if (line = read_line direct) result << line else break end end unless result.empty? result.reverse_each {|line| unshift line } @look_ahead = old_look_ahead if direct end result end # Public: Get the next line of source data. Consumes the line returned. # # direct - A Boolean flag to bypasses the check for more lines and immediately # returns the first element of the internal @lines Array. (default: false) # # Returns the String of the next line of the source data if data is present. # Returns nothing if there is no more data. def read_line direct = false if direct || @look_ahead > 0 || has_more_lines? shift else nil end end # Public: Get the remaining lines of source data. # # This method calls Reader#read_line repeatedly until all lines are consumed # and returns the lines as a String Array. This method differs from # Reader#lines in that it processes each line in turn, hence triggering # any preprocessors implemented in sub-classes. # # Returns the lines read as a String Array def read_lines lines = [] while has_more_lines? lines << shift end lines end alias :readlines :read_lines # Public: Get the remaining lines of source data joined as a String. # # Delegates to Reader#read_lines, then joins the result. 
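  #
  # A hypothetical example (not part of the original source):
  #
  #   reader = Reader.new ['foo', 'bar']
  #   reader.read
  #   # => "foo\nbar"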
# # Returns the lines read joined as a String def read read_lines * EOL end # Public: Advance to the next line by discarding the line at the front of the stack # # direct - A Boolean flag to bypasses the check for more lines and immediately # returns the first element of the internal @lines Array. (default: true) # # Returns a Boolean indicating whether there was a line to discard. def advance direct = true !!read_line(direct) end # Public: Push the String line onto the beginning of the Array of source data. # # Since this line was (assumed to be) previously retrieved through the # reader, it is marked as seen. # # line_to_restore - the line to restore onto the stack # # Returns nothing. def unshift_line line_to_restore unshift line_to_restore nil end alias :restore_line :unshift_line # Public: Push an Array of lines onto the front of the Array of source data. # # Since these lines were (assumed to be) previously retrieved through the # reader, they are marked as seen. # # Returns nothing. def unshift_lines lines_to_restore # QUESTION is it faster to use unshift(*lines_to_restore)? lines_to_restore.reverse_each {|line| unshift line } nil end alias :restore_lines :unshift_lines # Public: Replace the next line with the specified line. # # Calls Reader#advance to consume the current line, then calls # Reader#unshift to push the replacement onto the top of the # line stack. # # replacement - The String line to put in place of the next line (i.e., the line at the cursor). # # Returns nothing. def replace_next_line replacement advance unshift replacement nil end # deprecated alias :replace_line :replace_next_line # Public: Strip off leading blank lines in the Array of lines. # # Examples # # @lines # => ["", "", "Foo", "Bar", ""] # # skip_blank_lines # => 2 # # @lines # => ["Foo", "Bar", ""] # # Returns an Integer of the number of lines skipped def skip_blank_lines return 0 if eof? num_skipped = 0 # optimized code for shortest execution path while (next_line = peek_line) if next_line.empty? advance num_skipped += 1 else return num_skipped end end num_skipped end # Public: Skip consecutive lines containing line comments and return them. # # Examples # @lines # => ["// foo", "bar"] # # comment_lines = skip_comment_lines # => ["// foo"] # # @lines # => ["bar"] # # Returns the Array of lines that were skipped def skip_comment_lines opts = {} return [] if eof? comment_lines = [] include_blank_lines = opts[:include_blank_lines] while (next_line = peek_line) if include_blank_lines && next_line.empty? comment_lines << shift elsif (commentish = next_line.start_with?('//')) && (match = CommentBlockRx.match(next_line)) comment_lines << shift comment_lines.push(*(read_lines_until(:terminator => match[0], :read_last_line => true, :skip_processing => true))) elsif commentish && CommentLineRx =~ next_line comment_lines << shift else break end end comment_lines end # Public: Skip consecutive lines that are line comments and return them. def skip_line_comments return [] if eof? comment_lines = [] # optimized code for shortest execution path while (next_line = peek_line) if CommentLineRx =~ next_line comment_lines << shift else break end end comment_lines end # Public: Advance to the end of the reader, consuming all remaining lines # # Returns nothing. def terminate @lineno += @lines.size @lines.clear @eof = true @look_ahead = 0 nil end # Public: Check whether this reader is empty (contains no lines) # # Returns true if there are no more lines to peek, otherwise false. def eof? !has_more_lines? end alias :empty? 
:eof? # Public: Return all the lines from `@lines` until we (1) run out them, # (2) find a blank line with :break_on_blank_lines => true, or (3) find # a line for which the given block evals to true. # # options - an optional Hash of processing options: # * :break_on_blank_lines may be used to specify to break on # blank lines # * :skip_first_line may be used to tell the reader to advance # beyond the first line before beginning the scan # * :preserve_last_line may be used to specify that the String # causing the method to stop processing lines should be # pushed back onto the `lines` Array. # * :read_last_line may be used to specify that the String # causing the method to stop processing lines should be # included in the lines being returned # # Returns the Array of lines forming the next segment. # # Examples # # data = [ # "First line\n", # "Second line\n", # "\n", # "Third line\n", # ] # reader = Reader.new data, nil, :normalize => true # # reader.read_lines_until # => ["First line", "Second line"] def read_lines_until options = {} result = [] advance if options[:skip_first_line] if @process_lines && options[:skip_processing] @process_lines = false restore_process_lines = true else restore_process_lines = false end if (terminator = options[:terminator]) break_on_blank_lines = false break_on_list_continuation = false else break_on_blank_lines = options[:break_on_blank_lines] break_on_list_continuation = options[:break_on_list_continuation] end skip_comments = options[:skip_line_comments] line_read = false line_restored = false complete = false while !complete && (line = read_line) complete = while true break true if terminator && line == terminator # QUESTION: can we get away with line.empty? here? break true if break_on_blank_lines && line.empty? if break_on_list_continuation && line_read && line == LIST_CONTINUATION options[:preserve_last_line] = true break true end break true if block_given? && (yield line) break false end if complete if options[:read_last_line] result << line line_read = true end if options[:preserve_last_line] unshift line line_restored = true end else unless skip_comments && line.start_with?('//') && CommentLineRx =~ line result << line line_read = true end end end if restore_process_lines @process_lines = true @look_ahead -= 1 if line_restored && !terminator end result end # Internal: Shift the line off the stack and increment the lineno # # This method can be used directly when you've already called peek_line # and determined that you do, in fact, want to pluck that line off the stack. # # Returns The String line at the top of the stack def shift @lineno += 1 @look_ahead -= 1 unless @look_ahead == 0 @lines.shift end # Internal: Restore the line to the stack and decrement the lineno def unshift line @lineno -= 1 @look_ahead += 1 @eof = false @lines.unshift line end def cursor Cursor.new @file, @dir, @path, @lineno end # Public: Get information about the last line read, including file name and line number. 
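  #
  # For example (illustrative only):
  #
  #   reader.path      # => 'sample.adoc'
  #   reader.lineno    # => 3
  #   reader.line_info # => 'sample.adoc: line 3'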
# # Returns A String summary of the last line read def line_info %(#{@path}: line #{@lineno}) end alias :next_line_info :line_info def prev_line_info %(#{@path}: line #{@lineno - 1}) end # Public: Get a copy of the remaining Array of String lines managed by this Reader # # Returns A copy of the String Array of lines remaining in this Reader def lines @lines.dup end # Public: Get a copy of the remaining lines managed by this Reader joined as a String def string @lines * EOL end # Public: Get the source lines for this Reader joined as a String def source @source_lines * EOL end # Public: Get a summary of this Reader. # # # Returns A string summary of this reader, which contains the path and line information def to_s line_info end end # Public: Methods for retrieving lines from AsciiDoc source files, evaluating preprocessor # directives as each line is read off the Array of lines. class PreprocessorReader < Reader attr_reader :include_stack attr_reader :includes # Public: Initialize the PreprocessorReader object def initialize document, data = nil, cursor = nil @document = document super data, cursor, :normalize => true include_depth_default = document.attributes.fetch('max-include-depth', 64).to_i include_depth_default = 0 if include_depth_default < 0 # track both absolute depth for comparing to size of include stack and relative depth for reporting @maxdepth = {:abs => include_depth_default, :rel => include_depth_default} @include_stack = [] @includes = (document.references[:includes] ||= []) @skipping = false @conditional_stack = [] @include_processor_extensions = nil end def prepare_lines data, opts = {} result = super # QUESTION should this work for AsciiDoc table cell content? Currently it does not. if @document && (@document.attributes.has_key? 'skip-front-matter') if (front_matter = skip_front_matter! result) @document.attributes['front-matter'] = front_matter * EOL end end if opts.fetch :condense, true result.shift && @lineno += 1 while (first = result[0]) && first.empty? result.pop while (last = result[-1]) && last.empty? end if opts[:indent] Parser.adjust_indentation! result, opts[:indent], (@document.attr 'tabsize') end result end def process_line line return line unless @process_lines if line.empty? @look_ahead += 1 return '' end # NOTE highly optimized if line.end_with?(']') && !line.start_with?('[') && line.include?('::') if line.include?('if') && (match = ConditionalDirectiveRx.match(line)) # if escaped, mark as processed and return line unescaped if line.start_with?('\\') @unescape_next_line = true @look_ahead += 1 line[1..-1] else if preprocess_conditional_inclusion(*match.captures) # move the pointer past the conditional line advance # treat next line as uncharted territory nil else # the line was not a valid conditional line # mark it as visited and return it @look_ahead += 1 line end end elsif @skipping advance nil elsif ((escaped = line.start_with?('\\include::')) || line.start_with?('include::')) && (match = IncludeDirectiveRx.match(line)) # if escaped, mark as processed and return line unescaped if escaped @unescape_next_line = true @look_ahead += 1 line[1..-1] else # QUESTION should we strip whitespace from raw attributes in Substitutors#parse_attributes? 
(check perf) if preprocess_include match[1], match[2].strip # peek again since the content has changed nil else # the line was not a valid include line and is unchanged # mark it as visited and return it @look_ahead += 1 line end end else # NOTE optimization to inline super @look_ahead += 1 line end elsif @skipping advance nil else # NOTE optimization to inline super @look_ahead += 1 line end end # Public: Override the Reader#peek_line method to pop the include # stack if the last line has been reached and there's at least # one include on the stack. # # Returns the next line of the source data as a String if there are lines remaining # in the current include context or a parent include context. # Returns nothing if there are no more lines remaining and the include stack is empty. def peek_line direct = false if (line = super) line elsif @include_stack.empty? nil else pop_include peek_line direct end end # Internal: Preprocess the directive (macro) to conditionally include content. # # Preprocess the conditional inclusion directive (ifdef, ifndef, ifeval, # endif) under the cursor. If the Reader is currently skipping content, then # simply track the open and close delimiters of any nested conditional # blocks. If the Reader is not skipping, mark whether the condition is # satisfied and continue preprocessing recursively until the next line of # available content is found. # # directive - The conditional inclusion directive (ifdef, ifndef, ifeval, endif) # target - The target, which is the name of one or more attributes that are # used in the condition (blank in the case of the ifeval directive) # delimiter - The conditional delimiter for multiple attributes ('+' means all # attributes must be defined or undefined, ',' means any of the attributes # can be defined or undefined. # text - The text associated with this directive (occurring between the square brackets) # Used for a single-line conditional block in the case of the ifdef or # ifndef directives, and for the conditional expression for the ifeval directive. # # Returns a Boolean indicating whether the cursor should be advanced def preprocess_conditional_inclusion directive, target, delimiter, text # must have a target before brackets if ifdef or ifndef # must not have text between brackets if endif # don't honor match if it doesn't meet this criteria # QUESTION should we warn for these bogus declarations? if ((directive == 'ifdef' || directive == 'ifndef') && target.empty?) || (directive == 'endif' && text) return false end # attributes are case insensitive target = target.downcase if directive == 'endif' stack_size = @conditional_stack.size if stack_size > 0 pair = @conditional_stack[-1] if target.empty? || target == pair[:target] @conditional_stack.pop @skipping = @conditional_stack.empty? ? false : @conditional_stack[-1][:skipping] else warn %(asciidoctor: ERROR: #{line_info}: mismatched macro: endif::#{target}[], expected endif::#{pair[:target]}[]) end else warn %(asciidoctor: ERROR: #{line_info}: unmatched macro: endif::#{target}[]) end return true end skip = false unless @skipping # QUESTION any way to wrap ifdef & ifndef logic up together? case directive when 'ifdef' case delimiter when nil # if the attribute is undefined, then skip skip = !@document.attributes.has_key?(target) when ',' # if any attribute is defined, then don't skip skip = target.split(',').none? {|name| @document.attributes.has_key? name } when '+' # if any attribute is undefined, then skip skip = target.split('+').any? {|name| !@document.attributes.has_key? 
name } end when 'ifndef' case delimiter when nil # if the attribute is defined, then skip skip = @document.attributes.has_key?(target) when ',' # if any attribute is undefined, then don't skip skip = target.split(',').none? {|name| !@document.attributes.has_key? name } when '+' # if any attribute is defined, then skip skip = target.split('+').any? {|name| @document.attributes.has_key? name } end when 'ifeval' # the text in brackets must match an expression # don't honor match if it doesn't meet this criteria if !target.empty? || !(expr_match = EvalExpressionRx.match(text.strip)) return false end lhs = resolve_expr_val expr_match[1] rhs = resolve_expr_val expr_match[3] # regex enforces a restricted set of math-related operations if (op = expr_match[2]) == '!=' skip = lhs.send :==, rhs else skip = !(lhs.send op.to_sym, rhs) end end end # conditional inclusion block if directive == 'ifeval' || !text @skipping = true if skip @conditional_stack << {:target => target, :skip => skip, :skipping => @skipping} # single line conditional inclusion else unless @skipping || skip # FIXME slight hack to skip past conditional line # but keep our synthetic line marked as processed # QUESTION can we use read_line true and unshift twice instead? conditional_line = peek_line true replace_next_line text.rstrip unshift conditional_line return true end end true end # Internal: Preprocess the directive (macro) to include the target document. # # Preprocess the directive to include the target document. The scenarios # are as follows: # # If SafeMode is SECURE or greater, the directive is ignore and the include # directive line is emitted verbatim. # # Otherwise, if an include processor is specified pass the target and # attributes to that processor and expect an Array of String lines in return. # # Otherwise, if the max depth is greater than 0, and is not exceeded by the # stack size, normalize the target path and read the lines onto the beginning # of the Array of source data. # # If none of the above apply, emit the include directive line verbatim. # # target - The name of the source document to include as specified in the # target slot of the include::[] macro # # Returns a Boolean indicating whether the line under the cursor has changed. def preprocess_include raw_target, raw_attributes if (target = @document.sub_attributes raw_target, :attribute_missing => 'drop-line').empty? advance if @document.attributes.fetch('attribute-missing', Compliance.attribute_missing) == 'skip' unshift %(Unresolved directive in #{@path} - include::#{raw_target}[#{raw_attributes}]) end true # assume that if an include processor is given, the developer wants # to handle when and how to process the include elsif include_processors? && (extension = @include_processor_extensions.find {|candidate| candidate.instance.handles? 
target }) advance # FIXME parse attributes if requested by extension extension.process_method[@document, self, target, AttributeList.new(raw_attributes).parse] true # if running in SafeMode::SECURE or greater, don't process this directive # however, be friendly and at least make it a link to the source document elsif @document.safe >= SafeMode::SECURE # FIXME we don't want to use a link macro if we are in a verbatim context replace_next_line %(link:#{target}[]) true elsif (abs_maxdepth = @maxdepth[:abs]) > 0 if @include_stack.size >= abs_maxdepth warn %(asciidoctor: ERROR: #{line_info}: maximum include depth of #{@maxdepth[:rel]} exceeded) return false end if ::RUBY_ENGINE_OPAL # NOTE resolves uri relative to currently loaded document # NOTE we defer checking if file exists and catch the 404 error if it does not # TODO only use this logic if env-browser is set target_type = :file include_file = path = if @include_stack.empty? ::Dir.pwd == @document.base_dir ? target : (::File.join @dir, target) else ::File.join @dir, target end elsif Helpers.uriish? target unless @document.attributes.has_key? 'allow-uri-read' replace_next_line %(link:#{target}[]) return true end target_type = :uri include_file = path = target if @document.attributes.has_key? 'cache-uri' # caching requires the open-uri-cached gem to be installed # processing will be automatically aborted if these libraries can't be opened Helpers.require_library 'open-uri/cached', 'open-uri-cached' unless defined? ::OpenURI::Cache elsif !::RUBY_ENGINE_OPAL # autoload open-uri ::OpenURI end else target_type = :file # include file is resolved relative to dir of current include, or base_dir if within original docfile include_file = @document.normalize_system_path(target, @dir, nil, :target_name => 'include file') unless ::File.file? include_file warn %(asciidoctor: WARNING: #{line_info}: include file not found: #{include_file}) replace_next_line %(Unresolved directive in #{@path} - include::#{target}[#{raw_attributes}]) return true end #path = @document.relative_path include_file path = PathResolver.new.relative_path include_file, @document.base_dir end inc_lines = nil tags = nil attributes = {} if !raw_attributes.empty? # QUESTION should we use @document.parse_attribues? attributes = AttributeList.new(raw_attributes).parse if attributes.has_key? 'lines' inc_lines = [] attributes['lines'].split(DataDelimiterRx).each do |linedef| if linedef.include?('..') from, to = linedef.split('..', 2).map(&:to_i) if to == -1 inc_lines << from inc_lines << 1.0/0.0 else inc_lines.concat ::Range.new(from, to).to_a end else inc_lines << linedef.to_i end end inc_lines = inc_lines.sort.uniq elsif attributes.has_key? 'tag' tags = [attributes['tag']].to_set elsif attributes.has_key? 'tags' tags = attributes['tags'].split(DataDelimiterRx).to_set end end if inc_lines unless inc_lines.empty? selected = [] inc_line_offset = 0 inc_lineno = 0 begin open(include_file, 'r') do |f| f.each_line do |l| inc_lineno += 1 take = inc_lines[0] if ::Float === take && take.infinite? selected.push l inc_line_offset = inc_lineno if inc_line_offset == 0 else if f.lineno == take selected.push l inc_line_offset = inc_lineno if inc_line_offset == 0 inc_lines.shift end break if inc_lines.empty? 
end end end rescue warn %(asciidoctor: WARNING: #{line_info}: include #{target_type} not readable: #{include_file}) replace_next_line %(Unresolved directive in #{@path} - include::#{target}[#{raw_attributes}]) return true end advance # FIXME not accounting for skipped lines in reader line numbering push_include selected, include_file, path, inc_line_offset, attributes end elsif tags unless tags.empty? selected = [] inc_line_offset = 0 inc_lineno = 0 active_tag = nil tags_found = ::Set.new begin open(include_file, 'r') do |f| f.each_line do |l| inc_lineno += 1 # must force encoding here since we're performing String operations on line l.force_encoding(::Encoding::UTF_8) if FORCE_ENCODING l = l.rstrip # tagged lines in XML may end with '-->' tl = l.chomp('-->').rstrip if active_tag if tl.end_with?(%(end::#{active_tag}[])) active_tag = nil else selected.push l unless tl.end_with?('[]') && TagDirectiveRx =~ tl inc_line_offset = inc_lineno if inc_line_offset == 0 end else tags.each do |tag| if tl.end_with?(%(tag::#{tag}[])) active_tag = tag tags_found << tag break end end if tl.end_with?('[]') && TagDirectiveRx =~ tl end end end rescue warn %(asciidoctor: WARNING: #{line_info}: include #{target_type} not readable: #{include_file}) replace_next_line %(Unresolved directive in #{@path} - include::#{target}[#{raw_attributes}]) return true end unless (missing_tags = tags.to_a - tags_found.to_a).empty? warn %(asciidoctor: WARNING: #{line_info}: tag#{missing_tags.size > 1 ? 's' : nil} '#{missing_tags * ','}' not found in include #{target_type}: #{include_file}) end advance # FIXME not accounting for skipped lines in reader line numbering push_include selected, include_file, path, inc_line_offset, attributes end else begin # NOTE read content first so that we only advance cursor if IO operation succeeds include_content = open(include_file, 'r') {|f| f.read } advance push_include include_content, include_file, path, 1, attributes rescue warn %(asciidoctor: WARNING: #{line_info}: include #{target_type} not readable: #{include_file}) replace_next_line %(Unresolved directive in #{@path} - include::#{target}[#{raw_attributes}]) return true end end true else false end end # Public: Push source onto the front of the reader and switch the context # based on the file, document-relative path and line information given. # # This method is typically used in an IncludeProcessor to add source # read from the target specified. # # Examples # # path = 'partial.adoc' # file = File.expand_path path # data = IO.read file # reader.push_include data, file, path # # Returns this Reader object. def push_include data, file = nil, path = nil, lineno = 1, attributes = {} @include_stack << [@lines, @file, @dir, @path, @lineno, @maxdepth, @process_lines] if file @file = file @dir = File.dirname file # only process lines in AsciiDoc files @process_lines = ASCIIDOC_EXTENSIONS[::File.extname(file)] else @file = nil @dir = '.' # right? # we don't know what file type we have, so assume AsciiDoc @process_lines = true end @path = if path @includes << Helpers.rootname(path) path else '' end @lineno = lineno if attributes.has_key? 'depth' depth = attributes['depth'].to_i depth = 1 if depth <= 0 @maxdepth = {:abs => (@include_stack.size - 1) + depth, :rel => depth} end # effectively fill the buffer if (@lines = prepare_lines data, :normalize => true, :condense => false, :indent => attributes['indent']).empty? pop_include else # FIXME we eventually want to handle leveloffset without affecting the lines if attributes.has_key? 
'leveloffset' @lines.unshift '' @lines.unshift %(:leveloffset: #{attributes['leveloffset']}) @lines.push '' if (old_leveloffset = @document.attr 'leveloffset') @lines.push %(:leveloffset: #{old_leveloffset}) else @lines.push ':leveloffset!:' end # compensate for these extra lines @lineno -= 2 end # FIXME kind of a hack #Document::AttributeEntry.new('infile', @file).save_to_next_block @document #Document::AttributeEntry.new('indir', @dir).save_to_next_block @document @eof = false @look_ahead = 0 end self end def pop_include if @include_stack.size > 0 @lines, @file, @dir, @path, @lineno, @maxdepth, @process_lines = @include_stack.pop # FIXME kind of a hack #Document::AttributeEntry.new('infile', @file).save_to_next_block @document #Document::AttributeEntry.new('indir', ::File.dirname(@file)).save_to_next_block @document @eof = @lines.empty? @look_ahead = 0 end nil end def include_depth @include_stack.size end def exceeded_max_depth? if (abs_maxdepth = @maxdepth[:abs]) > 0 && @include_stack.size >= abs_maxdepth @maxdepth[:rel] else false end end # TODO Document this override # also, we now have the field in the super class, so perhaps # just implement the logic there? def shift if @unescape_next_line @unescape_next_line = false super[1..-1] else super end end # Private: Ignore front-matter, commonly used in static site generators def skip_front_matter! data, increment_linenos = true front_matter = nil if data[0] == '---' original_data = data.dup front_matter = [] data.shift @lineno += 1 if increment_linenos while !data.empty? && data[0] != '---' front_matter.push data.shift @lineno += 1 if increment_linenos end if data.empty? data.unshift(*original_data) @lineno = 0 if increment_linenos front_matter = nil else data.shift @lineno += 1 if increment_linenos end end front_matter end # Private: Resolve the value of one side of the expression # # Examples # # expr = '"value"' # resolve_expr_val expr # # => "value" # # expr = '"value' # resolve_expr_val expr # # => "\"value" # # expr = '"{undefined}"' # resolve_expr_val expr # # => "" # # expr = '{undefined}' # resolve_expr_val expr # # => nil # # expr = '2' # resolve_expr_val expr # # => 2 # # @document.attributes['name'] = 'value' # expr = '"{name}"' # resolve_expr_val expr # # => "value" # # Returns The value of the expression, coerced to the appropriate type def resolve_expr_val val if ((val.start_with? '"') && (val.end_with? '"')) || ((val.start_with? '\'') && (val.end_with? '\'')) quoted = true val = val[1...-1] else quoted = false end # QUESTION should we substitute first? # QUESTION should we also require string to be single quoted (like block attribute values?) if val.include? '{' val = @document.sub_attributes val, :attribute_missing => 'drop' end if quoted val else if val.empty? nil elsif val == 'true' true elsif val == 'false' false elsif val.rstrip.empty? ' ' elsif val.include? '.' val.to_f else # fallback to coercing to integer, since we # require string values to be explicitly quoted val.to_i end end end def include_processors? if @include_processor_extensions.nil? if @document.extensions? && @document.extensions.include_processors? 
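# NOTE (descriptive comment, added for clarity) memoize the include processor extensions
# registered with the document; the double negation coerces the assigned group to a Boolean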
!!(@include_processor_extensions = @document.extensions.include_processors) else @include_processor_extensions = false end else @include_processor_extensions != false end end def to_s %(#<#{self.class}@#{object_id} {path: #{@path.inspect}, line #: #{@lineno}, include depth: #{@include_stack.size}, include stack: [#{@include_stack.map {|inc| inc.to_s}.join ', '}]}>) end end end asciidoctor-1.5.5/lib/asciidoctor/section.rb000066400000000000000000000121331277513741400211020ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor # Public: Methods for managing sections of AsciiDoc content in a document. # The section responds as an Array of content blocks by delegating # block-related methods to its @blocks Array. # # Examples # # section = Asciidoctor::Section.new # section.title = 'Section 1' # section.id = 'sect1' # # section.size # => 0 # # section.id # => "sect1" # # section << new_block # section.size # => 1 class Section < AbstractBlock # Public: Get/Set the 0-based index order of this section within the parent block attr_accessor :index # Public: Get/Set the number of this section within the parent block # Only relevant if the attribute numbered is true attr_accessor :number # Public: Get/Set the section name of this section attr_accessor :sectname # Public: Get/Set the flag to indicate whether this is a special section or a child of one attr_accessor :special # Public: Get the state of the numbered attribute at this section (need to preserve for creating TOC) attr_accessor :numbered # Public: Initialize an Asciidoctor::Section object. # # parent - The parent Asciidoc Object. def initialize parent = nil, level = nil, numbered = true, opts = {} super parent, :section, opts if level @level = level else @level = parent ? (parent.level + 1) : 1 end @numbered = numbered && @level > 0 @special = parent && parent.context == :section && parent.special @index = 0 @number = 1 end # Public: The name of this section, an alias of the section title alias :name :title # Public: Generate a String id for this section. # # The generated id is prefixed with value of the 'idprefix' attribute, which # is an underscore by default. # # Section id synthesis can be disabled by undefining the 'sectids' attribute. # # If the generated id is already in use in the document, a count is appended # until a unique id is found. # # Examples # # section = Section.new(parent) # section.title = "Foo" # section.generate_id # => "_foo" # # another_section = Section.new(parent) # another_section.title = "Foo" # another_section.generate_id # => "_foo_1" # # yet_another_section = Section.new(parent) # yet_another_section.title = "Ben & Jerry" # yet_another_section.generate_id # => "_ben_jerry" def generate_id if @document.attributes.has_key? 'sectids' sep = @document.attributes['idseparator'] || '_' pre = @document.attributes['idprefix'] || '_' base_id = %(#{pre}#{title.downcase.gsub(InvalidSectionIdCharsRx, sep).tr_s(sep, sep).chomp(sep)}) # ensure id doesn't begin with idseparator if idprefix is empty and idseparator is not empty if pre.empty? && !sep.empty? && base_id.start_with?(sep) base_id = base_id[1..-1] base_id = base_id[1..-1] while base_id.start_with?(sep) end gen_id = base_id cnt = Compliance.unique_id_start_index while @document.references[:ids].has_key? 
gen_id gen_id = %(#{base_id}#{sep}#{cnt}) cnt += 1 end gen_id else nil end end # Public: Get the section number for the current Section # # The section number is a unique, dot separated String # where each entry represents one level of nesting and # the value of each entry is the 1-based outline number # of the Section amongst its numbered sibling Sections # # delimiter - the delimiter to separate the number for each level # append - the String to append at the end of the section number # or Boolean to indicate the delimiter should not be # appended to the final level # (default: nil) # # Examples # # sect1 = Section.new(document) # sect1.level = 1 # sect1_1 = Section.new(sect1) # sect1_1.level = 2 # sect1_2 = Section.new(sect1) # sect1_2.level = 2 # sect1 << sect1_1 # sect1 << sect1_2 # sect1_1_1 = Section.new(sect1_1) # sect1_1_1.level = 3 # sect1_1 << sect1_1_1 # # sect1.sectnum # # => 1. # # sect1_1.sectnum # # => 1.1. # # sect1_2.sectnum # # => 1.2. # # sect1_1_1.sectnum # # => 1.1.1. # # sect1_1_1.sectnum(',', false) # # => 1,1,1 # # Returns the section number as a String def sectnum(delimiter = '.', append = nil) append ||= (append == false ? '' : delimiter) if @level && @level > 1 && @parent && @parent.context == :section %(#{@parent.sectnum(delimiter)}#{@number}#{append}) else %(#{@number}#{append}) end end # Public: Append a content block to this block's list of blocks. # # If the child block is a Section, assign an index to it. # # block - The child Block to append to this parent Block # # Returns The parent Block def << block assign_index block if block.context == :section super end def to_s if @title != nil qualified_title = @numbered ? %(#{sectnum} #{@title}) : @title %(#<#{self.class}@#{object_id} {level: #{@level}, title: #{qualified_title.inspect}, blocks: #{@blocks.size}}>) else super end end end end asciidoctor-1.5.5/lib/asciidoctor/stylesheets.rb000066400000000000000000000057071277513741400220230ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor # A utility class for working with the built-in stylesheets. #-- # QUESTION create methods for link_*_stylesheet? # QUESTION create method for user stylesheet? class Stylesheets DEFAULT_STYLESHEET_NAME = 'asciidoctor.css' DEFAULT_PYGMENTS_STYLE = 'default' STYLESHEETS_DATA_PATH = ::File.join DATA_PATH, 'stylesheets' @__instance__ = new def self.instance @__instance__ end def primary_stylesheet_name DEFAULT_STYLESHEET_NAME end # Public: Read the contents of the default Asciidoctor stylesheet # # returns the [String] Asciidoctor stylesheet data def primary_stylesheet_data @primary_stylesheet_data ||= ::IO.read(::File.join(STYLESHEETS_DATA_PATH, 'asciidoctor-default.css')).chomp end def embed_primary_stylesheet %() end def write_primary_stylesheet target_dir ::File.open(::File.join(target_dir, primary_stylesheet_name), 'w') {|f| f.write primary_stylesheet_data } end def coderay_stylesheet_name 'coderay-asciidoctor.css' end # Public: Read the contents of the default CodeRay stylesheet # # returns the [String] CodeRay stylesheet data def coderay_stylesheet_data # NOTE use the following lines to load a built-in theme instead # unless load_coderay.nil? 
# ::CodeRay::Encoders[:html]::CSS.new(:default).stylesheet # end @coderay_stylesheet_data ||= ::IO.read(::File.join(STYLESHEETS_DATA_PATH, 'coderay-asciidoctor.css')).chomp end def embed_coderay_stylesheet %() end def write_coderay_stylesheet target_dir ::File.open(::File.join(target_dir, coderay_stylesheet_name), 'w') {|f| f.write coderay_stylesheet_data } end def pygments_stylesheet_name style = nil %(pygments-#{style || DEFAULT_PYGMENTS_STYLE}.css) end # Public: Generate the Pygments stylesheet with the specified style. # # returns the [String] Pygments stylesheet data def pygments_stylesheet_data style = nil if load_pygments (@pygments_stylesheet_data ||= {})[style || DEFAULT_PYGMENTS_STYLE] ||= (::Pygments.css '.listingblock .pygments', :classprefix => 'tok-', :style => (style || DEFAULT_PYGMENTS_STYLE)). sub('.listingblock .pygments {', '.listingblock .pygments, .listingblock .pygments code {') else '/* Pygments styles disabled. Pygments is not available. */' end end def embed_pygments_stylesheet style = nil %() end def write_pygments_stylesheet target_dir, style = nil ::File.open(::File.join(target_dir, pygments_stylesheet_name(style)), 'w') {|f| f.write pygments_stylesheet_data(style) } end #def load_coderay # (defined? ::CodeRay) ? true : !(Helpers.require_library 'coderay', true, :ignore).nil? #end def load_pygments (defined? ::Pygments) ? true : !(Helpers.require_library 'pygments', 'pygments.rb', :ignore).nil? end end end asciidoctor-1.5.5/lib/asciidoctor/substitutors.rb000066400000000000000000001536551277513741400222470ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor # Public: Methods to perform substitutions on lines of AsciiDoc text. This module # is intented to be mixed-in to Section and Block to provide operations for performing # the necessary substitutions. 
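# A minimal illustration of the entry point (illustrative only; the exact markup
# depends on the active converter, and it assumes an HTML-based backend with the
# 'product' attribute set to 'Asciidoctor'):
#
#   block.apply_subs 'The {product} tool is *fast* & <safe>.'
#   # => "The Asciidoctor tool is <strong>fast</strong> &amp; &lt;safe&gt;."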
module Substitutors SPECIAL_CHARS = { '&' => '&', '<' => '<', '>' => '>' } SPECIAL_CHARS_PATTERN = /[#{SPECIAL_CHARS.keys.join}]/ SUBS = { :basic => [:specialcharacters], :normal => [:specialcharacters, :quotes, :attributes, :replacements, :macros, :post_replacements], :verbatim => [:specialcharacters, :callouts], :title => [:specialcharacters, :quotes, :replacements, :macros, :attributes, :post_replacements], :header => [:specialcharacters, :attributes], # by default, AsciiDoc performs :attributes and :macros on a pass block # TODO make this a compliance setting :pass => [] } COMPOSITE_SUBS = { :none => [], :normal => SUBS[:normal], :verbatim => SUBS[:verbatim], :specialchars => [:specialcharacters] } SUB_SYMBOLS = { :a => :attributes, :m => :macros, :n => :normal, :p => :post_replacements, :q => :quotes, :r => :replacements, :c => :specialcharacters, :v => :verbatim } SUB_OPTIONS = { :block => COMPOSITE_SUBS.keys + SUBS[:normal] + [:callouts], :inline => COMPOSITE_SUBS.keys + SUBS[:normal] } SUB_HIGHLIGHT = ['coderay', 'pygments'] # Delimiters and matchers for the passthrough placeholder # See http://www.aivosto.com/vbtips/control-characters.html#listabout for characters to use # SPA, start of guarded protected area (\u0096) PASS_START = "\u0096" # EPA, end of guarded protected area (\u0097) PASS_END = "\u0097" # match placeholder record PASS_MATCH = /\u0096(\d+)\u0097/ # fix placeholder record after syntax highlighting PASS_MATCH_HI = /]*>\u0096<\/span>[^\d]*(\d+)[^\d]*]*>\u0097<\/span>/ # Internal: A String Array of passthough (unprocessed) text captured from this block attr_reader :passthroughs # Public: Apply the specified substitutions to the lines of text # # source - The String or String Array of text to process # subs - The substitutions to perform. Can be a Symbol or a Symbol Array (default: :normal) # expand - A Boolean to control whether sub aliases are expanded (default: true) # # returns Either a String or String Array, whichever matches the type of the first argument def apply_subs source, subs = :normal, expand = false if !subs return source elsif subs == :normal subs = SUBS[:normal] elsif expand if ::Symbol === subs subs = COMPOSITE_SUBS[subs] || [subs] else effective_subs = [] subs.each do |key| if COMPOSITE_SUBS.has_key? key effective_subs += COMPOSITE_SUBS[key] else effective_subs << key end end subs = effective_subs end end return source if subs.empty? text = (multiline = ::Array === source) ? source * EOL : source if (has_passthroughs = subs.include? :macros) text = extract_passthroughs text has_passthroughs = false if @passthroughs.empty? end subs.each do |type| case type when :specialcharacters text = sub_specialchars text when :quotes text = sub_quotes text when :attributes text = sub_attributes(text.split EOL) * EOL when :replacements text = sub_replacements text when :macros text = sub_macros text when :highlight text = highlight_source text, (subs.include? :callouts) when :callouts text = sub_callouts text unless subs.include? :highlight when :post_replacements text = sub_post_replacements text else warn %(asciidoctor: WARNING: unknown substitution type #{type}) end end text = restore_passthroughs text if has_passthroughs multiline ? (text.split EOL) : text end # Public: Apply normal substitutions. # # lines - The lines of text to process. Can be a String or a String Array # # returns - A String with normal substitutions performed def apply_normal_subs(lines) apply_subs(::Array === lines ? lines * EOL : lines) end # Public: Apply substitutions for titles. 
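  #
  # Example (illustrative; assumes the built-in HTML converter and that the
  # 'product' attribute resolves to 'Asciidoctor'):
  #
  #   apply_title_subs 'Getting *Started* with {product}'
  #   # => "Getting <strong>Started</strong> with Asciidoctor"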
# # title - The String title to process # # returns - A String with title substitutions performed def apply_title_subs(title) apply_subs title, SUBS[:title] end # Public: Apply substitutions for header metadata and attribute assignments # # text - String containing the text process # # returns - A String with header substitutions performed def apply_header_subs(text) apply_subs text, SUBS[:header] end # Internal: Extract the passthrough text from the document for reinsertion after processing. # # text - The String from which to extract passthrough fragements # # returns - The text with the passthrough region substituted with placeholders def extract_passthroughs(text) compat_mode = @document.compat_mode text = text.gsub(PassInlineMacroRx) { # alias match for Ruby 1.8.7 compat m = $~ preceding = nil if (boundary = m[4]).nil_or_empty? # pass:[] if m[6] == '\\' # NOTE we don't look for nested pass:[] macros next m[0][1..-1] end @passthroughs[pass_key = @passthroughs.size] = {:text => (unescape_brackets m[8]), :subs => (m[7].nil_or_empty? ? [] : (resolve_pass_subs m[7]))} else # $$, ++ or +++ # skip ++ in compat mode, handled as normal quoted text if compat_mode && boundary == '++' next m[2].nil_or_empty? ? %(#{m[1]}#{m[3]}++#{extract_passthroughs m[5]}++) : %(#{m[1]}[#{m[2]}]#{m[3]}++#{extract_passthroughs m[5]}++) end attributes = m[2] # fix non-matching group results in Opal under Firefox if ::RUBY_ENGINE_OPAL attributes = nil if attributes == '' end escape_count = m[3].size content = m[5] old_behavior = false if attributes if escape_count > 0 # NOTE we don't look for nested unconstrained pass macros # must enclose string following next in " for Opal next "#{m[1]}[#{attributes}]#{'\\' * (escape_count - 1)}#{boundary}#{m[5]}#{boundary})" elsif m[1] == '\\' preceding = %([#{attributes}]) attributes = nil else if boundary == '++' && (attributes.end_with? 'x-') old_behavior = true attributes = attributes[0...-2] end attributes = parse_attributes attributes end elsif escape_count > 0 # NOTE we don't look for nested unconstrained pass macros # must enclose string following next in " for Opal next "#{m[1]}[#{attributes}]#{'\\' * (escape_count - 1)}#{boundary}#{m[5]}#{boundary}" end subs = (boundary == '+++' ? [] : [:specialcharacters]) pass_key = @passthroughs.size if attributes if old_behavior @passthroughs[pass_key] = {:text => content, :subs => SUBS[:normal], :type => :monospaced, :attributes => attributes} else @passthroughs[pass_key] = {:text => content, :subs => subs, :type => :unquoted, :attributes => attributes} end else @passthroughs[pass_key] = {:text => content, :subs => subs} end end %(#{preceding}#{PASS_START}#{pass_key}#{PASS_END}) } if (text.include? '++') || (text.include? '$$') || (text.include? 'ss:') pass_inline_char1, pass_inline_char2, pass_inline_rx = PassInlineRx[compat_mode] text = text.gsub(pass_inline_rx) { # alias match for Ruby 1.8.7 compat m = $~ preceding = m[1] attributes = m[2] escape_mark = (m[3].start_with? '\\') ? '\\' : nil format_mark = m[4] content = m[5] # fix non-matching group results in Opal under Firefox if ::RUBY_ENGINE_OPAL attributes = nil if attributes == '' end if compat_mode old_behavior = true else if (old_behavior = (attributes && (attributes.end_with? 
'x-'))) attributes = attributes[0...-2] end end if attributes if format_mark == '`' && !old_behavior # must enclose string following next in " for Opal next "#{preceding}[#{attributes}]#{escape_mark}`#{extract_passthroughs content}`" end if escape_mark # honor the escape of the formatting mark (must enclose string following next in " for Opal) next "#{preceding}[#{attributes}]#{m[3][1..-1]}" elsif preceding == '\\' # honor the escape of the attributes preceding = %([#{attributes}]) attributes = nil else attributes = parse_attributes attributes end elsif format_mark == '`' && !old_behavior # must enclose string following next in " for Opal next "#{preceding}#{escape_mark}`#{extract_passthroughs content}`" elsif escape_mark # honor the escape of the formatting mark (must enclose string following next in " for Opal) next "#{preceding}#{m[3][1..-1]}" end pass_key = @passthroughs.size if compat_mode @passthroughs[pass_key] = {:text => content, :subs => [:specialcharacters], :attributes => attributes, :type => :monospaced} elsif attributes if old_behavior subs = (format_mark == '`' ? [:specialcharacters] : SUBS[:normal]) @passthroughs[pass_key] = {:text => content, :subs => subs, :attributes => attributes, :type => :monospaced} else @passthroughs[pass_key] = {:text => content, :subs => [:specialcharacters], :attributes => attributes, :type => :unquoted} end else @passthroughs[pass_key] = {:text => content, :subs => [:specialcharacters]} end %(#{preceding}#{PASS_START}#{pass_key}#{PASS_END}) } if (text.include? pass_inline_char1) || (pass_inline_char2 && (text.include? pass_inline_char2)) # NOTE we need to do the stem in a subsequent step to allow it to be escaped by the former text = text.gsub(StemInlineMacroRx) { # alias match for Ruby 1.8.7 compat m = $~ # honor the escape if m[0].start_with? '\\' next m[0][1..-1] end if (type = m[1].to_sym) == :stem type = ((default_stem_type = document.attributes['stem']).nil_or_empty? ? 'asciimath' : default_stem_type).to_sym end content = unescape_brackets m[3] if m[2].nil_or_empty? subs = (@document.basebackend? 'html') ? [:specialcharacters] : [] else subs = resolve_pass_subs m[2] end @passthroughs[pass_key = @passthroughs.size] = {:text => content, :subs => subs, :type => type} %(#{PASS_START}#{pass_key}#{PASS_END}) } if (text.include? ':') && ((text.include? 'stem:') || (text.include? 'math:')) text end # Internal: Restore the passthrough text by reinserting into the placeholder positions # # text - The String text into which to restore the passthrough text # outer - A Boolean indicating whether we are in the outer call (default: true) # # returns The String text with the passthrough text restored def restore_passthroughs text, outer = true if outer && (@passthroughs.empty? || !text.include?(PASS_START)) return text end text.gsub(PASS_MATCH) { # NOTE we can't remove entry from map because placeholder may have been duplicated by other substitutions pass = @passthroughs[$~[1].to_i] subbed_text = (subs = pass[:subs]) ? apply_subs(pass[:text], subs) : pass[:text] if (type = pass[:type]) subbed_text = Inline.new(self, :quoted, subbed_text, :type => type, :attributes => pass[:attributes]).convert end subbed_text.include?(PASS_START) ? 
restore_passthroughs(subbed_text, false) : subbed_text } ensure # free memory if in outer call...we don't need these anymore @passthroughs.clear if outer end # Public: Substitute special characters (i.e., encode XML) # # Special characters are defined in the Asciidoctor::SPECIAL_CHARS Array constant # # text - The String text to process # # returns The String text with special characters replaced def sub_specialchars(text) SUPPORTS_GSUB_RESULT_HASH ? text.gsub(SPECIAL_CHARS_PATTERN, SPECIAL_CHARS) : text.gsub(SPECIAL_CHARS_PATTERN) { SPECIAL_CHARS[$&] } end alias :sub_specialcharacters :sub_specialchars if RUBY_ENGINE == 'opal' def sub_quotes text QUOTE_SUBS[@document.compat_mode].each do |type, scope, pattern| text = text.gsub(pattern) { convert_quoted_text $~, type, scope } end text end def sub_replacements text REPLACEMENTS.each do |pattern, replacement, restore| text = text.gsub(pattern) { do_replacement $~, replacement, restore } end text end else # Public: Substitute quoted text (includes emphasis, strong, monospaced, etc) # # text - The String text to process # # returns The converted String text def sub_quotes text # NOTE interpolation is faster than String#dup text = %(#{text}) # NOTE using gsub! here as an MRI Ruby optimization QUOTE_SUBS[@document.compat_mode].each do |type, scope, pattern| text.gsub!(pattern) { convert_quoted_text $~, type, scope } end text end # Public: Substitute replacement characters (e.g., copyright, trademark, etc) # # text - The String text to process # # returns The String text with the replacement characters substituted def sub_replacements text # NOTE interpolation is faster than String#dup text = %(#{text}) # NOTE Using gsub! as optimization REPLACEMENTS.each do |pattern, replacement, restore| text.gsub!(pattern) { do_replacement $~, replacement, restore } end text end end # Internal: Substitute replacement text for matched location # # returns The String text with the replacement characters substituted def do_replacement m, replacement, restore if (matched = m[0]).include? '\\' matched.tr '\\', '' else case restore when :none replacement when :leading %(#{m[1]}#{replacement}) when :bounding %(#{m[1]}#{replacement}#{m[2]}) end end end # Public: Substitute attribute references # # Attribute references are in the format +{name}+. # # If an attribute referenced in the line is missing, the line is dropped. # # text - The String text to process # # returns The String text with the attribute references replaced with attribute values #-- # NOTE it's necessary to perform this substitution line-by-line # so that a missing key doesn't wipe out the whole block of data # when attribute-undefined and/or attribute-missing is drop-line def sub_attributes data, opts = {} return data if data.nil_or_empty? # normalizes data type to an array (string becomes single-element array) if (string_data = ::String === data) data = [data] end doc_attrs = @document.attributes attribute_missing = nil result = [] data.each do |line| reject = false reject_if_empty = false line = line.gsub(AttributeReferenceRx) { # alias match for Ruby 1.8.7 compat m = $~ # escaped attribute, return unescaped if m[1] == '\\' || m[4] == '\\' %({#{m[2]}}) elsif !m[3].nil_or_empty? 
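          # m[3] captures an attribute directive (set, counter or counter2); dropping
          # the directive name plus the trailing colon from m[2] isolates its arguments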
offset = (directive = m[3]).length + 1 expr = m[2][offset..-1] case directive when 'set' args = expr.split(':') _, value = Parser.store_attribute(args[0], args[1] || '', @document) unless value # since this is an assignment, only drop-line applies here (skip and drop imply the same result) if doc_attrs.fetch('attribute-undefined', Compliance.attribute_undefined) == 'drop-line' reject = true break '' end end reject_if_empty = true '' when 'counter', 'counter2' args = expr.split(':') val = @document.counter(args[0], args[1]) if directive == 'counter2' reject_if_empty = true '' else val end else # if we get here, our AttributeReference regex is too loose warn %(asciidoctor: WARNING: illegal attribute directive: #{m[3]}) m[0] end elsif doc_attrs.key?(key = m[2].downcase) doc_attrs[key] elsif INTRINSIC_ATTRIBUTES.key? key INTRINSIC_ATTRIBUTES[key] else case (attribute_missing ||= (opts[:attribute_missing] || doc_attrs.fetch('attribute-missing', Compliance.attribute_missing))) when 'skip' m[0] when 'drop-line' warn %(asciidoctor: WARNING: dropping line containing reference to missing attribute: #{key}) reject = true break '' when 'warn' warn %(asciidoctor: WARNING: skipping reference to missing attribute: #{key}) m[0] else # 'drop' # QUESTION should we warn in this case? reject_if_empty = true '' end end } if line.include? '{' result << line unless reject || (reject_if_empty && line.empty?) end string_data ? result * EOL : result end # Public: Substitute inline macros (e.g., links, images, etc) # # Replace inline macros, which may span multiple lines, in the provided text # # source - The String text to process # # returns The converted String text def sub_macros(source) return source if source.nil_or_empty? # some look ahead assertions to cut unnecessary regex calls found = {} found[:square_bracket] = source.include?('[') found[:round_bracket] = source.include?('(') found[:colon] = found_colon = source.include?(':') found[:macroish] = (found[:square_bracket] && found_colon) found[:macroish_short_form] = (found[:square_bracket] && found_colon && source.include?(':[')) use_link_attrs = @document.attributes.has_key?('linkattrs') experimental = @document.attributes.has_key?('experimental') # NOTE interpolation is faster than String#dup result = %(#{source}) if experimental if found[:macroish_short_form] && (result.include?('kbd:') || result.include?('btn:')) result = result.gsub(KbdBtnInlineMacroRx) { # alias match for Ruby 1.8.7 compat m = $~ # honor the escape if (captured = m[0]).start_with? '\\' next captured[1..-1] end if captured.start_with?('kbd') keys = unescape_bracketed_text m[1] if keys == '+' keys = ['+'] else # need to use closure to work around lack of negative lookbehind keys = keys.split(KbdDelimiterRx).inject([]) {|c, key| if key.end_with?('++') c << key[0..-3].strip c << '+' else c << key.strip end c } end Inline.new(self, :kbd, nil, :attributes => {'keys' => keys}).convert elsif captured.start_with?('btn') label = unescape_bracketed_text m[1] Inline.new(self, :button, label).convert end } end if found[:macroish] && result.include?('menu:') result = result.gsub(MenuInlineMacroRx) { # alias match for Ruby 1.8.7 compat m = $~ # honor the escape if (captured = m[0]).start_with? '\\' next captured[1..-1] end menu = m[1] items = m[2] if !items submenus = [] menuitem = nil else if (delim = items.include?('>') ? '>' : (items.include?(',') ? 
',' : nil)) submenus = items.split(delim).map {|it| it.strip } menuitem = submenus.pop else submenus = [] menuitem = items.rstrip end end Inline.new(self, :menu, nil, :attributes => {'menu' => menu, 'submenus' => submenus, 'menuitem' => menuitem}).convert } end if result.include?('"') && result.include?('>') result = result.gsub(MenuInlineRx) { # alias match for Ruby 1.8.7 compat m = $~ # honor the escape if (captured = m[0]).start_with? '\\' next captured[1..-1] end input = m[1] menu, *submenus = input.split('>').map {|it| it.strip } menuitem = submenus.pop Inline.new(self, :menu, nil, :attributes => {'menu' => menu, 'submenus' => submenus, 'menuitem' => menuitem}).convert } end end # FIXME this location is somewhat arbitrary, probably need to be able to control ordering # TODO this handling needs some cleanup if (extensions = @document.extensions) && extensions.inline_macros? # && found[:macroish] extensions.inline_macros.each do |extension| result = result.gsub(extension.instance.regexp) { # alias match for Ruby 1.8.7 compat m = $~ # honor the escape if m[0].start_with? '\\' next m[0][1..-1] end target = m[1] attributes = if extension.config[:format] == :short # TODO if content_model is :attributes, set target to nil and parse attributes # maybe if content_model is :text, we should put content into text attribute {} else if extension.config[:content_model] == :attributes parse_attributes m[2], (extension.config[:pos_attrs] || []), :sub_input => true, :unescape_input => true else { 'text' => (unescape_bracketed_text m[2]) } end end extension.process_method[self, target, attributes] } end end if found[:macroish] && (result.include?('image:') || result.include?('icon:')) # image:filename.png[Alt Text] result = result.gsub(ImageInlineMacroRx) { # alias match for Ruby 1.8.7 compat m = $~ # honor the escape if m[0].start_with? '\\' next m[0][1..-1] end raw_attrs = unescape_bracketed_text m[2] if m[0].start_with? 'icon:' type = 'icon' posattrs = ['size'] else type = 'image' posattrs = ['alt', 'width', 'height'] end target = sub_attributes(m[1]) unless type == 'icon' @document.register(:images, target) end attrs = parse_attributes(raw_attrs, posattrs) attrs['alt'] ||= Helpers.basename(target, true).tr('_-', ' ') Inline.new(self, :image, nil, :type => type, :target => target, :attributes => attrs).convert } end if found[:macroish_short_form] || found[:round_bracket] # indexterm:[Tigers,Big cats] # (((Tigers,Big cats))) # indexterm2:[Tigers] # ((Tigers)) result = result.gsub(IndextermInlineMacroRx) { # alias match for Ruby 1.8.7 compat m = $~ # honor the escape if m[0].start_with? '\\' next m[0][1..-1] end # fix non-matching group results in Opal under Firefox if ::RUBY_ENGINE_OPAL m[1] = nil if m[1] == '' end num_brackets = 0 text_in_brackets = nil unless (macro_name = m[1]) text_in_brackets = m[3] if (text_in_brackets.start_with? '(') && (text_in_brackets.end_with? 
')') text_in_brackets = text_in_brackets[1...-1] num_brackets = 3 else num_brackets = 2 end end # non-visible if macro_name == 'indexterm' || num_brackets == 3 if !macro_name # (((Tigers,Big cats))) terms = split_simple_csv normalize_string(text_in_brackets) else # indexterm:[Tigers,Big cats] terms = split_simple_csv normalize_string(m[2], true) end @document.register(:indexterms, [*terms]) Inline.new(self, :indexterm, nil, :attributes => {'terms' => terms}).convert # visible else if !macro_name # ((Tigers)) text = normalize_string text_in_brackets else # indexterm2:[Tigers] text = normalize_string m[2], true end @document.register(:indexterms, [text]) Inline.new(self, :indexterm, text, :type => :visible).convert end } end if found_colon && (result.include? '://') # inline urls, target[text] (optionally prefixed with link: and optionally surrounded by <>) result = result.gsub(LinkInlineRx) { # alias match for Ruby 1.8.7 compat m = $~ # honor the escape if m[2].start_with? '\\' # must enclose string following next in " for Opal next "#{m[1]}#{m[2][1..-1]}#{m[3]}" end # fix non-matching group results in Opal under Firefox if ::RUBY_ENGINE_OPAL m[3] = nil if m[3] == '' end # not a valid macro syntax w/o trailing square brackets # we probably shouldn't even get here...our regex is doing too much if m[1] == 'link:' && !m[3] next m[0] end prefix = (m[1] != 'link:' ? m[1] : '') target = m[2] suffix = '' unless m[3] || target !~ UriTerminator case $~[0] when ')' # strip the trailing ) target = target[0..-2] suffix = ')' when ';' # strip the <> around the link if prefix.start_with?('<') && target.end_with?('>') prefix = prefix[4..-1] target = target[0..-5] # strip the ); from the end of the link elsif target.end_with?(');') target = target[0..-3] suffix = ');' else target = target[0..-2] suffix = ';' end when ':' # strip the ): from the end of the link if target.end_with?('):') target = target[0..-3] suffix = '):' else target = target[0..-2] suffix = ':' end end end @document.register(:links, target) link_opts = { :type => :link, :target => target } attrs = nil #text = m[3] ? sub_attributes(m[3].gsub('\]', ']')) : '' if m[3].nil_or_empty? text = '' else if use_link_attrs && (m[3].start_with?('"') || (m[3].include?(',') && m[3].include?('='))) attrs = parse_attributes(sub_attributes(m[3].gsub('\]', ']')), []) link_opts[:id] = (attrs.delete 'id') if attrs.has_key? 'id' text = attrs[1] || '' else text = sub_attributes(m[3].gsub('\]', ']')) end # TODO enable in Asciidoctor 1.5.1 # support pipe-separated text and title #unless attrs && (attrs.has_key? 'title') # if text.include? '|' # attrs ||= {} # text, attrs['title'] = text.split '|', 2 # end #end if text.end_with? '^' text = text.chop if attrs attrs['window'] ||= '_blank' else attrs = {'window' => '_blank'} end end end if text.empty? if @document.attr? 'hide-uri-scheme' text = target.sub UriSniffRx, '' else text = target end if attrs attrs['role'] = %(bare #{attrs['role']}).chomp ' ' else attrs = {'role' => 'bare'} end end link_opts[:attributes] = attrs if attrs %(#{prefix}#{Inline.new(self, :anchor, text, link_opts).convert}#{suffix}) } end if found[:macroish] && (result.include? 'link:') || (result.include? 'mailto:') # inline link macros, link:target[text] result = result.gsub(LinkInlineMacroRx) { # alias match for Ruby 1.8.7 compat m = $~ # honor the escape if m[0].start_with? '\\' next m[0][1..-1] end raw_target = m[1] mailto = m[0].start_with?('mailto:') target = mailto ? 
%(mailto:#{raw_target}) : raw_target link_opts = { :type => :link, :target => target } attrs = nil #text = sub_attributes(m[2].gsub('\]', ']')) text = if use_link_attrs && (m[2].start_with?('"') || m[2].include?(',')) attrs = parse_attributes(sub_attributes(m[2].gsub('\]', ']')), []) link_opts[:id] = (attrs.delete 'id') if attrs.key? 'id' if mailto if attrs.key? 2 target = link_opts[:target] = "#{target}?subject=#{Helpers.encode_uri(attrs[2])}" if attrs.key? 3 target = link_opts[:target] = "#{target}&body=#{Helpers.encode_uri(attrs[3])}" end end end attrs[1] else sub_attributes(m[2].gsub('\]', ']')) end # QUESTION should a mailto be registered as an e-mail address? @document.register(:links, target) # TODO enable in Asciidoctor 1.5.1 # support pipe-separated text and title #unless attrs && (attrs.key? 'title') # if text.include? '|' # attrs ||= {} # text, attrs['title'] = text.split '|', 2 # end #end if text.end_with? '^' text = text.chop if attrs attrs['window'] ||= '_blank' else attrs = {'window' => '_blank'} end end if text.empty? # mailto is a special case, already processed if mailto text = raw_target else if @document.attr? 'hide-uri-scheme' text = raw_target.sub UriSniffRx, '' else text = raw_target end if attrs attrs['role'] = %(bare #{attrs['role']}).chomp ' ' else attrs = {'role' => 'bare'} end end end link_opts[:attributes] = attrs if attrs Inline.new(self, :anchor, text, link_opts).convert } end if result.include? '@' result = result.gsub(EmailInlineMacroRx) { # alias match for Ruby 1.8.7 compat m = $~ address = m[0] if (lead = m[1]) case lead when '\\' next address[1..-1] else next address end end target = %(mailto:#{address}) # QUESTION should this be registered as an e-mail address? @document.register(:links, target) Inline.new(self, :anchor, address, :type => :link, :target => target).convert } end if found[:macroish_short_form] && result.include?('footnote') result = result.gsub(FootnoteInlineMacroRx) { # alias match for Ruby 1.8.7 compat m = $~ # honor the escape if m[0].start_with? '\\' next m[0][1..-1] end if m[1] == 'footnote' id = nil # REVIEW it's a dirty job, but somebody's gotta do it text = restore_passthroughs(sub_inline_xrefs(sub_inline_anchors(normalize_string m[2], true)), false) index = @document.counter('footnote-number') @document.register(:footnotes, Document::Footnote.new(index, id, text)) type = nil target = nil else id, text = m[2].split(',', 2) id = id.strip # NOTE In Opal, text is set to empty string if comma is missing if text.nil_or_empty? if (footnote = @document.references[:footnotes].find {|fn| fn.id == id }) index = footnote.index text = footnote.text else index = nil text = id end target = id id = nil type = :xref else # REVIEW it's a dirty job, but somebody's gotta do it text = restore_passthroughs(sub_inline_xrefs(sub_inline_anchors(normalize_string text, true)), false) index = @document.counter('footnote-number') @document.register(:footnotes, Document::Footnote.new(index, id, text)) type = :ref target = nil end end Inline.new(self, :footnote, text, :attributes => {'index' => index}, :id => id, :target => target, :type => type).convert } end sub_inline_xrefs(sub_inline_anchors(result, found), found) end # Internal: Substitute normal and bibliographic anchors def sub_inline_anchors(text, found = nil) if (!found || found[:square_bracket]) && text.include?('[[[') text = text.gsub(InlineBiblioAnchorRx) { # alias match for Ruby 1.8.7 compat m = $~ # honor the escape if m[0].start_with? 
'\\' next m[0][1..-1] end id = reftext = m[1] Inline.new(self, :anchor, reftext, :type => :bibref, :target => id).convert } end if ((!found || found[:square_bracket]) && text.include?('[[')) || ((!found || found[:macroish]) && text.include?('anchor:')) text = text.gsub(InlineAnchorRx) { # alias match for Ruby 1.8.7 compat m = $~ # honor the escape if m[0].start_with? '\\' next m[0][1..-1] end # fix non-matching group results in Opal under Firefox if ::RUBY_ENGINE_OPAL m[1] = nil if m[1] == '' m[2] = nil if m[2] == '' m[4] = nil if m[4] == '' end id = m[1] || m[3] reftext = m[2] || m[4] || %([#{id}]) # enable if we want to allow double quoted values #id = id.sub(DoubleQuotedRx, '\2') #if reftext # reftext = reftext.sub(DoubleQuotedMultiRx, '\2') #else # reftext = "[#{id}]" #end Inline.new(self, :anchor, reftext, :type => :ref, :target => id).convert } end text end # Internal: Substitute cross reference links def sub_inline_xrefs(text, found = nil) if (!found || found[:macroish]) || text.include?('<<') text = text.gsub(XrefInlineMacroRx) { # alias match for Ruby 1.8.7 compat m = $~ # honor the escape if m[0].start_with? '\\' next m[0][1..-1] end # fix non-matching group results in Opal under Firefox if ::RUBY_ENGINE_OPAL m[1] = nil if m[1] == '' end if m[1] id, reftext = m[1].split(',', 2).map {|it| it.strip } id = id.sub(DoubleQuotedRx, '\2') # NOTE In Opal, reftext is set to empty string if comma is missing reftext = if reftext.nil_or_empty? nil else reftext.sub(DoubleQuotedMultiRx, '\2') end else id = m[2] reftext = m[3] unless m[3].nil_or_empty? end if id.include? '#' path, fragment = id.split('#') # QUESTION perform this check and throw it back if it fails? #elsif (start_chr = id.chr) == '.' || start_chr == '/' # next m[0][1..-1] else path = nil fragment = id end # handles forms: doc#, doc.adoc#, doc#id and doc.adoc#id if path path = Helpers.rootname(path) # the referenced path is this document, or its contents has been included in this document if @document.attributes['docname'] == path || @document.references[:includes].include?(path) refid = fragment path = nil target = %(##{fragment}) else refid = fragment ? %(#{path}##{fragment}) : path path = "#{@document.attributes['relfileprefix']}#{path}#{@document.attributes.fetch 'outfilesuffix', '.html'}" target = fragment ? %(#{path}##{fragment}) : path end # handles form: id or Section Title else # resolve fragment as reftext if cannot be resolved as refid and looks like reftext if !(@document.references[:ids].has_key? fragment) && ((fragment.include? ' ') || fragment.downcase != fragment) && (resolved_id = RUBY_MIN_VERSION_1_9 ? (@document.references[:ids].key fragment) : (@document.references[:ids].index fragment)) fragment = resolved_id end refid = fragment target = %(##{fragment}) end Inline.new(self, :anchor, reftext, :type => :xref, :target => target, :attributes => {'path' => path, 'fragment' => fragment, 'refid' => refid}).convert } end text end # Public: Substitute callout source references # # text - The String text to process # # Returns the converted String text def sub_callouts(text) # FIXME cache this dynamic regex callout_rx = (attr? 'line-comment') ? 
/(?:#{::Regexp.escape(attr 'line-comment')} )?#{CalloutSourceRxt}/ : CalloutSourceRx text.gsub(callout_rx) { # alias match for Ruby 1.8.7 compat m = $~ # honor the escape if m[1] == '\\' # we have to do a sub since we aren't sure it's the first char next m[0].sub('\\', '') end Inline.new(self, :callout, m[3], :id => @document.callouts.read_next_id).convert } end # Public: Substitute post replacements # # text - The String text to process # # Returns the converted String text def sub_post_replacements(text) if (@document.attributes.has_key? 'hardbreaks') || (@attributes.has_key? 'hardbreaks-option') lines = (text.split EOL) return text if lines.size == 1 last = lines.pop lines.map {|line| Inline.new(self, :break, line.rstrip.chomp(LINE_BREAK), :type => :line).convert }.push(last) * EOL elsif text.include? '+' text.gsub(LineBreakRx) { Inline.new(self, :break, $~[1], :type => :line).convert } else text end end # Internal: Convert a quoted text region # # match - The MatchData for the quoted text region # type - The quoting type (single, double, strong, emphasis, monospaced, etc) # scope - The scope of the quoting (constrained or unconstrained) # # Returns The converted String text for the quoted text region def convert_quoted_text(match, type, scope) unescaped_attrs = nil if match[0].start_with? '\\' if scope == :constrained && !(attrs = match[2]).nil_or_empty? unescaped_attrs = %([#{attrs}]) else return match[0][1..-1] end end if scope == :constrained if unescaped_attrs %(#{unescaped_attrs}#{Inline.new(self, :quoted, match[3], :type => type).convert}) else if (attributes = parse_quoted_text_attributes(match[2])) id = attributes.delete 'id' type = :unquoted if type == :mark else id = nil end %(#{match[1]}#{Inline.new(self, :quoted, match[3], :type => type, :id => id, :attributes => attributes).convert}) end else if (attributes = parse_quoted_text_attributes(match[1])) id = attributes.delete 'id' type = :unquoted if type == :mark else id = nil end Inline.new(self, :quoted, match[2], :type => type, :id => id, :attributes => attributes).convert end end # Internal: Parse the attributes that are defined on quoted text # # str - A String of unprocessed attributes (space-separated roles or the id/role shorthand syntax) # # returns nil if str is nil, an empty Hash if str is empty, otherwise a Hash of attributes (role and id only) def parse_quoted_text_attributes(str) return unless str return {} if str.empty? str = sub_attributes(str) if str.include?('{') str = str.strip # for compliance, only consider first positional attribute str, _ = str.split(',', 2) if str.include?(',') if str.empty? {} elsif (str.start_with?('.') || str.start_with?('#')) && Compliance.shorthand_property_syntax segments = str.split('#', 2) if segments.length > 1 id, *more_roles = segments[1].split('.') else id = nil more_roles = [] end roles = segments[0].empty? ? [] : segments[0].split('.') if roles.length > 1 roles.shift end if more_roles.length > 0 roles.concat more_roles end attrs = {} attrs['id'] = id if id attrs['role'] = roles * ' ' unless roles.empty? attrs else {'role' => str} end end # Internal: Parse the attributes in the attribute line # # attrline - A String of unprocessed attributes (key/value pairs) # posattrs - The keys for positional attributes # # returns nil if attrline is nil, an empty Hash if attrline is empty, otherwise a Hash of parsed attributes def parse_attributes(attrline, posattrs = ['role'], opts = {}) return unless attrline return {} if attrline.empty? 
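    # Illustrative sketch of the expected result (positional values are stored by
    # AttributeList under both their 1-based index and the given positional name):
    #
    #   parse_attributes 'Tiger, 200', ['alt', 'width']
    #   # => {1 => "Tiger", "alt" => "Tiger", 2 => "200", "width" => "200"}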
attrline = @document.sub_attributes(attrline) if opts[:sub_input] attrline = unescape_bracketed_text(attrline) if opts[:unescape_input] block = nil if opts.fetch(:sub_result, true) # substitutions are only performed on attribute values if block is not nil block = self end if (into = opts[:into]) AttributeList.new(attrline, block).parse_into(into, posattrs) else AttributeList.new(attrline, block).parse(posattrs) end end # Internal: Strip bounding whitespace, fold endlines and unescaped closing # square brackets from text extracted from brackets def unescape_bracketed_text(text) return '' if text.empty? # FIXME make \] a regex text.strip.tr(EOL, ' ').gsub('\]', ']') end # Internal: Strip bounding whitespace and fold endlines def normalize_string str, unescape_brackets = false if str.empty? '' elsif unescape_brackets unescape_brackets str.strip.tr(EOL, ' ') else str.strip.tr(EOL, ' ') end end # Internal: Unescape closing square brackets. # Intended for text extracted from square brackets. def unescape_brackets str # FIXME make \] a regex str.empty? ? '' : str.gsub('\]', ']') end # Internal: Split text formatted as CSV with support # for double-quoted values (in which commas are ignored) def split_simple_csv str if str.empty? values = [] elsif str.include? '"' values = [] current = [] quote_open = false str.each_char do |c| case c when ',' if quote_open current.push c else values << current.join.strip current = [] end when '"' quote_open = !quote_open else current.push c end end values << current.join.strip else values = str.split(',').map {|it| it.strip } end values end # Internal: Resolve the list of comma-delimited subs against the possible options. # # subs - A comma-delimited String of substitution aliases # # returns An Array of Symbols representing the substitution operation def resolve_subs subs, type = :block, defaults = nil, subject = nil return [] if subs.nil_or_empty? candidates = nil modifiers_present = SubModifierSniffRx =~ subs subs.tr(' ', '').split(',').each do |key| modifier_operation = nil if modifiers_present if (first = key.chr) == '+' modifier_operation = :append key = key[1..-1] elsif first == '-' modifier_operation = :remove key = key[1..-1] elsif key.end_with? '+' modifier_operation = :prepend key = key.chop end end key = key.to_sym # special case to disable callouts for inline subs if type == :inline && (key == :verbatim || key == :v) resolved_keys = [:specialcharacters] elsif COMPOSITE_SUBS.key? key resolved_keys = COMPOSITE_SUBS[key] elsif type == :inline && key.length == 1 && (SUB_SYMBOLS.key? key) resolved_key = SUB_SYMBOLS[key] if (candidate = COMPOSITE_SUBS[resolved_key]) resolved_keys = candidate else resolved_keys = [resolved_key] end else resolved_keys = [key] end if modifier_operation candidates ||= (defaults ? defaults.dup : []) case modifier_operation when :append candidates += resolved_keys when :prepend candidates = resolved_keys + candidates when :remove candidates -= resolved_keys end else candidates ||= [] candidates += resolved_keys end end # weed out invalid options and remove duplicates (first wins) # TODO may be use a set instead? resolved = candidates & SUB_OPTIONS[type] unless (candidates - resolved).empty? invalid = candidates - resolved warn %(asciidoctor: WARNING: invalid substitution type#{invalid.size > 1 ? 's' : ''}#{subject ? 
' for ' : nil}#{subject}: #{invalid * ', '}) end resolved end def resolve_block_subs subs, defaults, subject resolve_subs subs, :block, defaults, subject end def resolve_pass_subs subs resolve_subs subs, :inline, nil, 'passthrough macro' end # Public: Highlight the source code if a source highlighter is defined # on the document, otherwise return the text unprocessed # # Callout marks are stripped from the source prior to passing it to the # highlighter, then later restored in converted form, so they are not # incorrectly processed by the source highlighter. # # source - the source code String to highlight # process_callouts - a Boolean flag indicating whether callout marks should be substituted # # returns the highlighted source code, if a source highlighter is defined # on the document, otherwise the source with verbatim substituions applied def highlight_source source, process_callouts, highlighter = nil case (highlighter ||= @document.attributes['source-highlighter']) when 'coderay' unless (highlighter_loaded = defined? ::CodeRay) || @document.attributes['coderay-unavailable'] if (Helpers.require_library 'coderay', true, :warn).nil? # prevent further attempts to load CodeRay @document.set_attr 'coderay-unavailable', '' else highlighter_loaded = true end end when 'pygments' unless (highlighter_loaded = defined? ::Pygments) || @document.attributes['pygments-unavailable'] if (Helpers.require_library 'pygments', 'pygments.rb', :warn).nil? # prevent further attempts to load Pygments @document.set_attr 'pygments-unavailable', '' else highlighter_loaded = true end end else # unknown highlighting library (something is misconfigured if we arrive here) highlighter_loaded = false end return sub_source source, process_callouts unless highlighter_loaded lineno = 0 callout_on_last = false if process_callouts callout_marks = {} last = -1 # FIXME cache this dynamic regex callout_rx = (attr? 'line-comment') ? /(?:#{::Regexp.escape(attr 'line-comment')} )?#{CalloutExtractRxt}/ : CalloutExtractRx # extract callout marks, indexed by line number source = source.split(EOL).map {|line| lineno = lineno + 1 line.gsub(callout_rx) { # alias match for Ruby 1.8.7 compat m = $~ # honor the escape if m[1] == '\\' m[0].sub('\\', '') else (callout_marks[lineno] ||= []) << m[3] last = lineno nil end } } * EOL callout_on_last = (last == lineno) callout_marks = nil if callout_marks.empty? else callout_marks = nil end linenums_mode = nil highlight_lines = nil case highlighter when 'coderay' if (linenums_mode = (attr? 'linenums') ? (@document.attributes['coderay-linenums-mode'] || :table).to_sym : nil) if attr? 'highlight', nil, false highlight_lines = resolve_highlight_lines(attr 'highlight', nil, false) end end result = ::CodeRay::Duo[attr('language', :text, false).to_sym, :html, { :css => (@document.attributes['coderay-css'] || :class).to_sym, :line_numbers => linenums_mode, :line_number_anchors => false, :highlight_lines => highlight_lines, :bold_every => false}].highlight source when 'pygments' lexer = ::Pygments::Lexer[attr('language', nil, false)] || ::Pygments::Lexer['text'] opts = { :cssclass => 'pyhl', :classprefix => 'tok-', :nobackground => true } unless (@document.attributes['pygments-css'] || 'class') == 'class' opts[:noclasses] = true opts[:style] = (@document.attributes['pygments-style'] || Stylesheets::DEFAULT_PYGMENTS_STYLE) end if attr? 'highlight', nil, false unless (highlight_lines = resolve_highlight_lines(attr 'highlight', nil, false)).empty? opts[:hl_lines] = highlight_lines * ' ' end end if attr? 
'linenums' # TODO we could add the line numbers in ourselves instead of having to strip out the junk # FIXME move these regular expressions into constants if (opts[:linenos] = @document.attributes['pygments-linenums-mode'] || 'table') == 'table' linenums_mode = :table # NOTE these subs clean out HTML that messes up our styles result = lexer.highlight(source, :options => opts). sub(/
    <div class="pyhl">(.*)<\/div>/m, '\1'). gsub(/<pre[^>]*>(.*?)<\/pre>\s*/m, '\1') else result = lexer.highlight(source, :options => opts). sub(/<div class="pyhl"><pre[^>
    ]*>(.*?)<\/pre><\/div>/m, '\1') end else # nowrap gives us just the highlighted source; won't work when we need linenums though opts[:nowrap] = true result = lexer.highlight(source, :options => opts) end end # fix passthrough placeholders that got caught up in syntax highlighting unless @passthroughs.empty? result = result.gsub PASS_MATCH_HI, %(#{PASS_START}\\1#{PASS_END}) end if process_callouts && callout_marks lineno = 0 reached_code = linenums_mode != :table result.split(EOL).map {|line| unless reached_code unless line.include?('') next line end reached_code = true end lineno = lineno + 1 if (conums = callout_marks.delete(lineno)) tail = nil if callout_on_last && callout_marks.empty? # QUESTION when does this happen? if (pos = line.index '') tail = line[pos..-1] line = %(#{line[0...pos].chomp ' '} ) else # Give conum on final line breathing room if trailing space in source is dropped line = %(#{line.chomp ' '} ) end end if conums.size == 1 %(#{line}#{Inline.new(self, :callout, conums[0], :id => @document.callouts.read_next_id).convert }#{tail}) else conums_markup = conums.map {|conum| Inline.new(self, :callout, conum, :id => @document.callouts.read_next_id).convert } * ' ' %(#{line}#{conums_markup}#{tail}) end else line end } * EOL else result end end # e.g., highlight="1-5, !2, 10" or highlight=1-5;!2,10 def resolve_highlight_lines spec lines = [] spec.delete(' ').split(DataDelimiterRx).map do |entry| negate = false if entry.start_with? '!' entry = entry[1..-1] negate = true end if entry.include? '-' s, e = entry.split '-', 2 line_nums = (s.to_i..e.to_i).to_a if negate lines -= line_nums else lines.concat line_nums end else if negate lines.delete entry.to_i else lines << entry.to_i end end end lines.sort.uniq end # Public: Apply verbatim substitutions on source (for use when highlighting is disabled). # # source - the source code String on which to apply verbatim substitutions # process_callouts - a Boolean flag indicating whether callout marks should be substituted # # returns the substituted source def sub_source source, process_callouts return process_callouts ? sub_callouts(sub_specialchars(source)) : sub_specialchars(source) end # Internal: Lock-in the substitutions for this block # # Looks for an attribute named "subs". If present, resolves the # substitutions and assigns it to the subs property on this block. # Otherwise, assigns a set of default substitutions based on the # content model of the block. # # Returns nothing def lock_in_subs if @default_subs default_subs = @default_subs else case @content_model when :simple default_subs = SUBS[:normal] when :verbatim if @context == :listing || (@context == :literal && !(option? 'listparagraph')) default_subs = SUBS[:verbatim] elsif @context == :verse default_subs = SUBS[:normal] else default_subs = SUBS[:basic] end when :raw if @context == :stem default_subs = SUBS[:basic] else default_subs = SUBS[:pass] end else return end end if (custom_subs = @attributes['subs']) @subs = resolve_block_subs custom_subs, default_subs, @context else @subs = default_subs.dup end # QUESION delegate this logic to a method? if @context == :listing && @style == 'source' && @attributes['language'] && @document.basebackend?('html') && SUB_HIGHLIGHT.include?(@document.attributes['source-highlighter']) @subs = @subs.map {|sub| sub == :specialcharacters ? 
:highlight : sub } end end end end asciidoctor-1.5.5/lib/asciidoctor/table.rb000066400000000000000000000444721277513741400205400ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor # Public: Methods and constants for managing AsciiDoc table content in a document. # It supports all three of AsciiDoc's table formats: psv, dsv and csv. class Table < AbstractBlock # Public: A data object that encapsulates the collection of rows (head, foot, body) for a table class Rows attr_accessor :head, :foot, :body def initialize head = [], foot = [], body = [] @head = head @foot = foot @body = body end alias :[] :send end # Public: A String key that specifies the default table format in AsciiDoc (psv) DEFAULT_DATA_FORMAT = 'psv' # Public: An Array of String keys that represent the table formats in AsciiDoc DATA_FORMATS = ['psv', 'dsv', 'csv'] # Public: A Hash mapping the AsciiDoc table formats to their default delimiters DEFAULT_DELIMITERS = { 'psv' => '|', 'dsv' => ':', 'csv' => ',' } # Public: A Hash mapping styles abbreviations to styles that can be applied # to a table column or cell TEXT_STYLES = { 'd' => :none, 's' => :strong, 'e' => :emphasis, 'm' => :monospaced, 'h' => :header, 'l' => :literal, 'v' => :verse, 'a' => :asciidoc } # Public: A Hash mapping alignment abbreviations to alignments (horizontal # and vertial) that can be applies to a table column or cell ALIGNMENTS = { :h => { '<' => 'left', '>' => 'right', '^' => 'center' }, :v => { '<' => 'top', '>' => 'bottom', '^' => 'middle' } } # Public: Get/Set the columns for this table attr_accessor :columns # Public: Get/Set the Rows struct for this table (encapsulates head, foot # and body rows) attr_accessor :rows # Public: Boolean specifies whether this table has a header row attr_accessor :has_header_option def initialize parent, attributes super parent, :table @rows = Rows.new @columns = [] @has_header_option = attributes.key? 'header-option' # smell like we need a utility method here # to resolve an integer width from potential bogus input if (pcwidth = attributes['width']) if (pcwidth_intval = pcwidth.to_i) > 100 || pcwidth_intval < 1 pcwidth_intval = 100 unless pcwidth_intval == 0 && (pcwidth == '0' || pcwidth == '0%') end else pcwidth_intval = 100 end @attributes['tablepcwidth'] = pcwidth_intval if @document.attributes.key? 'pagewidth' @attributes['tableabswidth'] ||= ((@attributes['tablepcwidth'].to_f / 100) * @document.attributes['pagewidth']).round end attributes['orientation'] = 'landscape' if attributes.key? 'rotate-option' end # Internal: Returns whether the current row being processed is # the header row def header_row? @has_header_option && @rows.body.empty? end # Internal: Creates the Column objects from the column spec # # returns nothing def create_columns colspecs cols = [] width_base = 0 colspecs.each do |colspec| width_base += colspec['width'] cols << (Column.new self, cols.size, colspec) end unless (@columns = cols).empty? @attributes['colcount'] = cols.size assign_column_widths(width_base == 0 ? nil : width_base) end nil end # Internal: Assign column widths to columns # # This method rounds the percentage width values to 4 decimal places and # donates the balance to the final column. # # This method assumes there's at least one column in the columns array. 
# # width_base - the total of the relative column values used for calculating percentage widths (default: nil) # # returns nothing def assign_column_widths width_base = nil pf = 10.0 ** 4 # precision factor (multipler / divisor) for managing precision of calculated result total_width = col_pcwidth = 0 if width_base @columns.each {|col| total_width += (col_pcwidth = col.assign_width nil, width_base, pf) } else col_pcwidth = ((100 * pf / @columns.size).to_i) / pf col_pcwidth = col_pcwidth.to_i if col_pcwidth.to_i == col_pcwidth @columns.each {|col| total_width += col.assign_width col_pcwidth } end # donate balance, if any, to final column @columns[-1].assign_width(((100 - total_width + col_pcwidth) * pf).round / pf) unless total_width == 100 nil end # Internal: Partition the rows into header, footer and body as determined # by the options on the table # # returns nothing def partition_header_footer(attributes) # set rowcount before splitting up body rows @attributes['rowcount'] = @rows.body.size num_body_rows = @rows.body.size if num_body_rows > 0 && @has_header_option head = @rows.body.shift num_body_rows -= 1 # styles aren't applied to header row head.each {|c| c.style = nil } # QUESTION why does AsciiDoc use an array for head? is it # possible to have more than one based on the syntax? @rows.head = [head] end if num_body_rows > 0 && attributes.key?('footer-option') @rows.foot = [@rows.body.pop] end nil end end # Public: Methods to manage the columns of an AsciiDoc table. In particular, it # keeps track of the column specs class Table::Column < AbstractNode # Public: Get/Set the Symbol style for this column. attr_accessor :style def initialize table, index, attributes = {} super table, :column @style = attributes['style'] attributes['colnumber'] = index + 1 attributes['width'] ||= 1 attributes['halign'] ||= 'left' attributes['valign'] ||= 'top' update_attributes(attributes) end # Public: An alias to the parent block (which is always a Table) alias :table :parent # Internal: Calculate and assign the widths (percentage and absolute) for this column # # This method assigns the colpcwidth and colabswidth attributes. # # returns the resolved colpcwidth value def assign_width col_pcwidth, width_base = nil, pf = 10000.0 if width_base col_pcwidth = ((@attributes['width'].to_f / width_base) * 100 * pf).to_i / pf col_pcwidth = col_pcwidth.to_i if col_pcwidth.to_i == col_pcwidth end @attributes['colpcwidth'] = col_pcwidth if parent.attributes.key? 'tableabswidth' # FIXME calculate more accurately (only used in DocBook output) @attributes['colabswidth'] = ((col_pcwidth / 100.0) * parent.attributes['tableabswidth']).round end col_pcwidth end end # Public: Methods for managing the a cell in an AsciiDoc table. 
class Table::Cell < AbstractNode # Public: Get/Set the Symbol style for this cell (default: nil) attr_accessor :style # Public: An Integer of the number of columns this cell will span (default: nil) attr_accessor :colspan # Public: An Integer of the number of rows this cell will span (default: nil) attr_accessor :rowspan # Public: An alias to the parent block (which is always a Column) alias :column :parent # Public: The internal Asciidoctor::Document for a cell that has the asciidoc style attr_reader :inner_document def initialize column, text, attributes = {}, cursor = nil super column, :cell @text = text @style = nil @colspan = nil @rowspan = nil # TODO feels hacky if column @style = column.attributes['style'] update_attributes(column.attributes) end if attributes @colspan = attributes.delete('colspan') @rowspan = attributes.delete('rowspan') # TODO eventualy remove the style attribute from the attributes hash #@style = attributes.delete('style') if attributes.key? 'style' @style = attributes['style'] if attributes.key? 'style' update_attributes(attributes) end # only allow AsciiDoc cells in non-header rows if @style == :asciidoc && !column.table.header_row? # FIXME hide doctitle from nested document; temporary workaround to fix # nested document seeing doctitle and assuming it has its own document title parent_doctitle = @document.attributes.delete('doctitle') # NOTE we need to process the first line of content as it may not have been processed # the included content cannot expect to match conditional terminators in the remaining # lines of table cell content, it must be self-contained logic inner_document_lines = @text.split(EOL) unless inner_document_lines.empty? || !inner_document_lines[0].include?('::') unprocessed_lines = inner_document_lines[0] processed_lines = PreprocessorReader.new(@document, unprocessed_lines).readlines if processed_lines != unprocessed_lines inner_document_lines.shift inner_document_lines.unshift(*processed_lines) end end @inner_document = Document.new(inner_document_lines, :header_footer => false, :parent => @document, :cursor => cursor) @document.attributes['doctitle'] = parent_doctitle unless parent_doctitle.nil? end end # Public: Get the text with normal substitutions applied for this cell. Used for cells in the head rows def text apply_normal_subs(@text).strip end # Public: Handles the body data (tbody, tfoot), applying styles and partitioning into paragraphs def content if @style == :asciidoc @inner_document.convert else text.split(BlankLineRx).map do |p| !@style || @style == :header ? p : Inline.new(parent, :quoted, p, :type => @style).convert end end end def to_s "#{super.to_s} - [text: #@text, colspan: #{@colspan || 1}, rowspan: #{@rowspan || 1}, attributes: #@attributes]" end end # Public: Methods for managing the parsing of an AsciiDoc table. Instances of this # class are primarily responsible for tracking the buffer of a cell as the parser # moves through the lines of the table using tail recursion. When a cell boundary # is located, the previous cell is closed, an instance of Table::Cell is # instantiated, the row is closed if the cell satisifies the column count and, # finally, a new buffer is allocated to track the next cell. 
class Table::ParserContext # Public: The Table currently being parsed attr_accessor :table # Public: The AsciiDoc table format (psv, dsv or csv) attr_accessor :format # Public: Get the expected column count for a row # # colcount is the number of columns to pull into a row # A value of -1 means we use the number of columns found # in the first line as the colcount attr_reader :colcount # Public: The String buffer of the currently open cell attr_accessor :buffer # Public: The cell delimiter for this table. attr_reader :delimiter # Public: The cell delimiter compiled Regexp for this table. attr_reader :delimiter_re def initialize(reader, table, attributes = {}) @reader = reader @table = table # TODO if reader.cursor becomes a reference, this would require .dup @last_cursor = reader.cursor if (@format = attributes['format']) unless Table::DATA_FORMATS.include? @format raise %(Illegal table format: #{@format}) end else @format = Table::DEFAULT_DATA_FORMAT end @delimiter = if @format == 'psv' && !(attributes.key? 'separator') && table.document.nested? '!' else attributes['separator'] || Table::DEFAULT_DELIMITERS[@format] end @delimiter_re = /#{Regexp.escape @delimiter}/ @colcount = table.columns.empty? ? -1 : table.columns.size @buffer = '' @cellspecs = [] @cell_open = false @active_rowspans = [0] @column_visits = 0 @current_row = [] @linenum = -1 end # Public: Checks whether the line provided starts with the cell delimiter # used by this table. # # returns true if the line starts with the delimiter, false otherwise def starts_with_delimiter?(line) line.start_with? @delimiter end # Public: Checks whether the line provided contains the cell delimiter # used by this table. # # returns Regexp MatchData if the line contains the delimiter, false otherwise def match_delimiter(line) @delimiter_re.match(line) end # Public: Skip beyond the matched delimiter because it was a false positive # (either because it was escaped or in a quoted context) # # returns the String after the match def skip_matched_delimiter(match, escaped = false) @buffer = %(#{@buffer}#{escaped ? match.pre_match.chop : match.pre_match}#{@delimiter}) match.post_match end # Public: Determines whether the buffer has unclosed quotes. Used for CSV data. # # returns true if the buffer has unclosed quotes, false if it doesn't or it # isn't quoted data def buffer_has_unclosed_quotes?(append = nil) record = %(#{@buffer}#{append}).strip record.start_with?('"') && !record.start_with?('""') && !record.end_with?('"') end # Public: Determines whether the buffer contains quoted data. Used for CSV data. # # returns true if the buffer starts with a double quote (and not an escaped double quote), # false otherwise def buffer_quoted? @buffer = @buffer.lstrip @buffer.start_with?('"') && !@buffer.start_with?('""') end # Public: Takes a cell spec from the stack. Cell specs precede the delimiter, so a # stack is used to carry over the spec from the previous cell to the current cell # when the cell is being closed. # # returns The cell spec Hash captured from parsing the previous cell def take_cellspec @cellspecs.shift end # Public: Puts a cell spec onto the stack. Cell specs precede the delimiter, so a # stack is used to carry over the spec to the next cell. # # returns nothing def push_cellspec(cellspec = {}) # this shouldn't be nil, but we check anyway @cellspecs << (cellspec || {}) nil end # Public: Marks that the cell should be kept open. Used when the end of the line is # reached and the cell may contain additional text. 
# # returns nothing def keep_cell_open @cell_open = true nil end # Public: Marks the cell as closed so that the parser knows to instantiate a new cell # instance and add it to the current row. # # returns nothing def mark_cell_closed @cell_open = false nil end # Public: Checks whether the current cell is still open # # returns true if the cell is marked as open, false otherwise def cell_open? @cell_open end # Public: Checks whether the current cell has been marked as closed # # returns true if the cell is marked as closed, false otherwise def cell_closed? !@cell_open end # Public: If the current cell is open, close it. In additional, push the # cell spec captured from the end of this cell onto the stack for use # by the next cell. # # returns nothing def close_open_cell(next_cellspec = {}) push_cellspec next_cellspec close_cell(true) if cell_open? advance nil end # Public: Close the current cell, instantiate a new Table::Cell, add it to # the current row and, if the number of expected columns for the current # row has been met, close the row and begin a new one. # # returns nothing def close_cell(eol = false) cell_text = @buffer.strip @buffer = '' if @format == 'psv' cellspec = take_cellspec if cellspec repeat = cellspec.delete('repeatcol') || 1 else warn %(asciidoctor: ERROR: #{@last_cursor.line_info}: table missing leading separator, recovering automatically) cellspec = {} repeat = 1 end else cellspec = nil repeat = 1 if @format == 'csv' if !cell_text.empty? && cell_text.include?('"') # this may not be perfect logic, but it hits the 99% if cell_text.start_with?('"') && cell_text.end_with?('"') # unquote cell_text = cell_text[1...-1].strip end # collapses escaped quotes cell_text = cell_text.tr_s('"', '"') end end end 1.upto(repeat) do |i| # TODO make column resolving an operation if @colcount == -1 @table.columns << (column = Table::Column.new(@table, @table.columns.size + i - 1)) if cellspec && (cellspec.key? 'colspan') && (extra_cols = cellspec['colspan'].to_i - 1) > 0 offset = @table.columns.size extra_cols.times do |j| @table.columns << Table::Column.new(@table, offset + j) end end else # QUESTION is this right for cells that span columns? unless (column = @table.columns[@current_row.size]) warn %(asciidoctor: ERROR: #{@last_cursor.line_info}: dropping cell because it exceeds specified number of columns) return end end cell = Table::Cell.new(column, cell_text, cellspec, @last_cursor) @last_cursor = @reader.cursor unless !cell.rowspan || cell.rowspan == 1 activate_rowspan(cell.rowspan, (cell.colspan || 1)) end @column_visits += (cell.colspan || 1) @current_row << cell # don't close the row if we're on the first line and the column count has not been set explicitly # TODO perhaps the colcount/linenum logic should be in end_of_row? (or a should_end_row? method) close_row if end_of_row? && (@colcount != -1 || @linenum > 0 || (eol && i == repeat)) end @cell_open = false nil end # Public: Close the row by adding it to the Table and resetting the row # Array and counter variables. # # returns nothing def close_row @table.rows.body << @current_row # don't have to account for active rowspans here # since we know this is first row @colcount = @column_visits if @colcount == -1 @column_visits = 0 @current_row = [] @active_rowspans.shift @active_rowspans[0] ||= 0 nil end # Public: Activate a rowspan. The rowspan Array is consulted when # determining the effective number of cells in the current row. 
# # returns nothing def activate_rowspan(rowspan, colspan) 1.upto(rowspan - 1).each {|i| # longhand assignment used for Opal compatibility @active_rowspans[i] = (@active_rowspans[i] || 0) + colspan } nil end # Public: Check whether we've met the number of effective columns for the current row. def end_of_row? @colcount == -1 || effective_column_visits == @colcount end # Public: Calculate the effective column visits, which consists of the number of # cells plus any active rowspans. def effective_column_visits @column_visits + @active_rowspans[0] end # Internal: Advance to the next line (which may come after the parser begins processing # the next line if the last cell had wrapped content). def advance @linenum += 1 end end end asciidoctor-1.5.5/lib/asciidoctor/timings.rb000066400000000000000000000020311277513741400211040ustar00rootroot00000000000000# encoding: UTF-8 module Asciidoctor class Timings def initialize @log = {} @timers = {} end def start key @timers[key] = ::Time.now end def record key @log[key] = (::Time.now - (@timers.delete key)) end def read_parse (time = (@log[:read] || 0) + (@log[:parse] || 0)) > 0 ? time : nil end def convert @log[:convert] || 0 end def read_parse_convert (time = (@log[:read] || 0) + (@log[:parse] || 0) + (@log[:convert] || 0)) > 0 ? time : nil end def total (time = (@log[:read] || 0) + (@log[:parse] || 0) + (@log[:convert] || 0) + (@log[:write] || 0)) > 0 ? time : nil end def print_report to = $stdout, subject = nil to.puts %(Input file: #{subject}) if subject to.puts %( Time to read and parse source: #{'%05.5f' % read_parse.to_f}) to.puts %( Time to convert document: #{'%05.5f' % convert.to_f}) to.puts %( Total time (read, parse and convert): #{'%05.5f' % read_parse_convert.to_f}) end end end asciidoctor-1.5.5/lib/asciidoctor/version.rb000066400000000000000000000000531277513741400211210ustar00rootroot00000000000000module Asciidoctor VERSION = '1.5.5' end asciidoctor-1.5.5/man/000077500000000000000000000000001277513741400146135ustar00rootroot00000000000000asciidoctor-1.5.5/man/asciidoctor.1000066400000000000000000000202601277513741400172000ustar00rootroot00000000000000'\" t .\" Title: asciidoctor .\" Author: Dan Allen, Sarah White, Ryan Waldron .\" Generator: Asciidoctor 1.5.5 .\" Date: 2016-10-05 .\" Manual: Asciidoctor Manual .\" Source: Asciidoctor 1.5.5 .\" Language: English .\" .TH "ASCIIDOCTOR" "1" "2016-10-05" "Asciidoctor 1.5.5" "Asciidoctor Manual" .ie \n(.g .ds Aq \(aq .el .ds Aq ' .ss \n[.ss] 0 .nh .ad l .de URL \\$2 \(laURL: \\$1 \(ra\\$3 .. .if \n[.g] .mso www.tmac .LINKSTYLE blue R < > .SH "NAME" asciidoctor \- converts AsciiDoc source files to HTML, DocBook and other formats .SH "SYNOPSIS" .sp \fBasciidoctor\fP [\fIOPTION\fP]... \fIFILE\fP... .SH "DESCRIPTION" .sp The asciidoctor(1) command converts the AsciiDoc source file(s) \fIFILE\fP to HTML5, DocBook 5, DocBook 4.5, man(ual) page and other custom output formats. .sp If \fIFILE\fP is \fI\-\fP then the AsciiDoc source is read from standard input. .SH "OPTIONS" .SS "Security Settings" .sp \fB\-B, \-\-base\-dir\fP=\fIDIR\fP .RS 4 Base directory containing the document and resources. Defaults to the directory containing the source file, or the working directory if the source is read from a stream. Can be used as a way to chroot the execution of the program. .RE .sp \fB\-S, \-\-safe\-mode\fP=\fISAFE_MODE\fP .RS 4 Set safe mode level: \fIunsafe\fP, \fIsafe\fP, \fIserver\fP or \fIsecure\fP. Disables potentially dangerous macros in source files, such as \f[CR]include::[]\fP. 
If not set, the safe mode level defaults to \fIunsafe\fP when Asciidoctor is invoked using this script. .RE .sp \fB\-\-safe\fP .RS 4 Set safe mode level to \fIsafe\fP. Enables include macros, but restricts access to ancestor paths of source file. Provided for compatibility with the asciidoc command. If not set, the safe mode level defaults to \fIunsafe\fP when Asciidoctor is invoked using this script. .RE .SS "Document Settings" .sp \fB\-a, \-\-attribute\fP=\fIATTRIBUTE\fP .RS 4 Define, override or delete a document attribute. Command\-line attributes take precedence over attributes defined in the source file. .sp \fIATTRIBUTE\fP is normally formatted as a key\-value pair, in the form \fINAME=VALUE\fP. Alternate acceptable forms are \fINAME\fP (where the \fIVALUE\fP defaults to an empty string), \fINAME!\fP (unassigns the \fINAME\fP attribute) and \fINAME=VALUE@\fP (where \fIVALUE\fP does not override value of \fINAME\fP attribute if it\(cqs already defined in the source document). Values containing spaces should be enclosed in quotes. .sp This option may be specified more than once. .RE .sp \fB\-b, \-\-backend\fP=\fIBACKEND\fP .RS 4 Backend output file format: \fIhtml5\fP, \fIdocbook5\fP, \fIdocbook45\fP and \fImanpage\fP are supported out of the box. You can also use the backend alias names \fIhtml\fP (aliased to \fIhtml5\fP) or \fIdocbook\fP (aliased to \fIdocbook5\fP). Defaults to \fIhtml5\fP. Other options can be passed, but if Asciidoctor cannot find the backend, it will fail during conversion. .RE .sp \fB\-d, \-\-doctype\fP=\fIDOCTYPE\fP .RS 4 Document type: \fIarticle\fP, \fIbook\fP, \fImanpage\fP or \fIinline\fP. Sets the root element when using the \fIdocbook\fP backend and the style class on the HTML body element when using the \fIhtml\fP backend. The \fIbook\fP document type allows multiple level\-0 section titles in a single document. The \fImanpage\fP document type enables parsing of metadata necessary to produce a manpage. The \fIinline\fP document type allows the content of a single paragraph to be formatted and returned without wrapping it in a containing element. Defaults to \fIarticle\fP. .RE .SS "Rendering Control" .sp \fB\-C, \-\-compact\fP .RS 4 Compact the output by removing blank lines. (No longer in use). .RE .sp \fB\-D, \-\-destination\-dir\fP=\fIDIR\fP .RS 4 Destination output directory. Defaults to the directory containing the source file, or the working directory if the source is read from a stream. If specified, the directory is resolved relative to the working directory. .RE .sp \fB\-E, \-\-template\-engine\fP=\fINAME\fP .RS 4 Template engine to use for the custom converter templates. The gem with the same name as the engine will be loaded automatically. This name is also used to build the full path to the custom converter templates. If a template engine is not specified, it will be auto\-detected based on the file extension of the custom converter templates found. .RE .sp \fB\-e, \-\-eruby\fP .RS 4 Specifies the eRuby implementation to use for executing the custom converter templates written in ERB. Supported values are \fIerb\fP and \fIerubis\fP. Defaults to \fIerb\fP. .RE .sp \fB\-I, \-\-load\-path\fP=\fIDIRECTORY\fP .RS 4 Add the specified directory to the load path, so that \fI\-r\fP can load extensions from outside the default Ruby load path. This option may be specified more than once. .RE .sp \fB\-n, \-\-section\-numbers\fP .RS 4 Auto\-number section titles. Synonym for \fB\-\-attribute numbered\fP. 
.RE .sp \fB\-o, \-\-out\-file\fP=\fIOUT_FILE\fP .RS 4 Write output to file \fIOUT_FILE\fP. Defaults to the base name of the input file suffixed with \fIbackend\fP extension. If the input is read from standard input, then the output file defaults to stdout. If \fIOUT_FILE\fP is \fI\-\fP then the standard output is also used. If specified, the file is resolved relative to the working directory. .RE .sp \fB\-r, \-\-require\fP=\fILIBRARY\fP .RS 4 Require the specified library before executing the processor, using the standard Ruby require. This option may be specified more than once. .RE .sp \fB\-s, \-\-no\-header\-footer\fP .RS 4 Suppress the document header and footer in the output. .RE .sp \fB\-T, \-\-template\-dir\fP=\fIDIR\fP .RS 4 A directory containing custom converter templates that override one or more templates from the built\-in set. (requires \fItilt\fP gem) .sp If there is a subfolder that matches the engine name (if specified), that folder is appended to the template directory path. Similarly, if there is a subfolder in the resulting template directory that matches the name of the backend, that folder is appended to the template directory path. .sp This option may be specified more than once. Matching templates found in subsequent directories override ones previously discovered. .RE .SS "Processing Information" .sp \fB\-q, \-\-quiet\fP .RS 4 Silence warnings. .RE .sp \fB\-\-trace\fP .RS 4 Include backtrace information on errors. Not enabled by default. .RE .sp \fB\-v, \-\-verbose\fP .RS 4 Verbosely print processing information and configuration file checks to stderr. .RE .sp \fB\-t, \-\-timings\fP .RS 4 Display timings information (time to read, parse and convert). .RE .SS "Program Information" .sp \fB\-h, \-\-help\fP .RS 4 Show the help message. .RE .sp \fB\-V, \-\-version\fP .RS 4 Print program version number. .sp \f[CR]\-v\fP can also be used if no other flags or arguments are present. .RE .SH "ENVIRONMENT" .sp \fBAsciidoctor\fP honors the SOURCE_DATE_EPOCH environment variable. If this variable is assigned an integer value, that value is used as the epoch of all input documents and as the local date and time. See \c .URL "https://reproducible\-builds.org/specs/source\-date\-epoch/" "" " " for more information about this environment variable. .SH "EXIT STATUS" .sp \fB0\fP .RS 4 Success. .RE .sp \fB1\fP .RS 4 Failure (syntax or usage error; configuration error; document processing failure; unexpected error). .RE .SH "BUGS" .sp Refer to the \fBAsciidoctor\fP issue tracker at \c .URL "https://github.com/asciidoctor/asciidoctor/issues?q=is%3Aopen" "" "." .SH "AUTHORS" .sp \fBAsciidoctor\fP was written by Dan Allen, Ryan Waldron, Jason Porter, Nick Hengeveld and other contributors. .sp \fBAsciiDoc\fP was written by Stuart Rackham and has received contributions from many other individuals. .SH "RESOURCES" .sp \fBProject web site:\fP \c .URL "http://asciidoctor.org" "" "" .sp \fBGit source repository on GitHub:\fP \c .URL "https://github.com/asciidoctor/asciidoctor" "" "" .sp \fBGitHub organization:\fP \c .URL "https://github.com/asciidoctor" "" "" .sp \fBDiscussion list / forum:\fP \c .URL "http://discuss.asciidoctor.org" "" "" .SH "COPYING" .sp Copyright (C) 2012\-2016 Dan Allen, Ryan Waldron and the Asciidoctor Project. Free use of this software is granted under the terms of the MIT License. .SH "AUTHOR(S)" .sp \fBDan Allen, Sarah White, Ryan Waldron\fP .RS 4 Author(s). 
.RE asciidoctor-1.5.5/man/asciidoctor.adoc000066400000000000000000000160051277513741400177500ustar00rootroot00000000000000= asciidoctor(1) Dan Allen; Sarah White; Ryan Waldron :doctype: manpage :man manual: Asciidoctor Manual :man source: Asciidoctor 1.5.5 :page-layout: base == NAME asciidoctor - converts AsciiDoc source files to HTML, DocBook and other formats == SYNOPSIS *asciidoctor* [_OPTION_]... _FILE_... == DESCRIPTION The asciidoctor(1) command converts the AsciiDoc source file(s) _FILE_ to HTML5, DocBook 5, DocBook 4.5, man(ual) page and other custom output formats. If _FILE_ is _-_ then the AsciiDoc source is read from standard input. == OPTIONS === Security Settings *-B, --base-dir*=_DIR_:: Base directory containing the document and resources. Defaults to the directory containing the source file, or the working directory if the source is read from a stream. Can be used as a way to chroot the execution of the program. *-S, --safe-mode*=_SAFE_MODE_:: Set safe mode level: _unsafe_, _safe_, _server_ or _secure_. Disables potentially dangerous macros in source files, such as `include::[]`. If not set, the safe mode level defaults to _unsafe_ when Asciidoctor is invoked using this script. *--safe*:: Set safe mode level to _safe_. Enables include macros, but restricts access to ancestor paths of source file. Provided for compatibility with the asciidoc command. If not set, the safe mode level defaults to _unsafe_ when Asciidoctor is invoked using this script. === Document Settings *-a, --attribute*=_ATTRIBUTE_:: Define, override or delete a document attribute. Command-line attributes take precedence over attributes defined in the source file. + _ATTRIBUTE_ is normally formatted as a key-value pair, in the form _NAME=VALUE_. Alternate acceptable forms are _NAME_ (where the _VALUE_ defaults to an empty string), _NAME!_ (unassigns the _NAME_ attribute) and _NAME=VALUE@_ (where _VALUE_ does not override value of _NAME_ attribute if it's already defined in the source document). Values containing spaces should be enclosed in quotes. + This option may be specified more than once. *-b, --backend*=_BACKEND_:: Backend output file format: _html5_, _docbook5_, _docbook45_ and _manpage_ are supported out of the box. You can also use the backend alias names _html_ (aliased to _html5_) or _docbook_ (aliased to _docbook5_). Defaults to _html5_. Other options can be passed, but if Asciidoctor cannot find the backend, it will fail during conversion. *-d, --doctype*=_DOCTYPE_:: Document type: _article_, _book_, _manpage_ or _inline_. Sets the root element when using the _docbook_ backend and the style class on the HTML body element when using the _html_ backend. The _book_ document type allows multiple level-0 section titles in a single document. The _manpage_ document type enables parsing of metadata necessary to produce a manpage. The _inline_ document type allows the content of a single paragraph to be formatted and returned without wrapping it in a containing element. Defaults to _article_. === Rendering Control *-C, --compact*:: Compact the output by removing blank lines. (No longer in use). *-D, --destination-dir*=_DIR_:: Destination output directory. Defaults to the directory containing the source file, or the working directory if the source is read from a stream. If specified, the directory is resolved relative to the working directory. *-E, --template-engine*=_NAME_:: Template engine to use for the custom converter templates. The gem with the same name as the engine will be loaded automatically. 
This name is also used to build the full path to the custom converter templates. If a template engine is not specified, it will be auto-detected based on the file extension of the custom converter templates found. *-e, --eruby*:: Specifies the eRuby implementation to use for executing the custom converter templates written in ERB. Supported values are _erb_ and _erubis_. Defaults to _erb_. *-I, --load-path*=_DIRECTORY_:: Add the specified directory to the load path, so that _-r_ can load extensions from outside the default Ruby load path. This option may be specified more than once. *-n, --section-numbers*:: Auto-number section titles. Synonym for *--attribute numbered*. *-o, --out-file*=_OUT_FILE_:: Write output to file _OUT_FILE_. Defaults to the base name of the input file suffixed with _backend_ extension. If the input is read from standard input, then the output file defaults to stdout. If _OUT_FILE_ is _-_ then the standard output is also used. If specified, the file is resolved relative to the working directory. *-r, --require*=_LIBRARY_:: Require the specified library before executing the processor, using the standard Ruby require. This option may be specified more than once. *-s, --no-header-footer*:: Suppress the document header and footer in the output. *-T, --template-dir*=_DIR_:: A directory containing custom converter templates that override one or more templates from the built-in set. (requires _tilt_ gem) + If there is a subfolder that matches the engine name (if specified), that folder is appended to the template directory path. Similarly, if there is a subfolder in the resulting template directory that matches the name of the backend, that folder is appended to the template directory path. + This option may be specified more than once. Matching templates found in subsequent directories override ones previously discovered. === Processing Information *-q, --quiet*:: Silence warnings. *--trace*:: Include backtrace information on errors. Not enabled by default. *-v, --verbose*:: Verbosely print processing information and configuration file checks to stderr. *-t, --timings*:: Display timings information (time to read, parse and convert). === Program Information *-h, --help*:: Show the help message. *-V, --version*:: Print program version number. + `-v` can also be used if no other flags or arguments are present. == ENVIRONMENT *Asciidoctor* honors the SOURCE_DATE_EPOCH environment variable. If this variable is assigned an integer value, that value is used as the epoch of all input documents and as the local date and time. See https://reproducible-builds.org/specs/source-date-epoch/ for more information about this environment variable. == EXIT STATUS *0*:: Success. *1*:: Failure (syntax or usage error; configuration error; document processing failure; unexpected error). == BUGS Refer to the *Asciidoctor* issue tracker at https://github.com/asciidoctor/asciidoctor/issues?q=is%3Aopen. == AUTHORS *Asciidoctor* was written by Dan Allen, Ryan Waldron, Jason Porter, Nick Hengeveld and other contributors. *AsciiDoc* was written by Stuart Rackham and has received contributions from many other individuals. == RESOURCES *Project web site:* http://asciidoctor.org *Git source repository on GitHub:* https://github.com/asciidoctor/asciidoctor *GitHub organization:* https://github.com/asciidoctor *Discussion list / forum:* http://discuss.asciidoctor.org == COPYING Copyright \(C) 2012-2016 Dan Allen, Ryan Waldron and the Asciidoctor Project. 
Free use of this software is granted under the terms of the MIT License. asciidoctor-1.5.5/run-tests.sh000077500000000000000000000007611277513741400163470ustar00rootroot00000000000000#!/bin/sh
# A convenience script to run tests without delays caused by incrementally writing to the terminal buffer.
# This script will execute against all supported Ruby versions if "all" is the first argument to the script.
if [ "$1" = "all" ]; then
  rvm 1.8@asciidoctor-dev,jruby@asciidoctor-dev,rbx@asciidoctor-dev,1.9@asciidoctor-dev,2.0@asciidoctor-dev,2.1@asciidoctor-dev "do" ./run-tests.sh
else
  rake > /tmp/asciidoctor-test-results.txt 2>&1; cat /tmp/asciidoctor-test-results.txt
fi
asciidoctor-1.5.5/screenshot.png000066400000000000000000004535301277513741400167250ustar00rootroot00000000000000PNG [binary PNG image data]
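For reference, here is a minimal sketch of how the options documented in asciidoctor(1) above combine on the command line; the input and output file names are hypothetical, and only options described in the man page are used.

# convert a document to HTML5 with auto-numbered sections, writing the result to a chosen file
asciidoctor -b html5 -n -o output.html document.adoc

# run in safe mode and set a document attribute from the command line
asciidoctor --safe -a toc document.adoc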
se^}xOo233uFˁqolRN:͛7h4b與}}iРO?4MqwwQvO?޽{IHH 5%\N#ʨ_&$$Īƍ 6 NQ֙3g5r$̝77駉dʕ,[CpBv˗III`0qqqrʄ3p 6mZ`u;Νf Begg߹'͍J>>TQ~ѡCR .sqFMPPs-t=݋w?@́HMM%22Y q.W[2tذ{j1\r52BiӰl6?0w\N:Err2& ;;;Xt)OEDXl63jHN:Ō3UA֭[,ZݻvORRwquu<``ԩԩ[תŋ1|8^^̟?{{BÇQ 4#Fjr ?#Wŋ$''&/(]<5kբ<۫WzC}ѢE|w?~[BX"UVLnNe̬,U3;YY\۵Z-{ܺy=z0x`b{;v_֭[-5HVˋZkӿzc vlj=a1 >jt?|=k׮ҥKkZnU6GϞa$"eT$P(kqqdggӯoYǟpqn߾]`~5\~KT\dkg^ЭsGʖmI}[Z ;IOϧS~] -98:tl??,WƂ_ѽWnܸQHXt{hӪ%&ljH_~||CUBgǮ]dee7%xi|B"5iܘ7SBXN!$e%(3={>9ܾC={KXX}7T7;w`oO5h~G\t .p1v͗_~/J= }BY]י?App0͚Q%('gg|2ϟԩSl߾]bih4>z=i322޳*V?Hwޝs1N8A5 -u N¬YqDDD9sxϟϒ%K2don6̞5iӦq*UD͚5Z*fn\ιsr .7:t(52i޽"{8f3vvv<mصs'KġCHHHeqN8AP*4hj-66rrr_gƌTXM  7oիCq06ͻ&FAc4~:7o݂|@` {DGGSRBRCjJ w&$$#Fbr&L3gR #88W77 Wg˖-lݺ-[2u7n˗-rt֍&M` lٺm[b6bŊ=Io2>}tك@LXkWstL2k׮QJڶmKpHʕ#11fY> O{n쉎nݺ wYV- OFw&UQ$5k6x0^y^؟voľ1,]=xL&F 5f3_F#'Obz'իVB!'hCiCҪukϛʕ+ <m IDATZ7n0|p>ׯӮ]+rgn/_7ߴl?y#899Q$k`\zlƁ9kQۓ7M~yCu[ʕٙ{5mkCZZW_RJ̞=:QbEK>ߘ2e /OO&?ZRSS1|8k׮yZFQ^3[0aΝ;ǨQ6|xh?9st{UjԨQZ-'O)S>Xb%7of͢Ed7P\"N3X!yС-55_~iS2o<ʗ/I$"#5HFFG\ ޴)vvv,Zid4Aԯ[ܸyv[ j,i۱ղϜ%,0{lBBB>cV+Q7nǎԩS9CՄZhAyool٪ ;vоCBK殝;Q-[Z@>Lu:/Y´ik|'*꼱N@`ٌ'n+WRqMٌ-NbرT^~2Xyq`+T`ȑ,\^{S^ Z7端Xv-=z`FVDwkպ5_NN޽/б#~~~L{N*_keff&<7oҽ{w R졯OOzvJT ch4̝9C_9<~Lz{rwwǯ QӹR5$:Z~~!vHZymX ͛gqA~w.@@`|{B ފRŊ>6/~ŋzB?Ny<ӽOmS:N`?xz=O7ʕ`H|N\/~~Ktl BtJ _%W럟_fz~];[;FJyO/llmZ|KCLO}-qcccC֭|BuWPDy$BQj#sNݺ:xӧO"V8^O߾8;;[(JmV'-)lFټi͛7gYa4tN❿qT=åK Q7w.{d"##1h4ҲeK&Oѣoh׮-[,Р޹s'3fˋi}FDDe;E5A_IRfMVZEll,ysmv߾}M&"5*wSח;wb2֑LMy ;EQ$%%1I&L<W؃}sD Qrw>o6gaț :4;uDǎYp!VI灇;F!Hƍݻ79K/fqer]vϻ%]Z-ϝc꧟bkg_j` (( &0rH~G>=:t4 9~~~*=z|2~VZUd8! ))IIJJR?4hr !B<wѨ(,X@qrtTz쩘fEQ>#N8`%Mt3JϞ=7WWe͚5(d*=RfMŧR%eÆ (`(:r\RZ5͛_MwaŻ|yi&JNNNtGiʏ?@'NT޽zض#GeeV݆_,տ 8+UyQ<=*)ǎ+t;vT)𹺭]v)vO?YOM(}}_eӦM%.GɤjRqwsS_`yu[999JNw77e* 'N(>*)a (E^vءTTV^5.tR^yg4 OOOj֬Yꊳ3YYddd&';ɄMyx:n]ӡ1<~XOOӧOgǎ0q$ubccx"4lذZҨNNN:u,[%G5 NNNdggYnB!^F@æ5`}Ҿ}{N8СCYf 2:qqݸqcǎFƍ-w>=nkkk񤥥Qv=棢(888P^=8|P4pah4( Ie!տRJ:uHKK#6jPk߾}QWNU-.OLL$66jתU7/~.L&)ؠ`0cu^ݱ'99S< dvvv-y?bbb={6:1cRVBZ@QNʗ/_X"ʕ#--ׯ&r..TTfg ?t!({*!nnntbˣ?vdB!;RIv*֮B 5\Qvm9Ž; j㮝3fΤK.3h@?0(ĵk׬S+Wh4HY׿=Zh^gΝ2S?v111Ш'՞N*T 4,-y\rP!w\QkFc]˩eQN<==KS{CF,AuMkhKf.=uT l&77^en F4P3666|겮jժQvm._LLL g;vMӦM0E'w&33]%ߏ3͋90$=q_RX? N{U*W̜9s,{k4K"V^ʕ+2z472鈋cW_j+%&;;N6o~Qq~Ԡ/ۗ(B<B!Ᵽ6z۷T ,|; Z;CfFݻYv-k.r1d2΀۷/GfŊ\KvZ7ooM ksss KB͇ӧNq lmmYUY^ :N $od0,-wdI(+WL&lllʆ gϞّMxxxX mW-ꇆٳg[.pwP\\UVjH2z_0vX&O֭[ڵ+ `l߱~7n0i$LZ-LիWҵ+Æ +Wwtdff2l0Nrrrz䟟յȺ-U,;99QbEjլOބO Ύ)2͙رk |mGPjU~[]@vv60hӪ#hܨ!_MR*#KO9l(_"#B<ŋq;9 "Ab7C"uڕhV\/|Չ!z= kؐ1cDzdbΝ˦M~H~ 쓓:Ǣ,+&o~`jԬiWwV@zZm+==ܜ*W&#=ò}3Ȭ3ػw/YYY888p9=J*U̸Wޞ&My&on put邛CiwA:vȺuزe%Ц鰵SNMرiPP3g`֭I󂡥CDEPB*U }c݉?5sUz?ӳ1#^ kN #9^7&NNb T)dX'M֭ػ-[B !8u .i&<<<Թ垦8F;2k,;Mܥ}!ZKCҽG&MEx7 "L&/`ɒ%|5{6V gV̙ӧ?r'Wmܸq#{vٙ^zժWɉ'Ob0)v(p9Ǐ༡h48r䈥NL:uzDw%--/O2'hh4Z& h{9guxgDt:]{i}7Օ8Ju=Bx+G\^Zeg֫MS4o4#_SzId?SP`fW.o/ ndffgWAvv6>ӋƑiь~_ ''u8y_mMgԇf2pKx;Si!,t ǎq 55nzޢew;!ElyFѠjT~_jƾjZ#ƟeBfЩ@Ŀ{]iNiˈcPqds"\vVmhьg{޲x#9ݘ% IDAT8{/ Ff4lN=9~ߨ(EjC~(=zŋhEf?hRw?$B<۷KPPOkgUfͨU Wb ʻˋl233#Zh6l !!P{$;w^ʗ/ϛoUhw5hذ! 
,/ayc\t5jj~wxx8חcH͍-d|,h4xzzb2R,U\vv6\-M1r;z{K.ѻwoFm~܎j2puue۷}ǀ^:F±km i$H$)"##C("<2sC :؏[[ow7a"<2ZfԣhզtFsDxdxvD!<:lh۹0bDxd8p3/bÄd\y {=ǖ-"<2ZkCzaQlA<;~BkF=C p{l{/Q1 7;-?"<2Zl޺՞W-ys;GDxdtDTfáGDxdgmdV$srB8yRGFu7:\"<2Z<9ia6+<F?\=/hqatN'Zƴ>5{\GFO./]m:u/ZTDw$ΜFQ>rTD`Wq(''7Wi+&C`Q;gOEߕٿԴRi3|)#Ź9n^ FlF !8sϣǎh?a_,w,}McDxd=CzVvoݛzAi+vZjn )MS=> #./ϡ?:w&I$^"#Ũq !8'#Ŏ;/'&sѢS+D!H$ɭrO"!!A4 ~K[ UT6mr8W~$F0d2:qE<3nCۋuٲeR@ ?ΒO+K.ڡ~z"55T|rJ"ukQTTT*_ ,*XtEfY}Jb4uD׮]^7wnr-o~!&=(,,[llǎ#<=EDvv5wyѼY3#^xy{>[es;l=xm۴nk*v>쳢R@ܩ\Yl߶E(ڵm+›7qF"%%m+,,QR@ر ϯء/2x ˝ů;yZUD{^nټYx޽zf+?>>^4l@7o.\rYD"qǢj5[l!Ϥ'p {ZAA99<EEE(7țz-\ԾHlw4'N9vr}Is tރOLzy"mOKΝK_b2ެC/M(wM_8s)2)**Gի[/'~X?kPo>Xnmlv`2w`loBB~7ڡP4o֔S?_ h29K_jӦǮ{kԈO>QfIt\Gcqh,5k ;'N!F>ޖmիZx~zu̶X7#]7;!!ijcF%w&ڽw779v:+ *nk 鮺 Zƍe/?E4 kg3gwr׀D"HUlnK֭]˹sh޼9֠7k{Ojܸ1dgg5JbݺuL>K.wwc;]PPSY` >p)ns4x`FEvv6_3gb0+^ZFQ.@/>JQn+:C2o< ?Ξݻ oe1YL2?3z(.]XllIJJbܸq,[ʁ|4}:^^^eRRYcƄ0u4<<<3z4/_.[EEE޹\veVT*N:Ehh(n^@С/\ ))͚QR%9w/`_+k<[o^Z^֗}ȑ#Yr%~Ν:cӇc_h?n֍p1fΜFaĉ4n!zKEe=f3LcjԨ5k䓜 OݔW֫7~=9p?0ڵJժPTTDrRvbQn]f|YaSDՋ3gktRv͠Ah۶-Upuuh4KRR'_~g^[4E /v'$$ܑw+ʊϏ,5n|Ccl?`h"z=noDjmp 7>ݛHm0q"8;9a0h0] FsRPJCJ w77srvliquq!W+7F)rT<ŭ( =ucͺupI.>DaA!m}q7惻&[x7y9/Q*E{Eբmn ;wCL~m}=TwLt\m0VHLRn!7WWז}Fqsu|Scqq vkpvv.s瑭*խ:3|`!,Z o6;5%Da{wܸq#7oߟ:|C,=z#GXb^{mX˪UXbjOOO@Q(**"''D@@{㉉)ן-Jߧu̙3}uV<<Vk}7dddh\H?F˛NcڵOdVqvv"""hҴ):t ::ooo72&f͛bJsV\ԩSqrr'ۢsFEEE1b&LH:u*l6֫}δiprrgggL&dgg#ˋO=u6Yeɶmޑw;:uPjUNc}-YC&a"Hbi%͜rk]ZmϜ3oͽfu ]Z;v⥜<}LxnXWM%DFpV|}}Yw$''@p.YBܙ3ri.^@zz:T*<<=^ 4U4nV{x[fٲK:޽{9q8W\`0B@J ifjՊ*UZܮT+WDQr-oڵY~=2e:mh\ThחJ+.&[#ۢe`` SMQ]f;ߟHOOQ1"}GBpѮKW-Μ=[[ ӱh۹Kŋ"<2Z=F\xQL&|Ï?'N{"-MDmowFfhV{E1sΗƋN=zH8^GF=S OP,1{АGExd8t1zDxdػ0B׋om;[jx+:/[}GFm?w $t:xik"<2Zl2ұ[ٮ*"<2Z{Rrxo4\׽e:rd1"<2Z7uɱK`xB~F-^h;)"bDJN=z:O~kڃybX2QPP eI$"] ѽр7sMy7f%xmύfk<hZ;|GHG IDAT3[yVkZ>zЭsg>5.#9z1^w>t܉kѽO?{fioϳcG3gR͍ƍSZ5,Çß[`O?ω4mƬ>oZv-0n̝M6³؏?b&СԯWKжsW<<[66O}ϽpO{=^{[O<6/ެ۶-3 esܳWcV;FJà /޼ۮo~cGլuó_>=ey qj7UwhբſH"H$+VBf9 U}O/kwޮxP!0Lea+x?Z\O}='ƥRl^p[VG۽rTd^[iÍaݫe͑=w#fEeix"k֬!8$zSl6Cf8|Οwʹvs\>{ND"Hn7 Xˋڡݓeݓ 55F 񸻻Y=˗O&Ozb.n7WWԮ;^VKj6Q\xjL}k KiOBZZz) 0z`w9p0С]FVKYH`RRRqrrb>!6oaʕ((<=b8[lJ;mei4 +w>,jdO?qjժɛGٮ~Lppr_Zlɥ˗U&`Q:Xro6U_뾸$y8=Fɶ?5|8 ۅ8|(;]A`׬wtӗ61ԯWgggCM=jH8o.pl,NZ-]:vt s' g 9hXgO`μyDur>b&iƘQOϜy_93$8gg'CJmPh߶ s^_Āѿore2bؓT^EK;F摇f =v gg'ụ .nTP-ϝ\t ߰vz~m-:ZF̘1>LC|!T& D"Hyny_Zʃ_ųQRܭ[ivs깑߼o5ݮq+o*:ηCXzbINNN:ԬUzn۝7z?݉qH$m|ClD[o;oӻg~7ßͯ+ߴED"H$K5гH$-H xlPիǖ[quupj a0lڴo,`РA7]"H$Hӥs'3ßG/Uj/ŅH!I$D"H$}MF!;;$jժuC ɄFСCTT:$D" DB@6+<}\ZK G"H$D"HSlʉzQ^=~^<{x,10.JѐˇӦqeƏO͚51F$D"WQO4?Rs<==U& ףn߿1!+W&3+>={ʁH$!H$wEQ0LRذaǏݝj{?Jb=w^|֭[G׮]?˥D"HoD"H$D"d D"w6m͘AAA۷O>4lԈ*UFH'''+Wc֭lۺ!yʕ+ߐ -D"HoH$D"H$w#D~{H$Ǧغu+s;HOKwww4b`0O~~>kߞ{.]RC"H$ͷTH$D"H$"=$c( Fgr?NRRy:& WWW qoЀUR0B$D"׿=dtD"H$D"H$qlJ ٌZ~ԯ_BךfL&jZ R"H$҈=2DJXbK6DDŰq&)D"H$D"H$7JeY )53͘fT*T~H$wO̜%,ԡ=]:wKNw Q1|0#9D"H$D"(bWj\R D"HGX쒒lPre)D"H$D"H$D"HTHD"H$D"H$D"H$D7]]Byg $%ُke?[CyHMMh4L j.|qgҤIJJd6JXF̝CyYYtً3>eێm-F///OV-Zj+Of=( nnxxzve-[YԫS7^{&55(ߥGnSOINJ!YSfẤdz/gۺ۰шӧMeD=oDT58}v$"D"H$IV=$D"H$ֱSn7_ĕ+^;Ç̳hzuA eټuh4ȧ1LՓ:kϝ;pg_g*{B1yھ8f|1[Z&V%11M7/,r(*߲L+U &>~ 8(? O|ӛ5kuvڽ ʌ*ia6yvSa yaNmx!<޻4/**kP>9CGкeK{۷s񱧿4Ż68\BK\f?.%K1Sxb<8d(IFF~~~~/c?__f9NfM귍٠mD"H$D"!. 
&tj>,P) "᝟y[:URD"Hn%ȸѣ8gXveÃ:(?,/jտ@떭_%N8g?.^tWq݆ ptUqgΖfS#~pV D"H$^FaW؎fCݨn4<E)SqxU w.eUfqUs0fIXd1|D"H$ͽ ɄYXqr"N5 ;KǮMFOÃy_Áسw-jά{jeZllC.55BB]\\H$D"HuJW!/\Edkbb"&*bˤd梡sD5<ݴ}cMa0w"F5}tF (< EAmM2 qGp͉|VMP1aUN"H$mS^rdrߑkYjԖ _P@RRc{s$6flvAʙ.H$D"d2fDPTT;w&00L%Hqˉr;e6QTٳ;vؕ.M6{:,X@`` {@׳j*.^h~ 8T]c[9/&;;S .z%V),p?_G UoϣPII[YB8^WC~, J!>1cW]xG=F3Z :\4xR(ٟeˮPǥˎs\JͳjW (S R<˒4K ccem(y J!=$^%6|һܱt~lED"H${ANNNhZY-V":Cj[9( nn rOɬU琾|OT\ȡD"H$DrQ~QF >Ltt4o߾(\ds%7;|:doMXBT0]mݦH1ͼ&Ew^Ҍ7%T kɳ,\=V0)ՇbʏunobQyat Y*5{}7r%)9-Yɱ5 B}H$Dror[ _~5<-D]Hs&d"335kq4=# a6_w775v=-'++ Hzz>/AUy͌a q]{g%xxxа~[Օ?l!%%E5D"H$ZMHHNNNEpp0Çq|7dee9(:E4J)IOO`0\:"&& `8{,*te۬;tF!33L 1ͤӱcG Xؼy3Bt:v^ד^wPxdffEQQQn7Y`T)L¾m#RrO֑o@R(,67BJ/ګiyLyZ寤s%=".OzN!St^]Puz.$da)'KGQHjV(2YUd0q>){|&k>hM7-&_⮼Xl.^J`p?BX榮dY:|˘gb4Ҳ PWo4Y@^%D"H$X:];%@DTО]}Mٽg/=RR 4[.lc3tk53^+U'3>9׮zK;AfMؾ=vpV[>={U|`=m)tIED"H$lvzn2PTt҅{rvJ֭[ٿ?dffRV-hPCi&<<c>W!MخE}_]CBy&f9MqԨɘe~3>sY| 2uEĄU65OI̖CϷ/22,\iJ 2sxhdҥuذ)]5kFbb"IIHzxzꅿ?kHnܹ3ǎc1!CZfȐ!TRB#TիWgҤIEQ8}4 ,]v+k׎ ryj5{]v(K~?mw)tt;]8{ ZۇThƷEW``@ lW س#WpqRShTӏ7εhjV)1?-Uth[i[[,_˝#H L~"5$:,4o/wovi=H,`L5|v{quְ} &RX4')b[w9~ j+_!)#ZN}F4vgǰjJ3FsXn7~'91}\hzם :ƒ7njh=};ZMe L{inHa]X5u4kw6W'/d1qnFҷp957g}(z'"r{x 7{I3dƒ9?][Tbü4);g>bϦM"H$ɿmӳ'}zf_/r5mBMmL mcb6\:oO~4?A<vAر#;vdƍݻ5j`񉉉А@?[Z}ol;HߘJ߰"Cr IDAT ;IKͪv[l5%馥_K?֟SxafjV.uniWg Z &31aUfIZ5D놕x_C9tGH$Dr"H$D"H$+R$''ӨQ#䐝e6:: qHHۗǏS/ql޼ .P~}Ο?On7LSfMO$%%j9,zp%=Bjz`QV~g5Uhոh+H$=TH$D"H$ۂu~~>[] !0Lvk Ab2VJ)wRڄP*и- }P:e\łK4ZFPh0; >sǒ0۵>W1F:v z2B4jhISrZS&>7P(\?D"H${ H$D"H$HmʊT۷/`YwuuҰ-jqr,61LvDy[Ɔ 8vyyy4hj֬ə3g0h %+"VKPPƍ+d29d]6 ][~[)j<=I yF }uҨpwՠ7X%@ z<\vS,yݲu#,ԗovжU`&eY>UMc8xi0.-a2 {J /coIJQP[4el>x1Z\BQѪa%fOh`Q/s$.>w6ѸCAo0UςHdpbDm8]%ָV1 |*E!'߀lFQY!Ez owb8M 4]ӪerXD^Y6y*ܜȵA8puحFϔ}D_2ZD"HiTRD"H$Q,6l7nϏ-ZڵksEr#!!@U\p^~JJJl_~~~k4i9u 6,e/Yo6\lt:{TrssrQuBsh_J!,ԏ#qiMf],{LER ־4 iKogKII?^nV *q1U9U9z7j᪥}q2ˤ]3 A{;XW (aQ\R m;Q 6⮝PߍyBOBCYjV*8/WsJ%x1<^eJ\I/xB&]v!ĄU+x,V#VeFbHS돇η9ҦsWrhPnrB&EʴDrP@( hL%=4*q,>ÞD & ,ԯm4Ke_Ŵ}GQ]zB % H^RDbH(RҤ~[!9Љy@fvgfg7wΤhߐZ{֩*63hM({U.#n-L  Grֺ BLn&B!Bɓx"]t!SL@AÆ ?$44]vqA6mj_B -Z3fo>V^ ^^^(P{:Lu@"""v[ ">>/ILL[)EPPܸq`T] ɓM._۷p.vn(پ!߭+t`+|;I8 l:x WB1.ތf¢s}ZENQ[Y2_8KOHx\ ê=Wpu"N/Lrexv~_y%sX˫+=ǃsKb>V\]Gg`y{̜g9q6U}N98w3zU>OO$,l?|aq<FPxX@ʫrMLfg8x6sӒY ћe5~&V̘n58N_Wȟ ^"vopjV#7,VhevcǑ {<ʍXv&>,Łӡ}-DZh[-:z0qQf=kwTK`\$,ɹk˷O4ʧo%1\먭x_n8K^3c4Zy2m,~2~Q{wr=4oghf>W۪<fv "$"'$E',"='O$x3N`?i"Bg/ .!B!xjlP=g]Yz5AAA(P[nq9.^H\h߾= N-dMٲe9w'N <<͛SB~ ʕ#66ÇcXhڴ)^^^k^M40LԬYӾiIb([=`ƍܼyĪU SL\|{"B!BJ5)l);` lޝ.қ)6 SiU6i)K;C?֥,"ʾ!yt]_o5tzmSG)  ;uf֞ГӍmB%\]HfR4MCMXrjsJ:ȍX,`.oS8 >FR)=yTǾS])~/XU] n=wꐼ/]W?]sgl^cK=ΧN{oslHq2&'e[l7Mx9f\} ! 
!B!s% ǃ~_ooFƷGp+==!1aμ;3Sޘ;>w,oB#FLJ+g +5k,g.nD9_}?ϑ=;U~h޴ Q|;|F}/B!B!e &LtL E }K5@xi?ĊXv G _sѷ@g?ٳgc԰(\k|ٲ8~\kmܰ *~G~|;|oL=re>ys(Rw]#N>cGʙQÿR.XȘӬI2+W-ɞ-kV,ʰt B7˓Jsְ1/3gYo}{?tZܹr5@٫79|3cW!u2B!*@˶PJ1~L:rYP50Ȩ(k ~eKə3CߏF4>֍[ᷘ5g.}ڋ][62d`{#kyCB3oNNNt|Sق)}ܭ!]=?KUwoKY:U*Wt~Ⱥ1Bؾc'aapqvamk!ks2닧Ggu4j:kW,Owmb.ťf˞vw`D$o lپ֭Z=rй+7_|yf6m/b7w.^iGYrE?/Yz<7hPnۮRt}cbb/f˚> ~<~m}5EgxWOٹy#FDox|U|ַ_{ ///>At):we >kPf ʖ) ?n[gվ l6>sWÏ{s4{?+R0kiOKhجiڸKڍ,[vuZ<==廬]+W潖*XwZ`e %;oӞf%عkwM&uǾʖ.-l೏?ѣ۰>ͦ54MQ !B!UF>&M9նm;v(_@mĤ$XH6jt]1gT{S?Pl.maa?P5hܡgY>nXo)_@ըy _P?ik֭Sj^vOHPPHeX,KUl6+_@e6է}>W-۴U&!Hhh>f'NJm.?P١fߏwv]ɡ[榛K IDAT2̏~I97"""|_+_+{s8 jE*&&F)ǕڴejT~@kk㞽>P~PvQjCq㕯p=m?Za[|G*_@7ҏ'Ϝu}J?z?Pݸq;諡in۽wT:1R7nߔ_@UuTTߎxWUW7P;tTjɲ-E2W*_@u^nR._o;vRH:?P=^_~T;wuH?KU1a=B!HDd$?MB'g'?;vDN:5:smA[ŸɠS[oڂQV.::Nr'?y<O d#tl@Z7kFcFЕrȟ#{v""#9u49CP`9q Dl+9wci;ߍ7J쳞uFm8"_Ȩ/Yf}hiŷ?7o YK`歸Rti}aƗcyK|Ȧ[yY}|vkm*o>uy?Zn=J)bQ/\`DFFq)T͛=s}dɗXz ͜ŨX|Z@s sߛ4l똒[$B!B!xeX Bu&Ogѧ`]|~wssl6?~)wkɰ2L ˝2=w=?Ms);zX{viFL&u3g6+Tk[fzMy7AۼqȎgggoİc.^)U[ѱ];o؈}~ݳ~`뺮ӂprvN5eSR/gxy^`0i7n|ӮsW{PnxB Ӿ꩷յin"kSNm) zg-\'''og|]yl۱fMuɒ%CSiX)B!B!eYh1;xQ ڃ^^$$&prve^^^L&̞Tr_n6rϜJeڵ~a4evZ^}nnDĠrx":*zqi0[6k֭'6.s3rط|1p0& L]Q֫yVaI#`eN瘏| ]&}63?>2e-'8Ժ0y֬>OԯJ)VYK,\Y~&U æɭKd2萾d /_˗9딾kz7*2*m۱ƌNɛ'/ͱǟ{dJw[reQX~{TLl,JVTĞ}m!)ɔ '::M[Q@~TىUkb5m'QB!B! , Ĵ?3_inի0$%%q1Fݻ݃;l!88խkvիװX,$%%qQ?yɕ#{b`2Xq'Ďƪ5k sպeKOLl,k.L2Q/[xdj׬i4n؀cP&A͍?,pڼ B/صe5d ؉!NZˋ>>s!, IIZ9 >3̛OdTT:л_^{Ԑ%(>^-~e2''g˔tسay4o |;l87oiT ib0(Zcٙ?%13B!B!xe68t7[fVC޽ɒQxqgڴjL5p5nvY?gWYF,X'ZaOݷԯC+WΜ|3ƌ\.O>}|ǟiɧO c4hž6ahWTV50ʕٻ?5^gOwvv_&g6ٲfO4nuiyGhӱC;{]ԮQQ75/K >v8:wux͖mz >w7 ?LFM5Թc{>kj**f6bin(6!t:|=lþߟZf-֬WgUmjohO[7}ؼn k[8}EvMzwE RJ^)&6Z5֧>,_+^^^6mA_x1 /yPz[^ܹrɻ#!B!B2$y X-xNf͞jU:kAL(T`N/I˖5'{li`0<~ѷ˖e u+-m\*x,28.^wiܰ3fC j*ڵ͐ZWXx!&+WydI>ڙ9rY^[}CǷBy7n'ǂR'M`t/WOw=k m'''+W&8$m#rLsB 8, v >>Bo3f/SXh!{:p]$BB}!B!Ȉi !$!!5idɜtB!B!HS`};|2p^筼BnFmʜ3d=!B!B!ēHS`\p~2rB={pSx1j׬KaԸ׻ !/,K!B=d !B!D~B!B!k!B!B!#!B!B!Bp$"B!B!B#@bcc ʖmS l]o JN8o]j TeӖ-rݧmm?5jrϼ/\g>_fΞ۹w8})Bbc꺎bAX$>_@UV}@y}}L4VTT4C'=7odT_NϏzH/~rEA=w^Ȗ5s]fL3[пo26NnFB!36x iZm6F1E4FciV:O>y1^I4MCˀsaη,\JhZOo[tR @FA{t]W-5b1@w!8IԯȞXg( WysH?,f!Oν2c.[Ƃ 8p KvM8s C J*VD5={5Xda}x91S%{loTɀR`0hS6])44v.N (44G>$=󆄄[7Wq9nݢk_k糞7G&ooP-0 ?s8} /(!'_!ӧO3qpqqA4e777}mkaX{&[v)QS |b6ZjU4Mgܞۧ#G2mTf̘f0z={UWghDW #pif̜IxxxV5W3w\ԯ\@)UJo$!&c`$?^GMÔYtECqΜa9.6J̚_oȠxa1ڰATO߾|l޴nݻ?^a0hY͇n0kr?.XQ L[qL.mUL.DŚ *G,h8%,(eDj(]%3g !cy"Flߘf5;To Mx'ˁxy4۰_ HfMȣ]!cF`E<Y}|7j$eJN7of)ZCz:ao?[olشt]F=4߸ Obb"x{yr_޻F$-f3nnnϛߧMݞVDEG/J)4Rn}bbb쿗)]YMK3s A)(ZS~fޓNA`uPƤǮ]5ơn `_puu}~{I Pa4؉'OeZZwHhh& gg'F|-k݇J/΄iд9~GGfϙ@IHHd$&&*oO>ѣ}'K-HwmaC+RߦʻWVMݝzjL;ڱ};f͙˂Ec'N[jU7igæ͌8[)VX1~|+R!xv͜9s0p ŊŒ<|LXj?OL"E) [=mJ-MӬӥ$Oef`06+wnj֬iO7aۇ`@u{=ߔu4V^;#J)2gLƍ9}ꔽ,f~-,6(N&قSW._[;ﵝ[ڶ n޼qH>?'MXb~3R,Ь;B4>vwm g\=-lقd^zL&{^zꫯҰQ#{Kf>hK0:(eHQ{;ZMf]2741Jn'шR [@Y5CRXLI8OqvuiьF Lg=nhi/tt].b:`ȂA3:tuūVǜdxzޟwOf{z#J;ջ7իWw諶6on܊J`kLXt.J1TW;׺:BWލ۷l>՛][6߬~97GJ?$-җ|F\\<̝Ki*2eʔ*'9~$ TDŽ6n-9sݴe oނw;-,**QRE8~$+V^fl\!qٺ};ŋYF8;pu֧1=ϞpuuqJ)o1wBve_`D'@lm/@d2ѶSgRѢgMt[IIxEs /Mرk}`HصJF )U$aaa_{QF-ȟ/6ld8Q_vX,t 9v8{;߾zf)\7Oݳo9gP4jX?eيy廎oٻ?ʖku0\r[[<`m0r #F ̜9bŊmt)lvwOrעHK8oKǎVCK:(sHJJbʏkJ0-DT%o|[rv>>Yo, +7np;=4Ӧb0mTw[a&"""ɒ%`WlY1moa6٘L&&O@@SPl|~Oح[(V(1cT[0qx{yӿX7p0l]Ρ=5U}}Mh֭p{OS:3~JW^䍷XJ|f_Ã-:ԥLJ<3s !*m  c?~J+ۿQo <ܾ}ݻwGٲeyq7oۛ$UK/֭[r*̙3 \.^sګi{ٳxzyQD J.ɓ'9~8P>>J_Ŋɓ\zui$֮]K||%5:~`1(۠ Q!A\ؿKR9' H>eIÔ+둭`;M.Cl-.:@[xeA!*gd %łΜm\69Y IЙӔk 90'&r)"97&3V,c$FI)+V;1xzzҰaC_ݸ ˓/u@c:tYN_&667nlfSN1vXv-(P5j԰_J)a8}^;5 sf43Vĥ۔-!`;nlǃ 'WVwɍ[6R|DplNF|qЈxV35!G7£{eݜЀ_"WtuEIdFFw˱M.?IW QTN{`ЈIb`B#J@dv׵n}2zX,{c(JB"Y B#ڵqx3f֩t2mϡ r$_޼9z,=?AϏz<оXJԪyӄV:k;0#{v~& JQ!ay[4m̔_AU@@*T+ɏHDFEOZ*cF 
j*/ޯO/>q[Cz^gp"}ZoHRq~)9'K/S!K4_;d@tt^J~{~g{6=o>͚Xdj`;w&8$\9s:\s/~fTG劕V5" ;͛X,:whDI2Lo۾gggJ%T|C}97o]8W^bЭKTuB|3 `}6XfX4p {/e˖uk2zh*W}8{,SM~ű}6,Yl7mǩ]6ww4nGD0{l6oLÆ Zł Xj{bJL0WWWΟ?OL5z47N5l\>v_M#((777-ZDLL 9r@,7˗-c޼yr*Tiȟ?T\{o>Ǔ?~.]D\駟(VXO =J~8y5P 'M`DEE1ct8@\l,UUSD}tVZܹsQJʮ;ٵsu`fM7 f͚EJM_9铬;+Sr 2Т[, 6 ֭?1'$DFyw`Ńr?$Fٝ[o7'#!!|bkb䩬vϝ__':$wbɚHF-4͠1,Z[wf0pq*V[/<}߻Dthli<ɄG7nClŋRti>, NNN9r͛3bH~gY7~bt# _`0zjf3uֵ@)Ϝ>̈́ ҥKdɒ&PVҕ s#9u%D?/=0y2(ڂ ǿ#;!=ه4sg=|,SET(v\"":5c#KnCgX"Aq}#rUg] (y;۬w6jXtF|:^@ɠuLً+EdJp C~Ni^}j,Acs|= D){yzao_]BUxxzX111?Pm޺-6_@ҥi?P8聎48?PmI m*WTjULl}a\M'dR;tRiZ6k̛jkZÞKJu{vTm:vzm;uIs[jՕCR*2*J77oZk?P8xP=I!-vܕ5;tTd2*7$4Ժ]m۱'_b/spHRQQj?Pmش9k~>Cլ[ߡCN9|S|ҫٻoLz Y[5jԨ}/^|?ӎ;| n}Bd=gf뺲X,J)}pwW>PY,͛6\9s'*Z~ɒEu~l6WUSU R-RN;lY3Od{o+ZCzddTzT)տRjDj`txMZ* ڏ뺲J);<{.@BME )U@BQkAP@A(H.EAzDj~2?n%<<ٛݝݻٙzgOU6*J sJ)f͚!C2SJ+W꩖-Zy9uꔪSܹsyp^S^&Z:vڳg^ UZ2٣V5WWzUjTP`Z0#3gHU|y5feTFFǼwUAAjgRѕ*qK{aF/lեKUV-uTTd5sRJ\Ro 3//O)믿+UR}~Zǫ+??n:}]:wVM7VIII͛7+oo5ӹժ*"<\ ~M;~Zjܨ7nJKMUJ)fUtiճgBUɓ'[n]穧Tjj^77;31w4Q[ƫ}{ջ횫>߶ovKkUʨ w9΋c޽ԏK G3q]jMԓ^Cڶ+Ûݢމ[YhUԆSTҙ8u5q EP *'=M9O6_m6xEe&'*춭=K&ԌWgSq*n5K[LQ#t`JۈZ6qmxw;J0wǒ)Z1rz[U^nHJTY)JqN]i*0 @}J)μ6L*U_RgΜQS={t!]*/szƞ4q Qx@ݻW)?+UR;uҷQ\MZx@?q]HRZ[2m(es}r&Uu>l)elU;WOԌe׾٪ڿJv.&]mn~,ӱˎ8?3_.fO.V,QvMΫNC+O_LWQ NYW~]g_O]KQawR/Nٮ/{~6E"7M1-!?͆w ʒeiیA z Z4 8|M7bɬ\5+En^lZ"T)~ڷ_^bp[LMM΁| Bl`ZW`v>u$?g=Um&W~Ō(n n7C9\tV+Ghӱ{ ס>`ӷش~MXX~C\|52")9w][Vv \)뜒 atߛ,=B!M(V ,`„ iӆv` --AoAZxC)EhX.)1,N8-B=4 ?Ffv;Vg@h5> l64iݺqqN<7kbl6_ r]ƃ>H9g]R^=v喟oǏꫯI^^+W履ZGgϞ4mڔ5jEpH9rDOb!+3xCҥyꩧrcts@@~L>>> =N f4IMMe…jݚF^^u^Xz5GڜqAf^VWGlf\t?~FTBŊQ |YpW9Ͽ/.\jjTTfZ:uꤧ4 GM)t||}l , AAAMl:H3?95^| ̂V^&$Q5jup| Wy e" /8 4CC  찚ͬ6U/Q^C*T aS9>qo2lܕNȪ1c9v~5'M'<ChrP||  ?$]eǝ;{Xrr2OSN//Pzu*WmV^W]u8+~ҦM7nfnS:t .x|vnpX-mP6ܟ ˱x"?u̼aWJ/s39FMDĻm:AoUHx]Uʄa92ȳ*ڦ@Ն@o \Njud)<LF9y2|{],V;;til6;w/kU,LJ6%M! ado֮-JR>F}:sfo fO>lA[~=M{ͨPQkUTf\|u,??L&y?LfZhI8'.-"_@ ~zuoy/uCu5>e+w9~x L8_DD;j`n15B?x>iYKg}YڵlvJX9s8mƇrB!nBiv7$::I'7ںW\Cٳ'~~~ff͚[Ǐs%6mJ1f3?Oߩ5 2f+W&lYj^ٲe,ЃZ܎Jժ $-ERJQ99htt4>u QF|5o_͛|lQF BBBX~=SN-om\qynqNO?xZ59p{\|ʹM۶XV٣vZz!u6mܹs4 .p):voUGi|V ;Ip =μC 5k"((3i$=-㹱* n5 -ALz=E)TNL㦜ڳk٭Ζ*P] a*Wʫv۶m֎]җX Ԕl 4ʮǠ{\}! 
{}㶞v`ʮꊫ̔㴘UРAvMRR~- z `QpA{ M&z۶"UE~/8{ x{Uكm2tBrB/Oɦ}(1[9Br{yAC9kIҽD꽦_Fͮ/{ e)Q!~,v;wbE> Vktݵ_@PP5]}3!8(֭Zrt zdߎQcߣQl ~sGyyع{7-&8(UX9֭KLL vL|</^NiSزe *U"2*m p}y_Q] hXbM6=i\p 6ۼ9ʕmoP\;"Hbo/2y]η;7y*RQڑ` ]A5gG^LRuǼ]7jWd` QբmRSmmV|1f"; +:4UGF#͚5#11QPWU˖ԫWիav;:yZw4Ǯθvjg; izgPro?{9#y$nǗ_neo~Z(my火i }]qK;xc[n*UHn8-&77Cݰ9o"rf)>yS>O?S(}Td$Le”nkլu-{wHSض}W_n0XQ .q}{Ŷ޷1bBw^:uhצ oL*M䶍ƍҺUKmzyyه4wy~lڲM[\Qd4z7t~5mJܵp>q?׫Kog֭ȅɈco(XJj*'OVzvCvxy`ev^wޛ8cWq[6cz}u~Ǻos+N:9v=ۺwX9ʣO>vێӂ#guo?~ mNO r IDATbi<ާ/׮eub&"B!ĿR @FF_ycǎ1yڴm7ܹҀcdĉlj8~x=zQr l6S^?"]g: |||?aժUlv8*S{0kFjj*}ݻbɄGDܳiRwwI`4:A0ckpFѪU+,YBJJ {~9NJ%**㏳i&b71Sf9#`Q㞻q$$$Pzuƌe ŊvFUVz#PԿ~+FQ+r-*(߻M`4b˃Zm;PI3&o|t|~tF FJveUD>s kߞqtԉ~K.L:sΑJnn>uy `4R3:t$lv;v~>FՈϱnp7ܒWfWX1ͮ*`4hz Fǽ6Wk`(5/&7@LL4c O3M ZK-Jb" )bW^z[oeHts}0IOKSzg11ECLNuP|吗P^]ʗ/-Z5jjY=݇H?v gϞcܹ:|pz۷#?`X]C|@=]s_%Q6*i&;obRTV}z?mM ƳyV,[Nrr n1~i|ݷKOvlqq>yo+IIMޭb~yft499,I YRI"9Grr211ѼE@m߶ MptoU&;Z}=?trΞ=GժUkn[9:rLOJ5zԪQc?cŪoHKOQ{=Jc33vͲ+9w<իWc;#&:3EÄBs56kn^ :@To+ᯄ bbgrT7m"ߟh㨸}:vBXX]t]wqYL&Sv)mzVSyUkPs%) i%\qkll,c?ƍMxD~;۷o$^B|o?BZeZ: }bY6+WD3svTV-NIiF# /'S%9$'<2&_z^V<9f0p1yo Ƴs@{٩ơ`<{ $vq>FTJhDDWc*NF~Y'khϗm +_6\GZnv ĵtՏ:uoڵk sv3m4߸̬,7iP,8-_}.*Ouͮ05V',Л鯴wkwv l;p?gxIm!j45Ve]|74(@p.%gm'QWc,B4M)nwyˁ(ۡrt2if*pnS#3BJfuЩds)_:Ġ]$?zP!nFtZҡ};:xh=iVǗh}7}wo޺ ʀW_涶iheYŊx7J|N6Oo޴۵}%FoxM5I%Jýx^L׮MڵiSm>yC{^{ XkBYr[ݾy3B*Wįq)5-s__??:ЩcG:uX-bcݦbҪ]K7Bq>m| > C:~jML87b4f׮]K8q>|jj*GK.gϝϏ5 rcoddc;xb0<6bv6m7n$yssi߾=>>>/⫸:r^E_Ӵ"ӸFtF-j[5k֤f͚ݻٌ}?DPPM\Ӑ]jݚP,Y9/f~OvܙUVdb}|_[=t9fp4[3;:-%4UK%H~5Gg*xKG|L4]dp 1 =v@J%i`2 T%x /a]`W?כOѬNiQj;F\scgRr)%.!yzVro9rzUk4h;AǟxW/7n|BO'w4WYSvk^͑V-)8!Cٳw/f߼ӦLjU]b`XY) !57W3zh6mʨѣ"77|ݻwӷo_f~yvn[f3wٳYt)}RSS9}>S||<׵jժngڵ3#F*/W㬦iUN:#qԞ;w.˖-瞣B z'N`49}޸wixiР+V "<ʕ+ۋmޜJ*ӢE wN\pqO:i233@e c5BߨqOc4jw 4ŋjl>\1@^|EN:EIZlIVkWmvf#l [g~ܗaʥ.>ދ帣sm4`Xr$~;bJN"=;xĸV*Mo'8;,ݻprNBý9vӜq?!/'sf&'vmr3թo&6ʗzVl=Uc׬0sq cL,?3&[ִ6;~/z1G& " =Lb•lx:f;\zj,;snDfs:-[revA(͆7]veݺu$''9\zaZfZ$^;{ q)8b|1*]R=z&Phh0 z|Lm8ARz.K̥LRҡ8evs\::Sw6.%gci2qԋIYo2IV Ŏ])r-,t@0?؂@?/}?][FӾq|/O%c4j_䍏Vt(X]+[ o}Ykq\:+ĘM\JO}v]׽B[h ,,_tٙ'MUr6?BӖ âPB) !Bgp#KX"f1[48{, .mvH/ffƇ)sر#Æh4b09 о}{, ^^^l޴^{ esO׮넄5[VƍeHMME)Edfd0 **~Ѭ'M#22ݻ£] +^xgС\| =<}!''=zpbbb[t L<+V4`ԨQ/_MXhSLTPiӦh">s eƍ?~=>2ǵ~DD_ΙC*U MOOHKMŮ5j`W_QTBqvOFٌdby"%%wyMONv6hjբq& 07SNe!,^Ν;cΝ{9/_oqz#'x9y$6RJQfM^u;|g$$$`)Ut믿d>bƌQre ĢEXf ـ襗__~aΗ_bv6jWVcߟQB=j< ۷;eNTp6 cYToњ#Q~CV+~eB֎mSzi7iaoe?K ĮF|Cgt&qS2MSrFcRn-)|Mc Of(GnԧsYz,bUi<7_Į ~:tn448Mˁ+LJT[RF3ϞCϱ#kP=n烇rl&mM[yp_~Κ I9w7ekf˽{繽ukbccj&4D嘹(Lfmux _o9oA+h4cA)~Yv 1QAXv Myr1)Q_c+ʵRBCoDJXmo2l*#΃  Zgf0L߾ 4gyi?ir⯐ƞ{9~6*U*ӡm[o !BBnbhGn|d2a2 ӧQq5e䐞!!!~eggStB b +rQJ1+rrrnS4e`s6֖F BLJJ VH233IKM9ek_(HJJfk6LXX[MVVzͩ\eIVVaaax{{EZz[ uBRR5Mb!**47KP``&LlvEՎ%%%R>!={;@B!٣`#np[u,Xm pNOeG#ywϘߵsѤqcՋSB{a|lݶ5j$L<Ĕ/Wqw+ IJ?(BˋZ^*Ghs%c"@T]Xfp*h68-6l6epv8:) &&Gj\j˵}M@=rW޼L ݮ@s~cJ/Y X^s< G9{;;>l6fbSZ8"{Bҵ+B!(r |0xhX:7+^q PkQ]ŕIQ{H !B!?snuxhvC6+J$M B!g_'B!B!iLcQB$s !B!B!# B!B!B!HB!B!B!n:"B!B!Bt!B!B!# B!B!B!HB!B!B!n:"B!B!Bt!B!B!# B!B!B!HB!B!B!n:"B!B!Bt!B!B!# B!B!B!HB!B!B!n:"B!B!Bt!B!B!# B!B!B!HB!B!B!n:?b#b[oN?Dճ'\{i AAAԨVf5f^Vys!B!B!7N;Z6/KnvN_OzFFIKO/2m[oeiM9v?S.]V=ǤӪ]7oI9{_[^k֭Ql }*(M_7Go֮;3gQⴳS~Ĭ/evl6|~9b;YpBB95qㆄ'( MlG(]5j3U5i2hjMjA)B!"g}9+V(CSo3ƌOvv5&%'3vrrr٤KA܀wvsǎŦ5=f}9q'*'9_[YO;;~tl׎wvRŊsj%d6iۂ o÷=r̻rFbo_2mƇ0weۥVܜv;~&88oo[Ѷ- U jDDDS""\<̄2eiUS-|& `0h>|Yc]lNG)LBSJȴBw)nbk}ӕޮcڷ{矒V;}ILۉxLXV̝K/{3Im/[ %"$$D [H^^ .9/+Wѳ{}i|<}ݷ>/~EbR#-'^t\:s~Ezz>><-4mZb(S1xiDGWWFӂψ/CHOW^#>z Bق4&@B}vʹBOd"9]x?'0 ش/\` \#6G#--}@֬_]R% ?p{cFǷ7TJvmY2+Ё6~/oR HFF?i߾IXX- R̛#FH25_ݞNnFlkʥKAJbd$BONNW\ٳ\pA.+Ѩa0htN>˗Ixl6?W,1hPvEn@Xh(ǎ~z;#ƌ囵Z2nhnkDk֭gͺ#]:{Lun}>:w 1);ԉ=g~RaaL.r~n,V͚%:񓧠iJ0c3"ƾ[IKOnWÜa0?{ksOpP,_Z(;xlPbf} 
asciidoctor-1.5.5/test/000077500000000000000000000000001277513741400150175ustar00rootroot00000000000000asciidoctor-1.5.5/test/attributes_test.rb000066400000000000000000001140241277513741400205730ustar00rootroot00000000000000# encoding: UTF-8 unless defined? ASCIIDOCTOR_PROJECT_DIR $: << File.dirname(__FILE__); $:.uniq! require 'test_helper' end context 'Attributes' do context 'Assignment' do test 'creates an attribute' do doc = document_from_string(':frog: Tanglefoot') assert_equal 'Tanglefoot', doc.attributes['frog'] end test 'requires a space after colon following attribute name' do doc = document_from_string 'foo:bar' assert_equal nil, doc.attributes['foo'] end test 'creates an attribute by fusing a legacy multi-line value' do str = <<-EOS :description: This is the first + Ruby implementation of + AsciiDoc. EOS doc = document_from_string(str) assert_equal 'This is the first Ruby implementation of AsciiDoc.', doc.attributes['description'] end test 'creates an attribute by fusing a multi-line value' do str = <<-EOS :description: This is the first \\ Ruby implementation of \\ AsciiDoc. EOS doc = document_from_string(str) assert_equal 'This is the first Ruby implementation of AsciiDoc.', doc.attributes['description'] end test 'honors line break characters in multi-line values' do str = <<-EOS :signature: Linus Torvalds + \\ Linux Hacker + \\ linus.torvalds@example.com EOS doc = document_from_string(str) assert_equal %(Linus Torvalds +\nLinux Hacker +\nlinus.torvalds@example.com), doc.attributes['signature'] end test 'should delete an attribute that ends with !'
do doc = document_from_string(":frog: Tanglefoot\n:frog!:") assert_equal nil, doc.attributes['frog'] end test 'should delete an attribute that ends with ! set via API' do doc = document_from_string(":frog: Tanglefoot", :attributes => {'frog!' => ''}) assert_equal nil, doc.attributes['frog'] end test 'should delete an attribute that begins with !' do doc = document_from_string(":frog: Tanglefoot\n:!frog:") assert_equal nil, doc.attributes['frog'] end test 'should delete an attribute that begins with ! set via API' do doc = document_from_string(":frog: Tanglefoot", :attributes => {'!frog' => ''}) assert_equal nil, doc.attributes['frog'] end test 'should delete an attribute set via API to nil value' do doc = document_from_string(":frog: Tanglefoot", :attributes => {'frog' => nil}) assert_equal nil, doc.attributes['frog'] end test "doesn't choke when deleting a non-existing attribute" do doc = document_from_string(':frog!:') assert_equal nil, doc.attributes['frog'] end test "replaces special characters in attribute value" do doc = document_from_string(":xml-busters: <>&") assert_equal '<>&', doc.attributes['xml-busters'] end test "performs attribute substitution on attribute value" do doc = document_from_string(":version: 1.0\n:release: Asciidoctor {version}") assert_equal 'Asciidoctor 1.0', doc.attributes['release'] end test "assigns attribute to empty string if substitution fails to resolve attribute" do doc = document_from_string ":release: Asciidoctor {version}", :attributes => { 'attribute-missing' => 'drop-line' } assert_equal '', doc.attributes['release'] end test "assigns multi-line attribute to empty string if substitution fails to resolve attribute" do doc = document_from_string ":release: Asciidoctor +\n {version}", :attributes => { 'attribute-missing' => 'drop-line' } assert_equal '', doc.attributes['release'] end test 'resolves attributes inside attribute value within header' do input = <<-EOS = Document Title :big: big :bigfoot: {big}foot {bigfoot} EOS result = render_embedded_string input assert result.include? 'bigfoot' end test 'resolves attributes and pass macro inside attribute value outside header' do input = <<-EOS = Document Title content :big: pass:a,q[_big_] :bigfoot: {big}foot {bigfoot} EOS result = render_embedded_string input assert result.include? 
'bigfoot' end test 'should limit maximum size of attribute value if safe mode is SECURE' do expected = 'a' * 4096 input = <<-EOS :name: #{'a' * 5000} {name} EOS result = render_embedded_string input, :doctype => :inline assert_equal expected, result assert_equal 4096, result.bytesize end test 'should handle multibyte characters when limiting attribute value size' do expected = '日本' input = <<-EOS :name: 日本語 {name} EOS result = render_embedded_string input, :doctype => :inline, :attributes => { 'max-attribute-value-size' => 6 } assert_equal expected, result assert_equal 6, result.bytesize end test 'should not mangle multibyte characters when limiting attribute value size' do expected = '日本' input = <<-EOS :name: 日本語 {name} EOS result = render_embedded_string input, :doctype => :inline, :attributes => { 'max-attribute-value-size' => 8 } assert_equal expected, result assert_equal 6, result.bytesize end test 'should allow maximize size of attribute value to be disabled' do expected = 'a' * 5000 input = <<-EOS :name: #{'a' * 5000} {name} EOS result = render_embedded_string input, :doctype => :inline, :attributes => { 'max-attribute-value-size' => nil } assert_equal expected, result assert_equal 5000, result.bytesize end test 'resolves user-home attribute if safe mode is less than SERVER' do input = <<-EOS :imagesdir: {user-home}/etc/images {imagesdir} EOS output = render_embedded_string input, :doctype => :inline, :safe => :safe if RUBY_VERSION >= '1.9' assert_equal %(#{Dir.home}/etc/images), output else assert_equal %(#{ENV['HOME']}/etc/images), output end end test 'user-home attribute resolves to . if safe mode is SERVER or greater' do input = <<-EOS :imagesdir: {user-home}/etc/images {imagesdir} EOS output = render_embedded_string input, :doctype => :inline, :safe => :server if RUBY_VERSION >= '1.9' assert_equal %(./etc/images), output else assert_equal %(./etc/images), output end end test "apply custom substitutions to text in passthrough macro and assign to attribute" do doc = document_from_string(":xml-busters: pass:[<>&]") assert_equal '<>&', doc.attributes['xml-busters'] doc = document_from_string(":xml-busters: pass:none[<>&]") assert_equal '<>&', doc.attributes['xml-busters'] doc = document_from_string(":xml-busters: pass:specialcharacters[<>&]") assert_equal '<>&', doc.attributes['xml-busters'] end test "attribute is treated as defined until it's not" do input = <<-EOS :holygrail: ifdef::holygrail[] The holy grail has been found! endif::holygrail[] :holygrail!: ifndef::holygrail[] Buggers! What happened to the grail? endif::holygrail[] EOS output = render_string input assert_xpath '//p', output, 2 assert_xpath '(//p)[1][text() = "The holy grail has been found!"]', output, 1 assert_xpath '(//p)[2][text() = "Buggers! What happened to the grail?"]', output, 1 end # Validates requirement: "Header attributes are overridden by command-line attributes." test 'attribute defined in document options overrides attribute in document' do doc = document_from_string(':cash: money', :attributes => {'cash' => 'heroes'}) assert_equal 'heroes', doc.attributes['cash'] end test 'attribute defined in document options cannot be unassigned in document' do doc = document_from_string(':cash!:', :attributes => {'cash' => 'heroes'}) assert_equal 'heroes', doc.attributes['cash'] end test 'attribute undefined in document options cannot be assigned in document' do doc = document_from_string(':cash: money', :attributes => {'cash!' 
=> '' }) assert_equal nil, doc.attributes['cash'] doc = document_from_string(':cash: money', :attributes => {'cash' => nil }) assert_equal nil, doc.attributes['cash'] end test 'backend and doctype attributes are set by default in default configuration' do input = <<-EOS = Document Title Author Name content EOS doc = document_from_string input expect = { 'backend' => 'html5', 'backend-html5' => '', 'backend-html5-doctype-article' => '', 'outfilesuffix' => '.html', 'basebackend' => 'html', 'basebackend-html' => '', 'basebackend-html-doctype-article' => '', 'doctype' => 'article', 'doctype-article' => '', 'filetype' => 'html', 'filetype-html' => '' } expect.each do |key, val| assert doc.attributes.key? key assert_equal val, doc.attributes[key] end end test 'backend and doctype attributes are set by default in custom configuration' do input = <<-EOS = Document Title Author Name content EOS doc = document_from_string input, :doctype => 'book', :backend => 'docbook' expect = { 'backend' => 'docbook5', 'backend-docbook5' => '', 'backend-docbook5-doctype-book' => '', 'outfilesuffix' => '.xml', 'basebackend' => 'docbook', 'basebackend-docbook' => '', 'basebackend-docbook-doctype-book' => '', 'doctype' => 'book', 'doctype-book' => '', 'filetype' => 'xml', 'filetype-xml' => '' } expect.each do |key, val| assert doc.attributes.key? key assert_equal val, doc.attributes[key] end end test 'backend attributes are updated if backend attribute is defined in document and safe mode is less than SERVER' do input = <<-EOS = Document Title Author Name :backend: docbook :doctype: book content EOS doc = document_from_string input, :safe => Asciidoctor::SafeMode::SAFE expect = { 'backend' => 'docbook5', 'backend-docbook5' => '', 'backend-docbook5-doctype-book' => '', 'outfilesuffix' => '.xml', 'basebackend' => 'docbook', 'basebackend-docbook' => '', 'basebackend-docbook-doctype-book' => '', 'doctype' => 'book', 'doctype-book' => '', 'filetype' => 'xml', 'filetype-xml' => '' } expect.each do |key, val| assert doc.attributes.key?(key) assert_equal val, doc.attributes[key] end assert !doc.attributes.key?('backend-html5') assert !doc.attributes.key?('backend-html5-doctype-article') assert !doc.attributes.key?('basebackend-html') assert !doc.attributes.key?('basebackend-html-doctype-article') assert !doc.attributes.key?('doctype-article') assert !doc.attributes.key?('filetype-html') end test 'backend attributes defined in document options overrides backend attribute in document' do doc = document_from_string(':backend: docbook45', :safe => Asciidoctor::SafeMode::SAFE, :attributes => {'backend' => 'html5'}) assert_equal 'html5', doc.attributes['backend'] assert doc.attributes.has_key? 'backend-html5' assert_equal 'html', doc.attributes['basebackend'] assert doc.attributes.has_key? 
'basebackend-html' end test 'set_attr should not overwrite existing key if overwrite is false' do node = Asciidoctor::Block.new nil, :paragraph, :attributes => { 'foo' => 'bar' } assert_equal 'bar', (node.attr 'foo') node.set_attr 'foo', 'baz', false assert_equal 'bar', (node.attr 'foo') end test 'set_attr should overwrite existing key by default' do node = Asciidoctor::Block.new nil, :paragraph, :attributes => { 'foo' => 'bar' } assert_equal 'bar', (node.attr 'foo') node.set_attr 'foo', 'baz' assert_equal 'baz', (node.attr 'foo') end test 'set_attr should set header attribute in loaded document' do input = <<-EOS :uri: http://example.org {uri} EOS doc = Asciidoctor.load input, :attributes => { 'uri' => 'https://github.com' } doc.set_attr 'uri', 'https://google.com' output = doc.convert assert_xpath '//a[@href="https://google.com"]', output, 1 end test 'verify toc attribute matrix' do expected_data = <<-EOS #attributes |toc|toc-position|toc-placement|toc-class toc | |nil |auto |nil toc=header | |nil |auto |nil toc=beeboo | |nil |auto |nil toc=left | |left |auto |toc2 toc2 | |left |auto |toc2 toc=right | |right |auto |toc2 toc=preamble | |content |preamble |nil toc=macro | |content |macro |nil toc toc-placement=macro toc-position=left | |content |macro |nil toc toc-placement! | |content |macro |nil EOS expected = expected_data.strip.lines.map {|l| next if l.start_with? '#' l.split('|').map {|e| (e = e.strip) == 'nil' ? nil : e } }.compact expected.each do |expect| raw_attrs, toc, toc_position, toc_placement, toc_class = expect attrs = Hash[*(raw_attrs.split ' ').map {|e| e.include?('=') ? e.split('=') : [e, ''] }.flatten] doc = document_from_string '', :attributes => attrs toc ? (assert doc.attr?('toc', toc)) : (assert !doc.attr?('toc')) toc_position ? (assert doc.attr?('toc-position', toc_position)) : (assert !doc.attr?('toc-position')) toc_placement ? (assert doc.attr?('toc-placement', toc_placement)) : (assert !doc.attr?('toc-placement')) toc_class ? (assert doc.attr?('toc-class', toc_class)) : (assert !doc.attr?('toc-class')) end end end context 'Interpolation' do test "render properly with simple names" do html = render_string(":frog: Tanglefoot\n:my_super-hero: Spiderman\n\nYo, {frog}!\nBeat {my_super-hero}!") result = Nokogiri::HTML(html) assert_equal "Yo, Tanglefoot!\nBeat Spiderman!", result.css("p").first.content.strip end test 'attribute lookup is not case sensitive' do input = <<-EOS :He-Man: The most powerful man in the universe He-Man: {He-Man} She-Ra: {She-Ra} EOS result = render_embedded_string input, :attributes => {'She-Ra' => 'The Princess of Power'} assert_xpath '//p[text()="He-Man: The most powerful man in the universe"]', result, 1 assert_xpath '//p[text()="She-Ra: The Princess of Power"]', result, 1 end test "render properly with single character name" do html = render_string(":r: Ruby\n\nR is for {r}!") result = Nokogiri::HTML(html) assert_equal 'R is for Ruby!', result.css("p").first.content.strip end test "collapses spaces in attribute names" do input = <<-EOS Main Header =========== :My frog: Tanglefoot Yo, {myfrog}! EOS output = render_string input assert_xpath '(//p)[1][text()="Yo, Tanglefoot!"]', output, 1 end test "ignores lines with bad attributes if attribute-missing is drop-line" do input = <<-EOS :attribute-missing: drop-line This is blah blah {foobarbaz} all there is. 
EOS html = render_embedded_string input result = Nokogiri::HTML(html) refute_match(/blah blah/m, result.css("p").first.content.strip) end test "attribute value gets interpretted when rendering" do doc = document_from_string(":google: http://google.com[Google]\n\n{google}") assert_equal 'http://google.com[Google]', doc.attributes['google'] output = doc.render assert_xpath '//a[@href="http://google.com"][text() = "Google"]', output, 1 end test 'should drop line with reference to missing attribute if attribute-missing attribute is drop-line' do input = <<-EOS :attribute-missing: drop-line Line 1: This line should appear in the output. Line 2: Oh no, a {bogus-attribute}! This line should not appear in the output. EOS output = render_embedded_string input assert_match(/Line 1/, output) refute_match(/Line 2/, output) end test 'should not drop line with reference to missing attribute by default' do input = <<-EOS Line 1: This line should appear in the output. Line 2: A {bogus-attribute}! This time, this line should appear in the output. EOS output = render_embedded_string input assert_match(/Line 1/, output) assert_match(/Line 2/, output) assert_match(/\{bogus-attribute\}/, output) end test 'should drop line with attribute unassignment by default' do input = <<-EOS :a: Line 1: This line should appear in the output. Line 2: {set:a!}This line should not appear in the output. EOS output = render_embedded_string input assert_match(/Line 1/, output) refute_match(/Line 2/, output) end test 'should not drop line with attribute unassignment if attribute-undefined is drop' do input = <<-EOS :attribute-undefined: drop :a: Line 1: This line should appear in the output. Line 2: {set:a!}This line should not appear in the output. EOS output = render_embedded_string input assert_match(/Line 1/, output) assert_match(/Line 2/, output) refute_match(/\{set:a!\}/, output) end test "substitutes inside unordered list items" do html = render_string(":foo: bar\n* snort at the {foo}\n* yawn") result = Nokogiri::HTML(html) assert_match(/snort at the bar/, result.css("li").first.content.strip) end test 'substitutes inside section title' do output = render_string(":prefix: Cool\n\n== {prefix} Title\n\ncontent") result = Nokogiri::HTML(output) assert_match(/Cool Title/, result.css('h2').first.content) assert_match(/_cool_title/, result.css('h2').first.attr('id')) end test 'interpolates attribute defined in header inside attribute entry in header' do input = <<-EOS = Title Author Name :attribute-a: value :attribute-b: {attribute-a} preamble EOS doc = document_from_string(input, :parse_header_only => true) assert_equal 'value', doc.attributes['attribute-b'] end test 'interpolates author attribute inside attribute entry in header' do input = <<-EOS = Title Author Name :name: {author} preamble EOS doc = document_from_string(input, :parse_header_only => true) assert_equal 'Author Name', doc.attributes['name'] end test 'interpolates revinfo attribute inside attribute entry in header' do input = <<-EOS = Title Author Name 2013-01-01 :date: {revdate} preamble EOS doc = document_from_string(input, :parse_header_only => true) assert_equal '2013-01-01', doc.attributes['date'] end test 'attribute entries can resolve previously defined attributes' do input = <<-EOS = Title Author Name v1.0, 2010-01-01: First release! 
:a: value :a2: {a} :revdate2: {revdate} {a} == {a2} {revdate} == {revdate2} EOS doc = document_from_string input assert_equal '2010-01-01', doc.attr('revdate') assert_equal '2010-01-01', doc.attr('revdate2') assert_equal 'value', doc.attr('a') assert_equal 'value', doc.attr('a2') output = doc.render assert output.include?('value == value') assert output.include?('2010-01-01 == 2010-01-01') end test 'substitutes inside block title' do input = <<-EOS :gem_name: asciidoctor .Require the +{gem_name}+ gem To use {gem_name}, the first thing to do is to import it in your Ruby source file. EOS output = render_embedded_string input, :attributes => {'compat-mode' => ''} assert_xpath '//*[@class="title"]/code[text()="asciidoctor"]', output, 1 input = <<-EOS :gem_name: asciidoctor .Require the `{gem_name}` gem To use {gem_name}, the first thing to do is to import it in your Ruby source file. EOS output = render_embedded_string input assert_xpath '//*[@class="title"]/code[text()="asciidoctor"]', output, 1 end test 'renders attribute until it is deleted' do input = <<-EOS :foo: bar Crossing the {foo}. :foo!: Belly up to the {foo}. EOS output = render_embedded_string input assert_xpath '//p[text()="Crossing the bar."]', output, 1 assert_xpath '//p[text()="Belly up to the bar."]', output, 0 end test 'should allow compat-mode to be set and unset in middle of document' do input = <<-EOS :foo: bar [[paragraph-a]] `{foo}` :compat-mode!: [[paragraph-b]] `{foo}` :compat-mode: [[paragraph-c]] `{foo}` EOS result = render_embedded_string input, :attributes => {'compat-mode' => '@'} assert_xpath '/*[@id="paragraph-a"]//code[text()="{foo}"]', result, 1 assert_xpath '/*[@id="paragraph-b"]//code[text()="bar"]', result, 1 assert_xpath '/*[@id="paragraph-c"]//code[text()="{foo}"]', result, 1 end test 'does not disturb attribute-looking things escaped with backslash' do html = render_string(":foo: bar\nThis is a \\{foo} day.") result = Nokogiri::HTML(html) assert_equal 'This is a {foo} day.', result.css('p').first.content.strip end test 'does not disturb attribute-looking things escaped with literals' do html = render_string(":foo: bar\nThis is a +++{foo}+++ day.") result = Nokogiri::HTML(html) assert_equal 'This is a {foo} day.', result.css('p').first.content.strip end test 'does not substitute attributes inside listing blocks' do input = <<-EOS :forecast: snow ---- puts 'The forecast for today is {forecast}' ---- EOS output = render_string(input) assert_match(/\{forecast\}/, output) end test 'does not substitute attributes inside literal blocks' do input = <<-EOS :foo: bar .... You insert the text {foo} to expand the value of the attribute named foo in your document. .... 
EOS output = render_string(input) assert_match(/\{foo\}/, output) end test 'does not show docdir and shows relative docfile if safe mode is SERVER or greater' do input = <<-EOS * docdir: {docdir} * docfile: {docfile} EOS docdir = Dir.pwd docfile = File.join(docdir, 'sample.asciidoc') output = render_embedded_string input, :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docdir' => docdir, 'docfile' => docfile} assert_xpath '//li[1]/p[text()="docdir: "]', output, 1 assert_xpath '//li[2]/p[text()="docfile: sample.asciidoc"]', output, 1 end test 'shows absolute docdir and docfile paths if safe mode is less than SERVER' do input = <<-EOS * docdir: {docdir} * docfile: {docfile} EOS docdir = Dir.pwd docfile = File.join(docdir, 'sample.asciidoc') output = render_embedded_string input, :safe => Asciidoctor::SafeMode::SAFE, :attributes => {'docdir' => docdir, 'docfile' => docfile} assert_xpath %(//li[1]/p[text()="docdir: #{docdir}"]), output, 1 assert_xpath %(//li[2]/p[text()="docfile: #{docfile}"]), output, 1 end test 'assigns attribute defined in attribute reference with set prefix and value' do input = '{set:foo:bar}{foo}' output = render_embedded_string input assert_xpath '//p', output, 1 assert_xpath '//p[text()="bar"]', output, 1 end test 'assigns attribute defined in attribute reference with set prefix and no value' do input = "{set:foo}\n{foo}yes" output = render_embedded_string input assert_xpath '//p', output, 1 assert_xpath '//p[normalize-space(text())="yes"]', output, 1 end test 'assigns attribute defined in attribute reference with set prefix and empty value' do input = "{set:foo:}\n{foo}yes" output = render_embedded_string input assert_xpath '//p', output, 1 assert_xpath '//p[normalize-space(text())="yes"]', output, 1 end test 'unassigns attribute defined in attribute reference with set prefix' do input = <<-EOS :attribute-missing: drop-line :foo: {set:foo!} {foo}yes EOS output = render_embedded_string input assert_xpath '//p', output, 1 assert_xpath '//p/child::text()', output, 0 end end context "Intrinsic attributes" do test "substitute intrinsics" do Asciidoctor::INTRINSIC_ATTRIBUTES.each_pair do |key, value| html = render_string("Look, a {#{key}} is here") # can't use Nokogiri because it interprets the HTML entities and we can't match them assert_match(/Look, a #{Regexp.escape(value)} is here/, html) end end test "don't escape intrinsic substitutions" do html = render_string('happy{nbsp}together') assert_match(/happy together/, html) end test "escape special characters" do html = render_string('&') assert_match(/<node>&<\/node>/, html) end test 'creates counter' do input = <<-EOS {counter:mycounter} EOS doc = document_from_string input output = doc.render assert_equal 1, doc.attributes['mycounter'] assert_xpath '//p[text()="1"]', output, 1 end test 'creates counter silently' do input = <<-EOS {counter2:mycounter} EOS doc = document_from_string input output = doc.render assert_equal 1, doc.attributes['mycounter'] assert_xpath '//p[text()="1"]', output, 0 end test 'creates counter with numeric seed value' do input = <<-EOS {counter2:mycounter:10} EOS doc = document_from_string input doc.render assert_equal 10, doc.attributes['mycounter'] end test 'creates counter with character seed value' do input = <<-EOS {counter2:mycounter:A} EOS doc = document_from_string input doc.render assert_equal 'A', doc.attributes['mycounter'] end test 'increments counter with numeric value' do input = <<-EOS :mycounter: 1 {counter:mycounter} {mycounter} EOS doc = document_from_string input 
output = doc.render assert_equal 2, doc.attributes['mycounter'] assert_xpath '//p[text()="2"]', output, 2 end test 'increments counter with character value' do input = <<-EOS :mycounter: @ {counter:mycounter} {mycounter} EOS doc = document_from_string input output = doc.render assert_equal 'A', doc.attributes['mycounter'] assert_xpath '//p[text()="A"]', output, 2 end test 'counter uses 0 as seed value if seed attribute is nil' do input = <<-EOS :mycounter: {counter:mycounter} {mycounter} EOS doc = document_from_string input output = doc.render :header_footer => false assert_equal 1, doc.attributes['mycounter'] assert_xpath '//p[text()="1"]', output, 2 end test 'counter value can be reset by attribute entry' do input = <<-EOS :mycounter: before: {counter:mycounter} {counter:mycounter} {counter:mycounter} :mycounter!: after: {counter:mycounter} EOS doc = document_from_string input output = doc.render :header_footer => false assert_equal 1, doc.attributes['mycounter'] assert_xpath '//p[text()="before: 1 2 3"]', output, 1 assert_xpath '//p[text()="after: 1"]', output, 1 end end context 'Block attributes' do test 'parses attribute names as name token' do input = <<-EOS [normal,foo="bar",_foo="_bar",foo1="bar1",foo-foo="bar-bar",foo.foo="bar.bar"] content EOS block = block_from_string input assert_equal 'bar', block.attr('foo') assert_equal '_bar', block.attr('_foo') assert_equal 'bar1', block.attr('foo1') assert_equal 'bar-bar', block.attr('foo-foo') assert_equal 'bar.bar', block.attr('foo.foo') end test 'positional attributes assigned to block' do input = <<-EOS [quote, author, source] ____ A famous quote. ____ EOS doc = document_from_string(input) qb = doc.blocks.first assert_equal 'quote', qb.style assert_equal 'author', qb.attr('attribution') assert_equal 'author', qb.attr(:attribution) assert_equal 'author', qb.attributes['attribution'] assert_equal 'source', qb.attributes['citetitle'] end test 'normal substitutions are performed on single-quoted positional attribute' do input = <<-EOS [quote, author, 'http://wikipedia.org[source]'] ____ A famous quote. ____ EOS doc = document_from_string(input) qb = doc.blocks.first assert_equal 'quote', qb.style assert_equal 'author', qb.attr('attribution') assert_equal 'author', qb.attr(:attribution) assert_equal 'author', qb.attributes['attribution'] assert_equal 'source', qb.attributes['citetitle'] end test 'normal substitutions are performed on single-quoted named attribute' do input = <<-EOS [quote, author, citetitle='http://wikipedia.org[source]'] ____ A famous quote. ____ EOS doc = document_from_string(input) qb = doc.blocks.first assert_equal 'quote', qb.style assert_equal 'author', qb.attr('attribution') assert_equal 'author', qb.attr(:attribution) assert_equal 'author', qb.attributes['attribution'] assert_equal 'source', qb.attributes['citetitle'] end test 'normal substitutions are performed once on single-quoted named title attribute' do input = <<-EOS [title='*title*'] content EOS output = render_embedded_string input assert_xpath '//*[@class="title"]/strong[text()="title"]', output, 1 end test 'attribute list may begin with space' do input = <<-EOS [ quote] ____ A famous quote. ____ EOS doc = document_from_string input qb = doc.blocks.first assert_equal 'quote', qb.style end test 'attribute list may begin with comma' do input = <<-EOS [, author, source] ____ A famous quote. 
____ EOS doc = document_from_string input qb = doc.blocks.first assert_equal 'quote', qb.style assert_equal 'author', qb.attributes['attribution'] assert_equal 'source', qb.attributes['citetitle'] end test 'first attribute in list may be double quoted' do input = <<-EOS ["quote", "author", "source", role="famous"] ____ A famous quote. ____ EOS doc = document_from_string input qb = doc.blocks.first assert_equal 'quote', qb.style assert_equal 'author', qb.attributes['attribution'] assert_equal 'source', qb.attributes['citetitle'] assert_equal 'famous', qb.attributes['role'] end test 'first attribute in list may be single quoted' do input = <<-EOS ['quote', 'author', 'source', role='famous'] ____ A famous quote. ____ EOS doc = document_from_string input qb = doc.blocks.first assert_equal 'quote', qb.style assert_equal 'author', qb.attributes['attribution'] assert_equal 'source', qb.attributes['citetitle'] assert_equal 'famous', qb.attributes['role'] end test 'attribute with value None without quotes is ignored' do input = <<-EOS [id=None] paragraph EOS doc = document_from_string input para = doc.blocks.first assert !para.attributes.has_key?('id') end test 'role? returns true if role is assigned' do input = <<-EOS [role="lead"] A paragraph EOS doc = document_from_string input p = doc.blocks.first assert p.role? end test 'role? can check for exact role name match' do input = <<-EOS [role="lead"] A paragraph EOS doc = document_from_string input p = doc.blocks.first assert p.role?('lead') p2 = doc.blocks.last assert !p2.role?('final') end test 'has_role? can check for precense of role name' do input = <<-EOS [role="lead abstract"] A paragraph EOS doc = document_from_string input p = doc.blocks.first assert !p.role?('lead') assert p.has_role?('lead') end test 'roles returns array of role names' do input = <<-EOS [role="story lead"] A paragraph EOS doc = document_from_string input p = doc.blocks.first assert_equal ['story', 'lead'], p.roles end test 'roles returns empty array if role attribute is not set' do input = <<-EOS A paragraph EOS doc = document_from_string input p = doc.blocks.first assert_equal [], p.roles end test "Attribute substitutions are performed on attribute list before parsing attributes" do input = <<-EOS :lead: role="lead" [{lead}] A paragraph EOS doc = document_from_string(input) para = doc.blocks.first assert_equal 'lead', para.attributes['role'] end test 'id, role and options attributes can be specified on block style using shorthand syntax' do input = <<-EOS [normal#first.lead%step] A normal paragraph. EOS doc = document_from_string(input) para = doc.blocks.first assert_equal 'first', para.attributes['id'] assert_equal 'lead', para.attributes['role'] assert_equal 'step', para.attributes['options'] assert para.attributes.has_key?('step-option') end test 'multiple roles and options can be specified in block style using shorthand syntax' do input = <<-EOS [.role1%option1.role2%option2] Text EOS doc = document_from_string input para = doc.blocks.first assert_equal 'role1 role2', para.attributes['role'] assert_equal 'option1,option2', para.attributes['options'] assert para.attributes.has_key?('option1-option') assert para.attributes.has_key?('option2-option') end test 'a role can be added using add_role when the node has no roles' do input = <<-EOS A normal paragraph EOS doc = document_from_string(input) para = doc.blocks.first para.add_role 'role1' assert_equal 'role1', para.attributes['role'] assert para.has_role? 
'role1' end test 'a role can be added using add_role when the node already has a role' do input = <<-EOS [.role1] A normal paragraph EOS doc = document_from_string(input) para = doc.blocks.first para.add_role 'role2' assert_equal 'role1 role2', para.attributes['role'] assert para.has_role? 'role1' assert para.has_role? 'role2' end test 'a role is not added using add_role if the node already has that role' do input = <<-EOS [.role1] A normal paragraph EOS doc = document_from_string(input) para = doc.blocks.first para.add_role 'role1' assert_equal 'role1', para.attributes['role'] assert para.has_role? 'role1' end test 'an existing role can be removed using remove_role' do input = <<-EOS [.role1.role2] A normal paragraph EOS doc = document_from_string(input) para = doc.blocks.first para.remove_role 'role1' assert_equal 'role2', para.attributes['role'] assert para.has_role? 'role2' assert !para.has_role?('role1') end test 'roles are not changed when a non-existent role is removed using remove_role' do input = <<-EOS [.role1] A normal paragraph EOS doc = document_from_string(input) para = doc.blocks.first para.remove_role 'role2' assert_equal 'role1', para.attributes['role'] assert para.has_role? 'role1' assert !para.has_role?('role2') end test 'roles are not changed when using remove_role if the node has no roles' do input = <<-EOS A normal paragraph EOS doc = document_from_string(input) para = doc.blocks.first para.remove_role 'role1' assert_equal nil, para.attributes['role'] assert !para.has_role?('role1') end test 'option can be specified in first position of block style using shorthand syntax' do input = <<-EOS [%interactive] - [x] checked EOS doc = document_from_string input list = doc.blocks.first assert_equal 'interactive', list.attributes['options'] assert list.attributes.has_key?('interactive-option') assert list.attributes[1] == '%interactive' end test 'id and role attributes can be specified on section style using shorthand syntax' do input = <<-EOS [dedication#dedication.small] == Section Content. EOS output = render_embedded_string input assert_xpath '/div[@class="sect1 small"]', output, 1 assert_xpath '/div[@class="sect1 small"]/h2[@id="dedication"]', output, 1 end test 'id attribute specified using shorthand syntax should not create a special section' do input = <<-EOS [#idname] == Section content EOS doc = document_from_string input, :backend => 'docbook45' section = doc.blocks[0] refute_nil section assert_equal :section, section.context assert !section.special output = doc.convert assert_css 'section', output, 1 assert_css 'section#idname', output, 1 end test "Block attributes are additive" do input = <<-EOS [id='foo'] [role='lead'] A paragraph. EOS doc = document_from_string(input) para = doc.blocks.first assert_equal 'foo', para.id assert_equal 'lead', para.attributes['role'] end test "Last wins for id attribute" do input = <<-EOS [[bar]] [[foo]] == Section paragraph [[baz]] [id='coolio'] === Section EOS doc = document_from_string(input) sec = doc.first_section assert_equal 'foo', sec.id subsec = sec.blocks.last assert_equal 'coolio', subsec.id end test "trailing block attributes tranfer to the following section" do input = <<-EOS [[one]] == Section One paragraph [[sub]] // try to mess this up! 
=== Sub-section paragraph [role='classy'] //// block comment //// == Section Two content EOS doc = document_from_string(input) section_one = doc.blocks.first assert_equal 'one', section_one.id subsection = section_one.blocks.last assert_equal 'sub', subsection.id section_two = doc.blocks.last assert_equal 'classy', section_two.attr(:role) end end end asciidoctor-1.5.5/test/blocks_test.rb000066400000000000000000002541041277513741400176660ustar00rootroot00000000000000# encoding: UTF-8 unless defined? ASCIIDOCTOR_PROJECT_DIR $: << File.dirname(__FILE__); $:.uniq! require 'test_helper' end context "Blocks" do context 'Line Breaks' do test "ruler" do output = render_string("'''") assert_xpath '//*[@id="content"]/hr', output, 1 assert_xpath '//*[@id="content"]/*', output, 1 end test "ruler between blocks" do output = render_string("Block above\n\n'''\n\nBlock below") assert_xpath '//*[@id="content"]/hr', output, 1 assert_xpath '//*[@id="content"]/hr/preceding-sibling::*', output, 1 assert_xpath '//*[@id="content"]/hr/following-sibling::*', output, 1 end test "page break" do output = render_embedded_string("page 1\n\n<<<\n\npage 2") assert_xpath '/*[translate(@style, ";", "")="page-break-after: always"]', output, 1 assert_xpath '/*[translate(@style, ";", "")="page-break-after: always"]/preceding-sibling::div/p[text()="page 1"]', output, 1 assert_xpath '/*[translate(@style, ";", "")="page-break-after: always"]/following-sibling::div/p[text()="page 2"]', output, 1 end end context 'Comments' do test 'line comment between paragraphs offset by blank lines' do input = <<-EOS first paragraph // line comment second paragraph EOS output = render_embedded_string input refute_match(/line comment/, output) assert_xpath '//p', output, 2 end test 'adjacent line comment between paragraphs' do input = <<-EOS first line // line comment second line EOS output = render_embedded_string input refute_match(/line comment/, output) assert_xpath '//p', output, 1 assert_xpath "//p[1][text()='first line\nsecond line']", output, 1 end test 'comment block between paragraphs offset by blank lines' do input = <<-EOS first paragraph //// block comment //// second paragraph EOS output = render_embedded_string input refute_match(/block comment/, output) assert_xpath '//p', output, 2 end test 'adjacent comment block between paragraphs' do input = <<-EOS first paragraph //// block comment //// second paragraph EOS output = render_embedded_string input refute_match(/block comment/, output) assert_xpath '//p', output, 2 end test "can render with block comment at end of document with trailing endlines" do input = <<-EOS paragraph //// block comment //// EOS output = render_embedded_string input refute_match(/block comment/, output) end test "trailing endlines after block comment at end of document does not create paragraph" do input = <<-EOS paragraph //// block comment //// EOS d = document_from_string input assert_equal 1, d.blocks.size assert_xpath '//p', d.render, 1 end test 'line starting with three slashes should not be line comment' do input = <<-EOS /// not a line comment EOS output = render_embedded_string input assert !output.strip.empty?, "Line should be emitted => #{input.rstrip}" end test 'preprocessor directives should not be processed within comment block within block metadata' do input = <<-EOS .sample title //// ifdef::asciidoctor[////] //// line should be rendered EOS output = render_embedded_string input assert_xpath '//p[text() = "line should be rendered"]', output, 1 end test 'preprocessor directives should not be 
processed within comment block' do input = <<-EOS dummy line //// ifdef::asciidoctor[////] //// line should be rendered EOS output = render_embedded_string input assert_xpath '//p[text() = "line should be rendered"]', output, 1 end # WARNING if first line of content is a directive, it will get interpreted before we know it's a comment block # it happens because we always look a line ahead...not sure what we can do about it test 'preprocessor directives should not be processed within comment open block' do input = <<-EOS [comment] -- first line of comment ifdef::asciidoctor[--] line should not be rendered -- EOS output = render_embedded_string input assert_xpath '//p', output, 0 end # WARNING if first line of content is a directive, it will get interpreted before we know it's a comment block # it happens because we always look a line ahead...not sure what we can do about it test 'preprocessor directives should not be processed within comment paragraph' do input = <<-EOS [comment] first line of content ifdef::asciidoctor[////] this line should be rendered EOS output = render_embedded_string input assert_xpath '//p[text() = "this line should be rendered"]', output, 1 end test 'comment style on open block should only skip block' do input = <<-EOS [comment] -- skip this block -- not this text EOS result = render_embedded_string input assert_xpath '//p', result, 1 assert_xpath '//p[text()="not this text"]', result, 1 end test 'comment style on paragraph should only skip paragraph' do input = <<-EOS [comment] skip this paragraph not this text EOS result = render_embedded_string input assert_xpath '//p', result, 1 assert_xpath '//p[text()="not this text"]', result, 1 end test 'comment style on paragraph should not cause adjacent block to be skipped' do input = <<-EOS [comment] skip this paragraph [example] not this text EOS result = render_embedded_string input assert_xpath '/*[@class="exampleblock"]', result, 1 assert_xpath '/*[@class="exampleblock"]//*[normalize-space(text())="not this text"]', result, 1 end end context 'Quote and Verse Blocks' do test 'quote block with no attribution' do input = <<-EOS ____ A famous quote. ____ EOS output = render_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph > p', output, 1 assert_css '.quoteblock > .attribution', output, 0 assert_xpath '//*[@class = "quoteblock"]//p[text() = "A famous quote."]', output, 1 end test 'quote block with attribution' do input = <<-EOS [quote, Famous Person, Famous Book (1999)] ____ A famous quote. ____ EOS output = render_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph > p', output, 1 assert_css '.quoteblock > .attribution', output, 1 assert_css '.quoteblock > .attribution > cite', output, 1 assert_css '.quoteblock > .attribution > br + cite', output, 1 assert_xpath '//*[@class = "quoteblock"]/*[@class = "attribution"]/cite[text() = "Famous Book (1999)"]', output, 1 attribution = xmlnodes_at_xpath '//*[@class = "quoteblock"]/*[@class = "attribution"]', output, 1 author = attribution.children.first assert_equal "#{expand_entity 8212} Famous Person", author.text.strip end test 'quote block with attribute and id and role shorthand' do input = <<-EOS [quote#think.big, Donald Trump] ____ As long as your going to be thinking anyway, think big.
____ EOS output = render_embedded_string input assert_css '.quoteblock', output, 1 assert_css '#think.quoteblock.big', output, 1 assert_css '.quoteblock > .attribution', output, 1 end test 'quote block with complex content' do input = <<-EOS ____ A famous quote. NOTE: _That_ was inspiring. ____ EOS output = render_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph', output, 1 assert_css '.quoteblock > blockquote > .paragraph + .admonitionblock', output, 1 end test 'quote block using air quotes with no attribution' do input = <<-EOS "" A famous quote. "" EOS output = render_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph > p', output, 1 assert_css '.quoteblock > .attribution', output, 0 assert_xpath '//*[@class = "quoteblock"]//p[text() = "A famous quote."]', output, 1 end test 'markdown-style quote block with single paragraph and no attribution' do input = <<-EOS > A famous quote. > Some more inspiring words. EOS output = render_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph > p', output, 1 assert_css '.quoteblock > .attribution', output, 0 assert_xpath %(//*[@class = "quoteblock"]//p[text() = "A famous quote.\nSome more inspiring words."]), output, 1 end test 'lazy markdown-style quote block with single paragraph and no attribution' do input = <<-EOS > A famous quote. Some more inspiring words. EOS output = render_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph > p', output, 1 assert_css '.quoteblock > .attribution', output, 0 assert_xpath %(//*[@class = "quoteblock"]//p[text() = "A famous quote.\nSome more inspiring words."]), output, 1 end test 'markdown-style quote block with multiple paragraphs and no attribution' do input = <<-EOS > A famous quote. > > Some more inspiring words. EOS output = render_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph > p', output, 2 assert_css '.quoteblock > .attribution', output, 0 assert_xpath %((//*[@class = "quoteblock"]//p)[1][text() = "A famous quote."]), output, 1 assert_xpath %((//*[@class = "quoteblock"]//p)[2][text() = "Some more inspiring words."]), output, 1 end test 'markdown-style quote block with multiple blocks and no attribution' do input = <<-EOS > A famous quote. > > NOTE: Some more inspiring words. EOS output = render_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph > p', output, 1 assert_css '.quoteblock > blockquote > .admonitionblock', output, 1 assert_css '.quoteblock > .attribution', output, 0 assert_xpath %((//*[@class = "quoteblock"]//p)[1][text() = "A famous quote."]), output, 1 assert_xpath %((//*[@class = "quoteblock"]//*[@class = "admonitionblock note"]//*[@class="content"])[1][normalize-space(text()) = "Some more inspiring words."]), output, 1 end test 'markdown-style quote block with single paragraph and attribution' do input = <<-EOS > A famous quote. > Some more inspiring words. 
> -- Famous Person, Famous Source, Volume 1 (1999) EOS output = render_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph > p', output, 1 assert_xpath %(//*[@class = "quoteblock"]//p[text() = "A famous quote.\nSome more inspiring words."]), output, 1 assert_css '.quoteblock > .attribution', output, 1 assert_css '.quoteblock > .attribution > cite', output, 1 assert_css '.quoteblock > .attribution > br + cite', output, 1 assert_xpath '//*[@class = "quoteblock"]/*[@class = "attribution"]/cite[text() = "Famous Source, Volume 1 (1999)"]', output, 1 attribution = xmlnodes_at_xpath '//*[@class = "quoteblock"]/*[@class = "attribution"]', output, 1 author = attribution.children.first assert_equal "#{expand_entity 8212} Famous Person", author.text.strip end test 'quoted paragraph-style quote block with attribution' do input = <<-EOS "A famous quote. Some more inspiring words." -- Famous Person, Famous Source, Volume 1 (1999) EOS output = render_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_xpath %(//*[@class = "quoteblock"]/blockquote[normalize-space(text()) = "A famous quote. Some more inspiring words."]), output, 1 assert_css '.quoteblock > .attribution', output, 1 assert_css '.quoteblock > .attribution > cite', output, 1 assert_css '.quoteblock > .attribution > br + cite', output, 1 assert_xpath '//*[@class = "quoteblock"]/*[@class = "attribution"]/cite[text() = "Famous Source, Volume 1 (1999)"]', output, 1 attribution = xmlnodes_at_xpath '//*[@class = "quoteblock"]/*[@class = "attribution"]', output, 1 author = attribution.children.first assert_equal "#{expand_entity 8212} Famous Person", author.text.strip end test 'single-line verse block without attribution' do input = <<-EOS [verse] ____ A famous verse. ____ EOS output = render_string input assert_css '.verseblock', output, 1 assert_css '.verseblock > pre', output, 1 assert_css '.verseblock > .attribution', output, 0 assert_css '.verseblock p', output, 0 assert_xpath '//*[@class = "verseblock"]/pre[normalize-space(text()) = "A famous verse."]', output, 1 end test 'single-line verse block with attribution' do input = <<-EOS [verse, Famous Poet, Famous Poem] ____ A famous verse. ____ EOS output = render_string input assert_css '.verseblock', output, 1 assert_css '.verseblock p', output, 0 assert_css '.verseblock > pre', output, 1 assert_css '.verseblock > .attribution', output, 1 assert_css '.verseblock > .attribution > cite', output, 1 assert_css '.verseblock > .attribution > br + cite', output, 1 assert_xpath '//*[@class = "verseblock"]/*[@class = "attribution"]/cite[text() = "Famous Poem"]', output, 1 attribution = xmlnodes_at_xpath '//*[@class = "verseblock"]/*[@class = "attribution"]', output, 1 author = attribution.children.first assert_equal "#{expand_entity 8212} Famous Poet", author.text.strip end test 'multi-stanza verse block' do input = <<-EOS [verse] ____ A famous verse. Stanza two. ____ EOS output = render_string input assert_xpath '//*[@class = "verseblock"]', output, 1 assert_xpath '//*[@class = "verseblock"]/pre', output, 1 assert_xpath '//*[@class = "verseblock"]//p', output, 0 assert_xpath '//*[@class = "verseblock"]/pre[contains(text(), "A famous verse.")]', output, 1 assert_xpath '//*[@class = "verseblock"]/pre[contains(text(), "Stanza two.")]', output, 1 end test 'verse block does not contain block elements' do input = <<-EOS [verse] ____ A famous verse. .... 
not a literal .... ____ EOS output = render_string input assert_css '.verseblock', output, 1 assert_css '.verseblock > pre', output, 1 assert_css '.verseblock p', output, 0 assert_css '.verseblock .literalblock', output, 0 end test 'verse should have normal subs' do input = <<-EOS [verse] ____ A famous verse ____ EOS verse = block_from_string input assert_equal Asciidoctor::Substitutors::SUBS[:normal], verse.subs end test 'should not recognize callouts in a verse' do input = <<-EOS [verse] ____ La la la <1> ____ <1> Not pointing to a callout EOS output = render_embedded_string input assert_xpath '//pre[text()="La la la <1>"]', output, 1 end test 'should perform normal subs on a verse block' do input = <<-EOS [verse] ____ _GET /groups/link:#group-id[\{group-id\}]_ ____ EOS output = render_embedded_string input assert output.include?('
    GET /groups/{group-id}
    ') end end context "Example Blocks" do test "can render example block" do input = <<-EOS ==== This is an example of an example block. How crazy is that? ==== EOS output = render_string input assert_xpath '//*[@class="exampleblock"]//p', output, 2 end test "assigns sequential numbered caption to example block with title" do input = <<-EOS .Writing Docs with AsciiDoc ==== Here's how you write AsciiDoc. You just write. ==== .Writing Docs with DocBook ==== Here's how you write DocBook. You futz with XML. ==== EOS doc = document_from_string input output = doc.render assert_xpath '(//*[@class="exampleblock"])[1]/*[@class="title"][text()="Example 1. Writing Docs with AsciiDoc"]', output, 1 assert_xpath '(//*[@class="exampleblock"])[2]/*[@class="title"][text()="Example 2. Writing Docs with DocBook"]', output, 1 assert_equal 2, doc.attributes['example-number'] end test "assigns sequential character caption to example block with title" do input = <<-EOS :example-number: @ .Writing Docs with AsciiDoc ==== Here's how you write AsciiDoc. You just write. ==== .Writing Docs with DocBook ==== Here's how you write DocBook. You futz with XML. ==== EOS doc = document_from_string input output = doc.render assert_xpath '(//*[@class="exampleblock"])[1]/*[@class="title"][text()="Example A. Writing Docs with AsciiDoc"]', output, 1 assert_xpath '(//*[@class="exampleblock"])[2]/*[@class="title"][text()="Example B. Writing Docs with DocBook"]', output, 1 assert_equal 'B', doc.attributes['example-number'] end test "explicit caption is used if provided" do input = <<-EOS [caption="Look! "] .Writing Docs with AsciiDoc ==== Here's how you write AsciiDoc. You just write. ==== EOS doc = document_from_string input output = doc.render assert_xpath '(//*[@class="exampleblock"])[1]/*[@class="title"][text()="Look! Writing Docs with AsciiDoc"]', output, 1 assert !doc.attributes.has_key?('example-number') end test 'explicit caption is set on block even if block has no title' do input = <<-EOS [caption="Look!"] ==== Just write. 
==== EOS doc = document_from_string input assert_equal 'Look!', doc.blocks.first.caption output = doc.render refute_match(/Look/, output) end test 'automatic caption can be turned off and on and modified' do input = <<-EOS .first example ==== an example ==== :caption: .second example ==== another example ==== :caption!: :example-caption: Exhibit .third example ==== yet another example ==== EOS output = render_embedded_string input assert_xpath '/*[@class="exampleblock"]', output, 3 assert_xpath '(/*[@class="exampleblock"])[1]/*[@class="title"][starts-with(text(), "Example ")]', output, 1 assert_xpath '(/*[@class="exampleblock"])[2]/*[@class="title"][text()="second example"]', output, 1 assert_xpath '(/*[@class="exampleblock"])[3]/*[@class="title"][starts-with(text(), "Exhibit ")]', output, 1 end end context 'Admonition Blocks' do test 'caption block-level attribute should be used as caption' do input = <<-EOS :tip-caption: Pro Tip [caption="Pro Tip"] TIP: Override the caption of an admonition block using an attribute entry EOS output = render_embedded_string input assert_xpath '/*[@class="admonitionblock tip"]//*[@class="icon"]/*[@class="title"][text()="Pro Tip"]', output, 1 end test 'can override caption of admonition block using document attribute' do input = <<-EOS :tip-caption: Pro Tip TIP: Override the caption of an admonition block using an attribute entry EOS output = render_embedded_string input assert_xpath '/*[@class="admonitionblock tip"]//*[@class="icon"]/*[@class="title"][text()="Pro Tip"]', output, 1 end test 'blank caption document attribute should not blank admonition block caption' do input = <<-EOS :caption: TIP: Override the caption of an admonition block using an attribute entry EOS output = render_embedded_string input assert_xpath '/*[@class="admonitionblock tip"]//*[@class="icon"]/*[@class="title"][text()="Tip"]', output, 1 end end context "Preformatted Blocks" do test 'should separate adjacent paragraphs and listing into blocks' do input = <<-EOS paragraph 1 ---- listing content ---- paragraph 2 EOS output = render_embedded_string input assert_xpath '/*[@class="paragraph"]/p', output, 2 assert_xpath '/*[@class="listingblock"]', output, 1 assert_xpath '(/*[@class="paragraph"]/following-sibling::*)[1][@class="listingblock"]', output, 1 end test "should preserve endlines in literal block" do input = <<-EOS .... line one line two line three .... 
EOS [true, false].each {|header_footer| output = render_string input, :header_footer => header_footer assert_xpath '//pre', output, 1 assert_xpath '//pre/text()', output, 1 text = xmlnodes_at_xpath('//pre/text()', output, 1).text lines = text.lines.entries assert_equal 5, lines.size expected = "line one\n\nline two\n\nline three".lines.entries assert_equal expected, lines blank_lines = output.scan(/\n[ \t]*\n/).size assert blank_lines >= 2 } end test "should preserve endlines in listing block" do input = <<-EOS [source] ---- line one line two line three ---- EOS [true, false].each {|header_footer| output = render_string input, :header_footer => header_footer assert_xpath '//pre/code', output, 1 assert_xpath '//pre/code/text()', output, 1 text = xmlnodes_at_xpath('//pre/code/text()', output, 1).text lines = text.lines.entries assert_equal 5, lines.size expected = "line one\n\nline two\n\nline three".lines.entries assert_equal expected, lines blank_lines = output.scan(/\n[ \t]*\n/).size assert blank_lines >= 2 } end test "should preserve endlines in verse block" do input = <<-EOS -- [verse] ____ line one line two line three ____ -- EOS [true, false].each {|header_footer| output = render_string input, :header_footer => header_footer assert_xpath '//*[@class="verseblock"]/pre', output, 1 assert_xpath '//*[@class="verseblock"]/pre/text()', output, 1 text = xmlnodes_at_xpath('//*[@class="verseblock"]/pre/text()', output, 1).text lines = text.lines.entries assert_equal 5, lines.size expected = "line one\n\nline two\n\nline three".lines.entries assert_equal expected, lines blank_lines = output.scan(/\n[ \t]*\n/).size assert blank_lines >= 2 } end test 'should strip leading and trailing blank lines when rendering verbatim block' do input = <<-EOS [subs="attributes"] .... first line last line {empty} ....
EOS doc = document_from_string input, :header_footer => false block = doc.blocks.first assert_equal ['', '', ' first line', '', 'last line', '', '{empty}', ''], block.lines result = doc.render assert_xpath %(//pre[text()=" first line\n\nlast line"]), result, 1 end test 'should process block with CRLF endlines' do input = <<-EOS [source]\r ----\r source line 1\r source line 2\r ----\r EOS output = render_embedded_string input refute_match(/\[source\]/, output) assert_xpath '/*[@class="listingblock"]//pre', output, 1 assert_xpath '/*[@class="listingblock"]//pre/code', output, 1 assert_xpath %(/*[@class="listingblock"]//pre/code[text()="source line 1\nsource line 2"]), output, 1 end test 'should remove block indent if indent attribute is 0' do input = <<-EOS [indent="0"] ---- def names @names.split ' ' end ---- EOS expected = <<-EOS def names @names.split ' ' end EOS output = render_embedded_string input assert_css 'pre', output, 1 assert_css '.listingblock pre', output, 1 result = xmlnodes_at_xpath('//pre', output, 1).text assert_equal expected.chomp, result end test 'should not remove block indent if indent attribute is -1' do input = <<-EOS [indent="-1"] ---- def names @names.split ' ' end ---- EOS expected = <<-EOS def names @names.split ' ' end EOS output = render_embedded_string input assert_css 'pre', output, 1 assert_css '.listingblock pre', output, 1 result = xmlnodes_at_xpath('//pre', output, 1).text assert_equal expected.chomp, result end test 'should set block indent to value specified by indent attribute' do input = <<-EOS [indent="1"] ---- def names @names.split ' ' end ---- EOS expected = <<-EOS def names @names.split ' ' end EOS output = render_embedded_string input assert_css 'pre', output, 1 assert_css '.listingblock pre', output, 1 result = xmlnodes_at_xpath('//pre', output, 1).text assert_equal expected.chomp, result end test 'should set block indent to value specified by indent document attribute' do input = <<-EOS :source-indent: 1 [source,ruby] ---- def names @names.split ' ' end ---- EOS expected = <<-EOS def names @names.split ' ' end EOS output = render_embedded_string input assert_css 'pre', output, 1 assert_css '.listingblock pre', output, 1 result = xmlnodes_at_xpath('//pre', output, 1).text assert_equal expected.chomp, result end test 'should expand tabs if tabsize attribute is positive' do input = <<-EOS :tabsize: 4 [indent=0] ---- def names @names.split ' ' end ---- EOS expected = <<-EOS def names @names.split ' ' end EOS output = render_embedded_string input assert_css 'pre', output, 1 assert_css '.listingblock pre', output, 1 result = xmlnodes_at_xpath('//pre', output, 1).text assert_equal expected.chomp, result end test 'literal block should honor nowrap option' do input = <<-EOS [options="nowrap"] ---- Do not wrap me if I get too long. ---- EOS output = render_embedded_string input assert_css 'pre.nowrap', output, 1 end test 'literal block should set nowrap class if prewrap document attribute is disabled' do input = <<-EOS :prewrap!: ---- Do not wrap me if I get too long. 
---- EOS output = render_embedded_string input assert_css 'pre.nowrap', output, 1 end test 'literal block should honor explicit subs list' do input = <<-EOS [subs="verbatim,quotes"] ---- Map *attributes*; //<1> ---- EOS block = block_from_string input assert_equal [:specialcharacters,:callouts,:quotes], block.subs output = block.render assert output.include?('Map<String, String> attributes;') assert_xpath '//pre/b[text()="(1)"]', output, 1 end test 'should be able to disable callouts for literal block' do input = <<-EOS [subs="specialcharacters"] ---- No callout here <1> ---- EOS block = block_from_string input assert_equal [:specialcharacters], block.subs output = block.render assert_xpath '//pre/b[text()="(1)"]', output, 0 end test 'listing block should honor explicit subs list' do input = <<-EOS [subs="specialcharacters,quotes"] ---- $ *python functional_tests.py* Traceback (most recent call last): File "functional_tests.py", line 4, in assert 'Django' in browser.title AssertionError ---- EOS output = render_embedded_string input assert_css '.listingblock pre', output, 1 assert_css '.listingblock pre strong', output, 1 assert_css '.listingblock pre em', output, 0 input2 = <<-EOS [subs="specialcharacters,macros"] ---- $ pass:quotes[*python functional_tests.py*] Traceback (most recent call last): File "functional_tests.py", line 4, in assert pass:quotes['Django'] in browser.title AssertionError ---- EOS output2 = render_embedded_string input2 # FIXME JRuby is adding extra trailing endlines in the second document, # for now, rstrip is necessary assert_equal output.rstrip, output2.rstrip end test 'listing block without title should generate screen element in docbook' do input = <<-EOS ---- listing block ---- EOS output = render_embedded_string input, :backend => 'docbook' assert_xpath '/screen[text()="listing block"]', output, 1 end test 'listing block with title should generate screen element inside formalpara element in docbook' do input = <<-EOS .title ---- listing block ---- EOS output = render_embedded_string input, :backend => 'docbook' assert_xpath '/formalpara', output, 1 assert_xpath '/formalpara/title[text()="title"]', output, 1 assert_xpath '/formalpara/para/screen[text()="listing block"]', output, 1 end test 'source block with no title or language should generate screen element in docbook' do input = <<-EOS [source] ---- listing block ---- EOS output = render_embedded_string input, :backend => 'docbook' assert_xpath '/screen[text()="listing block"]', output, 1 end test 'source block with title and no language should generate screen element inside formalpara element in docbook' do input = <<-EOS [source] .title ---- listing block ---- EOS output = render_embedded_string input, :backend => 'docbook' assert_xpath '/formalpara', output, 1 assert_xpath '/formalpara/title[text()="title"]', output, 1 assert_xpath '/formalpara/para/screen[text()="listing block"]', output, 1 end end context "Open Blocks" do test "can render open block" do input = <<-EOS -- This is an open block. It can span multiple lines. -- EOS output = render_string input assert_xpath '//*[@class="openblock"]//p', output, 2 end test "open block can contain another block" do input = <<-EOS -- This is an open block. It can span multiple lines. ____ It can hold great quotes like this one. 
____ -- EOS output = render_string input assert_xpath '//*[@class="openblock"]//p', output, 3 assert_xpath '//*[@class="openblock"]//*[@class="quoteblock"]', output, 1 end end context 'Passthrough Blocks' do test 'can parse a passthrough block' do input = <<-EOS ++++ This is a passthrough block. ++++ EOS block = block_from_string input assert !block.nil? assert_equal 1, block.lines.size assert_equal 'This is a passthrough block.', block.source end test 'does not perform subs on a passthrough block by default' do input = <<-EOS :type: passthrough ++++ This is a '{type}' block. http://asciidoc.org image:tiger.png[] ++++ EOS expected = %(This is a '{type}' block.\nhttp://asciidoc.org\nimage:tiger.png[]) output = render_embedded_string input assert_equal expected, output.strip end test 'does not perform subs on a passthrough block with pass style by default' do input = <<-EOS :type: passthrough [pass] ++++ This is a '{type}' block. http://asciidoc.org image:tiger.png[] ++++ EOS expected = %(This is a '{type}' block.\nhttp://asciidoc.org\nimage:tiger.png[]) output = render_embedded_string input assert_equal expected, output.strip end test 'passthrough block honors explicit subs list' do input = <<-EOS :type: passthrough [subs="attributes,quotes,macros"] ++++ This is a _{type}_ block. http://asciidoc.org ++++ EOS expected = %(This is a passthrough block.\nhttp://asciidoc.org) output = render_embedded_string input assert_equal expected, output.strip end test 'should strip leading and trailing blank lines when rendering raw block' do input = <<-EOS ++++ line above ++++ ++++ first line last line ++++ ++++ line below ++++ EOS doc = document_from_string input, :header_footer => false block = doc.blocks[1] assert_equal ['', '', ' first line', '', 'last line', '', ''], block.lines result = doc.render assert_equal "line above\n first line\n\nlast line\nline below", result, 1 end end context 'Math blocks' do test 'should add LaTeX math delimiters around latexmath block content' do input = <<-'EOS' [latexmath] ++++ \sqrt{3x-1}+(1+x)^2 < y ++++ EOS output = render_embedded_string input assert_css '.stemblock', output, 1 nodes = xmlnodes_at_xpath '//*[@class="content"]/child::text()', output, 1 assert_equal '\[\sqrt{3x-1}+(1+x)^2 < y\]', nodes.first.to_s.strip end test 'should not add LaTeX math delimiters around latexmath block content if already present' do input = <<-'EOS' [latexmath] ++++ \[\sqrt{3x-1}+(1+x)^2 < y\] ++++ EOS output = render_embedded_string input assert_css '.stemblock', output, 1 nodes = xmlnodes_at_xpath '//*[@class="content"]/child::text()', output, 1 assert_equal '\[\sqrt{3x-1}+(1+x)^2 < y\]', nodes.first.to_s.strip end test 'should render latexmath block in alt of equation in DocBook backend' do input = <<-'EOS' [latexmath] ++++ \sqrt{3x-1}+(1+x)^2 < y ++++ EOS expect = <<-'EOS' EOS output = render_embedded_string input, :backend => :docbook assert_equal expect.strip, output.strip end test 'should add AsciiMath delimiters around asciimath block content' do input = <<-'EOS' [asciimath] ++++ sqrt(3x-1)+(1+x)^2 < y ++++ EOS output = render_embedded_string input assert_css '.stemblock', output, 1 nodes = xmlnodes_at_xpath '//*[@class="content"]/child::text()', output, 1 assert_equal '\$sqrt(3x-1)+(1+x)^2 < y\$', nodes.first.to_s.strip end test 'should not add AsciiMath delimiters around asciimath block content if already present' do input = <<-'EOS' [asciimath] ++++ \$sqrt(3x-1)+(1+x)^2 < y\$ ++++ EOS output = render_embedded_string input assert_css '.stemblock', output, 1 nodes = 
xmlnodes_at_xpath '//*[@class="content"]/child::text()', output, 1 assert_equal '\$sqrt(3x-1)+(1+x)^2 < y\$', nodes.first.to_s.strip end test 'should render asciimath block in textobject of equation in DocBook backend' do input = <<-'EOS' [asciimath] ++++ x+b/(2a)<+-sqrt((b^2)/(4a^2)-c/a) ++++ EOS expect = %( x+b2a<±b24a2ca ) output = render_embedded_string input, :backend => :docbook assert_equal expect.strip, output.strip end test 'should output title for latexmath block if defined' do input = <<-'EOS' .The Lorenz Equations [latexmath] ++++ \begin{aligned} \dot{x} & = \sigma(y-x) \\ \dot{y} & = \rho x - y - xz \\ \dot{z} & = -\beta z + xy \end{aligned} ++++ EOS output = render_embedded_string input assert_css '.stemblock', output, 1 assert_css '.stemblock .title', output, 1 assert_xpath '//*[@class="title"][text()="The Lorenz Equations"]', output, 1 end test 'should output title for asciimath block if defined' do input = <<-'EOS' .Simple fraction [asciimath] ++++ a//b ++++ EOS output = render_embedded_string input assert_css '.stemblock', output, 1 assert_css '.stemblock .title', output, 1 assert_xpath '//*[@class="title"][text()="Simple fraction"]', output, 1 end test 'should add AsciiMath delimiters around stem block content if stem attribute != latexmath' do input = <<-'EOS' [stem] ++++ sqrt(3x-1)+(1+x)^2 < y ++++ EOS [ {}, {'stem' => ''}, {'stem' => 'asciimath'} ].each do |attributes| output = render_embedded_string input, :attributes => attributes assert_css '.stemblock', output, 1 nodes = xmlnodes_at_xpath '//*[@class="content"]/child::text()', output, 1 assert_equal '\$sqrt(3x-1)+(1+x)^2 < y\$', nodes.first.to_s.strip end end test 'should add LaTeX math delimiters around stem block content if stem attribute is latexmath' do input = <<-'EOS' [stem] ++++ \sqrt{3x-1}+(1+x)^2 < y ++++ EOS output = render_embedded_string input, :attributes => {'stem' => 'latexmath'} assert_css '.stemblock', output, 1 nodes = xmlnodes_at_xpath '//*[@class="content"]/child::text()', output, 1 assert_equal '\[\sqrt{3x-1}+(1+x)^2 < y\]', nodes.first.to_s.strip end end context 'Metadata' do test 'block title above section gets carried over to first block in section' do input = <<-EOS .Title == Section paragraph EOS output = render_string input assert_xpath '//*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="paragraph"]/*[@class="title"][text() = "Title"]', output, 1 assert_xpath '//*[@class="paragraph"]/p[text() = "paragraph"]', output, 1 end test 'block title above document title demotes document title to a section title' do input = <<-EOS .Block title = Section Title section paragraph EOS output, errors = nil redirect_streams do |stdout, stderr| output = render_string input errors = stderr.string end assert_xpath '//*[@id="header"]/*', output, 0 assert_xpath '//*[@id="preamble"]/*', output, 0 assert_xpath '//*[@id="content"]/h1[text()="Section Title"]', output, 1 assert_xpath '//*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="paragraph"]/*[@class="title"][text()="Block title"]', output, 1 assert !errors.empty? 
assert_match(/only book doctypes can contain level 0 sections/, errors) end test 'block title above document title gets carried over to first block in first section if no preamble' do input = <<-EOS .Block title = Document Title == First Section paragraph EOS output = render_string input assert_xpath '//*[@class="sect1"]//*[@class="paragraph"]/*[@class="title"][text() = "Block title"]', output, 1 end test 'empty attribute list should not appear in output' do input = <<-EOS [] -- Block content -- EOS output = render_embedded_string input assert output.include?('Block content') assert !output.include?('[]') end test 'empty block anchor should not appear in output' do input = <<-EOS [[]] -- Block content -- EOS output = render_embedded_string input assert output.include?('Block content') assert !output.include?('[[]]') end end context 'Images' do test 'can render block image with alt text defined in macro' do input = <<-EOS image::images/tiger.png[Tiger] EOS output = render_embedded_string input assert_xpath '/*[@class="imageblock"]//img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 end test 'renders SVG image using img element by default' do input = <<-EOS image::tiger.svg[Tiger] EOS output = render_embedded_string input, :safe => Asciidoctor::SafeMode::SERVER assert_xpath '/*[@class="imageblock"]//img[@src="tiger.svg"][@alt="Tiger"]', output, 1 end test 'renders interactive SVG image with alt text using object element' do input = <<-EOS :imagesdir: images [%interactive] image::tiger.svg[Tiger,100] EOS output = render_embedded_string input, :safe => Asciidoctor::SafeMode::SERVER assert_xpath '/*[@class="imageblock"]//object[@type="image/svg+xml"][@data="images/tiger.svg"][@width="100"]/span[@class="alt"][text()="Tiger"]', output, 1 end test 'renders SVG image with alt text using img element when safe mode is secure' do input = <<-EOS [%interactive] image::images/tiger.svg[Tiger,100] EOS output = render_embedded_string input assert_xpath '/*[@class="imageblock"]//img[@src="images/tiger.svg"][@alt="Tiger"]', output, 1 end test 'inserts fallback image for SVG inside object element using same dimensions' do input = <<-EOS :imagesdir: images [%interactive] image::tiger.svg[Tiger,100,fallback=tiger.png] EOS output = render_embedded_string input, :safe => Asciidoctor::SafeMode::SERVER assert_xpath '/*[@class="imageblock"]//object[@type="image/svg+xml"][@data="images/tiger.svg"][@width="100"]/img[@src="images/tiger.png"][@width="100"]', output, 1 end test 'detects SVG image URI that contains a query string' do input = <<-EOS :imagesdir: images [%interactive] image::http://example.org/tiger.svg?foo=bar[Tiger,100] EOS output = render_embedded_string input, :safe => Asciidoctor::SafeMode::SERVER assert_xpath '/*[@class="imageblock"]//object[@type="image/svg+xml"][@data="http://example.org/tiger.svg?foo=bar"][@width="100"]/span[@class="alt"][text()="Tiger"]', output, 1 end test 'detects SVG image when format attribute is svg' do input = <<-EOS :imagesdir: images [%interactive] image::http://example.org/tiger-svg[Tiger,100,format=svg] EOS output = render_embedded_string input, :safe => Asciidoctor::SafeMode::SERVER assert_xpath '/*[@class="imageblock"]//object[@type="image/svg+xml"][@data="http://example.org/tiger-svg"][@width="100"]/span[@class="alt"][text()="Tiger"]', output, 1 end test 'renders inline SVG image using svg element' do input = <<-EOS :imagesdir: fixtures [%inline] image::circle.svg[Tiger,100] EOS output = render_embedded_string input, :safe => Asciidoctor::SafeMode::SERVER, 
:attributes => { 'docdir' => ::File.dirname(__FILE__) } assert_match(/]*width="100px"[^>]*>/, output, 1) refute_match(/]*width="500px"[^>]*>/, output) refute_match(/]*height="500px"[^>]*>/, output) refute_match(/]*style="width:500px;height:500px"[^>]*>/, output) end test 'renders inline SVG image using svg element even when data-uri is set' do input = <<-EOS :imagesdir: fixtures :data-uri: [%inline] image::circle.svg[Tiger,100] EOS output = render_embedded_string input, :safe => Asciidoctor::SafeMode::SERVER, :attributes => { 'docdir' => ::File.dirname(__FILE__) } assert_match(/]*width="100px">/, output, 1) end test 'renders alt text for inline svg element if svg cannot be read' do input = <<-EOS [%inline] image::no-such-image.svg[Alt Text] EOS output = render_embedded_string input, :safe => Asciidoctor::SafeMode::SERVER assert_xpath '//span[@class="alt"][text()="Alt Text"]', output, 1 end test 'can render block image with alt text defined in macro containing escaped square bracket' do input = <<-EOS image::images/tiger.png[A [Bengal\\] Tiger] EOS output = render_string input img = xmlnodes_at_xpath '//img', output, 1 assert_equal 'A [Bengal] Tiger', img.attr('alt').value end test 'can render block image with alt text defined in block attribute above macro' do input = <<-EOS [Tiger] image::images/tiger.png[] EOS output = render_embedded_string input assert_xpath '/*[@class="imageblock"]//img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 end test 'alt text in macro overrides alt text above macro' do input = <<-EOS [Alt Text] image::images/tiger.png[Tiger] EOS output = render_embedded_string input assert_xpath '/*[@class="imageblock"]//img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 end test 'alt text is escaped in HTML backend' do input = <<-EOS image::images/open.png[File > Open] EOS output = render_embedded_string input assert_match(/File > Open/, output) end test 'alt text is escaped in DocBook backend' do input = <<-EOS image::images/open.png[File > Open] EOS output = render_embedded_string input, :backend => :docbook assert_match(/File > Open/, output) end test "can render block image with auto-generated alt text" do input = <<-EOS image::images/tiger.png[] EOS output = render_embedded_string input assert_xpath '/*[@class="imageblock"]//img[@src="images/tiger.png"][@alt="tiger"]', output, 1 end test "can render block image with alt text and height and width" do input = <<-EOS image::images/tiger.png[Tiger, 200, 300] EOS output = render_embedded_string input assert_xpath '/*[@class="imageblock"]//img[@src="images/tiger.png"][@alt="Tiger"][@width="200"][@height="300"]', output, 1 end test "can render block image with link" do input = <<-EOS image::images/tiger.png[Tiger, link='http://en.wikipedia.org/wiki/Tiger'] EOS output = render_embedded_string input assert_xpath '/*[@class="imageblock"]//a[@class="image"][@href="http://en.wikipedia.org/wiki/Tiger"]/img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 end test "can render block image with caption" do input = <<-EOS .The AsciiDoc Tiger image::images/tiger.png[Tiger] EOS doc = document_from_string input output = doc.render assert_xpath '//*[@class="imageblock"]//img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 assert_xpath '//*[@class="imageblock"]/*[@class="title"][text() = "Figure 1. The AsciiDoc Tiger"]', output, 1 assert_equal 1, doc.attributes['figure-number'] end test 'can render block image with explicit caption' do input = <<-EOS [caption="Voila! 
"] .The AsciiDoc Tiger image::images/tiger.png[Tiger] EOS doc = document_from_string input output = doc.render assert_xpath '//*[@class="imageblock"]//img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 assert_xpath '//*[@class="imageblock"]/*[@class="title"][text() = "Voila! The AsciiDoc Tiger"]', output, 1 assert !doc.attributes.has_key?('figure-number') end test 'can align image in DocBook backend' do input = <<-EOS image::images/sunset.jpg[Sunset, align="right"] EOS output = render_embedded_string input, :backend => :docbook assert_xpath '//imagedata', output, 1 assert_xpath '//imagedata[@align="right"]', output, 1 end test 'can scale image in DocBook backend' do input = <<-EOS image::images/sunset.jpg[Sunset, scale="200"] EOS output = render_embedded_string input, :backend => :docbook assert_xpath '//imagedata', output, 1 assert_xpath '//imagedata[@scale="200"]', output, 1 end test 'can scale image width in DocBook backend' do input = <<-EOS image::images/sunset.jpg[Sunset, scaledwidth="25%"] EOS output = render_embedded_string input, :backend => :docbook assert_xpath '//imagedata', output, 1 assert_xpath '//imagedata[@width="25%"]', output, 1 assert_xpath '//imagedata[@scalefit="1"]', output, 1 end test 'adds % to scaled width if no units given in DocBook backend ' do input = <<-EOS image::images/sunset.jpg[Sunset, scaledwidth="25"] EOS output = render_embedded_string input, :backend => :docbook assert_xpath '//imagedata', output, 1 assert_xpath '//imagedata[@width="25%"]', output, 1 assert_xpath '//imagedata[@scalefit="1"]', output, 1 end test 'keeps line unprocessed if image target is missing attribute reference and attribute-missing is skip' do input = <<-EOS :attribute-missing: skip image::{bogus}[] EOS output = render_embedded_string input assert output.include?('image::{bogus}[]') end test 'drops line if image target is missing attribute reference and attribute-missing is drop' do input = <<-EOS :attribute-missing: drop image::{bogus}[] EOS output = render_embedded_string input assert output.strip.empty? end test 'drops line if image target is missing attribute reference and attribute-missing is drop-line' do input = <<-EOS :attribute-missing: drop-line image::{bogus}[] EOS output = render_embedded_string input assert output.strip.empty? 
end test 'dropped image does not break processing of following section and attribute-missing is drop-line' do input = <<-EOS :attribute-missing: drop-line image::{bogus}[] == Section Title EOS output = render_embedded_string input assert_css 'img', output, 0 assert_css 'h2', output, 1 assert !output.include?('== Section Title') end test 'should pass through image that references uri' do input = <<-EOS :imagesdir: images image::http://asciidoc.org/images/tiger.png[Tiger] EOS output = render_embedded_string input assert_xpath '/*[@class="imageblock"]//img[@src="http://asciidoc.org/images/tiger.png"][@alt="Tiger"]', output, 1 end test 'can resolve image relative to imagesdir' do input = <<-EOS :imagesdir: images image::tiger.png[Tiger] EOS output = render_embedded_string input assert_xpath '/*[@class="imageblock"]//img[@src="images/tiger.png"][@alt="Tiger"]', output, 1 end test 'embeds base64-encoded data uri for image when data-uri attribute is set' do input = <<-EOS :data-uri: :imagesdir: fixtures image::dot.gif[Dot] EOS doc = document_from_string input, :safe => Asciidoctor::SafeMode::SAFE, :attributes => {'docdir' => File.dirname(__FILE__)} assert_equal 'fixtures', doc.attributes['imagesdir'] output = doc.render assert_xpath '//img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Dot"]', output, 1 end test 'embeds base64-encoded data uri for remote image when data-uri attribute is set' do input = <<-EOS :data-uri: image::http://#{resolve_localhost}:9876/fixtures/dot.gif[Dot] EOS output = using_test_webserver do render_embedded_string input, :safe => :safe, :attributes => {'allow-uri-read' => ''} end assert_xpath '//img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Dot"]', output, 1 end test 'embeds base64-encoded data uri for remote image when imagesdir is a URI and data-uri attribute is set' do input = <<-EOS :data-uri: :imagesdir: http://#{resolve_localhost}:9876/fixtures image::dot.gif[Dot] EOS output = using_test_webserver do render_embedded_string input, :safe => :safe, :attributes => {'allow-uri-read' => ''} end assert_xpath '//img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Dot"]', output, 1 end test 'uses remote image uri when data-uri attribute is set and image cannot be retrieved' do image_uri = "http://#{resolve_localhost}:9876/fixtures/missing-image.gif" input = <<-EOS :data-uri: image::#{image_uri}[Missing image] EOS output = using_test_webserver do render_embedded_string input, :safe => :safe, :attributes => {'allow-uri-read' => ''} end assert_xpath %(/*[@class="imageblock"]//img[@src="#{image_uri}"][@alt="Missing image"]), output, 1 end test 'uses remote image uri when data-uri attribute is set and allow-uri-read is not set' do image_uri = "http://#{resolve_localhost}:9876/fixtures/dot.gif" input = <<-EOS :data-uri: image::#{image_uri}[Dot] EOS output = using_test_webserver do render_embedded_string input, :safe => :safe end assert_xpath %(/*[@class="imageblock"]//img[@src="#{image_uri}"][@alt="Dot"]), output, 1 end test 'can handle embedded data uri images' do input = <<-EOS image::data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs=[Dot] EOS output = render_embedded_string input assert_xpath '//img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Dot"]', output, 1 end test 'can handle embedded data uri images when data-uri attribute is set' do input = <<-EOS :data-uri: 
image::data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs=[Dot] EOS output = render_embedded_string input assert_xpath '//img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Dot"]', output, 1 end # this test will cause a warning to be printed to the console (until we have a message facility) test 'cleans reference to ancestor directories in imagesdir before reading image if safe mode level is at least SAFE' do input = <<-EOS :data-uri: :imagesdir: ../..//fixtures/./../../fixtures image::dot.gif[Dot] EOS doc = document_from_string input, :safe => Asciidoctor::SafeMode::SAFE, :attributes => {'docdir' => File.dirname(__FILE__)} assert_equal '../..//fixtures/./../../fixtures', doc.attributes['imagesdir'] output = doc.render # image target resolves to fixtures/dot.gif relative to docdir (which is explicitly set to the directory of this file) # the reference cannot fall outside of the document directory in safe mode assert_xpath '//img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Dot"]', output, 1 end test 'cleans reference to ancestor directories in target before reading image if safe mode level is at least SAFE' do input = <<-EOS :data-uri: :imagesdir: ./ image::../..//fixtures/./../../fixtures/dot.gif[Dot] EOS doc = document_from_string input, :safe => Asciidoctor::SafeMode::SAFE, :attributes => {'docdir' => File.dirname(__FILE__)} assert_equal './', doc.attributes['imagesdir'] output = doc.render # image target resolves to fixtures/dot.gif relative to docdir (which is explicitly set to the directory of this file) # the reference cannot fall outside of the document directory in safe mode assert_xpath '//img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Dot"]', output, 1 end end context 'Media' do test 'should detect and render video macro' do input = <<-EOS video::cats-vs-dogs.avi[] EOS output = render_embedded_string input assert_css 'video', output, 1 assert_css 'video[src="cats-vs-dogs.avi"]', output, 1 end test 'should detect and render video macro with positional attributes for poster and dimensions' do input = <<-EOS video::cats-vs-dogs.avi[cats-and-dogs.png, 200, 300] EOS output = render_embedded_string input assert_css 'video', output, 1 assert_css 'video[src="cats-vs-dogs.avi"]', output, 1 assert_css 'video[poster="cats-and-dogs.png"]', output, 1 assert_css 'video[width="200"]', output, 1 assert_css 'video[height="300"]', output, 1 end test 'video macro should honor all options' do input = <<-EOS video::cats-vs-dogs.avi[options="autoplay,nocontrols,loop"] EOS output = render_embedded_string input assert_css 'video', output, 1 assert_css 'video[autoplay]', output, 1 assert_css 'video:not([controls])', output, 1 assert_css 'video[loop]', output, 1 end test 'video macro should add time range anchor with start time if start attribute is set' do input = <<-EOS video::cats-vs-dogs.avi[start="30"] EOS output = render_embedded_string input assert_css 'video', output, 1 assert_xpath '//video[@src="cats-vs-dogs.avi#t=30"]', output, 1 end test 'video macro should add time range anchor with end time if end attribute is set' do input = <<-EOS video::cats-vs-dogs.avi[end="30"] EOS output = render_embedded_string input assert_css 'video', output, 1 assert_xpath '//video[@src="cats-vs-dogs.avi#t=,30"]', output, 1 end test 'video macro should add time range anchor with start and end time if start and end attributes are set' do input = <<-EOS 
video::cats-vs-dogs.avi[start="30",end="60"] EOS output = render_embedded_string input assert_css 'video', output, 1 assert_xpath '//video[@src="cats-vs-dogs.avi#t=30,60"]', output, 1 end test 'video macro should use imagesdir attribute to resolve target and poster' do input = <<-EOS :imagesdir: assets video::cats-vs-dogs.avi[cats-and-dogs.png, 200, 300] EOS output = render_embedded_string input assert_css 'video', output, 1 assert_css 'video[src="assets/cats-vs-dogs.avi"]', output, 1 assert_css 'video[poster="assets/cats-and-dogs.png"]', output, 1 assert_css 'video[width="200"]', output, 1 assert_css 'video[height="300"]', output, 1 end test 'video macro should not use imagesdir attribute to resolve target if target is a URL' do input = <<-EOS :imagesdir: assets video::http://example.org/videos/cats-vs-dogs.avi[] EOS output = render_embedded_string input assert_css 'video', output, 1 assert_css 'video[src="http://example.org/videos/cats-vs-dogs.avi"]', output, 1 end test 'video macro should output custom HTML with iframe for vimeo service' do input = <<-EOS video::67480300[vimeo, 400, 300, start=60, options=autoplay] EOS output = render_embedded_string input assert_css 'video', output, 0 assert_css 'iframe', output, 1 assert_css 'iframe[src="https://player.vimeo.com/video/67480300#at=60?autoplay=1"]', output, 1 assert_css 'iframe[width="400"]', output, 1 assert_css 'iframe[height="300"]', output, 1 end test 'video macro should output custom HTML with iframe for youtube service' do input = <<-EOS video::U8GBXvdmHT4/PLg7s6cbtAD15Das5LK9mXt_g59DLWxKUe[youtube, 640, 360, start=60, options="autoplay,modest", theme=light] EOS output = render_embedded_string input assert_css 'video', output, 0 assert_css 'iframe', output, 1 assert_css 'iframe[src="https://www.youtube.com/embed/U8GBXvdmHT4?rel=0&start=60&autoplay=1&list=PLg7s6cbtAD15Das5LK9mXt_g59DLWxKUe&modestbranding=1&theme=light"]', output, 1 assert_css 'iframe[width="640"]', output, 1 assert_css 'iframe[height="360"]', output, 1 end test 'video macro should output custom HTML with iframe for youtube service with dynamic playlist' do input = <<-EOS video::SCZF6I-Rc4I,AsKGOeonbIs,HwrPhOp6-aM[youtube, 640, 360, start=60, options=autoplay] EOS output = render_embedded_string input assert_css 'video', output, 0 assert_css 'iframe', output, 1 assert_css 'iframe[src="https://www.youtube.com/embed/SCZF6I-Rc4I?rel=0&start=60&autoplay=1&playlist=AsKGOeonbIs,HwrPhOp6-aM"]', output, 1 assert_css 'iframe[width="640"]', output, 1 assert_css 'iframe[height="360"]', output, 1 end test 'should detect and render audio macro' do input = <<-EOS audio::podcast.mp3[] EOS output = render_embedded_string input assert_css 'audio', output, 1 assert_css 'audio[src="podcast.mp3"]', output, 1 end test 'audio macro should use imagesdir attribute to resolve target' do input = <<-EOS :imagesdir: assets audio::podcast.mp3[] EOS output = render_embedded_string input assert_css 'audio', output, 1 assert_css 'audio[src="assets/podcast.mp3"]', output, 1 end test 'audio macro should not use imagesdir attribute to resolve target if target is a URL' do input = <<-EOS :imagesdir: assets video::http://example.org/podcast.mp3[] EOS output = render_embedded_string input assert_css 'video', output, 1 assert_css 'video[src="http://example.org/podcast.mp3"]', output, 1 end test 'audio macro should honor all options' do input = <<-EOS audio::podcast.mp3[options="autoplay,nocontrols,loop"] EOS output = render_embedded_string input assert_css 'audio', output, 1 assert_css 'audio[autoplay]', 
output, 1 assert_css 'audio:not([controls])', output, 1 assert_css 'audio[loop]', output, 1 end end context 'Admonition icons' do test 'can resolve icon relative to default iconsdir' do input = <<-EOS :icons: [TIP] You can use icons for admonitions by setting the 'icons' attribute. EOS output = render_string input, :safe => Asciidoctor::SafeMode::SERVER assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="./images/icons/tip.png"][@alt="Tip"]', output, 1 end test 'can resolve icon relative to custom iconsdir' do input = <<-EOS :icons: :iconsdir: icons [TIP] You can use icons for admonitions by setting the 'icons' attribute. EOS output = render_string input, :safe => Asciidoctor::SafeMode::SERVER assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="icons/tip.png"][@alt="Tip"]', output, 1 end test 'should add file extension to custom icon if not specified' do input = <<-EOS :icons: font :iconsdir: images/icons [TIP,icon=a] Override the icon of an admonition block using an attribute EOS output = render_string input, :safe => Asciidoctor::SafeMode::SERVER assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="images/icons/a.png"]', output, 1 end test 'embeds base64-encoded data uri of icon when data-uri attribute is set and safe mode level is less than SECURE' do input = <<-EOS :icons: :iconsdir: fixtures :icontype: gif :data-uri: [TIP] You can use icons for admonitions by setting the 'icons' attribute. EOS output = render_string input, :safe => Asciidoctor::SafeMode::SAFE, :attributes => {'docdir' => File.dirname(__FILE__)} assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Tip"]', output, 1 end test 'does not embed base64-encoded data uri of icon when safe mode level is SECURE or greater' do input = <<-EOS :icons: :iconsdir: fixtures :icontype: gif :data-uri: [TIP] You can use icons for admonitions by setting the 'icons' attribute. EOS output = render_string input, :attributes => {'icons' => ''} assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="fixtures/tip.gif"][@alt="Tip"]', output, 1 end test 'cleans reference to ancestor directories before reading icon if safe mode level is at least SAFE' do input = <<-EOS :icons: :iconsdir: ../fixtures :icontype: gif :data-uri: [TIP] You can use icons for admonitions by setting the 'icons' attribute. EOS output = render_string input, :safe => Asciidoctor::SafeMode::SAFE, :attributes => {'docdir' => File.dirname(__FILE__)} assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="][@alt="Tip"]', output, 1 end test 'should import Font Awesome and use font-based icons when value of icons attribute is font' do input = <<-EOS :icons: font [TIP] You can use icons for admonitions by setting the 'icons' attribute. 
EOS output = render_string input, :safe => Asciidoctor::SafeMode::SERVER assert_css 'html > head > link[rel="stylesheet"][href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.6.3/css/font-awesome.min.css"]', output, 1 assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/i[@class="fa icon-tip"]', output, 1 end test 'font-based icon should not override icon specified on admonition' do input = <<-EOS :icons: font :iconsdir: images/icons [TIP,icon=a.png] Override the icon of an admonition block using an attribute EOS output = render_string input, :safe => Asciidoctor::SafeMode::SERVER assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/i[@class="fa icon-tip"]', output, 0 assert_xpath '//*[@class="admonitionblock tip"]//*[@class="icon"]/img[@src="images/icons/a.png"]', output, 1 end test 'should use http uri scheme for assets when asset-uri-scheme is http' do input = <<-EOS :asset-uri-scheme: http :icons: font :source-highlighter: highlightjs TIP: You can control the URI scheme used for assets with the asset-uri-scheme attribute [source,ruby] puts "AsciiDoc, FTW!" EOS output = render_string input, :safe => Asciidoctor::SafeMode::SAFE assert_css 'html > head > link[rel="stylesheet"][href="http://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.6.3/css/font-awesome.min.css"]', output, 1 assert_css 'html > body > script[src="http://cdnjs.cloudflare.com/ajax/libs/highlight.js/8.9.1/highlight.min.js"]', output, 1 end test 'should use no uri scheme for assets when asset-uri-scheme is blank' do input = <<-EOS :asset-uri-scheme: :icons: font :source-highlighter: highlightjs TIP: You can control the URI scheme used for assets with the asset-uri-scheme attribute [source,ruby] puts "AsciiDoc, FTW!" EOS output = render_string input, :safe => Asciidoctor::SafeMode::SAFE assert_css 'html > head > link[rel="stylesheet"][href="//cdnjs.cloudflare.com/ajax/libs/font-awesome/4.6.3/css/font-awesome.min.css"]', output, 1 assert_css 'html > body > script[src="//cdnjs.cloudflare.com/ajax/libs/highlight.js/8.9.1/highlight.min.js"]', output, 1 end end context 'Image paths' do test 'restricts access to ancestor directories when safe mode level is at least SAFE' do input = <<-EOS image::asciidoctor.png[Asciidoctor] EOS basedir = File.expand_path File.dirname(__FILE__) block = block_from_string input, :attributes => {'docdir' => basedir} doc = block.document assert doc.safe >= Asciidoctor::SafeMode::SAFE assert_equal File.join(basedir, 'images'), block.normalize_asset_path('images') assert_equal File.join(basedir, 'etc/images'), block.normalize_asset_path("#{disk_root}etc/images") assert_equal File.join(basedir, 'images'), block.normalize_asset_path('../../images') end test 'does not restrict access to ancestor directories when safe mode is disabled' do input = <<-EOS image::asciidoctor.png[Asciidoctor] EOS basedir = File.expand_path File.dirname(__FILE__) block = block_from_string input, :safe => Asciidoctor::SafeMode::UNSAFE, :attributes => {'docdir' => basedir} doc = block.document assert doc.safe == Asciidoctor::SafeMode::UNSAFE assert_equal File.join(basedir, 'images'), block.normalize_asset_path('images') absolute_path = "#{disk_root}etc/images" assert_equal absolute_path, block.normalize_asset_path(absolute_path) assert_equal File.expand_path(File.join(basedir, '../../images')), block.normalize_asset_path('../../images') end end context 'Source code' do test 'should support fenced code block using backticks' do input = <<-EOS ``` puts "Hello, World!" 
``` EOS output = render_embedded_string input assert_css '.listingblock', output, 1 assert_css '.listingblock pre code', output, 1 assert_css '.listingblock pre code:not([class])', output, 1 end test 'should not recognize fenced code blocks with more than three delimiters' do input = <<-EOS ````ruby puts "Hello, World!" ```` ~~~~ javascript alert("Hello, World!") ~~~~ EOS output = render_embedded_string input assert_css '.listingblock', output, 0 end test 'should support fenced code blocks with languages' do input = <<-EOS ```ruby puts "Hello, World!" ``` ``` javascript alert("Hello, World!") ``` EOS output = render_embedded_string input assert_css '.listingblock', output, 2 assert_css '.listingblock pre code.language-ruby[data-lang=ruby]', output, 1 assert_css '.listingblock pre code.language-javascript[data-lang=javascript]', output, 1 end test 'should support fenced code blocks with languages and numbering' do input = <<-EOS ```ruby,numbered puts "Hello, World!" ``` ``` javascript, numbered alert("Hello, World!") ``` EOS output = render_embedded_string input assert_css '.listingblock', output, 2 assert_css '.listingblock pre code.language-ruby[data-lang=ruby]', output, 1 assert_css '.listingblock pre code.language-javascript[data-lang=javascript]', output, 1 end test 'should highlight source if source-highlighter attribute is coderay' do input = <<-EOS :source-highlighter: coderay [source, ruby] ---- require 'coderay' html = CodeRay.scan("puts 'Hello, world!'", :ruby).div(:line_numbers => :table) ---- EOS output = render_string input, :safe => Asciidoctor::SafeMode::SAFE, :linkcss_default => true assert_xpath '//pre[@class="CodeRay highlight"]/code[@data-lang="ruby"]//span[@class = "constant"][text() = "CodeRay"]', output, 1 assert_match(/\.CodeRay *\{/, output) end test 'should read source language from source-language document attribute if not specified on source block' do input = <<-EOS :source-highlighter: coderay :source-language: ruby [source] ---- require 'coderay' html = CodeRay.scan("puts 'Hello, world!'", :ruby).div(:line_numbers => :table) ---- EOS output = render_embedded_string input, :safe => Asciidoctor::SafeMode::SAFE, :linkcss_default => true assert_xpath '//pre[@class="CodeRay highlight"]/code[@data-lang="ruby"]//span[@class = "constant"][text() = "CodeRay"]', output, 1 end test 'should rename document attribute named language to source-language when compat-mode is enabled' do input = <<-EOS :language: ruby {source-language} EOS assert_equal 'ruby', render_string(input, :doctype => :inline, :attributes => {'compat-mode' => ''}) input = <<-EOS :language: ruby {source-language} EOS assert_equal '{source-language}', render_string(input, :doctype => :inline) end test 'should replace callout marks but not highlight them if source-highlighter attribute is coderay' do input = <<-EOS :source-highlighter: coderay [source, ruby] ---- require 'coderay' # <1> html = CodeRay.scan("puts 'Hello, world!'", :ruby).div(:line_numbers => :table) # <2> puts html # <3> <4> exit 0 # <5><6> ---- <1> Load library <2> Highlight source <3> Print to stdout <4> Redirect to a file to capture output <5> Exit program <6> Reports success EOS output = render_embedded_string input, :safe => Asciidoctor::SafeMode::SAFE assert_match(/coderay<\/span>.* \(1\)<\/b>$/, output) assert_match(/puts 'Hello, world!'<\/span>.* \(2\)<\/b>$/, output) assert_match(/puts html * \(3\)<\/b> \(4\)<\/b>$/, output) assert_match(/exit.* \(5\)<\/b> \(6\)<\/b><\/code>/, output) end test 'should restore callout marks to correct 
lines if source highlighter is coderay and table line numbering is enabled' do input = <<-EOS :source-highlighter: coderay :coderay-linenums-mode: table [source, ruby, numbered] ---- require 'coderay' # <1> html = CodeRay.scan("puts 'Hello, world!'", :ruby).div(:line_numbers => :table) # <2> puts html # <3> <4> exit 0 # <5><6> ---- <1> Load library <2> Highlight source <3> Print to stdout <4> Redirect to a file to capture output <5> Exit program <6> Reports success EOS output = render_embedded_string input, :safe => Asciidoctor::SafeMode::SAFE assert_match(/coderay<\/span>.* \(1\)<\/b>$/, output) assert_match(/puts 'Hello, world!'<\/span>.* \(2\)<\/b>$/, output) assert_match(/puts html * \(3\)<\/b> \(4\)<\/b>$/, output) assert_match(/exit.* \(5\)<\/b> \(6\)<\/b><\/pre>/, output) end test 'should preserve passthrough placeholders when highlighting source using coderay' do input = <<-EOS :source-highlighter: coderay [source,java] [subs="specialcharacters,macros,callouts"] ---- public class Printer { public static void main(String[] args) { System.pass:quotes[_out_].println("*asterisks* make text pass:quotes[*bold*]"); } } ---- EOS output = render_string input, :safe => Asciidoctor::SafeMode::SAFE assert_match(/\.out<\/em>\./, output, 1) assert_match(/\*asterisks\*/, output, 1) assert_match(/bold<\/strong>/, output, 1) assert !output.include?(Asciidoctor::Substitutors::PASS_START) end test 'should link to CodeRay stylesheet if source-highlighter is coderay and linkcss is set' do input = <<-EOS :source-highlighter: coderay [source, ruby] ---- require 'coderay' html = CodeRay.scan("puts 'Hello, world!'", :ruby).div(:line_numbers => :table) ---- EOS output = render_string input, :safe => Asciidoctor::SafeMode::SAFE, :attributes => {'linkcss' => ''} assert_xpath '//pre[@class="CodeRay highlight"]/code[@data-lang="ruby"]//span[@class = "constant"][text() = "CodeRay"]', output, 1 assert_css 'link[rel="stylesheet"][href="./coderay-asciidoctor.css"]', output, 1 end test 'should highlight source inline if source-highlighter attribute is coderay and coderay-css is style' do input = <<-EOS :source-highlighter: coderay :coderay-css: style [source, ruby] ---- require 'coderay' html = CodeRay.scan("puts 'Hello, world!'", :ruby).div(:line_numbers => :table) ---- EOS output = render_string input, :safe => Asciidoctor::SafeMode::SAFE, :linkcss_default => true assert_xpath '//pre[@class="CodeRay highlight"]/code[@data-lang="ruby"]//span[@style = "color:#036;font-weight:bold"][text() = "CodeRay"]', output, 1 refute_match(/\.CodeRay \{/, output) end test 'should include remote highlight.js assets if source-highlighter attribute is highlightjs' do input = <<-EOS :source-highlighter: highlightjs [source, javascript] ---- ---- EOS output = render_string input, :safe => Asciidoctor::SafeMode::SAFE assert_match(/ {'source-highlighter' => 'prettify'} assert_css 'pre[class="prettyprint highlight"]', output, 1 assert_css 'pre > code.language-ruby[data-lang="ruby"]', output, 1 end test 'should set lang attribute on pre when source-highlighter is html-pipeline' do input = <<-EOS [source,ruby] ---- filters = [ HTML::Pipeline::AsciiDocFilter, HTML::Pipeline::SanitizationFilter, HTML::Pipeline::SyntaxHighlightFilter ] puts HTML::Pipeline.new(filters, {}).call(input)[:output] ---- EOS output = render_string input, :attributes => {'source-highlighter' => 'html-pipeline'} assert_css 'pre[lang="ruby"]', output, 1 assert_css 'pre[lang="ruby"] > code', output, 1 assert_css 'pre[class]', output, 0 assert_css 'code[class]', output, 
0 end test 'document cannot turn on source highlighting if safe mode is at least SERVER' do input = <<-EOS :source-highlighter: coderay EOS doc = document_from_string input, :safe => Asciidoctor::SafeMode::SERVER assert doc.attributes['source-highlighter'].nil? end end context 'Abstract and Part Intro' do test 'should make abstract on open block without title a quote block for article' do input = <<-EOS = Article [abstract] -- This article is about stuff. And other stuff. -- == Section One content EOS output = render_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock.abstract', output, 1 assert_css '#preamble .quoteblock', output, 1 assert_css '.quoteblock > blockquote', output, 1 assert_css '.quoteblock > blockquote > .paragraph', output, 2 end test 'should make abstract on open block with title a quote block with title for article' do input = <<-EOS = Article .My abstract [abstract] -- This article is about stuff. -- == Section One content EOS output = render_string input assert_css '.quoteblock', output, 1 assert_css '.quoteblock.abstract', output, 1 assert_css '#preamble .quoteblock', output, 1 assert_css '.quoteblock > .title', output, 1 assert_css '.quoteblock > .title + blockquote', output, 1 assert_css '.quoteblock > .title + blockquote > .paragraph', output, 1 end test 'should allow abstract in document with title if doctype is book' do input = <<-EOS = Book :doctype: book [abstract] Abstract for book with title is valid EOS output = render_string input assert_css '.abstract', output, 1 end test 'should not allow abstract as direct child of document if doctype is book' do input = <<-EOS :doctype: book [abstract] Abstract for book without title is invalid. EOS warnings = nil output = nil redirect_streams do |stdout, stderr| output = render_string input warnings = stderr.string end assert_css '.abstract', output, 0 refute_nil warnings assert_match(/WARNING:.*abstract block/, warnings) end test 'should make abstract on open block without title rendered to DocBook' do input = <<-EOS = Article [abstract] -- This article is about stuff. And other stuff. -- EOS output = render_string input, :backend => 'docbook' assert_css 'abstract', output, 1 assert_css 'abstract > simpara', output, 2 end test 'should make abstract on open block with title rendered to DocBook' do input = <<-EOS = Article .My abstract [abstract] -- This article is about stuff. -- EOS output = render_string input, :backend => 'docbook' assert_css 'abstract', output, 1 assert_css 'abstract > title', output, 1 assert_css 'abstract > title + simpara', output, 1 end test 'should allow abstract in document with title if doctype is book rendered to DocBook' do input = <<-EOS = Book :doctype: book [abstract] Abstract for book with title is valid EOS output = render_string input, :backend => 'docbook' assert_css 'abstract', output, 1 end test 'should not allow abstract as direct child of document if doctype is book rendered to DocBook' do input = <<-EOS :doctype: book [abstract] Abstract for book is invalid. EOS output = nil warnings = nil redirect_streams do |stdout, stderr| output = render_string input, :backend => 'docbook' warnings = stderr.string end assert_css 'abstract', output, 0 refute_nil warnings assert_match(/WARNING:.*abstract block/, warnings) end # TODO partintro shouldn't be recognized if doctype is not book, should be in proper place test 'should accept partintro on open block without title' do input = <<-EOS = Book :doctype: book = Part 1 [partintro] -- This is a part intro. 
It can have multiple paragraphs. -- == Chapter 1 content EOS output = render_string input assert_css '.openblock', output, 1 assert_css '.openblock.partintro', output, 1 assert_css '.openblock .title', output, 0 assert_css '.openblock .content', output, 1 assert_xpath %(//h1[@id="_part_1"]/following-sibling::*[#{contains_class(:openblock)}]), output, 1 assert_xpath %(//*[#{contains_class(:openblock)}]/*[@class="content"]/*[@class="paragraph"]), output, 2 end test 'should accept partintro on open block with title' do input = <<-EOS = Book :doctype: book = Part 1 .Intro title [partintro] -- This is a part intro with a title. -- == Chapter 1 content EOS output = render_string input assert_css '.openblock', output, 1 assert_css '.openblock.partintro', output, 1 assert_css '.openblock .title', output, 1 assert_css '.openblock .content', output, 1 assert_xpath %(//h1[@id="_part_1"]/following-sibling::*[#{contains_class(:openblock)}]), output, 1 assert_xpath %(//*[#{contains_class(:openblock)}]/*[@class="title"][text() = "Intro title"]), output, 1 assert_xpath %(//*[#{contains_class(:openblock)}]/*[@class="content"]/*[@class="paragraph"]), output, 1 end test 'should exclude partintro if not a child of part' do input = <<-EOS = Book :doctype: book [partintro] part intro paragraph EOS output = render_string input assert_css '.partintro', output, 0 end test 'should not allow partintro unless doctype is book' do input = <<-EOS [partintro] part intro paragraph EOS output = render_string input assert_css '.partintro', output, 0 end test 'should accept partintro on open block without title rendered to DocBook' do input = <<-EOS = Book :doctype: book = Part 1 [partintro] -- This is a part intro. It can have multiple paragraphs. -- == Chapter 1 content EOS output = render_string input, :backend => 'docbook45' assert_css 'partintro', output, 1 assert_css 'part#_part_1 > partintro', output, 1 assert_css 'partintro > simpara', output, 2 end test 'should accept partintro on open block with title rendered to DocBook' do input = <<-EOS = Book :doctype: book = Part 1 .Intro title [partintro] -- This is a part intro with a title. -- == Chapter 1 content EOS output = render_string input, :backend => 'docbook45' assert_css 'partintro', output, 1 assert_css 'part#_part_1 > partintro', output, 1 assert_css 'partintro > title', output, 1 assert_css 'partintro > title + simpara', output, 1 end test 'should exclude partintro if not a child of part rendered to DocBook' do input = <<-EOS = Book :doctype: book [partintro] part intro paragraph EOS output = render_string input, :backend => 'docbook' assert_css 'partintro', output, 0 end test 'should not allow partintro unless doctype is book rendered to DocBook' do input = <<-EOS [partintro] part intro paragraph EOS output = render_string input, :backend => 'docbook' assert_css 'partintro', output, 0 end end context 'Substitutions' do test 'should be able to append subs to default block substitution list' do input = <<-EOS :application: Asciidoctor [subs="+attributes,+macros"] .... {application} .... EOS doc = document_from_string input block = doc.blocks.first assert_equal [:specialcharacters, :attributes, :macros], block.subs end test 'should be able to prepend subs to default block substitution list' do input = <<-EOS :application: Asciidoctor [subs="attributes+"] .... {application} .... 
      EOS

      doc = document_from_string input
      block = doc.blocks.first
      assert_equal [:attributes, :specialcharacters], block.subs
    end

    test 'should be able to remove subs from default block substitution list' do
      input = <<-EOS
[subs="-quotes,-replacements"]
content
      EOS

      doc = document_from_string input
      block = doc.blocks.first
      assert_equal [:specialcharacters, :attributes, :macros, :post_replacements], block.subs
    end

    test 'should be able to prepend, append and remove subs from default block substitution list' do
      input = <<-EOS
:application: asciidoctor

[subs="attributes+,-verbatim,+specialcharacters,+macros"]
....
http://{application}.org[{gt}{gt}] <1>
....
      EOS

      doc = document_from_string input, :header_footer => false
      block = doc.blocks.first
      assert_equal [:attributes, :specialcharacters, :macros], block.subs
      result = doc.render
      assert result.include?('
    >> <1>
    ') end test 'should be able to set subs then modify them' do input = <<-EOS [subs="verbatim,-callouts"] _hey now_ <1> EOS doc = document_from_string input, :header_footer => false block = doc.blocks.first assert_equal [:specialcharacters], block.subs result = doc.render assert result.include?('_hey now_ <1>') end end context 'References' do test 'should not recognize block anchor with illegal id characters' do input = <<-EOS [[illegal$id,Reference Text]] ---- content ---- EOS doc = document_from_string input block = doc.blocks.first assert_nil block.id assert_nil(block.attr 'reftext') assert !doc.references[:ids].has_key?('illegal$id') end test 'should use specified id and reftext when registering block reference' do input = <<-EOS [[debian,Debian Install]] .Installation on Debian ---- $ apt-get install asciidoctor ---- EOS doc = document_from_string input reftext = doc.references[:ids]['debian'] refute_nil reftext assert_equal 'Debian Install', reftext end test 'should allow square brackets in block reference text' do input = <<-EOS [[debian,[Debian] Install]] .Installation on Debian ---- $ apt-get install asciidoctor ---- EOS doc = document_from_string input reftext = doc.references[:ids]['debian'] refute_nil reftext assert_equal '[Debian] Install', reftext end test 'should allow comma in block reference text' do input = <<-EOS [[debian, Debian, Ubuntu]] .Installation on Debian ---- $ apt-get install asciidoctor ---- EOS doc = document_from_string input reftext = doc.references[:ids]['debian'] refute_nil reftext assert_equal 'Debian, Ubuntu', reftext end test 'should use specified reftext when registering block reference' do input = <<-EOS [[debian]] [reftext="Debian Install"] .Installation on Debian ---- $ apt-get install asciidoctor ---- EOS doc = document_from_string input reftext = doc.references[:ids]['debian'] refute_nil reftext assert_equal 'Debian Install', reftext end end end asciidoctor-1.5.5/test/converter_test.rb000066400000000000000000000406101277513741400204130ustar00rootroot00000000000000# encoding: UTF-8 unless defined? ASCIIDOCTOR_PROJECT_DIR $: << File.dirname(__FILE__); $:.uniq! require 'test_helper' end require 'tilt' unless defined? ::Tilt context 'Converter' do context 'View options' do test 'should set Haml format to html5 for html5 backend' do doc = Asciidoctor::Document.new [], :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'haml'), :template_cache => false assert doc.converter.is_a?(Asciidoctor::Converter::CompositeConverter) selected = doc.converter.find_converter('paragraph') assert selected.is_a? Asciidoctor::Converter::TemplateConverter assert selected.templates['paragraph'].is_a? Tilt::HamlTemplate assert_equal :html5, selected.templates['paragraph'].options[:format] end test 'should set Haml format to xhtml for docbook backend' do doc = Asciidoctor::Document.new [], :backend => 'docbook45', :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'haml'), :template_cache => false assert doc.converter.is_a?(Asciidoctor::Converter::CompositeConverter) selected = doc.converter.find_converter('paragraph') assert selected.is_a? Asciidoctor::Converter::TemplateConverter assert selected.templates['paragraph'].is_a? 
Tilt::HamlTemplate
      assert_equal :xhtml, selected.templates['paragraph'].options[:format]
    end

    test 'should set Slim format to html for html5 backend' do
      doc = Asciidoctor::Document.new [], :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'slim'), :template_cache => false
      assert doc.converter.is_a?(Asciidoctor::Converter::CompositeConverter)
      selected = doc.converter.find_converter('paragraph')
      assert selected.is_a? Asciidoctor::Converter::TemplateConverter
      assert selected.templates['paragraph'].is_a? Slim::Template
      assert_equal :html, selected.templates['paragraph'].options[:format]
    end

    test 'should set Slim format to nil for docbook backend' do
      doc = Asciidoctor::Document.new [], :backend => 'docbook45', :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'slim'), :template_cache => false
      assert doc.converter.is_a?(Asciidoctor::Converter::CompositeConverter)
      selected = doc.converter.find_converter('paragraph')
      assert selected.is_a? Asciidoctor::Converter::TemplateConverter
      assert selected.templates['paragraph'].is_a? Slim::Template
      assert_nil selected.templates['paragraph'].options[:format]
    end

    test 'should set safe mode of Slim AsciiDoc engine to match document safe mode when Slim >= 3' do
      doc = Asciidoctor::Document.new [], :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'slim'), :template_cache => false, :safe => :unsafe
      assert doc.converter.is_a?(Asciidoctor::Converter::CompositeConverter)
      selected = doc.converter.find_converter('paragraph')
      assert selected.is_a? Asciidoctor::Converter::TemplateConverter
      slim_asciidoc_opts = selected.instance_variable_get(:@engine_options)[:slim][:asciidoc]
      if ::Slim::VERSION >= '3.0'
        assert_equal({ :safe => Asciidoctor::SafeMode::UNSAFE }, slim_asciidoc_opts)
      else
        assert_nil slim_asciidoc_opts
      end
    end

    test 'should support custom template engine options for known engine' do
      doc = Asciidoctor::Document.new [], :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'slim'), :template_cache => false, :template_engine_options => { :slim => { :pretty => true } }
      assert doc.converter.is_a?(Asciidoctor::Converter::CompositeConverter)
      selected = doc.converter.find_converter('paragraph')
      assert selected.is_a? Asciidoctor::Converter::TemplateConverter
      assert selected.templates['paragraph'].is_a? Slim::Template
      assert_equal true, selected.templates['paragraph'].options[:pretty]
    end

    test 'should support custom template engine options' do
      doc = Asciidoctor::Document.new [], :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'slim'), :template_cache => false, :template_engine_options => { :slim => { :pretty => true } }
      assert doc.converter.is_a?(Asciidoctor::Converter::CompositeConverter)
      selected = doc.converter.find_converter('paragraph')
      assert selected.is_a? Asciidoctor::Converter::TemplateConverter
      assert selected.templates['paragraph'].is_a?
Slim::Template
      assert_equal false, selected.templates['paragraph'].options[:sort_attrs]
      assert_equal true, selected.templates['paragraph'].options[:pretty]
    end
  end

  context 'Custom backends' do
    test 'should load Haml templates for default backend' do
      doc = Asciidoctor::Document.new [], :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'haml'), :template_cache => false
      assert doc.converter.is_a?(Asciidoctor::Converter::CompositeConverter)
      ['paragraph', 'sidebar'].each do |node_name|
        selected = doc.converter.find_converter node_name
        assert selected.is_a? Asciidoctor::Converter::TemplateConverter
        assert selected.templates[node_name].is_a? Tilt::HamlTemplate
        assert_equal %(block_#{node_name}.html.haml), File.basename(selected.templates[node_name].file)
      end
    end

    test 'should set outfilesuffix according to backend info' do
      doc = Asciidoctor.load 'content'
      doc.render
      assert_equal '.html', doc.attributes['outfilesuffix']

      doc = Asciidoctor.load 'content', :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'haml'), :template_cache => false
      doc.render
      assert_equal '.html', doc.attributes['outfilesuffix']
    end

    test 'should not override outfilesuffix attribute if locked' do
      doc = Asciidoctor.load 'content', :attributes => {'outfilesuffix' => '.foo'}
      doc.render
      assert_equal '.foo', doc.attributes['outfilesuffix']

      doc = Asciidoctor.load 'content', :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'haml'), :template_cache => false, :attributes => {'outfilesuffix' => '.foo'}
      doc.render
      assert_equal '.foo', doc.attributes['outfilesuffix']
    end

    test 'should load Haml templates for docbook45 backend' do
      doc = Asciidoctor::Document.new [], :backend => 'docbook45', :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'haml'), :template_cache => false
      assert doc.converter.is_a?(Asciidoctor::Converter::CompositeConverter)
      ['paragraph'].each do |node_name|
        selected = doc.converter.find_converter node_name
        assert selected.is_a? Asciidoctor::Converter::TemplateConverter
        assert selected.templates[node_name].is_a? Tilt::HamlTemplate
        assert_equal %(block_#{node_name}.xml.haml), File.basename(selected.templates[node_name].file)
      end
    end

    test 'should use Haml templates in place of built-in templates' do
      input = <<-EOS
= Document Title
Author Name

== Section One

Sample paragraph

.Related
****
Sidebar content
****
      EOS

      output = render_embedded_string input, :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'haml'), :template_cache => false
      assert_xpath '/*[@class="sect1"]/*[@class="sectionbody"]/p', output, 1
      assert_xpath '//aside', output, 1
      assert_xpath '/*[@class="sect1"]/*[@class="sectionbody"]/p/following-sibling::aside', output, 1
      assert_xpath '//aside/header/h1[text()="Related"]', output, 1
      assert_xpath '//aside/header/following-sibling::p[text()="Sidebar content"]', output, 1
    end

    test 'should use built-in global cache to cache templates' do
      begin
        # clear out any cache, just to be sure
        Asciidoctor::Converter::TemplateConverter.clear_caches if defined? Asciidoctor::Converter::TemplateConverter
        template_dir = File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'haml')
        doc = Asciidoctor::Document.new [], :template_dir => template_dir
        doc.converter
        caches = Asciidoctor::Converter::TemplateConverter.caches
        if defined? ::ThreadSafe::Cache
          assert caches[:templates].is_a?(::ThreadSafe::Cache)
          assert !caches[:templates].empty?
paragraph_template_before = caches[:templates].values.find {|t| File.basename(t.file) == 'block_paragraph.html.haml' } assert !paragraph_template_before.nil? # should use cache doc = Asciidoctor::Document.new [], :template_dir => template_dir template_converter = doc.converter.find_converter('paragraph') paragraph_template_after = template_converter.templates['paragraph'] assert !paragraph_template_after.nil? assert paragraph_template_before.eql?(paragraph_template_after) # should not use cache doc = Asciidoctor::Document.new [], :template_dir => template_dir, :template_cache => false template_converter = doc.converter.find_converter('paragraph') paragraph_template_after = template_converter.templates['paragraph'] assert !paragraph_template_after.nil? assert !paragraph_template_before.eql?(paragraph_template_after) else assert caches.empty? end ensure # clean up Asciidoctor::Converter::TemplateConverter.clear_caches if defined? Asciidoctor::Converter::TemplateConverter end end test 'should use custom cache to cache templates' do template_dir = File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'haml') Asciidoctor::PathResolver.new.system_path(File.join(template_dir, 'html5', 'block_paragraph.html.haml'), nil) caches = { :scans => {}, :templates => {} } doc = Asciidoctor::Document.new [], :template_dir => template_dir, :template_cache => caches doc.converter assert !caches[:scans].empty? assert !caches[:templates].empty? paragraph_template = caches[:templates].values.find {|t| File.basename(t.file) == 'block_paragraph.html.haml' } assert !paragraph_template.nil? assert paragraph_template.is_a? ::Tilt::HamlTemplate end test 'should be able to disable template cache' do begin # clear out any cache, just to be sure Asciidoctor::Converter::TemplateConverter.clear_caches if defined? Asciidoctor::Converter::TemplateConverter doc = Asciidoctor::Document.new [], :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'haml'), :template_cache => false doc.converter caches = Asciidoctor::Converter::TemplateConverter.caches assert caches.empty? || caches[:scans].empty? assert caches.empty? || caches[:templates].empty? ensure # clean up Asciidoctor::Converter::TemplateConverter.clear_caches if defined? Asciidoctor::Converter::TemplateConverter end end test 'should load ERB templates using ERBTemplate if eruby is not set' do doc = Asciidoctor::Document.new [], :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'erb'), :template_cache => false assert doc.converter.is_a?(Asciidoctor::Converter::CompositeConverter) ['paragraph'].each do |node_name| selected = doc.converter.find_converter node_name assert selected.is_a? Asciidoctor::Converter::TemplateConverter template = selected.templates[node_name] assert template.is_a? Tilt::ERBTemplate assert !(template.is_a? Tilt::ErubisTemplate) assert template.instance_variable_get('@engine').is_a? ::ERB assert_equal %(block_#{node_name}.html.erb), File.basename(selected.templates[node_name].file) end end test 'should load ERB templates using ErubisTemplate if eruby is set to erubis' do doc = Asciidoctor::Document.new [], :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'erb'), :template_cache => false, :eruby => 'erubis' assert doc.converter.is_a?(Asciidoctor::Converter::CompositeConverter) ['paragraph'].each do |node_name| selected = doc.converter.find_converter node_name assert selected.is_a? 
Asciidoctor::Converter::TemplateConverter template = selected.templates[node_name] assert template.is_a? Tilt::ERBTemplate assert template.is_a? Tilt::ErubisTemplate assert template.instance_variable_get('@engine').is_a? ::Erubis::FastEruby assert_equal %(block_#{node_name}.html.erb), File.basename(selected.templates[node_name].file) end end test 'should load Slim templates for default backend' do doc = Asciidoctor::Document.new [], :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'slim'), :template_cache => false assert doc.converter.is_a?(Asciidoctor::Converter::CompositeConverter) ['paragraph', 'sidebar'].each do |node_name| selected = doc.converter.find_converter node_name assert selected.is_a? Asciidoctor::Converter::TemplateConverter assert selected.templates[node_name].is_a? Slim::Template assert_equal %(block_#{node_name}.html.slim), File.basename(selected.templates[node_name].file) end end test 'should load Slim templates for docbook45 backend' do doc = Asciidoctor::Document.new [], :backend => 'docbook45', :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'slim'), :template_cache => false assert doc.converter.is_a?(Asciidoctor::Converter::CompositeConverter) ['paragraph'].each do |node_name| selected = doc.converter.find_converter node_name assert selected.is_a? Asciidoctor::Converter::TemplateConverter assert selected.templates[node_name].is_a? Slim::Template assert_equal %(block_#{node_name}.xml.slim), File.basename(selected.templates[node_name].file) end end test 'should use Slim templates in place of built-in templates' do input = <<-EOS = Document Title Author Name == Section One Sample paragraph .Related **** Sidebar content **** EOS output = render_embedded_string input, :template_dir => File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends', 'slim'), :template_cache => false assert_xpath '/*[@class="sect1"]/*[@class="sectionbody"]/p', output, 1 assert_xpath '//aside', output, 1 assert_xpath '/*[@class="sect1"]/*[@class="sectionbody"]/p/following-sibling::aside', output, 1 assert_xpath '//aside/header/h1[text()="Related"]', output, 1 assert_xpath '//aside/header/following-sibling::p[text()="Sidebar content"]', output, 1 end test 'should use custom converter if specified' do input = <<-EOS = Document Title preamble == Section content EOS class CustomConverterA def initialize backend, opts = {} end def convert node, name = nil 'document' end def self.converts? 
backend true end end output = render_string input, :converter => CustomConverterA assert 'document', output end test 'should use converter registered for backend' do input = <<-EOS content EOS begin Asciidoctor::Converter::Factory.unregister_all class CustomConverterB include Asciidoctor::Converter register_for 'foobar' def convert node, name = nil 'foobar content' end end converters = Asciidoctor::Converter::Factory.converters assert converters.size == 1 assert converters['foobar'] == CustomConverterB output = render_string input, :backend => 'foobar' assert 'foobar content', output ensure Asciidoctor::Converter::Factory.unregister_all end end test 'should fall back to catch all converter' do input = <<-EOS content EOS begin Asciidoctor::Converter::Factory.unregister_all class CustomConverterC include Asciidoctor::Converter register_for '*' def convert node, name = nil 'foobaz content' end end converters = Asciidoctor::Converter::Factory.converters assert converters['*'] == CustomConverterC output = render_string input, :backend => 'foobaz' assert 'foobaz content', output ensure Asciidoctor::Converter::Factory.unregister_all end end end end asciidoctor-1.5.5/test/document_test.rb000066400000000000000000002354561277513741400202400ustar00rootroot00000000000000# encoding: UTF-8 unless defined? ASCIIDOCTOR_PROJECT_DIR $: << File.dirname(__FILE__); $:.uniq! require 'test_helper' end BUILT_IN_ELEMENTS = %w(admonition audio colist dlist document embedded example floating_title image inline_anchor inline_break inline_button inline_callout inline_footnote inline_image inline_indexterm inline_kbd inline_menu inline_quoted listing literal stem olist open page_break paragraph pass preamble quote section sidebar table thematic_break toc ulist verse video) context 'Document' do context 'Example document' do test 'document title' do doc = example_document(:asciidoc_index) assert_equal 'AsciiDoc Home Page', doc.doctitle assert_equal 'AsciiDoc Home Page', doc.name assert_equal 14, doc.blocks.size assert_equal :preamble, doc.blocks[0].context assert doc.blocks[1].context == :section # verify compat-mode is set when atx-style doctitle is used result = doc.blocks[0].convert assert_xpath %q(//em[text()="Stuart Rackham"]), result, 1 end end context 'Default settings' do test 'safe mode level set to SECURE by default' do doc = empty_document assert_equal Asciidoctor::SafeMode::SECURE, doc.safe end test 'safe mode level set using string' do doc = empty_document :safe => 'server' assert_equal Asciidoctor::SafeMode::SERVER, doc.safe doc = empty_document :safe => 'foo' assert_equal Asciidoctor::SafeMode::SECURE, doc.safe end test 'safe mode level set using symbol' do doc = empty_document :safe => :server assert_equal Asciidoctor::SafeMode::SERVER, doc.safe doc = empty_document :safe => :foo assert_equal Asciidoctor::SafeMode::SECURE, doc.safe end test 'safe mode level set using integer' do doc = empty_document :safe => 10 assert_equal Asciidoctor::SafeMode::SERVER, doc.safe doc = empty_document :safe => 100 assert_equal 100, doc.safe end test 'safe mode attributes are set on document' do doc = empty_document assert_equal Asciidoctor::SafeMode::SECURE, doc.attr('safe-mode-level') assert_equal 'secure', doc.attr('safe-mode-name') assert doc.attr?('safe-mode-secure') assert !doc.attr?('safe-mode-unsafe') assert !doc.attr?('safe-mode-safe') assert !doc.attr?('safe-mode-server') end test 'safe mode level can be set in the constructor' do doc = Asciidoctor::Document.new [], :safe => Asciidoctor::SafeMode::SAFE 
assert_equal Asciidoctor::SafeMode::SAFE, doc.safe end test 'safe model level cannot be modified' do doc = empty_document begin doc.safe = Asciidoctor::SafeMode::UNSAFE flunk 'safe mode property of Asciidoctor::Document should not be writable!' rescue end end test 'toc and sectnums should be enabled by default for DocBook backend' do doc = document_from_string 'content', :backend => 'docbook', :parse => true assert doc.attr?('toc') assert doc.attr?('sectnums') result = doc.convert assert_match('', result) assert_match('', result) end test 'maxdepth attribute should be set on asciidoc-toc and asciidoc-numbered processing instructions in DocBook backend' do doc = document_from_string 'content', :backend => 'docbook', :parse => true, :attributes => {'toclevels' => '1', 'sectnumlevels' => '1' } assert doc.attr?('toc') assert doc.attr?('sectnums') result = doc.convert assert_match('', result) assert_match('', result) end test 'should be able to disable toc and sectnums in document header for DocBook backend' do input = <<-EOS = Document Title :toc!: :sectnums!: EOS doc = document_from_string input, :backend => 'docbook' assert !doc.attr?('toc') assert !doc.attr?('sectnums') end test 'should be able to disable section numbering using numbered attribute in document header for DocBook backend' do input = <<-EOS = Document Title :numbered!: EOS doc = document_from_string input, :backend => 'docbook' assert !doc.attr?('sectnums') end end context 'Load APIs' do test 'should load input file' do sample_input_path = fixture_path('sample.asciidoc') doc = Asciidoctor.load(File.new(sample_input_path), :safe => Asciidoctor::SafeMode::SAFE) assert_equal 'Document Title', doc.doctitle assert_equal File.expand_path(sample_input_path), doc.attr('docfile') assert_equal File.expand_path(File.dirname(sample_input_path)), doc.attr('docdir') end test 'should load input file from filename' do sample_input_path = fixture_path('sample.asciidoc') doc = Asciidoctor.load_file(sample_input_path, :safe => Asciidoctor::SafeMode::SAFE) assert_equal 'Document Title', doc.doctitle assert_equal File.expand_path(sample_input_path), doc.attr('docfile') assert_equal File.expand_path(File.dirname(sample_input_path)), doc.attr('docdir') end test 'should not load invalid file' do sample_input_path = fixture_path('hello-asciidoctor.pdf') exception = assert_raises ArgumentError do Asciidoctor.load_file(sample_input_path, :safe => Asciidoctor::SafeMode::SAFE) end assert_match(/Failed to load AsciiDoc document/, exception.message) # verify we have the correct backtrace (should be in at least first 5 lines) assert_match((RUBY_ENGINE == 'rbx' ? 
/parser\.rb/ : /helpers\.rb/), exception.backtrace[0..4].join("\n")) end if RUBY_MIN_VERSION_1_9 test 'should load input IO' do input = StringIO.new(<<-EOS) Document Title ============== preamble EOS doc = Asciidoctor.load(input, :safe => Asciidoctor::SafeMode::SAFE) assert_equal 'Document Title', doc.doctitle assert !doc.attr?('docfile') assert_equal doc.base_dir, doc.attr('docdir') end test 'should load input string' do input = <<-EOS Document Title ============== preamble EOS doc = Asciidoctor.load(input, :safe => Asciidoctor::SafeMode::SAFE) assert_equal 'Document Title', doc.doctitle assert !doc.attr?('docfile') assert_equal doc.base_dir, doc.attr('docdir') end test 'should load input string array' do input = <<-EOS Document Title ============== preamble EOS doc = Asciidoctor.load(input.lines.entries, :safe => Asciidoctor::SafeMode::SAFE) assert_equal 'Document Title', doc.doctitle assert !doc.attr?('docfile') assert_equal doc.base_dir, doc.attr('docdir') end test 'should accept attributes as array' do # NOTE there's a tab character before idseparator doc = Asciidoctor.load('text', :attributes => %w(toc sectnums source-highlighter=coderay idprefix idseparator=-)) assert doc.attributes.is_a?(Hash) assert doc.attr?('toc') assert_equal '', doc.attr('toc') assert doc.attr?('sectnums') assert_equal '', doc.attr('sectnums') assert doc.attr?('source-highlighter') assert_equal 'coderay', doc.attr('source-highlighter') assert doc.attr?('idprefix') assert_equal '', doc.attr('idprefix') assert doc.attr?('idseparator') assert_equal '-', doc.attr('idseparator') end test 'should accept attributes as empty array' do doc = Asciidoctor.load('text', :attributes => []) assert doc.attributes.is_a?(Hash) end test 'should accept attributes as string' do # NOTE there's a tab character before idseparator doc = Asciidoctor.load('text', :attributes => 'toc sectnums source-highlighter=coderay idprefix idseparator=-') assert doc.attributes.is_a?(Hash) assert doc.attr?('toc') assert_equal '', doc.attr('toc') assert doc.attr?('sectnums') assert_equal '', doc.attr('sectnums') assert doc.attr?('source-highlighter') assert_equal 'coderay', doc.attr('source-highlighter') assert doc.attr?('idprefix') assert_equal '', doc.attr('idprefix') assert doc.attr?('idseparator') assert_equal '-', doc.attr('idseparator') end test 'should accept values containing spaces in attributes string' do # NOTE there's a tab character before self: doc = Asciidoctor.load('text', :attributes => 'idprefix idseparator=- note-caption=Note\ to\ self: toc') assert doc.attributes.is_a?(Hash) assert doc.attr?('idprefix') assert_equal '', doc.attr('idprefix') assert doc.attr?('idseparator') assert_equal '-', doc.attr('idseparator') assert doc.attr?('note-caption') assert_equal "Note to self:", doc.attr('note-caption') end test 'should accept attributes as empty string' do doc = Asciidoctor.load('text', :attributes => '') assert doc.attributes.is_a?(Hash) end test 'should accept attributes as nil' do doc = Asciidoctor.load('text', :attributes => nil) assert doc.attributes.is_a?(Hash) end test 'should accept attributes if hash like' do class Hashish def initialize @table = {'toc' => ''} end def keys @table.keys end def [](key) @table[key] end end doc = Asciidoctor.load('text', :attributes => Hashish.new) assert doc.attributes.is_a?(Hash) assert doc.attributes.has_key?('toc') end test 'should output timestamps by default' do doc = document_from_string 'text', :backend => :html5, :attributes => nil result = doc.convert assert doc.attr?('docdate') refute 
doc.attr? 'reproducible' assert_xpath '//div[@id="footer-text" and contains(string(.//text()), "Last updated")]', result, 1 end test 'should not output timestamps if reproducible attribute is set in HTML 5' do doc = document_from_string 'text', :backend => :html5, :attributes => { 'reproducible' => '' } result = doc.convert assert doc.attr?('docdate') assert doc.attr?('reproducible') assert_xpath '//div[@id="footer-text" and contains(string(.//text()), "Last updated")]', result, 0 end test 'should not output timestamps if reproducible attribute is set in DocBook' do doc = document_from_string 'text', :backend => :docbook, :attributes => { 'reproducible' => '' } result = doc.convert assert doc.attr?('docdate') assert doc.attr?('reproducible') assert_xpath '/article/info/date', result, 0 end test 'should not modify options argument' do options = { :safe => Asciidoctor::SafeMode::SAFE } options.freeze sample_input_path = fixture_path('sample.asciidoc') begin Asciidoctor.load_file sample_input_path, options rescue flunk %(options argument should not be modified) end end test 'should not modify attributes Hash argument' do attributes = {} attributes.freeze options = { :safe => Asciidoctor::SafeMode::SAFE, :attributes => attributes } sample_input_path = fixture_path('sample.asciidoc') begin Asciidoctor.load_file sample_input_path, options rescue flunk %(attributes argument should not be modified) end end test 'should track file and line information with blocks if sourcemap option is set' do doc = Asciidoctor.load_file fixture_path('sample.asciidoc'), :sourcemap => true section_1 = doc.sections[0] assert_equal 'Section A', section_1.title refute_nil section_1.source_location assert_equal 'sample.asciidoc', section_1.file assert_equal 10, section_1.lineno section_2 = doc.sections[1] assert_equal 'Section B', section_2.title refute_nil section_2.source_location assert_equal 'sample.asciidoc', section_2.file assert_equal 18, section_2.lineno last_block = section_2.blocks[-1] assert_equal :ulist, last_block.context refute_nil last_block.source_location assert_equal 'sample.asciidoc', last_block.file assert_equal 23, last_block.lineno doc = Asciidoctor.load_file fixture_path('master.adoc'), :sourcemap => true, :safe => :safe section_1 = doc.sections[0] assert_equal 'Chapter A', section_1.title refute_nil section_1.source_location assert_equal fixture_path('chapter-a.adoc'), section_1.file assert_equal 1, section_1.lineno end test 'find_by should return Array of blocks anywhere in document tree that match criteria' do input = <<-EOS = Document Title preamble == Section A paragraph -- Exhibit A:: + [#tiger.animal] image::tiger.png[Tiger] -- image::shoe.png[Shoe] == Section B paragraph EOS doc = Asciidoctor.load input result = doc.find_by :context => :image assert_equal 2, result.size assert_equal :image, result[0].context assert_equal 'tiger.png', result[0].attr('target') assert_equal :image, result[1].context assert_equal 'shoe.png', result[1].attr('target') end test 'find_by should return an empty Array if no matches are found' do input = <<-EOS paragraph EOS doc = Asciidoctor.load input result = doc.find_by :context => :section refute_nil result assert_equal 0, result.size end test 'find_by should return Array of blocks that match style criteria' do input = <<-EOS [square] * one * two * three --- * apples * bananas * pears EOS doc = Asciidoctor.load input result = doc.find_by :context => :ulist, :style => 'square' assert_equal 1, result.size assert_equal :ulist, result[0].context end test 'find_by 
should return Array of blocks that match role criteria' do input = <<-EOS [#tiger.animal] image::tiger.png[Tiger] image::shoe.png[Shoe] EOS doc = Asciidoctor.load input result = doc.find_by :context => :image, :role => 'animal' assert_equal 1, result.size assert_equal :image, result[0].context assert_equal 'tiger.png', result[0].attr('target') end test 'find_by should return the document title section if context selector is :section' do input = <<-EOS = Document Title preamble == Section One content EOS doc = Asciidoctor.load input result = doc.find_by :context => :section refute_nil result assert_equal 2, result.size assert_equal :section, result[0].context assert_equal 'Document Title', result[0].title end test 'find_by should only return results for which the block argument yields true' do input = <<-EOS == Section content === Subsection content EOS doc = Asciidoctor.load input result = doc.find_by(:context => :section) {|sect| sect.level == 1 } refute_nil result assert_equal 1, result.size assert_equal :section, result[0].context assert_equal 'Section', result[0].title end test 'find_by should only return one result when matching by id' do input = <<-EOS == Section content [#subsection] === Subsection content EOS doc = Asciidoctor.load input result = doc.find_by(:context => :section, :id => 'subsection') refute_nil result assert_equal 1, result.size assert_equal :section, result[0].context assert_equal 'Subsection', result[0].title end test 'find_by should return an empty Array if the id criteria matches but the block argument yields false' do input = <<-EOS == Section content [#subsection] === Subsection content EOS doc = Asciidoctor.load input result = doc.find_by(:context => :section, :id => 'subsection') {|sect| false } refute_nil result assert_equal 0, result.size end test 'find_by should not crash if dlist entry does not have description' do input = <<-EOS term without description:: EOS doc = Asciidoctor.load input result = doc.find_by refute_nil result assert_equal 3, result.size assert Asciidoctor::Document === result[0] assert Asciidoctor::List === result[1] assert Asciidoctor::ListItem === result[2] end end context 'Convert APIs' do test 'should convert source document to string when to_file is false' do sample_input_path = fixture_path('sample.asciidoc') output = Asciidoctor.convert_file sample_input_path, :header_footer => true, :to_file => false assert !output.empty? assert_xpath '/html', output, 1 assert_xpath '/html/head', output, 1 assert_xpath '/html/body', output, 1 assert_xpath '/html/head/title[text() = "Document Title"]', output, 1 assert_xpath '/html/body/*[@id="header"]/h1[text() = "Document Title"]', output, 1 end test 'lines in output should be separated by line feed' do sample_input_path = fixture_path('sample.asciidoc') output = Asciidoctor.convert_file sample_input_path, :header_footer => true, :to_file => false assert !output.empty? 
lines = output.split("\n") assert lines.size == output.split(/\r\n|\r|\n/).size raw_lengths = lines.map(&:length) trimmed_lengths = lines.map {|line| line.rstrip.length } assert raw_lengths == trimmed_lengths end test 'should accept attributes as array' do sample_input_path = fixture_path('sample.asciidoc') output = Asciidoctor.convert_file sample_input_path, :attributes => %w(sectnums idprefix idseparator=-), :to_file => false assert_css '#section-a', output, 1 end test 'should accept attributes as string' do sample_input_path = fixture_path('sample.asciidoc') output = Asciidoctor.convert_file sample_input_path, :attributes => 'sectnums idprefix idseparator=-', :to_file => false assert_css '#section-a', output, 1 end test 'should link to default stylesheet by default when safe mode is SECURE or greater' do sample_input_path = fixture_path('basic.asciidoc') output = Asciidoctor.convert_file sample_input_path, :header_footer => true, :to_file => false assert_css 'html:root > head > link[rel="stylesheet"][href^="https://fonts.googleapis.com"]', output, 1 assert_css 'html:root > head > link[rel="stylesheet"][href="./asciidoctor.css"]', output, 1 end test 'should embed default stylesheet by default if SafeMode is less than SECURE' do input = <<-EOS = Document Title text EOS output = Asciidoctor.render(input, :safe => Asciidoctor::SafeMode::SERVER, :header_footer => true) assert_css 'html:root > head > link[rel="stylesheet"][href^="https://fonts.googleapis.com"]', output, 1 assert_css 'html:root > head > link[rel="stylesheet"][href="./asciidoctor.css"]', output, 0 stylenode = xmlnodes_at_css 'html:root > head > style', output, 1 styles = stylenode.first.content assert !styles.nil? assert !styles.strip.empty? end test 'should link to default stylesheet by default even if linkcss is unset in document' do input = <<-EOS = Document Title :linkcss!: text EOS output = Asciidoctor.render(input, :header_footer => true) assert_css 'html:root > head > link[rel="stylesheet"][href^="https://fonts.googleapis.com"]', output, 1 assert_css 'html:root > head > link[rel="stylesheet"][href="./asciidoctor.css"]', output, 1 end test 'should link to default stylesheet by default if linkcss is unset' do input = <<-EOS = Document Title text EOS output = Asciidoctor.render(input, :header_footer => true, :attributes => {'linkcss!' => ''}) assert_css 'html:root > head > link[rel="stylesheet"][href^="https://fonts.googleapis.com"]', output, 1 assert_css 'html:root > head > link[rel="stylesheet"][href="./asciidoctor.css"]', output, 1 end test 'should embed default stylesheet if safe mode is less than secure and linkcss is unset' do sample_input_path = fixture_path('basic.asciidoc') output = Asciidoctor.convert_file sample_input_path, :header_footer => true, :to_file => false, :safe => Asciidoctor::SafeMode::SAFE, :attributes => {'linkcss!' => ''} assert_css 'html:root > head > style', output, 1 stylenode = xmlnodes_at_css 'html:root > head > style', output, 1 styles = stylenode.first.content assert !styles.nil? assert !styles.strip.empty? end test 'should not link to stylesheet if stylesheet is unset' do input = <<-EOS = Document Title text EOS output = Asciidoctor.render(input, :header_footer => true, :attributes => {'stylesheet!' 
=> ''}) assert_css 'html:root > head > link[rel="stylesheet"][href^="https://fonts.googleapis.com"]', output, 0 assert_css 'html:root > head > link[rel="stylesheet"]', output, 0 end test 'should link to custom stylesheet if specified in stylesheet attribute' do input = <<-EOS = Document Title text EOS output = Asciidoctor.render(input, :header_footer => true, :attributes => {'stylesheet' => './custom.css'}) assert_css 'html:root > head > link[rel="stylesheet"][href^="https://fonts.googleapis.com"]', output, 0 assert_css 'html:root > head > link[rel="stylesheet"][href="./custom.css"]', output, 1 output = Asciidoctor.render(input, :header_footer => true, :attributes => {'stylesheet' => 'file:///home/username/custom.css'}) assert_css 'html:root > head > link[rel="stylesheet"][href="file:///home/username/custom.css"]', output, 1 end test 'should resolve custom stylesheet relative to stylesdir' do input = <<-EOS = Document Title text EOS output = Asciidoctor.render(input, :header_footer => true, :attributes => {'stylesheet' => 'custom.css', 'stylesdir' => './stylesheets'}) assert_css 'html:root > head > link[rel="stylesheet"][href="./stylesheets/custom.css"]', output, 1 end test 'should resolve custom stylesheet to embed relative to stylesdir' do sample_input_path = fixture_path('basic.asciidoc') output = Asciidoctor.convert_file sample_input_path, :header_footer => true, :safe => Asciidoctor::SafeMode::SAFE, :to_file => false, :attributes => {'stylesheet' => 'custom.css', 'stylesdir' => './stylesheets', 'linkcss!' => ''} stylenode = xmlnodes_at_css 'html:root > head > style', output, 1 styles = stylenode.first.content assert !styles.nil? assert !styles.strip.empty? end test 'should convert source file and write result to adjacent file by default' do sample_input_path = fixture_path('sample.asciidoc') sample_output_path = fixture_path('sample.html') begin Asciidoctor.convert_file sample_input_path assert File.exist?(sample_output_path) output = File.read(sample_output_path) assert !output.empty? assert_xpath '/html', output, 1 assert_xpath '/html/head', output, 1 assert_xpath '/html/body', output, 1 assert_xpath '/html/head/title[text() = "Document Title"]', output, 1 assert_xpath '/html/body/*[@id="header"]/h1[text() = "Document Title"]', output, 1 ensure FileUtils.rm(sample_output_path) end end test 'should convert source file and write to specified file' do sample_input_path = fixture_path('sample.asciidoc') sample_output_path = fixture_path('result.html') begin Asciidoctor.convert_file sample_input_path, :to_file => sample_output_path assert File.exist?(sample_output_path) output = File.read(sample_output_path) assert !output.empty? assert_xpath '/html', output, 1 assert_xpath '/html/head', output, 1 assert_xpath '/html/body', output, 1 assert_xpath '/html/head/title[text() = "Document Title"]', output, 1 assert_xpath '/html/body/*[@id="header"]/h1[text() = "Document Title"]', output, 1 ensure FileUtils.rm(sample_output_path) end end test 'should convert source file and write to specified file in base_dir' do sample_input_path = fixture_path('sample.asciidoc') sample_output_path = fixture_path('result.html') fixture_dir = fixture_path('') begin Asciidoctor.convert_file sample_input_path, :to_file => 'result.html', :base_dir => fixture_dir assert File.exist?(sample_output_path) output = File.read(sample_output_path) assert !output.empty? 
assert_xpath '/html', output, 1 assert_xpath '/html/head', output, 1 assert_xpath '/html/body', output, 1 assert_xpath '/html/head/title[text() = "Document Title"]', output, 1 assert_xpath '/html/body/*[@id="header"]/h1[text() = "Document Title"]', output, 1 rescue => e flunk e.message ensure FileUtils.rm(sample_output_path, :force => true) end end test 'in_place option is ignored when to_file is specified' do sample_input_path = fixture_path('sample.asciidoc') sample_output_path = fixture_path('result.html') begin Asciidoctor.convert_file sample_input_path, :to_file => sample_output_path, :in_place => true assert File.exist?(sample_output_path) ensure FileUtils.rm(sample_output_path) if File.exist? sample_output_path end end test 'in_place option is ignored when to_dir is specified' do sample_input_path = fixture_path('sample.asciidoc') sample_output_path = fixture_path('sample.html') begin Asciidoctor.convert_file sample_input_path, :to_dir => File.dirname(sample_output_path), :in_place => true assert File.exist?(sample_output_path) ensure FileUtils.rm(sample_output_path) if File.exist? sample_output_path end end test 'output should be relative to to_dir option' do sample_input_path = fixture_path('sample.asciidoc') output_dir = File.join(File.dirname(sample_input_path), 'test_output') Dir.mkdir output_dir if !File.exist? output_dir sample_output_path = File.join(output_dir, 'sample.html') begin Asciidoctor.convert_file sample_input_path, :to_dir => output_dir assert File.exist? sample_output_path ensure FileUtils.rm(sample_output_path) if File.exist? sample_output_path FileUtils.rmdir output_dir end end test 'missing directories should be created if mkdirs is enabled' do sample_input_path = fixture_path('sample.asciidoc') output_dir = File.join(File.join(File.dirname(sample_input_path), 'test_output'), 'subdir') sample_output_path = File.join(output_dir, 'sample.html') begin Asciidoctor.convert_file sample_input_path, :to_dir => output_dir, :mkdirs => true assert File.exist? sample_output_path ensure FileUtils.rm(sample_output_path) if File.exist? sample_output_path FileUtils.rmdir output_dir FileUtils.rmdir File.dirname(output_dir) end end # TODO need similar test for when to_dir is specified test 'should raise exception if an attempt is made to overwrite input file' do sample_input_path = fixture_path('sample.asciidoc') assert_raises IOError do Asciidoctor.convert_file sample_input_path, :attributes => { 'outfilesuffix' => '.asciidoc' } end end test 'to_file should be relative to to_dir when both given' do sample_input_path = fixture_path('sample.asciidoc') base_dir = File.dirname(sample_input_path) sample_rel_output_path = File.join('test_output', 'result.html') output_dir = File.dirname(File.join(base_dir, sample_rel_output_path)) Dir.mkdir output_dir if !File.exist? output_dir sample_output_path = File.join(base_dir, sample_rel_output_path) begin Asciidoctor.convert_file sample_input_path, :to_dir => base_dir, :to_file => sample_rel_output_path assert File.exist? sample_output_path ensure FileUtils.rm(sample_output_path) if File.exist? 
sample_output_path FileUtils.rmdir output_dir end end test 'should not modify options argument' do options = { :safe => Asciidoctor::SafeMode::SAFE, :to_file => false } options.freeze sample_input_path = fixture_path('sample.asciidoc') begin Asciidoctor.convert_file sample_input_path, options rescue flunk %(options argument should not be modified) end end end context 'Docinfo files' do test 'should include docinfo files for html backend' do sample_input_path = fixture_path('basic.asciidoc') cases = { 'docinfo' => { :head_script => 1, :meta => 0, :top_link => 0, :footer_script => 1 }, 'docinfo=private' => { :head_script => 1, :meta => 0, :top_link => 0, :footer_script => 1 }, 'docinfo1' => { :head_script => 0, :meta => 1, :top_link => 1, :footer_script => 0 }, 'docinfo=shared' => { :head_script => 0, :meta => 1, :top_link => 1, :footer_script => 0 }, 'docinfo2' => { :head_script => 1, :meta => 1, :top_link => 1, :footer_script => 1 }, 'docinfo docinfo2' => { :head_script => 1, :meta => 1, :top_link => 1, :footer_script => 1 }, 'docinfo=private,shared' => { :head_script => 1, :meta => 1, :top_link => 1, :footer_script => 1 }, 'docinfo=private-head' => { :head_script => 1, :meta => 0, :top_link => 0, :footer_script => 0 }, 'docinfo=shared-head' => { :head_script => 0, :meta => 1, :top_link => 0, :footer_script => 0 }, 'docinfo=private-footer' => { :head_script => 0, :meta => 0, :top_link => 0, :footer_script => 1 }, 'docinfo=shared-footer' => { :head_script => 0, :meta => 0, :top_link => 1, :footer_script => 0 }, 'docinfo=private-head\ ,\ shared-footer' => { :head_script => 1, :meta => 0, :top_link => 1, :footer_script => 0 } } cases.each do |attr_val, markup| output = Asciidoctor.convert_file sample_input_path, :to_file => false, :header_footer => true, :safe => Asciidoctor::SafeMode::SERVER, :attributes => %(linkcss copycss! #{attr_val}) assert !output.empty? assert_css 'script[src="modernizr.js"]', output, markup[:head_script] assert_css 'meta[http-equiv="imagetoolbar"]', output, markup[:meta] assert_css 'body > a#top', output, markup[:top_link] assert_css 'body > script', output, markup[:footer_script] end end test 'should include docinfo footer even if nofooter attribute is set' do sample_input_path = fixture_path('basic.asciidoc') output = Asciidoctor.convert_file sample_input_path, :to_file => false, :header_footer => true, :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo1' => '', 'nofooter' => ''} assert !output.empty? assert_css 'body > a#top', output, 1 end test 'should include docinfo files for html backend with custom docinfodir' do sample_input_path = fixture_path('basic.asciidoc') output = Asciidoctor.convert_file sample_input_path, :to_file => false, :header_footer => true, :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo' => '', 'docinfodir' => 'custom-docinfodir'} assert !output.empty? assert_css 'script[src="bootstrap.js"]', output, 1 assert_css 'meta[name="robots"]', output, 0 output = Asciidoctor.convert_file sample_input_path, :to_file => false, :header_footer => true, :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo1' => '', 'docinfodir' => 'custom-docinfodir'} assert !output.empty? assert_css 'script[src="bootstrap.js"]', output, 0 assert_css 'meta[name="robots"]', output, 1 output = Asciidoctor.convert_file sample_input_path, :to_file => false, :header_footer => true, :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo2' => '', 'docinfodir' => './custom-docinfodir'} assert !output.empty? 
assert_css 'script[src="bootstrap.js"]', output, 1 assert_css 'meta[name="robots"]', output, 1 output = Asciidoctor.convert_file sample_input_path, :to_file => false, :header_footer => true, :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo2' => '', 'docinfodir' => 'custom-docinfodir/subfolder'} assert !output.empty? assert_css 'script[src="bootstrap.js"]', output, 0 assert_css 'meta[name="robots"]', output, 0 end test 'should include docinfo files for docbook backend' do sample_input_path = fixture_path('basic.asciidoc') output = Asciidoctor.convert_file sample_input_path, :to_file => false, :header_footer => true, :backend => 'docbook', :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo' => ''} assert !output.empty? assert_css 'productname', output, 0 assert_css 'copyright', output, 1 output = Asciidoctor.convert_file sample_input_path, :to_file => false, :header_footer => true, :backend => 'docbook', :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo1' => ''} assert !output.empty? assert_css 'productname', output, 1 assert_xpath '//xmlns:productname[text()="Asciidoctor™"]', output, 1 assert_css 'edition', output, 1 assert_xpath '//xmlns:edition[text()="1.0"]', output, 1 # verifies substitutions are performed assert_css 'copyright', output, 0 output = Asciidoctor.convert_file sample_input_path, :to_file => false, :header_footer => true, :backend => 'docbook', :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo2' => ''} assert !output.empty? assert_css 'productname', output, 1 assert_xpath '//xmlns:productname[text()="Asciidoctor™"]', output, 1 assert_css 'edition', output, 1 assert_xpath '//xmlns:edition[text()="1.0"]', output, 1 # verifies substitutions are performed assert_css 'copyright', output, 1 end test 'should include docinfo footer files for html backend' do sample_input_path = fixture_path('basic.asciidoc') output = Asciidoctor.convert_file sample_input_path, :to_file => false, :header_footer => true, :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo' => ''} assert !output.empty? assert_css 'body script', output, 1 assert_css 'a#top', output, 0 output = Asciidoctor.convert_file sample_input_path, :to_file => false, :header_footer => true, :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo1' => ''} assert !output.empty? assert_css 'body script', output, 0 assert_css 'a#top', output, 1 output = Asciidoctor.convert_file sample_input_path, :to_file => false, :header_footer => true, :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo2' => ''} assert !output.empty? assert_css 'body script', output, 1 assert_css 'a#top', output, 1 end test 'should include docinfo footer files for docbook backend' do sample_input_path = fixture_path('basic.asciidoc') output = Asciidoctor.convert_file sample_input_path, :to_file => false, :header_footer => true, :backend => 'docbook', :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo' => ''} assert !output.empty? assert_css 'article > revhistory', output, 1 assert_xpath '/xmlns:article/xmlns:revhistory/xmlns:revision/xmlns:revnumber[text()="1.0"]', output, 1 # verifies substitutions are performed assert_css 'glossary#_glossary', output, 0 output = Asciidoctor.convert_file sample_input_path, :to_file => false, :header_footer => true, :backend => 'docbook', :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo1' => ''} assert !output.empty? 
assert_css 'article > revhistory', output, 0 assert_css 'glossary#_glossary', output, 1 output = Asciidoctor.convert_file sample_input_path, :to_file => false, :header_footer => true, :backend => 'docbook', :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo2' => ''} assert !output.empty? assert_css 'article > revhistory', output, 1 assert_xpath '/xmlns:article/xmlns:revhistory/xmlns:revision/xmlns:revnumber[text()="1.0"]', output, 1 # verifies substitutions are performed assert_css 'glossary#_glossary', output, 1 end # WARNING this test manipulates runtime settings; should probably be run in forked process test 'should force encoding of docinfo files to UTF-8' do sample_input_path = fixture_path('basic.asciidoc') if RUBY_VERSION >= '1.9' default_external_old = Encoding.default_external force_encoding_old = Asciidoctor::FORCE_ENCODING verbose_old = $VERBOSE end begin if RUBY_VERSION >= '1.9' $VERBOSE = nil # disable warnings since we have to modify constants Encoding.default_external = 'US-ASCII' Asciidoctor::FORCE_ENCODING = true end output = Asciidoctor.convert_file sample_input_path, :to_file => false, :header_footer => true, :backend => 'docbook', :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo2' => ''} assert !output.empty? assert_css 'productname', output, 1 assert_css 'edition', output, 1 assert_xpath '//xmlns:edition[text()="1.0"]', output, 1 # verifies substitutions are performed assert_css 'copyright', output, 1 ensure if RUBY_VERSION >= '1.9' Encoding.default_external = default_external_old Asciidoctor::FORCE_ENCODING = force_encoding_old $VERBOSE = verbose_old end end end test 'should not include docinfo files by default' do sample_input_path = fixture_path('basic.asciidoc') output = Asciidoctor.convert_file sample_input_path, :to_file => false, :header_footer => true, :safe => Asciidoctor::SafeMode::SERVER assert !output.empty? assert_css 'script[src="modernizr.js"]', output, 0 assert_css 'meta[http-equiv="imagetoolbar"]', output, 0 output = Asciidoctor.convert_file sample_input_path, :to_file => false, :header_footer => true, :backend => 'docbook', :safe => Asciidoctor::SafeMode::SERVER assert !output.empty? assert_css 'productname', output, 0 assert_css 'copyright', output, 0 end test 'should not include docinfo files if safe mode is SECURE or greater' do sample_input_path = fixture_path('basic.asciidoc') output = Asciidoctor.convert_file sample_input_path, :to_file => false, :header_footer => true, :attributes => {'docinfo2' => ''} assert !output.empty? assert_css 'script[src="modernizr.js"]', output, 0 assert_css 'meta[http-equiv="imagetoolbar"]', output, 0 output = Asciidoctor.convert_file sample_input_path, :to_file => false, :header_footer => true, :backend => 'docbook', :attributes => {'docinfo2' => ''} assert !output.empty? assert_css 'productname', output, 0 assert_css 'copyright', output, 0 end test 'should apply explicit substitutions to docinfo files' do sample_input_path = fixture_path('subs.adoc') output = Asciidoctor.convert_file sample_input_path, :to_file => false, :header_footer => true, :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo' => '', 'docinfosubs' => 'attributes,replacements', 'linkcss' => ''} assert !output.empty? 
assert_css 'script[src="bootstrap.3.2.0.min.js"]', output, 1 assert_xpath %(//meta[@name="copyright"][@content="#{entity 169} OpenDevise"]), output, 1 end end context 'MathJax' do test 'should add MathJax script to HTML head if stem attribute is set' do output = render_string '', :attributes => {'stem' => ''} assert_match('), {}, :content_model => :raw end end class TemperatureMacro < Asciidoctor::Extensions::InlineMacroProcessor; use_dsl named :degrees name_attributes 'units' def process parent, target, attributes units = attributes['units'] || (parent.document.attr 'temperature-unit', 'C') c = target.to_f case units when 'C' %(#{c} °C) when 'F' %(#{c * 1.8 + 32 } °F) else c end end end class MetaRobotsDocinfoProcessor < Asciidoctor::Extensions::DocinfoProcessor def process document '' end end class MetaAppDocinfoProcessor < Asciidoctor::Extensions::DocinfoProcessor use_dsl at_location :head def process document '' end end class SampleExtensionGroup < Asciidoctor::Extensions::Group def activate registry registry.document.attributes['activate-method-called'] = '' registry.preprocessor SamplePreprocessor end end context 'Extensions' do context 'Register' do test 'should register extension group class' do begin Asciidoctor::Extensions.register :sample, SampleExtensionGroup refute_nil Asciidoctor::Extensions.groups assert_equal 1, Asciidoctor::Extensions.groups.size assert_equal SampleExtensionGroup, Asciidoctor::Extensions.groups[:sample] ensure Asciidoctor::Extensions.unregister_all end end test 'should self register extension group class' do begin SampleExtensionGroup.register :sample refute_nil Asciidoctor::Extensions.groups assert_equal 1, Asciidoctor::Extensions.groups.size assert_equal SampleExtensionGroup, Asciidoctor::Extensions.groups[:sample] ensure Asciidoctor::Extensions.unregister_all end end test 'should register extension group from class name' do begin Asciidoctor::Extensions.register :sample, 'SampleExtensionGroup' refute_nil Asciidoctor::Extensions.groups assert_equal 1, Asciidoctor::Extensions.groups.size assert_equal SampleExtensionGroup, Asciidoctor::Extensions.groups[:sample] ensure Asciidoctor::Extensions.unregister_all end end test 'should register extension group from instance' do begin Asciidoctor::Extensions.register :sample, SampleExtensionGroup.new refute_nil Asciidoctor::Extensions.groups assert_equal 1, Asciidoctor::Extensions.groups.size assert Asciidoctor::Extensions.groups[:sample].is_a? SampleExtensionGroup ensure Asciidoctor::Extensions.unregister_all end end test 'should register extension block' do begin Asciidoctor::Extensions.register(:sample) do end refute_nil Asciidoctor::Extensions.groups assert_equal 1, Asciidoctor::Extensions.groups.size assert Asciidoctor::Extensions.groups[:sample].is_a? 
Proc ensure Asciidoctor::Extensions.unregister_all end end test 'should get class for top-level class name' do clazz = Asciidoctor::Extensions.class_for_name('Asciidoctor') refute_nil clazz assert_equal Asciidoctor, clazz end test 'should get class for class name in module' do clazz = Asciidoctor::Extensions.class_for_name('Asciidoctor::Extensions') refute_nil clazz assert_equal Asciidoctor::Extensions, clazz end test 'should get class for class name resolved from root' do clazz = Asciidoctor::Extensions.class_for_name('::Asciidoctor::Extensions') refute_nil clazz assert_equal Asciidoctor::Extensions, clazz end test 'should raise exception if cannot find class for name' do begin Asciidoctor::Extensions.class_for_name('InvalidModule::InvalidClass') flunk 'Expecting RuntimeError to be raised' rescue RuntimeError => e assert_equal 'Could not resolve class for name: InvalidModule::InvalidClass', e.message end end test 'should resolve class if class is given' do clazz = Asciidoctor::Extensions.resolve_class(Asciidoctor::Extensions) refute_nil clazz assert_equal Asciidoctor::Extensions, clazz end test 'should resolve class if class from string' do clazz = Asciidoctor::Extensions.resolve_class('Asciidoctor::Extensions') refute_nil clazz assert_equal Asciidoctor::Extensions, clazz end end context 'Activate' do test 'should call activate on extension group class' do begin doc = Asciidoctor::Document.new Asciidoctor::Extensions.register :sample, SampleExtensionGroup registry = Asciidoctor::Extensions::Registry.new registry.activate doc assert doc.attr? 'activate-method-called' assert registry.preprocessors? ensure Asciidoctor::Extensions.unregister_all end end test 'should invoke extension block' do begin doc = Asciidoctor::Document.new Asciidoctor::Extensions.register do @document.attributes['block-called'] = '' preprocessor SamplePreprocessor end registry = Asciidoctor::Extensions::Registry.new registry.activate doc assert doc.attr? 'block-called' assert registry.preprocessors? ensure Asciidoctor::Extensions.unregister_all end end test 'should create registry in Document if extensions are loaded' do begin SampleExtensionGroup.register doc = Asciidoctor::Document.new assert doc.extensions? assert doc.extensions.is_a? Asciidoctor::Extensions::Registry ensure Asciidoctor::Extensions.unregister_all end end end context 'Instantiate' do test 'should instantiate preprocessors' do registry = Asciidoctor::Extensions::Registry.new registry.preprocessor SamplePreprocessor registry.activate Asciidoctor::Document.new assert registry.preprocessors? extensions = registry.preprocessors assert_equal 1, extensions.size assert extensions.first.is_a? Asciidoctor::Extensions::ProcessorExtension assert extensions.first.instance.is_a? SamplePreprocessor assert extensions.first.process_method.is_a? ::Method end test 'should instantiate include processors' do registry = Asciidoctor::Extensions::Registry.new registry.include_processor SampleIncludeProcessor registry.activate Asciidoctor::Document.new assert registry.include_processors? extensions = registry.include_processors assert_equal 1, extensions.size assert extensions.first.is_a? Asciidoctor::Extensions::ProcessorExtension assert extensions.first.instance.is_a? SampleIncludeProcessor assert extensions.first.process_method.is_a? ::Method end test 'should instantiate docinfo processors' do registry = Asciidoctor::Extensions::Registry.new registry.docinfo_processor SampleDocinfoProcessor registry.activate Asciidoctor::Document.new assert registry.docinfo_processors? 
assert registry.docinfo_processors?(:head) extensions = registry.docinfo_processors assert_equal 1, extensions.size assert extensions.first.is_a? Asciidoctor::Extensions::ProcessorExtension assert extensions.first.instance.is_a? SampleDocinfoProcessor assert extensions.first.process_method.is_a? ::Method end test 'should instantiate treeprocessors' do registry = Asciidoctor::Extensions::Registry.new registry.treeprocessor SampleTreeprocessor registry.activate Asciidoctor::Document.new assert registry.treeprocessors? extensions = registry.treeprocessors assert_equal 1, extensions.size assert extensions.first.is_a? Asciidoctor::Extensions::ProcessorExtension assert extensions.first.instance.is_a? SampleTreeprocessor assert extensions.first.process_method.is_a? ::Method end test 'should instantiate postprocessors' do registry = Asciidoctor::Extensions::Registry.new registry.postprocessor SamplePostprocessor registry.activate Asciidoctor::Document.new assert registry.postprocessors? extensions = registry.postprocessors assert_equal 1, extensions.size assert extensions.first.is_a? Asciidoctor::Extensions::ProcessorExtension assert extensions.first.instance.is_a? SamplePostprocessor assert extensions.first.process_method.is_a? ::Method end test 'should instantiate block processor' do registry = Asciidoctor::Extensions::Registry.new registry.block SampleBlock, :sample registry.activate Asciidoctor::Document.new assert registry.blocks? assert registry.registered_for_block? :sample, :paragraph extension = registry.find_block_extension :sample assert extension.is_a? Asciidoctor::Extensions::ProcessorExtension assert extension.instance.is_a? SampleBlock assert extension.process_method.is_a? ::Method end test 'should not match block processor for unsupported context' do registry = Asciidoctor::Extensions::Registry.new registry.block SampleBlock, :sample registry.activate Asciidoctor::Document.new assert !(registry.registered_for_block? :sample, :sidebar) end test 'should instantiate block macro processor' do registry = Asciidoctor::Extensions::Registry.new registry.block_macro SampleBlockMacro, 'sample' registry.activate Asciidoctor::Document.new assert registry.block_macros? assert registry.registered_for_block_macro? 'sample' extension = registry.find_block_macro_extension 'sample' assert extension.is_a? Asciidoctor::Extensions::ProcessorExtension assert extension.instance.is_a? SampleBlockMacro assert extension.process_method.is_a? ::Method end test 'should instantiate inline macro processor' do registry = Asciidoctor::Extensions::Registry.new registry.inline_macro SampleInlineMacro, 'sample' registry.activate Asciidoctor::Document.new assert registry.inline_macros? assert registry.registered_for_inline_macro? 'sample' extension = registry.find_inline_macro_extension 'sample' assert extension.is_a? Asciidoctor::Extensions::ProcessorExtension assert extension.instance.is_a? SampleInlineMacro assert extension.process_method.is_a? ::Method end test 'should allow processors to be registered by a string name' do registry = Asciidoctor::Extensions::Registry.new registry.preprocessor 'SamplePreprocessor' registry.activate Asciidoctor::Document.new assert registry.preprocessors? extensions = registry.preprocessors assert_equal 1, extensions.size assert extensions.first.is_a? 
Asciidoctor::Extensions::ProcessorExtension end end context 'Integration' do test 'should invoke preprocessors before parsing document' do input = <<-EOS junk line = Document Title sample content EOS begin Asciidoctor::Extensions.register do preprocessor ScrubHeaderPreprocessor end doc = document_from_string input assert doc.attr? 'skipped' assert_equal 'junk line', (doc.attr 'skipped').strip assert doc.has_header? assert_equal 'Document Title', doc.doctitle ensure Asciidoctor::Extensions.unregister_all end end test 'should invoke include processor to process include macro' do input = <<-EOS before include::lorem-ipsum.txt[] after EOS begin Asciidoctor::Extensions.register do include_processor BoilerplateTextIncludeProcessor end result = render_string input, :safe => :server assert_css '.paragraph > p', result, 3 assert result.include?('before') assert result.include?('Lorem ipsum') assert result.include?('after') ensure Asciidoctor::Extensions.unregister_all end end test 'should call include processor to process include directive' do input = <<-EOS first line include::include-file.asciidoc[] last line EOS # Safe Mode is not required here document = empty_document :base_dir => File.expand_path(File.dirname(__FILE__)) document.extensions.include_processor do process do |doc, reader, target, attributes| # demonstrate that push_include normalizes endlines content = ["include target:: #{target}\n", "\n", "middle line\n"] reader.push_include content, target, target, 1, attributes end end reader = Asciidoctor::PreprocessorReader.new document, input lines = [] lines << reader.read_line lines << reader.read_line lines << reader.read_line assert_equal 'include target:: include-file.asciidoc', lines.last assert_equal 'include-file.asciidoc: line 2', reader.line_info while reader.has_more_lines? lines << reader.read_line end source = lines * ::Asciidoctor::EOL assert_match(/^include target:: include-file.asciidoc$/, source) assert_match(/^middle line$/, source) end test 'should invoke treeprocessors after parsing document' do input = <<-EOS = Document Title Doc Writer content EOS begin Asciidoctor::Extensions.register do treeprocessor ReplaceAuthorTreeprocessor end doc = document_from_string input assert_equal 'Ghost Writer', doc.author ensure Asciidoctor::Extensions.unregister_all end end test 'should allow treeprocessor to replace tree' do input = <<-EOS = Original Document Doc Writer content EOS begin Asciidoctor::Extensions.register do treeprocessor ReplaceTreeTreeprocessor end doc = document_from_string input assert_equal 'Replacement Document', doc.doctitle ensure Asciidoctor::Extensions.unregister_all end end test 'should invoke postprocessors after rendering document' do input = <<-EOS * one * two * three EOS begin Asciidoctor::Extensions.register do postprocessor StripAttributesPostprocessor end output = render_string input refute_match(/
    /, output) ensure Asciidoctor::Extensions.unregister_all end end test 'should invoke processor for custom block' do input = <<-EOS [yell] Hi there! EOS begin Asciidoctor::Extensions.register do block UppercaseBlock end output = render_embedded_string input assert_xpath '//p', output, 1 assert_xpath '//p[text()="HI THERE!"]', output, 1 ensure Asciidoctor::Extensions.unregister_all end end test 'should invoke processor for custom block macro' do input = <<-EOS snippet::12345[] EOS begin Asciidoctor::Extensions.register do block_macro SnippetMacro, :snippet end output = render_embedded_string input assert output.include?('') ensure Asciidoctor::Extensions.unregister_all end end test 'should invoke processor for custom inline macro' do begin Asciidoctor::Extensions.register do inline_macro TemperatureMacro, :degrees end output = render_embedded_string 'Room temperature is degrees:25[C].', :attributes => {'temperature-unit' => 'F'} assert output.include?('Room temperature is 25.0 °C.') output = render_embedded_string 'Room temperature is degrees:25[].', :attributes => {'temperature-unit' => 'F'} assert output.include?('Room temperature is 77.0 °F.') ensure Asciidoctor::Extensions.unregister_all end end test 'should resolve regexp for inline macro lazily' do begin Asciidoctor::Extensions.register do inline_macro do named :label using_format :short process do |parent, target| %() end end end output = render_embedded_string 'label:[Checkbox]' assert output.include?('') ensure Asciidoctor::Extensions.unregister_all end end test 'should not carry over attributes if block processor returns nil' do begin Asciidoctor::Extensions.register do block do named :skip on_context :paragraph parse_content_as :raw process do |parent, reader, attrs| nil end end end input = <<-EOS .unused title [skip] not rendered -- rendered -- EOS doc = document_from_string input assert_equal 1, doc.blocks.size assert_nil doc.blocks[0].attributes['title'] ensure Asciidoctor::Extensions.unregister_all end end test 'should pass attributes by value to block processor' do begin Asciidoctor::Extensions.register do block do named :foo on_context :paragraph parse_content_as :raw process do |parent, reader, attrs| original_attrs = attrs.dup attrs.delete('title') create_paragraph parent, reader.read_lines, original_attrs.merge('id' => 'value') end end end input = <<-EOS .title [foo] content EOS doc = document_from_string input assert_equal 1, doc.blocks.size assert_equal 'title', doc.blocks[0].attributes['title'] assert_equal 'value', doc.blocks[0].id ensure Asciidoctor::Extensions.unregister_all end end test 'parse_content should not share attributes between parsed blocks' do begin Asciidoctor::Extensions.register do block do named :wrap on_context :open process do |parent, reader, attrs| wrap = create_open_block parent, nil, attrs parse_content wrap, reader.read_lines end end end input = <<-EOS [wrap] -- [foo=bar] ==== content ==== [baz=qux] ==== content ==== -- EOS doc = document_from_string input assert_equal 1, doc.blocks.size wrap = doc.blocks[0] assert_equal 2, wrap.blocks.size assert_equal 2, wrap.blocks[0].attributes.size assert_equal 2, wrap.blocks[1].attributes.size assert_nil wrap.blocks[1].attributes['foo'] ensure Asciidoctor::Extensions.unregister_all end end test 'should add docinfo to document' do input = <<-EOS = Document Title sample content EOS begin Asciidoctor::Extensions.register do docinfo_processor MetaRobotsDocinfoProcessor end doc = document_from_string input, :safe => :server assert_equal '', doc.docinfo 
ensure Asciidoctor::Extensions.unregister_all end end test 'should add multiple docinfo to document' do input = <<-EOS = Document Title sample content EOS begin Asciidoctor::Extensions.register do docinfo_processor MetaAppDocinfoProcessor docinfo_processor MetaRobotsDocinfoProcessor, :position => :>> docinfo_processor do at_location :footer process do |doc| '' end end end doc = document_from_string input, :safe => :server assert_equal ' ', doc.docinfo assert_equal '', doc.docinfo(:footer) ensure Asciidoctor::Extensions.unregister_all end end test 'should append docinfo to document' do begin Asciidoctor::Extensions.register do docinfo_processor MetaRobotsDocinfoProcessor end sample_input_path = fixture_path('basic.asciidoc') output = Asciidoctor.convert_file sample_input_path, :to_file => false, :header_footer => true, :safe => Asciidoctor::SafeMode::SERVER, :attributes => {'docinfo' => ''} assert !output.empty? assert_css 'script[src="modernizr.js"]', output, 1 assert_css 'meta[name="robots"]', output, 1 assert_css 'meta[http-equiv="imagetoolbar"]', output, 0 ensure Asciidoctor::Extensions.unregister_all end end end end asciidoctor-1.5.5/test/fixtures/000077500000000000000000000000001277513741400166705ustar00rootroot00000000000000asciidoctor-1.5.5/test/fixtures/asciidoc_index.txt000066400000000000000000000533141277513741400224040ustar00rootroot00000000000000AsciiDoc Home Page ================== // Web page meta data. :keywords: AsciiDoc, DocBook, EPUB, PDF, ebooks, slideshow, slidy, man page :description: AsciiDoc is a text document format for writing notes, + documentation, articles, books, ebooks, slideshows, + web pages, man pages and blogs. AsciiDoc files can be + translated to many formats including HTML, PDF, EPUB, + man page. .{revdate}: AsciiDoc {revnumber} Released ************************************************************************ Read the link:CHANGELOG.html[CHANGELOG] for release highlights and a full list of all additions, changes and bug fixes. Changes are documented in the updated link:userguide.html[User Guide]. See the link:INSTALL.html[Installation page] for downloads and and installation instructions. 'Stuart Rackham' ************************************************************************ Introduction ------------ {description} AsciiDoc is highly configurable: both the AsciiDoc source file syntax and the backend output markups (which can be almost any type of SGML/XML markup) can be customized and extended by the user. AsciiDoc is free software and is licenced under the terms of the 'GNU General Public License version 2' (GPLv2). TIP: The pages you are reading were written using AsciiDoc, to view the corresponding AsciiDoc source click on the *Page Source* menu item in the left hand margin. Overview and Examples --------------------- You write an AsciiDoc document the same way you would write a normal text document, there are no markup tags or weird format notations. AsciiDoc files are designed to be viewed, edited and printed directly or translated to other presentation formats using the asciidoc(1) command. The asciidoc(1) command translates AsciiDoc files to HTML, XHTML and DocBook markups. DocBook can be post-processed to presentation formats such as HTML, PDF, EPUB, DVI, LaTeX, roff, and Postscript using readily available Open Source tools. Example Articles ~~~~~~~~~~~~~~~~ - This XHTML version of the link:asciidoc.css-embedded.html[AsciiDoc User Guide] was generated by AsciiDoc from link:asciidoc.txt[this AsciiDoc file]. 
- Here's the link:asciidoc.html[same document] created by first generating DocBook markup using AsciiDoc and then converting the DocBook markup to HTML using 'DocBook XSL Stylesheets'. - The User Guide again, this time a link:chunked/index.html[chunked version]. - AsciiDoc generated this link:article-standalone.html[stand-alone HTML file] containing embedded CSS, JavaScript and images from this link:article.txt[AsciiDoc article template] with this command: asciidoc -a data-uri -a icons -a toc -a max-width=55em article.txt - The same link:article.txt[AsciiDoc article template] generated link:article-html5-toc2.html[this HTML 5] (the 'toc2' attribute puts a table of contents in the left margin) from this command: asciidoc -b html5 -a icons -a toc2 -a theme=flask article.txt - The same link:article.txt[AsciiDoc article template] produced this link:article.html[HTML file] and this link:article.pdf[PDF file] via DocBook markup generated by AsciiDoc. [[X7]] Example Books ~~~~~~~~~~~~~ AsciiDoc markup supports all the standard DocBook frontmatter and backmatter sections (dedication, preface, bibliography, glossary, index, colophon) plus footnotes and index entries. - This link:book.txt[AsciiDoc book] produced link:book.html[this HTML file] using the 'DocBook XSL Stylesheets'. - The link:asciidoc.pdf[PDF formatted AsciiDoc User Guide] was generated from asciidoc(1) DocBook output. - The link:asciidoc.epub[EPUB formatted AsciiDoc User Guide] was generated using link:a2x.1.html[a2x]. - This link:book.epub[EPUB formatted book skeleton] was generated using link:a2x.1.html[a2x]. - This link:book-multi.txt[multi-part AsciiDoc book] produced link:book-multi.html[this HTML file] using the 'DocBook XSL Stylesheets'. Example UNIX Man Pages ~~~~~~~~~~~~~~~~~~~~~~ HTML formatted AsciiDoc man pages link:asciidoc.1.css-embedded.html[with stylesheets] and link:asciidoc.1.html[without stylesheets] were generated by AsciiDoc from link:asciidoc.1.txt[this file]. This link:asciidoc.1[roff formatted man page] was generated from asciidoc(1) DocBook output using `xsltproc(1)` and DocBook XSL Stylesheets. [[X8]] Example Slideshows ~~~~~~~~~~~~~~~~~~ The http://www.w3.org/Talks/Tools/Slidy2/[Slidy] backend generates HTML slideshows that can be viewed in any web browser. What's nice is that you can create completely self contained slideshows including embedded images. - Here is the link:slidy.html[slidy backend documentation] slideshow and here is it's link:slidy.txt[AsciiDoc source]. - An link:slidy-example.html[example slidy slideshow] and the link:slidy-example.txt[AsciiDoc source]. Example Web Site ~~~~~~~~~~~~~~~~ The link:README-website.html[AsciiDoc website] is included in the AsciiDoc distribution (in `./examples/website/`) as an example website built using AsciiDoc. See `./examples/website/README-website.txt`. More examples ~~~~~~~~~~~~~ - See below: <>. - Example link:newtables.html[Tables]. eBook Publication ----------------- The two most popular open eBook formats are http://en.wikipedia.org/wiki/EPUB[EPUB] and PDF. The AsciiDoc link:a2x.1.html[a2x] toolchain wrapper makes it easy to link:publishing-ebooks-with-asciidoc.html[publish EPUB and PDF eBooks with AsciiDoc]. See also <> and link:epub-notes.html[AsciiDoc EPUB Notes]). Blogpost weblog client ---------------------- http://srackham.wordpress.com/blogpost-readme/[blogpost] is a command-line weblog client for publishing AsciiDoc documents to http://wordpress.org/[WordPress] blog hosts. 
It creates and updates weblog posts and pages directly from AsciiDoc source documents. Source code highlighter ----------------------- AsciiDoc includes a link:source-highlight-filter.html[source code highlighter filter] that uses http://www.gnu.org/software/src-highlite/[GNU source-highlight] to highlight HTML outputs. You also have the option of using the http://pygments.org/[Pygments] highlighter. [[X3]] Mathematical Formulae --------------------- You can include mathematical formulae in AsciiDoc XHTML documents using link:asciimathml.html[ASCIIMathML] or link:latexmathml.html[LaTeXMathML] notation. The link:latex-filter.html[AsciiDoc LaTeX filter] translates LaTeX source to an image that is automatically inserted into the AsciiDoc output documents. AsciiDoc also has 'latexmath' macros for DocBook outputs -- they are documented in link:latexmath.pdf[this PDF file] and can be used in AsciiDoc documents processed by `dblatex(1)`. Editor Support -------------- - An AsciiDoc syntax highlighter for the Vim text editor is included in the AsciiDoc distribution (see the 'Vim Syntax Highlighter' appendix in the 'AsciiDoc User Guide' for details). + .Syntax highlighter screenshot image::images/highlighter.png[height=400,caption="",link="images/highlighter.png"] - Dag Wieers has implemented an alternative Vim syntax file for AsciiDoc which can be found here http://svn.rpmforge.net/svn/trunk/tools/asciidoc-vim/. - David Avsajanishvili has written a source highlighter for AsciiDoc files for http://projects.gnome.org/gtksourceview/[GtkSourceView] (used by http://projects.gnome.org/gedit/[gedit] and a number of other applications). The project is hosted here: https://launchpad.net/asciidoc-gtk-highlight - AsciiDoc resources for the Emacs editor can be found on the http://www.emacswiki.org/emacs/AsciiDoc[AsciiDoc page] at the http://www.emacswiki.org/emacs/EmacsWiki[Emacs Wiki]. - Christian Zuckschwerdt has written a https://github.com/zuckschwerdt/asciidoc.tmbundle[TextMate bundle] for AsciiDoc. Try AsciiDoc on the Web ----------------------- Andrew Koster has written a Web based application to interactively convert and display AsciiDoc source: http://andrewk.webfactional.com/asciidoc.php [[X2]] External Resources and Applications ----------------------------------- Here are resources that I know of, if you know of more drop me a line and I'll add them to the list. - Check the link:INSTALL.html#X2[installation page] for packaged versions of AsciiDoc. - Alex Efros has written an HTML formatted http://powerman.name/doc/asciidoc[AsciiDoc Cheatsheet] using Asciidoc. - Thomas Berker has written an http://liksom.info/blog/?q=node/114[AsciiDoc Cheatsheet] in Open Document and PDF formats. - The http://www.wikimatrix.org/[WikiMatrix] website has an excellent http://www.wikimatrix.org/syntax.php[web page] that compares the various Wiki markup syntaxes. An interesting attempt at Wiki markup standardization is http://www.wikicreole.org/[CREOLE]. - Franck Pommereau has written http://www.univ-paris12.fr/lacl/pommereau/soft/asciidoctest.html[Asciidoctest], a program that doctests snippets of Python code within your Asciidoc documents. - The http://remips.sourceforge.net/[ReMIPS] project website has been built using AsciiDoc. - Here are some link:asciidoc-docbook-xsl.html[DocBook XSL Stylesheets Notes]. - Karl Mowatt-Wilson has developed an http://ikiwiki.info/[ikiwiki] plugin for AsciiDoc which he uses to render http://mowson.org/karl[his website]. 
The plugin is available http://www.mowson.org/karl/colophon/[here] and there is some discussion of the ikiwiki integration http://ikiwiki.info/users/KarlMW/discussion/[here]. - Glenn Eychaner has http://groups.google.com/group/asciidoc/browse_thread/thread/bf04b55628efe214[reworked the Asciidoc plugin for ikiwiki] that was created by Karl Mowson, the source can be downloaded from http://dl.dropbox.com/u/11256359/asciidoc.pm - David Hajage has written an AsciiDoc package for the http://www.r-project.org/[R Project] (R is a free software environment for statistical computing). 'ascii' is available on 'CRAN' (just run `install.packages("ascii")` from R). Briefly, 'ascii' replaces R results in AsciiDoc document with AsciiDoc markup. More information and examples here: http://eusebe.github.com/ascii/. - Pascal Rapaz has written a Python script to automate AsciiDoc website generation. You can find it at http://www.rapazp.ch/opensource/tools/asciidoc.html. - Jared Henley has written http://jared.henley.id.au/software/awb/documentation.html[AsciiDoc Website Builder]. 'AsciiDoc Website Builder' (awb) is a python program that automates the building of of a website written in AsciiDoc. All you need to write is the AsciiDoc source plus a few simple configuration files. - Brad Adkins has written http://dbixjcl.org/jcl/asciidocgen/asciidocgen.html[AsciiDocGen], a web site generation and deployment tool that allows you write your web site content in AsciiDoc. The http://dbixjcl.org/jcl/asciidocgen/asciidocgen.html[AsciiDocGen web site] is managed using 'AsciiDocGen'. - Filippo Negroni has developed a set of tools to facilitate 'literate programming' using AsciiDoc. The set of tools is called http://eweb.sourceforge.net/[eWEB]. - http://vanderwijk.info/2009/4/23/full-text-based-document-generation-using-asciidoc-and-ditaa[Ivo's blog] describes a http://ditaa.sourceforge.net/[ditaa] filter for AsciiDoc which converts http://en.wikipedia.org/wiki/ASCII_art[ASCII art] into graphics. - http://github.com/github/gollum[Gollum] is a git-powered wiki, it supports various formats, including AsciiDoc. - Gregory Romé has written an http://github.com/gpr/redmine_asciidoc_formatter[AsciiDoc plugin] for the http://www.redmine.org/[Redmine] project management application. - Paul Hsu has started a http://github.com/paulhsu/AsciiDoc.CHT.userguide[Chinese translation of the AsciiDoc User Guide]. - Dag Wieers has written http://dag.wieers.com/home-made/unoconv/[UNOCONV]. 'UNOCONV' can export AsciiDoc outputs to OpenOffice export formats. - Ed Keith has written http://codeextactor.berlios.de/[Code Extractor], it extracts code snippets from source code files and inserts them into AsciiDoc documents. - The http://csrp.iut-blagnac.fr/jmiwebsite/home/[JMI website] hosts a number of extras for AsciiDoc and Slidy written by Jean-Michel Inglebert. - Ryan Tomayko has written an number of http://tomayko.com/src/adoc-themes/[themes for AsciiDoc] along with a http://tomayko.com/src/adoc-themes/hacking.html[script for combining the CSS files] into single CSS theme files for AsciiDoc embedded CSS documents. - Ilya Portnov has written a https://gitorious.org/doc-building-system[document building system for AsciiDoc], here is http://iportnov.blogspot.com/2011/03/asciidoc-beamer.html[short article in Russian] describing it. - Lex Trotman has written https://github.com/elextr/codiicsa[codiicsa], a program that converts DocBook to AsciiDoc. - Qingping Hou has written http://houqp.github.com/asciidoc-deckjs/[an AsciiDoc backend for deck.js]. 
http://imakewebthings.github.com/deck.js/[deck.js] is a JavaScript library for building modern HTML presentations (slideshows). - The guys from O'Reilly Media have posted an https://github.com/oreillymedia/docbook2asciidoc[XSL Stylesheet to github] that converts DocBook to AsciiDoc. - Lex Trotman has written https://github.com/elextr/flexndex[flexndex], an index generator tool that be used with AsciiDoc. - Michael Haberler has created a https://code.google.com/p/asciidoc-diag-filter/[blockdiag filter for Asciidoc] which embeds http://blockdiag.com/[blockdiag] images in AsciiDoc documents. - Dan Allen has written a https://github.com/mojavelinux/asciidoc-bootstrap-docs-backend[Bootstrap backend] for AsciiDoc. - Steven Boscarine has written https://github.com/StevenBoscarine/JavaAsciidocWrapper[Maven wrapper for AsciiDoc]. - Christian Goltz has written https://github.com/christiangoltz/shaape[Shaape], an Ascii art to image converter for AsciiDoc. - Eduardo Santana has written an https://github.com/edusantana/asciidoc-highlight[Asciidoc Highlight for Notepad++]. - http://www.geany.org/[Geany] 1.23 adds document structure support for AsciiDoc. Please let me know if any of these links need updating. [[X6]] Documents written using AsciiDoc -------------------------------- Here are some documents I know of, if you know of more drop me a line and I'll add them to the list. - The book http://practicalunittesting.com/[Practical Unit Testing] by Tomek Kaczanowski was https://groups.google.com/group/asciidoc/browse_frm/thread/4ba13926262efa23[written using Asciidoc]. - The book http://oreilly.com/catalog/9781449397296[Programming iOS 4] by Matt Neuburg was written using AsciiDoc. Matt has http://www.apeth.net/matt/iosbooktoolchain.html[written an article] describing how he used AsciiDoc and other tools to write the book. - The book http://oreilly.com/catalog/9780596155957/index.html[Programming Scala] by Dean Wampler and Alex Payne (O'Reilly) was http://groups.google.com/group/asciidoc/browse_frm/thread/449f1199343f0e27[written using Asciidoc]. - The http://www.ncfaculty.net/dogle/fishR/index.html[fishR] website has a number of http://www.ncfaculty.net/dogle/fishR/bookex/AIFFD/AIFFD.html[book examples] written using AsciiDoc. - The Neo4j graph database project uses Asciidoc, and the output is published here: http://docs.neo4j.org/. The build process includes live tested source code snippets and is described http://groups.google.com/group/asciidoc/browse_thread/thread/49d570062fd3ff52[here]. - http://frugalware.org/[Frugalware Linux] uses AsciiDoc for http://frugalware.org/docs[documentation]. - http://www.cherokee-project.com/doc/[Cherokee documentation]. - Henrik Maier produced this professional User manual using AsciiDoc: http://www.proconx.com/assets/files/products/modg100/UMMBRG300-1101.pdf - Henrik also produced this folded single page brochure format example: http://www.proconx.com/assets/files/products/modg100/IGMBRG300-1101-up.pdf + See this http://groups.google.com/group/asciidoc/browse_thread/thread/16ab5a06864b934f[AsciiDoc discussion group thread] for details. - The http://www.kernel.org/pub/software/scm/git/docs/user-manual.html[Git User's Manual]. 
- 'Git Magic' + http://www-cs-students.stanford.edu/~blynn/gitmagic/ + http://github.com/blynn/gitmagic/tree/1e5780f658962f8f9b01638059b27275cfda095c - 'CouchDB: The Definitive Guide' + http://books.couchdb.org/relax/ + http://groups.google.com/group/asciidoc/browse_thread/thread/a60f67cbbaf862aa/d214bf7fa2d538c4?lnk=gst&q=book#d214bf7fa2d538c4 - 'Ramaze Manual' + http://book.ramaze.net/ + http://github.com/manveru/ramaze-book/tree/master - Some documentation about git by Nico Schottelius (in German) http://nico.schotteli.us/papers/linux/git-firmen/. - The http://www.netpromi.com/kirbybase_ruby.html[KirbyBase for Ruby] database management system manual. - The http://xpt.sourceforge.net/[*Nix Power Tools project] uses AsciiDoc for documentation. - The http://www.wesnoth.org/[Battle for Wesnoth] project uses AsciiDoc for its http://www.wesnoth.org/wiki/WesnothManual[Manual] in a number of different languages. - Troy Hanson uses AsciiDoc to generate user guides for the http://tpl.sourceforge.net/[tpl] and http://uthash.sourceforge.net/[uthash] projects (the HTML versions have a customised contents sidebar). - http://volnitsky.com/[Leonid Volnitsky's site] is generated using AsciiDoc and includes Leonid's matplotlib filter. - http://www.weechat.org/[WeeChat] uses AsciiDoc for http://www.weechat.org/doc[project documentation]. - http://www.clansuite.com/[Clansuite] uses AsciiDoc for http://www.clansuite.com/documentation/[project documentation]. - The http://fc-solve.berlios.de/[Freecell Solver program] uses AsciiDoc for its http://fc-solve.berlios.de/docs/#distributed-docs[distributed documentation]. - Eric Raymond's http://gpsd.berlios.de/AIVDM.html[AIVDM/AIVDO protocol decoding] documentation is written using AsciiDoc. - Dwight Schauer has written an http://lxc.teegra.net/[LXC HOWTO] in AsciiDoc. - The http://www.rowetel.com/ucasterisk/[Free Telephony Project] website is generated using AsciiDoc. - Warren Block has http://www.wonkity.com/~wblock/docs/[posted a number of articles written using AsciiDoc]. - The http://code.google.com/p/waf/[Waf project's] 'Waf Book' is written using AsciiDoc, there is an http://waf.googlecode.com/svn/docs/wafbook/single.html[HTML] and a http://waf.googlecode.com/svn/docs/wafbook/waf.pdf[PDF] version. - The http://www.diffkit.org/[DiffKit] project's documentation and website have been written using Asciidoc. - The http://www.networkupstools.org[Network UPS Tools] project http://www.networkupstools.org/documentation.html[documentation] is an example of a large documentation project written using AsciiDoc. - http://www.archlinux.org/pacman/[Pacman], the http://www.archlinux.org/[Arch Linux] package manager, has been documented using AsciiDoc. - Suraj Kurapati has written a number of customized manuals for his Open Source projects using AsciiDoc: * http://snk.tuxfamily.org/lib/detest/ * http://snk.tuxfamily.org/lib/ember/ * http://snk.tuxfamily.org/lib/inochi/ * http://snk.tuxfamily.org/lib/rumai/ - The http://cxxtest.com/[CxxTest] project (unit testing for C++ language) has written its User Guide using AsciiDoc. Please let me know if any of these links need updating. DocBook 5.0 Backend ------------------- Shlomi Fish has begun work on a DocBook 5.0 `docbook50.conf` backend configuration file, you can find it http://bitbucket.org/shlomif/asciidoc[here]. See also: http://groups.google.com/group/asciidoc/browse_thread/thread/4386c7cc053d51a9 [[X1]] LaTeX Backend ------------- An experimental LaTeX backend was written for AsciiDoc in 2006 by Benjamin Klum. 
Benjamin did a superhuman job (I admit it, I didn't think this was doable due to AsciiDoc's SGML/XML bias). Owing to other commitments, Benjamin was unable to maintain this backend. Here's link:latex-backend.html[Benjamin's original documentation]. Incompatibilities introduced after AsciiDoc 8.2.7 broke the LaTeX backend. In 2009 Geoff Eddy stepped up and updated the LaTeX backend; thanks to Geoff's efforts it now works with AsciiDoc 8.4.3. Geoff's updated `latex.conf` file shipped with AsciiDoc version 8.4.4. The backend still has limitations and remains experimental (see link:latex-bugs.html[Geoff's notes]). It's probably also worth pointing out that LaTeX output can be generated by passing AsciiDoc generated DocBook through `dblatex(1)`. Patches and bug reports ----------------------- Patches and bug reports are encouraged, but please try to follow these guidelines: - Post bug reports and patches to the http://groups.google.com/group/asciidoc[asciidoc discussion list], this keeps things transparent and gives everyone a chance to comment. - The email subject line should be a specific and concise topic summary. Commonly accepted subject line prefixes such as '[ANN]', '[PATCH]' and '[SOLVED]' are good. === Bug reports - When reporting problems please illustrate the problem with the smallest possible example that replicates the issue (and please test your example before posting). This technique will also help to eliminate red herrings prior to posting. - Paste the commands that you executed along with any relevant outputs. - Include the version of AsciiDoc and the platform you're running it on. - If you can program please consider writing a patch to fix the problem. === Patches - Keep patches small and atomic (one issue per patch) -- no patch bombs. - If possible test your patch against the current trunk. - If your patch adds or modifies functionality include a short example that illustrates the changes. - Send patches in `diff -u` format, inline inside the mail message is usually best; if it is a very long patch then send it as an attachment. - Include documentation updates if you're up to it; otherwise insert 'TODO' comments at relevant places in the documentation. asciidoctor-1.5.5/test/fixtures/basic-docinfo-footer.html000066400000000000000000000003571277513741400235570ustar00rootroot00000000000000 asciidoctor-1.5.5/test/fixtures/basic-docinfo-footer.xml000066400000000000000000000003021277513741400234010ustar00rootroot00000000000000 {revnumber} 01 Jan 2013 abc Unleashed into the wild asciidoctor-1.5.5/test/fixtures/basic-docinfo.html000066400000000000000000000000451277513741400222550ustar00rootroot00000000000000 asciidoctor-1.5.5/test/fixtures/basic-docinfo.xml000066400000000000000000000001631277513741400221120ustar00rootroot00000000000000 2013 Acme™, Inc. asciidoctor-1.5.5/test/fixtures/basic.asciidoc000066400000000000000000000001261277513741400214500ustar00rootroot00000000000000= Document Title Doc Writer v1.0, 2013-01-01 Body content.
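The basic.asciidoc fixture just above is the input document driven by the convert_file and docinfo tests earlier in this file. The following is only a minimal sketch of that call pattern, assuming the repository root as the working directory and reusing the same options those tests pass; it does not add anything beyond what the tests already exercise.

[source,ruby]
----
require 'asciidoctor'

# Convert the basic.asciidoc fixture with the private docinfo file enabled,
# mirroring the docinfo tests above. With :to_file => false the rendered
# document is returned as a String instead of being written to disk.
output = Asciidoctor.convert_file 'test/fixtures/basic.asciidoc',
  :to_file => false,
  :header_footer => true,
  :safe => Asciidoctor::SafeMode::SERVER,
  :attributes => { 'docinfo' => '' }

puts output
----

The docinfo fixtures that call picks up (basic-docinfo.html and friends) are the files listed immediately before basic.asciidoc above.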
asciidoctor-1.5.5/test/fixtures/chapter-a.adoc000066400000000000000000000000251277513741400213610ustar00rootroot00000000000000= Chapter A content asciidoctor-1.5.5/test/fixtures/child-include.adoc000066400000000000000000000001141277513741400222200ustar00rootroot00000000000000first line of child include::grandchild-include.adoc[] last line of child asciidoctor-1.5.5/test/fixtures/circle.svg000066400000000000000000000005201277513741400206470ustar00rootroot00000000000000 asciidoctor-1.5.5/test/fixtures/custom-backends/000077500000000000000000000000001277513741400217525ustar00rootroot00000000000000asciidoctor-1.5.5/test/fixtures/custom-backends/erb/000077500000000000000000000000001277513741400225225ustar00rootroot00000000000000asciidoctor-1.5.5/test/fixtures/custom-backends/erb/html5/000077500000000000000000000000001277513741400235535ustar00rootroot00000000000000asciidoctor-1.5.5/test/fixtures/custom-backends/erb/html5/block_paragraph.html.erb000066400000000000000000000003011277513741400303210ustar00rootroot00000000000000<%#encoding:UTF-8%> class="<%= ['paragraph',role].compact * ' ' %>"><% if title? %>
    <%= title %>
    <% end %>

    <%= content %>

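The block_paragraph.html.erb fixture above is one of the custom converter templates these fixtures provide; Haml and Slim variants of the same templates follow. Below is a minimal sketch of how such a template directory can be plugged into a conversion. The fixture path is taken from the layout shown here, and the option values are illustrative assumptions rather than a canonical recipe.

[source,ruby]
----
require 'asciidoctor'

# Render a paragraph through the custom ERB templates instead of the
# built-in HTML5 converter. Node types without a matching template in the
# supplied directory fall back to the default converter. The path below is
# an assumption based on the fixture layout above.
html = Asciidoctor.convert 'Hello from a custom template.',
  :safe => :safe,
  :template_dirs => ['test/fixtures/custom-backends/erb/html5']

puts html
----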
    asciidoctor-1.5.5/test/fixtures/custom-backends/haml/000077500000000000000000000000001277513741400226735ustar00rootroot00000000000000asciidoctor-1.5.5/test/fixtures/custom-backends/haml/docbook45/000077500000000000000000000000001277513741400244645ustar00rootroot00000000000000asciidoctor-1.5.5/test/fixtures/custom-backends/haml/docbook45/block_paragraph.xml.haml000066400000000000000000000003141277513741400312430ustar00rootroot00000000000000- if title? %formalpara{:id=>@id, :role=>(attr :role), :xreflabel=>(attr :reftext)} %title=title %para=content - else %para{:id=>@id, :role=>(attr :role), :xreflabel=>(attr :reftext)}=content asciidoctor-1.5.5/test/fixtures/custom-backends/haml/html5-tweaks/000077500000000000000000000000001277513741400252205ustar00rootroot00000000000000asciidoctor-1.5.5/test/fixtures/custom-backends/haml/html5-tweaks/block_paragraph.html.haml000066400000000000000000000000131277513741400321370ustar00rootroot00000000000000%p=content asciidoctor-1.5.5/test/fixtures/custom-backends/haml/html5/000077500000000000000000000000001277513741400237245ustar00rootroot00000000000000asciidoctor-1.5.5/test/fixtures/custom-backends/haml/html5/block_paragraph.html.haml000066400000000000000000000001071277513741400306470ustar00rootroot00000000000000- if title? .title=title %p{:id=>@id, :class=>(attr 'role')}=content asciidoctor-1.5.5/test/fixtures/custom-backends/haml/html5/block_sidebar.html.haml000066400000000000000000000001431277513741400303130ustar00rootroot00000000000000%aside{:id=>@id, :class=>(attr 'role')} - if title? %header %h1=title =content.chomp asciidoctor-1.5.5/test/fixtures/custom-backends/slim/000077500000000000000000000000001277513741400227165ustar00rootroot00000000000000asciidoctor-1.5.5/test/fixtures/custom-backends/slim/docbook45/000077500000000000000000000000001277513741400245075ustar00rootroot00000000000000asciidoctor-1.5.5/test/fixtures/custom-backends/slim/docbook45/block_paragraph.xml.slim000066400000000000000000000002671277513741400313200ustar00rootroot00000000000000- if title? formalpara id=@id role=(attr :role) xreflabel=(attr :reftext) title=title para=content - else para id=@id role=(attr :role) xreflabel=(attr :reftext) =content asciidoctor-1.5.5/test/fixtures/custom-backends/slim/html5/000077500000000000000000000000001277513741400237475ustar00rootroot00000000000000asciidoctor-1.5.5/test/fixtures/custom-backends/slim/html5/block_paragraph.html.slim000066400000000000000000000001011277513741400307070ustar00rootroot00000000000000- if title? .title=title p id=@id class=(attr 'role') =content asciidoctor-1.5.5/test/fixtures/custom-backends/slim/html5/block_sidebar.html.slim000066400000000000000000000001241277513741400303600ustar00rootroot00000000000000aside id=@id class=(attr 'role') - if title? 
header h1=title =content asciidoctor-1.5.5/test/fixtures/custom-docinfodir/000077500000000000000000000000001277513741400223205ustar00rootroot00000000000000asciidoctor-1.5.5/test/fixtures/custom-docinfodir/basic-docinfo.html000066400000000000000000000000451277513741400257050ustar00rootroot00000000000000 asciidoctor-1.5.5/test/fixtures/custom-docinfodir/docinfo.html000066400000000000000000000000541277513741400246260ustar00rootroot00000000000000 asciidoctor-1.5.5/test/fixtures/docinfo-footer.html000066400000000000000000000000451277513741400224720ustar00rootroot00000000000000Back to top asciidoctor-1.5.5/test/fixtures/docinfo-footer.xml000066400000000000000000000002521277513741400223260ustar00rootroot00000000000000 Glossary term definition asciidoctor-1.5.5/test/fixtures/docinfo.html000066400000000000000000000000611277513741400211740ustar00rootroot00000000000000 asciidoctor-1.5.5/test/fixtures/docinfo.xml000066400000000000000000000001561277513741400210350ustar00rootroot00000000000000Asciidoctor™ 1.0.0 {revnumber} asciidoctor-1.5.5/test/fixtures/dot.gif000066400000000000000000000000431277513741400201420ustar00rootroot00000000000000GIF89a,D;asciidoctor-1.5.5/test/fixtures/encoding.asciidoc000066400000000000000000000006501277513741400221570ustar00rootroot00000000000000Gregory Romé has written an AsciiDoc plugin for the Redmine project management application. https://github.com/foo-users/foo へと `vicmd` キーマップを足してみている試み、 アニメーションgifです。 tag::romé[] Gregory Romé has written an AsciiDoc plugin for the Redmine project management application. end::romé[] == Überschrift * Codierungen sind verrückt auf älteren Versionen von Ruby asciidoctor-1.5.5/test/fixtures/grandchild-include.adoc000066400000000000000000000000621277513741400232360ustar00rootroot00000000000000first line of grandchild last line of grandchild asciidoctor-1.5.5/test/fixtures/hello-asciidoctor.pdf000066400000000000000000001305601277513741400227740ustar00rootroot00000000000000%PDF-1.4
asciidoctor-1.5.5/test/fixtures/include-file.asciidoc000066400000000000000000000007351277513741400227350ustar00rootroot00000000000000first line of included content second line of included content third line of included content fourth line of included content fifth line of included content sixth line of
included content seventh line of included content eighth line of included content // tag::snippet[] // tag::snippetA[] snippetA content // end::snippetA[] non-tagged content // tag::snippetB[] snippetB content // end::snippetB[] // end::snippet[] more non-tagged content last line of included content asciidoctor-1.5.5/test/fixtures/include-file.xml000066400000000000000000000001401277513741400217450ustar00rootroot00000000000000 content asciidoctor-1.5.5/test/fixtures/master.adoc000066400000000000000000000001051277513741400210070ustar00rootroot00000000000000= Master Document preamble include::chapter-a.adoc[leveloffset=+1] asciidoctor-1.5.5/test/fixtures/parent-include-restricted.adoc000066400000000000000000000001201277513741400245710ustar00rootroot00000000000000first line of parent include::child-include.adoc[depth=1] last line of parent asciidoctor-1.5.5/test/fixtures/parent-include.adoc000066400000000000000000000001111277513741400224230ustar00rootroot00000000000000first line of parent include::child-include.adoc[] last line of parent asciidoctor-1.5.5/test/fixtures/sample.asciidoc000066400000000000000000000005001277513741400216440ustar00rootroot00000000000000Document Title ============== Doc Writer :idprefix: id_ Preamble paragraph. NOTE: This is test, only a test. == Section A *Section A* paragraph. === Section A Subsection *Section A* 'subsection' paragraph. == Section B *Section B* paragraph. .Section B list * Item 1 * Item 2 * Item 3 asciidoctor-1.5.5/test/fixtures/stylesheets/000077500000000000000000000000001277513741400212445ustar00rootroot00000000000000asciidoctor-1.5.5/test/fixtures/stylesheets/custom.css000066400000000000000000000000271277513741400232670ustar00rootroot00000000000000body { color: red; } asciidoctor-1.5.5/test/fixtures/subs-docinfo.html000066400000000000000000000001561277513741400221530ustar00rootroot00000000000000 asciidoctor-1.5.5/test/fixtures/subs.adoc000066400000000000000000000001611277513741400204720ustar00rootroot00000000000000= Document Title Doc Writer v1.0, 2013-01-01 :bootstrap-version: 3.2.0 Body content. asciidoctor-1.5.5/test/fixtures/tip.gif000066400000000000000000000000431277513741400201500ustar00rootroot00000000000000GIF89a,D;asciidoctor-1.5.5/test/invoker_test.rb000066400000000000000000000527531277513741400200740ustar00rootroot00000000000000# encoding: UTF-8 unless defined? ASCIIDOCTOR_PROJECT_DIR $: << File.dirname(__FILE__); $:.uniq! require 'test_helper' end require 'asciidoctor/cli/options' require 'asciidoctor/cli/invoker' context 'Invoker' do test 'should parse source and render as html5 article by default' do invoker = nil output = nil redirect_streams do |out, err| invoker = invoke_cli %w(-o -) output = out.string end assert !invoker.nil? doc = invoker.document assert !doc.nil? assert_equal 'Document Title', doc.doctitle assert_equal 'Doc Writer', doc.attr('author') assert_equal 'html5', doc.attr('backend') assert_equal '.html', doc.attr('outfilesuffix') assert_equal 'article', doc.attr('doctype') assert doc.blocks? assert_equal :preamble, doc.blocks.first.context assert !output.empty? 
assert_xpath '/html', output, 1 assert_xpath '/html/head', output, 1 assert_xpath '/html/body', output, 1 assert_xpath '/html/head/title[text() = "Document Title"]', output, 1 assert_xpath '/html/body[@class="article"]/*[@id="header"]/h1[text() = "Document Title"]', output, 1 end test 'should set implicit doc info attributes' do sample_filepath = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'sample.asciidoc')) sample_filedir = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures')) invoker = invoke_cli_to_buffer %w(-o /dev/null), sample_filepath doc = invoker.document assert_equal 'sample', doc.attr('docname') assert_equal sample_filepath, doc.attr('docfile') assert_equal sample_filedir, doc.attr('docdir') assert doc.attr?('docdate') assert doc.attr?('doctime') assert doc.attr?('docdatetime') assert invoker.read_output.empty? end test 'should allow docdate and doctime to be overridden' do sample_filepath = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'sample.asciidoc')) invoker = invoke_cli_to_buffer %w(-o /dev/null -a docdate=2015-01-01 -a doctime=10:00:00-07:00), sample_filepath doc = invoker.document assert doc.attr?('docdate', '2015-01-01') assert doc.attr?('doctime', '10:00:00-07:00') assert doc.attr?('docdatetime', '2015-01-01 10:00:00-07:00') end test 'should accept document from stdin and write to stdout' do invoker = invoke_cli_to_buffer(%w(-s), '-') { 'content' } doc = invoker.document assert !doc.attr?('docname') assert !doc.attr?('docfile') assert_equal Dir.pwd, doc.attr('docdir') assert_equal doc.attr('docdate'), doc.attr('localdate') assert_equal doc.attr('doctime'), doc.attr('localtime') assert_equal doc.attr('docdatetime'), doc.attr('localdatetime') assert !doc.attr?('outfile') output = invoker.read_output assert !output.empty? 
assert_xpath '/*[@class="paragraph"]/p[text()="content"]', output, 1 end test 'should not fail to rewind input if reading document from stdin' do io = STDIN.dup class << io def readlines ['paragraph'] end end invoker = invoke_cli_to_buffer(%w(-s), '-') { io } assert_equal 0, invoker.code assert_equal 1, invoker.document.blocks.size end test 'should accept document from stdin and write to output file' do sample_outpath = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'sample-output.html')) begin invoker = invoke_cli(%W(-s -o #{sample_outpath}), '-') { 'content' } doc = invoker.document assert !doc.attr?('docname') assert !doc.attr?('docfile') assert_equal Dir.pwd, doc.attr('docdir') assert_equal doc.attr('docdate'), doc.attr('localdate') assert_equal doc.attr('doctime'), doc.attr('localtime') assert_equal doc.attr('docdatetime'), doc.attr('localdatetime') assert doc.attr?('outfile') assert_equal sample_outpath, doc.attr('outfile') assert File.exist?(sample_outpath) ensure FileUtils.rm_f(sample_outpath) end end test 'should allow docdir to be specified when input is a string' do expected_docdir = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures')) invoker = invoke_cli_to_buffer(%w(-s --base-dir test/fixtures -o /dev/null), '-') { 'content' } doc = invoker.document assert_equal expected_docdir, doc.attr('docdir') assert_equal expected_docdir, doc.base_dir end test 'should display version and exit' do expected = %(Asciidoctor #{Asciidoctor::VERSION} [http://asciidoctor.org]\nRuntime Environment (#{RUBY_DESCRIPTION})) ['--version', '-V'].each do |switch| actual = nil redirect_streams do |out, err| invoke_cli [switch] actual = out.string.rstrip end refute_nil actual assert actual.start_with?(expected), %(Expected to print version when using #{switch} switch) end end test 'should print warnings to stderr by default' do input = <<-EOS 2. second 3. third EOS warnings = nil redirect_streams do |out, err| invoke_cli_to_buffer(%w(-o /dev/null), '-') { input } warnings = err.string end assert_match(/WARNING/, warnings) end test 'should silence warnings if -q flag is specified' do input = <<-EOS 2. second 3. third EOS warnings = nil redirect_streams do |out, err| invoke_cli_to_buffer(%w(-q -o /dev/null), '-') { input } warnings = err.string end assert_equal '', warnings end test 'should report usage if no input file given' do redirect_streams do |out, err| invoke_cli [], nil assert_match(/Usage:/, err.string) end end test 'should report error if input file does not exist' do redirect_streams do |out, err| invoker = invoke_cli [], 'missing_file.asciidoc' assert_match(/input file .* missing or cannot be read/, err.string) assert_equal 1, invoker.code end end test 'should treat extra arguments as files' do redirect_streams do |out, err| invoker = invoke_cli %w(-o /dev/null extra arguments sample.asciidoc), nil assert_match(/input file .* missing or cannot be read/, err.string) assert_equal 1, invoker.code end end test 'should output to file name based on input file name' do sample_outpath = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'sample.html')) begin invoker = invoke_cli doc = invoker.document assert_equal sample_outpath, doc.attr('outfile') assert File.exist?(sample_outpath) output = File.read(sample_outpath) assert !output.empty? 
assert_xpath '/html', output, 1 assert_xpath '/html/head', output, 1 assert_xpath '/html/body', output, 1 assert_xpath '/html/head/title[text() = "Document Title"]', output, 1 assert_xpath '/html/body/*[@id="header"]/h1[text() = "Document Title"]', output, 1 ensure FileUtils.rm_f(sample_outpath) end end test 'should output to file in destination directory if set' do destination_path = File.expand_path(File.join(File.dirname(__FILE__), 'test_output')) sample_outpath = File.join(destination_path, 'sample.html') begin FileUtils.mkdir_p(destination_path) # QUESTION should -D be relative to working directory or source directory? invoker = invoke_cli %w(-D test/test_output) #invoker = invoke_cli %w(-D ../../test/test_output) doc = invoker.document assert_equal sample_outpath, doc.attr('outfile') assert File.exist?(sample_outpath) ensure FileUtils.rm_f(sample_outpath) FileUtils.rmdir(destination_path) end end test 'should output to file specified' do sample_outpath = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'sample-output.html')) begin invoker = invoke_cli %W(-o #{sample_outpath}) doc = invoker.document assert_equal sample_outpath, doc.attr('outfile') assert File.exist?(sample_outpath) ensure FileUtils.rm_f(sample_outpath) end end test 'should copy default stylesheet to target directory if linkcss is specified' do sample_outpath = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'sample-output.html')) asciidoctor_stylesheet = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'asciidoctor.css')) coderay_stylesheet = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'coderay-asciidoctor.css')) begin invoker = invoke_cli %W(-o #{sample_outpath} -a linkcss -a source-highlighter=coderay) invoker.document assert File.exist?(sample_outpath) assert File.exist?(asciidoctor_stylesheet) assert File.exist?(coderay_stylesheet) ensure FileUtils.rm_f(sample_outpath) FileUtils.rm_f(asciidoctor_stylesheet) FileUtils.rm_f(coderay_stylesheet) end end test 'should not copy default stylesheet to target directory if linkcss is set and copycss is unset' do sample_outpath = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'sample-output.html')) default_stylesheet = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'asciidoctor.css')) begin invoker = invoke_cli %W(-o #{sample_outpath} -a linkcss -a copycss!) 
invoker.document assert File.exist?(sample_outpath) assert !File.exist?(default_stylesheet) ensure FileUtils.rm_f(sample_outpath) FileUtils.rm_f(default_stylesheet) end end test 'should copy custom stylesheet to target directory if stylesheet and linkcss is specified' do destdir = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'output')) sample_outpath = File.join destdir, 'sample-output.html' stylesdir = File.join destdir, 'styles' custom_stylesheet = File.join stylesdir, 'custom.css' begin invoker = invoke_cli %W(-o #{sample_outpath} -a linkcss -a copycss=stylesheets/custom.css -a stylesdir=./styles -a stylesheet=custom.css) invoker.document assert File.exist?(sample_outpath) assert File.exist?(custom_stylesheet) ensure FileUtils.rm_f(sample_outpath) FileUtils.rm_f(custom_stylesheet) FileUtils.rmdir(stylesdir) FileUtils.rmdir(destdir) end end test 'should not copy custom stylesheet to target directory if stylesheet and linkcss are set and copycss is unset' do destdir = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'output')) sample_outpath = File.join destdir, 'sample-output.html' stylesdir = File.join destdir, 'styles' custom_stylesheet = File.join stylesdir, 'custom.css' begin invoker = invoke_cli %W(-o #{sample_outpath} -a linkcss -a stylesdir=./styles -a stylesheet=custom.css -a copycss!) invoker.document assert File.exist?(sample_outpath) assert !File.exist?(custom_stylesheet) ensure FileUtils.rm_f(sample_outpath) FileUtils.rm_f(custom_stylesheet) FileUtils.rmdir(stylesdir) if File.directory? stylesdir FileUtils.rmdir(destdir) end end test 'should not copy custom stylesheet to target directory if stylesdir is a URI' do destdir = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'output')) sample_outpath = File.join destdir, 'sample-output.html' stylesdir = File.join destdir, 'http:' begin invoker = invoke_cli %W(-o #{sample_outpath} -a linkcss -a stylesdir=http://example.org/styles -a stylesheet=custom.css) invoker.document assert File.exist?(sample_outpath) assert !File.exist?(stylesdir) ensure FileUtils.rm_f(sample_outpath) FileUtils.rmdir(stylesdir) if File.directory? 
stylesdir FileUtils.rmdir(destdir) end end test 'should render all passed files' do basic_outpath = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'basic.html')) sample_outpath = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'sample.html')) begin invoke_cli_with_filenames [], %w(basic.asciidoc sample.asciidoc) assert File.exist?(basic_outpath) assert File.exist?(sample_outpath) ensure FileUtils.rm_f(basic_outpath) FileUtils.rm_f(sample_outpath) end end test 'options should not be modified when processing multiple files' do destination_path = File.expand_path(File.join(File.dirname(__FILE__), 'test_output')) basic_outpath = File.join(destination_path, 'basic.htm') sample_outpath = File.join(destination_path, 'sample.htm') begin invoke_cli_with_filenames %w(-D test/test_output -a outfilesuffix=.htm), %w(basic.asciidoc sample.asciidoc) assert File.exist?(basic_outpath) assert File.exist?(sample_outpath) ensure FileUtils.rm_f(basic_outpath) FileUtils.rm_f(sample_outpath) FileUtils.rmdir(destination_path) end end test 'should render all files that matches a glob expression' do basic_outpath = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'basic.html')) begin invoke_cli_to_buffer [], "ba*.asciidoc" assert File.exist?(basic_outpath) ensure FileUtils.rm_f(basic_outpath) end end test 'should render all files that matches an absolute path glob expression' do basic_outpath = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'basic.html')) glob = File.join(File.dirname(__FILE__), 'fixtures', 'ba*.asciidoc') # test Windows using backslash-style pathname if ::File::ALT_SEPARATOR == '\\' glob = glob.tr '/', '\\' end begin invoke_cli_to_buffer [], glob assert File.exist?(basic_outpath) ensure FileUtils.rm_f(basic_outpath) end end test 'should suppress header footer if specified' do invoker = invoke_cli_to_buffer %w(-s -o -) output = invoker.read_output assert_xpath '/html', output, 0 assert_xpath '/*[@id="preamble"]', output, 1 end test 'should output a trailing endline to stdout' do invoker = nil output = nil redirect_streams do |out, err| invoker = invoke_cli %w(-o -) output = out.string end assert !invoker.nil? assert !output.nil? 
assert output.end_with?("\n") end test 'should set backend to html5 if specified' do invoker = invoke_cli_to_buffer %w(-b html5 -o -) doc = invoker.document assert_equal 'html5', doc.attr('backend') assert_equal '.html', doc.attr('outfilesuffix') output = invoker.read_output assert_xpath '/html', output, 1 end test 'should set backend to docbook45 if specified' do invoker = invoke_cli_to_buffer %w(-b docbook45 -a xmlns -o -) doc = invoker.document assert_equal 'docbook45', doc.attr('backend') assert_equal '.xml', doc.attr('outfilesuffix') output = invoker.read_output assert_xpath '/xmlns:article', output, 1 end test 'should set doctype to article if specified' do invoker = invoke_cli_to_buffer %w(-d article -o -) doc = invoker.document assert_equal 'article', doc.attr('doctype') output = invoker.read_output assert_xpath '/html/body[@class="article"]', output, 1 end test 'should set doctype to book if specified' do invoker = invoke_cli_to_buffer %w(-d book -o -) doc = invoker.document assert_equal 'book', doc.attr('doctype') output = invoker.read_output assert_xpath '/html/body[@class="book"]', output, 1 end test 'should locate custom templates based on template dir, template engine and backend' do custom_backend_root = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends')) invoker = invoke_cli_to_buffer %W(-E haml -T #{custom_backend_root} -o -) doc = invoker.document assert doc.converter.is_a? Asciidoctor::Converter::CompositeConverter selected = doc.converter.find_converter 'paragraph' assert selected.is_a? Asciidoctor::Converter::TemplateConverter assert selected.templates['paragraph'].is_a? Tilt::HamlTemplate end test 'should load custom templates from multiple template directories' do custom_backend_1 = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends/haml/html5')) custom_backend_2 = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'custom-backends/haml/html5-tweaks')) invoker = invoke_cli_to_buffer %W(-T #{custom_backend_1} -T #{custom_backend_2} -o - -s) output = invoker.read_output assert_css '.paragraph', output, 0 assert_css '#preamble > .sectionbody > p', output, 1 end test 'should set attribute with value' do invoker = invoke_cli_to_buffer %w(--trace -a idprefix=id -s -o -) doc = invoker.document assert_equal 'id', doc.attr('idprefix') output = invoker.read_output assert_xpath '//h2[@id="idsection_a"]', output, 1 end test 'should set attribute with value containing equal sign' do invoker = invoke_cli_to_buffer %w(--trace -a toc -a toc-title=t=o=c -o -) doc = invoker.document assert_equal 't=o=c', doc.attr('toc-title') output = invoker.read_output assert_xpath '//*[@id="toctitle"][text() = "t=o=c"]', output, 1 end test 'should set attribute with quoted value containing a space' do # emulating commandline arguments: --trace -a toc -a note-caption="Note to self:" -o - invoker = invoke_cli_to_buffer %w(--trace -a toc -a note-caption=Note\ to\ self: -o -) doc = invoker.document assert_equal 'Note to self:', doc.attr('note-caption') output = invoker.read_output assert_xpath %(//*[#{contains_class('admonitionblock')}]//*[@class='title'][text() = 'Note to self:']), output, 1 end test 'should not set attribute ending in @ if defined in document' do invoker = invoke_cli_to_buffer %w(--trace -a idprefix=id@ -s -o -) doc = invoker.document assert_equal 'id_', doc.attr('idprefix') output = invoker.read_output assert_xpath '//h2[@id="id_section_a"]', output, 1 end test 'should set attribute with no value' do invoker = 
invoke_cli_to_buffer %w(-a icons -s -o -) doc = invoker.document assert_equal '', doc.attr('icons') output = invoker.read_output assert_xpath '//*[@class="admonitionblock note"]//img[@alt="Note"]', output, 1 end test 'should unset attribute ending in bang' do invoker = invoke_cli_to_buffer %w(-a sectids! -s -o -) doc = invoker.document assert !doc.attr?('sectids') output = invoker.read_output # leave the count loose in case we add more sections assert_xpath '//h2[not(@id)]', output end test 'default mode for cli should be unsafe' do invoker = invoke_cli_to_buffer %w(-o /dev/null) doc = invoker.document assert_equal Asciidoctor::SafeMode::UNSAFE, doc.safe end test 'should set safe mode if specified' do invoker = invoke_cli_to_buffer %w(--safe -o /dev/null) doc = invoker.document assert_equal Asciidoctor::SafeMode::SAFE, doc.safe end test 'should set safe mode to specified level' do levels = { 'unsafe' => Asciidoctor::SafeMode::UNSAFE, 'safe' => Asciidoctor::SafeMode::SAFE, 'server' => Asciidoctor::SafeMode::SERVER, 'secure' => Asciidoctor::SafeMode::SECURE, } levels.each do |name, const| invoker = invoke_cli_to_buffer %W(-S #{name} -o /dev/null) doc = invoker.document assert_equal const, doc.safe end end test 'should set eRuby impl if specified' do invoker = invoke_cli_to_buffer %w(--eruby erubis -o /dev/null) doc = invoker.document assert_equal 'erubis', doc.instance_variable_get('@options')[:eruby] end test 'should force default external encoding to UTF-8' do executable = File.expand_path(File.join(File.dirname(__FILE__), '..', 'bin', 'asciidoctor')) input_path = fixture_path 'encoding.asciidoc' old_lang = ENV['LANG'] ENV['LANG'] = 'US-ASCII' begin # using open3 to work around a bug in JRuby process_manager.rb, # which tries to run a gsub on stdout prematurely breaking the test require 'open3' #cmd = "#{executable} -o - --trace #{input_path}" cmd = "#{File.join RbConfig::CONFIG['bindir'], RbConfig::CONFIG['ruby_install_name']} #{executable} -o - --trace #{input_path}" _, out, _ = Open3.popen3 cmd #stderr_lines = stderr.readlines # warnings may be issued, so don't assert on stderr #assert stderr_lines.empty?, 'Command failed. Expected to receive a rendered document.' stdout_lines = out.readlines assert !stdout_lines.empty? stdout_lines.each {|l| l.force_encoding Encoding::UTF_8 } if Asciidoctor::FORCE_ENCODING stdout_str = stdout_lines.join assert stdout_str.include?('Codierungen sind verrückt auf älteren Versionen von Ruby') ensure ENV['LANG'] = old_lang end end test 'should print timings when -t flag is specified' do input = <<-EOS Sample *AsciiDoc* EOS invoker = nil error = nil redirect_streams do |out, err| invoker = invoke_cli(%w(-t -o /dev/null), '-') { input } error = err.string end assert !invoker.nil? assert !error.nil? 
assert_match(/Total time/, error) end test 'should use SOURCE_DATE_EPOCH as modified time of input file and local time' do old_source_date_epoch = ENV.delete 'SOURCE_DATE_EPOCH' begin ENV['SOURCE_DATE_EPOCH'] = '1234123412' sample_filepath = File.expand_path(File.join(File.dirname(__FILE__), 'fixtures', 'sample.asciidoc')) invoker = invoke_cli_to_buffer %w(-o /dev/null), sample_filepath doc = invoker.document assert_equal '2009-02-08', (doc.attr 'docdate') assert_match(/2009-02-08 20:03:32 (GMT|UTC)/, (doc.attr 'docdatetime')) assert_equal '2009-02-08', (doc.attr 'localdate') assert_match(/2009-02-08 20:03:32 (GMT|UTC)/, (doc.attr 'localdatetime')) ensure ENV['SOURCE_DATE_EPOCH'] = old_source_date_epoch if old_source_date_epoch end end end asciidoctor-1.5.5/test/links_test.rb000066400000000000000000000421101277513741400175210ustar00rootroot00000000000000# encoding: UTF-8 unless defined? ASCIIDOCTOR_PROJECT_DIR $: << File.dirname(__FILE__); $:.uniq! require 'test_helper' end context 'Links' do test 'qualified url inline with text' do assert_xpath "//a[@href='http://asciidoc.org'][@class='bare'][text() = 'http://asciidoc.org']", render_string("The AsciiDoc project is located at http://asciidoc.org.") end test 'qualified http url inline with hide-uri-scheme set' do assert_xpath "//a[@href='http://asciidoc.org'][@class='bare'][text() = 'asciidoc.org']", render_string("The AsciiDoc project is located at http://asciidoc.org.", :attributes => {'hide-uri-scheme' => ''}) end test 'qualified file url inline with label' do assert_xpath "//a[@href='file:///home/user/bookmarks.html'][text() = 'My Bookmarks']", render_embedded_string('file:///home/user/bookmarks.html[My Bookmarks]') end test 'qualified file url inline with hide-uri-scheme set' do assert_xpath "//a[@href='file:///etc/app.conf'][text() = '/etc/app.conf']", render_string('Edit the configuration file link:file:///etc/app.conf[]', :attributes => {'hide-uri-scheme' => ''}) end test 'qualified url with label' do assert_xpath "//a[@href='http://asciidoc.org'][text() = 'AsciiDoc']", render_string("We're parsing http://asciidoc.org[AsciiDoc] markup") end test 'qualified url with label containing escaped right square bracket' do assert_xpath "//a[@href='http://asciidoc.org'][text() = '[Ascii]Doc']", render_string("We're parsing http://asciidoc.org[[Ascii\\]Doc] markup") end test 'qualified url with label using link macro' do assert_xpath "//a[@href='http://asciidoc.org'][text() = 'AsciiDoc']", render_string("We're parsing link:http://asciidoc.org[AsciiDoc] markup") end test 'qualified url using macro syntax with multi-line label inline with text' do assert_xpath %{//a[@href='http://asciidoc.org'][text() = 'AsciiDoc\nmarkup']}, render_string("We're parsing link:http://asciidoc.org[AsciiDoc\nmarkup]") end test 'qualified url with label containing square brackets using link macro' do str = 'http://example.com[[bracket1\]]' doc = document_from_string str, :header_footer => false, :doctype => 'inline' assert_match '[bracket1]', doc.convert, 1 doc = document_from_string str, :header_footer => false, :backend => 'docbook', :doctype => 'inline' assert_match '[bracket1]', doc.convert, 1 doc = document_from_string str, :header_footer => false, :backend => 'docbook45', :doctype => 'inline' assert_match '[bracket1]', doc.convert, 1 end test 'qualified url surrounded by angled brackets' do assert_xpath '//a[@href="http://asciidoc.org"][text()="http://asciidoc.org"]', render_string(' is the project page for AsciiDoc.'), 1 end test 'qualified url surrounded by 
round brackets' do assert_xpath '//a[@href="http://asciidoc.org"][text()="http://asciidoc.org"]', render_string('(http://asciidoc.org) is the project page for AsciiDoc.'), 1 end test 'qualified url with trailing round bracket' do assert_xpath '//a[@href="http://asciidoctor.org"][text()="http://asciidoctor.org"]', render_string('Asciidoctor is a Ruby-based AsciiDoc processor (see http://asciidoctor.org)'), 1 end test 'qualified url with trailing semi-colon' do assert_xpath '//a[@href="http://asciidoctor.org"][text()="http://asciidoctor.org"]', render_string('http://asciidoctor.org; where text gets parsed'), 1 end test 'qualified url with trailing colon' do assert_xpath '//a[@href="http://asciidoctor.org"][text()="http://asciidoctor.org"]', render_string('http://asciidoctor.org: where text gets parsed'), 1 end test 'qualified url in round brackets with trailing colon' do assert_xpath '//a[@href="http://asciidoctor.org"][text()="http://asciidoctor.org"]', render_string('(http://asciidoctor.org): where text gets parsed'), 1 end test 'qualified url containing round brackets' do assert_xpath '//a[@href="http://jruby.org/apidocs/org/jruby/Ruby.html#addModule(org.jruby.RubyModule)"][text()="addModule() adds a Ruby module"]', render_string('http://jruby.org/apidocs/org/jruby/Ruby.html#addModule(org.jruby.RubyModule)[addModule() adds a Ruby module]'), 1 end test 'qualified url adjacent to text in square brackets' do assert_xpath '//a[@href="http://asciidoc.org"][text()="AsciiDoc"]', render_string(']http://asciidoc.org[AsciiDoc] project page.'), 1 end test 'qualified url adjacent to text in round brackets' do assert_xpath '//a[@href="http://asciidoc.org"][text()="AsciiDoc"]', render_string(')http://asciidoc.org[AsciiDoc] project page.'), 1 end test 'qualified url following smart apostrophe' do output = render_embedded_string("l’http://www.irit.fr[IRIT]") assert_match(/l’'), 1 end test 'link with quoted text should not be separated into attributes when linkattrs is set' do assert_xpath '//a[@href="http://search.example.com"][text()="Google, Yahoo, Bing = Search Engines"]', render_embedded_string('http://search.example.com["Google, Yahoo, Bing = Search Engines"]', :attributes => {'linkattrs' => ''}), 1 end test 'link with comma in text but no equal sign should not be separated into attributes when linkattrs is set' do assert_xpath '//a[@href="http://search.example.com"][text()="Google, Yahoo, Bing"]', render_embedded_string('http://search.example.com[Google, Yahoo, Bing]', :attributes => {'linkattrs' => ''}), 1 end test 'role and window attributes on link are processed when linkattrs is set' do assert_xpath '//a[@href="http://google.com"][@class="external"][@target="_blank"]', render_embedded_string('http://google.com[Google, role="external", window="_blank"]', :attributes => {'linkattrs' => ''}), 1 end test 'link text that ends in ^ should set link window to _blank' do assert_xpath '//a[@href="http://google.com"][@target="_blank"]', render_embedded_string('http://google.com[Google^]'), 1 end test 'id attribute on link are processed when linkattrs is set' do assert_xpath '//a[@href="http://google.com"][@id="link-1"]', render_embedded_string('http://google.com[Google, id="link-1"]', :attributes => {'linkattrs' => ''}), 1 end test 'title attribute on link are processed when linkattrs is set' do assert_xpath '//a[@href="http://google.com"][@title="title-1"]', render_embedded_string('http://google.com[Google, title="title-1"]', :attributes => {'linkattrs' => ''}), 1 end test 'inline irc link' do 
assert_xpath '//a[@href="irc://irc.freenode.net"][text()="irc://irc.freenode.net"]', render_embedded_string('irc://irc.freenode.net'), 1 end test 'inline irc link with text' do assert_xpath '//a[@href="irc://irc.freenode.net"][text()="Freenode IRC"]', render_embedded_string('irc://irc.freenode.net[Freenode IRC]'), 1 end test 'inline ref' do variations = %w([[tigers]] anchor:tigers[]) variations.each do |anchor| doc = document_from_string %(Here you can read about tigers.#{anchor}) output = doc.render assert_equal '[tigers]', doc.references[:ids]['tigers'] assert_xpath '//a[@id = "tigers"]', output, 1 assert_xpath '//a[@id = "tigers"]/child::text()', output, 0 end end test 'inline ref with reftext' do variations = %w([[tigers,Tigers]] anchor:tigers[Tigers]) variations.each do |anchor| doc = document_from_string %(Here you can read about tigers.#{anchor}) output = doc.render assert_equal 'Tigers', doc.references[:ids]['tigers'] assert_xpath '//a[@id = "tigers"]', output, 1 assert_xpath '//a[@id = "tigers"]/child::text()', output, 0 end end test 'escaped inline ref' do variations = %w([[tigers]] anchor:tigers[]) variations.each do |anchor| doc = document_from_string %(Here you can read about tigers.\\#{anchor}) output = doc.render assert !doc.references[:ids].has_key?('tigers') assert_xpath '//a[@id = "tigers"]', output, 0 end end test 'xref using angled bracket syntax' do doc = document_from_string '<>' doc.references[:ids]['tigers'] = '[tigers]' assert_xpath '//a[@href="#tigers"][text() = "[tigers]"]', doc.render, 1 end test 'xref using angled bracket syntax with label' do assert_xpath '//a[@href="#tigers"][text() = "About Tigers"]', render_string('<>'), 1 end test 'xref using angled bracket syntax with quoted label' do assert_xpath '//a[@href="#tigers"][text() = "About Tigers"]', render_string('<>'), 1 end test 'xref using angled bracket syntax with path sans extension' do doc = document_from_string '<>', :header_footer => false assert_xpath '//a[@href="tigers.html"][text() = "[tigers]"]', doc.render, 1 end test 'xref using angled bracket syntax with path sans extension using docbook backend' do doc = document_from_string '<>', :header_footer => false, :backend => 'docbook' assert_match 'tigers.xml', doc.render, 1 doc = document_from_string '<>', :header_footer => false, :backend => 'docbook45' assert_match 'tigers.xml', doc.render, 1 end test 'xref using angled bracket syntax with ancestor path sans extension' do doc = document_from_string '<<../tigers#,tigers>>', :header_footer => false assert_xpath '//a[@href="../tigers.html"][text() = "tigers"]', doc.render, 1 end test 'xref using angled bracket syntax with absolute path sans extension' do doc = document_from_string '<>', :header_footer => false assert_xpath '//a[@href="/path/to/tigers.html"][text() = "tigers"]', doc.render, 1 end test 'xref using angled bracket syntax with path and extension' do doc = document_from_string '<>', :header_footer => false assert_xpath '//a[@href="tigers.html"][text() = "[tigers]"]', doc.render, 1 end test 'xref using angled bracket syntax with path and fragment' do doc = document_from_string '<>', :header_footer => false assert_xpath '//a[@href="tigers.html#about"][text() = "[tigers#about]"]', doc.render, 1 end test 'xref using angled bracket syntax with path, fragment and text' do doc = document_from_string '<>', :header_footer => false assert_xpath '//a[@href="tigers.html#about"][text() = "About Tigers"]', doc.render, 1 end test 'xref using angled bracket syntax with path and custom relfilesuffix and 
outfilesuffix' do attributes = {'relfileprefix' => '../', 'outfilesuffix' => '/'} doc = document_from_string '<>', :header_footer => false, :attributes => attributes assert_xpath '//a[@href="../tigers/#about"][text() = "About Tigers"]', doc.render, 1 end test 'xref using angled bracket syntax with path which has been included in this document' do doc = document_from_string '<>', :header_footer => false doc.references[:includes] << 'tigers' assert_xpath '//a[@href="#about"][text() = "About Tigers"]', doc.render, 1 end test 'xref using angled bracket syntax with nested path which has been included in this document' do doc = document_from_string '<>', :header_footer => false doc.references[:includes] << 'part1/tigers' assert_xpath '//a[@href="#about"][text() = "About Tigers"]', doc.render, 1 end test 'xref using angled bracket syntax inline with text' do assert_xpath '//a[@href="#tigers"][text() = "about tigers"]', render_string('Want to learn <>?'), 1 end test 'xref using angled bracket syntax with multi-line label inline with text' do assert_xpath %{//a[@href="#tigers"][normalize-space(text()) = "about tigers"]}, render_string("Want to learn <>?"), 1 end test 'xref with escaped text' do # when \x0 was used as boundary character for passthrough, it was getting stripped # now using unicode marks as boundary characters, which resolves issue input = 'See the <> section for data about tigers' output = render_embedded_string input assert_xpath %(//a[@href="#tigers"]/code[text()="[tigers]"]), output, 1 end test 'xref using macro syntax' do doc = document_from_string 'xref:tigers[]' doc.references[:ids]['tigers'] = '[tigers]' assert_xpath '//a[@href="#tigers"][text() = "[tigers]"]', doc.render, 1 end test 'xref using macro syntax with label' do assert_xpath '//a[@href="#tigers"][text() = "About Tigers"]', render_string('xref:tigers[About Tigers]'), 1 end test 'xref using macro syntax inline with text' do assert_xpath '//a[@href="#tigers"][text() = "about tigers"]', render_string('Want to learn xref:tigers[about tigers]?'), 1 end test 'xref using macro syntax with multi-line label inline with text' do assert_xpath %{//a[@href="#tigers"][normalize-space(text()) = "about tigers"]}, render_string("Want to learn xref:tigers[about\ntigers]?"), 1 end test 'xref using invalid macro syntax does not create link' do doc = document_from_string 'xref:tigers' doc.references[:ids]['tigers'] = '[tigers]' assert_xpath '//a', doc.render, 0 end test 'xref creates link for unknown reference' do doc = document_from_string '<>' assert_xpath '//a[@href="#tigers"][text() = "[tigers]"]', doc.render, 1 end test 'xref shows label from title of target for forward and backward references in html backend' do input = <<-EOS == Section A <\<_section_b>> == Section B <\<_section_a>> EOS output = render_embedded_string input assert_xpath '//h2[@id="_section_a"][text()="Section A"]', output, 1 assert_xpath '//a[@href="#_section_a"][text()="Section A"]', output, 1 assert_xpath '//h2[@id="_section_b"][text()="Section B"]', output, 1 assert_xpath '//a[@href="#_section_b"][text()="Section B"]', output, 1 end test 'anchor creates reference' do doc = document_from_string "[[tigers]]Tigers roam here." assert_equal({'tigers' => '[tigers]'}, doc.references[:ids]) end test 'anchor with label creates reference' do doc = document_from_string "[[tigers,Tigers]]Tigers roam here." 
assert_equal({'tigers' => 'Tigers'}, doc.references[:ids]) end test 'anchor with quoted label creates reference with quoted label text' do doc = document_from_string %([[tigers,"Tigers roam here"]]Tigers roam here.) assert_equal({'tigers' => '"Tigers roam here"'}, doc.references[:ids]) end test 'anchor with label containing a comma creates reference' do doc = document_from_string %([[tigers,Tigers, scary tigers, roam here]]Tigers roam here.) assert_equal({'tigers' => 'Tigers, scary tigers, roam here'}, doc.references[:ids]) end end asciidoctor-1.5.5/test/lists_test.rb000066400000000000000000004043761277513741400175570ustar00rootroot00000000000000# encoding: UTF-8 unless defined? ASCIIDOCTOR_PROJECT_DIR $: << File.dirname(__FILE__); $:.uniq! require 'test_helper' end context "Bulleted lists (:ulist)" do context "Simple lists" do test "dash elements with no blank lines" do input = <<-EOS List ==== - Foo - Boo - Blech EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 end test 'indented dash elements using spaces' do input = <<-EOS - Foo - Boo - Blech EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 end test 'indented dash elements using tabs' do input = <<-EOS \t-\tFoo \t-\tBoo \t-\tBlech EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 end test "dash elements separated by blank lines should merge lists" do input = <<-EOS List ==== - Foo - Boo - Blech EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 end test 'dash elements with interspersed line comments should be skipped and not break list' do input = <<-EOS == List - Foo // line comment // another line comment - Boo // line comment more text // another line comment - Blech EOS output = render_embedded_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 assert_xpath %((//ul/li)[2]/p[text()="Boo\nmore text"]), output, 1 end test "dash elements separated by a line comment offset by blank lines should not merge lists" do input = <<-EOS List ==== - Foo - Boo // - Blech EOS output = render_string input assert_xpath '//ul', output, 2 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '(//ul)[2]/li', output, 1 end test "dash elements separated by a block title offset by a blank line should not merge lists" do input = <<-EOS List ==== - Foo - Boo .Also - Blech EOS output = render_string input assert_xpath '//ul', output, 2 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '(//ul)[2]/li', output, 1 assert_xpath '(//ul)[2]/preceding-sibling::*[@class = "title"][text() = "Also"]', output, 1 end test "dash elements separated by an attribute entry offset by a blank line should not merge lists" do input = <<-EOS == List - Foo - Boo :foo: bar - Blech EOS output = render_embedded_string input assert_xpath '//ul', output, 2 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '(//ul)[2]/li', output, 1 end test 'a non-indented wrapped line is folded into text of list item' do input = <<-EOS List ==== - Foo wrapped content - Boo - Blech EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li[1]/*', output, 1 assert_xpath "//ul/li[1]/p[text() = 'Foo\nwrapped content']", output, 1 end test 'a non-indented wrapped line that resembles a block title is folded into text of list item' do input = <<-EOS == List - Foo .wrapped content - Boo - Blech EOS output = render_embedded_string input assert_xpath '//ul', output, 1 
assert_xpath '//ul/li[1]/*', output, 1 assert_xpath "//ul/li[1]/p[text() = 'Foo\n.wrapped content']", output, 1 end test 'a non-indented wrapped line that resembles an attribute entry is folded into text of list item' do input = <<-EOS == List - Foo :foo: bar - Boo - Blech EOS output = render_embedded_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li[1]/*', output, 1 assert_xpath "//ul/li[1]/p[text() = 'Foo\n:foo: bar']", output, 1 end test 'a list item with a nested marker terminates non-indented paragraph for text of list item' do input = <<-EOS - Foo Bar * Foo EOS output = render_embedded_string input assert_css 'ul ul', output, 1 assert !output.include?('* Foo') end test 'a list item for a different list terminates non-indented paragraph for text of list item' do input = <<-EOS == Example 1 - Foo Bar . Foo == Example 2 * Item text term:: def EOS output = render_embedded_string input assert_css 'ul ol', output, 1 assert !output.include?('* Foo') assert_css 'ul dl', output, 1 assert !output.include?('term:: def') end test 'an indented wrapped line is unindented and folded into text of list item' do input = <<-EOS List ==== - Foo wrapped content - Boo - Blech EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li[1]/*', output, 1 assert_xpath "//ul/li[1]/p[text() = 'Foo\nwrapped content']", output, 1 end test 'wrapped list item with hanging indent followed by non-indented line' do input = <<-EOS == Lists - list item 1 // not line comment second wrapped line - list item 2 EOS output = render_embedded_string input assert_css 'ul', output, 1 assert_css 'ul li', output, 2 # NOTE for some reason, we're getting an extra line after the indented line lines = xmlnodes_at_xpath('(//ul/li)[1]/p', output, 1).text.gsub(/\n[[:space:]]*\n/, "\n").lines.entries assert_equal 3, lines.size assert_equal 'list item 1', lines[0].chomp assert_equal ' // not line comment', lines[1].chomp assert_equal 'second wrapped line', lines[2].chomp end test 'a list item with a nested marker terminates indented paragraph for text of list item' do input = <<-EOS - Foo Bar * Foo EOS output = render_embedded_string input assert_css 'ul ul', output, 1 assert !output.include?('* Foo') end test 'a list item that starts with a sequence of list markers characters should not match a nested list' do input = <<-EOS * first item *. normal text EOS output = render_embedded_string input assert_css 'ul', output, 1 assert_css 'ul li', output, 1 assert_xpath "//ul/li/p[text()='first item\n*. normal text']", output, 1 end test 'a list item for a different list terminates indented paragraph for text of list item' do input = <<-EOS == Example 1 - Foo Bar . 
Foo == Example 2 * Item text term:: def EOS output = render_embedded_string input assert_css 'ul ol', output, 1 assert !output.include?('* Foo') assert_css 'ul dl', output, 1 assert !output.include?('term:: def') end test "a literal paragraph offset by blank lines in list content is appended as a literal block" do input = <<-EOS List ==== - Foo literal - Boo - Blech EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 assert_xpath '(//ul/li)[1]/p[text() = "Foo"]', output, 1 assert_xpath '(//ul/li)[1]/*[@class="literalblock"]', output, 1 assert_xpath '(//ul/li)[1]/p/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath '((//ul/li)[1]/*[@class="literalblock"])[1]//pre[text() = "literal"]', output, 1 end test "a literal paragraph offset by a blank line in list content followed by line with continuation is appended as two blocks" do input = <<-EOS List ==== - Foo literal + para - Boo - Blech EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 assert_xpath '(//ul/li)[1]/p[text() = "Foo"]', output, 1 assert_xpath '(//ul/li)[1]/*[@class="literalblock"]', output, 1 assert_xpath '(//ul/li)[1]/p/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath '((//ul/li)[1]/*[@class="literalblock"])[1]//pre[text() = "literal"]', output, 1 assert_xpath '(//ul/li)[1]/*[@class="literalblock"]/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '(//ul/li)[1]/*[@class="literalblock"]/following-sibling::*[@class="paragraph"]/p[text()="para"]', output, 1 end test 'an admonition paragraph attached by a line continuation to a list item with wrapped text should produce admonition' do input = <<-EOS - first-line text wrapped text + NOTE: This is a note. EOS output = render_embedded_string input assert_css 'ul', output, 1 assert_css 'ul > li', output, 1 assert_css 'ul > li > p', output, 1 assert_xpath %(//ul/li/p[text()="first-line text\nwrapped text"]), output, 1 assert_css 'ul > li > p + .admonitionblock.note', output, 1 assert_xpath '//ul/li/*[@class="admonitionblock note"]//td[@class="content"][normalize-space(text())="This is a note."]', output, 1 end test 'appends line as paragraph if attached by continuation following line comment' do input = <<-EOS - list item 1 // line comment + paragraph in list item 1 - list item 2 EOS output = render_embedded_string input assert_css 'ul', output, 1 assert_css 'ul li', output, 2 assert_xpath '(//ul/li)[1]/p[text()="list item 1"]', output, 1 assert_xpath '(//ul/li)[1]/p/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '(//ul/li)[1]/p/following-sibling::*[@class="paragraph"]/p[text()="paragraph in list item 1"]', output, 1 assert_xpath '(//ul/li)[2]/p[text()="list item 2"]', output, 1 end test "a literal paragraph with a line that appears as a list item that is followed by a continuation should create two blocks" do input = <<-EOS * Foo + literal . still literal + para * Bar EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '(//ul/li)[1]/p[text() = "Foo"]', output, 1 assert_xpath '(//ul/li)[1]/*[@class="literalblock"]', output, 1 assert_xpath '(//ul/li)[1]/p/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath %(((//ul/li)[1]/*[@class="literalblock"])[1]//pre[text() = " literal\n. 
still literal"]), output, 1 assert_xpath '(//ul/li)[1]/*[@class="literalblock"]/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '(//ul/li)[1]/*[@class="literalblock"]/following-sibling::*[@class="paragraph"]/p[text()="para"]', output, 1 end test "consecutive literal paragraph offset by blank lines in list content are appended as a literal blocks" do input = <<-EOS List ==== - Foo literal more literal - Boo - Blech EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 assert_xpath '(//ul/li)[1]/p[text() = "Foo"]', output, 1 assert_xpath '(//ul/li)[1]/*[@class="literalblock"]', output, 2 assert_xpath '(//ul/li)[1]/p/following-sibling::*[@class="literalblock"]', output, 2 assert_xpath '((//ul/li)[1]/*[@class="literalblock"])[1]//pre[text() = "literal"]', output, 1 assert_xpath "((//ul/li)[1]/*[@class='literalblock'])[2]//pre[text() = 'more\nliteral']", output, 1 end test "a literal paragraph without a trailing blank line consumes following list items" do input = <<-EOS List ==== - Foo literal - Boo - Blech EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 1 assert_xpath '(//ul/li)[1]/p[text() = "Foo"]', output, 1 assert_xpath '(//ul/li)[1]/*[@class="literalblock"]', output, 1 assert_xpath '(//ul/li)[1]/p/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath "((//ul/li)[1]/*[@class='literalblock'])[1]//pre[text() = ' literal\n- Boo\n- Blech']", output, 1 end test "asterisk elements with no blank lines" do input = <<-EOS List ==== * Foo * Boo * Blech EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 end test 'indented asterisk elements using spaces' do input = <<-EOS * Foo * Boo * Blech EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 end test 'indented unicode bullet elements using spaces' do input = <<-EOS • Foo • Boo • Blech EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 end if ::RUBY_MIN_VERSION_1_9 test 'indented asterisk elements using tabs' do input = <<-EOS \t*\tFoo \t*\tBoo \t*\tBlech EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 end test 'should represent block style as style class' do ['disc', 'square', 'circle'].each do |style| input = <<-EOS [#{style}] * a * b * c EOS output = render_embedded_string input assert_css ".ulist.#{style}", output, 1 assert_css ".ulist.#{style} ul.#{style}", output, 1 end end test "asterisk elements separated by blank lines should merge lists" do input = <<-EOS List ==== * Foo * Boo * Blech EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 end test 'asterisk elements with interspersed line comments should be skipped and not break list' do input = <<-EOS == List * Foo // line comment // another line comment * Boo // line comment more text // another line comment * Blech EOS output = render_embedded_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 assert_xpath %((//ul/li)[2]/p[text()="Boo\nmore text"]), output, 1 end test "asterisk elements separated by a line comment offset by blank lines should not merge lists" do input = <<-EOS List ==== * Foo * Boo // * Blech EOS output = render_string input assert_xpath '//ul', output, 2 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '(//ul)[2]/li', output, 1 end test "asterisk elements separated by a block title offset 
by a blank line should not merge lists" do input = <<-EOS List ==== * Foo * Boo .Also * Blech EOS output = render_string input assert_xpath '//ul', output, 2 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '(//ul)[2]/li', output, 1 assert_xpath '(//ul)[2]/preceding-sibling::*[@class = "title"][text() = "Also"]', output, 1 end test "asterisk elements separated by an attribute entry offset by a blank line should not merge lists" do input = <<-EOS == List * Foo * Boo :foo: bar * Blech EOS output = render_embedded_string input assert_xpath '//ul', output, 2 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '(//ul)[2]/li', output, 1 end test "list should terminate before next lower section heading" do input = <<-EOS List ==== * first item * second item == Section EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '//h2[text() = "Section"]', output, 1 end test "list should terminate before next lower section heading with implicit id" do input = <<-EOS List ==== * first item * second item [[sec]] == Section EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '//h2[@id = "sec"][text() = "Section"]', output, 1 end test 'should not find section title immediately below last list item' do input = <<-EOS * first * second == Not a section EOS output = render_embedded_string input assert_css 'ul', output, 1 assert_css 'ul > li', output, 2 assert_css 'h2', output, 0 assert output.include?('== Not a section') assert_xpath %((//li)[2]/p[text() = "second\n== Not a section"]), output, 1 end end context "Lists with inline markup" do test "quoted text" do input = <<-EOS List ==== - I am *strong*. - I am _stressed_. - I am `flexible`. EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 assert_xpath '(//ul/li)[1]//strong', output, 1 assert_xpath '(//ul/li)[2]//em', output, 1 assert_xpath '(//ul/li)[3]//code', output, 1 end test "attribute substitutions" do input = <<-EOS List ==== :foo: bar - side a {vbar} side b - Take me to a {foo}. EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '(//ul/li)[1]//p[text() = "side a | side b"]', output, 1 assert_xpath '(//ul/li)[2]//p[text() = "Take me to a bar."]', output, 1 end test "leading dot is treated as text not block title" do input = <<-EOS * .first * .second * .third EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 3 %w(.first .second .third).each_with_index do |text, index| assert_xpath "(//ul/li)[#{index + 1}]//p[text() = '#{text}']", output, 1 end end test "word ending sentence on continuing line not treated as a list item" do input = <<-EOS A. This is the story about AsciiDoc. It begins here. B. And it ends here. 
EOS output = render_string input assert_xpath '//ol', output, 1 assert_xpath '//ol/li', output, 2 end end context "Nested lists" do test "asterisk element mixed with dash elements should be nested" do input = <<-EOS List ==== - Foo * Boo - Blech EOS output = render_string input assert_xpath '//ul', output, 2 assert_xpath '//ul/li', output, 3 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '(//ul)[1]/li//ul/li', output, 1 end test "dash element mixed with asterisks elements should be nested" do input = <<-EOS List ==== * Foo - Boo * Blech EOS output = render_string input assert_xpath '//ul', output, 2 assert_xpath '//ul/li', output, 3 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '(//ul)[1]/li//ul/li', output, 1 end test "lines prefixed with alternating list markers separated by blank lines should be nested" do input = <<-EOS List ==== - Foo * Boo - Blech EOS output = render_string input assert_xpath '//ul', output, 2 assert_xpath '//ul/li', output, 3 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '(//ul)[1]/li//ul/li', output, 1 end test "nested elements (2) with asterisks" do input = <<-EOS List ==== * Foo ** Boo * Blech EOS output = render_string input assert_xpath '//ul', output, 2 assert_xpath '//ul/li', output, 3 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '(//ul)[1]/li//ul/li', output, 1 end test "nested elements (3) with asterisks" do input = <<-EOS List ==== * Foo ** Boo *** Snoo * Blech EOS output = render_string input assert_xpath '//ul', output, 3 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '((//ul)[1]/li//ul)[1]/li', output, 1 assert_xpath '(((//ul)[1]/li//ul)[1]/li//ul)[1]/li', output, 1 end test "nested elements (4) with asterisks" do input = <<-EOS List ==== * Foo ** Boo *** Snoo **** Froo * Blech EOS output = render_string input assert_xpath '//ul', output, 4 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '((//ul)[1]/li//ul)[1]/li', output, 1 assert_xpath '(((//ul)[1]/li//ul)[1]/li//ul)[1]/li', output, 1 assert_xpath '((((//ul)[1]/li//ul)[1]/li//ul)[1]/li//ul)[1]/li', output, 1 end test "nested elements (5) with asterisks" do input = <<-EOS List ==== * Foo ** Boo *** Snoo **** Froo ***** Groo * Blech EOS output = render_string input assert_xpath '//ul', output, 5 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '((//ul)[1]/li//ul)[1]/li', output, 1 assert_xpath '(((//ul)[1]/li//ul)[1]/li//ul)[1]/li', output, 1 assert_xpath '((((//ul)[1]/li//ul)[1]/li//ul)[1]/li//ul)[1]/li', output, 1 assert_xpath '(((((//ul)[1]/li//ul)[1]/li//ul)[1]/li//ul)[1]/li//ul)[1]/li', output, 1 end test 'nested elements (5) with unicode bullet' do input = <<-EOS List ==== • Foo •• Boo ••• Snoo •••• Froo ••••• Groo • Blech EOS output = render_string input assert_xpath '//ul', output, 5 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '((//ul)[1]/li//ul)[1]/li', output, 1 assert_xpath '(((//ul)[1]/li//ul)[1]/li//ul)[1]/li', output, 1 assert_xpath '((((//ul)[1]/li//ul)[1]/li//ul)[1]/li//ul)[1]/li', output, 1 assert_xpath '(((((//ul)[1]/li//ul)[1]/li//ul)[1]/li//ul)[1]/li//ul)[1]/li', output, 1 end if ::RUBY_MIN_VERSION_1_9 test "nested ordered elements (2)" do input = <<-EOS List ==== . Foo .. Boo . Blech EOS output = render_string input assert_xpath '//ol', output, 2 assert_xpath '//ol/li', output, 3 assert_xpath '(//ol)[1]/li', output, 2 assert_xpath '(//ol)[1]/li//ol/li', output, 1 end test "nested ordered elements (3)" do input = <<-EOS List ==== . Foo .. Boo ... Snoo . 
Blech EOS output = render_string input assert_xpath '//ol', output, 3 assert_xpath '(//ol)[1]/li', output, 2 assert_xpath '((//ol)[1]/li//ol)[1]/li', output, 1 assert_xpath '(((//ol)[1]/li//ol)[1]/li//ol)[1]/li', output, 1 end test "nested unordered inside ordered elements" do input = <<-EOS List ==== . Foo * Boo . Blech EOS output = render_string input assert_xpath '//ol', output, 1 assert_xpath '//ul', output, 1 assert_xpath '(//ol)[1]/li', output, 2 assert_xpath '((//ol)[1]/li//ul)[1]/li', output, 1 end test "nested ordered inside unordered elements" do input = <<-EOS List ==== * Foo . Boo * Blech EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ol', output, 1 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '((//ul)[1]/li//ol)[1]/li', output, 1 end test 'three levels of alternating unordered and ordered elements' do input = <<-EOS == Lists * bullet 1 . numbered 1.1 ** bullet 1.1.1 * bullet 2 EOS output = render_embedded_string input assert_css '.ulist', output, 2 assert_css '.olist', output, 1 assert_css '.ulist > ul > li > p', output, 3 assert_css '.ulist > ul > li > p + .olist', output, 1 assert_css '.ulist > ul > li > p + .olist > ol > li > p', output, 1 assert_css '.ulist > ul > li > p + .olist > ol > li > p + .ulist', output, 1 assert_css '.ulist > ul > li > p + .olist > ol > li > p + .ulist > ul > li > p', output, 1 assert_css '.ulist > ul > li + li > p', output, 1 end test "lines with alternating markers of unordered and ordered list types separated by blank lines should be nested" do input = <<-EOS List ==== * Foo . Boo * Blech EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ol', output, 1 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '((//ul)[1]/li//ol)[1]/li', output, 1 end test 'list item with literal content should not consume nested list of different type' do input = <<-EOS List ==== - bullet literal but not hungry . numbered EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//li', output, 2 assert_xpath '//ul//ol', output, 1 assert_xpath '//ul/li/p', output, 1 assert_xpath '//ul/li/p[text()="bullet"]', output, 1 assert_xpath '//ul/li/p/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath %(//ul/li/p/following-sibling::*[@class="literalblock"]//pre[text()="literal\nbut not\nhungry"]), output, 1 assert_xpath '//*[@class="literalblock"]/following-sibling::*[@class="olist arabic"]', output, 1 assert_xpath '//*[@class="literalblock"]/following-sibling::*[@class="olist arabic"]//p[text()="numbered"]', output, 1 end test 'nested list item does not eat the title of the following detached block' do input = <<-EOS List ==== - bullet * nested bullet 1 * nested bullet 2 .Title .... literal .... 
EOS # use render_string so we can match all ulists easier output = render_string input assert_xpath '//*[@class="ulist"]/ul', output, 2 assert_xpath '(//*[@class="ulist"])[1]/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath '(//*[@class="ulist"])[1]/following-sibling::*[@class="literalblock"]/*[@class="title"]', output, 1 end test "lines with alternating markers of bulleted and labeled list types separated by blank lines should be nested" do input = <<-EOS List ==== * Foo term1:: def1 * Blech EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//dl', output, 1 assert_xpath '//ul[1]/li', output, 2 assert_xpath '//ul[1]/li//dl[1]/dt', output, 1 assert_xpath '//ul[1]/li//dl[1]/dd', output, 1 end test "nested ordered with attribute inside unordered elements" do input = <<-EOS Blah ==== * Foo [start=2] . Boo * Blech EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ol', output, 1 assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '((//ul)[1]/li//ol)[1][@start = 2]/li', output, 1 end end context "List continuations" do test "adjacent list continuation line attaches following paragraph" do input = <<-EOS Lists ===== * Item one, paragraph one + Item one, paragraph two + * Item two EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '//ul/li[1]/p', output, 1 assert_xpath '//ul/li[1]//p', output, 2 assert_xpath '//ul/li[1]/p[text() = "Item one, paragraph one"]', output, 1 assert_xpath '//ul/li[1]/*[@class = "paragraph"]/p[text() = "Item one, paragraph two"]', output, 1 end test "adjacent list continuation line attaches following block" do input = <<-EOS Lists ===== * Item one, paragraph one + .... Item one, literal block .... + * Item two EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '//ul/li[1]/p', output, 1 assert_xpath '(//ul/li[1]/p/following-sibling::*)[1][@class = "literalblock"]', output, 1 end test 'adjacent list continuation line attaches following block with block attributes' do input = <<-EOS Lists ===== * Item one, paragraph one + :foo: bar [[beck]] .Read the following aloud to yourself [source, ruby] ---- 5.times { print "Odelay!" 
} ---- * Item two EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '//ul/li[1]/p', output, 1 assert_xpath '(//ul/li[1]/p/following-sibling::*)[1][@id="beck"][@class = "listingblock"]', output, 1 assert_xpath '(//ul/li[1]/p/following-sibling::*)[1][@id="beck"]/div[@class="title"][starts-with(text(),"Read")]', output, 1 assert_xpath '(//ul/li[1]/p/following-sibling::*)[1][@id="beck"]//code[@data-lang="ruby"][starts-with(text(),"5.times")]', output, 1 end test 'trailing block attribute line attached by continuation should not create block' do input = <<-EOS Lists ===== * Item one, paragraph one + [source] * Item two EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '//ul/li[1]/*', output, 1 assert_xpath '//ul/li//*[@class="listingblock"]', output, 0 end test 'trailing block title line attached by continuation should not create block' do input = <<-EOS Lists ===== * Item one, paragraph one + .Disappears into the ether * Item two EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '//ul/li[1]/*', output, 1 end test 'consecutive blocks in list continuation attach to list item' do input = <<-EOS Lists ===== * Item one, paragraph one + .... Item one, literal block .... + ____ Item one, quote block ____ + * Item two EOS output = render_embedded_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '//ul/li[1]/p', output, 1 assert_xpath '(//ul/li[1]/p/following-sibling::*)[1][@class = "literalblock"]', output, 1 assert_xpath '(//ul/li[1]/p/following-sibling::*)[2][@class = "quoteblock"]', output, 1 end test 'list item with hanging indent followed by block attached by list continuation' do input = <<-EOS == Lists . list item 1 continued + -- open block in list item 1 -- . list item 2 EOS output = render_embedded_string input assert_css 'ol', output, 1 assert_css 'ol li', output, 2 assert_xpath %((//ol/li)[1]/p[text()="list item 1\ncontinued"]), output, 1 assert_xpath '(//ol/li)[1]/p/following-sibling::*[@class="openblock"]', output, 1 assert_xpath '(//ol/li)[1]/p/following-sibling::*[@class="openblock"]//p[text()="open block in list item 1"]', output, 1 assert_xpath %((//ol/li)[2]/p[text()="list item 2"]), output, 1 end test 'list item paragraph in list item and nested list item' do input = <<-EOS == Lists . list item 1 + list item 1 paragraph * nested list item + nested list item paragraph . list item 2 EOS output = render_embedded_string input assert_css '.olist ol', output, 1 assert_css '.olist ol > li', output, 2 assert_css '.ulist ul', output, 1 assert_css '.ulist ul > li', output, 1 assert_xpath '(//ol/li)[1]/*', output, 3 assert_xpath '((//ol/li)[1]/*)[1]/self::p', output, 1 assert_xpath '((//ol/li)[1]/*)[1]/self::p[text()="list item 1"]', output, 1 assert_xpath '((//ol/li)[1]/*)[2]/self::div[@class="paragraph"]', output, 1 assert_xpath '((//ol/li)[1]/*)[3]/self::div[@class="ulist"]', output, 1 assert_xpath '((//ol/li)[1]/*)[3]/self::div[@class="ulist"]/ul/li', output, 1 assert_xpath '((//ol/li)[1]/*)[3]/self::div[@class="ulist"]/ul/li/p[text()="nested list item"]', output, 1 assert_xpath '((//ol/li)[1]/*)[3]/self::div[@class="ulist"]/ul/li/p/following-sibling::div[@class="paragraph"]', output, 1 end test 'trailing list continuations should attach to list items at respective levels' do input = <<-EOS == Lists . 
list item 1 + * nested list item 1 * nested list item 2 + paragraph for nested list item 2 + paragraph for list item 1 . list item 2 EOS output = render_embedded_string input assert_css '.olist ol', output, 1 assert_css '.olist ol > li', output, 2 assert_css '.ulist ul', output, 1 assert_css '.ulist ul > li', output, 2 assert_css '.olist .ulist', output, 1 assert_xpath '(//ol/li)[1]/*', output, 3 assert_xpath '((//ol/li)[1]/*)[1]/self::p', output, 1 assert_xpath '((//ol/li)[1]/*)[1]/self::p[text()="list item 1"]', output, 1 assert_xpath '((//ol/li)[1]/*)[2]/self::div[@class="ulist"]', output, 1 assert_xpath '((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li', output, 2 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[2]/*', output, 2 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[2]/p', output, 1 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[2]/div[@class="paragraph"]', output, 1 assert_xpath '((//ol/li)[1]/*)[3]/self::div[@class="paragraph"]', output, 1 end test 'trailing list continuations should attach to list items of different types at respective levels' do input = <<-EOS == Lists * bullet 1 . numbered 1.1 ** bullet 1.1.1 + numbered 1.1 paragraph + bullet 1 paragraph * bullet 2 EOS output = render_embedded_string input assert_xpath '(//ul)[1]/li', output, 2 assert_xpath '((//ul)[1]/li[1])/*', output, 3 assert_xpath '(((//ul)[1]/li[1])/*)[1]/self::p[text()="bullet 1"]', output, 1 assert_xpath '(((//ul)[1]/li[1])/*)[2]/ol', output, 1 assert_xpath '(((//ul)[1]/li[1])/*)[3]/self::div[@class="paragraph"]/p[text()="bullet 1 paragraph"]', output, 1 assert_xpath '((//ul)[1]/li)[1]/div/ol/li', output, 1 assert_xpath '((//ul)[1]/li)[1]/div/ol/li/*', output, 3 assert_xpath '(((//ul)[1]/li)[1]/div/ol/li/*)[1]/self::p[text()="numbered 1.1"]', output, 1 assert_xpath '(((//ul)[1]/li)[1]/div/ol/li/*)[2]/self::div[@class="ulist"]', output, 1 assert_xpath '(((//ul)[1]/li)[1]/div/ol/li/*)[3]/self::div[@class="paragraph"]/p[text()="numbered 1.1 paragraph"]', output, 1 assert_xpath '((//ul)[1]/li)[1]/div/ol/li/div[@class="ulist"]/ul/li', output, 1 assert_xpath '((//ul)[1]/li)[1]/div/ol/li/div[@class="ulist"]/ul/li/*', output, 1 assert_xpath '((//ul)[1]/li)[1]/div/ol/li/div[@class="ulist"]/ul/li/p[text()="bullet 1.1.1"]', output, 1 end test 'repeated list continuations should attach to list items at respective levels' do input = <<-EOS == Lists . list item 1 * nested list item 1 + -- open block for nested list item 1 -- + * nested list item 2 + paragraph for nested list item 2 + paragraph for list item 1 . 
list item 2 EOS output = render_embedded_string input assert_css '.olist ol', output, 1 assert_css '.olist ol > li', output, 2 assert_css '.ulist ul', output, 1 assert_css '.ulist ul > li', output, 2 assert_css '.olist .ulist', output, 1 assert_xpath '(//ol/li)[1]/*', output, 3 assert_xpath '((//ol/li)[1]/*)[1]/self::p', output, 1 assert_xpath '((//ol/li)[1]/*)[1]/self::p[text()="list item 1"]', output, 1 assert_xpath '((//ol/li)[1]/*)[2]/self::div[@class="ulist"]', output, 1 assert_xpath '((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li', output, 2 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[1]/*', output, 2 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[1]/p', output, 1 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[1]/div[@class="openblock"]', output, 1 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[2]/*', output, 2 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[2]/p', output, 1 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[2]/div[@class="paragraph"]', output, 1 assert_xpath '((//ol/li)[1]/*)[3]/self::div[@class="paragraph"]', output, 1 end test 'repeated list continuations attached directly to list item should attach to list items at respective levels' do input = <<-EOS == Lists . list item 1 + * nested list item 1 + -- open block for nested list item 1 -- + * nested list item 2 + paragraph for nested list item 2 + paragraph for list item 1 . list item 2 EOS output = render_embedded_string input assert_css '.olist ol', output, 1 assert_css '.olist ol > li', output, 2 assert_css '.ulist ul', output, 1 assert_css '.ulist ul > li', output, 2 assert_css '.olist .ulist', output, 1 assert_xpath '(//ol/li)[1]/*', output, 3 assert_xpath '((//ol/li)[1]/*)[1]/self::p', output, 1 assert_xpath '((//ol/li)[1]/*)[1]/self::p[text()="list item 1"]', output, 1 assert_xpath '((//ol/li)[1]/*)[2]/self::div[@class="ulist"]', output, 1 assert_xpath '((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li', output, 2 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[1]/*', output, 2 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[1]/p', output, 1 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[1]/div[@class="openblock"]', output, 1 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[2]/*', output, 2 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[2]/p', output, 1 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[2]/div[@class="paragraph"]', output, 1 assert_xpath '((//ol/li)[1]/*)[3]/self::div[@class="paragraph"]', output, 1 end test 'repeated list continuations should attach to list items at respective levels ignoring blank lines' do input = <<-EOS == Lists . list item 1 + * nested list item 1 + -- open block for nested list item 1 -- + * nested list item 2 + paragraph for nested list item 2 + paragraph for list item 1 . 
list item 2 EOS output = render_embedded_string input assert_css '.olist ol', output, 1 assert_css '.olist ol > li', output, 2 assert_css '.ulist ul', output, 1 assert_css '.ulist ul > li', output, 2 assert_css '.olist .ulist', output, 1 assert_xpath '(//ol/li)[1]/*', output, 3 assert_xpath '((//ol/li)[1]/*)[1]/self::p', output, 1 assert_xpath '((//ol/li)[1]/*)[1]/self::p[text()="list item 1"]', output, 1 assert_xpath '((//ol/li)[1]/*)[2]/self::div[@class="ulist"]', output, 1 assert_xpath '((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li', output, 2 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[1]/*', output, 2 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[1]/p', output, 1 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[1]/div[@class="openblock"]', output, 1 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[2]/*', output, 2 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[2]/p', output, 1 assert_xpath '(((//ol/li)[1]/*)[2]/self::div[@class="ulist"]/ul/li)[2]/div[@class="paragraph"]', output, 1 assert_xpath '((//ol/li)[1]/*)[3]/self::div[@class="paragraph"]', output, 1 end test 'trailing list continuations should ignore preceding blank lines' do input = <<-EOS == Lists * bullet 1 ** bullet 1.1 *** bullet 1.1.1 + -- open block -- + bullet 1.1 paragraph + bullet 1 paragraph * bullet 2 EOS output = render_embedded_string input assert_xpath '((//ul)[1]/li[1])/*', output, 3 assert_xpath '(((//ul)[1]/li[1])/*)[1]/self::p[text()="bullet 1"]', output, 1 assert_xpath '(((//ul)[1]/li[1])/*)[2]/self::div[@class="ulist"]', output, 1 assert_xpath '(((//ul)[1]/li[1])/*)[3]/self::div[@class="paragraph"]/p[text()="bullet 1 paragraph"]', output, 1 assert_xpath '((//ul)[1]/li)[1]/div[@class="ulist"]/ul/li', output, 1 assert_xpath '((//ul)[1]/li)[1]/div[@class="ulist"]/ul/li/*', output, 3 assert_xpath '(((//ul)[1]/li)[1]/div[@class="ulist"]/ul/li/*)[1]/self::p[text()="bullet 1.1"]', output, 1 assert_xpath '(((//ul)[1]/li)[1]/div[@class="ulist"]/ul/li/*)[2]/self::div[@class="ulist"]', output, 1 assert_xpath '(((//ul)[1]/li)[1]/div[@class="ulist"]/ul/li/*)[3]/self::div[@class="paragraph"]/p[text()="bullet 1.1 paragraph"]', output, 1 assert_xpath '((//ul)[1]/li)[1]/div[@class="ulist"]/ul/li/div[@class="ulist"]/ul/li', output, 1 assert_xpath '((//ul)[1]/li)[1]/div[@class="ulist"]/ul/li/div[@class="ulist"]/ul/li/*', output, 2 assert_xpath '(((//ul)[1]/li)[1]/div[@class="ulist"]/ul/li/div[@class="ulist"]/ul/li/*)[1]/self::p', output, 1 assert_xpath '(((//ul)[1]/li)[1]/div[@class="ulist"]/ul/li/div[@class="ulist"]/ul/li/*)[2]/self::div[@class="openblock"]', output, 1 end test 'indented outline list item with different marker offset by a blank line should be recognized as a nested list' do input = <<-EOS * item 1 . item 1.1 + attached paragraph . 
item 1.2 + attached paragraph * item 2 EOS output = render_embedded_string input assert_css 'ul', output, 1 assert_css 'ol', output, 1 assert_css 'ul ol', output, 1 assert_css 'ul > li', output, 2 assert_xpath '((//ul/li)[1]/*)', output, 2 assert_xpath '((//ul/li)[1]/*)[1]/self::p', output, 1 assert_xpath '((//ul/li)[1]/*)[2]/self::div/ol', output, 1 assert_xpath '((//ul/li)[1]/*)[2]/self::div/ol/li', output, 2 (1..2).each do |idx| assert_xpath "(((//ul/li)[1]/*)[2]/self::div/ol/li)[#{idx}]/*", output, 2 assert_xpath "((((//ul/li)[1]/*)[2]/self::div/ol/li)[#{idx}]/*)[1]/self::p", output, 1 assert_xpath "((((//ul/li)[1]/*)[2]/self::div/ol/li)[#{idx}]/*)[2]/self::div[@class=\"paragraph\"]", output, 1 end end test 'indented labeled list item inside outline list item offset by a blank line should be recognized as a nested list' do input = <<-EOS * item 1 term a:: description a + attached paragraph term b:: description b + attached paragraph * item 2 EOS output = render_embedded_string input assert_css 'ul', output, 1 assert_css 'dl', output, 1 assert_css 'ul dl', output, 1 assert_css 'ul > li', output, 2 assert_xpath '((//ul/li)[1]/*)', output, 2 assert_xpath '((//ul/li)[1]/*)[1]/self::p', output, 1 assert_xpath '((//ul/li)[1]/*)[2]/self::div/dl', output, 1 assert_xpath '((//ul/li)[1]/*)[2]/self::div/dl/dt', output, 2 assert_xpath '((//ul/li)[1]/*)[2]/self::div/dl/dd', output, 2 (1..2).each do |idx| assert_xpath "(((//ul/li)[1]/*)[2]/self::div/dl/dd)[#{idx}]/*", output, 2 assert_xpath "((((//ul/li)[1]/*)[2]/self::div/dl/dd)[#{idx}]/*)[1]/self::p", output, 1 assert_xpath "((((//ul/li)[1]/*)[2]/self::div/dl/dd)[#{idx}]/*)[2]/self::div[@class=\"paragraph\"]", output, 1 end end # NOTE this is not consistent w/ AsciiDoc output, but this is some screwy input anyway =begin test "consecutive list continuation lines are folded" do input = <<-EOS Lists ===== * Item one, paragraph one + + Item one, paragraph two + + * Item two + + EOS output = render_string input assert_xpath '//ul', output, 1 assert_xpath '//ul/li', output, 2 assert_xpath '//ul/li[1]/p', output, 1 assert_xpath '//ul/li[1]//p', output, 2 assert_xpath '//ul/li[1]//p[text() = "Item one, paragraph one"]', output, 1 assert_xpath '//ul/li[1]//p[text() = "Item one, paragraph two"]', output, 1 end =end end end context "Ordered lists (:olist)" do context "Simple lists" do test "dot elements with no blank lines" do input = <<-EOS List ==== . Foo . Boo . Blech EOS output = render_string input assert_xpath '//ol', output, 1 assert_xpath '//ol/li', output, 3 end test 'indented dot elements using spaces' do input = <<-EOS . Foo . Boo . Blech EOS output = render_string input assert_xpath '//ol', output, 1 assert_xpath '//ol/li', output, 3 end test 'indented dot elements using tabs' do input = <<-EOS \t.\tFoo \t.\tBoo \t.\tBlech EOS output = render_string input assert_xpath '//ol', output, 1 assert_xpath '//ol/li', output, 3 end test 'should represent explicit role attribute as style class' do input = <<-EOS [role="dry"] . Once . Again . Refactor! EOS output = render_embedded_string input assert_css '.olist.arabic.dry', output, 1 assert_css '.olist ol.arabic', output, 1 end test 'should represent custom numbering and explicit role attribute as style classes' do input = <<-EOS [loweralpha, role="dry"] . Once . Again . Refactor! 
EOS output = render_embedded_string input assert_css '.olist.loweralpha.dry', output, 1 assert_css '.olist ol.loweralpha', output, 1 end test 'should set reversed attribute on list if reversed option is set' do input = <<-EOS [%reversed, start=3] . three . two . one . blast off! EOS output = render_embedded_string input assert_css 'ol[reversed][start="3"]', output, 1 end test 'should represent implicit role attribute as style class' do input = <<-EOS [.dry] . Once . Again . Refactor! EOS output = render_embedded_string input assert_css '.olist.arabic.dry', output, 1 assert_css '.olist ol.arabic', output, 1 end test 'should represent custom numbering and implicit role attribute as style classes' do input = <<-EOS [loweralpha.dry] . Once . Again . Refactor! EOS output = render_embedded_string input assert_css '.olist.loweralpha.dry', output, 1 assert_css '.olist ol.loweralpha', output, 1 end test "dot elements separated by blank lines should merge lists" do input = <<-EOS List ==== . Foo . Boo . Blech EOS output = render_string input assert_xpath '//ol', output, 1 assert_xpath '//ol/li', output, 3 end test 'dot elements with interspersed line comments should be skipped and not break list' do input = <<-EOS == List . Foo // line comment // another line comment . Boo // line comment more text // another line comment . Blech EOS output = render_embedded_string input assert_xpath '//ol', output, 1 assert_xpath '//ol/li', output, 3 assert_xpath %((//ol/li)[2]/p[text()="Boo\nmore text"]), output, 1 end test "dot elements separated by line comment offset by blank lines should not merge lists" do input = <<-EOS List ==== . Foo . Boo // . Blech EOS output = render_string input assert_xpath '//ol', output, 2 assert_xpath '(//ol)[1]/li', output, 2 assert_xpath '(//ol)[2]/li', output, 1 end test "dot elements separated by a block title offset by a blank line should not merge lists" do input = <<-EOS List ==== . Foo . Boo .Also . Blech EOS output = render_string input assert_xpath '//ol', output, 2 assert_xpath '(//ol)[1]/li', output, 2 assert_xpath '(//ol)[2]/li', output, 1 assert_xpath '(//ol)[2]/preceding-sibling::*[@class = "title"][text() = "Also"]', output, 1 end test "dot elements separated by an attribute entry offset by a blank line should not merge lists" do input = <<-EOS == List . Foo . Boo :foo: bar . Blech EOS output = render_embedded_string input assert_xpath '//ol', output, 2 assert_xpath '(//ol)[1]/li', output, 2 assert_xpath '(//ol)[2]/li', output, 1 end test 'should use start number in docbook4.5 backend' do input = <<-EOS == List [start=7] . item 7 . item 8 EOS output = render_embedded_string input, :backend => 'docbook45' assert_xpath '//orderedlist', output, 1 assert_xpath '(//orderedlist)/listitem', output, 2 assert_xpath '(//orderedlist/listitem)[1][@override = "7"]', output, 1 end test 'should use start number in docbook5 backend' do input = <<-EOS == List [start=7] . item 7 . 
item 8
      EOS
      output = render_embedded_string input, :backend => 'docbook5'
      assert_xpath '//orderedlist', output, 1
      assert_xpath '(//orderedlist)/listitem', output, 2
      assert_xpath '(//orderedlist)[@startingnumber = "7"]', output, 1
    end
  end
end

context "Description lists (:dlist)" do
  context "Simple lists" do
    test "single-line adjacent elements" do
      input = <<-EOS
term1:: def1
term2:: def2
      EOS
      output = render_string input
      assert_xpath '//dl', output, 1
      assert_xpath '//dl/dt', output, 2
      assert_xpath '//dl/dt/following-sibling::dd', output, 2
      assert_xpath '(//dl/dt)[1][normalize-space(text()) = "term1"]', output, 1
      assert_xpath '(//dl/dt)[1]/following-sibling::dd/p[text() = "def1"]', output, 1
      assert_xpath '(//dl/dt)[2][normalize-space(text()) = "term2"]', output, 1
      assert_xpath '(//dl/dt)[2]/following-sibling::dd/p[text() = "def2"]', output, 1
    end

    test "single-line indented adjacent elements" do
      input = <<-EOS
term1:: def1
 term2:: def2
      EOS
      output = render_string input
      assert_xpath '//dl', output, 1
      assert_xpath '//dl/dt', output, 2
      assert_xpath '//dl/dt/following-sibling::dd', output, 2
      assert_xpath '(//dl/dt)[1][normalize-space(text()) = "term1"]', output, 1
      assert_xpath '(//dl/dt)[1]/following-sibling::dd/p[text() = "def1"]', output, 1
      assert_xpath '(//dl/dt)[2][normalize-space(text()) = "term2"]', output, 1
      assert_xpath '(//dl/dt)[2]/following-sibling::dd/p[text() = "def2"]', output, 1
    end

    test "single-line indented adjacent elements with tabs" do
      input = <<-EOS
term1::\tdef1
\tterm2::\tdef2
      EOS
      output = render_string input
      assert_xpath '//dl', output, 1
      assert_xpath '//dl/dt', output, 2
      assert_xpath '//dl/dt/following-sibling::dd', output, 2
      assert_xpath '(//dl/dt)[1][normalize-space(text()) = "term1"]', output, 1
      assert_xpath '(//dl/dt)[1]/following-sibling::dd/p[text() = "def1"]', output, 1
      assert_xpath '(//dl/dt)[2][normalize-space(text()) = "term2"]', output, 1
      assert_xpath '(//dl/dt)[2]/following-sibling::dd/p[text() = "def2"]', output, 1
    end

    test "single-line elements separated by blank line should create a single list" do
      input = <<-EOS
term1:: def1

term2:: def2
      EOS
      output = render_string input
      assert_xpath '//dl', output, 1
      assert_xpath '//dl/dt', output, 2
      assert_xpath '//dl/dt/following-sibling::dd', output, 2
    end

    test "a line comment between elements should divide them into separate lists" do
      input = <<-EOS
term1:: def1

//

term2:: def2
      EOS
      output = render_string input
      assert_xpath '//dl', output, 2
      assert_xpath '//dl/dt', output, 2
      assert_xpath '(//dl)[1]/dt', output, 1
      assert_xpath '(//dl)[2]/dt', output, 1
    end

    test "a ruler between elements should divide them into separate lists" do
      input = <<-EOS
term1:: def1

'''

term2:: def2
      EOS
      output = render_string input
      assert_xpath '//dl', output, 2
      assert_xpath '//dl/dt', output, 2
      assert_xpath '//dl//hr', output, 0
      assert_xpath '(//dl)[1]/dt', output, 1
      assert_xpath '(//dl)[2]/dt', output, 1
    end

    test "a block title between elements should divide them into separate lists" do
      input = <<-EOS
term1:: def1

.Some more
term2:: def2
      EOS
      output = render_string input
      assert_xpath '//dl', output, 2
      assert_xpath '//dl/dt', output, 2
      assert_xpath '(//dl)[1]/dt', output, 1
      assert_xpath '(//dl)[2]/dt', output, 1
      assert_xpath '(//dl)[2]/preceding-sibling::*[@class="title"][text() = "Some more"]', output, 1
    end

    test "multi-line elements with paragraph content" do
      input = <<-EOS
term1::
def1
term2::
def2
      EOS
      output = render_string input
      assert_xpath '//dl', output, 1
      assert_xpath '//dl/dt', output, 2
      assert_xpath '//dl/dt/following-sibling::dd', output,
2 assert_xpath '(//dl/dt)[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl/dt)[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '(//dl/dt)[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl/dt)[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test "multi-line elements with indented paragraph content" do input = <<-EOS term1:: def1 term2:: def2 EOS output = render_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 assert_xpath '(//dl/dt)[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl/dt)[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '(//dl/dt)[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl/dt)[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test "multi-line elements with indented paragraph content that includes comment lines" do input = <<-EOS term1:: def1 // comment term2:: def2 // comment def2 continued EOS output = render_embedded_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 assert_xpath '(//dl/dt)[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl/dt)[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '(//dl/dt)[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath %((//dl/dt)[2]/following-sibling::dd/p[text() = "def2\ndef2 continued"]), output, 1 end test "should not strip comment line in literal paragraph block attached to list item" do input = <<-EOS term1:: + line 1 // not a comment line 3 EOS output = render_embedded_string input assert_xpath '//*[@class="literalblock"]', output, 1 assert_xpath %(//*[@class="literalblock"]//pre[text()=" line 1\n// not a comment\n line 3"]), output, 1 end test 'multi-line element with paragraph starting with multiple dashes should not be seen as list' do input = <<-EOS term1:: def1 -- and a note term2:: def2 EOS output = render_embedded_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 assert_xpath '(//dl/dt)[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath %((//dl/dt)[1]/following-sibling::dd/p[text() = "def1#{entity 8201}#{entity 8212}#{entity 8201}and a note"]), output, 1 assert_xpath '(//dl/dt)[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl/dt)[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test "multi-line element with multiple terms" do input = <<-EOS term1:: term2:: def2 EOS output = render_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dd', output, 1 assert_xpath '(//dl/dt)[1]/following-sibling::dt', output, 1 assert_xpath '(//dl/dt)[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl/dt)[2]/following-sibling::dd', output, 1 assert_xpath '(//dl/dt)[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test 'consecutive terms share same varlistentry in docbook' do input = <<-EOS term:: alt term:: description last:: EOS output = render_embedded_string input, :backend => 'docbook' assert_xpath '//varlistentry', output, 2 assert_xpath '(//varlistentry)[1]/term', output, 2 assert_xpath '(//varlistentry)[2]/term', output, 1 assert_xpath '(//varlistentry)[2]/listitem', output, 1 assert_xpath '(//varlistentry)[2]/listitem[normalize-space(text())=""]', output, 1 end test "multi-line 
elements with blank line before paragraph content" do input = <<-EOS term1:: def1 term2:: def2 EOS output = render_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 assert_xpath '(//dl/dt)[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl/dt)[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '(//dl/dt)[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl/dt)[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test "multi-line elements with paragraph and literal content" do # blank line following literal paragraph is required or else it will gobble up the second term input = <<-EOS term1:: def1 literal term2:: def2 EOS output = render_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 assert_xpath '//dl/dt/following-sibling::dd//pre', output, 1 assert_xpath '(//dl/dt)[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl/dt)[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '(//dl/dt)[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl/dt)[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test "mixed single and multi-line adjacent elements" do input = <<-EOS term1:: def1 term2:: def2 EOS output = render_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dt/following-sibling::dd', output, 2 assert_xpath '(//dl/dt)[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl/dt)[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '(//dl/dt)[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl/dt)[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test "element with anchor" do input = <<-EOS [[term1]]term1:: def1 [[term2]]term2:: def2 EOS output = render_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dt', output, 2 assert_xpath '(//dl/dt)[1]/a[@id = "term1"]', output, 1 assert_xpath '(//dl/dt)[2]/a[@id = "term2"]', output, 1 end test "missing space before term does not produce labeled list" do input = <<-EOS term1::def1 term2::def2 EOS output = render_string input assert_xpath '//dl', output, 0 end test "literal block inside labeled list" do input = <<-EOS term:: + .... literal, line 1 literal, line 2 .... anotherterm:: def EOS output = render_string input assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dd', output, 2 assert_xpath '//dl/dd//pre', output, 1 assert_xpath '(//dl/dd)[1]/*[@class="literalblock"]//pre', output, 1 assert_xpath '(//dl/dd)[2]/p[text() = "def"]', output, 1 end test "literal block inside labeled list with trailing line continuation" do input = <<-EOS term:: + .... literal, line 1 literal, line 2 .... 
+ anotherterm:: def EOS output = render_string input assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dd', output, 2 assert_xpath '//dl/dd//pre', output, 1 assert_xpath '(//dl/dd)[1]/*[@class="literalblock"]//pre', output, 1 assert_xpath '(//dl/dd)[2]/p[text() = "def"]', output, 1 end test "multiple listing blocks inside labeled list" do input = <<-EOS term:: + ---- listing, line 1 listing, line 2 ---- + ---- listing, line 1 listing, line 2 ---- anotherterm:: def EOS output = render_string input assert_xpath '//dl/dt', output, 2 assert_xpath '//dl/dd', output, 2 assert_xpath '//dl/dd//pre', output, 2 assert_xpath '(//dl/dd)[1]/*[@class="listingblock"]//pre', output, 2 assert_xpath '(//dl/dd)[2]/p[text() = "def"]', output, 1 end test "open block inside labeled list" do input = <<-EOS term:: + -- Open block as description of term. And some more detail... -- anotherterm:: def EOS output = render_string input assert_xpath '//dl/dd//p', output, 3 assert_xpath '(//dl/dd)[1]//*[@class="openblock"]//p', output, 2 end test "paragraph attached by a list continuation on either side in a labeled list" do input = <<-EOS term1:: def1 + more detail + term2:: def2 EOS output = render_string input assert_xpath '(//dl/dt)[1][normalize-space(text())="term1"]', output, 1 assert_xpath '(//dl/dt)[2][normalize-space(text())="term2"]', output, 1 assert_xpath '(//dl/dd)[1]//p', output, 2 assert_xpath '((//dl/dd)[1]//p)[1][text()="def1"]', output, 1 assert_xpath '(//dl/dd)[1]/p/following-sibling::*[@class="paragraph"]/p[text() = "more detail"]', output, 1 end test "paragraph attached by a list continuation on either side to a multi-line element in a labeled list" do input = <<-EOS term1:: def1 + more detail + term2:: def2 EOS output = render_string input assert_xpath '(//dl/dt)[1][normalize-space(text())="term1"]', output, 1 assert_xpath '(//dl/dt)[2][normalize-space(text())="term2"]', output, 1 assert_xpath '(//dl/dd)[1]//p', output, 2 assert_xpath '((//dl/dd)[1]//p)[1][text()="def1"]', output, 1 assert_xpath '(//dl/dd)[1]/p/following-sibling::*[@class="paragraph"]/p[text() = "more detail"]', output, 1 end test "verse paragraph inside a labeled list" do input = <<-EOS term1:: def + [verse] la la la term2:: def EOS output = render_string input assert_xpath '//dl/dd//p', output, 2 assert_xpath '(//dl/dd)[1]/*[@class="verseblock"]/pre[text() = "la la la"]', output, 1 end test "list inside a labeled list" do input = <<-EOS term1:: * level 1 ** level 2 * level 1 term2:: def EOS output = render_string input assert_xpath '//dl/dd', output, 2 assert_xpath '//dl/dd/p', output, 1 assert_xpath '(//dl/dd)[1]//ul', output, 2 assert_xpath '((//dl/dd)[1]//ul)[1]//ul', output, 1 end test "list inside a labeled list offset by blank lines" do input = <<-EOS term1:: * level 1 ** level 2 * level 1 term2:: def EOS output = render_string input assert_xpath '//dl/dd', output, 2 assert_xpath '//dl/dd/p', output, 1 assert_xpath '(//dl/dd)[1]//ul', output, 2 assert_xpath '((//dl/dd)[1]//ul)[1]//ul', output, 1 end test "should only grab one line following last item if item has no inline description" do input = <<-EOS term1:: def1 term2:: def2 A new paragraph Another new paragraph EOS output = render_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dd', output, 2 assert_xpath '(//dl/dd)[1]/p[text() = "def1"]', output, 1 assert_xpath '(//dl/dd)[2]/p[text() = "def2"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="paragraph"]', output, 2 assert_xpath 
'(//*[@class="dlist"]/following-sibling::*[@class="paragraph"])[1]/p[text() = "A new paragraph"]', output, 1 assert_xpath '(//*[@class="dlist"]/following-sibling::*[@class="paragraph"])[2]/p[text() = "Another new paragraph"]', output, 1 end test "should only grab one literal line following last item if item has no inline description" do input = <<-EOS term1:: def1 term2:: def2 A new paragraph Another new paragraph EOS output = render_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dd', output, 2 assert_xpath '(//dl/dd)[1]/p[text() = "def1"]', output, 1 assert_xpath '(//dl/dd)[2]/p[text() = "def2"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="paragraph"]', output, 2 assert_xpath '(//*[@class="dlist"]/following-sibling::*[@class="paragraph"])[1]/p[text() = "A new paragraph"]', output, 1 assert_xpath '(//*[@class="dlist"]/following-sibling::*[@class="paragraph"])[2]/p[text() = "Another new paragraph"]', output, 1 end test "should append subsequent paragraph literals to list item as block content" do input = <<-EOS term1:: def1 term2:: def2 literal A new paragraph. EOS output = render_string input assert_xpath '//dl', output, 1 assert_xpath '//dl/dd', output, 2 assert_xpath '(//dl/dd)[1]/p[text() = "def1"]', output, 1 assert_xpath '(//dl/dd)[2]/p[text() = "def2"]', output, 1 assert_xpath '(//dl/dd)[2]/p/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath '(//dl/dd)[2]/p/following-sibling::*[@class="literalblock"]//pre[text() = "literal"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '(//*[@class="dlist"]/following-sibling::*[@class="paragraph"])[1]/p[text() = "A new paragraph."]', output, 1 end test 'should not match comment line that looks like labeled list term' do input = <<-EOS * item //:: == Section section text EOS output = render_embedded_string input assert_xpath '/*[@class="ulist"]', output, 1 assert_xpath '/*[@class="sect1"]', output, 1 assert_xpath '/*[@class="sect1"]/h2[text()="Section"]', output, 1 assert_xpath '/*[@class="ulist"]/following-sibling::*[@class="sect1"]', output, 1 end test 'more than 4 consecutive colons should become part of description list term' do input = <<-EOS A term::::: a description EOS output = render_embedded_string input assert_xpath '//dl', output, 1 assert_xpath '//dt', output, 1 assert_xpath '//dt[text()="A term:"]', output, 1 assert_xpath '//dd/p[text()="a description"]', output, 1 end end context "Nested lists" do test "single-line adjacent nested elements" do input = <<-EOS term1:: def1 label1::: detail1 term2:: def2 EOS output = render_string input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl)[1]/dt[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '//dl//dl/dt[normalize-space(text()) = "label1"]', output, 1 assert_xpath '//dl//dl/dt/following-sibling::dd/p[text() = "detail1"]', output, 1 assert_xpath '(//dl)[1]/dt[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl)[1]/dt[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test "single-line adjacent maximum nested elements" do input = <<-EOS term1:: def1 label1::: detail1 name1:::: value1 item1;; price1 term2:: def2 EOS output = render_string input assert_xpath '//dl', output, 4 assert_xpath '//dl//dl//dl//dl', output, 1 end test "single-line nested elements seperated by blank line at top level" do input = 
<<-EOS term1:: def1 label1::: detail1 term2:: def2 EOS output = render_string input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl)[1]/dt[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '//dl//dl/dt[normalize-space(text()) = "label1"]', output, 1 assert_xpath '//dl//dl/dt/following-sibling::dd/p[text() = "detail1"]', output, 1 assert_xpath '(//dl)[1]/dt[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl)[1]/dt[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test "single-line nested elements seperated by blank line at nested level" do input = <<-EOS term1:: def1 label1::: detail1 label2::: detail2 term2:: def2 EOS output = render_string input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl)[1]/dt[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '//dl//dl/dt[normalize-space(text()) = "label1"]', output, 1 assert_xpath '//dl//dl/dt/following-sibling::dd/p[text() = "detail1"]', output, 1 assert_xpath '(//dl)[1]/dt[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl)[1]/dt[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test "single-line adjacent nested elements with alternate delimiters" do input = <<-EOS term1:: def1 label1;; detail1 term2:: def2 EOS output = render_string input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl)[1]/dt[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '//dl//dl/dt[normalize-space(text()) = "label1"]', output, 1 assert_xpath '//dl//dl/dt/following-sibling::dd/p[text() = "detail1"]', output, 1 assert_xpath '(//dl)[1]/dt[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl)[1]/dt[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test "multi-line adjacent nested elements" do input = <<-EOS term1:: def1 label1::: detail1 term2:: def2 EOS output = render_string input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl)[1]/dt[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '//dl//dl/dt[normalize-space(text()) = "label1"]', output, 1 assert_xpath '//dl//dl/dt/following-sibling::dd/p[text() = "detail1"]', output, 1 assert_xpath '(//dl)[1]/dt[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl)[1]/dt[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test "multi-line nested elements seperated by blank line at nested level repeated" do input = <<-EOS term1:: def1 label1::: detail1 label2::: detail2 term2:: def2 EOS output = render_string input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl)[1]/dt[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '(//dl//dl/dt)[1][normalize-space(text()) = "label1"]', output, 1 assert_xpath '(//dl//dl/dt)[1]/following-sibling::dd/p[text() = "detail1"]', output, 1 assert_xpath '(//dl//dl/dt)[2][normalize-space(text()) = "label2"]', output, 1 assert_xpath '(//dl//dl/dt)[2]/following-sibling::dd/p[text() = "detail2"]', output, 1 end test "multi-line element with indented 
nested element" do input = <<-EOS term1:: def1 label1;; detail1 term2:: def2 EOS output = render_string input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt', output, 2 assert_xpath '(//dl)[1]/dd', output, 2 assert_xpath '((//dl)[1]/dt)[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '((//dl)[1]/dt)[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '//dl//dl/dt', output, 1 assert_xpath '//dl//dl/dt[normalize-space(text()) = "label1"]', output, 1 assert_xpath '//dl//dl/dt/following-sibling::dd/p[text() = "detail1"]', output, 1 assert_xpath '((//dl)[1]/dt)[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '((//dl)[1]/dt)[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test "mixed single and multi-line elements with indented nested elements" do input = <<-EOS term1:: def1 label1::: detail1 term2:: def2 EOS output = render_string input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl)[1]/dt[1]/following-sibling::dd/p[text() = "def1"]', output, 1 assert_xpath '//dl//dl/dt[normalize-space(text()) = "label1"]', output, 1 assert_xpath '//dl//dl/dt/following-sibling::dd/p[text() = "detail1"]', output, 1 assert_xpath '(//dl)[1]/dt[2][normalize-space(text()) = "term2"]', output, 1 assert_xpath '(//dl)[1]/dt[2]/following-sibling::dd/p[text() = "def2"]', output, 1 end test "multi-line elements with first paragraph folded to text with adjacent nested element" do input = <<-EOS term1:: def1 continued label1::: detail1 EOS output = render_string input assert_xpath '//dl', output, 2 assert_xpath '//dl//dl', output, 1 assert_xpath '(//dl)[1]/dt[1][normalize-space(text()) = "term1"]', output, 1 assert_xpath '(//dl)[1]/dt[1]/following-sibling::dd/p[starts-with(text(), "def1")]', output, 1 assert_xpath '(//dl)[1]/dt[1]/following-sibling::dd/p[contains(text(), "continued")]', output, 1 assert_xpath '//dl//dl/dt[normalize-space(text()) = "label1"]', output, 1 assert_xpath '//dl//dl/dt/following-sibling::dd/p[text() = "detail1"]', output, 1 end end context 'Special lists' do test 'should render glossary list with proper semantics' do input = <<-EOS [glossary] term 1:: def 1 term 2:: def 2 EOS output = render_embedded_string input assert_css '.dlist.glossary', output, 1 assert_css '.dlist dt:not([class])', output, 2 end test 'consecutive glossary terms should share same glossentry element in docbook' do input = <<-EOS [glossary] term:: alt term:: description last:: EOS output = render_embedded_string input, :backend => 'docbook' assert_xpath '/glossentry', output, 2 assert_xpath '(/glossentry)[1]/glossterm', output, 2 assert_xpath '(/glossentry)[2]/glossterm', output, 1 assert_xpath '(/glossentry)[2]/glossdef', output, 1 assert_xpath '(/glossentry)[2]/glossdef[normalize-space(text())=""]', output, 1 end test 'should render horizontal list with proper markup' do input = <<-EOS [horizontal] first term:: description + more detail second term:: description EOS output = render_embedded_string input assert_css '.hdlist', output, 1 assert_css '.hdlist table', output, 1 assert_css '.hdlist table colgroup', output, 0 assert_css '.hdlist table tr', output, 2 assert_xpath '/*[@class="hdlist"]/table/tr[1]/td', output, 2 assert_xpath '/*[@class="hdlist"]/table/tr[1]/td[@class="hdlist1"]', output, 1 assert_xpath '/*[@class="hdlist"]/table/tr[1]/td[@class="hdlist2"]', output, 1 assert_xpath 
'/*[@class="hdlist"]/table/tr[1]/td[@class="hdlist2"]/p', output, 1 assert_xpath '/*[@class="hdlist"]/table/tr[1]/td[@class="hdlist2"]/p/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '((//tr)[1]/td)[1][normalize-space(text())="first term"]', output, 1 assert_xpath '((//tr)[1]/td)[2]/p[normalize-space(text())="description"]', output, 1 assert_xpath '/*[@class="hdlist"]/table/tr[2]/td', output, 2 assert_xpath '((//tr)[2]/td)[1][normalize-space(text())="second term"]', output, 1 assert_xpath '((//tr)[2]/td)[2]/p[normalize-space(text())="description"]', output, 1 end test 'should set col widths of item and label if specified' do input = <<-EOS [horizontal] [labelwidth="25", itemwidth="75"] term:: def EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup', output, 1 assert_css 'table > colgroup > col', output, 2 assert_xpath '(//table/colgroup/col)[1][@style="width: 25%;"]', output, 1 assert_xpath '(//table/colgroup/col)[2][@style="width: 75%;"]', output, 1 end test 'should set col widths of item and label in docbook if specified' do input = <<-EOS [horizontal] [labelwidth="25", itemwidth="75"] term:: def EOS output = render_embedded_string input, :backend => 'docbook' assert_css 'informaltable', output, 1 assert_css 'informaltable > tgroup', output, 1 assert_css 'informaltable > tgroup > colspec', output, 2 assert_xpath '(/informaltable/tgroup/colspec)[1][@colwidth="25*"]', output, 1 assert_xpath '(/informaltable/tgroup/colspec)[2][@colwidth="75*"]', output, 1 end test 'should add strong class to label if strong option is set' do input = <<-EOS [horizontal, options="strong"] term:: def EOS output = render_embedded_string input assert_css '.hdlist', output, 1 assert_css '.hdlist td.hdlist1.strong', output, 1 end test 'consecutive terms in horizontal list should share same cell' do input = <<-EOS [horizontal] term:: alt term:: description last:: EOS output = render_embedded_string input assert_xpath '//tr', output, 2 assert_xpath '(//tr)[1]/td[@class="hdlist1"]', output, 1 # NOTE I'm trimming the trailing
    in Asciidoctor #assert_xpath '(//tr)[1]/td[@class="hdlist1"]/br', output, 2 assert_xpath '(//tr)[1]/td[@class="hdlist1"]/br', output, 1 assert_xpath '(//tr)[2]/td[@class="hdlist2"]', output, 1 end test 'consecutive terms in horizontal list should share same entry in docbook' do input = <<-EOS [horizontal] term:: alt term:: description last:: EOS output = render_embedded_string input, :backend => 'docbook' assert_xpath '//row', output, 2 assert_xpath '(//row)[1]/entry', output, 2 assert_xpath '((//row)[1]/entry)[1]/simpara', output, 2 assert_xpath '(//row)[2]/entry', output, 2 assert_xpath '((//row)[2]/entry)[2][normalize-space(text())=""]', output, 1 end test 'should render horizontal list in docbook with proper markup' do input = <<-EOS .Terms [horizontal] first term:: description + more detail second term:: description EOS output = render_embedded_string input, :backend => 'docbook' assert_xpath '/table', output, 1 assert_xpath '/table[@tabstyle="horizontal"]', output, 1 assert_xpath '/table[@tabstyle="horizontal"]/title[text()="Terms"]', output, 1 assert_xpath '/table//row', output, 2 assert_xpath '(/table//row)[1]/entry', output, 2 assert_xpath '(/table//row)[2]/entry', output, 2 assert_xpath '((/table//row)[1]/entry)[2]/simpara', output, 2 end test 'should render qanda list in HTML with proper semantics' do input = <<-EOS [qanda] Question 1:: Answer 1. Question 2:: Answer 2. + NOTE: A note about Answer 2. EOS output = render_embedded_string input assert_css '.qlist.qanda', output, 1 assert_css '.qanda > ol', output, 1 assert_css '.qanda > ol > li', output, 2 (1..2).each do |idx| assert_css ".qanda > ol > li:nth-child(#{idx}) > p", output, 2 assert_css ".qanda > ol > li:nth-child(#{idx}) > p:first-child > em", output, 1 assert_xpath "/*[@class = 'qlist qanda']/ol/li[#{idx}]/p[1]/em[normalize-space(text()) = 'Question #{idx}']", output, 1 assert_css ".qanda > ol > li:nth-child(#{idx}) > p:last-child > *", output, 0 assert_xpath "/*[@class = 'qlist qanda']/ol/li[#{idx}]/p[2][normalize-space(text()) = 'Answer #{idx}.']", output, 1 end assert_xpath "/*[@class = 'qlist qanda']/ol/li[2]/p[2]/following-sibling::div[@class='admonitionblock note']", output, 1 end test 'should render qanda list in DocBook with proper semantics' do input = <<-EOS [qanda] Question 1:: Answer 1. Question 2:: Answer 2. + NOTE: A note about Answer 2. 
EOS output = render_embedded_string input, :backend => 'docbook' assert_css 'qandaset', output, 1 assert_css 'qandaset > qandaentry', output, 2 (1..2).each do |idx| assert_css "qandaset > qandaentry:nth-child(#{idx}) > question", output, 1 assert_css "qandaset > qandaentry:nth-child(#{idx}) > question > simpara", output, 1 assert_xpath "/qandaset/qandaentry[#{idx}]/question/simpara[normalize-space(text()) = 'Question #{idx}']", output, 1 assert_css "qandaset > qandaentry:nth-child(#{idx}) > answer", output, 1 assert_css "qandaset > qandaentry:nth-child(#{idx}) > answer > simpara", output, 1 assert_xpath "/qandaset/qandaentry[#{idx}]/answer/simpara[normalize-space(text()) = 'Answer #{idx}.']", output, 1 end assert_xpath "/qandaset/qandaentry[2]/answer/simpara/following-sibling::note", output, 1 end test 'consecutive questions should share same question element in docbook' do input = <<-EOS [qanda] question:: follow-up question:: response last question:: EOS output = render_embedded_string input, :backend => 'docbook' assert_xpath '//qandaentry', output, 2 assert_xpath '(//qandaentry)[1]/question', output, 1 assert_xpath '(//qandaentry)[1]/question/simpara', output, 2 assert_xpath '(//qandaentry)[2]/question', output, 1 assert_xpath '(//qandaentry)[2]/answer', output, 1 assert_xpath '(//qandaentry)[2]/answer[normalize-space(text())=""]', output, 1 end test 'should render bibliography list with proper semantics' do input = <<-EOS [bibliography] - [[[taoup]]] Eric Steven Raymond. 'The Art of Unix Programming'. Addison-Wesley. ISBN 0-13-142901-9. - [[[walsh-muellner]]] Norman Walsh & Leonard Muellner. 'DocBook - The Definitive Guide'. O'Reilly & Associates. 1999. ISBN 1-56592-580-7. EOS output = render_embedded_string input assert_css '.ulist.bibliography', output, 1 assert_css '.ulist.bibliography ul', output, 1 assert_css '.ulist.bibliography ul li', output, 2 assert_css '.ulist.bibliography ul li p', output, 2 assert_css '.ulist.bibliography ul li:nth-child(1) p a#taoup', output, 1 assert_xpath '//a/*', output, 0 text = xmlnodes_at_xpath '(//a)[1]/following-sibling::text()', output, 1 assert text.text.start_with?('[taoup] ') end test 'should render bibliography list with proper semantics to DocBook' do input = <<-EOS [bibliography] - [[[taoup]]] Eric Steven Raymond. 'The Art of Unix Programming'. Addison-Wesley. ISBN 0-13-142901-9. - [[[walsh-muellner]]] Norman Walsh & Leonard Muellner. 'DocBook - The Definitive Guide'. O'Reilly & Associates. 1999. ISBN 1-56592-580-7. 
EOS output = render_embedded_string input, :backend => 'docbook' assert_css 'bibliodiv', output, 1 assert_css 'bibliodiv > bibliomixed', output, 2 assert_css 'bibliodiv > bibliomixed > bibliomisc', output, 2 assert_css 'bibliodiv > bibliomixed:nth-child(1) > bibliomisc > anchor', output, 1 assert_css 'bibliodiv > bibliomixed:nth-child(1) > bibliomisc > anchor[xreflabel="[taoup]"]', output, 1 assert_css 'bibliodiv > bibliomixed:nth-child(2) > bibliomisc > anchor', output, 1 assert_css 'bibliodiv > bibliomixed:nth-child(2) > bibliomisc > anchor[xreflabel="[walsh-muellner]"]', output, 1 end end end context 'Description lists redux' do context 'Label without text on same line' do test 'folds text from subsequent line' do input = <<-EOS == Lists term1:: def1 EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 end test 'folds text from first line after blank lines' do input = <<-EOS == Lists term1:: def1 EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 end test 'folds text from first line after blank line and immediately preceding next item' do input = <<-EOS == Lists term1:: def1 term2:: def2 EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 2 assert_xpath '(//*[@class="dlist"]//dd)[1]/p[text()="def1"]', output, 1 end test 'paragraph offset by blank lines does not break list if label does not have inline text' do input = <<-EOS == Lists term1:: def1 term2:: def2 EOS output = render_embedded_string input assert_css 'dl', output, 1 assert_css 'dl > dt', output, 2 assert_css 'dl > dd', output, 2 assert_xpath '(//dl/dd)[1]/p[text()="def1"]', output, 1 end test 'folds text from first line after comment line' do input = <<-EOS == Lists term1:: // comment def1 EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 end test 'folds text from line following comment line offset by blank line' do input = <<-EOS == Lists term1:: // comment def1 EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 end test 'folds text from subsequent indented line' do input = <<-EOS == Lists term1:: def1 EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 end test 'folds text from indented line after blank line' do input = <<-EOS == Lists term1:: def1 EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 end test 'folds text that looks like ruler offset by blank line' do input = <<-EOS == Lists term1:: ''' EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath %(//*[@class="dlist"]//dd/p[text()="'''"]), output, 1 end test 
'folds text that looks like ruler offset by blank line and line comment' do input = <<-EOS == Lists term1:: // comment ''' EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath %(//*[@class="dlist"]//dd/p[text()="'''"]), output, 1 end test 'folds text that looks like ruler and the line following it offset by blank line' do input = <<-EOS == Lists term1:: ''' continued EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath %(//*[@class="dlist"]//dd/p[normalize-space(text())="''' continued"]), output, 1 end test 'folds text that looks like title offset by blank line' do input = <<-EOS == Lists term1:: .def1 EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()=".def1"]', output, 1 end test 'folds text that looks like title offset by blank line and line comment' do input = <<-EOS == Lists term1:: // comment .def1 EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()=".def1"]', output, 1 end test 'folds text that looks like admonition offset by blank line' do input = <<-EOS == Lists term1:: NOTE: def1 EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="NOTE: def1"]', output, 1 end test 'folds text that looks like section title offset by blank line' do input = <<-EOS == Lists term1:: == Another Section EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="== Another Section"]', output, 1 assert_xpath '//h2', output, 1 end test 'folds text of first literal line offset by blank line appends subsequent literals offset by blank line as blocks' do input = <<-EOS == Lists term1:: def1 literal literal EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="literalblock"]', output, 2 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="literalblock"]//pre[text()="literal"]', output, 2 end test 'folds text of subsequent line and appends following literal line offset by blank line as block if term has no inline description' do input = <<-EOS == Lists term1:: def1 literal term2:: def2 EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 2 assert_xpath '(//*[@class="dlist"]//dd)[1]/p[text()="def1"]', output, 1 assert_xpath '(//*[@class="dlist"]//dd)[1]/p/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath '(//*[@class="dlist"]//dd)[1]/p/following-sibling::*[@class="literalblock"]//pre[text()="literal"]', output, 1 end test 'appends literal line attached by continuation as block if item has no inline description' do input = <<-EOS == Lists term1:: + literal EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 
assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 assert_xpath '//*[@class="dlist"]//dd/*[@class="literalblock"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="literalblock"]//pre[text()="literal"]', output, 1 end test 'appends literal line attached by continuation as block if item has no inline description followed by ruler' do input = <<-EOS == Lists term1:: + literal ''' EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 assert_xpath '//*[@class="dlist"]//dd/*[@class="literalblock"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="literalblock"]//pre[text()="literal"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::hr', output, 1 end test 'appends line attached by continuation as block if item has no inline description followed by ruler' do input = <<-EOS == Lists term1:: + para ''' EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 assert_xpath '//*[@class="dlist"]//dd/*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="paragraph"]/p[text()="para"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::hr', output, 1 end test 'appends line attached by continuation as block if item has no inline description followed by block' do input = <<-EOS == Lists term1:: + para .... literal .... EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 assert_xpath '//*[@class="dlist"]//dd/*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="paragraph"]/p[text()="para"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="literalblock"]//pre[text()="literal"]', output, 1 end test 'appends block attached by continuation but not subsequent block not attached by continuation' do input = <<-EOS == Lists term1:: + .... literal .... .... detached .... 
EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 assert_xpath '//*[@class="dlist"]//dd/*[@class="literalblock"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="literalblock"]//pre[text()="literal"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="literalblock"]//pre[text()="detached"]', output, 1 end test 'appends list if item has no inline description' do input = <<-EOS == Lists term1:: * one * two * three EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 assert_xpath '//*[@class="dlist"]//dd//ul/li', output, 3 end test 'appends list to first term when followed immediately by second term' do input = <<-EOS == Lists term1:: * one * two * three term2:: def2 EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 2 assert_xpath '(//*[@class="dlist"]//dd)[1]/p', output, 0 assert_xpath '(//*[@class="dlist"]//dd)[1]//ul/li', output, 3 assert_xpath '(//*[@class="dlist"]//dd)[2]/p[text()="def2"]', output, 1 end test 'appends indented list to first term that is adjacent to second term' do input = <<-EOS == Lists label 1:: description 1 * one * two * three label 2:: description 2 paragraph EOS output = render_embedded_string input assert_css '.dlist > dl', output, 1 assert_css '.dlist dt', output, 2 assert_xpath '(//*[@class="dlist"]//dt)[1][normalize-space(text())="label 1"]', output, 1 assert_xpath '(//*[@class="dlist"]//dt)[2][normalize-space(text())="label 2"]', output, 1 assert_css '.dlist dd', output, 2 assert_xpath '(//*[@class="dlist"]//dd)[1]/p[text()="description 1"]', output, 1 assert_xpath '(//*[@class="dlist"]//dd)[2]/p[text()="description 2"]', output, 1 assert_xpath '(//*[@class="dlist"]//dd)[1]/p/following-sibling::*[@class="ulist"]', output, 1 assert_xpath '(//*[@class="dlist"]//dd)[1]/p/following-sibling::*[@class="ulist"]//li', output, 3 assert_css '.dlist + .paragraph', output, 1 end test 'appends indented list to first term that is attached by a continuation and adjacent to second term' do input = <<-EOS == Lists label 1:: description 1 + * one * two * three label 2:: description 2 paragraph EOS output = render_embedded_string input assert_css '.dlist > dl', output, 1 assert_css '.dlist dt', output, 2 assert_xpath '(//*[@class="dlist"]//dt)[1][normalize-space(text())="label 1"]', output, 1 assert_xpath '(//*[@class="dlist"]//dt)[2][normalize-space(text())="label 2"]', output, 1 assert_css '.dlist dd', output, 2 assert_xpath '(//*[@class="dlist"]//dd)[1]/p[text()="description 1"]', output, 1 assert_xpath '(//*[@class="dlist"]//dd)[2]/p[text()="description 2"]', output, 1 assert_xpath '(//*[@class="dlist"]//dd)[1]/p/following-sibling::*[@class="ulist"]', output, 1 assert_xpath '(//*[@class="dlist"]//dd)[1]/p/following-sibling::*[@class="ulist"]//li', output, 3 assert_css '.dlist + .paragraph', output, 1 end test 'appends list and paragraph block when line following list attached by continuation' do input = <<-EOS == Lists term1:: * one * two * three + para EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', 
output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 assert_xpath '//*[@class="dlist"]//dd/*[@class="ulist"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="ulist"]/ul/li', output, 3 assert_xpath '//*[@class="dlist"]//dd/*[@class="ulist"]/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="ulist"]/following-sibling::*[@class="paragraph"]/p[text()="para"]', output, 1 end test 'first continued line associated with nested list item and second continued line associated with term' do input = <<-EOS == Lists term1:: * one + nested list para + term1 para EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 assert_xpath '//*[@class="dlist"]//dd/*[@class="ulist"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="ulist"]/ul/li', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="ulist"]/ul/li/*[@class="paragraph"]/p[text()="nested list para"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="ulist"]/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="ulist"]/following-sibling::*[@class="paragraph"]/p[text()="term1 para"]', output, 1 end test 'literal line attached by continuation swallows adjacent line that looks like term' do input = <<-EOS == Lists term1:: + literal notnestedterm::: + literal notnestedterm::: EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 assert_xpath '//*[@class="dlist"]//dd/*[@class="literalblock"]', output, 2 assert_xpath %(//*[@class="dlist"]//dd/*[@class="literalblock"]//pre[text()=" literal\nnotnestedterm:::"]), output, 2 end test 'line attached by continuation is appended as paragraph if term has no inline description' do input = <<-EOS == Lists term1:: + para EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 assert_xpath '//*[@class="dlist"]//dd/*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="paragraph"]/p[text()="para"]', output, 1 end test 'attached paragraph does not break on adjacent nested labeled list term' do input = <<-EOS term1:: def + more description not a term::: def EOS output = render_embedded_string input assert_css '.dlist > dl > dt', output, 1 assert_css '.dlist > dl > dd', output, 1 assert_css '.dlist > dl > dd > .paragraph', output, 1 assert output.include?('not a term::: def') end # FIXME pending =begin test 'attached paragraph does not break on adjacent sibling labeled list term' do input = <<-EOS term1:: def + more description not a term:: def EOS output = render_embedded_string input assert_css '.dlist > dl > dt', output, 1 assert_css '.dlist > dl > dd', output, 1 assert_css '.dlist > dl > dd > .paragraph', output, 1 assert output.include?('not a term:: def') end =end test 'attached styled paragraph does not break on adjacent nested labeled list term' do input = <<-EOS term1:: def + [quote] more description not a term::: def EOS output = render_embedded_string input assert_css '.dlist > dl > dt', output, 1 assert_css '.dlist > dl > dd', output, 1 assert_css '.dlist > dl > dd > .quoteblock', output, 1 assert output.include?('not a term::: def') end test 
'appends line as paragraph if attached by continuation following blank line and line comment when term has no inline description' do input = <<-EOS == Lists term1:: // comment + para EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 assert_xpath '//*[@class="dlist"]//dd/*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="paragraph"]/p[text()="para"]', output, 1 end test 'line attached by continuation offset by blank line is appended as paragraph if term has no inline description' do input = <<-EOS == Lists term1:: + para EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p', output, 0 assert_xpath '//*[@class="dlist"]//dd/*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="paragraph"]/p[text()="para"]', output, 1 end test 'delimited block breaks list even when term has no inline description' do input = <<-EOS == Lists term1:: ==== detached ==== EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 0 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="exampleblock"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="exampleblock"]//p[text()="detached"]', output, 1 end test 'attribute line breaks list even when term has no inline description' do input = <<-EOS == Lists term1:: [verse] detached EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 0 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="verseblock"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="verseblock"]/pre[text()="detached"]', output, 1 end test 'id line breaks list even when term has no inline description' do input = <<-EOS == Lists term1:: [[id]] detached EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 0 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="paragraph"]/p[text()="detached"]', output, 1 end end context 'Item with text inline' do test 'folds text from inline description and subsequent line' do input = <<-EOS == Lists term1:: def1 continued EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath %(//*[@class="dlist"]//dd/p[text()="def1\ncontinued"]), output, 1 end test 'folds text from inline description and subsequent lines' do input = <<-EOS == Lists term1:: def1 continued continued EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath %(//*[@class="dlist"]//dd/p[text()="def1\ncontinued\ncontinued"]), output, 1 end test 'folds text from inline description and line following comment line' do input = <<-EOS == Lists term1:: def1 // comment continued EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath %(//*[@class="dlist"]//dd/p[text()="def1\ncontinued"]), output, 1 
end test 'folds text from inline description and subsequent indented line' do input = <<-EOS == Lists term1:: def1 continued EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath %(//*[@class="dlist"]//dd/p[text()="def1\ncontinued"]), output, 1 end test 'appends literal line offset by blank line as block if item has inline description' do input = <<-EOS == Lists term1:: def1 literal EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="literalblock"]//pre[text()="literal"]', output, 1 end test 'appends literal line offset by blank line as block and appends line after continuation as block if item has inline description' do input = <<-EOS == Lists term1:: def1 literal + para EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="literalblock"]//pre[text()="literal"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="literalblock"]/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="literalblock"]/following-sibling::*[@class="paragraph"]/p[text()="para"]', output, 1 end test 'appends line after continuation as block and literal line offset by blank line as block if item has inline description' do input = <<-EOS == Lists term1:: def1 + para literal EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="paragraph"]/p[text()="para"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="paragraph"]/following-sibling::*[@class="literalblock"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/*[@class="paragraph"]/following-sibling::*[@class="literalblock"]//pre[text()="literal"]', output, 1 end test 'appends list if item has inline description' do input = <<-EOS == Lists term1:: def1 * one * two * three EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="ulist"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="ulist"]/ul/li', output, 3 end test 'appends literal line attached by continuation as block if item has inline description followed by ruler' do input = <<-EOS == Lists term1:: def1 + literal ''' EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="literalblock"]', output, 1 
assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="literalblock"]//pre[text()="literal"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::hr', output, 1 end test 'line offset by blank line breaks list if term has inline description' do input = <<-EOS == Lists term1:: def1 detached EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="paragraph"]/p[text()="detached"]', output, 1 end test 'nested term with description does not consume following heading' do input = <<-EOS == Lists term:: def nestedterm;; nesteddef Detached ~~~~~~~~ EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 2 assert_xpath '//*[@class="dlist"]//dd', output, 2 assert_xpath '//*[@class="dlist"]/dl//dl', output, 1 assert_xpath '//*[@class="dlist"]/dl//dl/dt', output, 1 assert_xpath '((//*[@class="dlist"])[1]//dd)[1]/p[text()="def"]', output, 1 assert_xpath '((//*[@class="dlist"])[1]//dd)[1]/p/following-sibling::*[@class="dlist"]', output, 1 assert_xpath '((//*[@class="dlist"])[1]//dd)[1]/p/following-sibling::*[@class="dlist"]//dd/p[text()="nesteddef"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="sect2"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="sect2"]/h3[text()="Detached"]', output, 1 end test 'line attached by continuation is appended as paragraph if term has inline description followed by detached paragraph' do input = <<-EOS == Lists term1:: def1 + para detached EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="paragraph"]/p[text()="para"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="paragraph"]/p[text()="detached"]', output, 1 end test 'line attached by continuation is appended as paragraph if term has inline description followed by detached block' do input = <<-EOS == Lists term1:: def1 + para **** detached **** EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="paragraph"]/p[text()="para"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="sidebarblock"]', output, 1 assert_xpath '//*[@class="dlist"]/following-sibling::*[@class="sidebarblock"]//p[text()="detached"]', output, 1 end test 'line attached by continuation offset by line comment is appended as paragraph if term has inline description' do input = <<-EOS == Lists term1:: def1 // comment + para EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath 
'//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="paragraph"]/p[text()="para"]', output, 1 end test 'line attached by continuation offset by blank line is appended as paragraph if term has inline description' do input = <<-EOS == Lists term1:: def1 + para EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 1 assert_xpath '//*[@class="dlist"]//dd', output, 1 assert_xpath '//*[@class="dlist"]//dd/p[text()="def1"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '//*[@class="dlist"]//dd/p/following-sibling::*[@class="paragraph"]/p[text()="para"]', output, 1 end test 'line comment offset by blank line divides lists because item has text' do input = <<-EOS == Lists term1:: def1 // term2:: def2 EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 2 end test 'ruler offset by blank line divides lists because item has text' do input = <<-EOS == Lists term1:: def1 ''' term2:: def2 EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 2 end test 'block title offset by blank line divides lists and becomes title of second list because item has text' do input = <<-EOS == Lists term1:: def1 .title term2:: def2 EOS output = render_embedded_string input assert_xpath '//*[@class="dlist"]/dl', output, 2 assert_xpath '(//*[@class="dlist"])[2]/*[@class="title"][text()="title"]', output, 1 end end end context 'Callout lists' do test 'listing block with sequential callouts followed by adjacent callout list' do input = <<-EOS [source, ruby] ---- require 'asciidoctor' # <1> doc = Asciidoctor::Document.new('Hello, World!') # <2> puts doc.render # <3> ---- <1> Describe the first line <2> Describe the second line <3> Describe the third line EOS output = render_string input, :attributes => {'backend' => 'docbook45'} assert_xpath '//programlisting', output, 1 assert_xpath '//programlisting//co', output, 3 assert_xpath '(//programlisting//co)[1][@id = "CO1-1"]', output, 1 assert_xpath '(//programlisting//co)[2][@id = "CO1-2"]', output, 1 assert_xpath '(//programlisting//co)[3][@id = "CO1-3"]', output, 1 assert_xpath '//programlisting/following-sibling::calloutlist/callout', output, 3 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[1][@arearefs = "CO1-1"]', output, 1 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[2][@arearefs = "CO1-2"]', output, 1 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[3][@arearefs = "CO1-3"]', output, 1 end test 'listing block with sequential callouts followed by non-adjacent callout list' do input = <<-EOS [source, ruby] ---- require 'asciidoctor' # <1> doc = Asciidoctor::Document.new('Hello, World!') # <2> puts doc.render # <3> ---- Paragraph. 
<1> Describe the first line <2> Describe the second line <3> Describe the third line EOS output = render_string input, :attributes => {'backend' => 'docbook45'} assert_xpath '//programlisting', output, 1 assert_xpath '//programlisting//co', output, 3 assert_xpath '(//programlisting//co)[1][@id = "CO1-1"]', output, 1 assert_xpath '(//programlisting//co)[2][@id = "CO1-2"]', output, 1 assert_xpath '(//programlisting//co)[3][@id = "CO1-3"]', output, 1 assert_xpath '//programlisting/following-sibling::*[1][self::simpara]', output, 1 assert_xpath '//programlisting/following-sibling::calloutlist/callout', output, 3 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[1][@arearefs = "CO1-1"]', output, 1 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[2][@arearefs = "CO1-2"]', output, 1 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[3][@arearefs = "CO1-3"]', output, 1 end test 'listing block with a callout that refers to two different lines' do input = <<-EOS [source, ruby] ---- require 'asciidoctor' # <1> doc = Asciidoctor::Document.new('Hello, World!') # <2> puts doc.render # <2> ---- <1> Import the library <2> Where the magic happens EOS output = render_string input, :attributes => {'backend' => 'docbook45'} assert_xpath '//programlisting', output, 1 assert_xpath '//programlisting//co', output, 3 assert_xpath '(//programlisting//co)[1][@id = "CO1-1"]', output, 1 assert_xpath '(//programlisting//co)[2][@id = "CO1-2"]', output, 1 assert_xpath '(//programlisting//co)[3][@id = "CO1-3"]', output, 1 assert_xpath '//programlisting/following-sibling::calloutlist/callout', output, 2 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[1][@arearefs = "CO1-1"]', output, 1 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[2][@arearefs = "CO1-2 CO1-3"]', output, 1 end test 'listing block with non-sequential callouts followed by adjacent callout list' do input = <<-EOS [source, ruby] ---- require 'asciidoctor' # <2> doc = Asciidoctor::Document.new('Hello, World!') # <3> puts doc.render # <1> ---- <1> Describe the first line <2> Describe the second line <3> Describe the third line EOS output = render_string input, :attributes => {'backend' => 'docbook45'} assert_xpath '//programlisting', output, 1 assert_xpath '//programlisting//co', output, 3 assert_xpath '(//programlisting//co)[1][@id = "CO1-1"]', output, 1 assert_xpath '(//programlisting//co)[2][@id = "CO1-2"]', output, 1 assert_xpath '(//programlisting//co)[3][@id = "CO1-3"]', output, 1 assert_xpath '//programlisting/following-sibling::calloutlist/callout', output, 3 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[1][@arearefs = "CO1-3"]', output, 1 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[2][@arearefs = "CO1-1"]', output, 1 assert_xpath '(//programlisting/following-sibling::calloutlist/callout)[3][@arearefs = "CO1-2"]', output, 1 end test 'two listing blocks can share the same callout list' do input = <<-EOS .Import library [source, ruby] ---- require 'asciidoctor' # <1> ---- .Use library [source, ruby] ---- doc = Asciidoctor::Document.new('Hello, World!') # <2> puts doc.render # <3> ---- <1> Describe the first line <2> Describe the second line <3> Describe the third line EOS output = render_string input, :attributes => {'backend' => 'docbook45'} assert_xpath '//programlisting', output, 2 assert_xpath '(//programlisting)[1]//co', output, 1 assert_xpath '(//programlisting)[1]//co[@id = 
"CO1-1"]', output, 1 assert_xpath '(//programlisting)[2]//co', output, 2 assert_xpath '((//programlisting)[2]//co)[1][@id = "CO1-2"]', output, 1 assert_xpath '((//programlisting)[2]//co)[2][@id = "CO1-3"]', output, 1 assert_xpath '(//calloutlist/callout)[1][@arearefs = "CO1-1"]', output, 1 assert_xpath '(//calloutlist/callout)[2][@arearefs = "CO1-2"]', output, 1 assert_xpath '(//calloutlist/callout)[3][@arearefs = "CO1-3"]', output, 1 end test 'two listing blocks each followed by an adjacent callout list' do input = <<-EOS .Import library [source, ruby] ---- require 'asciidoctor' # <1> ---- <1> Describe the first line .Use library [source, ruby] ---- doc = Asciidoctor::Document.new('Hello, World!') # <1> puts doc.render # <2> ---- <1> Describe the second line <2> Describe the third line EOS output = render_string input, :attributes => {'backend' => 'docbook45'} assert_xpath '//programlisting', output, 2 assert_xpath '(//programlisting)[1]//co', output, 1 assert_xpath '(//programlisting)[1]//co[@id = "CO1-1"]', output, 1 assert_xpath '(//programlisting)[2]//co', output, 2 assert_xpath '((//programlisting)[2]//co)[1][@id = "CO2-1"]', output, 1 assert_xpath '((//programlisting)[2]//co)[2][@id = "CO2-2"]', output, 1 assert_xpath '//calloutlist', output, 2 assert_xpath '(//calloutlist)[1]/callout', output, 1 assert_xpath '((//calloutlist)[1]/callout)[1][@arearefs = "CO1-1"]', output, 1 assert_xpath '(//calloutlist)[2]/callout', output, 2 assert_xpath '((//calloutlist)[2]/callout)[1][@arearefs = "CO2-1"]', output, 1 assert_xpath '((//calloutlist)[2]/callout)[2][@arearefs = "CO2-2"]', output, 1 end test 'callout list with block content' do input = <<-EOS [source, ruby] ---- require 'asciidoctor' # <1> doc = Asciidoctor::Document.new('Hello, World!') # <2> puts doc.render # <3> ---- <1> Imports the library as a RubyGem <2> Creates a new document * Scans the lines for known blocks * Converts the lines into blocks <3> Renders the document + You can write this to file rather than printing to stdout. 
EOS output = render_string input, :attributes => {'backend' => 'docbook45'} assert_xpath '//calloutlist', output, 1 assert_xpath '//calloutlist/callout', output, 3 assert_xpath '(//calloutlist/callout)[1]/*', output, 1 assert_xpath '(//calloutlist/callout)[2]/para', output, 1 assert_xpath '(//calloutlist/callout)[2]/itemizedlist', output, 1 assert_xpath '(//calloutlist/callout)[3]/para', output, 1 assert_xpath '(//calloutlist/callout)[3]/simpara', output, 1 end test 'escaped callout should not be interpreted as a callout' do input = <<-EOS [source, ruby] ---- require 'asciidoctor' # \\<1> ---- EOS output = render_string input, :attributes => {'backend' => 'docbook45'} assert_xpath '//co', output, 0 end test 'should not recognize callouts in middle of line' do input = <<-EOS [source, ruby] ---- puts "The syntax <1> at the end of the line makes a code callout" ---- EOS output = render_embedded_string input assert_xpath '//b', output, 0 end test 'should allow multiple callouts on the same line' do input = <<-EOS [source, ruby] ---- require 'asciidoctor' <1> doc = Asciidoctor.load('Hello, World!') # <2> <3> <4> puts doc.render <5><6> exit 0 ---- <1> Require library <2> Load document from String <3> Uses default backend and doctype <4> One more for good luck <5> Renders document to String <6> Prints output to stdout EOS output = render_embedded_string input assert_xpath '//code/b', output, 6 assert_match(/ <b class="conum">\(1\)<\/b>$/, output) assert_match(/ <b class="conum">\(2\)<\/b> <b class="conum">\(3\)<\/b> <b class="conum">\(4\)<\/b>$/, output) assert_match(/ <b class="conum">\(5\)<\/b><b class="conum">\(6\)<\/b>$/, output) end test 'should allow XML comment-style callouts' do input = <<-EOS [source, xml] ----
<section> <title>Section Title</title> <!--1--> <simpara>Just a paragraph</simpara> <!--2--> </section>
    ---- <1> The title is required <2> The content isn't EOS output = render_embedded_string input assert_xpath '//b', output, 2 assert_xpath '//b[text()="(1)"]', output, 1 assert_xpath '//b[text()="(2)"]', output, 1 end test 'should not allow callouts with half an XML comment' do input = <<-EOS ---- First line <1--> Second line <2--> ---- EOS output = render_embedded_string input assert_xpath '//b', output, 0 end test 'should not recognize callouts in an indented labeled list paragraph' do input = <<-EOS foo:: bar <1> <1> Not pointing to a callout EOS output = render_embedded_string input assert_xpath '//dl//b', output, 0 assert_xpath '//dl/dd/p[text()="bar <1>"]', output, 1 assert_xpath '//ol/li/p[text()="Not pointing to a callout"]', output, 1 end test 'should not recognize callouts in an indented outline list paragraph' do input = <<-EOS * foo bar <1> <1> Not pointing to a callout EOS output = render_embedded_string input assert_xpath '//ul//b', output, 0 assert_xpath %(//ul/li/p[text()="foo\nbar <1>"]), output, 1 assert_xpath '//ol/li/p[text()="Not pointing to a callout"]', output, 1 end test 'should remove line comment chars that precedes callout number' do input = <<-EOS [source,ruby] ---- puts 'Hello, world!' # <1> ---- <1> Ruby [source,groovy] ---- println 'Hello, world!' // <1> ---- <1> Groovy [source,clojure] ---- (def hello (fn [] "Hello, world!")) ;; <1> (hello) ---- <1> Clojure [source,haskell] ---- main = putStrLn "Hello, World!" -- <1> ---- <1> Haskell EOS [{}, {'source-highlighter' => 'coderay'}].each do |attributes| output = render_embedded_string input, :attributes => attributes assert_xpath '//b', output, 4 nodes = xmlnodes_at_css 'pre', output assert_equal %(puts 'Hello, world!' (1)), nodes[0].text assert_equal %(println 'Hello, world!' (1)), nodes[1].text assert_equal %((def hello (fn [] "Hello, world!")) (1)\n(hello)), nodes[2].text assert_equal %(main = putStrLn "Hello, World!" (1)), nodes[3].text end end test 'should allow line comment chars that precede callout number to be specified' do input = <<-EOS [source,erlang,line-comment=%] ---- hello_world() -> io:fwrite("hello, world\n"). % <1> ---- <1> Erlang EOS output = render_embedded_string input assert_xpath '//b', output, 1 nodes = xmlnodes_at_css 'pre', output assert_equal %(hello_world() -> io:fwrite("hello, world\n"). (1)), nodes[0].text end test 'literal block with callouts' do input = <<-EOS .... Roses are red <1> Violets are blue <2> .... 
<1> And so is Ruby <2> But violet is more like purple EOS output = render_string input, :attributes => {'backend' => 'docbook45'} assert_xpath '//literallayout', output, 1 assert_xpath '//literallayout//co', output, 2 assert_xpath '(//literallayout//co)[1][@id = "CO1-1"]', output, 1 assert_xpath '(//literallayout//co)[2][@id = "CO1-2"]', output, 1 assert_xpath '//literallayout/following-sibling::*[1][self::calloutlist]/callout', output, 2 assert_xpath '(//literallayout/following-sibling::*[1][self::calloutlist]/callout)[1][@arearefs = "CO1-1"]', output, 1 assert_xpath '(//literallayout/following-sibling::*[1][self::calloutlist]/callout)[2][@arearefs = "CO1-2"]', output, 1 end test 'callout list with icons enabled' do input = <<-EOS [source, ruby] ---- require 'asciidoctor' # <1> doc = Asciidoctor::Document.new('Hello, World!') # <2> puts doc.render # <3> ---- <1> Describe the first line <2> Describe the second line <3> Describe the third line EOS output = render_embedded_string input, :attributes => {'icons' => ''} assert_css '.listingblock code > img', output, 3 (1..3).each do |i| assert_xpath %((/div[@class="listingblock"]//code/img)[#{i}][@src="./images/icons/callouts/#{i}.png"][@alt="#{i}"]), output, 1 end assert_css '.colist table td img', output, 3 (1..3).each do |i| assert_xpath %((/div[@class="colist arabic"]//td/img)[#{i}][@src="./images/icons/callouts/#{i}.png"][@alt="#{i}"]), output, 1 end end test 'callout list with font-based icons enabled' do input = <<-EOS [source] ---- require 'asciidoctor' # <1> doc = Asciidoctor::Document.new('Hello, World!') #<2> puts doc.render #<3> ---- <1> Describe the first line <2> Describe the second line <3> Describe the third line EOS output = render_embedded_string input, :attributes => {'icons' => 'font'} assert_css '.listingblock code > i', output, 3 (1..3).each do |i| assert_xpath %((/div[@class="listingblock"]//code/i)[#{i}]), output, 1 assert_xpath %((/div[@class="listingblock"]//code/i)[#{i}][@class="conum"][@data-value="#{i}"]), output, 1 assert_xpath %((/div[@class="listingblock"]//code/i)[#{i}]/following-sibling::b[text()="(#{i})"]), output, 1 end assert_css '.colist table td i', output, 3 (1..3).each do |i| assert_xpath %((/div[@class="colist arabic"]//td/i)[#{i}]), output, 1 assert_xpath %((/div[@class="colist arabic"]//td/i)[#{i}][@class="conum"][@data-value = "#{i}"]), output, 1 assert_xpath %((/div[@class="colist arabic"]//td/i)[#{i}]/following-sibling::b[text() = "#{i}"]), output, 1 end end end context 'Checklists' do test 'should create checklist if at least one item has checkbox syntax' do input = <<-EOS - [ ] todo - [x] done - [ ] another todo - [*] another done - plain EOS output = render_embedded_string input assert_css '.ulist.checklist', output, 1 assert_xpath %((/*[@class="ulist checklist"]/ul/li)[1]/p[text()="#{expand_entity 10063} todo"]), output, 1 assert_xpath %((/*[@class="ulist checklist"]/ul/li)[2]/p[text()="#{expand_entity 10003} done"]), output, 1 assert_xpath %((/*[@class="ulist checklist"]/ul/li)[3]/p[text()="#{expand_entity 10063} another todo"]), output, 1 assert_xpath %((/*[@class="ulist checklist"]/ul/li)[4]/p[text()="#{expand_entity 10003} another done"]), output, 1 assert_xpath '(/*[@class="ulist checklist"]/ul/li)[5]/p[text()="plain"]', output, 1 end test 'should create checklist with font icons if at least one item has checkbox syntax and icons attribute is font' do input = <<-EOS - [ ] todo - [x] done - plain EOS output = render_embedded_string input, :attributes => {'icons' => 'font'} assert_css 
'.ulist.checklist', output, 1 assert_css '.ulist.checklist li i.fa-check-square-o', output, 1 assert_css '.ulist.checklist li i.fa-square-o', output, 1 assert_xpath '(/*[@class="ulist checklist"]/ul/li)[3]/p[text()="plain"]', output, 1 end test 'should create interactive checklist if interactive option is set even with icons attribute is font' do input = <<-EOS :icons: font [options="interactive"] - [ ] todo - [x] done EOS output = render_embedded_string input assert_css '.ulist.checklist', output, 1 assert_css '.ulist.checklist li input[type="checkbox"]', output, 2 assert_css '.ulist.checklist li input[type="checkbox"][disabled]', output, 0 assert_css '.ulist.checklist li input[type="checkbox"][checked]', output, 1 end end context 'Lists model' do test 'content should return items in list' do input = <<-EOS * one * two * three EOS doc = document_from_string input list = doc.blocks.first assert list.is_a? Asciidoctor::List items = list.items assert_equal 3, items.size assert_equal list.items, list.content end test 'list item should be the parent of block attached to a list item' do input = <<-EOS * list item 1 + ---- listing block in list item 1 ---- EOS doc = document_from_string input list = doc.blocks.first list_item_1 = list.items.first listing_block = list_item_1.blocks.first assert_equal :listing, listing_block.context assert_equal list_item_1, listing_block.parent end test 'outline? should return true for unordered list' do input = <<-EOS * one * two * three EOS doc = document_from_string input list = doc.blocks.first assert list.outline? end test 'outline? should return true for ordered list' do input = <<-EOS . one . two . three EOS doc = document_from_string input list = doc.blocks.first assert list.outline? end test 'outline? should return false for description list' do input = <<-EOS label:: desc EOS doc = document_from_string input list = doc.blocks.first assert !list.outline? end test 'simple? should return true for list item with no nested blocks' do input = <<-EOS * one * two * three EOS doc = document_from_string input list = doc.blocks.first assert list.items.first.simple? assert !list.items.first.compound? end test 'simple? should return true for list item with nested outline list' do input = <<-EOS * one ** more about one ** and more * two * three EOS doc = document_from_string input list = doc.blocks.first assert list.items.first.simple? assert !list.items.first.compound? end test 'simple? should return false for list item with block content' do input = <<-EOS * one + ---- listing block in list item 1 ---- * two * three EOS doc = document_from_string input list = doc.blocks.first assert !list.items.first.simple? assert list.items.first.compound? end end asciidoctor-1.5.5/test/manpage_test.rb000066400000000000000000000161011277513741400200120ustar00rootroot00000000000000# encoding: UTF-8 unless defined? ASCIIDOCTOR_PROJECT_DIR $: << File.dirname(__FILE__); $:.uniq! require 'test_helper' end SAMPLE_MANPAGE_HEADER = <<-EOS.chomp = command (1) Author Name :doctype: manpage :man manual: Command Manual :man source: Command 1.2.3 == NAME command - does stuff == SYNOPSIS *command* [_OPTION_]... _FILE_... 
== DESCRIPTION EOS context 'Manpage' do context 'Configuration' do test 'should define default linkstyle' do input = SAMPLE_MANPAGE_HEADER output = Asciidoctor.convert input, :backend => :manpage, :header_footer => true assert_match(/^\.LINKSTYLE blue R < >$/, output) end test 'should use linkstyle defined by man-linkstyle attribute' do input = SAMPLE_MANPAGE_HEADER output = Asciidoctor.convert input, :backend => :manpage, :header_footer => true, :attributes => { 'man-linkstyle' => 'cyan B \[fo] \[fc]' } assert_match(/^\.LINKSTYLE cyan B \\\[fo\] \\\[fc\]$/, output) end end context 'Manify' do test 'should escape lone period' do input = %(#{SAMPLE_MANPAGE_HEADER} .) output = Asciidoctor.convert input, :backend => :manpage assert_equal '\&.', output.lines.entries.last.chomp end test 'should escape raw macro' do input = %(#{SAMPLE_MANPAGE_HEADER} AAA this line of text should be show .if 1 .nx BBB this line and the one above it should be visible) output = Asciidoctor.convert input, :backend => :manpage assert_equal '\&.if 1 .nx', output.lines.entries[-2].chomp end end context 'Backslash' do test 'should not escape spaces for empty manual or source fields' do input = SAMPLE_MANPAGE_HEADER.lines.select {|l| !l.start_with?(':man ') } output = Asciidoctor.convert input, :backend => :manpage, :header_footer => true assert_match ' Manual: \ \&', output assert_match ' Source: \ \&', output assert_match(/^\.TH "COMMAND" .* "\\ \\&" "\\ \\&"$/, output) end test 'should preserve backslashes in escape sequences' do input = %(#{SAMPLE_MANPAGE_HEADER} "`hello`" '`goodbye`' *strong* _weak_ `even`) output = Asciidoctor.convert input, :backend => :manpage assert_equal '\(lqhello\(rq \(oqgoodbye\(cq \fBstrong\fP \fIweak\fP \f[CR]even\fP', output.lines.entries.last.chomp end test 'should escape backslashes in content' do input = %(#{SAMPLE_MANPAGE_HEADER} \\.foo \\ bar\\ baz) output = Asciidoctor.convert input, :backend => :manpage assert_equal '\(rs.foo \(rs bar\(rs', output.lines.entries[-2].chomp end test 'should escape literal escape sequence' do input = %(#{SAMPLE_MANPAGE_HEADER} \\fB makes text bold) output = Asciidoctor.convert input, :backend => :manpage assert_match '\(rsfB makes text bold', output end end context 'URL macro' do test 'should not leave blank line before URL macro' do input = %(#{SAMPLE_MANPAGE_HEADER} First paragraph. http://asciidoc.org[AsciiDoc]) output = Asciidoctor.convert input, :backend => :manpage assert_equal '.sp First paragraph. .sp .URL "http://asciidoc.org" "AsciiDoc" ""', output.lines.entries[-4..-1].join end test 'should not swallow content following URL' do input = %(#{SAMPLE_MANPAGE_HEADER} http://asciidoc.org[AsciiDoc] can be used to create man pages.) output = Asciidoctor.convert input, :backend => :manpage assert_equal '.URL "http://asciidoc.org" "AsciiDoc" " " can be used to create man pages.', output.lines.entries[-2..-1].join end test 'should pass adjacent character as final argument of URL macro' do input = %(#{SAMPLE_MANPAGE_HEADER} This is http://asciidoc.org[AsciiDoc].) output = Asciidoctor.convert input, :backend => :manpage assert_equal 'This is \c .URL "http://asciidoc.org" "AsciiDoc" "."', output.lines.entries[-2..-1].join end test 'should pass adjacent character as final argument of URL macro and move trailing content to next line' do input = %(#{SAMPLE_MANPAGE_HEADER} This is http://asciidoc.org[AsciiDoc], which can be used to write content.) 
output = Asciidoctor.convert input, :backend => :manpage assert_equal 'This is \c .URL "http://asciidoc.org" "AsciiDoc" "," which can be used to write content.', output.lines.entries[-3..-1].join end test 'should not leave blank lines between URLs on contiguous lines of input' do input = %(#{SAMPLE_MANPAGE_HEADER} The corresponding implementations are http://clisp.sf.net[CLISP], http://ccl.clozure.com[Clozure CL], http://cmucl.org[CMUCL], http://ecls.sf.net[ECL], and http://sbcl.sf.net[SBCL].) output = Asciidoctor.convert input, :backend => :manpage assert_equal '.sp The corresponding implementations are .URL "http://clisp.sf.net" "CLISP" "," .URL "http://ccl.clozure.com" "Clozure CL" "," .URL "http://cmucl.org" "CMUCL" "," .URL "http://ecls.sf.net" "ECL" "," and \c .URL "http://sbcl.sf.net" "SBCL" "."', output.lines.entries[-8..-1].join end test 'should not leave blank lines between URLs on same line of input' do input = %(#{SAMPLE_MANPAGE_HEADER} The corresponding implementations are http://clisp.sf.net[CLISP], http://ccl.clozure.com[Clozure CL], http://cmucl.org[CMUCL], http://ecls.sf.net[ECL], and http://sbcl.sf.net[SBCL].) output = Asciidoctor.convert input, :backend => :manpage assert_equal '.sp The corresponding implementations are \c .URL "http://clisp.sf.net" "CLISP" "," .URL "http://ccl.clozure.com" "Clozure CL" "," .URL "http://cmucl.org" "CMUCL" "," .URL "http://ecls.sf.net" "ECL" "," and .URL "http://sbcl.sf.net" "SBCL" "."', output.lines.entries[-8..-1].join end test 'should not insert space between link and non-whitespace characters surrounding it' do input = %(#{SAMPLE_MANPAGE_HEADER} Please search |link:http://discuss.asciidoctor.org[the forums]| before asking.) output = Asciidoctor.convert input, :backend => :manpage assert_equal '.sp Please search |\c .URL "http://discuss.asciidoctor.org" "the forums" "|" before asking.', output.lines.entries[-4..-1].join end end context 'Callout List' do test 'should generate callout list using proper formatting commands' do input = %(#{SAMPLE_MANPAGE_HEADER} ---- $ gem install asciidoctor # <1> ---- <1> Installs the asciidoctor gem from RubyGems.org) output = Asciidoctor.convert input, :backend => :manpage assert output.end_with? '.TS tab(:); r lw(\n(.lu*75u/100u). \fB(1)\fP\h\'-2n\':T{ Installs the asciidoctor gem from RubyGems.org T} .TE' end end context 'Environment' do test 'should use SOURCE_DATE_EPOCH as modified time of input file and local time' do old_source_date_epoch = ENV.delete 'SOURCE_DATE_EPOCH' begin ENV['SOURCE_DATE_EPOCH'] = '1234123412' output = Asciidoctor.convert SAMPLE_MANPAGE_HEADER, :backend => :manpage, :header_footer => true assert_match(/Date: 2009-02-08/, output) assert_match(/^\.TH "COMMAND" "1" "2009-02-08" "Command 1.2.3" "Command Manual"$/, output) ensure ENV['SOURCE_DATE_EPOCH'] = old_source_date_epoch if old_source_date_epoch end end end end asciidoctor-1.5.5/test/options_test.rb000066400000000000000000000172641277513741400201100ustar00rootroot00000000000000# encoding: UTF-8 unless defined? ASCIIDOCTOR_PROJECT_DIR $: << File.dirname(__FILE__); $:.uniq! 
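# The CLI option tests below exercise Asciidoctor::Cli::Options.parse!: exit codes for the help flag and invalid/missing arguments, attribute and doctype assignment, safe mode, backend selection, template engine and directories, -r/-I library loading, verbosity levels, and the timings flag.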
require 'test_helper' end require 'asciidoctor/cli/options' context 'Options' do test 'should return error code 0 when help flag is present' do redirect_streams do |stdout, stderr| exitval = Asciidoctor::Cli::Options.parse!(%w(-h)) assert_equal 0, exitval assert_match(/^Usage:/, stdout.string) end end test 'should return error code 1 when invalid option present' do redirect_streams do |stdout, stderr| exitval = Asciidoctor::Cli::Options.parse!(%w(--foobar)) assert_equal 1, exitval assert_equal 'asciidoctor: invalid option: --foobar', stderr.string.chomp end end test 'should return error code 1 when option has invalid argument' do redirect_streams do |stdout, stderr| exitval = Asciidoctor::Cli::Options.parse!(%w(-d chapter input.ad)) # had to change for #320 assert_equal 1, exitval assert_equal 'asciidoctor: invalid argument: -d chapter', stderr.string.chomp end end test 'should return error code 1 when option is missing required argument' do redirect_streams do |stdout, stderr| exitval = Asciidoctor::Cli::Options.parse!(%w(-b)) assert_equal 1, exitval assert_equal 'asciidoctor: option missing argument: -b', stderr.string.chomp end end test 'should emit warning when unparsed options remain' do redirect_streams do |stdout, stderr| options = Asciidoctor::Cli::Options.parse!(%w(-b docbook - -)) assert options.is_a? Hash assert_match(/asciidoctor: WARNING: extra arguments .*/, stderr.string.chomp) end end test 'basic argument assignment' do options = Asciidoctor::Cli::Options.parse!(%w(-v -s -d book test/fixtures/sample.asciidoc)) assert_equal 2, options[:verbose] assert_equal false, options[:header_footer] assert_equal 'book', options[:attributes]['doctype'] assert_equal 1, options[:input_files].size assert_equal 'test/fixtures/sample.asciidoc', options[:input_files][0] end test 'standard attribute assignment' do options = Asciidoctor::Cli::Options.parse!(%w(-a docinfosubs=attributes,replacements -a icons test/fixtures/sample.asciidoc)) assert_equal 'attributes,replacements', options[:attributes]['docinfosubs'] assert_equal '', options[:attributes]['icons'] end test 'multiple attribute arguments' do options = Asciidoctor::Cli::Options.parse!(%w(-a imagesdir=images -a icons test/fixtures/sample.asciidoc)) assert_equal 'images', options[:attributes]['imagesdir'] assert_equal '', options[:attributes]['icons'] end test 'should only split attribute key/value pairs on first equal sign' do options = Asciidoctor::Cli::Options.parse!(%w(-a name=value=value test/fixtures/sample.asciidoc)) assert_equal 'value=value', options[:attributes]['name'] end test 'should allow safe mode to be specified' do options = Asciidoctor::Cli::Options.parse!(%w(-S safe test/fixtures/sample.asciidoc)) assert_equal Asciidoctor::SafeMode::SAFE, options[:safe] end test 'should allow any backend to be specified' do options = Asciidoctor::Cli::Options.parse!(%w(-b my_custom_backend test/fixtures/sample.asciidoc)) assert_equal 'my_custom_backend', options[:attributes]['backend'] end test 'article doctype assignment' do options = Asciidoctor::Cli::Options.parse!(%w(-d article test/fixtures/sample.asciidoc)) assert_equal 'article', options[:attributes]['doctype'] end test 'book doctype assignment' do options = Asciidoctor::Cli::Options.parse!(%w(-d book test/fixtures/sample.asciidoc)) assert_equal 'book', options[:attributes]['doctype'] end test 'inline doctype assignment' do options = Asciidoctor::Cli::Options.parse!(%w(-d inline test/fixtures/sample.asciidoc)) assert_equal 'inline', options[:attributes]['doctype'] end test 
'template engine assignment' do options = Asciidoctor::Cli::Options.parse!(%w(-E haml test/fixtures/sample.asciidoc)) assert_equal 'haml', options[:template_engine] end test 'template directory assignment' do options = Asciidoctor::Cli::Options.parse!(%w(-T custom-backend test/fixtures/sample.asciidoc)) assert_equal ['custom-backend'], options[:template_dirs] end test 'multiple template directory assignments' do options = Asciidoctor::Cli::Options.parse!(%w(-T custom-backend -T custom-backend-hacks test/fixtures/sample.asciidoc)) assert_equal ['custom-backend', 'custom-backend-hacks'], options[:template_dirs] end test 'multiple -r flags requires specified libraries' do options = Asciidoctor::Cli::Options.new redirect_streams do |stdout, stderr| exitval = options.parse! %w(-r foobar -r foobaz test/fixtures/sample.asciidoc) assert_match(%(asciidoctor: FAILED: 'foobar' could not be loaded), stderr.string) assert_equal 1, exitval assert_equal ['foobar', 'foobaz'], options[:requires] end end test '-r flag with multiple values requires specified libraries' do options = Asciidoctor::Cli::Options.new redirect_streams do |stdout, stderr| exitval = options.parse! %w(-r foobar,foobaz test/fixtures/sample.asciidoc) assert_match(%(asciidoctor: FAILED: 'foobar' could not be loaded), stderr.string) assert_equal 1, exitval assert_equal ['foobar', 'foobaz'], options[:requires] end end test '-I option appends paths to $LOAD_PATH' do options = Asciidoctor::Cli::Options.new old_load_path = $LOAD_PATH.dup begin exitval = options.parse! %w(-I foobar -I foobaz test/fixtures/sample.asciidoc) refute_equal 1, exitval assert_equal old_load_path.size + 2, $LOAD_PATH.size assert_equal File.expand_path('foobar'), $LOAD_PATH[0] assert_equal File.expand_path('foobaz'), $LOAD_PATH[1] assert_equal ['foobar', 'foobaz'], options[:load_paths] ensure ($LOAD_PATH.size - old_load_path.size).times { $LOAD_PATH.shift } end end test '-I option appends multiple paths to $LOAD_PATH' do options = Asciidoctor::Cli::Options.new old_load_path = $LOAD_PATH.dup begin exitval = options.parse! 
%W(-I foobar#{File::PATH_SEPARATOR}foobaz test/fixtures/sample.asciidoc) refute_equal 1, exitval assert_equal old_load_path.size + 2, $LOAD_PATH.size assert_equal File.expand_path('foobar'), $LOAD_PATH[0] assert_equal File.expand_path('foobaz'), $LOAD_PATH[1] assert_equal ['foobar', 'foobaz'], options[:load_paths] ensure ($LOAD_PATH.size - old_load_path.size).times { $LOAD_PATH.shift } end end test 'should set verbose to 2 when -v flag is specified' do options = Asciidoctor::Cli::Options.parse!(%w(-v test/fixtures/sample.asciidoc)) assert_equal 2, options[:verbose] end test 'should set verbose to 0 when -q flag is specified' do options = Asciidoctor::Cli::Options.parse!(%w(-q test/fixtures/sample.asciidoc)) assert_equal 0, options[:verbose] end test 'should set verbose to 2 when -v flag is specified after -q flag' do options = Asciidoctor::Cli::Options.parse!(%w(-q -v test/fixtures/sample.asciidoc)) assert_equal 2, options[:verbose] end test 'should set verbose to 0 when -q flag is specified after -v flag' do options = Asciidoctor::Cli::Options.parse!(%w(-v -q test/fixtures/sample.asciidoc)) assert_equal 0, options[:verbose] end test 'should enable timings when -t flag is specified' do options = Asciidoctor::Cli::Options.parse!(%w(-t test/fixtures/sample.asciidoc)) assert_equal true, options[:timings] end test 'timings option is disable by default' do options = Asciidoctor::Cli::Options.parse!(%w(test/fixtures/sample.asciidoc)) assert_equal false, options[:timings] end end asciidoctor-1.5.5/test/paragraphs_test.rb000066400000000000000000000351431277513741400205410ustar00rootroot00000000000000# encoding: UTF-8 unless defined? ASCIIDOCTOR_PROJECT_DIR $: << File.dirname(__FILE__); $:.uniq! require 'test_helper' end context 'Paragraphs' do context 'Normal' do test 'should treat plain text separated by blank lines as paragraphs' do input = <<-EOS Plain text for the win! Yep. Text. Plain and simple. EOS output = render_embedded_string input assert_css 'p', output, 2 assert_xpath '(//p)[1][text() = "Plain text for the win!"]', output, 1 assert_xpath '(//p)[2][text() = "Yep. Text. Plain and simple."]', output, 1 end test 'should associate block title with paragraph' do input = <<-EOS .Titled Paragraph. Winning. EOS output = render_embedded_string input assert_css 'p', output, 2 assert_xpath '(//p)[1]/preceding-sibling::*[@class = "title"]', output, 1 assert_xpath '(//p)[1]/preceding-sibling::*[@class = "title"][text() = "Titled"]', output, 1 assert_xpath '(//p)[2]/preceding-sibling::*[@class = "title"]', output, 0 end test 'no duplicate block before next section' do input = <<-EOS = Title Preamble == First Section Paragraph 1 Paragraph 2 == Second Section Last words EOS output = render_string input assert_xpath '//p[text() = "Paragraph 2"]', output, 1 end test 'does not treat wrapped line as a list item' do input = <<-EOS paragraph . wrapped line EOS output = render_embedded_string input assert_css 'p', output, 1 assert_xpath %(//p[text()="paragraph\n. wrapped line"]), output, 1 end test 'does not treat wrapped line as a block title' do input = <<-EOS paragraph .wrapped line EOS output = render_embedded_string input assert_css 'p', output, 1 assert_xpath %(//p[text()="paragraph\n.wrapped line"]), output, 1 end test 'interprets normal paragraph style as normal paragraph' do input = <<-EOS [normal] Normal paragraph. Nothing special. 
EOS output = render_embedded_string input assert_css 'p', output, 1 end test 'removes indentation from literal paragraph marked as normal' do input = <<-EOS [normal] Normal paragraph. Nothing special. Last line. EOS output = render_embedded_string input assert_css 'p', output, 1 assert_xpath %(//p[text()="Normal paragraph.\n Nothing special.\nLast line."]), output, 1 end test 'normal paragraph terminates at block attribute list' do input = <<-EOS normal text [literal] literal text EOS output = render_embedded_string input assert_css '.paragraph:root', output, 1 assert_css '.literalblock:root', output, 1 end test 'normal paragraph terminates at block delimiter' do input = <<-EOS normal text -- text in open block -- EOS output = render_embedded_string input assert_css '.paragraph:root', output, 1 assert_css '.openblock:root', output, 1 end test 'normal paragraph terminates at list continuation' do input = <<-EOS normal text + EOS output = render_embedded_string input assert_css '.paragraph:root', output, 2 assert_xpath %((/*[@class="paragraph"])[1]/p[text() = "normal text"]), output, 1 assert_xpath %((/*[@class="paragraph"])[2]/p[text() = "+"]), output, 1 end test 'normal style turns literal paragraph into normal paragraph' do input = <<-EOS [normal] normal paragraph, despite the leading indent EOS output = render_embedded_string input assert_css '.paragraph:root > p', output, 1 end test 'expands index term macros in DocBook backend' do input = <<-EOS Here is an index entry for ((tigers)). indexterm:[Big cats,Tigers,Siberian Tiger] Here is an index entry for indexterm2:[Linux]. (((Operating Systems,Linux,Fedora))) Note that multi-entry terms generate separate index entries. EOS output = render_embedded_string input, :attributes => {'backend' => 'docbook45'} assert_xpath '/simpara', output, 1 term1 = (xmlnodes_at_xpath '(//indexterm)[1]', output, 1).first assert_equal 'tigers', term1.to_s assert term1.next.content.start_with?('tigers') term2 = (xmlnodes_at_xpath '(//indexterm)[2]', output, 1).first term2_elements = term2.elements assert_equal 3, term2_elements.size assert_equal 'Big cats', term2_elements[0].to_s assert_equal 'Tigers', term2_elements[1].to_s assert_equal 'Siberian Tiger', term2_elements[2].to_s term3 = (xmlnodes_at_xpath '(//indexterm)[3]', output, 1).first term3_elements = term3.elements assert_equal 2, term3_elements.size assert_equal 'Tigers', term3_elements[0].to_s assert_equal 'Siberian Tiger', term3_elements[1].to_s term4 = (xmlnodes_at_xpath '(//indexterm)[4]', output, 1).first term4_elements = term4.elements assert_equal 1, term4_elements.size assert_equal 'Siberian Tiger', term4_elements[0].to_s term5 = (xmlnodes_at_xpath '(//indexterm)[5]', output, 1).first assert_equal 'Linux', term5.to_s assert term5.next.content.start_with?('Linux') assert_xpath '(//indexterm)[6]/*', output, 3 assert_xpath '(//indexterm)[7]/*', output, 2 assert_xpath '(//indexterm)[8]/*', output, 1 end test 'normal paragraph should honor explicit subs list' do input = <<-EOS [subs="specialcharacters"] ** EOS output = render_embedded_string input assert output.include?('*<Hey Jude>*') end test 'normal paragraph should honor specialchars shorthand' do input = <<-EOS [subs="specialchars"] ** EOS output = render_embedded_string input assert output.include?('*<Hey Jude>*') end test 'should add a hardbreak at end of each line when hardbreaks option is set' do input = <<-EOS [%hardbreaks] read my lips EOS output = render_embedded_string input assert_css 'br', output, 2 assert_xpath '//p', output, 1 
assert output.include?("

    read
    \nmy
    \nlips

    ") end end context 'Literal' do test 'single-line literal paragraphs' do input = <<-EOS LITERALS ARE LITERALLY AWESOME! EOS output = render_embedded_string input assert_xpath '//pre', output, 3 end test 'multi-line literal paragraph' do input = <<-EOS Install instructions: yum install ruby rubygems gem install asciidoctor You're good to go! EOS output = render_embedded_string input assert_xpath '//pre', output, 1 # indentation should be trimmed from literal block assert_xpath %(//pre[text() = "yum install ruby rubygems\ngem install asciidoctor"]), output, 1 end test 'literal paragraph' do input = <<-EOS [literal] this text is literally literal EOS output = render_embedded_string input assert_xpath %(/*[@class="literalblock"]//pre[text()="this text is literally literal"]), output, 1 end test 'should read content below literal style verbatim' do input = <<-EOS [literal] image::not-an-image-block[] EOS output = render_embedded_string input assert_xpath %(/*[@class="literalblock"]//pre[text()="image::not-an-image-block[]"]), output, 1 assert_css 'img', output, 0 end test 'listing paragraph' do input = <<-EOS [listing] this text is a listing EOS output = render_embedded_string input assert_xpath %(/*[@class="listingblock"]//pre[text()="this text is a listing"]), output, 1 end test 'source paragraph' do input = <<-EOS [source] use the source, luke! EOS output = render_embedded_string input assert_xpath %(/*[@class="listingblock"]//pre[@class="highlight"]/code[text()="use the source, luke!"]), output, 1 end test 'source code paragraph with language' do input = <<-EOS [source, perl] die 'zomg perl sucks'; EOS output = render_embedded_string input assert_xpath %(/*[@class="listingblock"]//pre[@class="highlight"]/code[@class="language-perl"][@data-lang="perl"][text()="die 'zomg perl sucks';"]), output, 1 end test 'literal paragraph terminates at block attribute list' do input = <<-EOS literal text [normal] normal text EOS output = render_embedded_string input assert_xpath %(/*[@class="literalblock"]), output, 1 assert_xpath %(/*[@class="paragraph"]), output, 1 end test 'literal paragraph terminates at block delimiter' do input = <<-EOS literal text -- normal text -- EOS output = render_embedded_string input assert_xpath %(/*[@class="literalblock"]), output, 1 assert_xpath %(/*[@class="openblock"]), output, 1 end test 'literal paragraph terminates at list continuation' do input = <<-EOS literal text + EOS output = render_embedded_string input assert_xpath %(/*[@class="literalblock"]), output, 1 assert_xpath %(/*[@class="literalblock"]//pre[text() = "literal text"]), output, 1 assert_xpath %(/*[@class="paragraph"]), output, 1 assert_xpath %(/*[@class="paragraph"]/p[text() = "+"]), output, 1 end end context 'Quote' do test "single-line quote paragraph" do input = <<-EOS [quote] Famous quote. EOS output = render_string input assert_xpath '//*[@class = "quoteblock"]', output, 1 assert_xpath '//*[@class = "quoteblock"]//p', output, 0 assert_xpath '//*[@class = "quoteblock"]//*[contains(text(), "Famous quote.")]', output, 1 end test 'quote paragraph terminates at list continuation' do input = <<-EOS [quote] A famouse quote. 
+ EOS output = render_embedded_string input assert_css '.quoteblock:root', output, 1 assert_css '.paragraph:root', output, 1 assert_xpath %(/*[@class="paragraph"]/p[text() = "+"]), output, 1 end test "verse paragraph" do output = render_string("[verse]\nFamous verse.") assert_xpath '//*[@class = "verseblock"]', output, 1 assert_xpath '//*[@class = "verseblock"]/pre', output, 1 assert_xpath '//*[@class = "verseblock"]//p', output, 0 assert_xpath '//*[@class = "verseblock"]/pre[normalize-space(text()) = "Famous verse."]', output, 1 end test 'should perform normal subs on a verse paragraph' do input = <<-EOS [verse] _GET /groups/link:#group-id[\{group-id\}]_ EOS output = render_embedded_string input assert output.include?('
<em>GET /groups/<a href="#group-id">{group-id}</a></em>
    ') end test 'quote paragraph should honor explicit subs list' do input = <<-EOS [subs="specialcharacters"] [quote] *Hey Jude* EOS output = render_embedded_string input assert output.include?('*Hey Jude*') end end context "special" do test "note multiline syntax" do Asciidoctor::ADMONITION_STYLES.each do |style| assert_xpath "//div[@class='admonitionblock #{style.downcase}']", render_string("[#{style}]\nThis is a winner.") end end test "note block syntax" do Asciidoctor::ADMONITION_STYLES.each do |style| assert_xpath "//div[@class='admonitionblock #{style.downcase}']", render_string("[#{style}]\n====\nThis is a winner.\n====") end end test "note inline syntax" do Asciidoctor::ADMONITION_STYLES.each do |style| assert_xpath "//div[@class='admonitionblock #{style.downcase}']", render_string("#{style}: This is important, fool!") end end test "sidebar block" do input = <<-EOS == Section .Sidebar **** Content goes here **** EOS result = render_string(input) assert_xpath "//*[@class='sidebarblock']//p", result, 1 end context 'Styled Paragraphs' do test 'should wrap text in simpara for styled paragraphs when rendered to DocBook' do input = <<-EOS = Book :doctype: book [preface] = About this book [abstract] An abstract for the book. = Part 1 [partintro] An intro to this part. == Chapter 1 [sidebar] Just a side note. [example] As you can see here. [quote] Wise words from a wise person. EOS output = render_string input, :backend => 'docbook' assert_css 'abstract > simpara', output, 1 assert_css 'partintro > simpara', output, 1 assert_css 'sidebar > simpara', output, 1 assert_css 'informalexample > simpara', output, 1 assert_css 'blockquote > simpara', output, 1 end test 'should wrap text in simpara for styled paragraphs with title when rendered to DocBook' do input = <<-EOS = Book :doctype: book [preface] = About this book [abstract] .Abstract title An abstract for the book. = Part 1 [partintro] .Part intro title An intro to this part. == Chapter 1 [sidebar] .Sidebar title Just a side note. [example] .Example title As you can see here. [quote] .Quote title Wise words from a wise person. EOS output = render_string input, :backend => 'docbook' assert_css 'abstract > title', output, 1 assert_xpath '//abstract/title[text() = "Abstract title"]', output, 1 assert_css 'abstract > title + simpara', output, 1 assert_css 'partintro > title', output, 1 assert_xpath '//partintro/title[text() = "Part intro title"]', output, 1 assert_css 'partintro > title + simpara', output, 1 assert_css 'sidebar > title', output, 1 assert_xpath '//sidebar/title[text() = "Sidebar title"]', output, 1 assert_css 'sidebar > title + simpara', output, 1 assert_css 'example > title', output, 1 assert_xpath '//example/title[text() = "Example title"]', output, 1 assert_css 'example > title + simpara', output, 1 assert_css 'blockquote > title', output, 1 assert_xpath '//blockquote/title[text() = "Quote title"]', output, 1 assert_css 'blockquote > title + simpara', output, 1 end end context 'Inline doctype' do test 'should only format and output text in first paragraph when doctype is inline' do input = "http://asciidoc.org[AsciiDoc] is a _lightweight_ markup language...\n\nignored" output = render_string input, :doctype => 'inline' assert_equal 'AsciiDoc is a lightweight markup language…​', output end test 'should output nil if first block is not a paragraph' do input = '* bullet' output = render_string input, :doctype => 'inline' assert output.nil? 
end end end end asciidoctor-1.5.5/test/parser_test.rb000066400000000000000000000574511277513741400177130ustar00rootroot00000000000000# encoding: UTF-8 unless defined? ASCIIDOCTOR_PROJECT_DIR $: << File.dirname(__FILE__); $:.uniq! require 'test_helper' end context "Parser" do test "is_section_title?" do assert Asciidoctor::Parser.is_section_title?('AsciiDoc Home Page', '==================') assert Asciidoctor::Parser.is_section_title?('=== AsciiDoc Home Page') end test 'sanitize attribute name' do assert_equal 'foobar', Asciidoctor::Parser.sanitize_attribute_name("Foo Bar") assert_equal 'foo', Asciidoctor::Parser.sanitize_attribute_name("foo") assert_equal 'foo3-bar', Asciidoctor::Parser.sanitize_attribute_name("Foo 3^ # - Bar[") end test "collect unnamed attribute" do attributes = {} line = 'quote' expected = {1 => 'quote'} Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test "collect unnamed attribute double-quoted" do attributes = {} line = '"quote"' expected = {1 => 'quote'} Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test "collect empty unnamed attribute double-quoted" do attributes = {} line = '""' expected = {1 => ''} Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test "collect unnamed attribute double-quoted containing escaped quote" do attributes = {} line = '"ba\"zaar"' expected = {1 => 'ba"zaar'} Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test "collect unnamed attribute single-quoted" do attributes = {} line = '\'quote\'' expected = {1 => 'quote'} Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test "collect empty unnamed attribute single-quoted" do attributes = {} line = '\'\'' expected = {1 => ''} Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test "collect unnamed attribute single-quoted containing escaped quote" do attributes = {} line = '\'ba\\\'zaar\'' expected = {1 => 'ba\'zaar'} Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test "collect unnamed attribute with dangling delimiter" do attributes = {} line = 'quote , ' expected = {1 => 'quote'} Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test "collect unnamed attribute in second position after empty attribute" do attributes = {} line = ', John Smith' expected = {1 => nil, 2 => 'John Smith'} Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test "collect unnamed attributes" do attributes = {} line = "first, second one, third" expected = {1 => 'first', 2 => 'second one', 3 => 'third'} Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test "collect named attribute" do attributes = {} line = 'foo=bar' expected = {'foo' => 'bar'} Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test "collect named attribute double-quoted" do attributes = {} line = 'foo="bar"' expected = {'foo' => 'bar'} Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect named attribute with double-quoted empty value' do attributes = {} line = 'height=100,caption="",link="images/octocat.png"' expected = {'height' => '100', 'caption' => '', 'link' => 
'images/octocat.png'} Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test "collect named attribute single-quoted" do attributes = {} line = 'foo=\'bar\'' expected = {'foo' => 'bar'} Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test 'collect named attribute with single-quoted empty value' do attributes = {} line = "height=100,caption='',link='images/octocat.png'" expected = {'height' => '100', 'caption' => '', 'link' => 'images/octocat.png'} Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test "collect named attributes unquoted" do attributes = {} line = "first=value, second=two, third=3" expected = {'first' => 'value', 'second' => 'two', 'third' => '3'} Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test "collect named attributes quoted" do attributes = {} line = "first='value', second=\"value two\", third=three" expected = {'first' => 'value', 'second' => 'value two', 'third' => 'three'} Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test "collect named attributes quoted containing non-semantic spaces" do attributes = {} line = " first = 'value', second =\"value two\" , third= three " expected = {'first' => 'value', 'second' => 'value two', 'third' => 'three'} Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test "collect mixed named and unnamed attributes" do attributes = {} line = "first, second=\"value two\", third=three, Sherlock Holmes" expected = {1 => 'first', 'second' => 'value two', 'third' => 'three', 4 => 'Sherlock Holmes'} Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test "collect options attribute" do attributes = {} line = "quote, options='opt1,opt2 , opt3'" expected = {1 => 'quote', 'options' => 'opt1,opt2 , opt3', 'opt1-option' => '', 'opt2-option' => '', 'opt3-option' => ''} Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test "collect opts attribute as options" do attributes = {} line = "quote, opts='opt1,opt2 , opt3'" expected = {1 => 'quote', 'options' => 'opt1,opt2 , opt3', 'opt1-option' => '', 'opt2-option' => '', 'opt3-option' => ''} Asciidoctor::AttributeList.new(line).parse_into(attributes) assert_equal expected, attributes end test "collect and rekey unnamed attributes" do attributes = {} line = "first, second one, third, fourth" expected = {1 => 'first', 2 => 'second one', 3 => 'third', 4 => 'fourth', 'a' => 'first', 'b' => 'second one', 'c' => 'third'} Asciidoctor::AttributeList.new(line).parse_into(attributes, ['a', 'b', 'c']) assert_equal expected, attributes end test "rekey positional attributes" do attributes = {1 => 'source', 2 => 'java'} expected = {1 => 'source', 2 => 'java', 'style' => 'source', 'language' => 'java'} Asciidoctor::AttributeList.rekey(attributes, ['style', 'language', 'linenums']) assert_equal expected, attributes end test 'parse style attribute with id and role' do attributes = {1 => 'style#id.role'} style, original_style = Asciidoctor::Parser.parse_style_attribute(attributes) assert_equal 'style', style assert_nil original_style assert_equal 'style', attributes['style'] assert_equal 'id', attributes['id'] assert_equal 'role', attributes['role'] assert_equal 'style#id.role', attributes[1] end test 'parse style attribute with style, role, 
id and option' do attributes = {1 => 'style.role#id%fragment'} style, original_style = Asciidoctor::Parser.parse_style_attribute(attributes) assert_equal 'style', style assert_nil original_style assert_equal 'style', attributes['style'] assert_equal 'id', attributes['id'] assert_equal 'role', attributes['role'] assert_equal '', attributes['fragment-option'] assert_equal 'fragment', attributes['options'] assert_equal 'style.role#id%fragment', attributes[1] end test 'parse style attribute with style, id and multiple roles' do attributes = {1 => 'style#id.role1.role2'} style, original_style = Asciidoctor::Parser.parse_style_attribute(attributes) assert_equal 'style', style assert_nil original_style assert_equal 'style', attributes['style'] assert_equal 'id', attributes['id'] assert_equal 'role1 role2', attributes['role'] assert_equal 'style#id.role1.role2', attributes[1] end test 'parse style attribute with style, multiple roles and id' do attributes = {1 => 'style.role1.role2#id'} style, original_style = Asciidoctor::Parser.parse_style_attribute(attributes) assert_equal 'style', style assert_nil original_style assert_equal 'style', attributes['style'] assert_equal 'id', attributes['id'] assert_equal 'role1 role2', attributes['role'] assert_equal 'style.role1.role2#id', attributes[1] end test 'parse style attribute with positional and original style' do attributes = {1 => 'new_style', 'style' => 'original_style'} style, original_style = Asciidoctor::Parser.parse_style_attribute(attributes) assert_equal 'new_style', style assert_equal 'original_style', original_style assert_equal 'new_style', attributes['style'] assert_equal 'new_style', attributes[1] end test 'parse style attribute with id and role only' do attributes = {1 => '#id.role'} style, original_style = Asciidoctor::Parser.parse_style_attribute(attributes) assert_nil style assert_nil original_style assert_equal 'id', attributes['id'] assert_equal 'role', attributes['role'] assert_equal '#id.role', attributes[1] end test 'parse empty style attribute' do attributes = {1 => nil} style, original_style = Asciidoctor::Parser.parse_style_attribute(attributes) assert_nil style assert_nil original_style assert_nil attributes['id'] assert_nil attributes['role'] assert_nil attributes[1] end test 'parse style attribute with option should preserve existing options' do attributes = {1 => '%header', 'options' => 'footer', 'footer-option' => ''} style, original_style = Asciidoctor::Parser.parse_style_attribute(attributes) assert_nil style assert_nil original_style assert_equal 'header,footer', attributes['options'] assert_equal '', attributes['header-option'] assert_equal '', attributes['footer-option'] end test "parse author first" do metadata, _ = parse_header_metadata 'Stuart' assert_equal 5, metadata.size assert_equal 1, metadata['authorcount'] assert_equal metadata['author'], metadata['authors'] assert_equal 'Stuart', metadata['firstname'] assert_equal 'S', metadata['authorinitials'] end test "parse author first last" do metadata, _ = parse_header_metadata 'Yukihiro Matsumoto' assert_equal 6, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Yukihiro Matsumoto', metadata['author'] assert_equal metadata['author'], metadata['authors'] assert_equal 'Yukihiro', metadata['firstname'] assert_equal 'Matsumoto', metadata['lastname'] assert_equal 'YM', metadata['authorinitials'] end test "parse author first middle last" do metadata, _ = parse_header_metadata 'David Heinemeier Hansson' assert_equal 7, metadata.size assert_equal 1, 
metadata['authorcount'] assert_equal 'David Heinemeier Hansson', metadata['author'] assert_equal metadata['author'], metadata['authors'] assert_equal 'David', metadata['firstname'] assert_equal 'Heinemeier', metadata['middlename'] assert_equal 'Hansson', metadata['lastname'] assert_equal 'DHH', metadata['authorinitials'] end test "parse author first middle last email" do metadata, _ = parse_header_metadata 'David Heinemeier Hansson ' assert_equal 8, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'David Heinemeier Hansson', metadata['author'] assert_equal metadata['author'], metadata['authors'] assert_equal 'David', metadata['firstname'] assert_equal 'Heinemeier', metadata['middlename'] assert_equal 'Hansson', metadata['lastname'] assert_equal 'rails@ruby-lang.org', metadata['email'] assert_equal 'DHH', metadata['authorinitials'] end test "parse author first email" do metadata, _ = parse_header_metadata 'Stuart ' assert_equal 6, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Stuart', metadata['author'] assert_equal metadata['author'], metadata['authors'] assert_equal 'Stuart', metadata['firstname'] assert_equal 'founder@asciidoc.org', metadata['email'] assert_equal 'S', metadata['authorinitials'] end test "parse author first last email" do metadata, _ = parse_header_metadata 'Stuart Rackham ' assert_equal 7, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Stuart Rackham', metadata['author'] assert_equal metadata['author'], metadata['authors'] assert_equal 'Stuart', metadata['firstname'] assert_equal 'Rackham', metadata['lastname'] assert_equal 'founder@asciidoc.org', metadata['email'] assert_equal 'SR', metadata['authorinitials'] end test "parse author with hyphen" do metadata, _ = parse_header_metadata 'Tim Berners-Lee ' assert_equal 7, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Tim Berners-Lee', metadata['author'] assert_equal metadata['author'], metadata['authors'] assert_equal 'Tim', metadata['firstname'] assert_equal 'Berners-Lee', metadata['lastname'] assert_equal 'founder@www.org', metadata['email'] assert_equal 'TB', metadata['authorinitials'] end test "parse author with single quote" do metadata, _ = parse_header_metadata 'Stephen O\'Grady ' assert_equal 7, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Stephen O\'Grady', metadata['author'] assert_equal metadata['author'], metadata['authors'] assert_equal 'Stephen', metadata['firstname'] assert_equal 'O\'Grady', metadata['lastname'] assert_equal 'founder@redmonk.com', metadata['email'] assert_equal 'SO', metadata['authorinitials'] end test "parse author with dotted initial" do metadata, _ = parse_header_metadata 'Heiko W. Rupp ' assert_equal 8, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Heiko W. 
Rupp', metadata['author'] assert_equal metadata['author'], metadata['authors'] assert_equal 'Heiko', metadata['firstname'] assert_equal 'W.', metadata['middlename'] assert_equal 'Rupp', metadata['lastname'] assert_equal 'hwr@example.de', metadata['email'] assert_equal 'HWR', metadata['authorinitials'] end test "parse author with underscore" do metadata, _ = parse_header_metadata 'Tim_E Fella' assert_equal 6, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Tim E Fella', metadata['author'] assert_equal metadata['author'], metadata['authors'] assert_equal 'Tim E', metadata['firstname'] assert_equal 'Fella', metadata['lastname'] assert_equal 'TF', metadata['authorinitials'] end test 'parse author name with letters outside basic latin' do metadata, _ = parse_header_metadata 'Stéphane Brontë' assert_equal 6, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Stéphane Brontë', metadata['author'] assert_equal metadata['author'], metadata['authors'] assert_equal 'Stéphane', metadata['firstname'] assert_equal 'Brontë', metadata['lastname'] assert_equal 'SB', metadata['authorinitials'] end if ::RUBY_MIN_VERSION_1_9 test 'parse ideographic author names' do metadata, _ = parse_header_metadata '李 四 ' assert_equal 7, metadata.size assert_equal 1, metadata['authorcount'] assert_equal '李 四', metadata['author'] assert_equal metadata['author'], metadata['authors'] assert_equal '李', metadata['firstname'] assert_equal '四', metadata['lastname'] assert_equal 'si.li@example.com', metadata['email'] assert_equal '李四', metadata['authorinitials'] end if ::RUBY_MIN_VERSION_1_9 test "parse author condenses whitespace" do metadata, _ = parse_header_metadata ' Stuart Rackham ' assert_equal 7, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Stuart Rackham', metadata['author'] assert_equal metadata['author'], metadata['authors'] assert_equal 'Stuart', metadata['firstname'] assert_equal 'Rackham', metadata['lastname'] assert_equal 'founder@asciidoc.org', metadata['email'] assert_equal 'SR', metadata['authorinitials'] end test "parse invalid author line becomes author" do metadata, _ = parse_header_metadata ' Stuart Rackham, founder of AsciiDoc ' assert_equal 5, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Stuart Rackham, founder of AsciiDoc ', metadata['author'] assert_equal metadata['author'], metadata['authors'] assert_equal 'Stuart Rackham, founder of AsciiDoc ', metadata['firstname'] assert_equal 'S', metadata['authorinitials'] end test 'parse multiple authors' do metadata, _ = parse_header_metadata 'Doc Writer ; John Smith ' assert_equal 2, metadata['authorcount'] assert_equal 'Doc Writer, John Smith', metadata['authors'] assert_equal 'Doc Writer', metadata['author'] assert_equal 'Doc Writer', metadata['author_1'] assert_equal 'John Smith', metadata['author_2'] end test "parse rev number date remark" do input = <<-EOS Ryan Waldron v0.0.7, 2013-12-18: The first release you can stand on EOS metadata, _ = parse_header_metadata input assert_equal 9, metadata.size assert_equal '0.0.7', metadata['revnumber'] assert_equal '2013-12-18', metadata['revdate'] assert_equal 'The first release you can stand on', metadata['revremark'] end test "parse rev date" do input = <<-EOS Ryan Waldron 2013-12-18 EOS metadata, _ = parse_header_metadata input assert_equal 7, metadata.size assert_equal '2013-12-18', metadata['revdate'] end test 'parse rev number with trailing comma' do input = <<-EOS Stuart Rackham v8.6.8, EOS metadata, _ = parse_header_metadata input 
assert_equal 7, metadata.size assert_equal '8.6.8', metadata['revnumber'] assert !metadata.has_key?('revdate') end # Asciidoctor recognizes a standalone revision without a trailing comma test 'parse rev number' do input = <<-EOS Stuart Rackham v8.6.8 EOS metadata, _ = parse_header_metadata input assert_equal 7, metadata.size assert_equal '8.6.8', metadata['revnumber'] assert !metadata.has_key?('revdate') end # while compliant w/ AsciiDoc, this is just sloppy parsing test "treats arbitrary text on rev line as revdate" do input = <<-EOS Ryan Waldron foobar EOS metadata, _ = parse_header_metadata input assert_equal 7, metadata.size assert_equal 'foobar', metadata['revdate'] end test "parse rev date remark" do input = <<-EOS Ryan Waldron 2013-12-18: The first release you can stand on EOS metadata, _ = parse_header_metadata input assert_equal 8, metadata.size assert_equal '2013-12-18', metadata['revdate'] assert_equal 'The first release you can stand on', metadata['revremark'] end test "should not mistake attribute entry as rev remark" do input = <<-EOS Joe Cool :page-layout: post EOS metadata, _ = parse_header_metadata input refute_equal 'page-layout: post', metadata['revremark'] assert !metadata.has_key?('revdate') end test "parse rev remark only" do input = <<-EOS Joe Cool :Must start revremark-only line with space EOS metadata, _ = parse_header_metadata input assert_equal 'Must start revremark-only line with space', metadata['revremark'] assert !metadata.has_key?('revdate') end test "skip line comments before author" do input = <<-EOS // Asciidoctor // release artist Ryan Waldron EOS metadata, _ = parse_header_metadata input assert_equal 6, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Ryan Waldron', metadata['author'] assert_equal 'Ryan', metadata['firstname'] assert_equal 'Waldron', metadata['lastname'] assert_equal 'RW', metadata['authorinitials'] end test "skip block comment before author" do input = <<-EOS //// Asciidoctor release artist //// Ryan Waldron EOS metadata, _ = parse_header_metadata input assert_equal 6, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Ryan Waldron', metadata['author'] assert_equal 'Ryan', metadata['firstname'] assert_equal 'Waldron', metadata['lastname'] assert_equal 'RW', metadata['authorinitials'] end test "skip block comment before rev" do input = <<-EOS Ryan Waldron //// Asciidoctor release info //// v0.0.7, 2013-12-18 EOS metadata, _ = parse_header_metadata input assert_equal 8, metadata.size assert_equal 1, metadata['authorcount'] assert_equal 'Ryan Waldron', metadata['author'] assert_equal '0.0.7', metadata['revnumber'] assert_equal '2013-12-18', metadata['revdate'] end test "attribute entry overrides generated author initials" do blankdoc = Asciidoctor::Document.new reader = Asciidoctor::Reader.new "Stuart Rackham \n:Author Initials: SJR".lines.entries metadata = Asciidoctor::Parser.parse_header_metadata(reader, blankdoc) assert_equal 'SR', metadata['authorinitials'] assert_equal 'SJR', blankdoc.attributes['authorinitials'] end test 'adjust indentation to 0' do input = <<-EOS.chomp def names @name.split ' ' end EOS expected = <<-EOS.chomp def names @name.split ' ' end EOS lines = input.split("\n") Asciidoctor::Parser.adjust_indentation! 
lines assert_equal expected, (lines * "\n") end test 'adjust indentation mixed with tabs and spaces to 0' do input = <<-EOS.chomp def names \t @name.split ' ' end EOS expected = <<-EOS.chomp def names @name.split ' ' end EOS lines = input.split("\n") Asciidoctor::Parser.adjust_indentation! lines, 0, 4 assert_equal expected, (lines * "\n") end test 'expands tabs to spaces' do input = <<-EOS.chomp Filesystem Size Used Avail Use% Mounted on Filesystem Size Used Avail Use% Mounted on devtmpfs 3.9G 0 3.9G 0% /dev /dev/mapper/fedora-root 48G 18G 29G 39% / EOS expected = <<-EOS.chomp Filesystem Size Used Avail Use% Mounted on Filesystem Size Used Avail Use% Mounted on devtmpfs 3.9G 0 3.9G 0% /dev /dev/mapper/fedora-root 48G 18G 29G 39% / EOS lines = input.split("\n") Asciidoctor::Parser.adjust_indentation! lines, 0, 4 assert_equal expected, (lines * "\n") end test 'adjust indentation to non-zero' do input = <<-EOS.chomp def names @name.split ' ' end EOS expected = <<-EOS.chomp def names @name.split ' ' end EOS lines = input.split("\n") Asciidoctor::Parser.adjust_indentation! lines, 2 assert_equal expected, (lines * "\n") end test 'preserve block indent if indent is -1' do input = <<-EOS def names @name.split ' ' end EOS expected = input lines = input.lines.entries Asciidoctor::Parser.adjust_indentation! lines, -1 assert_equal expected, lines.join end test 'adjust indentation handles empty lines gracefully' do input = [] expected = input lines = input.dup Asciidoctor::Parser.adjust_indentation! lines assert_equal expected, lines end end asciidoctor-1.5.5/test/paths_test.rb000066400000000000000000000272031277513741400175260ustar00rootroot00000000000000# encoding: UTF-8 unless defined? ASCIIDOCTOR_PROJECT_DIR $: << File.dirname(__FILE__); $:.uniq! require 'test_helper' end context 'Path Resolver' do context 'Web Paths' do def setup @resolver = Asciidoctor::PathResolver.new end test 'target with absolute path' do assert_equal '/images', @resolver.web_path('/images') assert_equal '/images', @resolver.web_path('/images', '') assert_equal '/images', @resolver.web_path('/images', nil) end test 'target with relative path' do assert_equal 'images', @resolver.web_path('images') assert_equal 'images', @resolver.web_path('images', '') assert_equal 'images', @resolver.web_path('images', nil) end test 'target with hidden relative path' do assert_equal '.images', @resolver.web_path('.images') assert_equal '.images', @resolver.web_path('.images', '') assert_equal '.images', @resolver.web_path('.images', nil) end test 'target with path relative to current directory' do assert_equal './images', @resolver.web_path('./images') assert_equal './images', @resolver.web_path('./images', '') assert_equal './images', @resolver.web_path('./images', nil) end test 'target with absolute path ignores start path' do assert_equal '/images', @resolver.web_path('/images', 'foo') assert_equal '/images', @resolver.web_path('/images', '/foo') assert_equal '/images', @resolver.web_path('/images', './foo') end test 'target with relative path appended to start path' do assert_equal 'assets/images', @resolver.web_path('images', 'assets') assert_equal '/assets/images', @resolver.web_path('images', '/assets') #assert_equal '/assets/images/tiger.png', @resolver.web_path('tiger.png', '/assets//images') assert_equal './assets/images', @resolver.web_path('images', './assets') assert_equal '/theme.css', @resolver.web_path('theme.css', '/') assert_equal '/css/theme.css', @resolver.web_path('theme.css', '/css/') end test 'target with path relative 
to current directory appended to start path' do assert_equal 'assets/images', @resolver.web_path('./images', 'assets') assert_equal '/assets/images', @resolver.web_path('./images', '/assets') assert_equal './assets/images', @resolver.web_path('./images', './assets') end test 'target with relative path appended to url start path' do assert_equal 'http://www.example.com/assets/images', @resolver.web_path('images', 'http://www.example.com/assets') end # enable if we want to allow web_path to detect and preserve a target URI #test 'target with file url appended to relative path' do # assert_equal 'file:///home/username/styles/asciidoctor.css', @resolver.web_path('file:///home/username/styles/asciidoctor.css', '.') #end # enable if we want to allow web_path to detect and preserve a target URI #test 'target with http url appended to relative path' do # assert_equal 'http://example.com/asciidoctor.css', @resolver.web_path('http://example.com/asciidoctor.css', '.') #end test 'normalize target' do assert_equal '../images', @resolver.web_path('../images/../images') end test 'append target to start path and normalize' do assert_equal '../images', @resolver.web_path('../images/../images', '../images') assert_equal '../../images', @resolver.web_path('../images', '..') end test 'normalize parent directory that follows root' do assert_equal '/tiger.png', @resolver.web_path('/../tiger.png') assert_equal '/tiger.png', @resolver.web_path('/../../tiger.png') end test 'uses start when target is empty' do assert_equal 'assets/images', @resolver.web_path('', 'assets/images') assert_equal 'assets/images', @resolver.web_path(nil, 'assets/images') end test 'posixfies windows paths' do assert_equal '/images', @resolver.web_path('\\images') assert_equal '../images', @resolver.web_path('..\\images') assert_equal '/images', @resolver.web_path('\\..\\images') assert_equal 'assets/images', @resolver.web_path('assets\\images') assert_equal '../assets/images', @resolver.web_path('assets\\images', '..\\images\\..') end end context 'System Paths' do JAIL = '/home/doctor/docs' def setup @resolver = Asciidoctor::PathResolver.new end test 'prevents access to paths outside of jail' do assert_equal "#{JAIL}/css", @resolver.system_path('../../../../../css', "#{JAIL}/assets/stylesheets", JAIL) assert_equal "#{JAIL}/css", @resolver.system_path('/../../../../../css', "#{JAIL}/assets/stylesheets", JAIL) assert_equal "#{JAIL}/css", @resolver.system_path('../../../css', '../../..', JAIL) end test 'throws exception for illegal path access if recover is false' do begin @resolver.system_path('../../../../../css', "#{JAIL}/assets/stylesheets", JAIL, :recover => false) flunk 'Expecting SecurityError to be raised' rescue SecurityError end end test 'resolves start path if target is empty' do assert_equal "#{JAIL}/assets/stylesheets", @resolver.system_path('', "#{JAIL}/assets/stylesheets", JAIL) assert_equal "#{JAIL}/assets/stylesheets", @resolver.system_path(nil, "#{JAIL}/assets/stylesheets", JAIL) end test 'resolves start path if target is dot' do assert_equal "#{JAIL}/assets/stylesheets", @resolver.system_path('.', "#{JAIL}/assets/stylesheets", JAIL) assert_equal "#{JAIL}/assets/stylesheets", @resolver.system_path('./', "#{JAIL}/assets/stylesheets", JAIL) end test 'treats absolute target as relative when jail is specified' do assert_equal "#{JAIL}/assets/stylesheets", @resolver.system_path('/', "#{JAIL}/assets/stylesheets", JAIL) assert_equal "#{JAIL}/assets/stylesheets/foo", @resolver.system_path('/foo', "#{JAIL}/assets/stylesheets", JAIL) 
assert_equal "#{JAIL}/assets/foo", @resolver.system_path('/../foo', "#{JAIL}/assets/stylesheets", JAIL) end test 'allows use of absolute target or start if resolved path is sub-path of jail' do assert_equal "#{JAIL}/my/path", @resolver.system_path("#{JAIL}/my/path", '', JAIL) assert_equal "#{JAIL}/my/path", @resolver.system_path("#{JAIL}/my/path", nil, JAIL) assert_equal "#{JAIL}/my/path", @resolver.system_path('', "#{JAIL}/my/path", JAIL) assert_equal "#{JAIL}/my/path", @resolver.system_path(nil, "#{JAIL}/my/path", JAIL) assert_equal "#{JAIL}/my/path", @resolver.system_path('path', "#{JAIL}/my", JAIL) end test 'uses jail path if start path is empty' do assert_equal "#{JAIL}/images/tiger.png", @resolver.system_path('images/tiger.png', '', JAIL) assert_equal "#{JAIL}/images/tiger.png", @resolver.system_path('images/tiger.png', nil, JAIL) end test 'raises security error if start is not contained within jail' do begin @resolver.system_path('images/tiger.png', '/etc', JAIL) flunk 'Expecting SecurityError to be raised' rescue SecurityError end begin @resolver.system_path('.', '/etc', JAIL) flunk 'Expecting SecurityError to be raised' rescue SecurityError end end test 'resolves absolute directory if jail is not specified' do assert_equal '/usr/share/stylesheet.css', @resolver.system_path('/usr/share/stylesheet.css', '/home/dallen/docs/assets/stylesheets') end test 'resolves ancestor directory of start if jail is not specified' do assert_equal '/usr/share/stylesheet.css', @resolver.system_path('../../../../../usr/share/stylesheet.css', '/home/dallen/docs/assets/stylesheets') end test 'resolves absolute path if start is absolute and target is relative' do assert_equal '/usr/share/assets/stylesheet.css', @resolver.system_path('assets/stylesheet.css', '/usr/share') end test 'resolves absolute UNC path if start is absolute and target is relative' do assert_equal '//QA/c$/users/asciidoctor/assets/stylesheet.css', @resolver.system_path('assets/stylesheet.css', '//QA/c$/users/asciidoctor') end test 'resolves relative target relative to current directory if start is empty' do pwd = File.expand_path(Dir.pwd) assert_equal "#{pwd}/images/tiger.png", @resolver.system_path('images/tiger.png', '') assert_equal "#{pwd}/images/tiger.png", @resolver.system_path('images/tiger.png', nil) end test 'resolves relative hidden target relative to current directory if start is empty' do pwd = File.expand_path(Dir.pwd) assert_equal "#{pwd}/.images/tiger.png", @resolver.system_path('.images/tiger.png', '') assert_equal "#{pwd}/.images/tiger.png", @resolver.system_path('.images/tiger.png', nil) end test 'resolves and normalizes start with target is empty' do pwd = File.expand_path(Dir.pwd) assert_equal '/home/doctor/docs', @resolver.system_path('', '/home/doctor/docs') assert_equal '/home/doctor/docs', @resolver.system_path(nil, '/home/doctor/docs') assert_equal "#{pwd}/assets/images", @resolver.system_path(nil, 'assets/images') assert_equal "#{JAIL}/assets/images", @resolver.system_path('', '../assets/images', JAIL) end test 'posixfies windows paths' do assert_equal "#{JAIL}/assets/css", @resolver.system_path('..\\css', 'assets\\stylesheets', JAIL) end test 'resolves windows paths when file separator is backlash' do @resolver.file_separator = '\\' assert_equal 'C:/data/docs', @resolver.system_path('..', "C:\\data\\docs\\assets", 'C:\\data\\docs') assert_equal 'C:/data/docs', @resolver.system_path('..\\..', "C:\\data\\docs\\assets", 'C:\\data\\docs') assert_equal 'C:/data/docs/css', @resolver.system_path('..\\..\\css', 
"C:\\data\\docs\\assets", 'C:\\data\\docs') end test 'should calculate relative path' do filename = @resolver.system_path('part1/chapter1/section1.adoc', nil, JAIL) assert_equal "#{JAIL}/part1/chapter1/section1.adoc", filename assert_equal 'part1/chapter1/section1.adoc', @resolver.relative_path(filename, JAIL) end test 'should resolve relative path relative to base dir in unsafe mode' do base_dir = fixture_path 'base' doc = empty_document :base_dir => base_dir, :safe => Asciidoctor::SafeMode::UNSAFE expected = ::File.join base_dir, 'images', 'tiger.png' actual = doc.normalize_system_path 'tiger.png', 'images' assert_equal expected, actual end test 'should resolve absolute path as absolute in unsafe mode' do base_dir = fixture_path 'base' doc = empty_document :base_dir => base_dir, :safe => Asciidoctor::SafeMode::UNSAFE actual = doc.normalize_system_path 'tiger.png', '/etc/images' assert_equal '/etc/images/tiger.png', actual end end context 'Helpers' do test 'rootname should return file name without extension' do assert_equal 'master', Asciidoctor::Helpers.rootname('master.adoc') assert_equal 'docs/master', Asciidoctor::Helpers.rootname('docs/master.adoc') end test 'rootname should file name if it has no extension' do assert_equal 'master', Asciidoctor::Helpers.rootname('master') assert_equal 'docs/master', Asciidoctor::Helpers.rootname('docs/master') end test 'UriSniffRx should detect URIs' do assert Asciidoctor::UriSniffRx =~ 'http://example.com' assert Asciidoctor::UriSniffRx =~ 'https://example.com' assert Asciidoctor::UriSniffRx =~ 'data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs=' end test 'UriSniffRx should not detect an absolute Windows path as a URI' do assert Asciidoctor::UriSniffRx !~ 'c:/sample.adoc' assert Asciidoctor::UriSniffRx !~ 'c:\\sample.adoc' end end end asciidoctor-1.5.5/test/preamble_test.rb000066400000000000000000000075241277513741400202020ustar00rootroot00000000000000# encoding: UTF-8 unless defined? ASCIIDOCTOR_PROJECT_DIR $: << File.dirname(__FILE__); $:.uniq! require 'test_helper' end context 'Preamble' do test 'title and single paragraph preamble before section' do input = <<-EOS = Title Preamble paragraph 1. == First Section Section paragraph 1. EOS result = render_string(input) assert_xpath '//p', result, 2 assert_xpath '//*[@id="preamble"]', result, 1 assert_xpath '//*[@id="preamble"]//p', result, 1 assert_xpath '//*[@id="preamble"]/following-sibling::*//h2[@id="_first_section"]', result, 1 assert_xpath '//*[@id="preamble"]/following-sibling::*//p', result, 1 end test 'title of preface is blank by default in DocBook output' do input = <<-EOS = Document Title :doctype: book Preface content. == First Section Section content. EOS result = render_string input, :backend => :docbook assert_xpath '//preface/title', result, 1 title_node = xmlnodes_at_xpath '//preface/title', result, 1 assert_equal '', title_node.text end test 'preface-title attribute is assigned as title of preface in DocBook output' do input = <<-EOS = Document Title :doctype: book :preface-title: Preface Preface content. == First Section Section content. EOS result = render_string input, :backend => :docbook assert_xpath '//preface/title[text()="Preface"]', result, 1 end test 'title and multi-paragraph preamble before section' do input = <<-EOS = Title Preamble paragraph 1. Preamble paragraph 2. == First Section Section paragraph 1. 
EOS result = render_string(input) assert_xpath '//p', result, 3 assert_xpath '//*[@id="preamble"]', result, 1 assert_xpath '//*[@id="preamble"]//p', result, 2 assert_xpath '//*[@id="preamble"]/following-sibling::*//h2[@id="_first_section"]', result, 1 assert_xpath '//*[@id="preamble"]/following-sibling::*//p', result, 1 end test 'should not wrap content in preamble if document has title but no sections' do input = <<-EOS = Title paragraph EOS result = render_string(input) assert_xpath '//p', result, 1 assert_xpath '//*[@id="content"]/*[@class="paragraph"]/p', result, 1 assert_xpath '//*[@id="content"]/*[@class="paragraph"]/following-sibling::*', result, 0 end test 'title and section without preamble' do input = <<-EOS = Title == First Section Section paragraph 1. EOS result = render_string(input) assert_xpath '//p', result, 1 assert_xpath '//*[@id="preamble"]', result, 0 assert_xpath '//h2[@id="_first_section"]', result, 1 end test 'no title with preamble and section' do input = <<-EOS Preamble paragraph 1. == First Section Section paragraph 1. EOS result = render_string(input) assert_xpath '//p', result, 2 assert_xpath '//*[@id="preamble"]', result, 0 assert_xpath '//h2[@id="_first_section"]/preceding::p', result, 1 end test 'preamble in book doctype' do input = <<-EOS = Book :doctype: book Back then... = Chapter One [partintro] It was a dark and stormy night... == Scene One Someone's gonna get axed. = Chapter Two [partintro] They couldn't believe their eyes when... == Scene One The axe came swinging. EOS d = document_from_string(input) assert_equal 'book', d.doctype output = d.render assert_xpath '//h1', output, 3 assert_xpath %{//*[@id="preamble"]//p[text() = "Back then#{expand_entity 8230}#{expand_entity 8203}"]}, output, 1 end test 'should render table of contents in preamble if toc-placement attribute value is preamble' do input = <<-EOS = Article :toc: :toc-placement: preamble Once upon a time... == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... EOS output = render_string input assert_xpath '//*[@id="preamble"]/*[@id="toc"]', output, 1 end end asciidoctor-1.5.5/test/reader_test.rb000066400000000000000000001440611277513741400176530ustar00rootroot00000000000000# encoding: UTF-8 unless defined? ASCIIDOCTOR_PROJECT_DIR $: << File.dirname(__FILE__); $:.uniq! 
require 'test_helper' end class ReaderTest < Minitest::Test DIRNAME = File.expand_path(File.dirname(__FILE__)) SAMPLE_DATA = <<-EOS.chomp.split(::Asciidoctor::EOL) first line second line third line EOS context 'Reader' do context 'Prepare lines' do test 'should prepare lines from Array data' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert_equal SAMPLE_DATA, reader.lines end test 'should prepare lines from String data' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert_equal SAMPLE_DATA, reader.lines end test 'should remove UTF-8 BOM from first line of String data' do data = "\xef\xbb\xbf#{SAMPLE_DATA.join ::Asciidoctor::EOL}" reader = Asciidoctor::Reader.new data, nil, :normalize => true assert_equal 'f', reader.lines.first[0..0] assert_equal SAMPLE_DATA, reader.lines end test 'should remove UTF-8 BOM from first line of Array data' do data = SAMPLE_DATA.dup data[0] = "\xef\xbb\xbf#{data.first}" reader = Asciidoctor::Reader.new data, nil, :normalize => true assert_equal 'f', reader.lines.first[0..0] assert_equal SAMPLE_DATA, reader.lines end if Asciidoctor::COERCE_ENCODING test 'should encode UTF-16LE string to UTF-8 when BOM is found' do data = "\uFEFF#{SAMPLE_DATA.join ::Asciidoctor::EOL}".encode('UTF-16LE').force_encoding('UTF-8') reader = Asciidoctor::Reader.new data, nil, :normalize => true assert_equal 'f', reader.lines.first[0..0] assert_equal SAMPLE_DATA, reader.lines end test 'should encode UTF-16LE string array to UTF-8 when BOM is found' do data = "\uFEFF#{SAMPLE_DATA.join ::Asciidoctor::EOL}".encode('UTF-16LE').force_encoding('UTF-8').lines.to_a reader = Asciidoctor::Reader.new data, nil, :normalize => true assert_equal 'f', reader.lines.first[0..0] assert_equal SAMPLE_DATA, reader.lines end test 'should encode UTF-16BE string to UTF-8 when BOM is found' do data = "\uFEFF#{SAMPLE_DATA.join ::Asciidoctor::EOL}".encode('UTF-16BE').force_encoding('UTF-8') reader = Asciidoctor::Reader.new data, nil, :normalize => true assert_equal 'f', reader.lines.first[0..0] assert_equal SAMPLE_DATA, reader.lines end test 'should encode UTF-16BE string array to UTF-8 when BOM is found' do data = "\uFEFF#{SAMPLE_DATA.join ::Asciidoctor::EOL}".encode('UTF-16BE').force_encoding('UTF-8').lines.to_a reader = Asciidoctor::Reader.new data, nil, :normalize => true assert_equal 'f', reader.lines.first[0..0] assert_equal SAMPLE_DATA, reader.lines end end end context 'With empty data' do test 'has_more_lines? should return false with empty data' do assert !Asciidoctor::Reader.new.has_more_lines? end test 'empty? should return true with empty data' do assert Asciidoctor::Reader.new.empty? assert Asciidoctor::Reader.new.eof? end test 'next_line_empty? should return true with empty data' do assert Asciidoctor::Reader.new.next_line_empty? end test 'peek_line should return nil with empty data' do assert_nil Asciidoctor::Reader.new.peek_line end test 'peek_lines should return empty Array with empty data' do assert_equal [], Asciidoctor::Reader.new.peek_lines end test 'read_line should return nil with empty data' do assert_nil Asciidoctor::Reader.new.read_line #assert_nil Asciidoctor::Reader.new.get_line end test 'read_lines should return empty Array with empty data' do assert_equal [], Asciidoctor::Reader.new.read_lines #assert_equal [], Asciidoctor::Reader.new.get_lines end end context 'With data' do test 'has_more_lines? should return true if there are lines remaining' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert reader.has_more_lines? end test 'empty? 
should return false if there are lines remaining' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert !reader.empty? assert !reader.eof? end test 'next_line_empty? should return false if next line is not blank' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert !reader.next_line_empty? end test 'next_line_empty? should return true if next line is blank' do reader = Asciidoctor::Reader.new ['', 'second line'] assert reader.next_line_empty? end test 'peek_line should return next line if there are lines remaining' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert_equal SAMPLE_DATA.first, reader.peek_line end test 'peek_line should not consume line or increment line number' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert_equal SAMPLE_DATA.first, reader.peek_line assert_equal SAMPLE_DATA.first, reader.peek_line assert_equal 1, reader.lineno end test 'peek_line should return next lines if there are lines remaining' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert_equal SAMPLE_DATA[0..1], reader.peek_lines(2) end test 'peek_lines should not consume lines or increment line number' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert_equal SAMPLE_DATA[0..1], reader.peek_lines(2) assert_equal SAMPLE_DATA[0..1], reader.peek_lines(2) assert_equal 1, reader.lineno end test 'peek_lines should not invert order of lines' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert_equal SAMPLE_DATA, reader.lines reader.peek_lines 3 assert_equal SAMPLE_DATA, reader.lines end test 'read_line should return next line if there are lines remaining' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert_equal SAMPLE_DATA.first, reader.read_line end test 'read_line should consume next line and increment line number' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert_equal SAMPLE_DATA[0], reader.read_line assert_equal SAMPLE_DATA[1], reader.read_line assert_equal 3, reader.lineno end test 'advance should consume next line and return a Boolean indicating if a line was consumed' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert reader.advance assert reader.advance assert reader.advance assert !reader.advance end test 'read_lines should return all lines' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert_equal SAMPLE_DATA, reader.read_lines end test 'read should return all lines joined as String' do reader = Asciidoctor::Reader.new SAMPLE_DATA assert_equal SAMPLE_DATA.join(::Asciidoctor::EOL), reader.read end test 'has_more_lines? should return false after read_lines is invoked' do reader = Asciidoctor::Reader.new SAMPLE_DATA reader.read_lines assert !reader.has_more_lines? end test 'unshift puts line onto Reader as next line to read' do reader = Asciidoctor::Reader.new SAMPLE_DATA, nil, :normalize => true reader.unshift 'line zero' assert_equal 'line zero', reader.peek_line assert_equal 'line zero', reader.read_line assert_equal 1, reader.lineno end test 'terminate should consume all lines and update line number' do reader = Asciidoctor::Reader.new SAMPLE_DATA reader.terminate assert reader.eof? 
assert_equal 4, reader.lineno end test 'skip_blank_lines should skip blank lines' do reader = Asciidoctor::Reader.new ['', ''].concat(SAMPLE_DATA) reader.skip_blank_lines assert_equal SAMPLE_DATA.first, reader.peek_line end test 'lines should return remaining lines' do reader = Asciidoctor::Reader.new SAMPLE_DATA reader.read_line assert_equal SAMPLE_DATA[1..-1], reader.lines end test 'source_lines should return copy of original data Array' do reader = Asciidoctor::Reader.new SAMPLE_DATA reader.read_lines assert_equal SAMPLE_DATA, reader.source_lines end test 'source should return original data Array joined as String' do reader = Asciidoctor::Reader.new SAMPLE_DATA reader.read_lines assert_equal SAMPLE_DATA.join(::Asciidoctor::EOL), reader.source end end context 'Line context' do test 'to_s should return file name and line number of current line' do reader = Asciidoctor::Reader.new SAMPLE_DATA, 'sample.adoc' reader.read_line assert_equal 'sample.adoc: line 2', reader.to_s end test 'line_info should return file name and line number of current line' do reader = Asciidoctor::Reader.new SAMPLE_DATA, 'sample.adoc' reader.read_line assert_equal 'sample.adoc: line 2', reader.line_info assert_equal 'sample.adoc: line 2', reader.next_line_info end test 'prev_line_info should return file name and line number of previous line read' do reader = Asciidoctor::Reader.new SAMPLE_DATA, 'sample.adoc' reader.read_line assert_equal 'sample.adoc: line 1', reader.prev_line_info end end context 'Read lines until' do test 'Read lines until until end' do lines = <<-EOS.lines.entries This is one paragraph. This is another paragraph. EOS reader = Asciidoctor::Reader.new lines, nil, :normalize => true result = reader.read_lines_until assert_equal 3, result.size assert_equal lines.map {|l| l.chomp }, result assert !reader.has_more_lines? assert reader.eof? end test 'Read lines until until blank line' do lines = <<-EOS.lines.entries This is one paragraph. This is another paragraph. EOS reader = Asciidoctor::Reader.new lines, nil, :normalize => true result = reader.read_lines_until :break_on_blank_lines => true assert_equal 1, result.size assert_equal lines.first.chomp, result.first assert_equal lines.last.chomp, reader.peek_line end test 'Read lines until until blank line preserving last line' do lines = <<-EOS.chomp.split(::Asciidoctor::EOL) This is one paragraph. This is another paragraph. EOS reader = Asciidoctor::Reader.new lines result = reader.read_lines_until :break_on_blank_lines => true, :preserve_last_line => true assert_equal 1, result.size assert_equal lines.first.chomp, result.first assert reader.next_line_empty? end test 'Read lines until until condition is true' do lines = <<-EOS.chomp.split(::Asciidoctor::EOL) -- This is one paragraph inside the block. This is another paragraph inside the block. -- This is a paragraph outside the block. EOS reader = Asciidoctor::Reader.new lines reader.read_line result = reader.read_lines_until {|line| line == '--' } assert_equal 3, result.size assert_equal lines[1, 3], result assert reader.next_line_empty? end test 'Read lines until until condition is true, taking last line' do lines = <<-EOS.chomp.split(::Asciidoctor::EOL) -- This is one paragraph inside the block. This is another paragraph inside the block. -- This is a paragraph outside the block. 
EOS reader = Asciidoctor::Reader.new lines reader.read_line result = reader.read_lines_until(:read_last_line => true) {|line| line == '--' } assert_equal 4, result.size assert_equal lines[1, 4], result assert reader.next_line_empty? end test 'Read lines until until condition is true, taking and preserving last line' do lines = <<-EOS.chomp.split(::Asciidoctor::EOL) -- This is one paragraph inside the block. This is another paragraph inside the block. -- This is a paragraph outside the block. EOS reader = Asciidoctor::Reader.new lines reader.read_line result = reader.read_lines_until(:read_last_line => true, :preserve_last_line => true) {|line| line == '--' } assert_equal 4, result.size assert_equal lines[1, 4], result assert_equal '--', reader.peek_line end end end context 'PreprocessorReader' do context 'Type hierarchy' do test 'PreprocessorReader should extend from Reader' do reader = empty_document.reader assert reader.is_a?(Asciidoctor::Reader) end test 'PreprocessorReader should invoke or emulate Reader initializer' do doc = Asciidoctor::Document.new SAMPLE_DATA reader = doc.reader assert_equal SAMPLE_DATA, reader.lines assert_equal 1, reader.lineno end end context 'Prepare lines' do test 'should prepare and normalize lines from Array data' do data = SAMPLE_DATA.map {|line| line.chomp} data.unshift '' data.push '' doc = Asciidoctor::Document.new data reader = doc.reader assert_equal SAMPLE_DATA, reader.lines end test 'should prepare and normalize lines from String data' do data = SAMPLE_DATA.map {|line| line.chomp} data.unshift ' ' data.push ' ' data_as_string = data * ::Asciidoctor::EOL doc = Asciidoctor::Document.new data_as_string reader = doc.reader assert_equal SAMPLE_DATA, reader.lines end test 'should clean CRLF from end of lines' do input = <<-EOS source\r with\r CRLF\r endlines\r EOS [input, input.lines.to_a, input.split(::Asciidoctor::EOL), input.split(::Asciidoctor::EOL).join(::Asciidoctor::EOL)].each do |lines| doc = Asciidoctor::Document.new lines reader = doc.reader reader.lines.each do |line| assert !line.end_with?("\r"), "CRLF not properly cleaned for source lines: #{lines.inspect}" assert !line.end_with?("\r\n"), "CRLF not properly cleaned for source lines: #{lines.inspect}" assert !line.end_with?("\n"), "CRLF not properly cleaned for source lines: #{lines.inspect}" end end end test 'should not skip front matter by default' do input = <<-EOS --- layout: post title: Document Title author: username tags: [ first, second ] --- = Document Title Author Name preamble EOS doc = Asciidoctor::Document.new input reader = doc.reader assert !doc.attributes.key?('front-matter') assert_equal '---', reader.peek_line end test 'should skip front matter if specified by skip-front-matter attribute' do front_matter = %(layout: post title: Document Title author: username tags: [ first, second ]) input = <<-EOS --- #{front_matter} --- = Document Title Author Name preamble EOS doc = Asciidoctor::Document.new input, :attributes => {'skip-front-matter' => ''} reader = doc.reader assert_equal '= Document Title', reader.peek_line assert_equal front_matter, doc.attributes['front-matter'] end end context 'Include Stack' do test 'PreprocessorReader#push_include method should return reader' do reader = empty_document.reader append_lines = %w(one two three) result = reader.push_include append_lines, '', '' assert_equal reader, result end test 'PreprocessorReader#push_include method should put lines on top of stack' do lines = %w(a b c) doc = Asciidoctor::Document.new lines reader = doc.reader 
append_lines = %w(one two three) reader.push_include append_lines, '', '' assert_equal 1, reader.include_stack.size assert_equal 'one', reader.read_line.rstrip end test 'PreprocessorReader#push_include method should gracefully handle file and path' do lines = %w(a b c) doc = Asciidoctor::Document.new lines reader = doc.reader append_lines = %w(one two three) reader.push_include append_lines assert_equal 1, reader.include_stack.size assert_equal 'one', reader.read_line.rstrip assert_nil reader.file assert_equal '', reader.path end end context 'Include Directive' do test 'include directive is disabled by default and becomes a link' do input = <<-EOS include::include-file.asciidoc[] EOS doc = Asciidoctor::Document.new input reader = doc.reader assert_equal 'link:include-file.asciidoc[]', reader.read_line end test 'include directive is enabled when safe mode is less than SECURE' do input = <<-EOS include::fixtures/include-file.asciidoc[] EOS doc = document_from_string input, :safe => :safe, :header_footer => false, :base_dir => DIRNAME output = doc.render assert_match(/included content/, output) end test 'include directive should resolve file with spaces in name' do input = <<-EOS include::fixtures/include file.asciidoc[] EOS include_file = File.join DIRNAME, 'fixtures', 'include-file.asciidoc' include_file_with_sp = File.join DIRNAME, 'fixtures', 'include file.asciidoc' begin FileUtils.cp include_file, include_file_with_sp doc = document_from_string input, :safe => :safe, :header_footer => false, :base_dir => DIRNAME output = doc.render assert_match(/included content/, output) ensure FileUtils.rm include_file_with_sp end end test 'include directive should resolve file with {sp} in name' do input = <<-EOS include::fixtures/include{sp}file.asciidoc[] EOS include_file = File.join DIRNAME, 'fixtures', 'include-file.asciidoc' include_file_with_sp = File.join DIRNAME, 'fixtures', 'include file.asciidoc' begin FileUtils.cp include_file, include_file_with_sp doc = document_from_string input, :safe => :safe, :header_footer => false, :base_dir => DIRNAME output = doc.render assert_match(/included content/, output) ensure FileUtils.rm include_file_with_sp end end test 'include directive should resolve file relative to current include' do input = <<-EOS include::fixtures/parent-include.adoc[] EOS pseudo_docfile = File.join DIRNAME, 'include-master.adoc' fixtures_dir = File.join DIRNAME, 'fixtures' parent_include_docfile = File.join fixtures_dir, 'parent-include.adoc' child_include_docfile = File.join fixtures_dir, 'child-include.adoc' grandchild_include_docfile = File.join fixtures_dir, 'grandchild-include.adoc' doc = empty_safe_document :base_dir => DIRNAME reader = Asciidoctor::PreprocessorReader.new doc, input, pseudo_docfile assert_equal pseudo_docfile, reader.file assert_equal DIRNAME, reader.dir assert_equal 'include-master.adoc', reader.path assert_equal 'first line of parent', reader.read_line assert_equal 'fixtures/parent-include.adoc: line 1', reader.prev_line_info assert_equal parent_include_docfile, reader.file assert_equal fixtures_dir, reader.dir assert_equal 'fixtures/parent-include.adoc', reader.path reader.skip_blank_lines assert_equal 'first line of child', reader.read_line assert_equal 'fixtures/child-include.adoc: line 1', reader.prev_line_info assert_equal child_include_docfile, reader.file assert_equal fixtures_dir, reader.dir assert_equal 'fixtures/child-include.adoc', reader.path reader.skip_blank_lines assert_equal 'first line of grandchild', reader.read_line assert_equal 
'fixtures/grandchild-include.adoc: line 1', reader.prev_line_info assert_equal grandchild_include_docfile, reader.file assert_equal fixtures_dir, reader.dir assert_equal 'fixtures/grandchild-include.adoc', reader.path reader.skip_blank_lines assert_equal 'last line of grandchild', reader.read_line reader.skip_blank_lines assert_equal 'last line of child', reader.read_line reader.skip_blank_lines assert_equal 'last line of parent', reader.read_line assert_equal 'fixtures/parent-include.adoc: line 5', reader.prev_line_info assert_equal parent_include_docfile, reader.file assert_equal fixtures_dir, reader.dir assert_equal 'fixtures/parent-include.adoc', reader.path end test 'missing file referenced by include directive is replaced by warning' do input = <<-EOS include::fixtures/no-such-file.adoc[] trailing content EOS begin doc = document_from_string input, :safe => :safe, :base_dir => DIRNAME assert_equal 2, doc.blocks.size assert_equal ['Unresolved directive in - include::fixtures/no-such-file.adoc[]'], doc.blocks[0].lines assert_equal ['trailing content'], doc.blocks[1].lines rescue flunk 'include directive should not raise exception on missing file' end end test 'unreadable file referenced by include directive is replaced by warning' do include_file = File.join DIRNAME, 'fixtures', 'chapter-a.adoc' FileUtils.chmod 0000, include_file input = <<-EOS include::fixtures/chapter-a.adoc[] trailing content EOS begin doc = document_from_string input, :safe => :safe, :base_dir => DIRNAME assert_equal 2, doc.blocks.size assert_equal ['Unresolved directive in - include::fixtures/chapter-a.adoc[]'], doc.blocks[0].lines assert_equal ['trailing content'], doc.blocks[1].lines rescue flunk 'include directive should not raise exception on missing file' ensure FileUtils.chmod 0644, include_file end end unless windows? # IMPORTANT this test needs to be run on Windows to verify proper behavior in Windows test 'can resolve include directive with absolute path' do include_path = ::File.join DIRNAME, 'fixtures', 'chapter-a.adoc' input = <<-EOS include::#{include_path}[] EOS result = document_from_string input, :safe => :safe assert_equal 'Chapter A', result.doctitle result = document_from_string input, :safe => :unsafe, :base_dir => ::Dir.tmpdir assert_equal 'Chapter A', result.doctitle end test 'include directive can retrieve data from uri' do #url = 'http://echo.jsontest.com/name/asciidoctor' url = %(http://#{resolve_localhost}:9876/name/asciidoctor) input = <<-EOS .... include::#{url}[] .... EOS expect = /\{"name": "asciidoctor"\}/ output = using_test_webserver do render_embedded_string input, :safe => :safe, :attributes => {'allow-uri-read' => ''} end refute_nil output assert_match(expect, output) end test 'inaccessible uri referenced by include directive does not crash processor' do url = %(http://#{resolve_localhost}:9876/no_such_file) input = <<-EOS .... include::#{url}[] .... 
EOS output = begin using_test_webserver do render_embedded_string input, :safe => :safe, :attributes => {'allow-uri-read' => ''} end rescue flunk 'include directive should not raise exception on inaccessible uri' end refute_nil output assert_match(/Unresolved directive/, output) end test 'include directive supports line selection' do input = <<-EOS include::fixtures/include-file.asciidoc[lines=1;3..4;6..-1] EOS output = render_string input, :safe => :safe, :header_footer => false, :base_dir => DIRNAME assert_match(/first line/, output) refute_match(/second line/, output) assert_match(/third line/, output) assert_match(/fourth line/, output) refute_match(/fifth line/, output) assert_match(/sixth line/, output) assert_match(/seventh line/, output) assert_match(/eighth line/, output) assert_match(/last line of included content/, output) end test 'include directive supports line selection using quoted attribute value' do input = <<-EOS include::fixtures/include-file.asciidoc[lines="1, 3..4 , 6 .. -1"] EOS output = render_string input, :safe => :safe, :header_footer => false, :base_dir => DIRNAME assert_match(/first line/, output) refute_match(/second line/, output) assert_match(/third line/, output) assert_match(/fourth line/, output) refute_match(/fifth line/, output) assert_match(/sixth line/, output) assert_match(/seventh line/, output) assert_match(/eighth line/, output) assert_match(/last line of included content/, output) end test 'include directive supports tagged selection' do input = <<-EOS include::fixtures/include-file.asciidoc[tag=snippetA] EOS output = render_string input, :safe => :safe, :header_footer => false, :base_dir => DIRNAME assert_match(/snippetA content/, output) refute_match(/snippetB content/, output) refute_match(/non-tagged content/, output) refute_match(/included content/, output) end test 'include directive supports multiple tagged selection' do input = <<-EOS include::fixtures/include-file.asciidoc[tags=snippetA;snippetB] EOS output = render_string input, :safe => :safe, :header_footer => false, :base_dir => DIRNAME assert_match(/snippetA content/, output) assert_match(/snippetB content/, output) refute_match(/non-tagged content/, output) refute_match(/included content/, output) end test 'include directive supports tagged selection in XML file' do input = <<-EOS [source,xml,indent=0] ---- include::fixtures/include-file.xml[tag=snippet] ---- EOS output = render_string input, :safe => :safe, :header_footer => false, :base_dir => DIRNAME assert_match('<snippet>content</snippet>', output) refute_match('root', output) end test 'include directive does not select tagged lines inside tagged selection' do input = <<-EOS ++++ include::fixtures/include-file.asciidoc[tags=snippet] ++++ EOS output = render_embedded_string input, :safe => :safe, :base_dir => DIRNAME expect = %(snippetA content non-tagged content snippetB content) assert_equal expect, output end test 'should warn if tag is not found in include file' do input = <<-EOS include::fixtures/include-file.asciidoc[tag=snippetZ] EOS old_stderr = $stderr $stderr = StringIO.new begin render_string input, :safe => :safe, :header_footer => false, :base_dir => DIRNAME warning = $stderr.tap(&:rewind).read refute_nil warning assert_match(/WARNING.*snippetZ/, warning) ensure $stderr = old_stderr end end test 'lines attribute takes precedence over tags attribute in include directive' do input = <<-EOS include::fixtures/include-file.asciidoc[lines=1, tags=snippetA;snippetB] EOS output = render_string input, :safe => :safe, 
:header_footer => false, :base_dir => DIRNAME assert_match(/first line of included content/, output) refute_match(/snippetA content/, output) refute_match(/snippetB content/, output) end test 'indent of included file can be reset to size of indent attribute' do input = <<-EOS [source, xml] ---- include::fixtures/basic-docinfo.xml[lines=2..3, indent=0] ---- EOS output = render_string input, :safe => :safe, :header_footer => false, :base_dir => DIRNAME result = xmlnodes_at_xpath('//pre', output, 1).text assert_equal "2013\nAcme™, Inc.", result end test 'should fall back to built-in include directive behavior when not handled by include processor' do input = <<-EOS include::fixtures/include-file.asciidoc[] EOS include_processor = Class.new { def initialize document end def handles? target false end def process reader, target, attributes raise 'TestIncludeHandler should not have been invoked' end } document = empty_safe_document :base_dir => DIRNAME reader = Asciidoctor::PreprocessorReader.new document, input reader.instance_variable_set '@include_processors', [include_processor.new(document)] lines = reader.read_lines source = lines * ::Asciidoctor::EOL assert_match(/included content/, source) end test 'leveloffset attribute entries should be added to content if leveloffset attribute is specified' do input = <<-EOS include::fixtures/master.adoc[] EOS expected = <<-EOS.chomp.split(::Asciidoctor::EOL) = Master Document preamble :leveloffset: +1 = Chapter A content :leveloffset!: EOS document = Asciidoctor.load input, :safe => :safe, :base_dir => DIRNAME, :parse => false assert_equal expected, document.reader.read_lines end test 'attributes are substituted in target of include directive' do input = <<-EOS :fixturesdir: fixtures :ext: asciidoc include::{fixturesdir}/include-file.{ext}[] EOS doc = document_from_string input, :safe => :safe, :base_dir => DIRNAME output = doc.render assert_match(/included content/, output) end test 'line is skipped by default if target of include directive resolves to empty' do input = <<-EOS include::{foodir}/include-file.asciidoc[] EOS doc = empty_safe_document :base_dir => DIRNAME reader = Asciidoctor::PreprocessorReader.new doc, input assert_equal 'Unresolved directive in - include::{foodir}/include-file.asciidoc[]', reader.read_line end test 'line is dropped if target of include directive resolves to empty and attribute-missing attribute is not skip' do input = <<-EOS include::{foodir}/include-file.asciidoc[] EOS doc = empty_safe_document :base_dir => DIRNAME, :attributes => {'attribute-missing' => 'drop'} reader = Asciidoctor::PreprocessorReader.new doc, input assert_nil reader.read_line end test 'line following dropped include is not dropped' do input = <<-EOS include::{foodir}/include-file.asciidoc[] yo EOS doc = empty_safe_document :base_dir => DIRNAME, :attributes => {'attribute-missing' => 'drop'} reader = Asciidoctor::PreprocessorReader.new doc, input assert_equal 'yo', reader.read_line end test 'escaped include directive is left unprocessed' do input = <<-EOS \\include::fixtures/include-file.asciidoc[] \\escape preserved here EOS doc = empty_safe_document :base_dir => DIRNAME reader = Asciidoctor::PreprocessorReader.new doc, input # we should be able to peek it multiple times and still have the backslash preserved # this is the test for @unescape_next_line assert_equal 'include::fixtures/include-file.asciidoc[]', reader.peek_line assert_equal 'include::fixtures/include-file.asciidoc[]', reader.peek_line assert_equal 
'include::fixtures/include-file.asciidoc[]', reader.read_line assert_equal '\\escape preserved here', reader.read_line end test 'include directive not at start of line is ignored' do input = <<-EOS include::include-file.asciidoc[] EOS para = block_from_string input assert_equal 1, para.lines.size # NOTE the space gets stripped because the line is treated as an inline literal assert_equal :literal, para.context assert_equal 'include::include-file.asciidoc[]', para.source end test 'include directive is disabled when max-include-depth attribute is 0' do input = <<-EOS include::include-file.asciidoc[] EOS para = block_from_string input, :safe => :safe, :attributes => { 'max-include-depth' => 0 } assert_equal 1, para.lines.size assert_equal 'include::include-file.asciidoc[]', para.source end test 'max-include-depth cannot be set by document' do input = <<-EOS :max-include-depth: 1 include::include-file.asciidoc[] EOS para = block_from_string input, :safe => :safe, :attributes => { 'max-include-depth' => 0 } assert_equal 1, para.lines.size assert_equal 'include::include-file.asciidoc[]', para.source end test 'include directive should be disabled if max include depth has been exceeded' do input = <<-EOS include::fixtures/parent-include.adoc[depth=1] EOS pseudo_docfile = File.join DIRNAME, 'include-master.adoc' doc = empty_safe_document :base_dir => DIRNAME reader = Asciidoctor::PreprocessorReader.new doc, input, Asciidoctor::Reader::Cursor.new(pseudo_docfile) lines = reader.readlines assert lines.include?('include::child-include.adoc[]') end test 'include directive should be disabled if max include depth set in nested context has been exceeded' do input = <<-EOS include::fixtures/parent-include-restricted.adoc[depth=3] EOS pseudo_docfile = File.join DIRNAME, 'include-master.adoc' doc = empty_safe_document :base_dir => DIRNAME reader = Asciidoctor::PreprocessorReader.new doc, input, Asciidoctor::Reader::Cursor.new(pseudo_docfile) lines = reader.readlines assert lines.include?('first line of child') assert lines.include?('include::grandchild-include.adoc[]') end test 'read_lines_until should not process lines if process option is false' do lines = <<-EOS.each_line.to_a //// include::fixtures/no-such-file.adoc[] //// EOS doc = empty_safe_document :base_dir => DIRNAME reader = Asciidoctor::PreprocessorReader.new doc, lines reader.read_line result = reader.read_lines_until(:terminator => '////', :skip_processing => true) assert_equal lines.map {|l| l.chomp}[1..1], result end test 'skip_comment_lines should not process lines read' do lines = <<-EOS.each_line.to_a //// include::fixtures/no-such-file.adoc[] //// EOS doc = empty_safe_document :base_dir => DIRNAME reader = Asciidoctor::PreprocessorReader.new doc, lines result = reader.skip_comment_lines assert_equal lines.map {|l| l.chomp}, result end end context 'Conditional Inclusions' do test 'process_line returns nil if cursor advanced' do input = <<-EOS ifdef::asciidoctor[] Asciidoctor! endif::asciidoctor[] EOS doc = Asciidoctor::Document.new input reader = doc.reader assert_nil reader.process_line(reader.lines.first) end test 'peek_line advances cursor to next conditional line of content' do input = <<-EOS ifdef::asciidoctor[] Asciidoctor! endif::asciidoctor[] EOS doc = Asciidoctor::Document.new input reader = doc.reader assert_equal 1, reader.lineno assert_equal 'Asciidoctor!', reader.peek_line assert_equal 2, reader.lineno end test 'process_line returns line if cursor not advanced' do input = <<-EOS content ifdef::asciidoctor[] Asciidoctor! 
endif::asciidoctor[] EOS doc = Asciidoctor::Document.new input reader = doc.reader refute_nil reader.process_line(reader.lines.first) end test 'peek_line does not advance cursor when on a regular content line' do input = <<-EOS content ifdef::asciidoctor[] Asciidoctor! endif::asciidoctor[] EOS doc = Asciidoctor::Document.new input reader = doc.reader assert_equal 1, reader.lineno assert_equal 'content', reader.peek_line assert_equal 1, reader.lineno end test 'peek_line returns nil if cursor advances past end of source' do input = <<-EOS ifdef::foobar[] swallowed content endif::foobar[] EOS doc = Asciidoctor::Document.new input reader = doc.reader assert_equal 1, reader.lineno assert_nil reader.peek_line assert_equal 4, reader.lineno end test 'ifdef with defined attribute includes content' do input = <<-EOS ifdef::holygrail[] There is a holy grail! endif::holygrail[] EOS doc = Asciidoctor::Document.new input, :attributes => { 'holygrail' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal 'There is a holy grail!', (lines * ::Asciidoctor::EOL) end test 'ifdef with defined attribute includes text in brackets' do input = <<-EOS On our quest we go... ifdef::holygrail[There is a holy grail!] There was much rejoicing. EOS doc = Asciidoctor::Document.new input, :attributes => { 'holygrail' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal "On our quest we go...\nThere is a holy grail!\nThere was much rejoicing.", (lines * ::Asciidoctor::EOL) end test 'ifdef attribute name is not case sensitive' do input = <<-EOS ifdef::showScript[] The script is shown! endif::showScript[] EOS doc = Asciidoctor::Document.new input, :attributes => { 'showscript' => '' } result = doc.reader.read assert_equal 'The script is shown!', result end test 'ifndef with defined attribute does not include text in brackets' do input = <<-EOS On our quest we go... ifndef::hardships[There is a holy grail!] There was no rejoicing. EOS doc = Asciidoctor::Document.new input, :attributes => { 'hardships' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal "On our quest we go...\nThere was no rejoicing.", (lines * ::Asciidoctor::EOL) end test 'include with non-matching nested exclude' do input = <<-EOS ifdef::grail[] holy ifdef::swallow[] swallow endif::swallow[] grail endif::grail[] EOS doc = Asciidoctor::Document.new input, :attributes => { 'grail' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal "holy\ngrail", (lines * ::Asciidoctor::EOL) end test 'nested excludes with same condition' do input = <<-EOS ifndef::grail[] ifndef::grail[] not here endif::grail[] endif::grail[] EOS doc = Asciidoctor::Document.new input, :attributes => { 'grail' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal '', (lines * ::Asciidoctor::EOL) end test 'include with nested exclude of inverted condition' do input = <<-EOS ifdef::grail[] holy ifndef::grail[] not here endif::grail[] grail endif::grail[] EOS doc = Asciidoctor::Document.new input, :attributes => { 'grail' => '' } reader = doc.reader lines = [] while reader.has_more_lines? 
lines << reader.read_line end assert_equal "holy\ngrail", (lines * ::Asciidoctor::EOL) end test 'exclude with matching nested exclude' do input = <<-EOS poof ifdef::swallow[] no ifdef::swallow[] swallow endif::swallow[] here endif::swallow[] gone EOS doc = Asciidoctor::Document.new input, :attributes => { 'grail' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal "poof\ngone", (lines * ::Asciidoctor::EOL) end test 'exclude with nested include using shorthand end' do input = <<-EOS poof ifndef::grail[] no grail ifndef::swallow[] or swallow endif::[] in here endif::[] gone EOS doc = Asciidoctor::Document.new input, :attributes => { 'grail' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal "poof\ngone", (lines * ::Asciidoctor::EOL) end test 'ifdef with one alternative attribute set includes content' do input = <<-EOS ifdef::holygrail,swallow[] Our quest is complete! endif::holygrail,swallow[] EOS doc = Asciidoctor::Document.new input, :attributes => { 'swallow' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal 'Our quest is complete!', (lines * ::Asciidoctor::EOL) end test 'ifdef with no alternative attributes set does not include content' do input = <<-EOS ifdef::holygrail,swallow[] Our quest is complete! endif::holygrail,swallow[] EOS doc = Asciidoctor::Document.new input reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal '', (lines * ::Asciidoctor::EOL) end test 'ifdef with all required attributes set includes content' do input = <<-EOS ifdef::holygrail+swallow[] Our quest is complete! endif::holygrail+swallow[] EOS doc = Asciidoctor::Document.new input, :attributes => { 'holygrail' => '', 'swallow' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal 'Our quest is complete!', (lines * ::Asciidoctor::EOL) end test 'ifdef with missing required attributes does not include content' do input = <<-EOS ifdef::holygrail+swallow[] Our quest is complete! endif::holygrail+swallow[] EOS doc = Asciidoctor::Document.new input, :attributes => { 'holygrail' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal '', (lines * ::Asciidoctor::EOL) end test 'ifndef with undefined attribute includes block' do input = <<-EOS ifndef::holygrail[] Our quest continues to find the holy grail! endif::holygrail[] EOS doc = Asciidoctor::Document.new input reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal 'Our quest continues to find the holy grail!', (lines * ::Asciidoctor::EOL) end test 'ifndef with one alternative attribute set includes content' do input = <<-EOS ifndef::holygrail,swallow[] Our quest is complete! endif::holygrail,swallow[] EOS doc = Asciidoctor::Document.new input, :attributes => { 'swallow' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal 'Our quest is complete!', (lines * ::Asciidoctor::EOL) end test 'ifndef with no alternative attributes set includes content' do input = <<-EOS ifndef::holygrail,swallow[] Our quest is complete! endif::holygrail,swallow[] EOS doc = Asciidoctor::Document.new input reader = doc.reader lines = [] while reader.has_more_lines? 
lines << reader.read_line end assert_equal 'Our quest is complete!', (lines * ::Asciidoctor::EOL) end test 'ifndef with any required attributes set does not include content' do input = <<-EOS ifndef::holygrail+swallow[] Our quest is complete! endif::holygrail+swallow[] EOS doc = Asciidoctor::Document.new input, :attributes => { 'swallow' => '' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal '', (lines * ::Asciidoctor::EOL) end test 'ifndef with no required attributes set includes content' do input = <<-EOS ifndef::holygrail+swallow[] Our quest is complete! endif::holygrail+swallow[] EOS doc = Asciidoctor::Document.new input reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal 'Our quest is complete!', (lines * ::Asciidoctor::EOL) end test 'escaped ifdef is unescaped and ignored' do input = <<-EOS \\ifdef::holygrail[] content \\endif::holygrail[] EOS doc = Asciidoctor::Document.new input reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal "ifdef::holygrail[]\ncontent\nendif::holygrail[]", (lines * ::Asciidoctor::EOL) end test 'ifeval comparing missing attribute to nil includes content' do input = <<-EOS ifeval::['{foo}' == ''] No foo for you! endif::[] EOS doc = Asciidoctor::Document.new input reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal 'No foo for you!', (lines * ::Asciidoctor::EOL) end test 'ifeval comparing missing attribute to 0 drops content' do input = <<-EOS ifeval::[{leveloffset} == 0] I didn't make the cut! endif::[] EOS doc = Asciidoctor::Document.new input reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal '', (lines * ::Asciidoctor::EOL) end test 'ifeval comparing double-quoted attribute to matching string includes content' do input = <<-EOS ifeval::["{gem}" == "asciidoctor"] Asciidoctor it is! endif::[] EOS doc = Asciidoctor::Document.new input, :attributes => { 'gem' => 'asciidoctor' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal 'Asciidoctor it is!', (lines * ::Asciidoctor::EOL) end test 'ifeval comparing single-quoted attribute to matching string includes content' do input = <<-EOS ifeval::['{gem}' == 'asciidoctor'] Asciidoctor it is! endif::[] EOS doc = Asciidoctor::Document.new input, :attributes => { 'gem' => 'asciidoctor' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal 'Asciidoctor it is!', (lines * ::Asciidoctor::EOL) end test 'ifeval comparing quoted attribute to non-matching string drops content' do input = <<-EOS ifeval::['{gem}' == 'asciidoctor'] Asciidoctor it is! endif::[] EOS doc = Asciidoctor::Document.new input, :attributes => { 'gem' => 'tilt' } reader = doc.reader lines = [] while reader.has_more_lines? lines << reader.read_line end assert_equal '', (lines * ::Asciidoctor::EOL) end test 'ifeval comparing attribute to lower version number includes content' do input = <<-EOS ifeval::['{asciidoctor-version}' >= '0.1.0'] That version will do! endif::[] EOS doc = Asciidoctor::Document.new input reader = doc.reader lines = [] while reader.has_more_lines? 
          lines << reader.read_line
        end
        assert_equal 'That version will do!', (lines * ::Asciidoctor::EOL)
      end

      test 'ifeval comparing attribute to self includes content' do
        input = <<-EOS
ifeval::['{asciidoctor-version}' == '{asciidoctor-version}']
Of course it's the same!
endif::[]
        EOS
        doc = Asciidoctor::Document.new input
        reader = doc.reader
        lines = []
        while reader.has_more_lines?
          lines << reader.read_line
        end
        assert_equal 'Of course it\'s the same!', (lines * ::Asciidoctor::EOL)
      end

      test 'ifeval arguments can be transposed' do
        input = <<-EOS
ifeval::['0.1.0' <= '{asciidoctor-version}']
That version will do!
endif::[]
        EOS
        doc = Asciidoctor::Document.new input
        reader = doc.reader
        lines = []
        while reader.has_more_lines?
          lines << reader.read_line
        end
        assert_equal 'That version will do!', (lines * ::Asciidoctor::EOL)
      end

      test 'ifeval matching numeric equality includes content' do
        input = <<-EOS
ifeval::[{rings} == 1]
One ring to rule them all!
endif::[]
        EOS
        doc = Asciidoctor::Document.new input, :attributes => { 'rings' => '1' }
        reader = doc.reader
        lines = []
        while reader.has_more_lines?
          lines << reader.read_line
        end
        assert_equal 'One ring to rule them all!', (lines * ::Asciidoctor::EOL)
      end

      test 'ifeval matching numeric inequality includes content' do
        input = <<-EOS
ifeval::[{rings} != 0]
One ring to rule them all!
endif::[]
        EOS
        doc = Asciidoctor::Document.new input, :attributes => { 'rings' => '1' }
        reader = doc.reader
        lines = []
        while reader.has_more_lines?
          lines << reader.read_line
        end
        assert_equal 'One ring to rule them all!', (lines * ::Asciidoctor::EOL)
      end

      test 'ifdef with no target is ignored' do
        input = <<-EOS
ifdef::[]
content
        EOS
        doc = Asciidoctor::Document.new input
        reader = doc.reader
        lines = []
        while reader.has_more_lines?
          lines << reader.read_line
        end
        assert_equal "ifdef::[]\ncontent", (lines * ::Asciidoctor::EOL)
      end
    end
  end
end
asciidoctor-1.5.5/test/sections_test.rb
# encoding: UTF-8
unless defined? ASCIIDOCTOR_PROJECT_DIR
  $: << File.dirname(__FILE__); $:.uniq!
require 'test_helper' end context 'Sections' do context 'Ids' do test 'synthetic id is generated by default' do sec = block_from_string('== Section One') assert_equal '_section_one', sec.id end test 'synthetic id replaces non-word characters with underscores' do sec = block_from_string("== We're back!") assert_equal '_we_re_back', sec.id end test 'synthetic id removes repeating underscores' do sec = block_from_string('== Section $ One') assert_equal '_section_one', sec.id end test 'synthetic id removes entities' do sec = block_from_string('== Ben & Jerry & Company "Ice Cream Brothers" ✾') assert_equal '_ben_jerry_company_ice_cream_brothers', sec.id end test 'synthetic id prefix can be customized' do sec = block_from_string(":idprefix: id_\n\n== Section One") assert_equal 'id_section_one', sec.id end test 'synthetic id prefix can be set to blank' do sec = block_from_string(":idprefix:\n\n== Section One") assert_equal 'section_one', sec.id end test 'synthetic id prefix is stripped from beginning of id if set to blank' do sec = block_from_string(":idprefix:\n\n== & More") assert_equal 'more', sec.id end test 'synthetic id separator can be customized' do sec = block_from_string(":idseparator: -\n\n== Section One") assert_equal '_section-one', sec.id end test 'synthetic id separator can be set to blank' do sec = block_from_string(":idseparator:\n\n== Section One") assert_equal '_sectionone', sec.id end test 'synthetic id separator can be set to blank when idprefix is blank' do sec = block_from_string(":idprefix:\n:idseparator:\n\n== Section One") assert_equal 'sectionone', sec.id end test 'synthetic id separator is removed from beginning of id when idprefix is blank' do sec = block_from_string(":idprefix:\n:idseparator: _\n\n== +Section One") assert_equal 'section_one', sec.id end test 'synthetic ids can be disabled' do sec = block_from_string(":sectids!:\n\n== Section One\n") assert sec.id.nil? 
end test 'explicit id in anchor above section title overrides synthetic id' do sec = block_from_string("[[one]]\n== Section One") assert_equal 'one', sec.id end test 'explicit id can be defined using an embedded anchor' do sec = block_from_string("== Section One [[one]] ==") assert_equal 'one', sec.id assert_equal 'Section One', sec.title end test 'explicit id can be defined using an embedded anchor with reftext' do sec = block_from_string("== Section One [[one,Section Uno]] ==") assert_equal 'one', sec.id assert_equal 'Section One', sec.title assert_equal 'Section Uno', (sec.attr 'reftext') end test 'id and reftext in embedded anchor cannot be quoted' do sec = block_from_string(%(== Section One [["one","Section Uno"]] ==)) refute_equal 'one', sec.id assert_equal 'Section One [["one","Section Uno"]]', sec.title assert_nil(sec.attr 'reftext') end test 'reftext in embedded anchor may contain comma' do sec = block_from_string(%(== Section One [[one, Section,Uno]] ==)) assert_equal 'one', sec.id assert_equal 'Section One', sec.title assert_equal 'Section,Uno', (sec.attr 'reftext') end test 'should unescape but not process inline anchor' do sec = block_from_string(%(== Section One \\[[one]] ==)) refute_equal 'one', sec.id assert_equal 'Section One [[one]]', sec.title end test 'title substitutions are applied before generating id' do sec = block_from_string("== Section{sp}One\n") assert_equal '_section_one', sec.id end test 'synthetic ids are unique' do input = <<-EOS == Some section text == Some section text EOS doc = document_from_string input assert_equal '_some_section', doc.blocks[0].id assert_equal '_some_section_2', doc.blocks[1].id end # NOTE test cannot be run in parallel with other tests test 'can set start index of synthetic ids' do old_unique_id_start_index = Asciidoctor::Compliance.unique_id_start_index begin input = <<-EOS == Some section text == Some section text EOS Asciidoctor::Compliance.unique_id_start_index = 1 doc = document_from_string input assert_equal '_some_section', doc.blocks[0].id assert_equal '_some_section_1', doc.blocks[1].id ensure Asciidoctor::Compliance.unique_id_start_index = old_unique_id_start_index end end test 'should use specified id and reftext when registering section reference' do input = <<-EOS [[install,Install Procedure]] == Install content EOS doc = document_from_string input reftext = doc.references[:ids]['install'] refute_nil reftext assert_equal 'Install Procedure', reftext end test 'should use specified reftext when registering section reference' do input = <<-EOS [reftext="Install Procedure"] == Install content EOS doc = document_from_string input reftext = doc.references[:ids]['_install'] refute_nil reftext assert_equal 'Install Procedure', reftext end test 'should not overwrite existing id entry in references table' do input = <<-EOS [#install] == First Install content [#install] == Second Install content EOS doc = document_from_string input reftext = doc.references[:ids]['install'] refute_nil reftext assert_equal 'First Install', reftext end test 'should not overwrite existing id entry with generated reftext in references table' do input = <<-EOS [#install] == First Install content [#install] content EOS doc = document_from_string input reftext = doc.references[:ids]['install'] refute_nil reftext assert_equal 'First Install', reftext end end context "document title (level 0)" do test "document title with multiline syntax" do title = "My Title" chars = "=" * title.length assert_xpath "//h1[not(@id)][text() = 'My Title']", render_string(title 
+ "\n" + chars) assert_xpath "//h1[not(@id)][text() = 'My Title']", render_string(title + "\n" + chars + "\n") end test "document title with multiline syntax, give a char" do title = "My Title" chars = "=" * (title.length + 1) assert_xpath "//h1[not(@id)][text() = 'My Title']", render_string(title + "\n" + chars) assert_xpath "//h1[not(@id)][text() = 'My Title']", render_string(title + "\n" + chars + "\n") end test "document title with multiline syntax, take a char" do title = "My Title" chars = "=" * (title.length - 1) assert_xpath "//h1[not(@id)][text() = 'My Title']", render_string(title + "\n" + chars) assert_xpath "//h1[not(@id)][text() = 'My Title']", render_string(title + "\n" + chars + "\n") end test 'document title with multiline syntax and unicode characters' do input = <<-EOS AsciiDoc Writer’s Guide ======================= Author Name preamble EOS result = render_string input assert_xpath '//h1', result, 1 assert_xpath '//h1[text()="AsciiDoc Writer’s Guide"]', result, 1 end test "not enough chars for a multiline document title" do title = "My Title" chars = "=" * (title.length - 2) assert_xpath '//h1', render_string(title + "\n" + chars), 0 assert_xpath '//h1', render_string(title + "\n" + chars + "\n"), 0 end test "too many chars for a multiline document title" do title = "My Title" chars = "=" * (title.length + 2) assert_xpath '//h1', render_string(title + "\n" + chars), 0 assert_xpath '//h1', render_string(title + "\n" + chars + "\n"), 0 end test "document title with multiline syntax cannot begin with a dot" do title = ".My Title" chars = "=" * title.length assert_xpath '//h1', render_string(title + "\n" + chars), 0 end test "document title with single-line syntax" do assert_xpath "//h1[not(@id)][text() = 'My Title']", render_string("= My Title") end test "document title with symmetric syntax" do assert_xpath "//h1[not(@id)][text() = 'My Title']", render_string("= My Title =") end test 'should assign id on document title to body' do input = <<-EOS [[idname]] = Document Title content EOS output = render_string input assert_css 'body#idname', output, 1 end test 'should assign id defined using shorthand syntax on document title to body' do input = <<-EOS [#idname] = Document Title content EOS output = render_string input assert_css 'body#idname', output, 1 end test 'should use inline id instead of id defined in block attributes' do input = <<-EOS [#idname-block] = Document Title [[idname-inline]] content EOS output = render_string input assert_css 'body#idname-inline', output, 1 end test 'block id above document title sets id on document' do input = <<-EOS [[reference]] = Reference Manual :css-signature: refguide preamble EOS doc = document_from_string input assert_equal 'reference', doc.id assert_equal 'refguide', doc.attr('css-signature') output = doc.render assert_css 'body#reference', output, 1 end test 'should discard style, role and options shorthand attributes defined on document title' do input = <<-EOS [style#idname.rolename%optionname] = Document Title content EOS doc = document_from_string input assert doc.blocks[0].attributes.empty? 
output = doc.convert assert_css 'body#idname', output, 1 assert_css '.rolename', output, 0 end end context "level 1" do test "with multiline syntax" do assert_xpath "//h2[@id='_my_section'][text() = 'My Section']", render_string("My Section\n-----------") end test "heading title with multiline syntax cannot begin with a dot" do title = ".My Title" chars = "-" * title.length assert_xpath '//h2', render_string(title + "\n" + chars), 0 end test "with single-line syntax" do assert_xpath "//h2[@id='_my_title'][text() = 'My Title']", render_string("== My Title") end test "with single-line symmetric syntax" do assert_xpath "//h2[@id='_my_title'][text() = 'My Title']", render_string("== My Title ==") end test "with single-line non-matching symmetric syntax" do assert_xpath "//h2[@id='_my_title'][text() = 'My Title ===']", render_string("== My Title ===") end test "with XML entity" do assert_xpath "//h2[@id='_where_s_the_love'][text() = \"Where#{[8217].pack('U*')}s the love?\"]", render_string("== Where's the love?") end test "with non-word character" do assert_xpath "//h2[@id='_where_s_the_love'][text() = \"Where’s the love?\"]", render_string("== Where’s the love?") end test "with sequential non-word characters" do assert_xpath "//h2[@id='_what_the_is_this'][text() = 'What the \#@$ is this?']", render_string('== What the #@$ is this?') end test "with trailing whitespace" do assert_xpath "//h2[@id='_my_title'][text() = 'My Title']", render_string("== My Title ") end test "with custom blank idprefix" do assert_xpath "//h2[@id='my_title'][text() = 'My Title']", render_string(":idprefix:\n\n== My Title ") end test "with custom non-blank idprefix" do assert_xpath "//h2[@id='ref_my_title'][text() = 'My Title']", render_string(":idprefix: ref_\n\n== My Title ") end test 'with multibyte characters' do input = <<-EOS == Asciidoctor in 中文 EOS output = render_string input if ::RUBY_MIN_VERSION_1_9 assert_xpath '//h2[@id="_asciidoctor_in_中文"][text()="Asciidoctor in 中文"]', output else assert_xpath '//h2[@id="_asciidoctor_in"][text()="Asciidoctor in 中文"]', output end end test 'with only multibyte characters' do input = <<-EOS == 视图 EOS output = render_embedded_string input assert_xpath '//h2[@id="_视图"][text()="视图"]', output end if ::RUBY_MIN_VERSION_1_9 test 'multiline syntax with only multibyte characters' do input = <<-EOS 视图 -- content 连接器 --- content EOS output = render_embedded_string input assert_xpath '//h2[@id="_视图"][text()="视图"]', output assert_xpath '//h2[@id="_连接器"][text()="连接器"]', output end if ::RUBY_MIN_VERSION_1_9 end context "level 2" do test "with multiline syntax" do assert_xpath "//h3[@id='_my_section'][text() = 'My Section']", render_string(":fragment:\nMy Section\n~~~~~~~~~~~") end test "with single line syntax" do assert_xpath "//h3[@id='_my_title'][text() = 'My Title']", render_string(":fragment:\n=== My Title") end end context "level 3" do test "with multiline syntax" do assert_xpath "//h4[@id='_my_section'][text() = 'My Section']", render_string(":fragment:\nMy Section\n^^^^^^^^^^") end test "with single line syntax" do assert_xpath "//h4[@id='_my_title'][text() = 'My Title']", render_string(":fragment:\n==== My Title") end end context "level 4" do test "with multiline syntax" do assert_xpath "//h5[@id='_my_section'][text() = 'My Section']", render_string(":fragment:\nMy Section\n++++++++++") end test "with single line syntax" do assert_xpath "//h5[@id='_my_title'][text() = 'My Title']", render_string(":fragment:\n===== My Title") end end context "level 5" do test "with single line 
syntax" do assert_xpath "//h6[@id='_my_title'][text() = 'My Title']", render_string(":fragment:\n====== My Title") end end context 'Markdown-style headings' do test 'single-line document title with leading marker' do input = <<-EOS # Document Title EOS output = render_string input assert_xpath "//h1[not(@id)][text() = 'Document Title']", output, 1 end test 'single-line document title with symmetric markers' do input = <<-EOS # Document Title # EOS output = render_string input assert_xpath "//h1[not(@id)][text() = 'Document Title']", output, 1 end test 'single-line section title with leading marker' do input = <<-EOS ## Section One blah blah EOS output = render_string input assert_xpath "//h2[@id='_section_one'][text() = 'Section One']", output, 1 end test 'single-line section title with symmetric markers' do input = <<-EOS ## Section One ## blah blah EOS output = render_string input assert_xpath "//h2[@id='_section_one'][text() = 'Section One']", output, 1 end end context 'Floating Title' do test 'should create floating title if style is float' do input = <<-EOS [float] = Independent Heading! not in section EOS output = render_embedded_string input assert_xpath '/h1[@id="_independent_heading"]', output, 1 assert_xpath '/h1[@class="float"]', output, 1 assert_xpath %(/h1[@class="float"][text()="Independent Heading!"]), output, 1 assert_xpath '/h1/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '/h1/following-sibling::*[@class="paragraph"]/p', output, 1 assert_xpath '/h1/following-sibling::*[@class="paragraph"]/p[text()="not in section"]', output, 1 end test 'should create floating title if style is discrete' do input = <<-EOS [discrete] === Independent Heading! not in section EOS output = render_embedded_string input assert_xpath '/h3', output, 1 assert_xpath '/h3[@id="_independent_heading"]', output, 1 assert_xpath '/h3[@class="discrete"]', output, 1 assert_xpath %(/h3[@class="discrete"][text()="Independent Heading!"]), output, 1 assert_xpath '/h3/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '/h3/following-sibling::*[@class="paragraph"]/p', output, 1 assert_xpath '/h3/following-sibling::*[@class="paragraph"]/p[text()="not in section"]', output, 1 end test 'should create floating title if style is float with shorthand role and id' do input = <<-EOS [float.independent#first] = Independent Heading! not in section EOS output = render_embedded_string input assert_xpath '/h1[@id="first"]', output, 1 assert_xpath '/h1[@class="float independent"]', output, 1 assert_xpath %(/h1[@class="float independent"][text()="Independent Heading!"]), output, 1 assert_xpath '/h1/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '/h1/following-sibling::*[@class="paragraph"]/p', output, 1 assert_xpath '/h1/following-sibling::*[@class="paragraph"]/p[text()="not in section"]', output, 1 end test 'should create floating title if style is discrete with shorthand role and id' do input = <<-EOS [discrete.independent#first] = Independent Heading! 
not in section EOS output = render_embedded_string input assert_xpath '/h1[@id="first"]', output, 1 assert_xpath '/h1[@class="discrete independent"]', output, 1 assert_xpath %(/h1[@class="discrete independent"][text()="Independent Heading!"]), output, 1 assert_xpath '/h1/following-sibling::*[@class="paragraph"]', output, 1 assert_xpath '/h1/following-sibling::*[@class="paragraph"]/p', output, 1 assert_xpath '/h1/following-sibling::*[@class="paragraph"]/p[text()="not in section"]', output, 1 end test 'floating title should be a block with context floating_title' do input = <<-EOS [float] === Independent Heading! not in section EOS doc = document_from_string input floatingtitle = doc.blocks.first assert floatingtitle.is_a?(Asciidoctor::Block) assert floatingtitle.context != :section assert_equal :floating_title, floatingtitle.context assert_equal '_independent_heading', floatingtitle.id assert doc.references[:ids].has_key?('_independent_heading') end test 'can assign explicit id to floating title' do input = <<-EOS [[unchained]] [float] === Independent Heading! not in section EOS doc = document_from_string input floating_title = doc.blocks.first assert_equal 'unchained', floating_title.id assert doc.references[:ids].has_key?('unchained') end test 'should not include floating title in toc' do input = <<-EOS :toc: == Section One [float] === Miss Independent == Section Two EOS output = render_string input assert_xpath '//*[@id="toc"]', output, 1 assert_xpath %(//*[@id="toc"]//a[contains(text(), "Section ")]), output, 2 assert_xpath %(//*[@id="toc"]//a[text()="Miss Independent"]), output, 0 end test 'should not set id on floating title if sectids attribute is unset' do input = <<-EOS [float] === Independent Heading! not in section EOS output = render_embedded_string input, :attributes => {'sectids' => nil} assert_xpath '/h3', output, 1 assert_xpath '/h3[@id="_independent_heading"]', output, 0 assert_xpath '/h3[@class="float"]', output, 1 end test 'should use explicit id for floating title if specified' do input = <<-EOS [[free]] [float] == Independent Heading! not in section EOS output = render_embedded_string input assert_xpath '/h2', output, 1 assert_xpath '/h2[@id="free"]', output, 1 assert_xpath '/h2[@class="float"]', output, 1 end test 'should add role to class attribute on floating title' do input = <<-EOS [float, role="isolated"] == Independent Heading! 
not in section EOS output = render_embedded_string input assert_xpath '/h2', output, 1 assert_xpath '/h2[@id="_independent_heading"]', output, 1 assert_xpath '/h2[@class="float isolated"]', output, 1 end test 'should use specified id and reftext when registering discrete section reference' do input = <<-EOS [[install,Install Procedure]] [discrete] == Install content EOS doc = document_from_string input reftext = doc.references[:ids]['install'] refute_nil reftext assert_equal 'Install Procedure', reftext end test 'should use specified reftext when registering discrete section reference' do input = <<-EOS [reftext="Install Procedure"] [discrete] == Install content EOS doc = document_from_string input reftext = doc.references[:ids]['_install'] refute_nil reftext assert_equal 'Install Procedure', reftext end end context 'Level offset' do test 'should print error if standalone document is included without level offset' do input = <<-EOS = Master Document Doc Writer text in master // begin simulated include::[] = Standalone Document :author: Junior Writer text in standalone // end simulated include::[] EOS output = warnings = nil redirect_streams do |out, err| output = render_string input warnings = err.string end assert !warnings.empty? assert_match(/only book doctypes can contain level 0 sections/, warnings) end test 'should add level offset to section level' do input = <<-EOS = Master Document Doc Writer Master document written by {author}. :leveloffset: 1 // begin simulated include::[] = Standalone Document :author: Junior Writer Standalone document written by {author}. == Section in Standalone Standalone section text. // end simulated include::[] :leveloffset!: == Section in Master Master section text. EOS output = warnings = nil redirect_streams do |out, err| output = render_string input warnings = err.string end assert warnings.empty? assert_match(/Master document written by Doc Writer/, output) assert_match(/Standalone document written by Junior Writer/, output) assert_xpath '//*[@class="sect1"]/h2[text() = "Standalone Document"]', output, 1 assert_xpath '//*[@class="sect2"]/h3[text() = "Section in Standalone"]', output, 1 assert_xpath '//*[@class="sect1"]/h2[text() = "Section in Master"]', output, 1 end test 'level offset should be added to floating title' do input = <<-EOS = Master Document Doc Writer :leveloffset: 1 [float] = Floating Title EOS output = render_string input assert_xpath '//h2[@class="float"][text() = "Floating Title"]', output, 1 end test 'should be able to reset level offset' do input = <<-EOS = Master Document Doc Writer Master preamble. :leveloffset: 1 = Standalone Document Standalone preamble. :leveloffset!: == Level 1 Section EOS output = render_string input assert_xpath '//*[@class = "sect1"]/h2[text() = "Standalone Document"]', output, 1 assert_xpath '//*[@class = "sect1"]/h2[text() = "Level 1 Section"]', output, 1 end test 'should add relative offset value to current leveloffset' do input = <<-EOS = Master Document Doc Writer Master preamble. 
:leveloffset: 1 = Chapter 1 content :leveloffset: +1 = Standalone Section content EOS output = render_string input assert_xpath '//*[@class = "sect1"]/h2[text() = "Chapter 1"]', output, 1 assert_xpath '//*[@class = "sect2"]/h3[text() = "Standalone Section"]', output, 1 end end context 'Section Numbering' do test 'should create section number with one entry for level 1' do sect1 = Asciidoctor::Section.new assert_equal '1.', sect1.sectnum end test 'should create section number with two entries for level 2' do sect1 = Asciidoctor::Section.new sect1_1 = Asciidoctor::Section.new(sect1) sect1 << sect1_1 assert_equal '1.1.', sect1_1.sectnum end test 'should create section number with three entries for level 3' do sect1 = Asciidoctor::Section.new sect1_1 = Asciidoctor::Section.new(sect1) sect1 << sect1_1 sect1_1_1 = Asciidoctor::Section.new(sect1_1) sect1_1 << sect1_1_1 assert_equal '1.1.1.', sect1_1_1.sectnum end test 'should create section number for second section in level' do sect1 = Asciidoctor::Section.new sect1_1 = Asciidoctor::Section.new(sect1) sect1 << sect1_1 sect1_2 = Asciidoctor::Section.new(sect1) sect1 << sect1_2 assert_equal '1.2.', sect1_2.sectnum end test 'sectnum should use specified delimiter and append string' do sect1 = Asciidoctor::Section.new sect1_1 = Asciidoctor::Section.new(sect1) sect1 << sect1_1 sect1_1_1 = Asciidoctor::Section.new(sect1_1) sect1_1 << sect1_1_1 assert_equal '1,1,1,', sect1_1_1.sectnum(',') assert_equal '1:1:1', sect1_1_1.sectnum(':', false) end test 'should render section numbers when sectnums attribute is set' do input = <<-EOS = Title :sectnums: == Section_1 text === Section_1_1 text ==== Section_1_1_1 text == Section_2 text === Section_2_1 text === Section_2_2 text EOS output = render_string input assert_xpath '//h2[@id="_section_1"][starts-with(text(), "1. ")]', output, 1 assert_xpath '//h3[@id="_section_1_1"][starts-with(text(), "1.1. ")]', output, 1 assert_xpath '//h4[@id="_section_1_1_1"][starts-with(text(), "1.1.1. ")]', output, 1 assert_xpath '//h2[@id="_section_2"][starts-with(text(), "2. ")]', output, 1 assert_xpath '//h3[@id="_section_2_1"][starts-with(text(), "2.1. ")]', output, 1 assert_xpath '//h3[@id="_section_2_2"][starts-with(text(), "2.2. ")]', output, 1 end test 'should render section numbers when numbered attribute is set' do input = <<-EOS = Title :numbered: == Section_1 text === Section_1_1 text ==== Section_1_1_1 text == Section_2 text === Section_2_1 text === Section_2_2 text EOS output = render_string input assert_xpath '//h2[@id="_section_1"][starts-with(text(), "1. ")]', output, 1 assert_xpath '//h3[@id="_section_1_1"][starts-with(text(), "1.1. ")]', output, 1 assert_xpath '//h4[@id="_section_1_1_1"][starts-with(text(), "1.1.1. ")]', output, 1 assert_xpath '//h2[@id="_section_2"][starts-with(text(), "2. ")]', output, 1 assert_xpath '//h3[@id="_section_2_1"][starts-with(text(), "2.1. ")]', output, 1 assert_xpath '//h3[@id="_section_2_2"][starts-with(text(), "2.2. 
")]', output, 1 end test 'blocks should have level' do input = <<-EOS = Title preamble == Section 1 paragraph === Section 1.1 paragraph EOS doc = document_from_string input assert_equal 0, doc.blocks[0].level assert_equal 1, doc.blocks[1].level assert_equal 1, doc.blocks[1].blocks[0].level assert_equal 2, doc.blocks[1].blocks[1].level assert_equal 2, doc.blocks[1].blocks[1].blocks[0].level end test 'section numbers should not increment when numbered attribute is turned off within document' do input = <<-EOS = Document Title :numbered: :numbered!: == Colophon Section == Another Colophon Section == Final Colophon Section :numbered: == Section One === Section One Subsection == Section Two == Section Three EOS output = render_string input assert_xpath '//h1[text()="Document Title"]', output, 1 assert_xpath '//h2[@id="_colophon_section"][text()="Colophon Section"]', output, 1 assert_xpath '//h2[@id="_another_colophon_section"][text()="Another Colophon Section"]', output, 1 assert_xpath '//h2[@id="_final_colophon_section"][text()="Final Colophon Section"]', output, 1 assert_xpath '//h2[@id="_section_one"][text()="1. Section One"]', output, 1 assert_xpath '//h3[@id="_section_one_subsection"][text()="1.1. Section One Subsection"]', output, 1 assert_xpath '//h2[@id="_section_two"][text()="2. Section Two"]', output, 1 assert_xpath '//h2[@id="_section_three"][text()="3. Section Three"]', output, 1 end test 'section numbers can be toggled even if numbered attribute is enable via the API' do input = <<-EOS = Document Title :numbered!: == Colophon Section == Another Colophon Section == Final Colophon Section :numbered: == Section One === Section One Subsection == Section Two == Section Three EOS output = render_string input, :attributes => {'numbered' => ''} assert_xpath '//h1[text()="Document Title"]', output, 1 assert_xpath '//h2[@id="_colophon_section"][text()="Colophon Section"]', output, 1 assert_xpath '//h2[@id="_another_colophon_section"][text()="Another Colophon Section"]', output, 1 assert_xpath '//h2[@id="_final_colophon_section"][text()="Final Colophon Section"]', output, 1 assert_xpath '//h2[@id="_section_one"][text()="1. Section One"]', output, 1 assert_xpath '//h3[@id="_section_one_subsection"][text()="1.1. Section One Subsection"]', output, 1 assert_xpath '//h2[@id="_section_two"][text()="2. Section Two"]', output, 1 assert_xpath '//h2[@id="_section_three"][text()="3. Section Three"]', output, 1 end test 'section numbers cannot be toggled even if numbered attribute is disabled via the API' do input = <<-EOS = Document Title :numbered!: == Colophon Section == Another Colophon Section == Final Colophon Section :numbered: == Section One === Section One Subsection == Section Two == Section Three EOS output = render_string input, :attributes => {'numbered!' 
=> ''} assert_xpath '//h1[text()="Document Title"]', output, 1 assert_xpath '//h2[@id="_colophon_section"][text()="Colophon Section"]', output, 1 assert_xpath '//h2[@id="_another_colophon_section"][text()="Another Colophon Section"]', output, 1 assert_xpath '//h2[@id="_final_colophon_section"][text()="Final Colophon Section"]', output, 1 assert_xpath '//h2[@id="_section_one"][text()="Section One"]', output, 1 assert_xpath '//h3[@id="_section_one_subsection"][text()="Section One Subsection"]', output, 1 assert_xpath '//h2[@id="_section_two"][text()="Section Two"]', output, 1 assert_xpath '//h2[@id="_section_three"][text()="Section Three"]', output, 1 end # NOTE AsciiDoc fails this test because it does not properly check for a None value when looking up the numbered attribute test 'section numbers should not increment until numbered attribute is turned back on' do input = <<-EOS = Document Title :numbered!: == Colophon Section == Another Colophon Section == Final Colophon Section :numbered: == Section One === Section One Subsection == Section Two == Section Three EOS output = render_string input assert_xpath '//h1[text()="Document Title"]', output, 1 assert_xpath '//h2[@id="_colophon_section"][text()="Colophon Section"]', output, 1 assert_xpath '//h2[@id="_another_colophon_section"][text()="Another Colophon Section"]', output, 1 assert_xpath '//h2[@id="_final_colophon_section"][text()="Final Colophon Section"]', output, 1 assert_xpath '//h2[@id="_section_one"][text()="1. Section One"]', output, 1 assert_xpath '//h3[@id="_section_one_subsection"][text()="1.1. Section One Subsection"]', output, 1 assert_xpath '//h2[@id="_section_two"][text()="2. Section Two"]', output, 1 assert_xpath '//h2[@id="_section_three"][text()="3. Section Three"]', output, 1 end test 'table with asciidoc content should not disable numbering of subsequent sections' do input = <<-EOS = Document Title :numbered: preamble == Section One |=== a|content |=== == Section Two content EOS output = render_string input assert_xpath '//h2[@id="_section_one"]', output, 1 assert_xpath '//h2[@id="_section_one"][text()="1. Section One"]', output, 1 assert_xpath '//h2[@id="_section_two"]', output, 1 assert_xpath '//h2[@id="_section_two"][text()="2. Section Two"]', output, 1 end test 'should not number parts when doctype is book' do input = <<-EOS = Document Title :doctype: book :numbered: = Part 1 == Chapter 1 content = Part 2 == Chapter 2 content EOS output = render_string input assert_xpath '(//h1)[1][text()="Document Title"]', output, 1 assert_xpath '(//h1)[2][text()="Part 1"]', output, 1 assert_xpath '(//h1)[3][text()="Part 2"]', output, 1 assert_xpath '(//h2)[1][text()="1. Chapter 1"]', output, 1 assert_xpath '(//h2)[2][text()="2. Chapter 2"]', output, 1 end test 'should number chapters sequentially even when divided into parts' do input = <<-EOS = Document Title :doctype: book :numbered: == Chapter 1 content = Part 1 == Chapter 2 content = Part 2 == Chapter 3 content == Chapter 4 content EOS result = render_string input (1..4).each do |num| assert_xpath %(//h2[@id="_chapter_#{num}"]), result, 1 assert_xpath %(//h2[@id="_chapter_#{num}"][text()="#{num}. Chapter #{num}"]), result, 1 end end end context 'Links and anchors' do test 'should include anchor if sectanchors document attribute is set' do input = <<-EOS == Installation Installation section. === Linux Linux installation instructions. 
EOS output = render_embedded_string input, :attributes => {'sectanchors' => ''} assert_xpath '/*[@class="sect1"]/h2[@id="_installation"]/a', output, 1 assert_xpath '/*[@class="sect1"]/h2[@id="_installation"]/a[@class="anchor"][@href="#_installation"]', output, 1 assert_xpath '/*[@class="sect1"]/h2[@id="_installation"]/a/following-sibling::text()="Installation"', output, true assert_xpath '//*[@class="sect2"]/h3[@id="_linux"]/a', output, 1 assert_xpath '//*[@class="sect2"]/h3[@id="_linux"]/a[@class="anchor"][@href="#_linux"]', output, 1 assert_xpath '//*[@class="sect2"]/h3[@id="_linux"]/a/following-sibling::text()="Linux"', output, true end test 'should link section if sectlinks document attribute is set' do input = <<-EOS == Installation Installation section. === Linux Linux installation instructions. EOS output = render_embedded_string input, :attributes => {'sectlinks' => ''} assert_xpath '/*[@class="sect1"]/h2[@id="_installation"]/a', output, 1 assert_xpath '/*[@class="sect1"]/h2[@id="_installation"]/a[@class="link"][@href="#_installation"]', output, 1 assert_xpath '/*[@class="sect1"]/h2[@id="_installation"]/a[text()="Installation"]', output, 1 assert_xpath '//*[@class="sect2"]/h3[@id="_linux"]/a', output, 1 assert_xpath '//*[@class="sect2"]/h3[@id="_linux"]/a[@class="link"][@href="#_linux"]', output, 1 assert_xpath '//*[@class="sect2"]/h3[@id="_linux"]/a[text()="Linux"]', output, 1 end end context 'Special sections' do test 'should assign sectname and caption to appendix section' do input = <<-EOS [appendix] == Attribute Options Details EOS output = block_from_string input assert_equal 'appendix', output.sectname assert_equal 'Appendix A: ', output.caption end test 'should render appendix title prefixed with caption' do input = <<-EOS [appendix] == Attribute Options Details EOS output = render_embedded_string input assert_xpath '//h2[text()="Appendix A: Attribute Options"]', output, 1 end test 'should prefix appendix title by label and letter only when numbered is enabled' do input = <<-EOS :numbered: [appendix] == Attribute Options Details EOS output = render_embedded_string input assert_xpath '//h2[text()="Appendix A: Attribute Options"]', output, 1 end test 'should use custom appendix caption if specified' do input = <<-EOS :appendix-caption: App [appendix] == Attribute Options Details EOS output = render_embedded_string input assert_xpath '//h2[text()="App A: Attribute Options"]', output, 1 end test 'should only assign letter to appendix when numbered is enabled and appendix caption is empty' do input = <<-EOS :numbered: :appendix-caption: [appendix] == Attribute Options Details EOS output = render_embedded_string input assert_xpath '//h2[text()="A. Attribute Options"]', output, 1 end test 'should increment appendix number for each appendix section' do input = <<-EOS [appendix] == Attribute Options Details [appendix] == Migration Details EOS output = render_embedded_string input assert_xpath '(//h2)[1][text()="Appendix A: Attribute Options"]', output, 1 assert_xpath '(//h2)[2][text()="Appendix B: Migration"]', output, 1 end test 'should continue numbering after appendix' do input = <<-EOS :numbered: == First Section content [appendix] == Attribute Options content == Migration content EOS output = render_embedded_string input assert_xpath '(//h2)[1][text()="1. First Section"]', output, 1 assert_xpath '(//h2)[2][text()="Appendix A: Attribute Options"]', output, 1 assert_xpath '(//h2)[3][text()="2. 
Migration"]', output, 1 end test 'should number appendix subsections using appendix letter' do input = <<-EOS :numbered: [appendix] == Attribute Options Details === Optional Attributes Details EOS output = render_embedded_string input assert_xpath '(//h2)[1][text()="Appendix A: Attribute Options"]', output, 1 assert_xpath '(//h3)[1][text()="A.1. Optional Attributes"]', output, 1 end test 'should not number level 4 section by default' do input = <<-EOS :numbered: == Level_1 === Level_2 ==== Level_3 ===== Level_4 text EOS output = render_embedded_string input assert_xpath '//h5', output, 1 assert_xpath '//h5[text()="Level_4"]', output, 1 end test 'should only number levels up to value defined by sectnumlevels attribute' do input = <<-EOS :numbered: :sectnumlevels: 2 == Level_1 === Level_2 ==== Level_3 ===== Level_4 text EOS output = render_embedded_string input assert_xpath '//h2', output, 1 assert_xpath '//h2[text()="1. Level_1"]', output, 1 assert_xpath '//h3', output, 1 assert_xpath '//h3[text()="1.1. Level_2"]', output, 1 assert_xpath '//h4', output, 1 assert_xpath '//h4[text()="Level_3"]', output, 1 assert_xpath '//h5', output, 1 assert_xpath '//h5[text()="Level_4"]', output, 1 end test 'should not number sections or subsections in regions where numbered is off' do input = <<-EOS :numbered: == Section One :numbered!: [appendix] == Attribute Options Details [appendix] == Migration Details === Gotchas Details [glossary] == Glossary Terms EOS output = render_embedded_string input assert_xpath '(//h2)[1][text()="1. Section One"]', output, 1 assert_xpath '(//h2)[2][text()="Appendix A: Attribute Options"]', output, 1 assert_xpath '(//h2)[3][text()="Appendix B: Migration"]', output, 1 assert_xpath '(//h3)[1][text()="Gotchas"]', output, 1 assert_xpath '(//h2)[4][text()="Glossary"]', output, 1 end test 'should not number sections or subsections in toc in regions where numbered is off' do input = <<-EOS :numbered: :toc: == Section One :numbered!: [appendix] == Attribute Options Details [appendix] == Migration Details === Gotchas Details [glossary] == Glossary Terms EOS output = render_string input assert_xpath '//*[@id="toc"]/ul//li/a[text()="1. Section One"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="Appendix A: Attribute Options"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="Appendix B: Migration"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="Gotchas"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="Glossary"]', output, 1 end test 'should only number sections in toc up to value defined by sectnumlevels attribute' do input = <<-EOS :numbered: :toc: :sectnumlevels: 2 :toclevels: 3 == Level 1 === Level 2 ==== Level 3 EOS output = render_string input assert_xpath '//*[@id="toc"]//a[@href="#_level_1"][text()="1. Level 1"]', output, 1 assert_xpath '//*[@id="toc"]//a[@href="#_level_2"][text()="1.1. Level 2"]', output, 1 assert_xpath '//*[@id="toc"]//a[@href="#_level_3"][text()="Level 3"]', output, 1 end # reenable once we have :specialnumbered!: implemented =begin test 'should not number special sections or subsections' do input = <<-EOS :numbered: :specialnumbered!: == Section One [appendix] == Attribute Options Details [appendix] == Migration Details === Gotchas Details [glossary] == Glossary Terms EOS output = render_embedded_string input assert_xpath '(//h2)[1][text()="1. 
Section One"]', output, 1 assert_xpath '(//h2)[2][text()="Appendix A: Attribute Options"]', output, 1 assert_xpath '(//h2)[3][text()="Appendix B: Migration"]', output, 1 assert_xpath '(//h3)[1][text()="Gotchas"]', output, 1 assert_xpath '(//h2)[4][text()="Glossary"]', output, 1 end test 'should not number special sections or subsections in toc' do input = <<-EOS :numbered: :specialnumbered!: :toc: == Section One [appendix] == Attribute Options Details [appendix] == Migration Details === Gotchas Details [glossary] == Glossary Terms EOS output = render_string input assert_xpath '//*[@id="toc"]/ul//li/a[text()="1. Section One"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="Appendix A: Attribute Options"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="Appendix B: Migration"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="Gotchas"]', output, 1 assert_xpath '//*[@id="toc"]/ul//li/a[text()="Glossary"]', output, 1 end =end test 'level 0 special sections in multipart book should be rendered as level 1' do input = <<-EOS = Multipart Book Doc Writer :doctype: book [preface] = Preface Preface text [appendix] = Appendix Appendix text EOS output = render_string input assert_xpath '//h2[@id = "_preface"]', output, 1 assert_xpath '//h2[@id = "_appendix"]', output, 1 end test 'should output docbook elements that coorespond to special sections in book doctype' do input = <<-EOS = Multipart Book :doctype: book :idprefix: [abstract] = Abstract Title Normal chapter (no abstract in book) [dedication] = Dedication Title Dedication content [preface] = Preface Title Preface content === Preface sub-section Preface subsection content = Part 1 [partintro] .Part intro title Part intro content == Chapter 1 blah blah == Chapter 2 blah blah = Part 2 [partintro] blah blah == Chapter 3 blah blah == Chapter 4 blah blah [appendix] = Appendix Title Appendix content === Appendix sub-section Appendix sub-section content [bibliography] = Bibliography Title Bibliography content [glossary] = Glossary Title Glossary content [colophon] = Colophon Title Colophon content [index] = Index Title EOS output = render_embedded_string input, :backend => 'docbook45' assert_xpath '/chapter[@id="abstract_title"]', output, 1 assert_xpath '/chapter[@id="abstract_title"]/title[text()="Abstract Title"]', output, 1 assert_xpath '/chapter/following-sibling::dedication[@id="dedication_title"]', output, 1 assert_xpath '/chapter/following-sibling::dedication[@id="dedication_title"]/title[text()="Dedication Title"]', output, 1 assert_xpath '/dedication/following-sibling::preface[@id="preface_title"]', output, 1 assert_xpath '/dedication/following-sibling::preface[@id="preface_title"]/title[text()="Preface Title"]', output, 1 assert_xpath '/preface/section[@id="preface_sub_section"]', output, 1 assert_xpath '/preface/section[@id="preface_sub_section"]/title[text()="Preface sub-section"]', output, 1 assert_xpath '/preface/following-sibling::part[@id="part_1"]', output, 1 assert_xpath '/preface/following-sibling::part[@id="part_1"]/title[text()="Part 1"]', output, 1 assert_xpath '/part[@id="part_1"]/partintro', output, 1 assert_xpath '/part[@id="part_1"]/partintro/title[text()="Part intro title"]', output, 1 assert_xpath '/part[@id="part_1"]/partintro/following-sibling::chapter[@id="chapter_1"]', output, 1 assert_xpath '/part[@id="part_1"]/partintro/following-sibling::chapter[@id="chapter_1"]/title[text()="Chapter 1"]', output, 1 assert_xpath '(/part)[2]/following-sibling::appendix[@id="appendix_title"]', output, 1 
assert_xpath '(/part)[2]/following-sibling::appendix[@id="appendix_title"]/title[text()="Appendix Title"]', output, 1 assert_xpath '/appendix/section[@id="appendix_sub_section"]', output, 1 assert_xpath '/appendix/section[@id="appendix_sub_section"]/title[text()="Appendix sub-section"]', output, 1 assert_xpath '/appendix/following-sibling::bibliography[@id="bibliography_title"]', output, 1 assert_xpath '/appendix/following-sibling::bibliography[@id="bibliography_title"]/title[text()="Bibliography Title"]', output, 1 assert_xpath '/bibliography/following-sibling::glossary[@id="glossary_title"]', output, 1 assert_xpath '/bibliography/following-sibling::glossary[@id="glossary_title"]/title[text()="Glossary Title"]', output, 1 assert_xpath '/glossary/following-sibling::colophon[@id="colophon_title"]', output, 1 assert_xpath '/glossary/following-sibling::colophon[@id="colophon_title"]/title[text()="Colophon Title"]', output, 1 assert_xpath '/colophon/following-sibling::index[@id="index_title"]', output, 1 assert_xpath '/colophon/following-sibling::index[@id="index_title"]/title[text()="Index Title"]', output, 1 end test 'abstract section maps to abstract element in docbook for article doctype' do input = <<-EOS = Article :idprefix: [abstract] == Abstract Title Abstract content EOS output = render_embedded_string input, :backend => 'docbook45' assert_xpath '/abstract[@id="abstract_title"]', output, 1 assert_xpath '/abstract[@id="abstract_title"]/title[text()="Abstract Title"]', output, 1 end test 'should allow a special section to be nested at arbitrary depth in DocBook output' do input = <<-EOS = Document Title :doctype: book == Glossaries [glossary] === Glossary A Glossaries are optional. Glossaries entries are an example of a style of AsciiDoc labeled lists. [glossary] A glossary term:: The corresponding definition. A second glossary term:: The corresponding definition. EOS output = render_string input, :backend => :docbook assert_xpath '//glossary', output, 1 assert_xpath '//chapter/glossary', output, 1 assert_xpath '//glossary/title[text()="Glossary A"]', output, 1 assert_xpath '//glossary/glossentry', output, 2 end end context "heading patterns in blocks" do test "should not interpret a listing block as a heading" do input = <<-EOS Section ------- ---- code ---- fin. EOS output = render_string input assert_xpath "//h2", output, 1 end test "should not interpret an open block as a heading" do input = <<-EOS Section ------- -- ha -- fin. EOS output = render_string input assert_xpath "//h2", output, 1 end test "should not interpret an attribute list as a heading" do input = <<-EOS Section ======= preamble [TIP] ==== This should be a tip, not a heading. ==== EOS output = render_string input assert_xpath "//*[@class='admonitionblock tip']//p[text() = 'This should be a tip, not a heading.']", output, 1 end test "should not match a heading in a labeled list" do input = <<-EOS Section ------- term1:: + ---- list = [1, 2, 3]; ---- term2:: == not a heading term3:: def // fin. EOS output = render_string input assert_xpath "//h2", output, 1 assert_xpath "//dl", output, 1 end test "should not match a heading in a bulleted list" do input = <<-EOS Section ------- * first + ---- list = [1, 2, 3]; ---- + * second == not a heading * third fin. 
EOS output = render_string input assert_xpath "//h2", output, 1 assert_xpath "//ul", output, 1 end test "should not match a heading in a block" do input = <<-EOS ==== == not a heading ==== EOS output = render_string input assert_xpath "//h2", output, 0 assert_xpath "//*[@class='exampleblock']//p[text() = '== not a heading']", output, 1 end end context 'Table of Contents' do test 'should render unnumbered table of contents in header if toc attribute is set' do input = <<-EOS = Article :toc: == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... === Interlude While they were waiting... == Section Three That's all she wrote! EOS output = render_string input assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/*[@id="toctitle"][text()="Table of Contents"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul[@class="sectlevel1"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]//ul', output, 2 assert_xpath '//*[@id="header"]//*[@id="toc"]//li', output, 4 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li[1]/a[@href="#_section_one"][text()="Section One"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li/ul', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li/ul[@class="sectlevel2"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li/ul/li', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li/ul/li/a[@href="#_interlude"][text()="Interlude"]', output, 1 assert_xpath '((//*[@id="header"]//*[@id="toc"]/ul)[1]/li)[3]/a[@href="#_section_three"][text()="Section Three"]', output, 1 end test 'should render numbered table of contents in header if toc and numbered attributes are set' do input = <<-EOS = Article :toc: :numbered: == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... === Interlude While they were waiting... == Section Three That's all she wrote! EOS output = render_string input assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/*[@id="toctitle"][text()="Table of Contents"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]//ul', output, 2 assert_xpath '//*[@id="header"]//*[@id="toc"]//li', output, 4 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li[1]/a[@href="#_section_one"][text()="1. Section One"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li/ul/li', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li/ul/li/a[@href="#_interlude"][text()="2.1. Interlude"]', output, 1 assert_xpath '((//*[@id="header"]//*[@id="toc"]/ul)[1]/li)[3]/a[@href="#_section_three"][text()="3. Section Three"]', output, 1 end test 'should render a table of contents that honors numbered setting at position of section in document' do input = <<-EOS = Article :toc: :numbered: == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... === Interlude While they were waiting... :numbered!: == Section Three That's all she wrote! 
EOS output = render_string input assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/*[@id="toctitle"][text()="Table of Contents"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]//ul', output, 2 assert_xpath '//*[@id="header"]//*[@id="toc"]//li', output, 4 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li[1]/a[@href="#_section_one"][text()="1. Section One"]', output, 1 assert_xpath '((//*[@id="header"]//*[@id="toc"]/ul)[1]/li)[3]/a[@href="#_section_three"][text()="Section Three"]', output, 1 end test 'should not number parts in table of contents for book doctype when numbered attribute is set' do input = <<-EOS = Book :doctype: book :toc: :numbered: = Part 1 == First Section of Part 1 blah == Second Section of Part 1 blah = Part 2 == First Section of Part 2 blah EOS output = render_string input assert_xpath '//*[@id="toc"]', output, 1 assert_xpath '//*[@id="toc"]/ul', output, 1 assert_xpath '//*[@id="toc"]/ul[@class="sectlevel0"]', output, 1 assert_xpath '//*[@id="toc"]/ul[@class="sectlevel0"]/li', output, 2 assert_xpath '(//*[@id="toc"]/ul[@class="sectlevel0"]/li)[1]/a[text()="Part 1"]', output, 1 assert_xpath '(//*[@id="toc"]/ul[@class="sectlevel0"]/li)[2]/a[text()="Part 2"]', output, 1 assert_xpath '(//*[@id="toc"]/ul[@class="sectlevel0"]/li)[1]/ul', output, 1 assert_xpath '(//*[@id="toc"]/ul[@class="sectlevel0"]/li)[1]/ul[@class="sectlevel1"]', output, 1 assert_xpath '(//*[@id="toc"]/ul[@class="sectlevel0"]/li)[1]/ul/li', output, 2 assert_xpath '((//*[@id="toc"]/ul[@class="sectlevel0"]/li)[1]/ul/li)[1]/a[text()="1. First Section of Part 1"]', output, 1 end test 'should render table of contents in header if toc2 attribute is set' do input = <<-EOS = Article :toc2: :numbered: == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... EOS output = render_string input assert_xpath '//body[@class="article toc2 toc-left"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc2"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li[1]/a[@href="#_section_one"][text()="1. Section One"]', output, 1 end test 'should set toc position if toc attribute is set to position' do input = <<-EOS = Article :toc: > :numbered: == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... EOS output = render_string input assert_xpath '//body[@class="article toc2 toc-right"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc2"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li[1]/a[@href="#_section_one"][text()="1. Section One"]', output, 1 end test 'should set toc position if toc and toc-position attributes are set' do input = <<-EOS = Article :toc: :toc-position: right :numbered: == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... EOS output = render_string input assert_xpath '//body[@class="article toc2 toc-right"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc2"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li[1]/a[@href="#_section_one"][text()="1. Section One"]', output, 1 end test 'should set toc position if toc2 and toc-position attribute are set' do input = <<-EOS = Article :toc2: :toc-position: right :numbered: == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... 
EOS output = render_string input assert_xpath '//body[@class="article toc2 toc-right"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc2"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li[1]/a[@href="#_section_one"][text()="1. Section One"]', output, 1 end test 'should set toc position if toc attribute is set to direction' do input = <<-EOS = Article :toc: right :numbered: == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... EOS output = render_string input assert_xpath '//body[@class="article toc2 toc-right"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc2"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li[1]/a[@href="#_section_one"][text()="1. Section One"]', output, 1 end test 'should set toc placement to preamble if toc attribute is set to preamble' do input = <<-EOS = Article :toc: preamble Yada yada == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... EOS output = render_string input assert_css '#preamble #toc', output, 1 assert_css '#preamble .sectionbody + #toc', output, 1 end test 'should use document attributes toc-class, toc-title and toclevels to create toc' do input = <<-EOS = Article :toc: :toc-title: Contents :toc-class: toc2 :toclevels: 1 == Section 1 === Section 1.1 ==== Section 1.1.1 ==== Section 1.1.2 === Section 1.2 == Section 2 Fin. EOS output = render_string input assert_css '#header #toc', output, 1 assert_css '#header #toc.toc2', output, 1 assert_css '#header #toc li', output, 2 assert_css '#header #toc #toctitle', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/*[@id="toctitle"][text()="Contents"]', output, 1 end test 'should not render table of contents if toc-placement attribute is unset' do input = <<-EOS = Article :toc: :toc-placement!: == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... EOS output = render_string input assert_xpath '//*[@id="toc"]', output, 0 end test 'should render table of contents at location of toc macro' do input = <<-EOS = Article :toc: :toc-placement: macro Once upon a time... toc::[] == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... EOS output = render_string input assert_css '#preamble #toc', output, 1 assert_css '#preamble .paragraph + #toc', output, 1 end test 'should render table of contents at location of toc macro in embedded document' do input = <<-EOS = Article :toc: :toc-placement: macro Once upon a time... toc::[] == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... EOS output = render_string input, :header_footer => false assert_css '#preamble:root #toc', output, 1 assert_css '#preamble:root .paragraph + #toc', output, 1 end test 'should render table of contents at default location in embedded document if toc attribute is set' do input = <<-EOS = Article :showtitle: :toc: Once upon a time... == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... EOS output = render_string input, :header_footer => false assert_css 'h1:root', output, 1 assert_css 'h1:root + #toc:root', output, 1 assert_css 'h1:root + #toc:root + #preamble:root', output, 1 end test 'should not activate toc macro if toc-placement is not set' do input = <<-EOS = Article :toc: Once upon a time... toc::[] == Section One It was a dark and stormy night... 
== Section Two They couldn't believe their eyes when... EOS output = render_string input assert_css '#toc', output, 1 assert_css '#toctitle', output, 1 assert_css '.toc', output, 1 assert_css '#content .toc', output, 0 end test 'should only output toc at toc macro if toc is macro' do input = <<-EOS = Article :toc: macro Once upon a time... toc::[] == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... EOS output = render_string input assert_css '#toc', output, 1 assert_css '#toctitle', output, 1 assert_css '.toc', output, 1 assert_css '#content .toc', output, 1 end test 'should use global attributes for toc-title, toc-class and toclevels for toc macro' do input = <<-EOS = Article :toc: :toc-placement: macro :toc-title: Contents :toc-class: contents :toclevels: 1 Preamble. toc::[] == Section 1 === Section 1.1 ==== Section 1.1.1 ==== Section 1.1.2 === Section 1.2 == Section 2 Fin. EOS output = render_string input assert_css '#toc', output, 1 assert_css '#toctitle', output, 1 assert_css '#preamble #toc', output, 1 assert_css '#preamble #toc.contents', output, 1 assert_xpath '//*[@id="toc"]/*[@class="title"][text() = "Contents"]', output, 1 assert_css '#toc li', output, 2 assert_xpath '(//*[@id="toc"]//li)[1]/a[text() = "Section 1"]', output, 1 assert_xpath '(//*[@id="toc"]//li)[2]/a[text() = "Section 2"]', output, 1 end test 'should honor id, title, role and level attributes on toc macro' do input = <<-EOS = Article :toc: :toc-placement: macro :toc-title: Ignored :toc-class: ignored :toclevels: 5 :tocdepth: 1 Preamble. [[contents]] [role="contents"] .Contents toc::[levels={tocdepth}] == Section 1 === Section 1.1 ==== Section 1.1.1 ==== Section 1.1.2 === Section 1.2 == Section 2 Fin. EOS output = render_string input assert_css '#toc', output, 0 assert_css '#toctitle', output, 0 assert_css '#preamble #contents', output, 1 assert_css '#preamble #contents.contents', output, 1 assert_xpath '//*[@id="contents"]/*[@class="title"][text() = "Contents"]', output, 1 assert_css '#contents li', output, 2 assert_xpath '(//*[@id="contents"]//li)[1]/a[text() = "Section 1"]', output, 1 assert_xpath '(//*[@id="contents"]//li)[2]/a[text() = "Section 2"]', output, 1 end test 'child toc levels should not have additional bullet at parent level in html' do input = <<-EOS = Article :toc: == Section One It was a dark and stormy night... == Section Two They couldn't believe their eyes when... === Interlude While they were waiting... == Section Three That's all she wrote! 
EOS output = render_string input assert_xpath '//*[@id="header"]//*[@id="toc"][@class="toc"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/*[@id="toctitle"][text()="Table of Contents"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]//ul', output, 2 assert_xpath '//*[@id="header"]//*[@id="toc"]//li', output, 4 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li[2]/a[@href="#_section_two"][text()="Section Two"]', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li/ul/li', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li[2]/ul/li', output, 1 assert_xpath '//*[@id="header"]//*[@id="toc"]/ul/li/ul/li/a[@href="#_interlude"][text()="Interlude"]', output, 1 assert_xpath '((//*[@id="header"]//*[@id="toc"]/ul)[1]/li)[3]/a[@href="#_section_three"][text()="Section Three"]', output, 1 end test 'should not display a table of contents if document has no sections' do input_src = <<-EOS = Document Title :toc: toc::[] This document has no sections. It only has content. EOS ['', 'left', 'preamble', 'macro'].each do |placement| input = input_src.gsub(':toc:', "\\& #{placement}") output = render_string input assert_css '#toctitle', output, 0 end end end context 'article doctype' do test 'should create sections only in docbook backend' do input = <<-EOS = Article Doc Writer == Section 1 The adventure. === Subsection One It was a dark and stormy night... === Subsection Two They couldn't believe their eyes when... == Section 2 The return. === Subsection Three While they were returning... === Subsection Four That's all she wrote! EOS output = render_string input, :backend => 'docbook' assert_xpath '//part', output, 0 assert_xpath '//chapter', output, 0 assert_xpath '/article/section', output, 2 assert_xpath '/article/section[1]/title[text() = "Section 1"]', output, 1 assert_xpath '/article/section[2]/title[text() = "Section 2"]', output, 1 assert_xpath '/article/section/section', output, 4 assert_xpath '/article/section[1]/section[1]/title[text() = "Subsection One"]', output, 1 assert_xpath '/article/section[2]/section[1]/title[text() = "Subsection Three"]', output, 1 end end context 'book doctype' do test 'document title with level 0 headings' do input = <<-EOS = Book Doc Writer :doctype: book = Chapter One [partintro] It was a dark and stormy night... == Scene One Someone's gonna get axed. = Chapter Two [partintro] They couldn't believe their eyes when... == Interlude While they were waiting... = Chapter Three == Scene One That's all she wrote! 
EOS output = render_string(input) assert_css 'body.book', output, 1 assert_css 'h1', output, 4 assert_css '#header h1', output, 1 assert_css '#content h1', output, 3 assert_css '#content h1.sect0', output, 3 assert_css 'h2', output, 3 assert_css '#content h2', output, 3 assert_xpath '//h1[@id="_chapter_one"][text() = "Chapter One"]', output, 1 assert_xpath '//h1[@id="_chapter_two"][text() = "Chapter Two"]', output, 1 assert_xpath '//h1[@id="_chapter_three"][text() = "Chapter Three"]', output, 1 end test 'should add partintro style to child paragraph of part' do input = <<-EOS = Book :doctype: book = Part 1 part intro == Chapter 1 EOS doc = document_from_string input partintro = doc.blocks.first.blocks.first assert_equal :open, partintro.context assert_equal 'partintro', partintro.style end test 'should add partintro style to child open block of part' do input = <<-EOS = Book :doctype: book = Part 1 -- part intro -- == Chapter 1 EOS doc = document_from_string input partintro = doc.blocks.first.blocks.first assert_equal :open, partintro.context assert_equal 'partintro', partintro.style end test 'should wrap child paragraphs of part in partintro open block' do input = <<-EOS = Book :doctype: book = Part 1 part intro more part intro == Chapter 1 EOS doc = document_from_string input partintro = doc.blocks.first.blocks.first assert_equal :open, partintro.context assert_equal 'partintro', partintro.style assert_equal 2, partintro.blocks.size assert_equal :paragraph, partintro.blocks[0].context assert_equal :paragraph, partintro.blocks[1].context end test 'should warn if part has no sections' do input = <<-EOS = Book :doctype: book = Part 1 [partintro] intro EOS doc = warnings = nil redirect_streams do |out, err| doc = document_from_string input warnings = err.string end refute_nil warnings assert !warnings.empty? assert_match(/ERROR:.*section/, warnings) end test 'should create parts and chapters in docbook backend' do input = <<-EOS = Book Doc Writer :doctype: book = Part 1 [partintro] The adventure. == Chapter One It was a dark and stormy night... == Chapter Two They couldn't believe their eyes when... = Part 2 [partintro] The return. == Chapter Three While they were returning... == Chapter Four That's all she wrote! EOS output = render_string input, :backend => 'docbook' assert_xpath '//chapter/chapter', output, 0 assert_xpath '/book/part', output, 2 assert_xpath '/book/part[1]/title[text() = "Part 1"]', output, 1 assert_xpath '/book/part[2]/title[text() = "Part 2"]', output, 1 assert_xpath '/book/part/chapter', output, 4 assert_xpath '/book/part[1]/chapter[1]/title[text() = "Chapter One"]', output, 1 assert_xpath '/book/part[2]/chapter[1]/title[text() = "Chapter Three"]', output, 1 end test 'subsections in preface and appendix should start at level 2' do input = <<-EOS = Multipart Book Doc Writer :doctype: book [preface] = Preface Preface content === Preface subsection Preface subsection content = Part 1 .Part intro title [partintro] Part intro content == Chapter 1 content [appendix] = Appendix Appendix content === Appendix subsection Appendix subsection content EOS output = warnings = nil redirect_streams do |out, err| output = render_string input, :backend => 'docbook' warnings = err.string end assert warnings.empty? 
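      # Illustrative aside (not part of the original test suite): outside the
      # redirect_streams helper used in these tests, Asciidoctor 1.5.x reports
      # problems such as the "part has no sections" error above by writing to
      # $stderr. A minimal sketch of capturing that output from plain Ruby,
      # assuming `input` holds AsciiDoc source as in the tests:
      #
      #   require 'stringio'
      #   orig_stderr, $stderr = $stderr, StringIO.new
      #   Asciidoctor.load input, :safe => :safe
      #   warning_text = $stderr.string
      #   $stderr = orig_stderr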
      assert_xpath '/book/preface', output, 1
      assert_xpath '/book/preface/section', output, 1
      assert_xpath '/book/part', output, 1
      assert_xpath '/book/part/partintro', output, 1
      assert_xpath '/book/part/partintro/title', output, 1
      assert_xpath '/book/part/partintro/simpara', output, 1
      assert_xpath '/book/appendix', output, 1
      assert_xpath '/book/appendix/section', output, 1
    end
  end
end
asciidoctor-1.5.5/test/substitutions_test.rb000066400000000000000000002553261277513741400213550ustar00rootroot00000000000000# encoding: UTF-8
unless defined? ASCIIDOCTOR_PROJECT_DIR
  $: << File.dirname(__FILE__); $:.uniq!
  require 'test_helper'
end

# TODO
# - test negatives
# - test role on every quote type
context 'Substitutions' do
  context 'Dispatcher' do
    test 'apply normal substitutions' do
      para = block_from_string("[blue]_http://asciidoc.org[AsciiDoc]_ & [red]*Ruby*\n§ Making +++documentation+++ together +\nsince (C) {inception_year}.")
      para.document.attributes['inception_year'] = '2012'
      result = para.apply_normal_subs(para.lines)
      assert_equal %{AsciiDoc & Ruby\n§ Making documentation together
    \nsince © 2012.}, result end end context 'Quotes' do BACKSLASH = '\\' test 'single-line double-quoted string' do para = block_from_string(%q{``a few quoted words''}, :attributes => {'compat-mode' => ''}) assert_equal '“a few quoted words”', para.sub_quotes(para.source) para = block_from_string(%q{"`a few quoted words`"}) assert_equal '“a few quoted words”', para.sub_quotes(para.source) end test 'escaped single-line double-quoted string' do para = block_from_string %(#{BACKSLASH}``a few quoted words''), :attributes => {'compat-mode' => ''} assert_equal %q(‘`a few quoted words’'), para.sub_quotes(para.source) para = block_from_string %(#{BACKSLASH * 2}``a few quoted words''), :attributes => {'compat-mode' => ''} assert_equal %q(``a few quoted words''), para.sub_quotes(para.source) para = block_from_string(%(#{BACKSLASH}"`a few quoted words`")) assert_equal %q("`a few quoted words`"), para.sub_quotes(para.source) para = block_from_string(%(#{BACKSLASH * 2}"`a few quoted words`")) assert_equal %(#{BACKSLASH}"`a few quoted words`"), para.sub_quotes(para.source) end test 'multi-line double-quoted string' do para = block_from_string(%Q{``a few\nquoted words''}, :attributes => {'compat-mode' => ''}) assert_equal "“a few\nquoted words”", para.sub_quotes(para.source) para = block_from_string(%Q{"`a few\nquoted words`"}) assert_equal "“a few\nquoted words”", para.sub_quotes(para.source) end test 'double-quoted string with inline single quote' do para = block_from_string(%q{``Here's Johnny!''}, :attributes => {'compat-mode' => ''}) assert_equal %q{“Here's Johnny!”}, para.sub_quotes(para.source) para = block_from_string(%q{"`Here's Johnny!`"}) assert_equal %q{“Here's Johnny!”}, para.sub_quotes(para.source) end test 'double-quoted string with inline backquote' do para = block_from_string(%q{``Here`s Johnny!''}, :attributes => {'compat-mode' => ''}) assert_equal %q{“Here`s Johnny!”}, para.sub_quotes(para.source) para = block_from_string(%q{"`Here`s Johnny!`"}) assert_equal %q{“Here`s Johnny!”}, para.sub_quotes(para.source) end test 'double-quoted string around monospaced text' do para = block_from_string(%q("``E=mc^2^` is the solution!`")) assert_equal %q(“`E=mc2` is the solution!”), para.apply_subs(para.source); para = block_from_string(%q("```E=mc^2^`` is the solution!`")) assert_equal %q(“E=mc2 is the solution!”), para.apply_subs(para.source); end test 'single-line single-quoted string' do para = block_from_string(%q{`a few quoted words'}, :attributes => {'compat-mode' => ''}) assert_equal '‘a few quoted words’', para.sub_quotes(para.source) para = block_from_string(%q{'`a few quoted words`'}) assert_equal '‘a few quoted words’', para.sub_quotes(para.source) end test 'escaped single-line single-quoted string' do para = block_from_string(%(#{BACKSLASH}`a few quoted words'), :attributes => {'compat-mode' => ''}) assert_equal %(`a few quoted words'), para.sub_quotes(para.source) para = block_from_string(%(#{BACKSLASH}'`a few quoted words`')) assert_equal %('`a few quoted words`'), para.sub_quotes(para.source) end test 'multi-line single-quoted string' do para = block_from_string(%Q{`a few\nquoted words'}, :attributes => {'compat-mode' => ''}) assert_equal "‘a few\nquoted words’", para.sub_quotes(para.source) para = block_from_string(%Q{'`a few\nquoted words`'}) assert_equal "‘a few\nquoted words’", para.sub_quotes(para.source) end test 'single-quoted string with inline single quote' do para = block_from_string(%q{`That isn't what I did.'}, :attributes => {'compat-mode' => ''}) assert_equal %q{‘That 
isn't what I did.’}, para.sub_quotes(para.source) para = block_from_string(%q{'`That isn't what I did.`'}) assert_equal %q{‘That isn't what I did.’}, para.sub_quotes(para.source) end test 'single-quoted string with inline backquote' do para = block_from_string(%q{`Here`s Johnny!'}, :attributes => {'compat-mode' => ''}) assert_equal %q{‘Here`s Johnny!’}, para.sub_quotes(para.source) para = block_from_string(%q{'`Here`s Johnny!`'}) assert_equal %q{‘Here`s Johnny!’}, para.sub_quotes(para.source) end test 'single-line constrained marked string' do #para = block_from_string(%q{#a few words#}, :attributes => {'compat-mode' => ''}) #assert_equal 'a few words', para.sub_quotes(para.source) para = block_from_string(%q{#a few words#}) assert_equal 'a few words', para.sub_quotes(para.source) end test 'escaped single-line constrained marked string' do para = block_from_string(%(#{BACKSLASH}#a few words#)) assert_equal '#a few words#', para.sub_quotes(para.source) end test 'multi-line constrained marked string' do #para = block_from_string(%Q{#a few\nwords#}, :attributes => {'compat-mode' => ''}) #assert_equal "a few\nwords", para.sub_quotes(para.source) para = block_from_string(%Q{#a few\nwords#}) assert_equal "a few\nwords", para.sub_quotes(para.source) end test 'constrained marked string should not match entity references' do para = block_from_string('111 #mark a# 222 "`quote a`" 333 #mark b# 444') assert_equal %(111 mark a 222 “quote a” 333 mark b 444), para.sub_quotes(para.source) end test 'single-line unconstrained marked string' do #para = block_from_string(%q{##--anything goes ##}, :attributes => {'compat-mode' => ''}) #assert_equal '--anything goes ', para.sub_quotes(para.source) para = block_from_string(%q{##--anything goes ##}) assert_equal '--anything goes ', para.sub_quotes(para.source) end test 'escaped single-line unconstrained marked string' do para = block_from_string(%(#{BACKSLASH}#{BACKSLASH}##--anything goes ##)) assert_equal '##--anything goes ##', para.sub_quotes(para.source) end test 'multi-line unconstrained marked string' do #para = block_from_string(%Q{##--anything\ngoes ##}, :attributes => {'compat-mode' => ''}) #assert_equal "--anything\ngoes ", para.sub_quotes(para.source) para = block_from_string(%Q{##--anything\ngoes ##}) assert_equal "--anything\ngoes ", para.sub_quotes(para.source) end test 'single-line constrained marked string with role' do para = block_from_string(%q{[statement]#a few words#}) assert_equal 'a few words', para.sub_quotes(para.source) end test 'single-line constrained strong string' do para = block_from_string(%q{*a few strong words*}) assert_equal 'a few strong words', para.sub_quotes(para.source) end test 'escaped single-line constrained strong string' do para = block_from_string(%(#{BACKSLASH}*a few strong words*)) assert_equal '*a few strong words*', para.sub_quotes(para.source) end test 'multi-line constrained strong string' do para = block_from_string(%Q{*a few\nstrong words*}) assert_equal "a few\nstrong words", para.sub_quotes(para.source) end test 'constrained strong string containing an asterisk' do para = block_from_string(%q{*bl*ck*-eye}) assert_equal 'bl*ck-eye', para.sub_quotes(para.source) end test 'constrained strong string containing an asterisk and multibyte word chars' do para = block_from_string(%q{*黑*眼圈*}) assert_equal '黑*眼圈', para.sub_quotes(para.source) end if ::RUBY_MIN_VERSION_1_9 test 'single-line constrained quote variation emphasized string' do para = block_from_string(%q{_a few emphasized words_}) assert_equal 'a few 
emphasized words', para.sub_quotes(para.source) end test 'escaped single-line constrained quote variation emphasized string' do para = block_from_string(%(#{BACKSLASH}_a few emphasized words_)) assert_equal %q(_a few emphasized words_), para.sub_quotes(para.source) end test 'escaped single quoted string' do para = block_from_string(%(#{BACKSLASH}'a few emphasized words')) # NOTE the \' is replaced with ' by the :replacements substitution, later in the substitution pipeline assert_equal %(#{BACKSLASH}'a few emphasized words'), para.sub_quotes(para.source) end test 'multi-line constrained emphasized quote variation string' do para = block_from_string(%Q{_a few\nemphasized words_}) assert_equal "a few\nemphasized words", para.sub_quotes(para.source) end test 'single-quoted string containing an emphasized phrase' do para = block_from_string(%q{`I told him, 'Just go for it!''}, :attributes => {'compat-mode' => ''}) assert_equal '‘I told him, Just go for it!’', para.sub_quotes(para.source) para = block_from_string(%q{'`I told him, 'Just go for it!'`'}) assert_equal %q(‘I told him, 'Just go for it!'’), para.sub_quotes(para.source) end test 'escaped single-quotes inside emphasized words are restored' do para = block_from_string(%('Here#{BACKSLASH}'s Johnny!'), :attributes => {'compat-mode' => ''}) assert_equal %q(Here's Johnny!), para.apply_normal_subs(para.lines) para = block_from_string(%('Here#{BACKSLASH}'s Johnny!')) assert_equal %q('Here's Johnny!'), para.apply_normal_subs(para.lines) end test 'single-line constrained emphasized underline variation string' do para = block_from_string(%q{_a few emphasized words_}) assert_equal 'a few emphasized words', para.sub_quotes(para.source) end test 'escaped single-line constrained emphasized underline variation string' do para = block_from_string(%(#{BACKSLASH}_a few emphasized words_)) assert_equal '_a few emphasized words_', para.sub_quotes(para.source) end test 'multi-line constrained emphasized underline variation string' do para = block_from_string(%Q{_a few\nemphasized words_}) assert_equal "a few\nemphasized words", para.sub_quotes(para.source) end # NOTE must use apply_normal_subs because constrained monospaced is handled as a passthrough test 'single-line constrained monospaced string' do para = block_from_string(%(`a few <{monospaced}> words`), :attributes => {'monospaced' => 'monospaced', 'compat-mode' => ''}) assert_equal 'a few <{monospaced}> words', para.apply_normal_subs(para.lines) para = block_from_string(%(`a few <{monospaced}> words`), :attributes => {'monospaced' => 'monospaced'}) assert_equal 'a few <monospaced> words', para.apply_normal_subs(para.lines) end # NOTE must use apply_normal_subs because constrained monospaced is handled as a passthrough test 'single-line constrained monospaced string with role' do para = block_from_string(%([input]`a few <{monospaced}> words`), :attributes => {'monospaced' => 'monospaced', 'compat-mode' => ''}) assert_equal 'a few <{monospaced}> words', para.apply_normal_subs(para.lines) para = block_from_string(%([input]`a few <{monospaced}> words`), :attributes => {'monospaced' => 'monospaced'}) assert_equal 'a few <monospaced> words', para.apply_normal_subs(para.lines) end # NOTE must use apply_normal_subs because constrained monospaced is handled as a passthrough test 'escaped single-line constrained monospaced string' do para = block_from_string(%(#{BACKSLASH}`a few words`), :attributes => {'compat-mode' => ''}) assert_equal '`a few <monospaced> words`', para.apply_normal_subs(para.lines) para = 
block_from_string(%(#{BACKSLASH}`a few words`)) assert_equal '`a few <monospaced> words`', para.apply_normal_subs(para.lines) end # NOTE must use apply_normal_subs because constrained monospaced is handled as a passthrough test 'escaped single-line constrained monospaced string with role' do para = block_from_string(%([input]#{BACKSLASH}`a few words`), :attributes => {'compat-mode' => ''}) assert_equal '[input]`a few <monospaced> words`', para.apply_normal_subs(para.lines) para = block_from_string(%([input]#{BACKSLASH}`a few words`)) assert_equal '[input]`a few <monospaced> words`', para.apply_normal_subs(para.lines) end # NOTE must use apply_normal_subs because constrained monospaced is handled as a passthrough test 'escaped role on single-line constrained monospaced string' do para = block_from_string(%(#{BACKSLASH}[input]`a few words`), :attributes => {'compat-mode' => ''}) assert_equal '[input]a few <monospaced> words', para.apply_normal_subs(para.lines) para = block_from_string(%(#{BACKSLASH}[input]`a few words`)) assert_equal '[input]a few <monospaced> words', para.apply_normal_subs(para.lines) end # NOTE must use apply_normal_subs because constrained monospaced is handled as a passthrough test 'escaped role on escaped single-line constrained monospaced string' do para = block_from_string(%(#{BACKSLASH}[input]#{BACKSLASH}`a few words`), :attributes => {'compat-mode' => ''}) assert_equal %(#{BACKSLASH}[input]`a few <monospaced> words`), para.apply_normal_subs(para.lines) para = block_from_string(%(#{BACKSLASH}[input]#{BACKSLASH}`a few words`)) assert_equal %(#{BACKSLASH}[input]`a few <monospaced> words`), para.apply_normal_subs(para.lines) end # NOTE must use apply_normal_subs because constrained monospaced is handled as a passthrough test 'multi-line constrained monospaced string' do para = block_from_string(%(`a few\n<{monospaced}> words`), :attributes => {'monospaced' => 'monospaced', 'compat-mode' => ''}) assert_equal "a few\n<{monospaced}> words", para.apply_normal_subs(para.lines) para = block_from_string(%(`a few\n<{monospaced}> words`), :attributes => {'monospaced' => 'monospaced'}) assert_equal "a few\n<monospaced> words", para.apply_normal_subs(para.lines) end test 'single-line unconstrained strong chars' do para = block_from_string(%q{**Git**Hub}) assert_equal 'GitHub', para.sub_quotes(para.source) end test 'escaped single-line unconstrained strong chars' do para = block_from_string(%(#{BACKSLASH}**Git**Hub)) assert_equal '*Git*Hub', para.sub_quotes(para.source) end test 'multi-line unconstrained strong chars' do para = block_from_string(%Q{**G\ni\nt\n**Hub}) assert_equal "G\ni\nt\nHub", para.sub_quotes(para.source) end test 'unconstrained strong chars with inline asterisk' do para = block_from_string(%q{**bl*ck**-eye}) assert_equal 'bl*ck-eye', para.sub_quotes(para.source) end test 'unconstrained strong chars with role' do para = block_from_string(%q{Git[blue]**Hub**}) assert_equal %q{GitHub}, para.sub_quotes(para.source) end # TODO this is not the same result as AsciiDoc, though I don't understand why AsciiDoc gets what it gets test 'escaped unconstrained strong chars with role' do para = block_from_string(%(Git#{BACKSLASH}[blue]**Hub**)) assert_equal %q{Git[blue]*Hub*}, para.sub_quotes(para.source) end test 'single-line unconstrained emphasized chars' do para = block_from_string(%q{__Git__Hub}) assert_equal 'GitHub', para.sub_quotes(para.source) end test 'escaped single-line unconstrained emphasized chars' do para = block_from_string(%(#{BACKSLASH}__Git__Hub)) 
assert_equal '__Git__Hub', para.sub_quotes(para.source) end test 'escaped single-line unconstrained emphasized chars around word' do para = block_from_string(%(#{BACKSLASH}#{BACKSLASH}__GitHub__)) assert_equal '__GitHub__', para.sub_quotes(para.source) end test 'multi-line unconstrained emphasized chars' do para = block_from_string(%Q{__G\ni\nt\n__Hub}) assert_equal "G\ni\nt\nHub", para.sub_quotes(para.source) end test 'unconstrained emphasis chars with role' do para = block_from_string(%q{[gray]__Git__Hub}) assert_equal %q{GitHub}, para.sub_quotes(para.source) end test 'escaped unconstrained emphasis chars with role' do para = block_from_string(%(#{BACKSLASH}[gray]__Git__Hub)) assert_equal %q{[gray]__Git__Hub}, para.sub_quotes(para.source) end test 'single-line constrained monospaced chars' do para = block_from_string(%q{call +save()+ to persist the changes}, :attributes => {'compat-mode' => ''}) assert_equal 'call save() to persist the changes', para.sub_quotes(para.source) para = block_from_string(%q{call [x-]+save()+ to persist the changes}) assert_equal 'call save() to persist the changes', para.apply_subs(para.source) para = block_from_string(%q{call `save()` to persist the changes}) assert_equal 'call save() to persist the changes', para.sub_quotes(para.source) end test 'single-line constrained monospaced chars with role' do para = block_from_string(%q{call [method]+save()+ to persist the changes}, :attributes => {'compat-mode' => ''}) assert_equal 'call save() to persist the changes', para.sub_quotes(para.source) para = block_from_string(%q{call [method x-]+save()+ to persist the changes}) assert_equal 'call save() to persist the changes', para.apply_subs(para.source) para = block_from_string(%q{call [method]`save()` to persist the changes}) assert_equal 'call save() to persist the changes', para.sub_quotes(para.source) end test 'escaped single-line constrained monospaced chars' do para = block_from_string(%(call #{BACKSLASH}+save()+ to persist the changes), :attributes => {'compat-mode' => ''}) assert_equal 'call +save()+ to persist the changes', para.sub_quotes(para.source) para = block_from_string(%(call #{BACKSLASH}`save()` to persist the changes)) assert_equal 'call `save()` to persist the changes', para.sub_quotes(para.source) end test 'escaped single-line constrained monospaced chars with role' do para = block_from_string(%(call [method]#{BACKSLASH}+save()+ to persist the changes), :attributes => {'compat-mode' => ''}) assert_equal 'call [method]+save()+ to persist the changes', para.sub_quotes(para.source) para = block_from_string(%(call [method]#{BACKSLASH}`save()` to persist the changes)) assert_equal 'call [method]`save()` to persist the changes', para.sub_quotes(para.source) end test 'escaped role on single-line constrained monospaced chars' do para = block_from_string(%(call #{BACKSLASH}[method]+save()+ to persist the changes), :attributes => {'compat-mode' => ''}) assert_equal 'call [method]save() to persist the changes', para.sub_quotes(para.source) para = block_from_string(%(call #{BACKSLASH}[method]`save()` to persist the changes)) assert_equal 'call [method]save() to persist the changes', para.sub_quotes(para.source) end test 'escaped role on escaped single-line constrained monospaced chars' do para = block_from_string(%(call #{BACKSLASH}[method]#{BACKSLASH}+save()+ to persist the changes), :attributes => {'compat-mode' => ''}) assert_equal %(call #{BACKSLASH}[method]+save()+ to persist the changes), para.sub_quotes(para.source) para = block_from_string(%(call 
#{BACKSLASH}[method]#{BACKSLASH}`save()` to persist the changes)) assert_equal %(call #{BACKSLASH}[method]`save()` to persist the changes), para.sub_quotes(para.source) end test 'single-line unconstrained monospaced chars' do para = block_from_string(%q{Git++Hub++}, :attributes => {'compat-mode' => ''}) assert_equal 'GitHub', para.sub_quotes(para.source) para = block_from_string(%q{Git[x-]++Hub++}) assert_equal 'GitHub', para.apply_subs(para.source) para = block_from_string(%q{Git``Hub``}) assert_equal 'GitHub', para.sub_quotes(para.source) end test 'escaped single-line unconstrained monospaced chars' do para = block_from_string(%(Git#{BACKSLASH}++Hub++), :attributes => {'compat-mode' => ''}) assert_equal 'Git+Hub+', para.sub_quotes(para.source) para = block_from_string(%(Git#{BACKSLASH * 2}++Hub++), :attributes => {'compat-mode' => ''}) assert_equal 'Git++Hub++', para.sub_quotes(para.source) para = block_from_string(%(Git#{BACKSLASH}``Hub``)) assert_equal 'Git``Hub``', para.sub_quotes(para.source) end test 'multi-line unconstrained monospaced chars' do para = block_from_string(%Q{Git++\nH\nu\nb++}, :attributes => {'compat-mode' => ''}) assert_equal "Git\nH\nu\nb", para.sub_quotes(para.source) para = block_from_string(%Q{Git[x-]++\nH\nu\nb++}) assert_equal %(Git\nH\nu\nb), para.apply_subs(para.source) para = block_from_string(%Q{Git``\nH\nu\nb``}) assert_equal "Git\nH\nu\nb", para.sub_quotes(para.source) end test 'single-line superscript chars' do para = block_from_string(%(x^2^ = x * x, e = mc^2^, there's a 1^st^ time for everything)) assert_equal %(x2 = x * x, e = mc2, there\'s a 1st time for everything), para.sub_quotes(para.source) end test 'escaped single-line superscript chars' do para = block_from_string(%(x#{BACKSLASH}^2^ = x * x)) assert_equal 'x^2^ = x * x', para.sub_quotes(para.source) end test 'does not match superscript across whitespace' do para = block_from_string(%Q{x^(n\n-\n1)^}) assert_equal para.source, para.sub_quotes(para.source) end test 'does not match adjacent superscript chars' do para = block_from_string 'a ^^ b' assert_equal 'a ^^ b', para.sub_quotes(para.source) end test 'does not confuse superscript and links with blank window shorthand' do para = block_from_string(%Q{http://localhost[Text^] on the 21^st^ and 22^nd^}) assert_equal 'Text on the 21st and 22nd', para.content end test 'single-line subscript chars' do para = block_from_string(%q{H~2~O}) assert_equal 'H2O', para.sub_quotes(para.source) end test 'escaped single-line subscript chars' do para = block_from_string(%(H#{BACKSLASH}~2~O)) assert_equal 'H~2~O', para.sub_quotes(para.source) end test 'does not match subscript across whitespace' do para = block_from_string(%Q{project~ view\non\nGitHub~}) assert_equal para.source, para.sub_quotes(para.source) end test 'does not match adjacent subscript chars' do para = block_from_string 'a ~~ b' assert_equal 'a ~~ b', para.sub_quotes(para.source) end test 'does not match subscript across distinct URLs' do para = block_from_string(%Q{http://www.abc.com/~def[DEF] and http://www.abc.com/~ghi[GHI]}) assert_equal para.source, para.sub_quotes(para.source) end test 'quoted text with role shorthand' do para = block_from_string(%q{[.white.red-background]#alert#}) assert_equal 'alert', para.sub_quotes(para.source) end test 'quoted text with id shorthand' do para = block_from_string(%q{[#bond]#007#}) assert_equal '007', para.sub_quotes(para.source) end test 'quoted text with id and role shorthand' do para = block_from_string(%q{[#bond.white.red-background]#007#}) 
assert_equal '007', para.sub_quotes(para.source) end test 'quoted text with id and role shorthand using docbook backend' do para = block_from_string(%q{[#bond.white.red-background]#007#}, :backend => 'docbook45') assert_equal '007', para.sub_quotes(para.source) end test 'should ignore attributes after comma' do para = block_from_string(%q{[red, foobar]#alert#}) assert_equal 'alert', para.sub_quotes(para.source) end test 'should assign role attribute when shorthand style contains a role' do para = block_from_string 'blah' result = para.parse_quoted_text_attributes '.red#idref' expect = {'id' => 'idref', 'role' => 'red'} assert_equal expect, result end test 'should not assign role attribute if shorthand style has no roles' do para = block_from_string 'blah' result = para.parse_quoted_text_attributes '#idref' expect = {'id' => 'idref'} assert_equal expect, result end end context 'Macros' do test 'a single-line link macro should be interpreted as a link' do para = block_from_string('link:/home.html[]') assert_equal %q{/home.html}, para.sub_macros(para.source) end test 'a single-line link macro with text should be interpreted as a link' do para = block_from_string('link:/home.html[Home]') assert_equal %q{Home}, para.sub_macros(para.source) end test 'a mailto macro should be interpreted as a mailto link' do para = block_from_string('mailto:doc.writer@asciidoc.org[]') assert_equal %q{doc.writer@asciidoc.org}, para.sub_macros(para.source) end test 'a mailto macro with text should be interpreted as a mailto link' do para = block_from_string('mailto:doc.writer@asciidoc.org[Doc Writer]') assert_equal %q{Doc Writer}, para.sub_macros(para.source) end test 'a mailto macro with text and subject should be interpreted as a mailto link' do para = block_from_string('mailto:doc.writer@asciidoc.org[Doc Writer, Pull request]', :attributes => {'linkattrs' => ''}) assert_equal %q{Doc Writer}, para.sub_macros(para.source) end test 'a mailto macro with text, subject and body should be interpreted as a mailto link' do para = block_from_string('mailto:doc.writer@asciidoc.org[Doc Writer, Pull request, Please accept my pull request]', :attributes => {'linkattrs' => ''}) assert_equal %q{Doc Writer}, para.sub_macros(para.source) end test 'should recognize inline email addresses' do para = block_from_string('doc.writer@asciidoc.org') assert_equal %q{doc.writer@asciidoc.org}, para.sub_macros(para.source) para = block_from_string('') assert_equal %q{<doc.writer@asciidoc.org>}, para.apply_normal_subs(para.lines) para = block_from_string('author+website@4fs.no') assert_equal %q{author+website@4fs.no}, para.sub_macros(para.source) para = block_from_string('john@domain.uk.co') assert_equal %q{john@domain.uk.co}, para.sub_macros(para.source) end test 'should ignore escaped inline email address' do para = block_from_string(%(#{BACKSLASH}doc.writer@asciidoc.org)) assert_equal %q{doc.writer@asciidoc.org}, para.sub_macros(para.source) end test 'a single-line raw url should be interpreted as a link' do para = block_from_string('http://google.com') assert_equal %q{http://google.com}, para.sub_macros(para.source) end test 'a single-line raw url with text should be interpreted as a link' do para = block_from_string('http://google.com[Google]') assert_equal %q{Google}, para.sub_macros(para.source) end test 'a multi-line raw url with text should be interpreted as a link' do para = block_from_string("http://google.com[Google\nHomepage]") assert_equal %{Google\nHomepage}, para.sub_macros(para.source) end test 'a multi-line raw url with 
attribute as text should be interpreted as a link with resolved attribute' do para = block_from_string("http://google.com[{google_homepage}]") para.document.attributes['google_homepage'] = 'Google Homepage' assert_equal %q{Google Homepage}, para.sub_macros(para.source) end test 'a single-line escaped raw url should not be interpreted as a link' do para = block_from_string(%(#{BACKSLASH}http://google.com)) assert_equal %q{http://google.com}, para.sub_macros(para.source) end test 'a comma separated list of links should not include commas in links' do para = block_from_string('http://foo.com, http://bar.com, http://example.org') assert_equal %q{http://foo.com, http://bar.com, http://example.org}, para.sub_macros(para.source) end test 'a single-line image macro should be interpreted as an image' do para = block_from_string('image:tiger.png[]') assert_equal %{tiger}, para.sub_macros(para.source).gsub(/>\s+<') end test 'should replace underscore and hyphen with space in generated alt text for an inline image' do para = block_from_string('image:tiger-with-family_1.png[]') assert_equal %{tiger with family 1}, para.sub_macros(para.source).gsub(/>\s+<') end test 'a single-line image macro with text should be interpreted as an image with alt text' do para = block_from_string('image:tiger.png[Tiger]') assert_equal %{Tiger}, para.sub_macros(para.source).gsub(/>\s+<') end test 'an image macro with SVG image and text should be interpreted as an image with alt text' do para = block_from_string('image:tiger.svg[Tiger]') assert_equal %{Tiger}, para.sub_macros(para.source).gsub(/>\s+<') end test 'an image macro with an interactive SVG image and alt text should be converted to an object element' do para = block_from_string('image:tiger.svg[Tiger,opts=interactive]', :safe => Asciidoctor::SafeMode::SERVER, :attributes => { 'imagesdir' => 'images' }) assert_equal %{Tiger}, para.sub_macros(para.source).gsub(/>\s+<') end test 'an image macro with an interactive SVG image, fallback and alt text should be converted to an object element' do para = block_from_string('image:tiger.svg[Tiger,fallback=tiger.png,opts=interactive]', :safe => Asciidoctor::SafeMode::SERVER, :attributes => { 'imagesdir' => 'images' }) assert_equal %{Tiger}, para.sub_macros(para.source).gsub(/>\s+<') end test 'an image macro with an inline SVG image should be converted to an svg element' do para = block_from_string('image:circle.svg[Tiger,100,opts=inline]', :safe => Asciidoctor::SafeMode::SERVER, :attributes => { 'imagesdir' => 'fixtures', 'docdir' => ::File.dirname(__FILE__) }) result = para.sub_macros(para.source).gsub(/>\s+<') assert_match(/]*width="100px"[^>]*>/, result) refute_match(/]*width="500px"[^>]*>/, result) refute_match(/]*height="500px"[^>]*>/, result) refute_match(/]*style="width:500px;height:500px"[^>]*>/, result) end test 'an image macro with an inline SVG image should be converted to an svg element even when data-uri is set' do para = block_from_string('image:circle.svg[Tiger,100,opts=inline]', :safe => Asciidoctor::SafeMode::SERVER, :attributes => { 'data-uri' => '', 'imagesdir' => 'fixtures', 'docdir' => ::File.dirname(__FILE__) }) assert_match(/]*width="100px">/, para.sub_macros(para.source).gsub(/>\s+<')) end test 'an image macro with an SVG image should not use an object element when safe mode is secure' do para = block_from_string('image:tiger.svg[Tiger,opts=interactive]', :attributes => { 'imagesdir' => 'images' }) assert_equal %{Tiger}, para.sub_macros(para.source).gsub(/>\s+<') end test 'a single-line image macro with 
text containing escaped square bracket should be interpreted as an image with alt text' do para = block_from_string(%(image:tiger.png[[Another#{BACKSLASH}] Tiger])) assert_equal %{[Another] Tiger}, para.sub_macros(para.source).gsub(/>\s+<') end test 'a single-line image macro with text and dimensions should be interpreted as an image with alt text and dimensions' do para = block_from_string('image:tiger.png[Tiger, 200, 100]') assert_equal %{Tiger}, para.sub_macros(para.source).gsub(/>\s+<') end test 'a single-line image macro with text and dimensions should be interpreted as an image with alt text and dimensions in docbook' do para = block_from_string 'image:tiger.png[Tiger, 200, 100]', :backend => 'docbook' assert_equal %{Tiger}, para.sub_macros(para.source).gsub(/>\s+<') end test 'a single-line image macro with text and link should be interpreted as a linked image with alt text' do para = block_from_string('image:tiger.png[Tiger, link="http://en.wikipedia.org/wiki/Tiger"]') assert_equal %{Tiger}, para.sub_macros(para.source).gsub(/>\s+<') end test 'a multi-line image macro with text and dimensions should be interpreted as an image with alt text and dimensions' do para = block_from_string(%(image:tiger.png[Another\nAwesome\nTiger, 200,\n100])) assert_equal %{Another Awesome Tiger}, para.sub_macros(para.source).gsub(/>\s+<') end test 'an inline image macro with a url target should be interpreted as an image' do para = block_from_string %(Beware of the image:http://example.com/images/tiger.png[tiger].) assert_equal %{Beware of the tiger.}, para.sub_macros(para.source).gsub(/>\s+<') end test 'an inline image macro with a float attribute should be interpreted as a floating image' do para = block_from_string %(image:http://example.com/images/tiger.png[tiger, float="right"] Beware of the tigers!) 
assert_equal %{tiger Beware of the tigers!}, para.sub_macros(para.source).gsub(/>\s+<') end test 'should prepend value of imagesdir attribute to inline image target if target is relative path' do para = block_from_string %(Beware of the image:tiger.png[tiger].), :attributes => {'imagesdir' => './images'} assert_equal %{Beware of the tiger.}, para.sub_macros(para.source).gsub(/>\s+<') end test 'should not prepend value of imagesdir attribute to inline image target if target is absolute path' do para = block_from_string %(Beware of the image:/tiger.png[tiger].), :attributes => {'imagesdir' => './images'} assert_equal %{Beware of the tiger.}, para.sub_macros(para.source).gsub(/>\s+<') end test 'should not prepend value of imagesdir attribute to inline image target if target is url' do para = block_from_string %(Beware of the image:http://example.com/images/tiger.png[tiger].), :attributes => {'imagesdir' => './images'} assert_equal %{Beware of the tiger.}, para.sub_macros(para.source).gsub(/>\s+<') end test 'a block image macro should not be detected within paragraph text' do para = block_from_string(%(Not an inline image macro image::tiger.png[].)) result = para.sub_macros(para.source) assert !result.include?(' {'icons' => ''} assert_equal %{github}, para.sub_macros(para.source).gsub(/>\s+<') end test 'an icon macro should be interpreted as alt text if icons are disabled' do para = block_from_string 'icon:github[]' assert_equal %{[github]}, para.sub_macros(para.source).gsub(/>\s+<') end test 'an icon macro should render alt text if icons are disabled and alt is given' do para = block_from_string 'icon:github[alt="GitHub"]' assert_equal %{[GitHub]}, para.sub_macros(para.source).gsub(/>\s+<') end test 'an icon macro should be interpreted as a font-based icon when icons=font' do para = block_from_string 'icon:github[]', :attributes => {'icons' => 'font'} assert_equal %{}, para.sub_macros(para.source).gsub(/>\s+<') end test 'an icon macro with a size should be interpreted as a font-based icon with a size when icons=font' do para = block_from_string 'icon:github[4x]', :attributes => {'icons' => 'font'} assert_equal %{}, para.sub_macros(para.source).gsub(/>\s+<') end test 'an icon macro with a role and title should be interpreted as a font-based icon with a class and title when icons=font' do para = block_from_string 'icon:heart[role="red", title="Heart me"]', :attributes => {'icons' => 'font'} assert_equal %{}, para.sub_macros(para.source).gsub(/>\s+<') end test 'a single-line footnote macro should be registered and rendered as a footnote' do para = block_from_string('Sentence text footnote:[An example footnote.].') assert_equal %(Sentence text [1].), para.sub_macros(para.source) assert_equal 1, para.document.references[:footnotes].size footnote = para.document.references[:footnotes].first assert_equal 1, footnote.index assert footnote.id.nil? assert_equal 'An example footnote.', footnote.text end test 'a multi-line footnote macro should be registered and rendered as a footnote without endline' do para = block_from_string("Sentence text footnote:[An example footnote\nwith wrapped text.].") assert_equal %(Sentence text [1].), para.sub_macros(para.source) assert_equal 1, para.document.references[:footnotes].size footnote = para.document.references[:footnotes].first assert_equal 1, footnote.index assert footnote.id.nil? 
assert_equal "An example footnote with wrapped text.", footnote.text end test 'an escaped closing square bracket in a footnote should be unescaped when rendered' do para = block_from_string(%(footnote:[a #{BACKSLASH}] b].)) assert_equal %([1].), para.sub_macros(para.source) assert_equal 1, para.document.references[:footnotes].size footnote = para.document.references[:footnotes].first assert_equal "a ] b", footnote.text end test 'a footnote macro can be directly adjacent to preceding word' do para = block_from_string('Sentence textfootnote:[An example footnote.].') assert_equal %(Sentence text[1].), para.sub_macros(para.source) end test 'a footnote macro may contain an escaped backslash' do para = block_from_string("footnote:[\\]]\nfootnote:[a \\] b]\nfootnote:[a \\]\\] b]") para.sub_macros(para.source) assert_equal 3, para.document.references[:footnotes].size footnote1 = para.document.references[:footnotes][0] assert_equal ']', footnote1.text footnote2 = para.document.references[:footnotes][1] assert_equal 'a ] b', footnote2.text footnote3 = para.document.references[:footnotes][2] assert_equal 'a ]] b', footnote3.text end test 'a footnote macro may contain a link macro' do para = block_from_string('Share your code. footnote:[http://github.com[GitHub]]') assert_equal %(Share your code. [1]), para.sub_macros(para.source) assert_equal 1, para.document.references[:footnotes].size footnote1 = para.document.references[:footnotes][0] assert_equal 'GitHub', footnote1.text end test 'a footnote macro may contain a plain URL' do para = block_from_string %(the JLine footnote:[https://github.com/jline/jline2]\nlibrary.) result = para.sub_macros para.source assert_equal %(the JLine [1]\nlibrary.), result assert_equal 1, para.document.references[:footnotes].size fn1 = para.document.references[:footnotes].first assert_equal 'https://github.com/jline/jline2', fn1.text end test 'a footnote macro followed by a semi-colon may contain a plain URL' do para = block_from_string %(the JLine footnote:[https://github.com/jline/jline2];\nlibrary.) 
result = para.sub_macros para.source assert_equal %(the JLine [1];\nlibrary.), result assert_equal 1, para.document.references[:footnotes].size fn1 = para.document.references[:footnotes].first assert_equal 'https://github.com/jline/jline2', fn1.text end test 'a footnote macro may contain an xref macro' do # specialcharacters escaping is simulated para = block_from_string('text footnote:[<<_install,Install>>]') assert_equal %(text [1]), para.sub_macros(para.source) assert_equal 1, para.document.references[:footnotes].size footnote1 = para.document.references[:footnotes][0] assert_equal 'Install', footnote1.text end test 'a footnote macro may contain an anchor macro' do para = block_from_string('text footnote:[a [[b\]\] \[[c\]\] d]') assert_equal %(text [1]), para.sub_macros(para.source) assert_equal 1, para.document.references[:footnotes].size footnote1 = para.document.references[:footnotes][0] assert_equal 'a [[c]] d', footnote1.text end test 'subsequent footnote macros with escaped URLs should be restored in DocBook' do input = <<-EOS foofootnote:[+http://example.com+]barfootnote:[+http://acme.com+]baz EOS result = render_embedded_string input, :doctype => 'inline', :backend => 'docbook' assert_equal 'foohttp://example.combarhttp://acme.combaz', result end test 'a footnote macro may contain a bibliographic anchor macro' do para = block_from_string('text footnote:[a [[[b\]\]\] c]') assert_equal %(text [1]), para.sub_macros(para.source) assert_equal 1, para.document.references[:footnotes].size footnote1 = para.document.references[:footnotes][0] assert_equal 'a [b] c', footnote1.text end test 'should increment index of subsequent footnote macros' do para = block_from_string("Sentence text footnote:[An example footnote.]. Sentence text footnote:[Another footnote.].") assert_equal %(Sentence text [1]. Sentence text [2].), para.sub_macros(para.source) assert_equal 2, para.document.references[:footnotes].size footnote1 = para.document.references[:footnotes][0] assert_equal 1, footnote1.index assert footnote1.id.nil? assert_equal "An example footnote.", footnote1.text footnote2 = para.document.references[:footnotes][1] assert_equal 2, footnote2.index assert footnote2.id.nil? assert_equal "Another footnote.", footnote2.text end test 'a footnoteref macro with id and single-line text should be registered and rendered as a footnote' do para = block_from_string('Sentence text footnoteref:[ex1, An example footnote.].') assert_equal %(Sentence text [1].), para.sub_macros(para.source) assert_equal 1, para.document.references[:footnotes].size footnote = para.document.references[:footnotes].first assert_equal 1, footnote.index assert_equal 'ex1', footnote.id assert_equal 'An example footnote.', footnote.text end test 'a footnoteref macro with id and multi-line text should be registered and rendered as a footnote without endlines' do para = block_from_string("Sentence text footnoteref:[ex1, An example footnote\nwith wrapped text.].") assert_equal %(Sentence text [1].), para.sub_macros(para.source) assert_equal 1, para.document.references[:footnotes].size footnote = para.document.references[:footnotes].first assert_equal 1, footnote.index assert_equal 'ex1', footnote.id assert_equal "An example footnote with wrapped text.", footnote.text end test 'a footnoteref macro with id should refer to footnoteref with same id' do para = block_from_string('Sentence text footnoteref:[ex1, An example footnote.]. Sentence text footnoteref:[ex1].') assert_equal %(Sentence text [1]. 
Sentence text [1].), para.sub_macros(para.source) assert_equal 1, para.document.references[:footnotes].size footnote = para.document.references[:footnotes].first assert_equal 1, footnote.index assert_equal 'ex1', footnote.id assert_equal 'An example footnote.', footnote.text end test 'an unresolved footnoteref should not crash the processor' do para = block_from_string('Sentence text footnoteref:[ex1].') para.sub_macros para.source end test 'a single-line index term macro with a primary term should be registered as an index reference' do sentence = "The tiger (Panthera tigris) is the largest cat species.\n" macros = ['indexterm:[Tigers]', '(((Tigers)))'] macros.each do |macro| para = block_from_string("#{sentence}#{macro}") output = para.sub_macros(para.source) assert_equal sentence, output assert_equal 1, para.document.references[:indexterms].size assert_equal ['Tigers'], para.document.references[:indexterms].first end end test 'a single-line index term macro with primary and secondary terms should be registered as an index reference' do sentence = "The tiger (Panthera tigris) is the largest cat species.\n" macros = ['indexterm:[Big cats, Tigers]', '(((Big cats, Tigers)))'] macros.each do |macro| para = block_from_string("#{sentence}#{macro}") output = para.sub_macros(para.source) assert_equal sentence, output assert_equal 1, para.document.references[:indexterms].size assert_equal ['Big cats', 'Tigers'], para.document.references[:indexterms].first end end test 'a single-line index term macro with primary, secondary and tertiary terms should be registered as an index reference' do sentence = "The tiger (Panthera tigris) is the largest cat species.\n" macros = ['indexterm:[Big cats,Tigers , Panthera tigris]', '(((Big cats,Tigers , Panthera tigris)))'] macros.each do |macro| para = block_from_string("#{sentence}#{macro}") output = para.sub_macros(para.source) assert_equal sentence, output assert_equal 1, para.document.references[:indexterms].size assert_equal ['Big cats', 'Tigers', 'Panthera tigris'], para.document.references[:indexterms].first end end test 'a multi-line index term macro should be compacted and registered as an index reference' do sentence = "The tiger (Panthera tigris) is the largest cat species.\n" macros = ["indexterm:[Panthera\ntigris]", "(((Panthera\ntigris)))"] macros.each do |macro| para = block_from_string("#{sentence}#{macro}") output = para.sub_macros(para.source) assert_equal sentence, output assert_equal 1, para.document.references[:indexterms].size assert_equal ['Panthera tigris'], para.document.references[:indexterms].first end end test 'should not split index terms on commas inside of quoted terms' do inputs = [] inputs.push <<-EOS Tigers are big, scary cats. indexterm:[Tigers, "[Big\\], scary cats"] EOS inputs.push <<-EOS Tigers are big, scary cats. 
(((Tigers, "[Big], scary cats"))) EOS inputs.each do |input| para = block_from_string input output = para.sub_macros(para.source) assert_equal input.lines.first, output assert_equal 1, para.document.references[:indexterms].size terms = para.document.references[:indexterms].first assert_equal 2, terms.size assert_equal 'Tigers', terms.first assert_equal '[Big], scary cats', terms.last end end test 'normal substitutions are performed on an index term macro' do sentence = "The tiger (Panthera tigris) is the largest cat species.\n" macros = ['indexterm:[*Tigers*]', '(((*Tigers*)))'] macros.each do |macro| para = block_from_string("#{sentence}#{macro}") output = para.apply_normal_subs(para.lines) assert_equal sentence, output assert_equal 1, para.document.references[:indexterms].size assert_equal ['Tigers'], para.document.references[:indexterms].first end end test 'registers multiple index term macros' do sentence = "The tiger (Panthera tigris) is the largest cat species." macros = "(((Tigers)))\n(((Animals,Cats)))" para = block_from_string("#{sentence}\n#{macros}") output = para.sub_macros(para.source) assert_equal sentence, output.rstrip assert_equal 2, para.document.references[:indexterms].size assert_equal ['Tigers'], para.document.references[:indexterms][0] assert_equal ['Animals', 'Cats'], para.document.references[:indexterms][1] end test 'an index term macro with round bracket syntax may contain round brackets in term' do sentence = "The tiger (Panthera tigris) is the largest cat species.\n" macro = '(((Tiger (Panthera tigris))))' para = block_from_string("#{sentence}#{macro}") output = para.sub_macros(para.source) assert_equal sentence, output assert_equal 1, para.document.references[:indexterms].size assert_equal ['Tiger (Panthera tigris)'], para.document.references[:indexterms].first end test 'an index term macro with square bracket syntax may contain square brackets in term' do sentence = "The tiger (Panthera tigris) is the largest cat species.\n" macro = 'indexterm:[Tiger [Panthera tigris\\]]' para = block_from_string("#{sentence}#{macro}") output = para.sub_macros(para.source) assert_equal sentence, output assert_equal 1, para.document.references[:indexterms].size assert_equal ['Tiger [Panthera tigris]'], para.document.references[:indexterms].first end test 'a single-line index term 2 macro should be registered as an index reference and retain term inline' do sentence = 'The tiger (Panthera tigris) is the largest cat species.' macros = ['The indexterm2:[tiger] (Panthera tigris) is the largest cat species.', 'The ((tiger)) (Panthera tigris) is the largest cat species.'] macros.each do |macro| para = block_from_string(macro) output = para.sub_macros(para.source) assert_equal sentence, output assert_equal 1, para.document.references[:indexterms].size assert_equal ['tiger'], para.document.references[:indexterms].first end end test 'a multi-line index term 2 macro should be compacted and registered as an index reference and retain term inline' do sentence = 'The panthera tigris is the largest cat species.' 
macros = ["The indexterm2:[ panthera\ntigris ] is the largest cat species.", "The (( panthera\ntigris )) is the largest cat species."] macros.each do |macro| para = block_from_string(macro) output = para.sub_macros(para.source) assert_equal sentence, output assert_equal 1, para.document.references[:indexterms].size assert_equal ['panthera tigris'], para.document.references[:indexterms].first end end test 'registers multiple index term 2 macros' do sentence = "The ((tiger)) (Panthera tigris) is the largest ((cat)) species." para = block_from_string(sentence) output = para.sub_macros(para.source) assert_equal 'The tiger (Panthera tigris) is the largest cat species.', output assert_equal 2, para.document.references[:indexterms].size assert_equal ['tiger'], para.document.references[:indexterms][0] assert_equal ['cat'], para.document.references[:indexterms][1] end test 'normal substitutions are performed on an index term 2 macro' do sentence = 'The ((*tiger*)) (Panthera tigris) is the largest cat species.' para = block_from_string sentence output = para.apply_normal_subs(para.lines) assert_equal 'The tiger (Panthera tigris) is the largest cat species.', output assert_equal 1, para.document.references[:indexterms].size assert_equal ['tiger'], para.document.references[:indexterms].first end test 'index term 2 macro with round bracket syntex should not interfer with index term macro with round bracket syntax' do sentence = "The ((panthera tigris)) is the largest cat species.\n(((Big cats,Tigers)))" para = block_from_string sentence output = para.sub_macros(para.source) assert_equal "The panthera tigris is the largest cat species.\n", output terms = para.document.references[:indexterms] assert_equal 2, terms.size assert_equal ['panthera tigris'], terms[0] assert_equal ['Big cats', 'Tigers'], terms[1] end context 'Button macro' do test 'btn macro' do para = block_from_string('btn:[Save]', :attributes => {'experimental' => ''}) assert_equal %q{Save}, para.sub_macros(para.source) end test 'btn macro for docbook backend' do para = block_from_string('btn:[Save]', :backend => 'docbook', :attributes => {'experimental' => ''}) assert_equal %q{Save}, para.sub_macros(para.source) end end context 'Keyboard macro' do test 'kbd macro with single key' do para = block_from_string('kbd:[F3]', :attributes => {'experimental' => ''}) assert_equal %q{F3}, para.sub_macros(para.source) end test 'kbd macro with single key, docbook backend' do para = block_from_string('kbd:[F3]', :backend => 'docbook', :attributes => {'experimental' => ''}) assert_equal %q{F3}, para.sub_macros(para.source) end test 'kbd macro with key combination' do para = block_from_string('kbd:[Ctrl+Shift+T]', :attributes => {'experimental' => ''}) assert_equal %q{Ctrl+Shift+T}, para.sub_macros(para.source) end test 'kbd macro with key combination with spaces' do para = block_from_string('kbd:[Ctrl + Shift + T]', :attributes => {'experimental' => ''}) assert_equal %q{Ctrl+Shift+T}, para.sub_macros(para.source) end test 'kbd macro with key combination delimited by commas' do para = block_from_string('kbd:[Ctrl,Shift,T]', :attributes => {'experimental' => ''}) assert_equal %q{Ctrl+Shift+T}, para.sub_macros(para.source) end test 'kbd macro with key combination containing a plus key no spaces' do para = block_from_string('kbd:[Ctrl++]', :attributes => {'experimental' => ''}) assert_equal %q{Ctrl++}, para.sub_macros(para.source) end test 'kbd macro with key combination delimited by commands containing a comma key' do para = block_from_string('kbd:[Ctrl,,]', 
:attributes => {'experimental' => ''}) assert_equal %q{Ctrl+,}, para.sub_macros(para.source) end test 'kbd macro with key combination containing a plus key with spaces' do para = block_from_string('kbd:[Ctrl + +]', :attributes => {'experimental' => ''}) assert_equal %q{Ctrl++}, para.sub_macros(para.source) end test 'kbd macro with key combination containing escaped bracket' do para = block_from_string('kbd:[Ctrl + \]]', :attributes => {'experimental' => ''}) assert_equal %q{Ctrl+]}, para.sub_macros(para.source) end test 'kbd macro with key combination, docbook backend' do para = block_from_string('kbd:[Ctrl+Shift+T]', :backend => 'docbook', :attributes => {'experimental' => ''}) assert_equal %q{CtrlShiftT}, para.sub_macros(para.source) end end context 'Menu macro' do test 'should process menu using macro syntax' do para = block_from_string('menu:File[]', :attributes => {'experimental' => ''}) assert_equal %q{File}, para.sub_macros(para.source) end test 'should process menu for docbook backend' do para = block_from_string('menu:File[]', :backend => 'docbook', :attributes => {'experimental' => ''}) assert_equal %q{File}, para.sub_macros(para.source) end test 'should process menu with menu item using macro syntax' do para = block_from_string('menu:File[Save As…]', :attributes => {'experimental' => ''}) assert_equal %q{File ▸ Save As…}, para.sub_macros(para.source) end test 'should process menu with menu item for docbook backend' do para = block_from_string('menu:File[Save As…]', :backend => 'docbook', :attributes => {'experimental' => ''}) assert_equal %q{File Save As…}, para.sub_macros(para.source) end test 'should process menu with menu item in submenu using macro syntax' do para = block_from_string('menu:Tools[Project > Build]', :attributes => {'experimental' => ''}) assert_equal %q{Tools ▸ Project ▸ Build}, para.sub_macros(para.source) end test 'should process menu with menu item in submenu for docbook backend' do para = block_from_string('menu:Tools[Project > Build]', :backend => 'docbook', :attributes => {'experimental' => ''}) assert_equal %q{Tools Project Build}, para.sub_macros(para.source) end test 'should process menu with menu item in submenu using macro syntax and comma delimiter' do para = block_from_string('menu:Tools[Project, Build]', :attributes => {'experimental' => ''}) assert_equal %q{Tools ▸ Project ▸ Build}, para.sub_macros(para.source) end test 'should process menu with menu item using inline syntax' do para = block_from_string('"File > Save As…"', :attributes => {'experimental' => ''}) assert_equal %q{File ▸ Save As…}, para.sub_macros(para.source) end test 'should process menu with menu item in submenu using inline syntax' do para = block_from_string('"Tools > Project > Build"', :attributes => {'experimental' => ''}) assert_equal %q{Tools ▸ Project ▸ Build}, para.sub_macros(para.source) end test 'inline syntax should not match closing quote of XML attribute' do para = block_from_string('<node>r', :attributes => {'experimental' => ''}) assert_equal %q{<node>r}, para.sub_macros(para.source) end test 'should process menu macro with items containing multibyte characters' do para = block_from_string('menu:视图[放大, 重置]', :attributes => {'experimental' => ''}) assert_equal %q{视图 ▸ 放大 ▸ 重置}, para.sub_macros(para.source) end if ::RUBY_MIN_VERSION_1_9 test 'should process inline menu with items containing multibyte characters' do para = block_from_string('"视图 > 放大 > 重置"', :attributes => {'experimental' => ''}) assert_equal %q{视图 ▸ 放大 ▸ 重置}, para.sub_macros(para.source) end if
::RUBY_MIN_VERSION_1_9 end end context 'Passthroughs' do test 'collect inline triple plus passthroughs' do para = block_from_string('+++inline code+++') result = para.extract_passthroughs(para.source) assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result assert_equal 1, para.passthroughs.size assert_equal 'inline code', para.passthroughs[0][:text] assert para.passthroughs[0][:subs].empty? end test 'collect multi-line inline triple plus passthroughs' do para = block_from_string("+++inline\ncode+++") result = para.extract_passthroughs(para.source) assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result assert_equal 1, para.passthroughs.size assert_equal "inline\ncode", para.passthroughs[0][:text] assert para.passthroughs[0][:subs].empty? end test 'collect inline double dollar passthroughs' do para = block_from_string('$${code}$$') result = para.extract_passthroughs(para.source) assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result assert_equal 1, para.passthroughs.size assert_equal '{code}', para.passthroughs[0][:text] assert_equal [:specialcharacters], para.passthroughs[0][:subs] end test 'collect inline double plus passthroughs' do para = block_from_string('++{code}++') result = para.extract_passthroughs(para.source) assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result assert_equal 1, para.passthroughs.size assert_equal '{code}', para.passthroughs[0][:text] assert_equal [:specialcharacters], para.passthroughs[0][:subs] end test 'collect multi-line inline double dollar passthroughs' do para = block_from_string("$$\n{code}\n$$") result = para.extract_passthroughs(para.source) assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result assert_equal 1, para.passthroughs.size assert_equal "\n{code}\n", para.passthroughs[0][:text] assert_equal [:specialcharacters], para.passthroughs[0][:subs] end test 'collect multi-line inline double plus passthroughs' do para = block_from_string("++\n{code}\n++") result = para.extract_passthroughs(para.source) assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result assert_equal 1, para.passthroughs.size assert_equal "\n{code}\n", para.passthroughs[0][:text] assert_equal [:specialcharacters], para.passthroughs[0][:subs] end test 'collect passthroughs from inline pass macro' do para = block_from_string(%Q{pass:specialcharacters,quotes[['code'\\]]}) result = para.extract_passthroughs(para.source) assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result assert_equal 1, para.passthroughs.size assert_equal %q{['code']}, para.passthroughs[0][:text] assert_equal [:specialcharacters, :quotes], para.passthroughs[0][:subs] end test 'collect multi-line passthroughs from inline pass macro' do para = block_from_string(%Q{pass:specialcharacters,quotes[['more\ncode'\\]]}) result = para.extract_passthroughs(para.source) assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result assert_equal 1, para.passthroughs.size assert_equal %Q{['more\ncode']}, para.passthroughs[0][:text] assert_equal [:specialcharacters, :quotes], para.passthroughs[0][:subs] end test 'resolves sub shorthands on inline pass macro' do para = block_from_string 'pass:q,a[*<{backend}>*]' result = para.extract_passthroughs para.source 
assert_equal 1, para.passthroughs.size assert_equal [:quotes, :attributes], para.passthroughs[0][:subs] result = para.restore_passthroughs result assert_equal '', result end # NOTE placeholder is surrounded by text to prevent reader from stripping trailing boundary char (unique to test scenario) test 'restore inline passthroughs without subs' do para = block_from_string("some #{Asciidoctor::Substitutors::PASS_START}" + '0' + "#{Asciidoctor::Substitutors::PASS_END} to study") para.passthroughs[0] = {:text => 'inline code', :subs => []} result = para.restore_passthroughs(para.source) assert_equal "some inline code to study", result end # NOTE placeholder is surrounded by text to prevent reader from stripping trailing boundary char (unique to test scenario) test 'restore inline passthroughs with subs' do para = block_from_string("some #{Asciidoctor::Substitutors::PASS_START}" + '0' + "#{Asciidoctor::Substitutors::PASS_END} to study in the #{Asciidoctor::Substitutors::PASS_START}" + '1' + "#{Asciidoctor::Substitutors::PASS_END} programming language") para.passthroughs[0] = {:text => '{code}', :subs => [:specialcharacters]} para.passthroughs[1] = {:text => '{language}', :subs => [:specialcharacters]} result = para.restore_passthroughs(para.source) assert_equal 'some <code>{code}</code> to study in the {language} programming language', result end test 'should restore nested passthroughs' do result = render_embedded_string %q(+Sometimes you feel pass:q[`mono`].+ Sometimes you +$$don't$$+.), :doctype => :inline assert_equal %q(Sometimes you feel mono. Sometimes you don't.), result end test 'should honor role on double plus passthrough' do result = render_embedded_string 'Print the version using [var]++{asciidoctor-version}++.', :doctype => :inline assert_equal 'Print the version using {asciidoctor-version}.', result end test 'complex inline passthrough macro' do text_to_escape = %q{[(] <'basic form'> <'logical operator'> <'basic form'> [)]} para = block_from_string %($$#{text_to_escape}$$) result = para.extract_passthroughs(para.source) assert_equal 1, para.passthroughs.size assert_equal text_to_escape, para.passthroughs[0][:text] text_to_escape_escaped = %q{[(\] <'basic form'> <'logical operator'> <'basic form'> [)\]} para = block_from_string %(pass:specialcharacters[#{text_to_escape_escaped}]) result = para.extract_passthroughs(para.source) assert_equal 1, para.passthroughs.size assert_equal text_to_escape, para.passthroughs[0][:text] end test 'inline pass macro with a composite sub' do para = block_from_string %(pass:verbatim[<{backend}>]) assert_equal '<{backend}>', para.content end context 'Math macros' do test 'should passthrough text in asciimath macro and surround with AsciiMath delimiters' do input = 'asciimath:[x/x={(1,if x!=0),(text{undefined},if x=0):}]' para = block_from_string input assert_equal '\$x/x={(1,if x!=0),(text{undefined},if x=0):}\$', para.content end test 'should not recognize asciimath macro with no content' do input = 'asciimath:[]' para = block_from_string input assert_equal 'asciimath:[]', para.content end test 'should perform specialcharacters subs on asciimath macro content in html backend by default' do input = 'asciimath:[a < b]' para = block_from_string input assert_equal '\$a < b\$', para.content end # NOTE this test doesn't work once AsciiMath has been loaded #test 'should not perform specialcharacters subs on asciimath macro content in docbook backend by default' do # input = 'asciimath:[a < b]' # para = block_from_string input, :backend => :docbook # 
para.document.converter.instance_variable_set :@asciimath_available, false # assert_equal '', para.content #end test 'should convert asciimath macro content to MathML when asciimath gem is available' do input = 'asciimath:[a < b]' para = block_from_string input, :backend => :docbook assert_equal 'a<b', para.content end test 'should honor explicit subslist on asciimath macro' do input = 'asciimath:attributes[{expr}]' para = block_from_string input, :attributes => {'expr' => 'x != 0'} assert_equal '\$x != 0\$', para.content end test 'should passthrough text in latexmath macro and surround with LaTeX math delimiters' do input = 'latexmath:[C = \alpha + \beta Y^{\gamma} + \epsilon]' para = block_from_string input assert_equal '\(C = \alpha + \beta Y^{\gamma} + \epsilon\)', para.content end test 'should not recognize latexmath macro with no content' do input = 'latexmath:[]' para = block_from_string input assert_equal 'latexmath:[]', para.content end test 'should perform specialcharacters subs on latexmath macro in html backend by default' do input = 'latexmath:[a < b]' para = block_from_string input assert_equal '\(a < b\)', para.content end test 'should not perform specialcharacters subs on latexmath macro content in docbook backend by default' do input = 'latexmath:[a < b]' para = block_from_string input, :backend => :docbook assert_equal '', para.content end test 'should honor explicit subslist on latexmath macro' do input = 'latexmath:attributes[{expr}]' para = block_from_string input, :attributes => {'expr' => '\sqrt{4} = 2'} assert_equal '\(\sqrt{4} = 2\)', para.content end test 'should passthrough math macro inside another passthrough' do input = 'the text `asciimath:[x = y]` should be passed through as +literal+ text' para = block_from_string input, :attributes => {'compat-mode' => ''} assert_equal 'the text asciimath:[x = y] should be passed through as literal text', para.content input = 'the text [x-]`asciimath:[x = y]` should be passed through as `literal` text' para = block_from_string input assert_equal 'the text asciimath:[x = y] should be passed through as literal text', para.content input = 'the text `+asciimath:[x = y]+` should be passed through as `literal` text' para = block_from_string input assert_equal 'the text asciimath:[x = y] should be passed through as literal text', para.content end test 'should not recognize stem macro with no content' do input = 'stem:[]' para = block_from_string input assert_equal input, para.content end test 'should passthrough text in stem macro and surround with AsciiMath delimiters if stem attribute != latexmath' do [ {}, {'stem' => ''}, {'stem' => 'asciimath'} ].each do |attributes| input = 'stem:[x/x={(1,if x!=0),(text{undefined},if x=0):}]' para = block_from_string input, :attributes => attributes assert_equal '\$x/x={(1,if x!=0),(text{undefined},if x=0):}\$', para.content end end test 'should passthrough text in stem macro and surround with LaTeX math delimiters if stem attribute = latexmath' do input = 'stem:[C = \alpha + \beta Y^{\gamma} + \epsilon]' para = block_from_string input, :attributes => {'stem' => 'latexmath'} assert_equal '\(C = \alpha + \beta Y^{\gamma} + \epsilon\)', para.content end test 'should find and replace placeholder duplicated by substitution' do input = %q(+first passthrough+ followed by link:$$http://example.com/__u_no_format_me__$$[] with passthrough) result = render_embedded_string input, :doctype => :inline assert_equal 'first passthrough followed by http://example.com/__u_no_format_me__ with passthrough', 
result end end end context 'Replacements' do test 'unescapes XML entities' do para = block_from_string '< " " " >' assert_equal '< " " " >', para.apply_normal_subs(para.lines) end test 'replaces arrows' do para = block_from_string '<- -> <= => \<- \-> \<= \=>' assert_equal '← → ⇐ ⇒ <- -> <= =>', para.apply_normal_subs(para.source) end test 'replaces dashes' do para = block_from_string %(-- foo foo--bar foo\\--bar foo -- bar foo \\-- bar stuff in between -- foo stuff in between foo -- stuff in between foo --) expected = ' — foo foo—​bar foo--bar foo — bar foo -- bar stuff in between — foo stuff in between foo — stuff in between foo — ' assert_equal expected, para.sub_replacements(para.source) end test 'replaces dashes between multibyte word characters' do para = block_from_string %(富--巴) expected = '富—​巴' assert_equal expected, para.sub_replacements(para.source) end if ::RUBY_MIN_VERSION_1_9 test 'replaces marks' do para = block_from_string '(C) (R) (TM) \(C) \(R) \(TM)' assert_equal '© ® ™ (C) (R) (TM)', para.sub_replacements(para.source) end test 'preserves entity references' do input = '& © ✔ 😀 • 😀' result = render_embedded_string input, :doctype => :inline assert_equal input, result end test 'only preserves named entities with two or more letters' do input = '& &a; >' result = render_embedded_string input, :doctype => :inline assert_equal '& &a; >', result end test 'replaces punctuation' do para = block_from_string %(John's Hideout is the Whites`' place... foo\\'bar) assert_equal "John’s Hideout is the Whites’ place…​ foo'bar", para.sub_replacements(para.source) end test 'should replace right single quote marks' do given = [ %(`'Twas the night), %(a `'57 Chevy!), %(the whites`' place), %(the whites`'.), %(the whites`'--where the wild things are), %(the whites`'\nhave), %(It's Mary`'s little lamb.), %(consecutive single quotes '' are not modified), %(he is 6' tall), %(\\`') ] expected = [ %(’Twas the night), %(a ’57 Chevy!), %(the whites’ place), %(the whites’.), %(the whites’--where the wild things are), %(the whites’\nhave), %(It’s Mary’s little lamb.), %(consecutive single quotes '' are not modified), %(he is 6' tall), %(`') ] given.size.times {|i| para = block_from_string given[i] assert_equal expected[i], para.sub_replacements(para.source) } end end context 'Post replacements' do test 'line break inserted after line with line break character' do para = block_from_string("First line +\nSecond line") result = para.apply_subs(para.lines, :post_replacements, true) assert_equal 'First line
', result.first end test 'line break inserted after line wrap with hardbreaks enabled' do para = block_from_string("First line\nSecond line", :attributes => {'hardbreaks' => ''}) result = para.apply_subs(para.lines, :post_replacements, true) assert_equal 'First line<br>', result.first end
test 'line break character stripped from end of line with hardbreaks enabled' do para = block_from_string("First line +\nSecond line", :attributes => {'hardbreaks' => ''}) result = para.apply_subs(para.lines, :post_replacements, true) assert_equal 'First line
    ', result.first end test 'line break not inserted for single line with hardbreaks enabled' do para = block_from_string('First line', :attributes => {'hardbreaks' => ''}) result = para.apply_subs(para.lines, :post_replacements, true) assert_equal 'First line', result.first end end context 'Resolve subs' do test 'should resolve subs for block' do block = Asciidoctor::Block.new(empty_document, :paragraph) block.attributes['subs'] = 'quotes,normal' block.lock_in_subs assert_equal [:quotes, :specialcharacters, :attributes, :replacements, :macros, :post_replacements], block.subs end test 'should resolve specialcharacters sub as highlight for source block when source highlighter is coderay' do doc = empty_document :attributes => {'source-highlighter' => 'coderay'} block = Asciidoctor::Block.new(doc, :listing, :content_model => :verbatim) block.style = 'source' block.attributes['subs'] = 'specialcharacters' block.attributes['language'] = 'ruby' block.lock_in_subs assert_equal [:highlight], block.subs end test 'should resolve specialcharacters sub as highlight for source block when source highlighter is pygments' do doc = empty_document :attributes => {'source-highlighter' => 'pygments'} block = Asciidoctor::Block.new(doc, :listing, :content_model => :verbatim) block.style = 'source' block.attributes['subs'] = 'specialcharacters' block.attributes['language'] = 'ruby' block.lock_in_subs assert_equal [:highlight], block.subs end test 'should not resolve specialcharacters sub as highlight for source block when source highlighter is not set' do doc = empty_document block = Asciidoctor::Block.new(doc, :listing, :content_model => :verbatim) block.style = 'source' block.attributes['subs'] = 'specialcharacters' block.attributes['language'] = 'ruby' block.lock_in_subs assert_equal [:specialcharacters], block.subs end test 'should not use subs if subs option passed to block constructor is nil' do doc = empty_document block = Asciidoctor::Block.new doc, :paragraph, :source => '*bold* _italic_', :subs => nil, :attributes => {'subs' => 'quotes'} assert block.subs.empty? block.lock_in_subs assert block.subs.empty? end test 'should not use subs if subs option passed to block constructor is empty array' do doc = empty_document block = Asciidoctor::Block.new doc, :paragraph, :source => '*bold* _italic_', :subs => [], :attributes => {'subs' => 'quotes'} assert block.subs.empty? block.lock_in_subs assert block.subs.empty? end test 'should use subs from subs option passed to block constructor' do doc = empty_document block = Asciidoctor::Block.new doc, :paragraph, :source => '*bold* _italic_', :subs => [:specialcharacters], :attributes => {'subs' => 'quotes'} assert_equal [:specialcharacters], block.subs block.lock_in_subs assert_equal [:specialcharacters], block.subs end test 'should use subs from subs attribute if subs option is not passed to block constructor' do doc = empty_document block = Asciidoctor::Block.new doc, :paragraph, :source => '*bold* _italic_', :attributes => {'subs' => 'quotes'} assert block.subs.empty? 
# in this case, we have to call lock_in_subs to resolve the subs block.lock_in_subs assert_equal [:quotes], block.subs end test 'should use subs from subs attribute if subs option passed to block constructor is :default' do doc = empty_document block = Asciidoctor::Block.new doc, :paragraph, :source => '*bold* _italic_', :subs => :default, :attributes => {'subs' => 'quotes'} assert_equal [:quotes], block.subs block.lock_in_subs assert_equal [:quotes], block.subs end test 'should use built-in subs if subs option passed to block constructor is :default and subs attribute is absent' do doc = empty_document block = Asciidoctor::Block.new doc, :paragraph, :source => '*bold* _italic_', :subs => :default assert_equal [:specialcharacters, :quotes, :attributes, :replacements, :macros, :post_replacements], block.subs block.lock_in_subs assert_equal [:specialcharacters, :quotes, :attributes, :replacements, :macros, :post_replacements], block.subs end end # TODO move to helpers_test.rb context 'Helpers' do test 'should URI encode non-word characters generally' do given = ' /%&?\\' expect = '%20%2F%25%26%3F%5C' assert_equal expect, (Asciidoctor::Helpers.encode_uri given) end test 'should not URI select non-word characters' do given = '-.!~*\';:@=+$,()[]' expect = given assert_equal expect, (Asciidoctor::Helpers.encode_uri given) end end end asciidoctor-1.5.5/test/tables_test.rb000066400000000000000000001202611277513741400176570ustar00rootroot00000000000000# encoding: UTF-8 unless defined? ASCIIDOCTOR_PROJECT_DIR $: << File.dirname(__FILE__); $:.uniq! require 'test_helper' end context 'Tables' do context 'PSV' do test 'renders simple psv table' do input = <<-EOS |======= |A |B |C |a |b |c |1 |2 |3 |======= EOS cells = [%w(A B C), %w(a b c), %w(1 2 3)] doc = document_from_string input, :header_footer => false table = doc.blocks[0] assert 100, table.columns.map {|col| col.attributes['colpcwidth'] }.reduce(:+) output = doc.convert assert_css 'table', output, 1 assert_css 'table.tableblock.frame-all.grid-all.spread', output, 1 assert_css 'table > colgroup > col[style*="width: 33.3333%"]', output, 2 assert_css 'table > colgroup > col:last-of-type[style*="width: 33.3334%"]', output, 1 assert_css 'table tr', output, 3 assert_css 'table > tbody > tr', output, 3 assert_css 'table td', output, 9 assert_css 'table > tbody > tr > td.tableblock.halign-left.valign-top > p.tableblock', output, 9 cells.each_with_index {|row, rowi| assert_css "table > tbody > tr:nth-child(#{rowi + 1}) > td", output, row.size assert_css "table > tbody > tr:nth-child(#{rowi + 1}) > td > p", output, row.size row.each_with_index {|cell, celli| assert_xpath "(//tr)[#{rowi + 1}]/td[#{celli + 1}]/p[text()='#{cell}']", output, 1 } } end test 'renders caption on simple psv table' do input = <<-EOS .Simple psv table |======= |A |B |C |a |b |c |1 |2 |3 |======= EOS output = render_embedded_string input assert_xpath '/table/caption[@class="title"][text()="Table 1. Simple psv table"]', output, 1 assert_xpath '/table/caption/following-sibling::colgroup', output, 1 end test 'only increments table counter for tables that have a title' do input = <<-EOS .First numbered table |======= |1 |2 |3 |======= |======= |4 |5 |6 |======= .Second numbered table |======= |7 |8 |9 |======= EOS output = render_embedded_string input assert_css 'table:root', output, 3 assert_xpath '(/table)[1]/caption', output, 1 assert_xpath '(/table)[1]/caption[text()="Table 1. 
First numbered table"]', output, 1 assert_xpath '(/table)[2]/caption', output, 0 assert_xpath '(/table)[3]/caption', output, 1 assert_xpath '(/table)[3]/caption[text()="Table 2. Second numbered table"]', output, 1 end test 'renders explicit caption on simple psv table' do input = <<-EOS [caption="All the Data. "] .Simple psv table |======= |A |B |C |a |b |c |1 |2 |3 |======= EOS output = render_embedded_string input assert_xpath '/table/caption[@class="title"][text()="All the Data. Simple psv table"]', output, 1 assert_xpath '/table/caption/following-sibling::colgroup', output, 1 end test 'ignores escaped separators' do input = <<-EOS |=== |A \\| here| a \\| there |=== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > tbody > tr', output, 1 assert_css 'table > tbody > tr > td', output, 2 assert_xpath '/table/tbody/tr/td[1]/p[text()="A | here"]', output, 1 assert_xpath '/table/tbody/tr/td[2]/p[text()="a | there"]', output, 1 end test 'preserves escaped delimiters at the end of the line' do input = <<-EOS [%header,cols="1,1"] |==== |A |B\\| |A1 |B1\\| |A2 |B2\\| |==== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > thead > tr', output, 1 assert_css 'table > thead > tr:nth-child(1) > th', output, 2 assert_xpath '/table/thead/tr[1]/th[2][text()="B|"]', output, 1 assert_css 'table > tbody > tr', output, 2 assert_css 'table > tbody > tr:nth-child(1) > td', output, 2 assert_xpath '/table/tbody/tr[1]/td[2]/p[text()="B1|"]', output, 1 assert_css 'table > tbody > tr:nth-child(2) > td', output, 2 assert_xpath '/table/tbody/tr[2]/td[2]/p[text()="B2|"]', output, 1 end test 'should treat trailing pipe as an empty cell' do input = <<-EOS |==== |A1 | |B1 |B2 |C1 |C2 |==== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > tbody > tr', output, 3 assert_xpath '/table/tbody/tr[1]/td', output, 2 assert_xpath '/table/tbody/tr[1]/td[1]/p[text()="A1"]', output, 1 assert_xpath '/table/tbody/tr[1]/td[2]/p', output, 0 assert_xpath '/table/tbody/tr[2]/td[1]/p[text()="B1"]', output, 1 end test 'should auto recover with warning if missing leading separator on first cell' do input = <<-EOS |=== A | here| a | there |=== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 4 assert_css 'table > tbody > tr', output, 1 assert_css 'table > tbody > tr > td', output, 4 assert_xpath '/table/tbody/tr/td[1]/p[text()="A"]', output, 1 assert_xpath '/table/tbody/tr/td[2]/p[text()="here"]', output, 1 assert_xpath '/table/tbody/tr/td[3]/p[text()="a"]', output, 1 assert_xpath '/table/tbody/tr/td[4]/p[text()="there"]', output, 1 end test 'performs normal substitutions on cell content' do input = <<-EOS :show_title: Cool new show |=== |{show_title} |Coming soon... 
|=== EOS output = render_embedded_string input assert_xpath '//tbody/tr/td[1]/p[text()="Cool new show"]', output, 1 assert_xpath %(//tbody/tr/td[2]/p[text()='Coming soon#{expand_entity 8230}#{expand_entity 8203}']), output, 1 end test 'table and col width not assigned when autowidth option is specified' do input = <<-EOS [options="autowidth"] |======= |A |B |C |a |b |c |1 |2 |3 |======= EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table[style*="width"]', output, 0 assert_css 'table colgroup col', output, 3 assert_css 'table colgroup col[width]', output, 0 end test 'explicit table width is used even when autowidth option is specified' do input = <<-EOS [%autowidth,width=75%] |======= |A |B |C |a |b |c |1 |2 |3 |======= EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table[style*="width"]', output, 1 assert_css 'table colgroup col', output, 3 assert_css 'table colgroup col[width]', output, 0 end test 'first row sets number of columns when not specified' do input = <<-EOS |==== |first |second |third |fourth |1 |2 |3 |4 |==== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 4 assert_css 'table > tbody > tr', output, 2 assert_css 'table > tbody > tr:nth-child(1) > td', output, 4 assert_css 'table > tbody > tr:nth-child(2) > td', output, 4 end test 'colspec attribute using asterisk syntax sets number of columns' do input = <<-EOS [cols="3*"] |=== |A |B |C |a |b |c |1 |2 |3 |=== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > tbody > tr', output, 3 end test 'table with explicit column count can have multiple rows on a single line' do input = <<-EOS [cols="3*"] |=== |one |two |1 |2 |a |b |=== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 3 assert_css 'table > tbody > tr', output, 2 end test 'table with explicit deprecated colspec syntax can have multiple rows on a single line' do input = <<-EOS [cols="3"] |=== |one |two |1 |2 |a |b |=== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 3 assert_css 'table > tbody > tr', output, 2 end test 'columns are added for empty records in colspec attribute' do input = <<-EOS [cols="<,"] |=== |one |two |1 |2 |a |b |=== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > tbody > tr', output, 3 end test 'cols attribute may include spaces' do input = <<-EOS [cols=" 1, 1 "] |=== |one |two |1 |2 |a |b |=== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'col[style="width: 50%;"]', output, 2 assert_css 'table > tbody > tr', output, 3 end test 'blank cols attribute should be ignored' do input = <<-EOS [cols=" "] |=== |one |two |1 |2 |a |b |=== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'col[style="width: 50%;"]', output, 2 assert_css 'table > tbody > tr', output, 3 end test 'empty cols attribute should be ignored' do input = <<-EOS [cols=""] |=== |one |two |1 |2 |a |b |=== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'col[style="width: 50%;"]', output, 2 assert_css 'table > tbody > tr', output, 3 end test 'table 
with header and footer' do input = <<-EOS [frame="topbot",options="header,footer"] |=== |Item |Quantity |Item 1 |1 |Item 2 |2 |Item 3 |3 |Total |6 |=== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > thead', output, 1 assert_css 'table > thead > tr', output, 1 assert_css 'table > thead > tr > th', output, 2 assert_css 'table > tfoot', output, 1 assert_css 'table > tfoot > tr', output, 1 assert_css 'table > tfoot > tr > td', output, 2 assert_css 'table > tbody', output, 1 assert_css 'table > tbody > tr', output, 3 end test 'table with header and footer docbook' do input = <<-EOS .Table with header, body and footer [frame="topbot",options="header,footer"] |=== |Item |Quantity |Item 1 |1 |Item 2 |2 |Item 3 |3 |Total |6 |=== EOS output = render_embedded_string input, :backend => 'docbook' assert_css 'table', output, 1 assert_css 'table[frame="topbot"]', output, 1 assert_css 'table > title', output, 1 assert_css 'table > tgroup', output, 1 assert_css 'table > tgroup[cols="2"]', output, 1 assert_css 'table > tgroup[cols="2"] > colspec', output, 2 assert_css 'table > tgroup[cols="2"] > colspec[colwidth="50*"]', output, 2 assert_css 'table > tgroup > thead', output, 1 assert_css 'table > tgroup > thead > row', output, 1 assert_css 'table > tgroup > thead > row > entry', output, 2 assert_css 'table > tgroup > thead > row > entry > simpara', output, 0 assert_css 'table > tgroup > tfoot', output, 1 assert_css 'table > tgroup > tfoot > row', output, 1 assert_css 'table > tgroup > tfoot > row > entry', output, 2 assert_css 'table > tgroup > tfoot > row > entry > simpara', output, 2 assert_css 'table > tgroup > tbody', output, 1 assert_css 'table > tgroup > tbody > row', output, 3 assert_css 'table > tgroup > tbody > row', output, 3 end test 'table with landscape orientation in DocBook' do ['orientation=landscape', '%rotate'].each do |attrs| input = <<-EOS [#{attrs}] |=== |Column A | Column B | Column C |=== EOS output = render_embedded_string input, :backend => 'docbook' assert_css 'informaltable', output, 1 assert_css 'informaltable[orient="land"]', output, 1 end end test 'table with implicit header row' do input = <<-EOS |=== |Column 1 |Column 2 |Data A1 |Data B1 |Data A2 |Data B2 |=== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > thead', output, 1 assert_css 'table > thead > tr', output, 1 assert_css 'table > thead > tr > th', output, 2 assert_css 'table > tbody', output, 1 assert_css 'table > tbody > tr', output, 2 end test 'table with implicit header row when other options set' do input = <<-EOS [%autowidth] |=== |Column 1 |Column 2 |Data A1 |Data B1 |=== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table[style*="width"]', output, 0 assert_css 'table > colgroup > col', output, 2 assert_css 'table > thead', output, 1 assert_css 'table > thead > tr', output, 1 assert_css 'table > thead > tr > th', output, 2 assert_css 'table > tbody', output, 1 assert_css 'table > tbody > tr', output, 1 end test 'no implicit header row if second line not blank' do input = <<-EOS |=== |Column 1 |Column 2 |Data A1 |Data B1 |Data A2 |Data B2 |=== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > thead', output, 0 assert_css 'table > tbody', output, 1 assert_css 'table > tbody > tr', output, 3 end test 'no 
implicit header row if first line blank' do input = <<-EOS |=== |Column 1 |Column 2 |Data A1 |Data B1 |Data A2 |Data B2 |=== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > thead', output, 0 assert_css 'table > tbody', output, 1 assert_css 'table > tbody > tr', output, 3 end test 'no implicit header row if noheader option is specified' do input = <<-EOS [%noheader] |=== |Column 1 |Column 2 |Data A1 |Data B1 |Data A2 |Data B2 |=== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > thead', output, 0 assert_css 'table > tbody', output, 1 assert_css 'table > tbody > tr', output, 3 end test 'styles not applied to header cells' do input = <<-EOS [cols="1h,1s,1e",options="header,footer"] |==== |Name |Occupation| Website |Octocat |Social coding| http://github.com |Name |Occupation| Website |==== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > thead > tr > th', output, 3 assert_css 'table > thead > tr > th > *', output, 0 assert_css 'table > tfoot > tr > th', output, 1 assert_css 'table > tfoot > tr > td', output, 2 assert_css 'table > tfoot > tr > td > p > strong', output, 1 assert_css 'table > tfoot > tr > td > p > em', output, 1 assert_css 'table > tbody > tr > th', output, 1 assert_css 'table > tbody > tr > td', output, 2 assert_css 'table > tbody > tr > td > p.header', output, 0 assert_css 'table > tbody > tr > td > p > strong', output, 1 assert_css 'table > tbody > tr > td > p > em > a', output, 1 end test 'vertical table headers use th element instead of header class' do input = <<-EOS [cols="1h,1s,1e"] |==== |Name |Occupation| Website |Octocat |Social coding| http://github.com |Name |Occupation| Website |==== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > tbody > tr > th', output, 3 assert_css 'table > tbody > tr > td', output, 6 assert_css 'table > tbody > tr .header', output, 0 assert_css 'table > tbody > tr > td > p > strong', output, 3 assert_css 'table > tbody > tr > td > p > em', output, 3 assert_css 'table > tbody > tr > td > p > em > a', output, 1 end test 'supports horizontal and vertical source data with blank lines and table header' do input = <<-EOS .Horizontal and vertical source data [width="80%",cols="3,^2,^2,10",options="header"] |=== |Date |Duration |Avg HR |Notes |22-Aug-08 |10:24 | 157 | Worked out MSHR (max sustainable heart rate) by going hard for this interval. |22-Aug-08 |23:03 | 152 | Back-to-back with previous interval. |24-Aug-08 |40:00 | 145 | Moderately hard interspersed with 3x 3min intervals (2 min hard + 1 min really hard taking the HR up to 160). I am getting in shape! |=== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table[style*="width: 80%"]', output, 1 assert_xpath '/table/caption[@class="title"][text()="Table 1. 
Horizontal and vertical source data"]', output, 1 assert_css 'table > colgroup > col', output, 4 assert_css 'table > colgroup > col:nth-child(1)[@style*="width: 17.647%"]', output, 1 assert_css 'table > colgroup > col:nth-child(2)[@style*="width: 11.7647%"]', output, 1 assert_css 'table > colgroup > col:nth-child(3)[@style*="width: 11.7647%"]', output, 1 assert_css 'table > colgroup > col:nth-child(4)[@style*="width: 58.8236%"]', output, 1 assert_css 'table > thead', output, 1 assert_css 'table > thead > tr', output, 1 assert_css 'table > thead > tr > th', output, 4 assert_css 'table > tbody > tr', output, 3 assert_css 'table > tbody > tr:nth-child(1) > td', output, 4 assert_css 'table > tbody > tr:nth-child(2) > td', output, 4 assert_css 'table > tbody > tr:nth-child(3) > td', output, 4 assert_xpath "/table/tbody/tr[1]/td[4]/p[text()='Worked out MSHR (max sustainable heart rate) by going hard\nfor this interval.']", output, 1 assert_css 'table > tbody > tr:nth-child(3) > td:nth-child(4) > p', output, 2 assert_xpath '/table/tbody/tr[3]/td[4]/p[2][text()="I am getting in shape!"]', output, 1 end test 'percentages as column widths' do input = <<-EOS [cols="<.^10%,<90%"] |=== |column A |column B |=== EOS output = render_embedded_string input assert_xpath '/table/colgroup/col', output, 2 assert_xpath '(/table/colgroup/col)[1][@style="width: 10%;"]', output, 1 assert_xpath '(/table/colgroup/col)[2][@style="width: 90%;"]', output, 1 end test 'spans, alignments and styles' do input = <<-EOS [cols="e,m,^,>s",width="25%"] |=== |1 >s|2 |3 |4 ^|5 2.2+^.^|6 .3+<.>m|7 ^|8 d|9 2+>|10 |=== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup > col[style*="width: 25%"]', output, 4 assert_css 'table > tbody > tr', output, 4 assert_css 'table > tbody > tr > td', output, 10 assert_css 'table > tbody > tr:nth-child(1) > td', output, 4 assert_css 'table > tbody > tr:nth-child(2) > td', output, 3 assert_css 'table > tbody > tr:nth-child(3) > td', output, 1 assert_css 'table > tbody > tr:nth-child(4) > td', output, 2 assert_css 'table > tbody > tr:nth-child(1) > td:nth-child(1).halign-left.valign-top p em', output, 1 assert_css 'table > tbody > tr:nth-child(1) > td:nth-child(2).halign-right.valign-top p strong', output, 1 assert_css 'table > tbody > tr:nth-child(1) > td:nth-child(3).halign-center.valign-top p', output, 1 assert_css 'table > tbody > tr:nth-child(1) > td:nth-child(3).halign-center.valign-top p *', output, 0 assert_css 'table > tbody > tr:nth-child(1) > td:nth-child(4).halign-right.valign-top p strong', output, 1 assert_css 'table > tbody > tr:nth-child(2) > td:nth-child(1).halign-center.valign-top p em', output, 1 assert_css 'table > tbody > tr:nth-child(2) > td:nth-child(2).halign-center.valign-middle[colspan="2"][rowspan="2"] p code', output, 1 assert_css 'table > tbody > tr:nth-child(2) > td:nth-child(3).halign-left.valign-bottom[rowspan="3"] p code', output, 1 assert_css 'table > tbody > tr:nth-child(3) > td:nth-child(1).halign-center.valign-top p em', output, 1 assert_css 'table > tbody > tr:nth-child(4) > td:nth-child(1).halign-left.valign-top p', output, 1 assert_css 'table > tbody > tr:nth-child(4) > td:nth-child(1).halign-left.valign-top p em', output, 0 assert_css 'table > tbody > tr:nth-child(4) > td:nth-child(2).halign-right.valign-top[colspan="2"] p code', output, 1 end test 'sets up columns correctly if first row has cell that spans columns' do input = <<-EOS |=== 2+^|AAA |CCC |AAA |BBB |CCC |AAA |BBB |CCC |=== EOS output = 
render_embedded_string input assert_css 'table > tbody > tr:nth-child(1) > td', output, 2 assert_css 'table > tbody > tr:nth-child(1) > td:nth-child(1)[colspan="2"]', output, 1 assert_css 'table > tbody > tr:nth-child(1) > td:nth-child(2):not([colspan])', output, 1 assert_css 'table > tbody > tr:nth-child(2) > td:not([colspan])', output, 3 assert_css 'table > tbody > tr:nth-child(3) > td:not([colspan])', output, 3 end test 'supports repeating cells' do input = <<-EOS |=== 3*|A |1 3*|2 |b |c |=== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 3 assert_css 'table > tbody > tr', output, 3 assert_css 'table > tbody > tr:nth-child(1) > td', output, 3 assert_css 'table > tbody > tr:nth-child(2) > td', output, 3 assert_css 'table > tbody > tr:nth-child(3) > td', output, 3 assert_xpath '/table/tbody/tr[1]/td[1]/p[text()="A"]', output, 1 assert_xpath '/table/tbody/tr[1]/td[2]/p[text()="A"]', output, 1 assert_xpath '/table/tbody/tr[1]/td[3]/p[text()="A"]', output, 1 assert_xpath '/table/tbody/tr[2]/td[1]/p[text()="1"]', output, 1 assert_xpath '/table/tbody/tr[2]/td[2]/p[text()="2"]', output, 1 assert_xpath '/table/tbody/tr[2]/td[3]/p[text()="2"]', output, 1 assert_xpath '/table/tbody/tr[3]/td[1]/p[text()="2"]', output, 1 assert_xpath '/table/tbody/tr[3]/td[2]/p[text()="b"]', output, 1 assert_xpath '/table/tbody/tr[3]/td[3]/p[text()="c"]', output, 1 end test 'calculates colnames correctly when using implicit column count and single cell with colspan' do input = <<-EOS |=== 2+|Two Columns |One Column |One Column |=== EOS output = render_embedded_string input, :backend => 'docbook' assert_xpath '//colspec', output, 2 assert_xpath '(//colspec)[1][@colname="col_1"]', output, 1 assert_xpath '(//colspec)[2][@colname="col_2"]', output, 1 assert_xpath '//row', output, 2 assert_xpath '(//row)[1]/entry', output, 1 assert_xpath '(//row)[1]/entry[@namest="col_1"][@nameend="col_2"]', output, 1 end test 'calculates colnames correctly when using implicit column count and cells with mixed colspans' do input = <<-EOS |=== 2+|Two Columns | One Column |One Column |One Column |One Column |=== EOS output = render_embedded_string input, :backend => 'docbook' assert_xpath '//colspec', output, 3 assert_xpath '(//colspec)[1][@colname="col_1"]', output, 1 assert_xpath '(//colspec)[2][@colname="col_2"]', output, 1 assert_xpath '(//colspec)[3][@colname="col_3"]', output, 1 assert_xpath '//row', output, 2 assert_xpath '(//row)[1]/entry', output, 2 assert_xpath '(//row)[1]/entry[@namest="col_1"][@nameend="col_2"]', output, 1 assert_xpath '(//row)[2]/entry[@namest]', output, 0 assert_xpath '(//row)[2]/entry[@nameend]', output, 0 end test 'assigns unique column names for table with implicit column count and colspans in first row' do input = <<-EOS |==== | 2+| Node 0 2+| Node 1 | Host processes | Core 0 | Core 1 | Core 4 | Core 5 | Guest processes | Core 2 | Core 3 | Core 6 | Core 7 |==== EOS output = render_embedded_string input, :backend => 'docbook' assert_xpath '//colspec', output, 5 (1..5).each do |n| assert_xpath %((//colspec)[#{n}][@colname="col_#{n}"]), output, 1 end assert_xpath '(//row)[1]/entry', output, 3 assert_xpath '((//row)[1]/entry)[1][@namest]', output, 0 assert_xpath '((//row)[1]/entry)[1][@namend]', output, 0 assert_xpath '((//row)[1]/entry)[2][@namest="col_2"][@nameend="col_3"]', output, 1 assert_xpath '((//row)[1]/entry)[3][@namest="col_4"][@nameend="col_5"]', output, 1 end test 'ignores cell with colspan that exceeds colspec' do input = 
<<-EOS [cols="1,1"] |=== 3+|A |B a|C more C |=== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table *', output, 0 end test 'paragraph, verse and literal content' do input = <<-EOS [cols=",^v,^l",options="header"] |=== |Paragraphs |Verse |Literal 3*|The discussion about what is good, what is beautiful, what is noble, what is pure, and what is true could always go on. Why is that important? Why would I like to do that? Because that's the only conversation worth having. And whether it goes on or not after I die, I don't know. But, I do know that it is the conversation I want to have while I am still alive. Which means that to me the offer of certainty, the offer of complete security, the offer of an impermeable faith that can't give way is an offer of something not worth having. I want to live my life taking the risk all the time that I don't know anything like enough yet... that I haven't understood enough... that I can't know enough... that I am always hungrily operating on the margins of a potentially great harvest of future knowledge and wisdom. I wouldn't have it any other way. |=== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 3 assert_css 'table > thead', output, 1 assert_css 'table > thead > tr', output, 1 assert_css 'table > thead > tr > th', output, 3 assert_css 'table > tbody', output, 1 assert_css 'table > tbody > tr', output, 1 assert_css 'table > tbody > tr > td', output, 3 assert_css 'table > tbody > tr > td:nth-child(1).halign-left.valign-top > p.tableblock', output, 7 assert_css 'table > tbody > tr > td:nth-child(2).halign-center.valign-top > div.verse', output, 1 verse = xmlnodes_at_css 'table > tbody > tr > td:nth-child(2).halign-center.valign-top > div.verse', output, 1 assert_equal 26, verse.text.lines.entries.size assert_css 'table > tbody > tr > td:nth-child(3).halign-center.valign-top > div.literal > pre', output, 1 literal = xmlnodes_at_css 'table > tbody > tr > td:nth-child(3).halign-center.valign-top > div.literal > pre', output, 1 assert_equal 26, literal.text.lines.entries.size end test 'basic asciidoc cell' do input = <<-EOS |=== a|-- NOTE: content content -- |=== EOS result = render_embedded_string input assert_css 'table.tableblock', result, 1 assert_css 'table.tableblock td.tableblock', result, 1 assert_css 'table.tableblock td.tableblock .openblock', result, 1 assert_css 'table.tableblock td.tableblock .openblock .admonitionblock', result, 1 assert_css 'table.tableblock td.tableblock .openblock .paragraph', result, 1 end test 'doctype can be set in asciidoc table cell' do input = <<-EOS |=== a| :doctype: inline content |=== EOS result = render_embedded_string input assert_css 'table.tableblock', result, 1 assert_css 'table.tableblock .paragraph', result, 0 end test 'compat mode can be activated in asciidoc table cell' do input = <<-EOS |=== a| :compat-mode: 'italic' |=== EOS result = render_embedded_string input assert_css 'table.tableblock td em', result, 1 end test 'asciidoc content' do input = <<-EOS [cols="1e,1,5a",frame="topbot",options="header"] |=== |Name |Backends |Description |badges |xhtml11, html5 | Link badges ('XHTML 1.1' and 'CSS') in document footers. NOTE: The path names of images, icons and scripts are relative path names to the output document not the source document. 
|[[X97]] docinfo, docinfo1, docinfo2 |All backends | These three attributes control which document information files will be included in the header of the output file: docinfo:: Include `<filename>-docinfo.<ext>` docinfo1:: Include `docinfo.<ext>` docinfo2:: Include `docinfo.<ext>` and `<filename>-docinfo.<ext>` Where `<filename>` is the file name (sans extension) of the AsciiDoc input file and `<ext>` is `.html` for HTML outputs or `.xml` for DocBook outputs. If the input file is the standard input then the output file name is used. |=== EOS doc = document_from_string input table = doc.blocks.first assert !table.nil? tbody = table.rows.body assert_equal 2, tbody.size body_cell_1_3 = tbody[0][2] assert !body_cell_1_3.inner_document.nil? assert body_cell_1_3.inner_document.nested? assert_equal doc, body_cell_1_3.inner_document.parent_document assert_equal doc.converter, body_cell_1_3.inner_document.converter output = doc.render assert_css 'table > tbody > tr', output, 2 assert_css 'table > tbody > tr:nth-child(1) > td:nth-child(3) div.admonitionblock', output, 1 assert_css 'table > tbody > tr:nth-child(2) > td:nth-child(3) div.dlist', output, 1 end test 'preprocessor directive on first line of an AsciiDoc table cell should be processed' do input = <<-EOS |=== a|include::fixtures/include-file.asciidoc[] |=== EOS output = render_embedded_string input, :safe => :safe, :base_dir => File.dirname(__FILE__) assert_match(/included content/, output) end test 'cross reference link in an AsciiDoc table cell should resolve to reference in main document' do input = <<-EOS == Some |=== a|See <<_more>> |=== == More content EOS result = render_string input assert_xpath '//a[@href="#_more"]', result, 1 assert_xpath '//a[@href="#_more"][text()="More"]', result, 1 end test 'footnotes should not be shared between an AsciiDoc table cell and the main document' do input = <<-EOS |=== a|AsciiDoc footnote:[A lightweight markup language.] 
|=== EOS result = render_string input assert_css '#_footnote_1', result, 1 end test 'callout numbers should be globally unique, including AsciiDoc table cells' do input = <<-EOS = Document Title == Section 1 |==== a| [source, yaml] ---- key: value <1> ---- <1> First callout |==== == Section 2 |==== a| [source, yaml] ---- key: value <1> ---- <1> Second callout |==== == Section 3 [source, yaml] ---- key: value <1> ---- <1> Third callout EOS result = render_string input, :backend => 'docbook' conums = xmlnodes_at_xpath '//co', result assert_equal 3, conums.size ['CO1-1', 'CO2-1', 'CO3-1'].each_with_index do |conum, idx| assert_equal conum, conums[idx].attribute('xml:id').value end callouts = xmlnodes_at_xpath '//callout', result assert_equal 3, callouts.size ['CO1-1', 'CO2-1', 'CO3-1'].each_with_index do |callout, idx| assert_equal callout, callouts[idx].attribute('arearefs').value end end test 'nested table' do input = <<-EOS [cols="1,2a"] |=== |Normal cell |Cell with nested table [cols="2,1"] !=== !Nested table cell 1 !Nested table cell 2 !=== |=== EOS output = render_embedded_string input assert_css 'table', output, 2 assert_css 'table table', output, 1 assert_css 'table table', output, 1 assert_css 'table > tbody > tr > td:nth-child(2) table', output, 1 assert_css 'table > tbody > tr > td:nth-child(2) table > tbody > tr > td', output, 2 end test 'toc from parent document should not be included in an AsciiDoc table cell' do input = <<-EOS = Document Title :toc: == Section A |=== a|AsciiDoc content |=== EOS output = render_string input assert_css '.toc', output, 1 assert_css 'table .toc', output, 0 end test 'should be able to enable toc in an AsciiDoc table cell' do input = <<-EOS = Document Title == Section A |=== a| = Subdocument Title :toc: == Subdocument Section A content |=== EOS output = render_string input assert_css '.toc', output, 1 assert_css 'table .toc', output, 1 end test 'should be able to enable toc in both outer document and in an AsciiDoc table cell' do input = <<-EOS = Document Title :toc: == Section A |=== a| = Subdocument Title :toc: macro [#table-cell-toc] toc::[] == Subdocument Section A content |=== EOS output = render_string input assert_css '.toc', output, 2 assert_css '#toc', output, 1 assert_css 'table .toc', output, 1 assert_css 'table #table-cell-toc', output, 1 end test 'document in an AsciiDoc table cell should not see doctitle of parent' do input = <<-EOS = Document Title [cols="1a"] |=== |AsciiDoc content |=== EOS output = render_string input assert_css 'table', output, 1 assert_css 'table > tbody > tr > td', output, 1 assert_css 'table > tbody > tr > td #preamble', output, 0 assert_css 'table > tbody > tr > td .paragraph', output, 1 end test 'cell background color' do input = <<-EOS [cols="1e,1", options="header"] |=== |{set:cellbgcolor:green}green |{set:cellbgcolor!} plain |{set:cellbgcolor:red}red |{set:cellbgcolor!} plain |=== EOS output = render_embedded_string input assert_xpath '(/table/thead/tr/th)[1][@style="background-color: green;"]', output, 1 assert_xpath '(/table/thead/tr/th)[2][@style="background-color: green;"]', output, 0 assert_xpath '(/table/tbody/tr/td)[1][@style="background-color: red;"]', output, 1 assert_xpath '(/table/tbody/tr/td)[2][@style="background-color: green;"]', output, 0 end end context 'DSV' do test 'renders simple dsv table' do input = <<-EOS [width="75%",format="dsv"] |=== root:x:0:0:root:/root:/bin/bash bin:x:1:1:bin:/bin:/sbin/nologin mysql:x:27:27:MySQL\\:Server:/var/lib/mysql:/bin/bash 
gdm:x:42:42::/var/lib/gdm:/sbin/nologin sshd:x:74:74:Privilege-separated SSH:/var/empty/sshd:/sbin/nologin nobody:x:99:99:Nobody:/:/sbin/nologin |=== EOS doc = document_from_string input, :header_footer => false table = doc.blocks[0] assert 100, table.columns.map {|col| col.attributes['colpcwidth'] }.reduce(:+) output = doc.convert assert_css 'table', output, 1 assert_css 'table > colgroup > col[style*="width: 14.2857"]', output, 6 assert_css 'table > colgroup > col:last-of-type[style*="width: 14.2858%"]', output, 1 assert_css 'table > tbody > tr', output, 6 assert_xpath '//tr[4]/td[5]/p/text()', output, 0 assert_xpath '//tr[3]/td[5]/p[text()="MySQL:Server"]', output, 1 end test 'dsv format shorthand' do input = <<-EOS :=== a:b:c 1:2:3 :=== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 3 assert_css 'table > tbody > tr', output, 2 assert_css 'table > tbody > tr:nth-child(1) > td', output, 3 assert_css 'table > tbody > tr:nth-child(2) > td', output, 3 end test 'single cell in DSV table should only produce single row' do input = <<-EOS :=== single cell :=== EOS output = render_embedded_string input assert_css 'table td', output, 1 end test 'should treat trailing colon as an empty cell' do input = <<-EOS :==== A1: B1:B2 C1:C2 :==== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > tbody > tr', output, 3 assert_xpath '/table/tbody/tr[1]/td', output, 2 assert_xpath '/table/tbody/tr[1]/td[1]/p[text()="A1"]', output, 1 assert_xpath '/table/tbody/tr[1]/td[2]/p', output, 0 assert_xpath '/table/tbody/tr[2]/td[1]/p[text()="B1"]', output, 1 end end context 'CSV' do test 'should treat trailing comma as an empty cell' do input = <<-EOS ,==== A1, B1,B2 C1,C2 ,==== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > tbody > tr', output, 3 assert_xpath '/table/tbody/tr[1]/td', output, 2 assert_xpath '/table/tbody/tr[1]/td[1]/p[text()="A1"]', output, 1 assert_xpath '/table/tbody/tr[1]/td[2]/p', output, 0 assert_xpath '/table/tbody/tr[2]/td[1]/p[text()="B1"]', output, 1 end test 'mixed unquoted records and quoted records with escaped quotes, commas and wrapped lines' do input = <<-EOS [format="csv",options="header"] |=== Year,Make,Model,Description,Price 1997,Ford,E350,"ac, abs, moon",3000.00 1999,Chevy,"Venture ""Extended Edition""","",4900.00 1999,Chevy,"Venture ""Extended Edition, Very Large""",,5000.00 1996,Jeep,Grand Cherokee,"MUST SELL! air, moon roof, loaded",4799.00 |=== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup > col[style*="width: 20%"]', output, 5 assert_css 'table > thead > tr', output, 1 assert_css 'table > tbody > tr', output, 4 assert_xpath '((//tbody/tr)[1]/td)[4]/p[text()="ac, abs, moon"]', output, 1 assert_xpath %(((//tbody/tr)[2]/td)[3]/p[text()='Venture "Extended Edition"']), output, 1 assert_xpath '((//tbody/tr)[4]/td)[4]/p[text()="MUST SELL! 
air, moon roof, loaded"]', output, 1 end test 'csv format shorthand' do input = <<-EOS ,=== a,b,c 1,2,3 ,=== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 3 assert_css 'table > tbody > tr', output, 2 assert_css 'table > tbody > tr:nth-child(1) > td', output, 3 assert_css 'table > tbody > tr:nth-child(2) > td', output, 3 end test 'custom separator' do input = <<-EOS [format="csv", separator=";"] |=== a;b;c 1;2;3 |=== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 3 assert_css 'table > tbody > tr', output, 2 assert_css 'table > tbody > tr:nth-child(1) > td', output, 3 assert_css 'table > tbody > tr:nth-child(2) > td', output, 3 end test 'custom separator for an AsciiDoc table cell' do input = <<-EOS [cols=2,separator=!] |=== !Pipe output to vim a! ---- asciidoctor -o - -s test.adoc | view - ---- |=== EOS output = render_embedded_string input assert_css 'table', output, 1 assert_css 'table > colgroup > col', output, 2 assert_css 'table > tbody > tr', output, 1 assert_css 'table > tbody > tr:nth-child(1) > td', output, 2 assert_css 'table > tbody > tr:nth-child(1) > td:nth-child(1) p', output, 1 assert_css 'table > tbody > tr:nth-child(1) > td:nth-child(2) .listingblock', output, 1 end test 'single cell in CSV table should only produce single row' do input = <<-EOS ,=== single cell ,=== EOS output = render_embedded_string input assert_css 'table td', output, 1 end test 'table with breakable db45' do input = <<-EOS .Table with breakable [options="breakable"] |=== |Item |Quantity |Item 1 |1 |=== EOS output = render_embedded_string input, :backend => 'docbook45' assert output.include?('') end test 'table with breakable db5' do input = <<-EOS .Table with breakable [options="breakable"] |=== |Item |Quantity |Item 1 |1 |=== EOS output = render_embedded_string input, :backend => 'docbook5' assert output.include?('') end test 'table with unbreakable db5' do input = <<-EOS .Table with unbreakable [options="unbreakable"] |=== |Item |Quantity |Item 1 |1 |=== EOS output = render_embedded_string input, :backend => 'docbook5' assert output.include?('') end test 'table with unbreakable db45' do input = <<-EOS .Table with unbreakable [options="unbreakable"] |=== |Item |Quantity |Item 1 |1 |=== EOS output = render_embedded_string input, :backend => 'docbook45' assert output.include?('') end end end asciidoctor-1.5.5/test/test_helper.rb000066400000000000000000000256231277513741400176720ustar00rootroot00000000000000# encoding: UTF-8 ASCIIDOCTOR_PROJECT_DIR = File.dirname File.dirname(__FILE__) Dir.chdir ASCIIDOCTOR_PROJECT_DIR if RUBY_VERSION < '1.9' require 'rubygems' end require 'simplecov' if ENV['COVERAGE'] == 'true' require File.join(ASCIIDOCTOR_PROJECT_DIR, 'lib', 'asciidoctor') require 'socket' require 'nokogiri' require 'tmpdir' autoload :FileUtils, 'fileutils' autoload :Pathname, 'pathname' RE_XMLNS_ATTRIBUTE = / xmlns="[^"]+"/ RE_DOCTYPE = /\s* "<" # # Returns the String entity expanded to its equivalent UTF-8 glyph def expand_entity(number) [number].pack('U*') end alias :entity :expand_entity def invoke_cli_with_filenames(argv = [], filenames = [], &block) filepaths = Array.new filenames.each { |filename| if filenames.nil?|| ::Pathname.new(filename).absolute? 
filepaths.push(filename) else filepaths.push(File.join(File.dirname(__FILE__), 'fixtures', filename)) end } invoker = Asciidoctor::Cli::Invoker.new(argv + filepaths) invoker.invoke!(&block) invoker end def invoke_cli_to_buffer(argv = [], filename = 'sample.asciidoc', &block) invoke_cli(argv, filename, [StringIO.new, StringIO.new], &block) end def invoke_cli(argv = [], filename = 'sample.asciidoc', buffers = nil, &block) if filename.nil? || filename == '-' || ::Pathname.new(filename).absolute? filepath = filename else filepath = File.join(File.dirname(__FILE__), 'fixtures', filename) end invoker = Asciidoctor::Cli::Invoker.new(argv + [filepath]) if buffers invoker.redirect_streams(*buffers) end invoker.invoke!(&block) invoker end def redirect_streams old_stdout, $stdout = $stdout, (tmp_stdout = ::StringIO.new) old_stderr, $stderr = $stderr, (tmp_stderr = ::StringIO.new) begin yield tmp_stdout, tmp_stderr ensure $stdout = old_stdout $stderr = old_stderr end end def resolve_localhost (RUBY_VERSION < '1.9' || RUBY_ENGINE == 'rbx') ? Socket.gethostname : Socket.ip_address_list.find {|addr| addr.ipv4? }.ip_address end def using_test_webserver host = resolve_localhost, port = 9876 server = TCPServer.new host, port base_dir = File.expand_path File.dirname __FILE__ t = Thread.new do while (session = server.accept) request = session.gets resource = nil if (m = /GET (\S+) HTTP\/1\.1$/.match(request.chomp)) resource = (resource = m[1]) == '' ? '.' : resource else session.print %(HTTP/1.1 405 Method Not Allowed\r\nContent-Type: text/plain\r\n\r\n) session.print %(405 - Method not allowed\n) session.close break end if resource == '/name/asciidoctor' session.print %(HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n\r\n) session.print %({"name": "asciidoctor"}\n) elsif File.file?(resource_file = (File.join base_dir, resource)) mimetype = if (ext = ::File.extname(resource_file)[1..-1]) ext == 'adoc' ? 'text/plain' : %(image/#{ext}) else 'text/plain' end session.print %(HTTP/1.1 200 OK\r\nContent-Type: #{mimetype}\r\n\r\n) File.open resource_file, 'rb' do |fd| until fd.eof? do buffer = fd.read 256 session.write buffer end end else session.print %(HTTP/1.1 404 File Not Found\r\nContent-Type: text/plain\r\n\r\n) session.print %(404 - Resource not found.\n) end session.close end end begin yield ensure begin server.shutdown # "Errno::ENOTCONN: Socket is not connected' is reported on some platforms; call #close instead of #shutdown rescue Errno::ENOTCONN server.close end t.exit end end end ### # # Context goodness provided by @citrusbyte's contest. # See https://github.com/citrusbyte/contest # ### # Contest adds +teardown+, +test+ and +context+ as class methods, and the # instance methods +setup+ and +teardown+ now iterate on the corresponding # blocks. Note that all setup and teardown blocks must be defined with the # block syntax. Adding setup or teardown instance methods defeats the purpose # of this library. class Minitest::Test def self.setup(&block) define_method :setup do super(&block) instance_eval(&block) end end def self.teardown(&block) define_method :teardown do instance_eval(&block) super(&block) end end def self.context(*name, &block) subclass = Class.new(self) remove_tests(subclass) subclass.class_eval(&block) if block_given? 
const_set(context_name(name.join(" ")), subclass) end def self.test(name, &block) define_method(test_name(name), &block) end class << self alias_method :should, :test alias_method :describe, :context end private def self.context_name(name) "Test#{sanitize_name(name).gsub(/(^| )(\w)/) { $2.upcase }}".to_sym end def self.test_name(name) "test_#{sanitize_name(name).gsub(/\s+/,'_')}".to_sym end def self.sanitize_name(name) name.gsub(/\W+/, ' ').strip end def self.remove_tests(subclass) subclass.public_instance_methods.grep(/^test_/).each do |meth| subclass.send(:undef_method, meth.to_sym) end end end def context(*name, &block) Minitest::Test.context(name, &block) end asciidoctor-1.5.5/test/text_test.rb000066400000000000000000000250131277513741400173700ustar00rootroot00000000000000# encoding: UTF-8 unless defined? ASCIIDOCTOR_PROJECT_DIR $: << File.dirname(__FILE__); $:.uniq! require 'test_helper' end context "Text" do test "proper encoding to handle utf8 characters in document using html backend" do output = example_document(:encoding).render assert_xpath '//p', output, 4 assert_xpath '//a', output, 1 end test "proper encoding to handle utf8 characters in embedded document using html backend" do output = example_document(:encoding, :header_footer => false).render assert_xpath '//p', output, 4 assert_xpath '//a', output, 1 end test "proper encoding to handle utf8 characters in document using docbook45 backend" do output = example_document(:encoding, :attributes => {'backend' => 'docbook45', 'xmlns' => ''}).render assert_xpath '//xmlns:simpara', output, 4 assert_xpath '//xmlns:ulink', output, 1 end test "proper encoding to handle utf8 characters in embedded document using docbook45 backend" do output = example_document(:encoding, :header_footer => false, :attributes => {'backend' => 'docbook45'}).render assert_xpath '//simpara', output, 4 assert_xpath '//ulink', output, 1 end # NOTE this test ensures we have the encoding line on block templates too test 'proper encoding to handle utf8 characters in arbitrary block' do input = [] input << "[verse]\n" input.concat(File.readlines(sample_doc_path(:encoding))) doc = empty_document reader = Asciidoctor::PreprocessorReader.new doc, input block = Asciidoctor::Parser.next_block(reader, doc) assert_xpath '//pre', block.render.gsub(/^\s*\n/, ''), 1 end test 'proper encoding to handle utf8 characters from included file' do input = <<-EOS include::fixtures/encoding.asciidoc[tags=romé] EOS doc = empty_safe_document :base_dir => File.expand_path(File.dirname(__FILE__)) reader = Asciidoctor::PreprocessorReader.new doc, input block = Asciidoctor::Parser.next_block(reader, doc) output = block.render assert_css '.paragraph', output, 1 end test 'escaped text markup' do assert_match(/All your <em>inline<\/em> markup belongs to <strong>us<\/strong>!/, render_string('All your inline markup belongs to us!')) end test "line breaks" do assert_xpath "//br", render_string("Well this is +\njust fine and dandy, isn't it?"), 1 end test 'single- and double-quoted text' do rendered = render_embedded_string(%q(``Where?,'' she said, flipping through her copy of `The New Yorker.'), :attributes => {'compat-mode' => ''}) assert_match(/“Where\?,”/, rendered) assert_match(/‘The New Yorker.’/, rendered) rendered = render_embedded_string(%q("`Where?,`" she said, flipping through her copy of '`The New Yorker.`')) assert_match(/“Where\?,”/, rendered) assert_match(/‘The New Yorker.’/, rendered) end test 'multiple double-quoted text on a single line' do assert_equal '“Our business is 
constantly changing” or “We need faster time to market.”', render_embedded_string(%q(``Our business is constantly changing'' or ``We need faster time to market.''), :doctype => :inline, :attributes => {'compat-mode' => ''}) assert_equal '“Our business is constantly changing” or “We need faster time to market.”', render_embedded_string(%q("`Our business is constantly changing`" or "`We need faster time to market.`"), :doctype => :inline) end test 'horizontal rule' do input = <<-EOS This line is separated by a horizontal rule... ''' ...from this line. EOS output = render_embedded_string input assert_xpath "//hr", output, 1 assert_xpath "/*[@class='paragraph']", output, 2 assert_xpath "(/*[@class='paragraph'])[1]/following-sibling::hr", output, 1 assert_xpath "/hr/following-sibling::*[@class='paragraph']", output, 1 end test 'markdown horizontal rules' do variants = [ '---', '- - -', '***', '* * *', '___', '_ _ _' ] offsets = [ '', ' ', ' ', ' ' ] variants.each do |variant| offsets.each do |offset| input = <<-EOS This line is separated by a horizontal rule... #{offset}#{variant} ...from this line. EOS output = render_embedded_string input assert_xpath "//hr", output, 1 assert_xpath "/*[@class='paragraph']", output, 2 assert_xpath "(/*[@class='paragraph'])[1]/following-sibling::hr", output, 1 assert_xpath "/hr/following-sibling::*[@class='paragraph']", output, 1 end end end test 'markdown horizontal rules negative case' do bad_variants = [ '- - - -', '* * * *', '_ _ _ _' ] good_offsets = [ '', ' ', ' ', ' ' ] bad_variants.each do |variant| good_offsets.each do |offset| input = <<-EOS This line is separated something that is not a horizontal rule... #{offset}#{variant} ...from this line. EOS output = render_embedded_string input assert_xpath '//hr', output, 0 end end good_variants = [ '- - -', '* * *', '_ _ _' ] bad_offsets = [ "\t", ' ' ] good_variants.each do |variant| bad_offsets.each do |offset| input = <<-EOS This line is separated something that is not a horizontal rule... #{offset}#{variant} ...from this line. 
EOS output = render_embedded_string input assert_xpath '//hr', output, 0 end end end test "emphasized text using underscore characters" do assert_xpath "//em", render_string("An _emphatic_ no") end test 'emphasized text with single quote using apostrophe characters' do rsquo = [8217].pack 'U*' assert_xpath %(//em[text()="Johnny#{rsquo}s"]), render_string(%q(It's 'Johnny's' phone), :attributes => {'compat-mode' => ''}) assert_xpath %(//p[text()="It#{rsquo}s 'Johnny#{rsquo}s' phone"]), render_string(%q(It's 'Johnny's' phone)) end test 'emphasized text with escaped single quote using apostrophe characters' do assert_xpath %(//em[text()="Johnny's"]), render_string(%q(It's 'Johnny\\'s' phone), :attributes => {'compat-mode' => ''}) assert_xpath %(//p[text()="It's 'Johnny's' phone"]), render_string(%q(It\\'s 'Johnny\\'s' phone)) end test "escaped single quote is restored as single quote" do assert_xpath "//p[contains(text(), \"Let's do it!\")]", render_string("Let\\'s do it!") end test 'unescape escaped single quote emphasis in compat mode only' do assert_xpath %(//p[text()="A 'single quoted string' example"]), render_embedded_string(%(A \\'single quoted string' example), :attributes => {'compat-mode' => ''}) assert_xpath %(//p[text()="'single quoted string'"]), render_embedded_string(%(\\'single quoted string'), :attributes => {'compat-mode' => ''}) assert_xpath %(//p[text()="A \\'single quoted string' example"]), render_embedded_string(%(A \\'single quoted string' example)) assert_xpath %(//p[text()="\\'single quoted string'"]), render_embedded_string(%(\\'single quoted string')) end test "emphasized text at end of line" do assert_xpath "//em", render_string("This library is _awesome_") end test "emphasized text at beginning of line" do assert_xpath "//em", render_string("_drop_ it") end test "emphasized text across line" do assert_xpath "//em", render_string("_check it_") end test "unquoted text" do refute_match(/#/, render_string("An #unquoted# word")) end test 'backticks and straight quotes in text' do backslash = '\\' assert_equal %q(run foo dog), render_embedded_string(%q(run `foo` 'dog'), :doctype => :inline, :attributes => {'compat-mode' => ''}) assert_equal %q(run foo 'dog'), render_embedded_string(%q(run `foo` 'dog'), :doctype => :inline) assert_equal %q(run `foo` 'dog'), render_embedded_string(%(run #{backslash}`foo` 'dog'), :doctype => :inline) assert_equal %q(run ‘foo` 'dog’), render_embedded_string(%q(run '`foo` 'dog`'), :doctype => :inline) assert_equal %q(run '`foo` 'dog`'), render_embedded_string(%(run #{backslash}'`foo` 'dog#{backslash}`'), :doctype => :inline) end test 'plus characters inside single plus passthrough' do assert_xpath '//p[text()="+"]', render_embedded_string('+++') assert_xpath '//p[text()="+="]', render_embedded_string('++=+') end test 'plus passthrough escapes entity reference' do assert_match(/&#44;/, render_embedded_string('+,+')) assert_match(/one&#44;two/, render_embedded_string('one++,++two')) end context "basic styling" do setup do @rendered = render_string("A *BOLD* word. An _italic_ word. A `mono` word. 
^superscript!^ and some ~subscript~.") end test "strong" do assert_xpath "//strong", @rendered, 1 end test "italic" do assert_xpath "//em", @rendered, 1 end test "monospaced" do assert_xpath "//code", @rendered, 1 end test "superscript" do assert_xpath "//sup", @rendered, 1 end test "subscript" do assert_xpath "//sub", @rendered, 1 end test "passthrough" do assert_xpath "//code", render_string("This is +passed through+."), 0 assert_xpath "//code", render_string("This is +passed through and monospaced+.", :attributes => {'compat-mode' => ''}), 1 end test "nested styles" do rendered = render_string("Winning *big _time_* in the +city *boyeeee*+.", :attributes => {'compat-mode' => ''}) assert_xpath "//strong/em", rendered assert_xpath "//code/strong", rendered rendered = render_string("Winning *big _time_* in the `city *boyeeee*`.") assert_xpath "//strong/em", rendered assert_xpath "//code/strong", rendered end test "unconstrained quotes" do rendered_chars = render_string("**B**__I__++M++", :attributes => {'compat-mode' => ''}) assert_xpath "//strong", rendered_chars assert_xpath "//em", rendered_chars assert_xpath "//code", rendered_chars rendered_chars = render_string("**B**__I__``M``") assert_xpath "//strong", rendered_chars assert_xpath "//em", rendered_chars assert_xpath "//code", rendered_chars end end test 'should format Asian characters as words' do assert_xpath '//strong', (render_embedded_string 'bold *要* bold') assert_xpath '//strong', (render_embedded_string 'bold *素* bold') assert_xpath '//strong', (render_embedded_string 'bold *要素* bold') end end