golang-github-gorilla-css-1.0.1/.editorconfig

; https://editorconfig.org/

root = true

[*]
insert_final_newline = true
charset = utf-8
trim_trailing_whitespace = true
indent_style = space
indent_size = 2

[{Makefile,go.mod,go.sum,*.go,.gitmodules}]
indent_style = tab
indent_size = 4

[*.md]
indent_size = 4
trim_trailing_whitespace = false

eclint_indent_style = unset

golang-github-gorilla-css-1.0.1/.github/workflows/issues.yml

# Add all the issues created to the project.

name: Add issue or pull request to Project

on:
  issues:
    types:
      - opened
  pull_request_target:
    types:
      - opened
      - reopened

jobs:
  add-to-project:
    runs-on: ubuntu-latest
    steps:
      - name: Add issue to project
        uses: actions/add-to-project@v0.5.0
        with:
          project-url: https://github.com/orgs/gorilla/projects/4
          github-token: ${{ secrets.ADD_TO_PROJECT_TOKEN }}

golang-github-gorilla-css-1.0.1/.github/workflows/security.yml

name: Security
on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main
permissions:
  contents: read
jobs:
  scan:
    strategy:
      matrix:
        go: ['1.20','1.21']
      fail-fast: true
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Code
        uses: actions/checkout@v3
      - name: Setup Go ${{ matrix.go }}
        uses: actions/setup-go@v4
        with:
          go-version: ${{ matrix.go }}
          cache: false
      - name: Run GoSec
        uses: securego/gosec@master
        with:
          args: -exclude-dir examples ./...
      - name: Run GoVulnCheck
        uses: golang/govulncheck-action@v1
        with:
          go-version-input: ${{ matrix.go }}
          go-package: ./...

golang-github-gorilla-css-1.0.1/.github/workflows/test.yml

name: Test
on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main
permissions:
  contents: read
jobs:
  unit:
    strategy:
      matrix:
        go: ['1.20','1.21']
        os: [ubuntu-latest, macos-latest, windows-latest]
      fail-fast: true
    runs-on: ${{ matrix.os }}
    steps:
      - name: Checkout Code
        uses: actions/checkout@v3
      - name: Setup Go ${{ matrix.go }}
        uses: actions/setup-go@v4
        with:
          go-version: ${{ matrix.go }}
          cache: false
      - name: Run Tests
        run: go test -race -cover -coverprofile=coverage -covermode=atomic -v ./...
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v3
        with:
          files: ./coverage

golang-github-gorilla-css-1.0.1/.github/workflows/verify.yml

name: Verify
on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main
permissions:
  contents: read
jobs:
  lint:
    strategy:
      matrix:
        go: ['1.20','1.21']
      fail-fast: true
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Code
        uses: actions/checkout@v3
      - name: Setup Go ${{ matrix.go }}
        uses: actions/setup-go@v4
        with:
          go-version: ${{ matrix.go }}
          cache: false
      - name: Run GolangCI-Lint
        uses: golangci/golangci-lint-action@v3
        with:
          version: v1.53
          args: --timeout=5m

golang-github-gorilla-css-1.0.1/.gitignore

coverage.coverprofile

golang-github-gorilla-css-1.0.1/LICENSE

Copyright (c) 2023 The Gorilla Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

	* Redistributions of source code must retain the above copyright
	  notice, this list of conditions and the following disclaimer.
	* Redistributions in binary form must reproduce the above
	  copyright notice, this list of conditions and the following
	  disclaimer in the documentation and/or other materials provided
	  with the distribution.
	* Neither the name of Google Inc. nor the names of its
	  contributors may be used to endorse or promote products derived
	  from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

golang-github-gorilla-css-1.0.1/Makefile

GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '')
GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest

GO_SEC=$(shell which gosec 2> /dev/null || echo '')
GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest

GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '')
GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest

.PHONY: golangci-lint
golangci-lint:
	$(if $(GO_LINT), ,go install $(GO_LINT_URI))
	@echo "##### Running golangci-lint"
	golangci-lint run -v

.PHONY: gosec
gosec:
	$(if $(GO_SEC), ,go install $(GO_SEC_URI))
	@echo "##### Running gosec"
	gosec ./...

.PHONY: govulncheck
govulncheck:
	$(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI))
	@echo "##### Running govulncheck"
	govulncheck ./...

.PHONY: verify
verify: golangci-lint gosec govulncheck

.PHONY: test
test:
	@echo "##### Running tests"
	go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./...
golang-github-gorilla-css-1.0.1/README.md

# gorilla/css

![testing](https://github.com/gorilla/css/actions/workflows/test.yml/badge.svg)
[![codecov](https://codecov.io/github/gorilla/css/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/css)
[![godoc](https://godoc.org/github.com/gorilla/css?status.svg)](https://godoc.org/github.com/gorilla/css)
[![sourcegraph](https://sourcegraph.com/github.com/gorilla/css/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/css?badge)

![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5)

A CSS3 tokenizer.

golang-github-gorilla-css-1.0.1/go.mod

module github.com/gorilla/css

go 1.20

golang-github-gorilla-css-1.0.1/scanner/doc.go

// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

/*
Package gorilla/css/scanner generates tokens for a CSS3 input.

It follows the CSS3 specification located at:

	http://www.w3.org/TR/css3-syntax/

To use it, create a new scanner for a given CSS string and call Next() until
the token returned has type TokenEOF or TokenError:

	s := scanner.New(myCSS)
	for {
		token := s.Next()
		if token.Type == scanner.TokenEOF || token.Type == scanner.TokenError {
			break
		}
		// Do something with the token...
	}

Following the CSS3 specification, an error can only occur when the scanner
finds an unclosed quote or unclosed comment. In these cases the text becomes
"untokenizable". Everything else is tokenizable and it is up to a parser to
make sense of the token stream (or ignore nonsensical token sequences).

Note: the scanner doesn't perform lexical analysis or, in other words, it
doesn't care about the token context. It is intended to be used by a lexer or
parser.
*/
package scanner

golang-github-gorilla-css-1.0.1/scanner/scanner.go

// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package scanner

import (
	"fmt"
	"regexp"
	"strings"
	"unicode"
	"unicode/utf8"
)

// tokenType identifies the type of lexical tokens.
type tokenType int

// String returns a string representation of the token type.
func (t tokenType) String() string {
	return tokenNames[t]
}

// Token represents a token and the corresponding string.
type Token struct {
	Type   tokenType
	Value  string
	Line   int
	Column int
}

// String returns a string representation of the token.
func (t *Token) String() string {
	if len(t.Value) > 10 {
		return fmt.Sprintf("%s (line: %d, column: %d): %.10q...", t.Type, t.Line, t.Column, t.Value)
	}
	return fmt.Sprintf("%s (line: %d, column: %d): %q", t.Type, t.Line, t.Column, t.Value)
}

// All tokens -----------------------------------------------------------------

// The complete list of tokens in CSS3.
const (
	// Scanner flags.
	TokenError tokenType = iota
	TokenEOF
	// From now on, only tokens from the CSS specification.
	TokenIdent
	TokenAtKeyword
	TokenString
	TokenHash
	TokenNumber
	TokenPercentage
	TokenDimension
	TokenURI
	TokenUnicodeRange
	TokenCDO
	TokenCDC
	TokenS
	TokenComment
	TokenFunction
	TokenIncludes
	TokenDashMatch
	TokenPrefixMatch
	TokenSuffixMatch
	TokenSubstringMatch
	TokenChar
	TokenBOM
)

// tokenNames maps tokenType's to their names. Used for conversion to string.
var tokenNames = map[tokenType]string{
	TokenError:          "error",
	TokenEOF:            "EOF",
	TokenIdent:          "IDENT",
	TokenAtKeyword:      "ATKEYWORD",
	TokenString:         "STRING",
	TokenHash:           "HASH",
	TokenNumber:         "NUMBER",
	TokenPercentage:     "PERCENTAGE",
	TokenDimension:      "DIMENSION",
	TokenURI:            "URI",
	TokenUnicodeRange:   "UNICODE-RANGE",
	TokenCDO:            "CDO",
	TokenCDC:            "CDC",
	TokenS:              "S",
	TokenComment:        "COMMENT",
	TokenFunction:       "FUNCTION",
	TokenIncludes:       "INCLUDES",
	TokenDashMatch:      "DASHMATCH",
	TokenPrefixMatch:    "PREFIXMATCH",
	TokenSuffixMatch:    "SUFFIXMATCH",
	TokenSubstringMatch: "SUBSTRINGMATCH",
	TokenChar:           "CHAR",
	TokenBOM:            "BOM",
}

// Macros and productions -----------------------------------------------------
// http://www.w3.org/TR/css3-syntax/#tokenization

var macroRegexp = regexp.MustCompile(`\{[a-z]+\}`)

// macros maps macro names to patterns to be expanded.
var macros = map[string]string{
	// must be escaped: `\.+*?()|[]{}^$`
	"ident":      `-?{nmstart}{nmchar}*`,
	"name":       `{nmchar}+`,
	"nmstart":    `[a-zA-Z_]|{nonascii}|{escape}`,
	"nonascii":   "[\u0080-\uD7FF\uE000-\uFFFD\U00010000-\U0010FFFF]",
	"unicode":    `\\[0-9a-fA-F]{1,6}{wc}?`,
	"escape":     "{unicode}|\\\\[\u0020-\u007E\u0080-\uD7FF\uE000-\uFFFD\U00010000-\U0010FFFF]",
	"nmchar":     `[a-zA-Z0-9_-]|{nonascii}|{escape}`,
	"num":        `[0-9]*\.[0-9]+|[0-9]+`,
	"string":     `"(?:{stringchar}|')*"|'(?:{stringchar}|")*'`,
	"stringchar": `{urlchar}|[ ]|\\{nl}`,
	"nl":         `[\n\r\f]|\r\n`,
	"w":          `{wc}*`,
	"wc":         `[\t\n\f\r ]`,

	// urlchar should accept [(ascii characters minus those that need escaping)|{nonascii}|{escape}]
	// ASCII characters range = `[\u0020-\u007e]`
	// Skip space \u0020 = `[\u0021-\u007e]`
	// Skip quotation mark \0022 = `[\u0021\u0023-\u007e]`
	// Skip apostrophe \u0027 = `[\u0021\u0023-\u0026\u0028-\u007e]`
	// Skip reverse solidus \u005c = `[\u0021\u0023-\u0026\u0028-\u005b\u005d\u007e]`
	// Finally, the left square bracket (\u005b) and right (\u005d) needs escaping themselves
	"urlchar": "[\u0021\u0023-\u0026\u0028-\\\u005b\\\u005d-\u007E]|{nonascii}|{escape}",
}

// productions maps the list of tokens to patterns to be expanded.
var productions = map[tokenType]string{
	// Unused regexps (matched using other methods) are commented out.
	TokenIdent:        `{ident}`,
	TokenAtKeyword:    `@{ident}`,
	TokenString:       `{string}`,
	TokenHash:         `#{name}`,
	TokenNumber:       `{num}`,
	TokenPercentage:   `{num}%`,
	TokenDimension:    `{num}{ident}`,
	TokenURI:          `url\({w}(?:{string}|{urlchar}*?){w}\)`,
	TokenUnicodeRange: `U\+[0-9A-F\?]{1,6}(?:-[0-9A-F]{1,6})?`,
	//TokenCDO:        `<!--`,
	TokenCDC:          `-->`,
	TokenS:            `{wc}+`,
	TokenComment:      `/\*[^\*]*[\*]+(?:[^/][^\*]*[\*]+)*/`,
	TokenFunction:     `{ident}\(`,
	//TokenIncludes:       `~=`,
	//TokenDashMatch:      `\|=`,
	//TokenPrefixMatch:    `\^=`,
	//TokenSuffixMatch:    `\$=`,
	//TokenSubstringMatch: `\*=`,
	//TokenChar:           `[^"']`,
	//TokenBOM:            "\uFEFF",
}

// matchers maps the list of tokens to compiled regular expressions.
//
// The map is filled on init() using the macros and productions defined in
// the CSS specification.
var matchers = map[tokenType]*regexp.Regexp{}

// matchOrder is the order to test regexps when first-char shortcuts
// can't be used.
var matchOrder = []tokenType{
	TokenURI,
	TokenFunction,
	TokenUnicodeRange,
	TokenIdent,
	TokenDimension,
	TokenPercentage,
	TokenNumber,
	TokenCDC,
}

func init() {
	// replace macros and compile regexps for productions.
	replaceMacro := func(s string) string {
		return "(?:" + macros[s[1:len(s)-1]] + ")"
	}
	for t, s := range productions {
		for macroRegexp.MatchString(s) {
			s = macroRegexp.ReplaceAllStringFunc(s, replaceMacro)
		}
		matchers[t] = regexp.MustCompile("^(?:" + s + ")")
	}
}

// Scanner --------------------------------------------------------------------

// New returns a new CSS scanner for the given input.
func New(input string) *Scanner {
	// Normalize newlines.
	// https://www.w3.org/TR/css-syntax-3/#input-preprocessing
	input = strings.Replace(input, "\r\n", "\n", -1)
	input = strings.Replace(input, "\r", "\n", -1)
	input = strings.Replace(input, "\f", "\n", -1)
	input = strings.Replace(input, "\u0000", "\ufffd", -1)
	return &Scanner{
		input: input,
		row:   1,
		col:   1,
	}
}

// Scanner scans an input and emits tokens following the CSS3 specification.
type Scanner struct {
	input string
	pos   int
	row   int
	col   int
	err   *Token
}

// Next returns the next token from the input.
//
// At the end of the input the token type is TokenEOF.
//
// If the input can't be tokenized the token type is TokenError. This occurs
// in case of unclosed quotation marks or comments.
func (s *Scanner) Next() *Token {
	if s.err != nil {
		return s.err
	}
	if s.pos >= len(s.input) {
		s.err = &Token{TokenEOF, "", s.row, s.col}
		return s.err
	}
	if s.pos == 0 {
		// Test BOM only once, at the beginning of the file.
		if strings.HasPrefix(s.input, "\uFEFF") {
			return s.emitSimple(TokenBOM, "\uFEFF")
		}
	}
	// There's a lot we can guess based on the first byte so we'll take a
	// shortcut before testing multiple regexps.
	input := s.input[s.pos:]
	switch input[0] {
	case '\t', '\n', ' ':
		// Whitespace.
		return s.emitToken(TokenS, matchers[TokenS].FindString(input))
	case '.':
		// Dot is too common to not have a quick check.
		// We'll test if this is a Char; if it is followed by a number it is a
		// dimension/percentage/number, and this will be matched later.
		if len(input) > 1 && !unicode.IsDigit(rune(input[1])) {
			return s.emitSimple(TokenChar, ".")
		}
	case '#':
		// Another common one: Hash or Char.
		if match := matchers[TokenHash].FindString(input); match != "" {
			return s.emitToken(TokenHash, match)
		}
		return s.emitSimple(TokenChar, "#")
	case '@':
		// Another common one: AtKeyword or Char.
		if match := matchers[TokenAtKeyword].FindString(input); match != "" {
			return s.emitSimple(TokenAtKeyword, match)
		}
		return s.emitSimple(TokenChar, "@")
	case ':', ',', ';', '%', '&', '+', '=', '>', '(', ')', '[', ']', '{', '}':
		// More common chars.
		return s.emitSimple(TokenChar, string(input[0]))
	case '"', '\'':
		// String or error.
		match := matchers[TokenString].FindString(input)
		if match != "" {
			return s.emitToken(TokenString, match)
		}
		s.err = &Token{TokenError, "unclosed quotation mark", s.row, s.col}
		return s.err
	case '/':
		// Comment, error or Char.
		if len(input) > 1 && input[1] == '*' {
			match := matchers[TokenComment].FindString(input)
			if match != "" {
				return s.emitToken(TokenComment, match)
			} else {
				s.err = &Token{TokenError, "unclosed comment", s.row, s.col}
				return s.err
			}
		}
		return s.emitSimple(TokenChar, "/")
	case '~':
		// Includes or Char.
		return s.emitPrefixOrChar(TokenIncludes, "~=")
	case '|':
		// DashMatch or Char.
		return s.emitPrefixOrChar(TokenDashMatch, "|=")
	case '^':
		// PrefixMatch or Char.
		return s.emitPrefixOrChar(TokenPrefixMatch, "^=")
	case '$':
		// SuffixMatch or Char.
		return s.emitPrefixOrChar(TokenSuffixMatch, "$=")
	case '*':
		// SubstringMatch or Char.
		return s.emitPrefixOrChar(TokenSubstringMatch, "*=")
	case '<':
		// CDO or Char.
		return s.emitPrefixOrChar(TokenCDO, "<!--")
	}
	// Test all regexps, in order.
	for _, token := range matchOrder {
		if match := matchers[token].FindString(input); match != "" {
			return s.emitToken(token, match)
		}
	}
	// We already handled unclosed quotation marks and comments,
	// so this can only be a Char.
	r, width := utf8.DecodeRuneInString(input)
	token := &Token{TokenChar, string(r), s.row, s.col}
	s.col++
	s.pos += width
	return token
}

// updatePosition updates the scanner coordinates based on the consumed text.
func (s *Scanner) updatePosition(text string) {
	width := utf8.RuneCountInString(text)
	lines := strings.Count(text, "\n")
	s.row += lines
	if lines == 0 {
		s.col += width
	} else {
		s.col = utf8.RuneCountInString(text[strings.LastIndex(text, "\n"):])
	}
	s.pos += len(text) // while col is a rune index, pos is a byte index
}

// emitToken returns a Token for the string v and updates the scanner position.
func (s *Scanner) emitToken(t tokenType, v string) *Token {
	token := &Token{t, v, s.row, s.col}
	s.updatePosition(v)
	return token
}

// emitSimple returns a Token for the string v and updates the scanner
// position in a simplified manner.
//
// The string is known to have only ASCII characters and to not contain
// newline characters.
func (s *Scanner) emitSimple(t tokenType, v string) *Token {
	token := &Token{t, v, s.row, s.col}
	s.col += len(v)
	s.pos += len(v)
	return token
}

// emitPrefixOrChar returns a Token for type t if the current position
// matches the given prefix. Otherwise it returns a Char token using the
// first character from the prefix.
//
// The prefix is known to have only ASCII characters and to not contain
// newline characters.
func (s *Scanner) emitPrefixOrChar(t tokenType, prefix string) *Token {
	if strings.HasPrefix(s.input[s.pos:], prefix) {
		return s.emitToken(t, prefix)
	}
	return s.emitSimple(TokenChar, string(prefix[0]))
}

golang-github-gorilla-css-1.0.1/scanner/scanner_test.go

// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package scanner

import (
	"testing"
)

func TestMatchers(t *testing.T) {
	// checkMatch asserts that scanning s yields the given sequence of
	// (token type, value) pairs, followed by EOF.
	checkMatch := func(s string, ttList ...interface{}) {
		scanner := New(s)
		i := 0
		for i < len(ttList) {
			tt := ttList[i].(tokenType)
			tVal := ttList[i+1].(string)
			if tok := scanner.Next(); tok.Type != tt || tok.Value != tVal {
				t.Errorf("did not match: %s (got %v)", s, tok)
			}
			i += 2
		}
		if tok := scanner.Next(); tok.Type != TokenEOF {
			t.Errorf("missing EOF after token %s, got %+v", s, tok)
		}
	}

	checkMatch("<!--", TokenCDO, "<!--")
	checkMatch("-->", TokenCDC, "-->")
	checkMatch(" \n \t \n", TokenS, " \n \t \n")
	checkMatch("/* foo */", TokenComment, "/* foo */")
	checkMatch("bar(", TokenFunction, "bar(")
	checkMatch("~=", TokenIncludes, "~=")
	checkMatch("|=", TokenDashMatch, "|=")
	checkMatch("^=", TokenPrefixMatch, "^=")
	checkMatch("$=", TokenSuffixMatch, "$=")
	checkMatch("*=", TokenSubstringMatch, "*=")
	checkMatch("{", TokenChar, "{")
	checkMatch("\uFEFF", TokenBOM, "\uFEFF")
	checkMatch(`╯︵┻━┻"stuff"`, TokenIdent, "╯︵┻━┻", TokenString, `"stuff"`)
}

func TestPreprocess(t *testing.T) {
	tcs := []struct{ desc, input, expected string }{
		{
			"CR",
			".a{ \r color:red}",
			".a{ \n color:red}",
		},
		{
			"FF",
			".a{ \f color:red}",
			".a{ \n color:red}",
		},
		{
			"CRLF",
			".a{ \r\n color:red}",
			".a{ \n color:red}",
		},
		{
			"NULL",
			".a{ \u0000 color:red}",
			".a{ \ufffd color:red}",
		},
		{
			"mixture",
			".a{ \r\r\n\u0000\f color:red}",
			".a{ \n\n\ufffd\n color:red}",
		},
	}

	for _, tc := range tcs {
		s := New(tc.input)
		if s.input != tc.expected {
			t.Errorf("%s: got=%q, want=%q", tc.desc, s.input, tc.expected)
		}
	}
}
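For orientation, the sketch below shows how the package is meant to be consumed from outside, following the loop described in scanner/doc.go. It is not part of the archive above: the program, its file name, and the sample stylesheet are illustrative assumptions, while the import path github.com/gorilla/css/scanner and the scanner.New, (*Scanner).Next, Token, TokenEOF, and TokenError names are taken directly from the sources listed here.

// tokenize.go: hypothetical usage sketch, not part of the repository.
package main

import (
	"fmt"

	"github.com/gorilla/css/scanner"
)

func main() {
	// Walk the token stream until EOF or an error, as recommended in
	// scanner/doc.go. Per the package documentation, TokenError is only
	// produced for an unclosed quotation mark or an unclosed comment.
	s := scanner.New(`.box { color: red } /* done */`)
	for {
		tok := s.Next()
		if tok.Type == scanner.TokenEOF {
			break
		}
		if tok.Type == scanner.TokenError {
			fmt.Println("scan error:", tok.Value)
			break
		}
		fmt.Printf("%s at line %d, column %d: %q\n", tok.Type, tok.Line, tok.Column, tok.Value)
	}
}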