lua-http-0.4/.busted

return {
	default = {
		lpath = "./?.lua";
		["auto-insulate"] = false;
		helper = "spec/helper.lua";
	};
}

lua-http-0.4/.github/workflows/ci.yml

name: ci

on:
  pull_request: {}
  push:
    branches: [ $default-branch ]

jobs:
  luacheck:
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v2
      with:
        path: lua-http
    - uses: leafo/gh-actions-lua@v8.0.0
    - uses: leafo/gh-actions-luarocks@v4.0.0
    - name: install-tooling
      run: luarocks install luacheck
    - name: luacheck
      run: |
        cd lua-http
        luacheck .

  test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        luaVersion:
        - "5.1"
        - "5.2"
        - "5.3"
        - "5.4"
        - luajit-2.0.5
        - luajit-2.1.0-beta3
        luaCompileFlags: [""]
        zlib: ["", "lzlib", "lua-zlib"]
        remove_compat53: [false]
        exclude:
        # lzlib doesn't support Lua 5.4+
        - luaVersion: "5.4"
          zlib: "lzlib"
        include:
        - luaVersion: "5.3"
          luaCompileFlags: LUA_CFLAGS="-DLUA_INT_TYPE=LUA_INT_INT"
        - luaVersion: "5.3"
          remove_compat53: true
    steps:
    - uses: actions/checkout@v2
      with:
        path: lua-http
    - uses: leafo/gh-actions-lua@v8.0.0
      with:
        luaVersion: ${{ matrix.luaVersion }}
    - uses: leafo/gh-actions-luarocks@v4.0.0
    - name: install-tooling
      run: |
        luarocks install luacov-coveralls
        luarocks install busted
    - name: install-dependencies
      run: |
        cd lua-http
        luarocks install --only-deps http-scm-0.rockspec
    - name: install-lzlib
      if: matrix.zlib == 'lzlib'
      run: luarocks install lzlib
    - name: install-lua-zlib
      if: matrix.zlib == 'lua-zlib'
      run: luarocks install lua-zlib
    - name: remove-compat53
      if: matrix.remove_compat53
      run: luarocks remove compat53
    - name: test
      run: |
        cd lua-http
        busted -c -o utfTerminal
    - name: coveralls
      continue-on-error: true
      env:
        COVERALLS_REPO_TOKEN: ${{ secrets.COVERALLS_REPO_TOKEN }}
      run: |
        cd lua-http
        luacov-coveralls -v

  typedlua:
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v2
      with:
        path: lua-http
    - uses: leafo/gh-actions-lua@v8.0.0
      with:
        luaVersion: "5.3"  # tlc doesn't work with 5.4+
    - uses: leafo/gh-actions-luarocks@v4.0.0
    - name: install-tooling
      run: luarocks install https://raw.githubusercontent.com/andremm/typedlua/master/typedlua-scm-1.rockspec
    - name: install-dependencies
      run: |
        cd lua-http
        luarocks install --only-deps http-scm-0.rockspec
    - name: typedlua
      run: |
        cd lua-http
        tlc -o /dev/null spec/require-all.lua

lua-http-0.4/.gitignore

/luacov.report.out
/luacov.stats.out
/*.rock

lua-http-0.4/.luacheckrc

std = "min"

files["spec"] = {
	std = "+busted";
	new_globals = {
		"TEST_TIMEOUT";
		"assert_loop";
	};
}

max_line_length = false

lua-http-0.4/.luacov

return {
	statsfile = "luacov.stats.out";
	reportfile = "luacov.report.out";
	deletestats = true;
	include = {
		"/http/[^/]+$";
		"/http/compat/[^/]+$";
	};
	exclude = {
	};
}
lua-http-0.4/CONTRIBUTING.md

Hello and thank you for considering contributing to lua-http! If you haven't already, see the [getting started](https://github.com/daurnimator/lua-http#getting-started) section of the main readme.

# Contributing

To submit your code for inclusion, please [send a "pull request" using github](https://github.com/daurnimator/lua-http/pulls). For a speedy approval, please:

- Follow the [coding style](#coding-style)
- Run [`luacheck`](https://github.com/mpeterv/luacheck) to lint your code
- Include [tests](#tests)
  - Bug fixes should add a test exhibiting the issue
  - Enhancements must add tests for the new feature
- [Sign off](#dco) your code

If you are requested by a project maintainer to fix an issue with your pull request, please edit your existing commits (using e.g. `git commit --amend` or [`git fixup`](https://github.com/hashbang/dotfiles/blob/master/git/.local/bin/git-fixup)) rather than pushing new commits on top of the old ones. All commits *should* have the project in an operational state.

# Coding Style

When editing an existing file, please follow the coding style used in that file. If not clear from context or if you're starting a new file:

- Indent with tabs
- Alignment should not be done; when unavoidable, align with spaces
- Remove any trailing whitespace (unless whitespace is significant, as it can be in e.g. markdown)
- Things (e.g. table fields) should be ordered by:
  1. Required vs optional
  2. Importance
  3. Lexicographically (alphabetically)

## Lua conventions

- Add a `__name` field to metatables
- Use a separate table from the metatable itself for `__index`
- Single-line table definitions should use commas (`,`) for delimiting elements
- Multi-line table definitions should use semicolons (`;`) for delimiting elements

(A short illustrative sketch of these conventions appears at the end of this document.)

## Markdown conventions

- Files should have two blank lines at the end of a section
- Repository information files (e.g. README.md/CONTRIBUTING.md) should use github compatible markdown features
- Files used to generate documentation can use any `pandoc` features they want

# Tests

The project has a test suite using the [`busted`](https://github.com/Olivine-Labs/busted) framework. Coverage is measured using [`luacov`](https://github.com/keplerproject/luacov).

Tests can be found in the `spec/` directory at the root of the repository. Each source file should have its own file full of tests.

Tests should avoid running any external processes. Use `cqueues` to start up various test servers and clients in-process. A successful test should close any file handles and sockets to avoid resource exhaustion.

# Legal

All code in the repository is covered by `LICENSE.md`.

## DCO

A git `Signed-off-by` statement in a commit message in this repository refers to the [Developer Certificate of Origin](https://developercertificate.org/) (DCO). By signing off your commit you are making a legal statement that the work is contributed under the license of this project.

You can add the statement to your commit by passing `-s` to `git commit`.

# Security

If you find a security vulnerability in the project and do not wish to file it publicly on the [issue tracker](https://github.com/daurnimator/lua-http/issues) then you may email [lua-http-security@daurnimator.com](mailto:lua-http-security@daurnimator.com).

You may encrypt your mail using PGP to the key with fingerprint [954A3772D62EF90E4B31FBC6C91A9911192C187A](https://daurnimator.com/post/109075829529/gpg-key).
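For reference, the Lua conventions listed above can be illustrated with a short sketch. All names here are hypothetical and are not part of lua-http:

```lua
-- Hypothetical sketch of the Lua conventions above; none of these names exist in lua-http.
local connection_methods = {}          -- methods live in their own table...
local connection_mt = {
	__name = "example.connection";     -- ...the metatable gets a __name...
	__index = connection_methods;      -- ...and is referenced from __index
}

local function new_connection(socket)
	-- single-line table definition: comma-delimited
	return setmetatable({ socket = socket, streams = {} }, connection_mt)
end

-- multi-line table definition: semicolon-delimited
local default_options = {
	timeout = 10;
	follow_redirects = true;
}

return { new = new_connection, defaults = default_options }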
lua-http-0.4/LICENSE.md

The MIT License (MIT)

Copyright (c) 2015-2021 Daurnimator

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

lua-http-0.4/NEWS

0.4 - 2021-02-06

- Support multiple elliptic curves under OpenSSL 1.1.1+ (#150)
- Improve support for Lua 5.4 (no longer requires the bit library to be installed) (#180)
- Ignore delayed RST_STREAM frames in HTTP 2 (#145)

0.3 - 2019-02-13

- Fix incorrect Sec-WebSocket-Protocol negotiation
- Fix incorrect timeout handling in `websocket:receive()`
- Add workaround to allow being required in openresty (#98)
- Add http.tls.old_cipher_list (#112)
- Add http.cookie module (#117)
- Improvements to http.hsts module (#119)
- Add `options` argument form to `stream:write_body_from_file()` (#125)

0.2 - 2017-05-28

- Remove broken http.server `.client_timeout` option (replaced with `.connection_setup_timeout`)
- Fix http1 pipelining locks
- Miscellaneous http2 fixes
- HTTP 2 streams no longer have to be used in order of creation
- No longer raise decode errors in hpack module
- Fix `hpack:lookup_index()` to treat static entries without values as empty string
- Fix HTTP 1 client in locales with non-"." decimal separator
- Add h1_stream.max_header_lines property to prevent infinite list of headers
- New '.bind' option for requests and http.client module

0.1 - 2016-12-17

- Support for HTTP versions 1, 1.1 and 2
- Provides both client and server APIs
- Friendly request API with sensible defaults for security
- All operations are fully non-blocking and can be managed with cqueues
- Support for WebSockets (client and server), including ping/pong, binary data transfer and TLS encryption.
- Transport Layer Security (TLS) - lua-http supports HTTPS and WSS via luaossl.
- luasocket compatibility API if you're looking to use lua-http with older projects.

lua-http-0.4/README.md

# HTTP library for Lua.
## Features

- Optionally asynchronous (including DNS lookups and TLS)
- Supports HTTP(S) version 1.0, 1.1 and 2
- Functionality for both client and server
- Cookie Management
- Websockets
- Compatible with Lua 5.1, 5.2, 5.3 and [LuaJIT](http://luajit.org/)

## Documentation

Can be found at [https://daurnimator.github.io/lua-http/](https://daurnimator.github.io/lua-http/)

## Status

[![Build Status](https://github.com/daurnimator/lua-http/workflows/ci/badge.svg)](https://github.com/daurnimator/lua-http/actions?query=workflow%3Aci)
[![Coverage Status](https://coveralls.io/repos/daurnimator/lua-http/badge.svg?branch=master&service=github)](https://coveralls.io/github/daurnimator/lua-http?branch=master)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/108/badge)](https://bestpractices.coreinfrastructure.org/projects/108)

# Installation

It's recommended to install lua-http by using [luarocks](https://luarocks.org/). This will automatically install run-time lua dependencies for you.

    $ luarocks install http

## Dependencies

- [cqueues](http://25thandclement.com/~william/projects/cqueues.html) >= 20161214 (Note: cqueues currently doesn't support Microsoft Windows operating systems)
- [luaossl](http://25thandclement.com/~william/projects/luaossl.html) >= 20161208
- [basexx](https://github.com/aiq/basexx/) >= 0.2.0
- [lpeg](http://www.inf.puc-rio.br/~roberto/lpeg/lpeg.html)
- [lpeg_patterns](https://github.com/daurnimator/lpeg_patterns) >= 0.5
- [binaryheap](https://github.com/Tieske/binaryheap.lua) >= 0.3
- [fifo](https://github.com/daurnimator/fifo.lua)

To use gzip compression you need **one** of:

- [lzlib](https://github.com/LuaDist/lzlib) or [lua-zlib](https://github.com/brimworks/lua-zlib)

To check cookies against a public suffix list:

- [lua-psl](https://github.com/daurnimator/lua-psl)

If using lua < 5.3 you will need

- [compat-5.3](https://github.com/keplerproject/lua-compat-5.3) >= 0.3

If using lua 5.1 you will need

- [luabitop](http://bitop.luajit.org/) (comes [with LuaJIT](http://luajit.org/extensions.html)) or a [backported bit32](https://luarocks.org/modules/siffiejoe/bit32)

### For running tests

- [luacheck](https://github.com/mpeterv/luacheck)
- [busted](http://olivinelabs.com/busted/)
- [luacov](https://keplerproject.github.io/luacov/)

# Development

## Getting started

- Clone the repo:

  ```
  $ git clone https://github.com/daurnimator/lua-http.git
  $ cd lua-http
  ```

- Install dependencies

  ```
  $ luarocks install --only-deps http-scm-0.rockspec
  ```

- Lint the code (check for common programming errors)

  ```
  $ luacheck .
  ```

- Run tests and view coverage report ([install tools first](#for-running-tests))

  ```
  $ busted -c
  $ luacov && less luacov.report.out
  ```

- Install your local copy:

  ```
  $ luarocks make http-scm-0.rockspec
  ```

## Generating documentation

Documentation is written in markdown and intended to be consumed by [pandoc](http://pandoc.org/). See the `doc/` directory for more information.
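To confirm a local install works end to end, a minimal script such as the following can be run. This is only a sketch that mirrors the first example from the documentation; `http://example.com` is a placeholder URI:

```lua
-- Smoke test: fetch a page and print the response status.
local http_request = require "http.request"

local headers, stream = assert(http_request.new_from_uri("http://example.com"):go())
print(headers:get(":status")) --> e.g. "200"
stream:shutdown()
```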
lua-http-0.4/doc/Makefile

INTERFACES = \
	connection.md \
	stream.md

MODULES = \
	http.bit.md \
	http.client.md \
	http.cookie.md \
	http.h1_connection.md \
	http.h1_reason_phrases.md \
	http.h1_stream.md \
	http.h2_connection.md \
	http.h2_error.md \
	http.h2_stream.md \
	http.headers.md \
	http.hpack.md \
	http.hsts.md \
	http.proxies.md \
	http.request.md \
	http.server.md \
	http.socks.md \
	http.tls.md \
	http.util.md \
	http.version.md \
	http.websocket.md \
	http.zlib.md \
	http.compat.prosody.md \
	http.compat.socket.md

FILES = \
	introduction.md \
	interfaces.md \
	$(addprefix interfaces/,$(INTERFACES)) \
	modules.md \
	$(addprefix modules/,$(MODULES)) \
	links.md

all: lua-http.html lua-http.pdf lua-http.3

lua-http.html: template.html site.css metadata.yaml $(FILES)
	pandoc -o $@ -t html5 -s --toc --template=template.html --section-divs --self-contained -c site.css metadata.yaml $(FILES)

lua-http.pdf: metadata.yaml $(FILES)
	pandoc -o $@ -t latex -s --toc --toc-depth=2 -V documentclass=article -V classoption=oneside -V links-as-notes -V geometry=a4paper,includeheadfoot,margin=2.54cm metadata.yaml $(FILES)

lua-http.3: metadata.yaml $(FILES)
	pandoc -o $@ -t man -s metadata.yaml $(FILES)

man: lua-http.3
	man -l $^

clean:
	rm -f lua-http.html lua-http.pdf lua-http.3

.PHONY: all man install clean

lua-http-0.4/doc/README.md

Documentation in this directory is intended to be converted to other formats using [pandoc](http://pandoc.org/).

An online HTML version can be found at [https://daurnimator.github.io/lua-http/](https://daurnimator.github.io/lua-http/)

The *Makefile* in this directory should be used to compile the documentation.

lua-http-0.4/doc/interfaces.md

# Interfaces

lua-http has separate modules for HTTP 1 vs HTTP 2 protocols, yet the different versions share many common concepts. lua-http provides a common interface for operations that make sense for both protocol versions (as well as any future developments). The following sections outline the interfaces exposed by the lua-http library.

lua-http-0.4/doc/interfaces/connection.md

## connection

A connection encapsulates a socket and provides protocol specific operations. A connection may have [*streams*](#stream) which encapsulate the requests/responses happening over a connection. Alternatively, you can ignore streams entirely and use low level protocol specific operations to read and write to the socket.

All *connection* types expose the following fields:

### `connection.type` {#connection.type}

The mode of use for the connection object. Valid values are:

- `"client"`: Acts as a client; this connection type is used by entities who want to make requests
- `"server"`: Acts as a server; this connection type is used by entities who want to respond to requests

### `connection.version` {#connection.version}

The HTTP version number of the connection as a number.
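As a brief sketch of these two fields, the following uses `http.client.connect` (documented in the modules section) to obtain a client connection and inspect it; `example.com` is a placeholder host:

```lua
-- Inspect the type and version fields on a freshly negotiated client connection.
local http_client = require "http.client"

local conn = assert(http_client.connect {
	host = "example.com"; -- placeholder host
	port = 80;
	tls = false;
	version = 1.1; -- ask for an HTTP 1.1 connection explicitly
})
print(conn.type)    --> "client"
print(conn.version) --> 1.1
conn:close()
```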
### `connection:pollfd()` {#connection:pollfd}

### `connection:events()` {#connection:events}

### `connection:timeout()` {#connection:timeout}

### `connection:connect(timeout)` {#connection:connect}

Completes the connection to the remote server using the address, HTTP version and any options specified in the `connection.new` constructor. The `connect` function will yield until the connection attempt finishes (success or failure) or until `timeout` is exceeded. Connecting may include DNS lookups, TLS negotiation and HTTP2 settings exchange. Returns `true` on success. On error, returns `nil`, an error message and an error number.

### `connection:checktls()` {#connection:checktls}

Checks the socket for a valid Transport Layer Security connection. Returns the luaossl ssl object if the connection is secured. Returns `nil` and an error message if there is no active TLS session. Please see the [luaossl website](http://25thandclement.com/~william/projects/luaossl.html) for more information about the ssl object.

### `connection:localname()` {#connection:localname}

Returns the connection information for the local socket. Returns address family, IP address and port for an external socket. For Unix domain sockets, the function returns `AF_UNIX` and the path. If the connection object is not connected, returns `AF_UNSPEC` (0). On error, returns `nil`, an error message and an error number.

### `connection:peername()` {#connection:peername}

Returns the connection information for the socket *peer* (as in, the next hop). Returns address family, IP address and port for an external socket. For unix sockets, the function returns `AF_UNIX` and the path. If the connection object is not connected, returns `AF_UNSPEC` (0). On error, returns `nil`, an error message and an error number.

*Note: If the client is using a proxy, the values returned by `:peername()` point to the proxy, not the remote server.*

### `connection:flush(timeout)` {#connection:flush}

Flushes buffered outgoing data on the socket to the operating system. Returns `true` on success. On error, returns `nil`, an error message and an error number.

### `connection:shutdown()` {#connection:shutdown}

Performs an orderly shutdown of the connection by closing all streams and calling `:shutdown()` on the socket. The connection cannot be re-opened.

### `connection:close()` {#connection:close}

Closes a connection and releases operating system resources. Note that `:close()` performs a [`connection:shutdown()`](#connection:shutdown) prior to releasing resources.

### `connection:new_stream()` {#connection:new_stream}

Creates and returns a new [*stream*](#stream) on the connection.

### `connection:get_next_incoming_stream(timeout)` {#connection:get_next_incoming_stream}

Returns the next peer initiated [*stream*](#stream) on the connection. This function can be used to yield and "listen" for incoming HTTP streams.

### `connection:onidle(new_handler)` {#connection:onidle}

Provide a callback to get called when the connection becomes idle i.e. when there is no request in progress and no pipelined streams waiting. When called it will receive the `connection` as the first argument. Returns the previous handler.

lua-http-0.4/doc/interfaces/stream.md

## stream

An HTTP *stream* is an abstraction of a request/response within a HTTP connection. Within a stream there may be a number of "header" blocks as well as data known as the "body".
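The following sketch shows how these pieces fit together on the client side, using only the generic stream interface described in the rest of this section. It assumes the `http.headers` constructor and its `:append`/`:get` methods (documented in the modules section), uses `example.com` as a placeholder host, and in an application would normally run inside a cqueues coroutine:

```lua
-- A single request/response exchange driven through the generic stream interface.
local http_client = require "http.client"
local http_headers = require "http.headers" -- assumed: new(), :append(), :get()

local conn = assert(http_client.connect {
	host = "example.com"; -- placeholder host
	port = 80;
	tls = false;
	version = 1.1;
})
local stream = conn:new_stream()

local req = http_headers.new()
req:append(":method", "GET")
req:append(":scheme", "http")
req:append(":authority", "example.com")
req:append(":path", "/")
assert(stream:write_headers(req, true)) -- true: no request body follows

local res = assert(stream:get_headers())
print(res:get(":status"))
local body = assert(stream:get_body_as_string())
print(#body, "bytes received")
conn:close()
```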
All stream types expose the following fields and functions:

### `stream.connection` {#stream.connection}

The underlying [*connection*](#connection) object.

### `stream:checktls()` {#stream:checktls}

Convenience wrapper equivalent to [`stream.connection:checktls()`](#connection:checktls)

### `stream:localname()` {#stream:localname}

Convenience wrapper equivalent to [`stream.connection:localname()`](#connection:localname)

### `stream:peername()` {#stream:peername}

Convenience wrapper equivalent to [`stream.connection:peername()`](#connection:peername)

### `stream:get_headers(timeout)` {#stream:get_headers}

Retrieves the next complete headers object (i.e. a block of headers or trailers) from the stream.

### `stream:write_headers(headers, end_stream, timeout)` {#stream:write_headers}

Write the given [*headers*](#http.headers) object to the stream. The function takes a flag indicating if this is the last chunk in the stream; if `true`, the stream will be closed. If `timeout` is specified, the stream will wait for the send to complete until `timeout` is exceeded.

### `stream:write_continue(timeout)` {#stream:write_continue}

Sends a 100-continue header block.

### `stream:get_next_chunk(timeout)` {#stream:get_next_chunk}

Returns the next chunk of the http body from the socket, potentially yielding for up to `timeout` seconds. On error, returns `nil`, an error message and an error number.

### `stream:each_chunk()` {#stream:each_chunk}

Iterator over [`stream:get_next_chunk()`](#stream:get_next_chunk)

### `stream:get_body_as_string(timeout)` {#stream:get_body_as_string}

Reads the entire body from the stream and returns it as a string. On error, returns `nil`, an error message and an error number.

### `stream:get_body_chars(n, timeout)` {#stream:get_body_chars}

Reads `n` characters (bytes) of body from the stream and returns them as a string. If the stream ends before `n` characters are read then returns the partial result. On error, returns `nil`, an error message and an error number.

### `stream:get_body_until(pattern, plain, include_pattern, timeout)` {#stream:get_body_until}

Reads in body data from the stream until the [lua pattern](http://www.lua.org/manual/5.3/manual.html#6.4.1) `pattern` is found and returns the data as a string. `plain` is a boolean that indicates that pattern matching facilities should be turned off so that the function does a plain "find substring" operation, with no characters in `pattern` being considered magic. `include_pattern` specifies if the pattern itself should be included in the returned string. On error, returns `nil`, an error message and an error number.

### `stream:save_body_to_file(file, timeout)` {#stream:save_body_to_file}

Reads the body from the stream and saves it to the [lua file handle](http://www.lua.org/manual/5.3/manual.html#6.8) `file`. On error, returns `nil`, an error message and an error number.

### `stream:get_body_as_file(timeout)` {#stream:get_body_as_file}

Reads the body from the stream into a temporary file and returns a [lua file handle](http://www.lua.org/manual/5.3/manual.html#6.8). On error, returns `nil`, an error message and an error number.

### `stream:unget(str)` {#stream:unget}

Places `str` back on the incoming data buffer, allowing it to be returned again on a subsequent command ("un-gets" the data). Returns `true` on success. On error, returns `nil`, an error message and an error number.

### `stream:write_chunk(chunk, end_stream, timeout)` {#stream:write_chunk}

Writes the string `chunk` to the stream.
If `end_stream` is true, the body will be finalized and the stream will be closed. `write_chunk` yields indefinitely, or until `timeout` is exceeded. On error, returns `nil`, an error message and an error number.

### `stream:write_body_from_string(str, timeout)` {#stream:write_body_from_string}

Writes the string `str` to the stream and ends the stream. On error, returns `nil`, an error message and an error number.

### `stream:write_body_from_file(options|file, timeout)` {#stream:write_body_from_file}

- `options` is a table containing:
  - `.file` (file)
  - `.count` (positive integer): number of bytes of `file` to write; defaults to infinity (the whole file will be written)

Writes the contents of file `file` to the stream and ends the stream. `file` will not be automatically seeked, so ensure it is at the correct offset before calling. On error, returns `nil`, an error message and an error number.

### `stream:shutdown()` {#stream:shutdown}

Closes the stream. The resources are released and the stream can no longer be used.

lua-http-0.4/doc/introduction.md

# Introduction

lua-http is a performant, capable HTTP and WebSocket library for Lua 5.1, 5.2, 5.3 and LuaJIT. Some of the features of the library include:

- Support for HTTP versions 1, 1.1 and 2 as specified by [RFC 7230](https://tools.ietf.org/html/rfc7230) and [RFC 7540](https://tools.ietf.org/html/rfc7540)
- Provides both client and server APIs
- Fully asynchronous API that does not block the current thread when executing operations that typically block
- Support for WebSockets as specified by [RFC 6455](https://tools.ietf.org/html/rfc6455) including ping/pong, binary data transfer and TLS encryption
- Transport Layer Security (TLS) - lua-http supports HTTPS and WSS via [luaossl](https://github.com/wahern/luaossl).
- Easy integration into other event loops or scripts

### Why lua-http?

The lua-http library was written to fill a gap in the Lua ecosystem by providing an HTTP and WebSocket library with the following traits:

- Asynchronous and performant
- Can be used without forcing the developer to follow a specific pattern. Conversely, the library can be adapted to many common patterns.
- Can be used at a very high level without need to understand the transportation of HTTP data (other than connection addresses).
- Provides a rich low level API, if desired, for creating powerful HTTP based tools at the protocol level.

As a result of these design goals, the library is simple and unobtrusive and can accommodate tens of thousands of connections on commodity hardware.

lua-http is a flexible HTTP and WebSocket library that allows developers to concentrate on line-of-business features when building Internet enabled applications. If you are looking for a way to streamline development of an internet enabled application, enable HTTP networking in your game, create a new Internet Of Things (IoT) system, or write a performant custom web server for a specific use case, lua-http has the tools you need.
### Portability lua-http is pure Lua code with dependencies on the following external libraries: - [cqueues](http://25thandclement.com/~william/projects/cqueues.html) - Posix API library for Lua - [luaossl](http://25thandclement.com/~william/projects/luaossl.html) - Lua bindings for TLS/SSL - [lua-zlib](https://github.com/brimworks/lua-zlib) - Optional Lua bindings for zlib lua-http can run on any operating system supported by cqueues and openssl, which at the time of writing is GNU/Linux, FreeBSD, NetBSD, OpenBSD, OSX and Solaris. ## Common Use Cases The following are two simple demonstrations of how the lua-http library can be used: ### Retrieving a Document The highest level interface for clients is [*http.request*](#http.request). By constructing a [*request*](#http.request) object from a URI using [`new_from_uri`](#http.request.new_from_uri) and immediately evaluating it, you can easily fetch an HTTP resource. ```lua local http_request = require "http.request" local headers, stream = assert(http_request.new_from_uri("http://example.com"):go()) local body = assert(stream:get_body_as_string()) if headers:get ":status" ~= "200" then error(body) end print(body) ``` ### WebSocket Communications {#http.websocket-example} To request information from a WebSocket server, use the `websocket` module to create a new WebSocket client. ```lua local websocket = require "http.websocket" local ws = websocket.new_from_uri("wss://echo.websocket.org") assert(ws:connect()) assert(ws:send("koo-eee!")) local data = assert(ws:receive()) assert(data == "koo-eee!") assert(ws:close()) ``` ## Asynchronous Operation lua-http has been written to perform asynchronously so that it can be used in your application, server or game without blocking your main loop. Asynchronous operations are achieved by utilizing cqueues, a Lua/C library that incorporates Lua yielding and kernel level APIs to reduce CPU usage. All lua-http operations including DNS lookup, TLS negotiation and read/write operations will not block the main application thread when run from inside a cqueue or cqueue enabled "container". While sometimes it is necessary to block a routine (yield) and wait for external data, any blocking API calls take an optional timeout to ensure good behaviour of networked applications and avoid unresponsive or "dead" routines. Asynchronous operations are one of the most powerful features of lua-http and require no effort on the developers part. For instance, an HTTP server can be instantiated within any Lua main loop and run alongside application code without adversely affecting the main application process. If other cqueue enabled components are integrated within a cqueue loop, the application is entirely event driven through kernel level polling APIs. cqueues can be used in conjunction with lua-http to integrate other features into your lua application and create powerful, performant, web enabled applications. Some of the examples in this guide will use cqueues for simple demonstrations. For more resources about cqueues, please see: - [The cqueues website](http://25thandclement.com/~william/projects/cqueues.html) for more information about the cqueues library. - cqueues examples can be found with the cqueues source code available through [git or archives](http://www.25thandclement.com/~william/projects/cqueues.html#download) or accessed online [here](https://github.com/wahern/cqueues/tree/master/examples). 
- For more information on integrating cqueues with other event loop libraries please see [integration with other event loops](https://github.com/wahern/cqueues/wiki/Integrations-with-other-main-loops). - For other libraries that use cqueues such as asynchronous APIs for Redis and PostgreSQL, please see [the cqueues wiki entry here](https://github.com/wahern/cqueues/wiki/Libraries-that-use-cqueues). ## Conventions The following is a list of API conventions and general reference: ### HTTP - HTTP 1 request and status line fields are passed around inside of _[headers](#http.headers)_ objects under keys `":authority"`, `":method"`, `":path"`, `":scheme"` and `":status"` as defined in HTTP 2. As such, they are all kept in string form (important to remember for the `:status` field). - Header fields should always be used with lower case keys. ### Errors - Invalid function parameters will throw a lua error (if validated). - Errors are returned as `nil`, error, errno unless noted otherwise. - Some HTTP 2 operations return/throw special [http 2 error objects](#http.h2_error). ### Timeouts All operations that may block the current thread take a `timeout` argument. This argument is always the number of seconds to allow before returning `nil, err_msg, ETIMEDOUT` where `err_msg` is a localised error message such as `"connection timed out"`. ## Terminology Much lua-http terminology is borrowed from HTTP 2. _[Connection](#connection)_ - An abstraction over an underlying TCP/IP socket. lua-http currently has two connection types: one for HTTP 1, one for HTTP 2. _[Stream](#stream)_ - A request/response on a connection object. lua-http has two stream types: one for [*HTTP 1 streams*](#http.h1_stream), and one for [*HTTP 2 streams*](#http.h2_stream). The common interfaces is described in [*stream*](#stream). lua-http-0.4/doc/links.md000066400000000000000000000002031400726324600153210ustar00rootroot00000000000000# Links - [Github](https://github.com/daurnimator/lua-http) - [Issue tracker](https://github.com/daurnimator/lua-http/issues) lua-http-0.4/doc/metadata.yaml000066400000000000000000000001431400726324600163260ustar00rootroot00000000000000 --- title: lua-http subtitle: HTTP library for Lua author: Daurnimator ... lua-http-0.4/doc/modules.md000066400000000000000000000000121400726324600156470ustar00rootroot00000000000000# Modules lua-http-0.4/doc/modules/000077500000000000000000000000001400726324600153345ustar00rootroot00000000000000lua-http-0.4/doc/modules/http.bit.md000066400000000000000000000007771400726324600174250ustar00rootroot00000000000000## http.bit An abstraction layer over the various lua bit libraries. Results are only consistent between underlying implementations when parameters and results are in the range of `0` to `0x7fffffff`. ### `band(a, b)` {#http.bit.band} Bitwise And operation. ### `bor(a, b)` {#http.bit.bor} Bitwise Or operation. ### `bxor(a, b)` {#http.bit.bxor} Bitwise XOr operation. ### Example {#http.bit-example} ```lua local bit = require "http.bit" print(bit.band(1, 3)) --> 1 ``` lua-http-0.4/doc/modules/http.client.md000066400000000000000000000053021400726324600201120ustar00rootroot00000000000000## http.client Deals with obtaining a connection to an HTTP server. ### `negotiate(socket, options, timeout)` {#http.client.negotiate} Negotiates the HTTP settings with the remote server. If TLS has been specified, this function instantiates the encryption tunnel. 
Parameters are as follows: - `socket` is a cqueues socket object - `options` is a table containing: - `.tls` (boolean, optional): Should TLS be used? defaults to `false` - `.ctx` (userdata, optional): the `SSL_CTX*` to use if `.tls` is `true`. If `.ctx` is `nil` then a default context will be used. - `.sendname` (string|boolean, optional): the [TLS SNI](https://en.wikipedia.org/wiki/Server_Name_Indication) host to send. defaults to `true` - `true` indicates to copy the `.host` field as long as it is **not** an IP - `false` disables SNI - `.version` (`nil`|1.0|1.1|2): HTTP version to use. - `nil`: attempts HTTP 2 and falls back to HTTP 1.1 - `1.0` - `1.1` - `2` - `.h2_settings` (table, optional): HTTP 2 settings to use. See [*http.h2_connection*](#http.h2_connection) for details ### `connect(options, timeout)` {#http.client.connect} This function returns a new connection to an HTTP server. Once a connection has been opened, a stream can be created to start a request/response exchange. Please see [`h1_stream.new_stream`](#h1_stream.new_stream) and [`h2_stream.new_stream`](#h2_stream.new_stream) for more information about creating streams. - `options` is a table containing the options to [`http.client.negotiate`](#http.client.negotiate), plus the following: - `family` (integer, optional): socket family to use. defaults to `AF_INET` - `host` (string): host to connect to. may be either a hostname or an IP address - `port` (string|integer): port to connect to in numeric form e.g. `"80"` or `80` - `path` (string): path to connect to (UNIX sockets) - `v6only` (boolean, optional): if the `IPV6_V6ONLY` flag should be set on the underlying socket. - `bind` (string, optional): the local outgoing address and optionally port to bind in the form of `"address[:port]"`, IPv6 addresses may be specified via square bracket notation. e.g. `"127.0.0.1"`, `"127.0.0.1:50000"`, `"[::1]:30000"`. - `timeout` (optional) is the maximum amount of time (in seconds) to allow for connection to be established. This includes time for DNS lookup, connection, TLS negotiation (if TLS enabled) and in the case of HTTP 2: settings exchange. 
#### Example {#http.client.connect-example} Connect to a local HTTP server running on port 8000 ```lua local http_client = require "http.client" local myconnection = http_client.connect { host = "localhost"; port = 8000; tls = false; } ``` lua-http-0.4/doc/modules/http.compat.prosody.md000066400000000000000000000020061400726324600216130ustar00rootroot00000000000000## http.compat.prosody Provides usage similar to [prosody's net.http](https://prosody.im/doc/developers/net/http) ### `request(url, ex, callback)` {#http.compat.prosody.request} A few key differences to the prosody `net.http.request`: - must be called from within a running cqueue - The callback may be called from a different thread in the cqueue - The returned object will be a [*http.request*](#http.request) object - This object is passed to the callback on errors and as the fourth argument on success - The default user-agent will be from lua-http (rather than `"Prosody XMPP Server"`) - lua-http features (such as HTTP2) will be used where possible ### Example {#http.compat.prosody-example} ```lua local prosody_http = require "http.compat.prosody" local cqueues = require "cqueues" local cq = cqueues.new() cq:wrap(function() prosody_http.request("http://httpbin.org/ip", {}, function(b, c, r) print(c) --> 200 print(b) --> {"origin": "123.123.123.123"} end) end) assert(cq:loop()) ``` lua-http-0.4/doc/modules/http.compat.socket.md000066400000000000000000000010661400726324600214110ustar00rootroot00000000000000## http.compat.socket Provides compatibility with [luasocket's http.request module](http://w3.impa.br/~diego/software/luasocket/http.html). Differences: - Will automatically be non-blocking when run inside a cqueues managed coroutine - lua-http features (such as HTTP2) will be used where possible ### Example {#http.compat.socket-example} Using the 'simple' interface as part of a normal script: ```lua local socket_http = require "http.compat.socket" local body, code = assert(socket_http.request("http://lua.org")) print(code, #body) --> 200, 2514 ``` lua-http-0.4/doc/modules/http.cookie.md000066400000000000000000000203171400726324600201100ustar00rootroot00000000000000## http.cookie A module for working with cookies. ### `bake(name, value, expiry_time, domain, path, secure_only, http_only, same_site)` {#http.cookie.bake} Returns a string suitable for use in a `Set-Cookie` header with the passed parameters. ### `parse_cookie(cookie)` {#http.cookie.parse_cookie} Parses the `Cookie` header contents `cookie`. Returns a table containing `name` and `value` pairs as strings. ### `parse_cookies(req_headers)` {#http.cookie.parse_cookies} Parses all `Cookie` headers in the [*http.headers*](#http.headers) object `req_headers`. Returns a table containing `name` and `value` pairs as strings. ### `parse_setcookie(setcookie)` {#http.cookie.parse_setcookie} Parses the `Set-Cookie` header contents `setcookie`. Returns `name`, `value` and `params` where: - `name` is a string containing the cookie name - `value` is a string containing the cookie value - `params` is the a table where the keys are cookie attribute names and values are cookie attribute values ### `new_store()` {#http.cookie.new_store} Creates a new cookie store. Cookies are unique for a tuple of domain, path and name; although multiple cookies with the same name may exist in a request due to overlapping paths or domains. ### `store.psl` {#http.cookie.store.psl} A [lua-psl](https://github.com/daurnimator/lua-psl) object to use for checking against the Public Suffix List. 
Set the field to `false` to skip checking the suffix list. Defaults to the [latest](https://rockdaboot.github.io/libpsl/libpsl-Public-Suffix-List-functions.html#psl-latest) PSL on the system. If lua-psl is not installed then it will be `nil`. ### `store.time()` {#http.cookie.store.time} A function used by the `store` to get the current time for expiries and such. Defaults to a function based on [`os.time`](https://www.lua.org/manual/5.3/manual.html#pdf-os.time). ### `store.max_cookie_length` {#http.cookie.store.max_cookie_length} The maximum length (in bytes) of cookies in the store; this value is also used as default maximum cookie length for `:lookup()`. Decreasing this value will only prevent new cookies from being added, it will not remove old cookies. Defaults to infinity (no maximum size). ### `store.max_cookies` {#http.cookie.store.max_cookies} The maximum number of cookies allowed in the `store`. Decreasing this value will only prevent new cookies from being added, it will not remove old cookies. Defaults to infinity (any number of cookies is allowed). ### `store.max_cookies_per_domain` {#http.cookie.store.max_cookies_per_domain} The maximum number of cookies allowed in the `store` per domain. Decreasing this value will only prevent new cookies from being added, it will not remove old cookies. Defaults to infinity (any number of cookies is allowed). ### `store:store(req_domain, req_path, req_is_http, req_is_secure, req_site_for_cookies, name, value, params)` {#http.cookie.store:store} Attempts to add a cookie to the `store`. - `req_domain` is the domain that the cookie was obtained from - `req_path` is the path that the cookie was obtained from - `req_is_http` is a boolean flag indicating if the cookie was obtained from a "non-HTTP" API - `req_is_secure` is a boolean flag indicating if the cookie was obtained from a "secure" protocol - `req_site_for_cookies` is a string containing the host that should be considered as the "site for cookies" (See [RFC 6265bis-02 Section 5.2](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2)), can be `nil` if unknown. - `name` is a string containing the cookie name - `value` is a string containing the cookie value - `params` is the a table where the keys are cookie attribute names and values are cookie attribute values Returns a boolean indicating if a cookie was stored. ### `store:store_from_request(req_headers, resp_headers, req_host, req_site_for_cookies)` {#http.cookie.store:store_from_request} Attempt to store any cookies found in the response headers. - `req_headers` is the [*http.headers*](#http.headers) object for the outgoing request - `resp_headers` is the [*http.headers*](#http.headers) object received in response - `req_host` is the host that your query was directed at (only used if `req_headers` is missing a `Host` header) - `req_site_for_cookies` is a string containing the host that should be considered as the "site for cookies" (See [RFC 6265bis-02 Section 5.2](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2)), can be `nil` if unknown. ### `store:get(domain, path, name)` {#http.cookie.store:get} Returns the cookie value for the cookie stored for the passed `domain`, `path` and `name`. ### `store:remove(domain, path, name)` {#http.cookie.store:remove} Deletes the cookie stored for the passed `domain`, `path` and `name`. If `name` is `nil` or not passed then all cookies for the `domain` and `path` are removed. 
If `path` is `nil` or not passed (in addition to `name`) then all cookies for the `domain` are removed. ### `store:lookup(domain, path, is_http, is_secure, is_safe_method, site_for_cookies, is_top_level, max_cookie_length)` {#http.cookie.store:lookup} Finds cookies visible to suitable for passing to an entity. - `domain` is the domain that will be sent the cookie - `path` is the path that will be sent the cookie - `is_http` is a boolean flag indicating if the destination is a "non-HTTP" API - `is_secure` is a boolean flag indicating if the destination will be communicated with over a "secure" protocol - `is_safe_method` is a boolean flag indicating if the cookie will be sent via a safe HTTP method (See also [http.util.is_safe_method](#http.util.is_safe_method)) - `site_for_cookies` is a string containing the host that should be considered as the "site for cookies" (See [RFC 6265bis-02 Section 5.2](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2)), can be `nil` if unknown. - `is_top_level` is a boolean flag indicating if this request is a "top level" request (See [RFC 6265bis-02 Section 5.2](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2)) - `max_cookie_length` is the maximum cookie length to allow (See also [`store.max_cookie_length`](#http.cookie.store.max_cookie_length)) Returns a string suitable for use in a `Cookie` header. ### `store:lookup_for_request(headers, host, site_for_cookies, is_top_level, max_cookie_length)` {#http.cookie.store:lookup_for_request} Finds cookies suitable for adding to a request. - `headers` is the [*http.headers*](#http.headers) object for the outgoing request - `host` is the host that your query was directed at (only used if `headers` is missing a `Host` header) - `site_for_cookies` is a string containing the host that should be considered as the "site for cookies" (See [RFC 6265bis-02 Section 5.2](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2)), can be `nil` if unknown. - `is_top_level` is a boolean flag indicating if this request is a "top level" request (See [RFC 6265bis-02 Section 5.2](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2)) - `max_cookie_length` is the maximum cookie length to allow (See also [`store.max_cookie_length`](#http.cookie.store.max_cookie_length)) Returns a string suitable for use in a `Cookie` header. ### `store:clean_due()` {#http.cookie.store:clean_due} Returns the number of seconds until the next cookie in the `store` expires. ### `store:clean()` {#http.cookie.store:clean} Remove all expired cookies from the `store`. ### `store:load_from_file(file)` {#http.cookie.store:load_from_file} Loads cookie data from the file object `file` into `store`. The file should be in the Netscape Cookiejar format. Invalid lines in the file are ignored. Returns `true` on success or passes along `nil, err, errno` if a `:read` call fails. ### `store:save_to_file(file)` {#http.cookie.store:save_to_file} Writes the cookie data from `store` into the file object `file` in the Netscape Cookiejar format. `file` is not `seek`-ed or truncated before writing. Returns `true` on success or passes along `nil, err, errno` if a `:write` call fails. lua-http-0.4/doc/modules/http.h1_connection.md000066400000000000000000000232121400726324600213630ustar00rootroot00000000000000## http.h1_connection The *h1_connection* module adheres to the [*connection*](#connection) interface and provides HTTP 1 and 1.1 specific operations. 
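The functions used below are documented in the remainder of this section; the example is a simplified sketch of driving HTTP 1.1 by hand from the client side. `example.com` is a placeholder host, the plain cqueues socket API is assumed, and in an application this would normally run inside a cqueues coroutine:

```lua
-- Speak HTTP/1.1 by hand over a plain cqueues socket.
local cs = require "cqueues.socket"
local h1_connection = require "http.h1_connection"

local sock = assert(cs.connect { host = "example.com"; port = 80 })
local conn = h1_connection.new(sock, "client", 1.1)
assert(conn:connect(10))

assert(conn:write_request_line("GET", "/", 1.1, 10))
assert(conn:write_header("host", "example.com", 10))
assert(conn:write_header("connection", "close", 10))
assert(conn:write_headers_done(10)) -- flushes the buffered request to the server

local httpversion, status_code, reason = assert(conn:read_status_line(10))
print(httpversion, status_code, reason)

-- Read response headers until the terminating blank line.
local done
repeat
	local ok, err = conn:read_headers_done(10)
	if ok == nil then error(err) end
	done = ok
	if not done then
		local k, v = assert(conn:read_header(10))
		print(k, v)
	end
until done

-- We sent "connection: close", so the body is delimited by the connection closing.
local body = assert(conn:read_body_till_close(10))
print(#body, "bytes of body")
conn:close()
```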
### `new(socket, conn_type, version)` {#connection.new} Constructor for a new connection. Takes a cqueues socket object, a [connection type string](#connection.type) and a numeric HTTP version number. Valid values for the connection type are `"client"` and `"server"`. Valid values for the version number are `1` and `1.1`. Returns the newly initialized connection object. ### `h1_connection.version` {#http.h1_connection.version} Specifies the HTTP version used for the connection handshake. Valid values are: - `1.0` - `1.1` See [`connection.version`](#connection.version) ### `h1_connection:pollfd()` {#http.h1_connection:pollfd} See [`connection:pollfd()`](#connection:pollfd) ### `h1_connection:events()` {#http.h1_connection:events} See [`connection:events()`](#connection:events) ### `h1_connection:timeout()` {#http.h1_connection:timeout} See [`connection:timeout()`](#connection:timeout) ### `h1_connection:connect(timeout)` {#http.h1_connection:connect} See [`connection:connect(timeout)`](#connection:connect) ### `h1_connection:checktls()` {#http.h1_connection:checktls} See [`connection:checktls()`](#connection:checktls) ### `h1_connection:localname()` {#http.h1_connection:localname} See [`connection:localname()`](#connection:localname) ### `h1_connection:peername()` {#http.h1_connection:peername} See [`connection:peername()`](#connection:peername) ### `h1_connection:flush(timeout)` {#http.h1_connection:flush} See [`connection:flush(timeout)`](#connection:flush) ### `h1_connection:shutdown(dir)` {#http.h1_connection:shutdown} Shut down is as graceful as possible: pipelined streams are [shutdown](#http.h1_stream:shutdown), then the underlying socket is shut down in the appropriate direction(s). `dir` is a string representing the direction of communication to shut down communication in. If it contains `"r"` it will shut down reading, if it contains `"w"` it will shut down writing. The default is `"rw"`, i.e. to shutdown communication in both directions. See [`connection:shutdown()`](#connection:shutdown) ### `h1_connection:close()` {#http.h1_connection:close} See [`connection:close()`](#connection:close) ### `h1_connection:new_stream()` {#http.h1_connection:new_stream} In HTTP 1, only a client may initiate new streams with this function. See [`connection:new_stream()`](#connection:new_stream) for more information. ### `h1_connection:get_next_incoming_stream(timeout)` {#http.h1_connection:get_next_incoming_stream} See [`connection:get_next_incoming_stream(timeout)`](#connection:get_next_incoming_stream) ### `h1_connection:onidle(new_handler)` {#http.h1_connection:onidle} See [`connection:onidle(new_handler)`](#connection:onidle) ### `h1_connection:setmaxline(read_length)` {#http.h1_connection:setmaxline} Sets the maximum read buffer size (in bytes) to `read_length`. i.e. sets the maximum length lines (such as headers). The default comes from the underlying socket, which gets the (changable) cqueues default at time of construction. The default cqueues default is 4096 bytes. ### `h1_connection:clearerr(...)` {#http.h1_connection:clearerr} Clears errors to allow for further read or write operations on the connection. Returns the error number of existing errors. This function is used to recover from known errors. ### `h1_connection:error(...)` {#http.h1_connection:error} Returns the error number of existing errors. ### `h1_connection:take_socket()` {#http.h1_connection:take_socket} Used to hand the reference of the connection socket to another object. 
Resets the socket to defaults and returns the single existing reference of the socket to the calling routine. This function can be used for connection upgrades such as upgrading from HTTP 1 to a WebSocket. ### `h1_connection:read_request_line(timeout)` {#http.h1_connection:read_request_line} Reads a request line from the socket. Returns the request method, request target and HTTP version for an incoming request. `:read_request_line()` yields until a `"\r\n"` terminated chunk is received, or `timeout` is exceeded. If the incoming chunk is not a valid HTTP request line, `nil` is returned. On error, returns `nil`, an error message and an error number. ### `h1_connection:read_status_line(timeout)` {#http.h1_connection:read_status_line} Reads a line of input from the socket. If the input is a valid status line, the HTTP version (1 or 1.1), status code and reason description (if applicable) is returned. `:read_status_line()` yields until a `"\r\n"` terminated chunk is received, or `timeout` is exceeded. If the socket could not be read, returns `nil`, an error message and an error number. ### `h1_connection:read_header(timeout)` {#http.h1_connection:read_header} Reads a CRLF terminated HTTP header from the socket and returns the header key and value. This function will yield until a MIME compliant header item is received or until `timeout` is exceeded. If the header could not be read, the function returns `nil` an error and an error message. ### `h1_connection:read_headers_done(timeout)` {#http.h1_connection:read_headers_done} Checks for an empty line, which indicates the end of the HTTP headers. Returns `true` if an empty line is received. Any other value is pushed back on the socket receive buffer (unget) and the function returns `false`. This function will yield waiting for input from the socket or until `timeout` is exceeded. Returns `nil`, an error and an error message if the socket cannot be read. ### `h1_connection:read_body_by_length(len, timeout)` {#http.h1_connection:read_body_by_length} Get `len` number of bytes from the socket. Use a negative number for *up to* that number of bytes. This function will yield and wait on the socket if length of the buffered body is less than `len`. Asserts if len is not a number. ### `h1_connection:read_body_till_close(timeout)` {#http.h1_connection:read_body_till_close} Reads the entire request body. This function will yield until the body is complete or `timeout` is expired. If the read fails the function returns `nil`, an error message and an error number. ### `h1_connection:read_body_chunk(timeout)` {#http.h1_connection:read_body_chunk} Reads the next available line of data from the request and returns the chunk and any chunk extensions. This function will yield until chunk size is received or `timeout` is exceeded. If the chunk size is indicated as `0` then `false` and any chunk extensions are returned. Returns `nil`, an error message and an error number if there was an error reading reading the chunk header or the socket. ### `h1_connection:write_request_line(method, target, httpversion, timeout)` {#http.h1_connection:write_request_line} Writes the opening HTTP 1.x request line for a new request to the socket buffer. Yields until success or `timeout`. If the write fails, returns `nil`, an error message and an error number. 
*Note the request line will not be flushed to the remote server until* [`write_headers_done`](#http.h1_connection:write_headers_done) *is called.* ### `h1_connection:write_status_line(httpversion, status_code, reason_phrase, timeout)` {#http.h1_connection:write_status_line} Writes an HTTP status line to the socket buffer. Yields until success or `timeout`. If the write fails, the function returns `nil`, an error message and an error number. *Note the status line will not be flushed to the remote server until* [`write_headers_done`](#http.h1_connection:write_headers_done) *is called.* ### `h1_connection:write_header(k, v, timeout)` {#http.h1_connection:write_header} Writes a header item to the socket buffer as a `key:value` string. Yields until success or `timeout`. Returns `nil`, an error message and an error if the write fails. *Note the header item will not be flushed to the remote server until* [`write_headers_done`](#http.h1_connection:write_headers_done) *is called.* ### `h1_connection:write_headers_done(timeout)` {#http.h1_connection:write_headers_done} Terminates a header block by writing a blank line (`"\r\n"`) to the socket. This function will flush all outstanding data in the socket output buffer. Yields until success or `timeout`. Returns `nil`, an error message and an error if the write fails. ### `h1_connection:write_body_chunk(chunk, chunk_ext, timeout)` {#http.h1_connection:write_body_chunk} Writes a chunk of data to the socket. `chunk_ext` must be `nil` as chunk extensions are not supported. Will yield until complete or `timeout` is exceeded. Returns true on success. Returns `nil`, an error message and an error number if the write fails. ### `h1_connection:write_body_last_chunk(chunk_ext, timeout)` {#http.h1_connection:write_body_last_chunk} Writes the chunked body terminator `"0\r\n"` to the socket. `chunk_ext` must be `nil` as chunk extensions are not supported. Will yield until complete or `timeout` is exceeded. Returns `nil`, an error message and an error number if the write fails. *Note that the connection will not be immediately flushed to the remote server; normally this will occur when trailers are written.* ### `h1_connection:write_body_plain(body, timeout)` {#http.h1_connection:write_body_plain} Writes the contents of `body` to the socket and flushes the socket output buffer immediately. Yields until success or `timeout` is exceeded. Returns `nil`, an error message and an error number if the write fails. lua-http-0.4/doc/modules/http.h1_reason_phrases.md000066400000000000000000000005361400726324600222440ustar00rootroot00000000000000## http.h1_reason_phrases A table mapping from status codes (as strings) to reason phrases for HTTP 1. Any unknown status codes return `"Unassigned"` ### Example {#http.h1_reason_phrases-example} ```lua local reason_phrases = require "http.h1_reason_phrases" print(reason_phrases["200"]) --> "OK" print(reason_phrases["342"]) --> "Unassigned" ``` lua-http-0.4/doc/modules/http.h1_stream.md000066400000000000000000000103561400726324600205240ustar00rootroot00000000000000## http.h1_stream The *h1_stream* module adheres to the [*stream*](#stream) interface and provides HTTP 1.x specific operations. The gzip transfer encoding is supported transparently. ### `h1_stream.connection` {#http.h1_stream.connection} See [`stream.connection`](#stream.connection) ### `h1_stream.max_header_lines` {#http.h1_stream.max_header_lines} The maximum number of header lines to read. Default is `100`. 
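A sketch of how an h1_stream is typically obtained and used on the server side follows. The `http.headers` calls are assumed from that module's documentation, and overriding `max_header_lines` on a single stream instance is an assumption for illustration rather than documented behaviour:

```lua
-- A tiny single-connection HTTP/1.1 server built directly on h1_connection/h1_stream.
local cqueues = require "cqueues"
local cs = require "cqueues.socket"
local h1_connection = require "http.h1_connection"
local http_headers = require "http.headers" -- assumed: new(), :append(), :get()

local cq = cqueues.new()
cq:wrap(function()
	local server = assert(cs.listen { host = "127.0.0.1"; port = 8000 })
	local sock = assert(server:accept())
	local conn = h1_connection.new(sock, "server", 1.1)

	local stream = assert(conn:get_next_incoming_stream(10))
	stream.max_header_lines = 200 -- assumption: per-stream override of the default (100)

	local req = assert(stream:get_headers(10))
	print("request for", req:get(":path"))

	local res = http_headers.new()
	res:append(":status", "200")
	res:append("content-type", "text/plain")
	assert(stream:write_headers(res, false))
	assert(stream:write_chunk("hello from lua-http\n", true)) -- true: end of body
	conn:close()
	server:close()
end)
assert(cq:loop())
```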
### `h1_stream:checktls()` {#http.h1_stream:checktls} See [`stream:checktls()`](#stream:checktls) ### `h1_stream:localname()` {#http.h1_stream:localname} See [`stream:localname()`](#stream:localname) ### `h1_stream:peername()` {#http.h1_stream:peername} See [`stream:peername()`](#stream:peername) ### `h1_stream:get_headers(timeout)` {#http.h1_stream:get_headers} See [`stream:get_headers(timeout)`](#stream:get_headers) ### `h1_stream:write_headers(headers, end_stream, timeout)` {#http.h1_stream:write_headers} See [`stream:write_headers(headers, end_stream, timeout)`](#stream:write_headers) ### `h1_stream:write_continue(timeout)` {#http.h1_stream:write_continue} See [`stream:write_continue(timeout)`](#stream:write_continue) ### `h1_stream:get_next_chunk(timeout)` {#http.h1_stream:get_next_chunk} See [`stream:get_next_chunk(timeout)`](#stream:get_next_chunk) ### `h1_stream:each_chunk()` {#http.h1_stream:each_chunk} See [`stream:each_chunk()`](#stream:each_chunk) ### `h1_stream:get_body_as_string(timeout)` {#http.h1_stream:get_body_as_string} See [`stream:get_body_as_string(timeout)`](#stream:get_body_as_string) ### `h1_stream:get_body_chars(n, timeout)` {#http.h1_stream:get_body_chars} See [`stream:get_body_chars(n, timeout)`](#stream:get_body_chars) ### `h1_stream:get_body_until(pattern, plain, include_pattern, timeout)` {#http.h1_stream:get_body_until} See [`stream:get_body_until(pattern, plain, include_pattern, timeout)`](#stream:get_body_until) ### `h1_stream:save_body_to_file(file, timeout)` {#http.h1_stream:save_body_to_file} See [`stream:save_body_to_file(file, timeout)`](#stream:save_body_to_file) ### `h1_stream:get_body_as_file(timeout)` {#http.h1_stream:get_body_as_file} See [`stream:get_body_as_file(timeout)`](#stream:get_body_as_file) ### `h1_stream:unget(str)` {#http.h1_stream:unget} See [`stream:unget(str)`](#stream:unget) ### `h1_stream:write_chunk(chunk, end_stream, timeout)` {#http.h1_stream:write_chunk} See [`stream:write_chunk(chunk, end_stream, timeout)`](#stream:write_chunk) ### `h1_stream:write_body_from_string(str, timeout)` {#http.h1_stream:write_body_from_string} See [`stream:write_body_from_string(str, timeout)`](#stream:write_body_from_string) ### `h1_stream:write_body_from_file(options|file, timeout)` {#http.h1_stream:write_body_from_file} See [`stream:write_body_from_file(options|file, timeout)`](#stream:write_body_from_file) ### `h1_stream:shutdown()` {#http.h1_stream:shutdown} See [`stream:shutdown()`](#stream:shutdown) ### `h1_stream:set_state(new)` {#http.h1_stream:set_state} Sets the state of the stream to `new`. `new` must be one of the following valid states: - `"open"`: have sent or received headers; haven't sent body yet - `"half closed (local)"`: have sent whole body - `"half closed (remote)"`: have received whole body - `"closed"`: complete Not all state transitions are allowed. ### `h1_stream:read_headers(timeout)` {#http.h1_stream:read_headers} Reads and returns a [header block](#http.headers) from the underlying connection. Does *not* take into account buffered header blocks. On error, returns `nil`, an error message and an error number. This function should rarely be used, you're probably looking for [`:get_headers()`](#http.h1_stream:get_headers). ### `h1_stream:read_next_chunk(timeout)` {#http.h1_stream:read_next_chunk} Reads and returns the next chunk as a string from the underlying connection. Does *not* take into account buffered chunks. On error, returns `nil`, an error message and an error number. 
This function should rarely be used, you're probably looking for [`:get_next_chunk()`](#http.h1_stream:get_next_chunk). lua-http-0.4/doc/modules/http.h2_connection.md000066400000000000000000000074021400726324600213670ustar00rootroot00000000000000## http.h2_connection The *h2_connection* module adheres to the [*connection*](#connection) interface and provides HTTP 2 specific operations. An HTTP 2 connection can have multiple streams actively transmitting data at once, hence an *http.h2_connection* acts much like a scheduler. ### `new(socket, conn_type, settings)` {#http.h2_connection.new} Constructor for a new connection. Takes a cqueues socket object, a [connection type string](#connection.type) and an optional table of HTTP 2 settings. Returns the newly initialized connection object in a non-connected state. ### `h2_connection.version` {#http.h2_connection.version} Contains the HTTP connection version. Currently this will always be `2`. See [`connection.version`](#connection.version) ### `h2_connection:pollfd()` {#http.h2_connection:pollfd} See [`connection:pollfd()`](#connection:pollfd) ### `h2_connection:events()` {#http.h2_connection:events} See [`connection:events()`](#connection:events) ### `h2_connection:timeout()` {#http.h2_connection:timeout} See [`connection:timeout()`](#connection:timeout) ### `h2_connection:empty()` {#http.h2_connection:empty} ### `h2_connection:step(timeout)` {#http.h2_connection:step} ### `h2_connection:loop(timeout)` {#http.h2_connection:loop} ### `h2_connection:connect(timeout)` {#http.h2_connection:connect} See [`connection:connect(timeout)`](#connection:connect) ### `h2_connection:checktls()` {#http.h2_connection:checktls} See [`connection:checktls()`](#connection:checktls) ### `h2_connection:localname()` {#http.h2_connection:localname} See [`connection:localname()`](#connection:localname) ### `h2_connection:peername()` {#http.h2_connection:peername} See [`connection:peername()`](#connection:peername) ### `h2_connection:flush(timeout)` {#http.h2_connection:flush} See [`connection:flush(timeout)`](#connection:flush) ### `h2_connection:shutdown()` {#http.h2_connection:shutdown} See [`connection:shutdown()`](#connection:shutdown) ### `h2_connection:close()` {#http.h2_connection:close} See [`connection:close()`](#connection:close) ### `h2_connection:new_stream(id)` {#http.h2_connection:new_stream} Create and return a new [*h2_stream*](#http.h2_stream). `id` (optional) is the stream id to assign the new stream, if not specified for client initiated streams this will be the next free odd numbered stream, for server initiated streams this will be the next free even numbered stream. See [`connection:new_stream()`](#connection:new_stream) for more information. 
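#### Example

A minimal sketch of opening a client-initiated stream. It assumes `sock` is an already-connected `cqueues.socket` object to an HTTP 2 capable peer (the variable name is illustrative, not part of the API):

```lua
local h2_connection = require "http.h2_connection"

-- `sock` is assumed to be an existing, connected cqueues socket
local conn = h2_connection.new(sock, "client")
assert(conn:connect())
local stream = conn:new_stream() -- no id given: the next free odd-numbered (client) id is used
```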
### `h2_connection:get_next_incoming_stream(timeout)` {#http.h2_connection:get_next_incoming_stream} See [`connection:get_next_incoming_stream(timeout)`](#connection:get_next_incoming_stream) ### `h2_connection:onidle(new_handler)` {#http.h2_connection:onidle} See [`connection:onidle(new_handler)`](#connection:onidle) ### `h2_connection:read_http2_frame(timeout)` {#http.h2_connection:read_http2_frame} ### `h2_connection:write_http2_frame(typ, flags, streamid, payload, timeout, flush)` {#http.h2_connection:write_http2_frame} ### `h2_connection:ping(timeout)` {#http.h2_connection:ping} ### `h2_connection:write_window_update(inc, timeout)` {#http.h2_connection:write_window_update} ### `h2_connection:write_goaway_frame(last_stream_id, err_code, debug_msg, timeout)` {#http.h2_connection:write_goaway_frame} ### `h2_connection:set_peer_settings(peer_settings)` {#http.h2_connection:set_peer_settings} ### `h2_connection:ack_settings()` {#http.h2_connection:ack_settings} ### `h2_connection:settings(tbl, timeout)` {#http.h2_connection:settings} lua-http-0.4/doc/modules/http.h2_error.md000066400000000000000000000032741400726324600203640ustar00rootroot00000000000000## http.h2_error A type of error object that encapsulates HTTP 2 error information. An `http.h2_error` object has fields: - `name`: The error name: a short identifier for this error - `code`: The error code - `description`: The description of the error code - `message`: An error message - `traceback`: A traceback taken at the point the error was thrown - `stream_error`: A boolean that indicates if this is a stream level or protocol level error ### `errors` {#http.h2_error.errors} A table containing errors [as defined by the HTTP 2 specification](https://http2.github.io/http2-spec/#iana-errors). It can be indexed by error name (e.g. `errors.PROTOCOL_ERROR`) or numeric code (e.g. `errors[0x1]`). ### `is(ob)` {#http.h2_error.is} Returns a boolean indicating if the object `ob` is an `http.h2_error` object ### `h2_error:new(ob)` {#http.h2_error:new} Creates a new error object from the passed table. The table should have the form of an error object i.e. with fields `name`, `code`, `message`, `traceback`, etc. Fields `name`, `code` and `description` are inherited from the parent `h2_error` object if not specified. `stream_error` defaults to `false`. ### `h2_error:new_traceback(message, stream_error, lvl)` {#http.h2_error:new_traceback} Creates a new error object, recording a traceback from the current thread. ### `h2_error:error(message, stream_error, lvl)` {#http.h2_error:error} Creates and throws a new error. ### `h2_error:assert(cond, ...)` {#http.h2_error:assert} If `cond` is truthy, returns `cond, ...` If `cond` is falsy (i.e. `false` or `nil`), throws an error with the first element of `...` as the `message`. lua-http-0.4/doc/modules/http.h2_stream.md000066400000000000000000000120621400726324600205210ustar00rootroot00000000000000## http.h2_stream An h2_stream represents an HTTP 2 stream. The module follows the [*stream*](#stream) interface as well as HTTP 2 specific functions. 
### `h2_stream.connection` {#http.h2_stream.connection} See [`stream.connection`](#stream.connection) ### `h2_stream:checktls()` {#http.h2_stream:checktls} See [`stream:checktls()`](#stream:checktls) ### `h2_stream:localname()` {#http.h2_stream:localname} See [`stream:localname()`](#stream:localname) ### `h2_stream:peername()` {#http.h2_stream:peername} See [`stream:peername()`](#stream:peername) ### `h2_stream:get_headers(timeout)` {#http.h2_stream:get_headers} See [`stream:get_headers(timeout)`](#stream:get_headers) ### `h2_stream:write_headers(headers, end_stream, timeout)` {#http.h2_stream:write_headers} See [`stream:write_headers(headers, end_stream, timeout)`](#stream:write_headers) ### `h2_stream:write_continue(timeout)` {#http.h2_stream:write_continue} See [`stream:write_continue(timeout)`](#stream:write_continue) ### `h2_stream:get_next_chunk(timeout)` {#http.h2_stream:get_next_chunk} See [`stream:get_next_chunk(timeout)`](#stream:get_next_chunk) ### `h2_stream:each_chunk()` {#http.h2_stream:each_chunk} See [`stream:each_chunk()`](#stream:each_chunk) ### `h2_stream:get_body_as_string(timeout)` {#http.h2_stream:get_body_as_string} See [`stream:get_body_as_string(timeout)`](#stream:get_body_as_string) ### `h2_stream:get_body_chars(n, timeout)` {#http.h2_stream:get_body_chars} See [`stream:get_body_chars(n, timeout)`](#stream:get_body_chars) ### `h2_stream:get_body_until(pattern, plain, include_pattern, timeout)` {#http.h2_stream:get_body_until} See [`stream:get_body_until(pattern, plain, include_pattern, timeout)`](#stream:get_body_until) ### `h2_stream:save_body_to_file(file, timeout)` {#http.h2_stream:save_body_to_file} See [`stream:save_body_to_file(file, timeout)`](#stream:save_body_to_file) ### `h2_stream:get_body_as_file(timeout)` {#http.h2_stream:get_body_as_file} See [`stream:get_body_as_file(timeout)`](#stream:get_body_as_file) ### `h2_stream:unget(str)` {#http.h2_stream:unget} See [`stream:unget(str)`](#stream:unget) ### `h2_stream:write_chunk(chunk, end_stream, timeout)` {#http.h2_stream:write_chunk} See [`stream:write_chunk(chunk, end_stream, timeout)`](#stream:write_chunk) ### `h2_stream:write_body_from_string(str, timeout)` {#http.h2_stream:write_body_from_string} See [`stream:write_body_from_string(str, timeout)`](#stream:write_body_from_string) ### `h2_stream:write_body_from_file(options|file, timeout)` {#http.h2_stream:write_body_from_file} See [`stream:write_body_from_file(options|file, timeout)`](#stream:write_body_from_file) ### `h2_stream:shutdown()` {#http.h2_stream:shutdown} See [`stream:shutdown()`](#stream:shutdown) ### `h2_stream:pick_id(id)` {#http.h2_stream:pick_id} ### `h2_stream:set_state(new)` {#http.h2_stream:set_state} ### `h2_stream:reprioritise(child, exclusive)` {#http.h2_stream:reprioritise} ### `h2_stream:write_http2_frame(typ, flags, payload, timeout, flush)` {#http.h2_stream:write_http2_frame} Writes a frame with `h2_stream`'s stream id. 
See [`h2_connection:write_http2_frame(typ, flags, streamid, payload, timeout, flush)`](#http.h2_connection:write_http2_frame) ### `h2_stream:write_data_frame(payload, end_stream, padded, timeout, flush)` {#http.h2_stream:write_data_frame} ### `h2_stream:write_headers_frame(payload, end_stream, end_headers, padded, exclusive, stream_dep, weight, timeout, flush)` {#http.h2_stream:write_headers_frame} ### `h2_stream:write_priority_frame(exclusive, stream_dep, weight, timeout, flush)` {#http.h2_stream:write_priority_frame} ### `h2_stream:write_rst_stream_frame(err_code, timeout, flush)` {#http.h2_stream:write_rst_stream} ### `h2_stream:rst_stream(err, timeout)` {#http.h2_stream:rst_stream} ### `h2_stream:write_settings_frame(ACK, settings, timeout, flush)` {#http.h2_stream:write_settings_frame} ### `h2_stream:write_push_promise_frame(promised_stream_id, payload, end_headers, padded, timeout, flush)` {#http.h2_stream:write_push_promise_frame} ### `h2_stream:push_promise(headers, timeout)` {#http.h2_stream:push_promise} Pushes a new promise to the client. Returns the new stream as a [h2_stream](#http.h2_stream). ### `h2_stream:write_ping_frame(ACK, payload, timeout, flush)` {#http.h2_stream:write_ping_frame} ### `h2_stream:write_goaway_frame(last_streamid, err_code, debug_msg, timeout, flush)` {#http.h2_stream:write_goaway_frame} ### `h2_stream:write_window_update_frame(inc, timeout, flush)` {#http.h2_stream:write_window_update_frame} ### `h2_stream:write_window_update(inc, timeout)` {#http.h2_stream:write_window_update} ### `h2_stream:write_continuation_frame(payload, end_headers, timeout, flush)` {#http.h2_stream:write_continuation_frame} lua-http-0.4/doc/modules/http.headers.md000066400000000000000000000101561400726324600202520ustar00rootroot00000000000000## http.headers An ordered list of header fields. Each field has a *name*, a *value* and a *never_index* flag that indicates if the header field is potentially sensitive data. Each headers object has an index by field name to efficiently retrieve values by key. Keep in mind that there can be multiple values for a given field name. (e.g. an HTTP server may send two `Set-Cookie` headers). As noted in the [Conventions](#conventions) section, HTTP 1 request and status line fields are passed around inside of headers objects under keys `":authority"`, `":method"`, `":path"`, `":scheme"` and `":status"` as defined in HTTP 2. As such, they are all kept in string form (important to remember for the `":status"` field). ### `new()` {#http.headers.new} Creates and returns a new headers object. ### `headers:len()` {#http.headers:len} Returns the number of headers. Also available as `#headers` in Lua 5.2+. ### `headers:clone()` {#http.headers:clone} Creates and returns a clone of the headers object. ### `headers:append(name, value, never_index)` {#http.headers:append} Append a header. - `name` is the header field name. Lower case is the convention. It will not be validated at this time. - `value` is the header field value. It will not be validated at this time. - `never_index` is an optional boolean that indicates if the `value` should be considered secret. Defaults to true for header fields: authorization, proxy-authorization, cookie and set-cookie. ### `headers:each()` {#http.headers:each} An iterator over all headers that emits `name, value, never_index`. 
#### Example ```lua local http_headers = require "http.headers" local myheaders = http_headers.new() myheaders:append(":status", "200") myheaders:append("set-cookie", "foo=bar") myheaders:append("connection", "close") myheaders:append("set-cookie", "baz=qux") for name, value, never_index in myheaders:each() do print(name, value, never_index) end --[[ prints: ":status", "200", false "set-cookie", "foo=bar", true "connection", "close", false "set-cookie", "baz=qux", true ]] ``` ### `headers:has(name)` {#http.headers:has} Returns a boolean indicating if the headers object has a field with the given `name`. ### `headers:delete(name)` {#http.headers:delete} Removes all occurrences of a field name from the headers object. ### `headers:geti(i)` {#http.headers:geti} Return the `i`-th header as `name, value, never_index` ### `headers:get_as_sequence(name)` {#http.headers:get_as_sequence} Returns all headers with the given name in a table. The table will contain a field `.n` with the number of elements. #### Example ```lua local http_headers = require "http.headers" local myheaders = http_headers.new() myheaders:append(":status", "200") myheaders:append("set-cookie", "foo=bar") myheaders:append("connection", "close") myheaders:append("set-cookie", "baz=qux") local mysequence = myheaders:get_as_sequence("set-cookie") --[[ mysequence will be: {n = 2; "foo=bar"; "baz=qux"} ]] ``` ### `headers:get(name)` {#http.headers:get} Returns all headers with the given name as multiple return values. ### `headers:get_comma_separated(name)` {#http.headers:get_comma_separated} Returns all headers with the given name as items in a comma separated string. ### `headers:modifyi(i, value, never_index)` {#http.headers:modifyi} Change the `i`-th's header to a new `value` and `never_index`. ### `headers:upsert(name, value, never_index)` {#http.headers:upsert} If a header with the given `name` already exists, replace it. If not, [`append`](#http.headers:append) it to the list of headers. Cannot be used when a header `name` already has multiple values. ### `headers:sort()` {#http.headers:sort} Sort the list of headers by their field name, ordering those starting with `:` first. If `name`s are equal then sort by `value`, then by `never_index`. ### `headers:dump(file, prefix)` {#http.headers:dump} Print the headers list to the given file, one per line. If `file` is not given, then print to `stderr`. `prefix` is prefixed to each line. 
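### Example {#http.headers-example}

A short sketch combining [`append`](#http.headers:append), [`get_comma_separated`](#http.headers:get_comma_separated) and [`upsert`](#http.headers:upsert); the header names and values are arbitrary:

```lua
local http_headers = require "http.headers"

local myheaders = http_headers.new()
myheaders:append(":method", "GET")
myheaders:append("accept", "text/html")
myheaders:append("accept", "application/xml")

-- both "accept" values returned as one comma separated string
print(myheaders:get_comma_separated("accept"))

-- ":method" has a single value, so upsert may replace it
myheaders:upsert(":method", "HEAD")
print(myheaders:get(":method")) --> "HEAD"
```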
lua-http-0.4/doc/modules/http.hpack.md000066400000000000000000000031631400726324600177250ustar00rootroot00000000000000## http.hpack ### `new(SETTINGS_HEADER_TABLE_SIZE)` {#http.hpack.new} ### `hpack_context:append_data(val)` {#http.hpack:append_data} ### `hpack_context:render_data()` {#http.hpack:render_data} ### `hpack_context:clear_data()` {#http.hpack:clear_data} ### `hpack_context:evict_from_dynamic_table()` {#http.hpack:evict_from_dynamic_table} ### `hpack_context:dynamic_table_tostring()` {#http.hpack:dynamic_table_tostring} ### `hpack_context:set_max_dynamic_table_size(SETTINGS_HEADER_TABLE_SIZE)` {#http.hpack:set_max_dynamic_table_size} ### `hpack_context:encode_max_size(val)` {#http.hpack:encode_max_size} ### `hpack_context:resize_dynamic_table(new_size)` {#http.hpack:resize_dynamic_table} ### `hpack_context:add_to_dynamic_table(name, value, k)` {#http.hpack:add_to_dynamic_table} ### `hpack_context:dynamic_table_id_to_index(id)` {#http.hpack:dynamic_table_id_to_index} ### `hpack_context:lookup_pair_index(k)` {#http.hpack:lookup_pair_index} ### `hpack_context:lookup_name_index(name)` {#http.hpack:lookup_name_index} ### `hpack_context:lookup_index(index)` {#http.hpack:lookup_index} ### `hpack_context:add_header_indexed(name, value, huffman)` {#http.hpack:add_header_indexed} ### `hpack_context:add_header_never_indexed(name, value, huffman)` {#http.hpack:add_header_never_indexed} ### `hpack_context:encode_headers(headers)` {#http.hpack:encode_headers} ### `hpack_context:decode_headers(payload, header_list, pos)` {#http.hpack:decode_headers} lua-http-0.4/doc/modules/http.hsts.md000066400000000000000000000024071400726324600176200ustar00rootroot00000000000000## http.hsts Data structures useful for HSTS (HTTP Strict Transport Security) ### `new_store()` {#http.hsts.new_store} Creates and returns a new HSTS store. ### `hsts_store.max_items` {#http.hsts.max_items} The maximum number of items allowed in the store. Decreasing this value will only prevent new items from being added, it will not remove old items. Defaults to infinity (any number of items is allowed). ### `hsts_store:clone()` {#http.hsts:clone} Creates and returns a copy of a store. ### `hsts_store:store(host, directives)` {#http.hsts:store} Add new directives to the store about the given `host`. `directives` should be a table of directives, which *must* include the key `"max-age"`. Returns a boolean indicating if the item was accepted. ### `hsts_store:remove(host)` {#http.hsts:remove} Removes the entry for `host` from the store (if it exists). ### `hsts_store:check(host)` {#http.hsts:check} Returns a boolean indicating if the given `host` is a known HSTS host. ### `hsts_store:clean_due()` {#http.hsts:clean_due} Returns the number of seconds until the next item in the store expires. ### `hsts_store:clean()` {#http.hsts:clean} Removes expired entries from the store. lua-http-0.4/doc/modules/http.proxies.md000066400000000000000000000015651400726324600203340ustar00rootroot00000000000000## http.proxies ### `new()` {#http.proxies.new} Returns an empty 'proxies' object ### `proxies:update(getenv)` {#http.proxies:update} `getenv` defaults to [`os.getenv`](http://www.lua.org/manual/5.3/manual.html#pdf-os.getenv) Reads environmental variables that are used to control if requests go through a proxy. 
- `http_proxy` (or `CGI_HTTP_PROXY` if running in a program with `GATEWAY_INTERFACE` set): the proxy to use for normal HTTP connections
- `https_proxy` or `HTTPS_PROXY`: the proxy to use for HTTPS connections
- `all_proxy` or `ALL_PROXY`: the proxy to use for **all** connections, overridden by other options
- `no_proxy` or `NO_PROXY`: a list of hosts to **not** use a proxy for

Returns `proxies`.

### `proxies:choose(scheme, host)` {#http.proxies:choose}

Returns the proxy to use for the given `scheme` and `host` as a URI.

lua-http-0.4/doc/modules/http.request.md000066400000000000000000000141511400726324600203260ustar00rootroot00000000000000## http.request

The http.request module encapsulates all the functionality required to retrieve an HTTP document from a server.

### `new_from_uri(uri)` {#http.request.new_from_uri}

Creates a new `http.request` object from the given URI.

### `new_connect(uri, connect_authority)` {#http.request.new_connect}

Creates a new `http.request` object from the given URI that will perform a *CONNECT* request.

### `request.host` {#http.request.host}

The host this request should be sent to.

### `request.port` {#http.request.port}

The port this request should be sent to.

### `request.bind` {#http.request.bind}

The local outgoing address and optionally port to bind in the form of `"address[:port]"`. Default is to allow the kernel to choose an address+port. IPv6 addresses may be specified via square bracket notation. e.g. `"127.0.0.1"`, `"127.0.0.1:50000"`, `"[::1]:30000"`.

This option is rarely needed. Supplying an address can be used to manually select the network interface to make the request from, while supplying a port is only really used to interoperate with firewalls or devices that demand use of a certain port.

### `request.tls` {#http.request.tls}

A boolean indicating if TLS should be used.

### `request.ctx` {#http.request.ctx}

An alternative `SSL_CTX*` to use. If not specified, uses the default TLS settings (see [*http.tls*](#http.tls) for information).

### `request.sendname` {#http.request.sendname}

The TLS SNI host name used.

### `request.version` {#http.request.version}

The HTTP version to use; leave as `nil` to auto-select.

### `request.proxy` {#http.request.proxy}

Specifies a proxy that the request will be made through. The value should be a URI or `false` to turn off proxying for the request.

### `request.headers` {#http.request.headers}

A [*http.headers*](#http.headers) object of headers that will be sent in the request.

### `request.hsts` {#http.request.hsts}

The [*http.hsts*](#http.hsts) store that will be used to enforce HTTP strict transport security. An attempt will be made to add strict transport headers from a response to the store. Defaults to a shared store.

### `request.proxies` {#http.request.proxies}

The [*http.proxies*](#http.proxies) object used to select a proxy for the request. Only consulted if `request.proxy` is `nil`.

### `request.cookie_store` {#http.request.cookie_store}

The [*http.cookie.store*](#http.cookie.store) that will be used to find cookies for the request. An attempt will be made to add cookies from a response to the store. Defaults to a shared store.
### `request.is_top_level` {#http.request.is_top_level}

A boolean flag indicating if this request is a "top level" request (See [RFC 6265bis-02 Section 5.2](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2)). Defaults to `true`.

### `request.site_for_cookies` {#http.request.site_for_cookies}

A string containing the host that should be considered as the "site for cookies" (See [RFC 6265bis-02 Section 5.2](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2)), can be `nil` if unknown. Defaults to `nil`.

### `request.follow_redirects` {#http.request.follow_redirects}

Boolean indicating if `:go()` should follow redirects. Defaults to `true`.

### `request.expect_100_timeout` {#http.request.expect_100_timeout}

Number of seconds to wait for a 100 Continue response before proceeding to send a request body. Defaults to `1`.

### `request.max_redirects` {#http.request.max_redirects}

Maximum number of redirects to follow before giving up. Defaults to `5`. Set to `math.huge` to not give up.

### `request.post301` {#http.request.post301}

Respect RFC 2616 Section 10.3.2 and **don't** convert POST requests into body-less GET requests when following a 301 redirect. The non-RFC behaviour is ubiquitous in web browsers and assumed by servers. Modern HTTP endpoints send status code 308 to indicate that they don't want the method to be changed. Defaults to `false`.

### `request.post302` {#http.request.post302}

Respect RFC 2616 Section 10.3.3 and **don't** convert POST requests into body-less GET requests when following a 302 redirect. The non-RFC behaviour is ubiquitous in web browsers and assumed by servers. Modern HTTP endpoints send status code 307 to indicate that they don't want the method to be changed. Defaults to `false`.

### `request:clone()` {#http.request:clone}

Creates and returns a clone of the request. The clone has its own deep copies of the [`.headers`](#http.request.headers) and [`.h2_settings`](#http.request.h2_settings) fields. The [`.tls`](#http.request.tls) and [`.body`](#http.request.body) fields are shallow copied from the original request.

### `request:handle_redirect(headers)` {#http.request:handle_redirect}

Process a redirect. `headers` should be response headers for a redirect. Returns a new `request` object that will fetch from the new location.

### `request:to_uri(with_userinfo)` {#http.request:to_uri}

Returns a URI for the request. If `with_userinfo` is `true` and the request has an `authorization` header (or `proxy-authorization` for a CONNECT request), the returned URI will contain a userinfo component.

### `request:set_body(body)` {#http.request:set_body}

Allows setting a request body. `body` may be a string, function or lua file object.

- If `body` is a string it will be sent as given.
- If `body` is a function, it will be called repeatedly like an iterator. It should return chunks of the request body as a string or `nil` if done.
- If `body` is a lua file object, it will be [`:seek`'d](http://www.lua.org/manual/5.3/manual.html#pdf-file:seek) to the start, then sent as a body. Any errors encountered during file operations **will be thrown**.

### `request:go(timeout)` {#http.request:go}

Performs the request. The request object is **not** invalidated and can be reused for a new request. On success, returns the response [*headers*](#http.headers) and a [*stream*](#stream).
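### Example {#http.request-example}

A minimal sketch of performing a GET request and collecting the body; the URI and the 10 second timeout are placeholders:

```lua
local http_request = require "http.request"

local req = http_request.new_from_uri("http://example.com/")
local headers, stream = assert(req:go(10))
print(headers:get(":status"))
print(assert(stream:get_body_as_string()))
```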
lua-http-0.4/doc/modules/http.server.md000066400000000000000000000176031400726324600201510ustar00rootroot00000000000000## http.server

*http.server* objects are used to encapsulate the `accept()` and dispatch of http clients. Each new client request will invoke the `onstream` callback in a new cqueues managed coroutine. In addition to constructing and returning a HTTP response, an `onstream` handler may decide to take ownership of the connection for other purposes, e.g. upgrade from a HTTP 1.1 connection to a WebSocket connection.

For examples of how to use the server library, please see the [examples directory](https://github.com/daurnimator/lua-http/tree/master/examples) in the source tree.

### `new(options)` {#http.server.new}

Creates a new instance of an HTTP server listening on the given socket.

- `.socket` (*cqueues.socket*): the socket that `accept()` will be called on
- `.onerror` (*function*): Function that will be called when an error occurs (default handler throws an error). See [server:onerror()](#http.server:onerror)
- `.onstream` (*function*): Callback function for handling a new client request. The function receives the [*server*](#http.server) and the new [*stream*](#stream) as parameters. If the callback throws an error it will be reported from [`:step()`](#http.server:step) or [`:loop()`](#http.server:loop)
- `.tls` (*boolean*): Specifies if the system should use Transport Layer Security. Values are:
    - `nil`: Allow both tls and non-tls connections
    - `true`: Allows tls connections only
    - `false`: Allows non-tls connections only
- `.ctx` (*context object*): An `openssl.ssl.context` object to use for tls connections. If `nil` is passed, a self-signed context will be generated.
- `.connection_setup_timeout` (*number*): Timeout (in seconds) to wait for client to send first bytes and/or complete TLS handshake. Default is 10 seconds.
- `.intra_stream_timeout` (*number*): Timeout (in seconds) to wait for a new [*stream*](#stream) on an idle connection before giving up and closing the connection
- `.version` (*number*): The http version to allow to connect (default: any)
- `.cq` (*cqueue*): A cqueues controller to use as a main loop. The default is a new controller for the server.
- `.max_concurrent` (*number*): Maximum number of connections to allow live at a time. Default is infinity.

### `listen(options)` {#http.server.listen}

Creates a new socket and returns an HTTP server that will accept() from it. Parameters are the same as [`new(options)`](#http.server.new) except instead of `.socket` you provide the following:

- `.host` (*string*): Local IP address in dotted decimal or IPV6 notation. This value is required if `.path` is not specified.
- `.port` (*number*): IP port for the local socket. Specify 0 for automatic port selection. Ports 1-1024 require the application to have root privilege to run. Maximum value is 65535. If `.tls == nil` then this value is required. Otherwise, the defaults are:
    - `80` if `.tls == false`
    - `443` if `.tls == true`
- `.path` (*string*): Path to a UNIX socket. This value is required if `.host` is not specified.
- `.family` (*string*): Protocol family. Default is `"AF_INET"`
- `.v6only` (*boolean*): Specify `true` to limit all connections to ipv6 only (no ipv4-mapped-ipv6). Default is `false`.
- `.mode` (*string*): `fchmod` or `chmod` socket after creating UNIX domain socket.
- `.mask` (*boolean*): Set and restore umask when binding UNIX domain socket.
- `.unlink` (*boolean*): `true` means unlink socket path before binding.
- `.reuseaddr` (*boolean*): Turn on `SO_REUSEADDR` flag.
- `.reuseport` (*boolean*): Turn on `SO_REUSEPORT` flag.

### `server:onerror(new_handler)` {#http.server:onerror}

If called with parameters, the function replaces the current error handler function with `new_handler` and returns a reference to the old function. Calling the function with no parameters returns the current error handler. The default handler throws an error. The `onerror` function for the server can be set during instantiation through the `options` table passed to the [`server.listen(options)`](#server.listen) function.

### `server:listen(timeout)` {#http.server:listen}

Initializes the server socket and if required, resolves DNS. `server:listen()` is required if [*localname*](#http.server:localname) is called before [*step*](#http.server:step) or [*loop*](#http.server:loop). On error, returns `nil`, an error message and an error number.

### `server:localname()` {#http.server:localname}

Returns the connection information for the local socket. Returns address family, IP address and port for an external socket. For Unix domain sockets, the function returns AF_UNIX and the path. If the connection object is not connected, returns AF_UNSPEC (0). On error, returns `nil`, an error message and an error number.

### `server:pause()` {#http.server:pause}

Cause the server loop to stop processing new clients until [*resume*](#http.server:resume) is called. Existing client connections will run until closed.

### `server:resume()` {#http.server:resume}

Resumes a [*paused*](#http.server:pause) `server` and processes new client requests.

### `server:close()` {#http.server:close}

Shutdown the server and close the socket. A closed server cannot be reused.

### `server:pollfd()` {#http.server:pollfd}

Returns a file descriptor (as an integer) or `nil`. The file descriptor can be passed to a system API like `select` or `kqueue` to wait on anything this server object wants to do. This method is used for integrating with other main loops, and should be used in combination with [`:events()`](#http.server:events) and [`:timeout()`](#http.server:timeout).

### `server:events()` {#http.server:events}

Returns a string indicating the type of events the object is waiting on: the string will contain `"r"` if it wants to be *step*ed when the file descriptor returned by [`pollfd()`](#http.server:pollfd) has had POLLIN indicated; `"w"` for POLLOUT or `"p"` for POLLPRI. This method is used for integrating with other main loops, and should be used in combination with [`:pollfd()`](#http.server:pollfd) and [`:timeout()`](#http.server:timeout).

### `server:timeout()` {#http.server:timeout}

The maximum time (in seconds) to wait before calling [`server:step()`](#http.server:step). This method is used for integrating with other main loops, and should be used in combination with [`:pollfd()`](#http.server:pollfd) and [`:events()`](#http.server:events).

### `server:empty()` {#http.server:empty}

Returns `true` if the master socket and all client connections have been closed, `false` otherwise.

### `server:step(timeout)` {#http.server:step}

Step once through the server's main loop: any waiting clients will be `accept()`-ed, any pending streams will start getting processed, and each `onstream` handler will be run at most once. This method will block for *up to* `timeout` seconds. On error, returns `nil`, an error message and an error number. This can be used for integration with external main loops.
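#### Example

A sketch of driving the server with `:step()` directly instead of calling [`:loop()`](#http.server:loop); it assumes `myserver` was created with [`listen(options)`](#http.server.listen):

```lua
-- `myserver` is assumed to come from http.server.listen{...}
assert(myserver:listen())
while not myserver:empty() do
	-- wait up to the server's own suggested timeout, then process pending work
	assert(myserver:step(myserver:timeout()))
end
```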
### `server:loop(timeout)` {#http.server:loop} Run the server as a blocking loop for up to `timeout` seconds. The server will continue to listen and accept client requests until either [`:pause()`](#http.server:pause) or [`:close()`](#http.server:close) is called, or an error is experienced. ### `server:add_socket(socket)` {#http.server:add_socket} Add a new connection socket to the server for processing. The server will use the current `onstream` request handler and all `options` currently specified through the [`server.listen(options)`](#http.server.listen) constructor. `add_socket` can be used to process connection sockets obtained from an external source such as: - Another cqueues thread with some other master socket. - From inetd for start on demand daemons. - A Unix socket with `SCM_RIGHTS`. ### `server:add_stream(stream)` {#http.server:add_stream} Add an existing stream to the server for processing. lua-http-0.4/doc/modules/http.socks.md000066400000000000000000000033001400726324600177520ustar00rootroot00000000000000## http.socks Implements a subset of the SOCKS proxy protocol. ### `connect(uri)` {#http.socks.connect} `uri` is a string with the address of the SOCKS server. A scheme of `"socks5"` will resolve hosts locally, a scheme of `"socks5h"` will resolve hosts on the SOCKS server. If the URI has a userinfo component it will be sent to the SOCKS server as a username and password. Returns a *http.socks* object. ### `fdopen(socket)` {#http.socks.fdopen} This function takes an existing cqueues.socket as a parameter and returns a *http.socks* object with `socket` as its base. ### `socks.needs_resolve` {#http.socks.needs_resolve} Specifies if the destination host should be resolved locally. ### `socks:clone()` {#http.socks:clone} Make a clone of a given socks object. ### `socks:add_username_password_auth(username, password)` {#http.socks:add_username_password_auth} Add username + password authorisation to the set of allowed authorisation methods with the given credentials. ### `socks:negotiate(host, port, timeout)` {#http.socks:negotiate} Complete the SOCKS connection. Negotiates a socks connection. `host` is a required string passed to the SOCKS server as the host address. The address will be resolved locally if [`.needs_resolve`](#http.socks.needs_resolve) is `true`. `port` is a required number to pass to the SOCKS server as the connection port. On error, returns `nil`, an error message and an error number. ### `socks:close()` {#http.socks:close} ### `socks:take_socket()` {#http.socks:take_socket} Take possession of the socket object managed by the http.socks object. Returns the socket (or `nil` if not available). lua-http-0.4/doc/modules/http.tls.md000066400000000000000000000034041400726324600174370ustar00rootroot00000000000000## http.tls ### `has_alpn` {#http.tls.has_alpn} Boolean indicating if ALPN is available in the current environment. It may be disabled if OpenSSL was compiled without ALPN support, or is an old version. ### `has_hostname_validation` {#http.tls.has_hostname_validation} Boolean indicating if [hostname validation](https://wiki.openssl.org/index.php/Hostname_validation) is available in the current environment. It may be disabled if OpenSSL is an old version. 
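#### Example

A small, purely illustrative sketch that checks the two feature flags before relying on the corresponding functionality:

```lua
local http_tls = require "http.tls"

if not http_tls.has_alpn then
	print("ALPN not available: HTTP 2 cannot be negotiated over TLS")
end
if not http_tls.has_hostname_validation then
	print("hostname validation not available")
end
```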
### `modern_cipher_list` {#http.tls.modern_cipher_list}

The [Mozilla "Modern" cipher list](https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility) as a colon separated list, ready to pass to OpenSSL

### `intermediate_cipher_list` {#http.tls.intermediate_cipher_list}

The [Mozilla "Intermediate" cipher list](https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29) as a colon separated list, ready to pass to OpenSSL

### `old_cipher_list` {#http.tls.old_cipher_list}

The [Mozilla "Old" cipher list](https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility) as a colon separated list, ready to pass to OpenSSL

### `banned_ciphers` {#http.tls.banned_ciphers}

A set (table with string keys and values of `true`) of the [ciphers banned in HTTP 2](https://http2.github.io/http2-spec/#BadCipherSuites) where the keys are OpenSSL cipher names. Ciphers not known by OpenSSL are missing from the set.

### `new_client_context()` {#http.tls.new_client_context}

Create and return a new luaossl SSL context useful for HTTP client connections.

### `new_server_context()` {#http.tls.new_server_context}

Create and return a new luaossl SSL context useful for HTTP server connections.

lua-http-0.4/doc/modules/http.util.md000066400000000000000000000047601400726324600176170ustar00rootroot00000000000000## http.util

### `encodeURI(str)` {#http.util.encodeURI}

### `encodeURIComponent(str)` {#http.util.encodeURIComponent}

### `decodeURI(str)` {#http.util.decodeURI}

### `decodeURIComponent(str)` {#http.util.decodeURIComponent}

### `query_args(str)` {#http.util.query_args}

Returns an iterator over the pairs in `str`

#### Example

```lua
local http_util = require "http.util"
for name, value in http_util.query_args("foo=bar&baz=qux") do
	print(name, value)
end
--[[ prints:
"foo", "bar"
"baz", "qux"
]]
```

### `dict_to_query(dict)` {#http.util.dict_to_query}

Converts a dictionary (table with string keys) with string values to an encoded query string.

#### Example

```lua
local http_util = require "http.util"
print(http_util.dict_to_query({foo = "bar"; baz = "qux"})) --> "baz=qux&foo=bar"
```

### `resolve_relative_path(orig_path, relative_path)` {#http.util.resolve_relative_path}

### `is_safe_method(method)` {#http.util.is_safe_method}

Returns a boolean indicating if the passed string `method` is a "safe" method. See [RFC 7231 section 4.2.1](https://tools.ietf.org/html/rfc7231#section-4.2.1) for more information.

### `is_ip(str)` {#http.util.is_ip}

Returns a boolean indicating if the passed string `str` is a valid IP.

### `scheme_to_port` {#http.util.scheme_to_port}

Map from schemes (as strings) to default ports (as integers).

### `split_authority(authority, scheme)` {#http.util.split_authority}

Splits an `authority` into host and port components. If the authority has no port component, will attempt to use the default for the `scheme`.

#### Example

```lua
local http_util = require "http.util"
print(http_util.split_authority("localhost:8000", "http")) --> "localhost", 8000
print(http_util.split_authority("example.com", "https")) --> "example.com", 443
```

### `to_authority(host, port, scheme)` {#http.util.to_authority}

Joins the `host` and `port` to create a valid authority component. Omits the port if it is the default for the `scheme`.
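#### Example

Mirroring the [`split_authority`](#http.util.split_authority) example above (the hosts and ports are arbitrary):

```lua
local http_util = require "http.util"
print(http_util.to_authority("localhost", 8000, "http")) --> "localhost:8000"
print(http_util.to_authority("example.com", 443, "https")) --> "example.com"
```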
### `imf_date(time)` {#http.util.imf_date} Returns the time in HTTP preferred date format (See [RFC 7231 section 7.1.1.1](https://tools.ietf.org/html/rfc7231#section-7.1.1.1)) `time` defaults to the current time ### `maybe_quote(str)` {#http.util.maybe_quote} - If `str` is a valid `token`, return it as-is. - If `str` would be valid as a `quoted-string`, return the quoted version - Otherwise, returns `nil` lua-http-0.4/doc/modules/http.version.md000066400000000000000000000002431400726324600203200ustar00rootroot00000000000000## http.version ### `name` {#http.version.name} `"lua-http"` ### `version` {#http.version.version} Current version of lua-http as a string. lua-http-0.4/doc/modules/http.websocket.md000066400000000000000000000057641400726324600206360ustar00rootroot00000000000000## http.websocket ### `new_from_uri(uri, protocols)` {#http.websocket.new_from_uri} Creates a new `http.websocket` object of type `"client"` from the given URI. - `protocols` (optional) should be a lua table containing a sequence of protocols to send to the server ### `new_from_stream(stream, headers)` {#http.websocket.new_from_stream} Attempts to create a new `http.websocket` object of type `"server"` from the given request headers and stream. - [`stream`](#http.h1_stream) should be a live HTTP 1 stream of the `"server"` type. - [`headers`](#http.headers) should be headers of a suspected websocket upgrade request from an HTTP 1 client. This function does **not** have side effects, and is hence okay to use tentatively. ### `websocket.close_timeout` {#http.websocket.close_timeout} Amount of time (in seconds) to wait between sending a close frame and actually closing the connection. Defaults to `3` seconds. ### `websocket:accept(options, timeout)` {#http.websocket:accept} Completes negotiation with a websocket client. - `options` is a table containing: - `headers` (optional) a [headers](#http.headers) object to use as a prototype for the response headers - `protocols` (optional) should be a lua table containing a sequence of protocols to allow from the client Usually called after a successful [`new_from_stream`](#http.websocket.new_from_stream) ### `websocket:connect(timeout)` {#http.websocket:connect} Connect to a websocket server. Usually called after a successful [`new_from_uri`](#http.websocket.new_from_uri) ### `websocket:receive(timeout)` {#http.websocket:receive} Reads and returns the next data frame plus its opcode. Any ping frames received while reading will be responded to. The opcode `0x1` will be returned as `"text"` and `0x2` will be returned as `"binary"`. ### `websocket:each()` {#http.websocket:each} Iterator over [`websocket:receive()`](#http.websocket:receive). ### `websocket:send_frame(frame, timeout)` {#http.websocket:send_frame} Low level function to send a raw frame. ### `websocket:send(data, opcode, timeout)` {#http.websocket:send} Send the given `data` as a data frame. - `data` should be a string - `opcode` can be a numeric opcode, `"text"` or `"binary"`. If `nil`, defaults to a text frame. Note this `opcode` is the websocket frame opcode, not an application specific opcode. The opcode should be one from the [IANA registry](https://www.iana.org/assignments/websocket/websocket.xhtml#opcode). ### `websocket:send_ping(data, timeout)` {#http.websocket:send_ping} Sends a ping frame. - `data` is optional ### `websocket:send_pong(data, timeout)` {#http.websocket:send_pong} Sends a pong frame. Works as a unidirectional keep-alive. 
- `data` is optional ### `websocket:close(code, reason, timeout)` {#http.websocket:close} Closes the websocket connection. - `code` defaults to `1000` - `reason` is an optional string lua-http-0.4/doc/modules/http.zlib.md000066400000000000000000000022751400726324600176020ustar00rootroot00000000000000## http.zlib An abstraction layer over the various lua zlib libraries. ### `engine` {#http.zlib.engine} Currently either [`"lua-zlib"`](https://github.com/brimworks/lua-zlib) or [`"lzlib"`](https://github.com/LuaDist/lzlib) ### `inflate()` {#http.zlib.inflate} Returns a closure that inflates (uncompresses) a zlib stream. The closure takes a string of compressed data and an end of stream flag (`boolean`) as parameters and returns the inflated output as a string. The function will throw an error if the input is not a valid zlib stream. ### `deflate()` {#http.zlib.deflate} Returns a closure that deflates (compresses) a zlib stream. The closure takes a string of uncompressed data and an end of stream flag (`boolean`) as parameters and returns the deflated output as a string. ### Example {#http.zlib-example} ```lua local zlib = require "http.zlib" local original = "the racecar raced around the racecar track" local deflater = zlib.deflate() local compressed = deflater(original, true) print(#original, #compressed) -- compressed should be smaller local inflater = zlib.inflate() local uncompressed = inflater(compressed, true) assert(original == uncompressed) ``` lua-http-0.4/doc/site.css000066400000000000000000000037631400726324600153530ustar00rootroot00000000000000* { -webkit-box-sizing: border-box; -moz-box-sizing: border-box; box-sizing: border-box } html, body { height: 100% } article, aside, figure, footer, header, hgroup, menu, nav, section { display: block } body { margin: 0 } h1, h2, h3 { margin: 1rem 0 } h4, h5, h6, ul, ol, dl, blockquote, address, p, figure { margin: 0 0 1rem 0 } img { max-width: 100% } h1, h2, h3, h4, h5, h6 { font-weight: 700 } h1 { font-size: 2.5rem; line-height: 3rem } h2 { font-size: 1.5rem; line-height: 2rem } h3 { font-size: 1.25rem; line-height: 1.5rem } h4, h5, h6 { font-size: 1rem; line-height: 1.25rem } hr { border: 0; border-bottom: 1px solid; margin-top: -1px; margin-bottom: 1rem } a:hover { color: inherit } small { font-size: .875rem } ul, ol { padding-left: 1rem } ul ul, ul ol, ol ol, ol ul { margin: 0 } dt { font-weight: 700 } dd { margin: 0 } blockquote { border-left: 1px solid; padding-left: 1rem } address { font-style: normal } html { color: #333; font: 100%/1.5 Avenir, 'Helvetica Neue', Helvetica, Arial, sans-serif; -webkit-font-smoothing: antialiased; -webkit-text-size-adjust: 100%; -ms-text-size-adjust: 100%; background: #FFF; } a { color: #999; text-decoration: none; transition: color 0.3s; } a > h1, a > h2, a > h3 { color: #333; } body > * { padding: 0 1rem; } .subtitle { font-size: 1rem; line-height: 1.5rem } .author { display: none } @media screen and (min-width: 55rem) { .meta { position: fixed; width: 20rem; height: 100%; overflow: auto; background: #FFF; z-index: 1; } main { display: block; /* required for e.g. 
konqueror */ margin-left: 20rem; overflow: auto; } } @media print { section.level1 { page-break-inside: avoid } nav a::after { content: leader('.') target-counter(attr(href url), page, decimal) } } lua-http-0.4/doc/template.html000066400000000000000000000046731400726324600163770ustar00rootroot00000000000000 $for(author-meta)$ $endfor$ $if(date-meta)$ $endif$ $if(keywords)$ $endif$ $if(title-prefix)$$title-prefix$ – $endif$$pagetitle$ $if(quotes)$ $endif$ $if(highlighting-css)$ $endif$ $for(css)$ $endfor$ $if(math)$ $math$ $endif$ $for(header-includes)$ $header-includes$ $endfor$ $for(include-before)$ $include-before$ $endfor$
$if(title)$

$title$

$if(subtitle)$

$subtitle$

$endif$ $for(author)$

$author$

$endfor$ $if(date)$

$date$

$endif$
$endif$ $if(toc)$ $endif$
$body$
$for(include-after)$ $include-after$ $endfor$ lua-http-0.4/examples/000077500000000000000000000000001400726324600147355ustar00rootroot00000000000000lua-http-0.4/examples/h2_streaming.lua000077500000000000000000000006571400726324600200350ustar00rootroot00000000000000#!/usr/bin/env lua --[[ Makes a request to an HTTP2 endpoint that has an infinite length response. Usage: lua examples/h2_streaming.lua ]] local request = require "http.request" -- This endpoint returns a never-ending stream of chunks containing the current time local req = request.new_from_uri("https://http2.golang.org/clockstream") local _, stream = assert(req:go()) for chunk in stream:each_chunk() do io.write(chunk) end lua-http-0.4/examples/serve_dir.lua000077500000000000000000000145771400726324600174430ustar00rootroot00000000000000#!/usr/bin/env lua --[=[ This example serves a file/directory browser It defaults to serving the current directory. Usage: lua examples/serve_dir.lua [ []] ]=] local port = arg[1] or 8000 local dir = arg[2] or "." local new_headers = require "http.headers".new local http_server = require "http.server" local http_util = require "http.util" local http_version = require "http.version" local ce = require "cqueues.errno" local lfs = require "lfs" local lpeg = require "lpeg" local uri_patts = require "lpeg_patterns.uri" local mdb do -- If available, use libmagic https://github.com/mah0x211/lua-magic local ok, magic = pcall(require, "magic") if ok then mdb = magic.open(magic.MIME_TYPE+magic.PRESERVE_ATIME+magic.RAW+magic.ERROR) if mdb:load() ~= 0 then error(magic:error()) end end end local uri_reference = uri_patts.uri_reference * lpeg.P(-1) local default_server = string.format("%s/%s", http_version.name, http_version.version) local xml_escape do local escape_table = { ["'"] = "'"; ["\""] = """; ["<"] = "<"; [">"] = ">"; ["&"] = "&"; } function xml_escape(str) str = string.gsub(str, "['&<>\"]", escape_table) str = string.gsub(str, "[%c\r\n]", function(c) return string.format("&#x%x;", string.byte(c)) end) return str end end local human do -- Utility function to convert to a human readable number local suffixes = { [0] = ""; [1] = "K"; [2] = "M"; [3] = "G"; [4] = "T"; [5] = "P"; } local log = math.log if _VERSION:match("%d+%.?%d*") < "5.1" then log = require "compat53.module".math.log end function human(n) if n == 0 then return "0" end local order = math.floor(log(n, 2) / 10) if order > 5 then order = 5 end n = math.ceil(n / 2^(order*10)) return string.format("%d%s", n, suffixes[order]) end end local function reply(myserver, stream) -- luacheck: ignore 212 -- Read in headers local req_headers = assert(stream:get_headers()) local req_method = req_headers:get ":method" -- Log request to stdout assert(io.stdout:write(string.format('[%s] "%s %s HTTP/%g" "%s" "%s"\n', os.date("%d/%b/%Y:%H:%M:%S %z"), req_method or "", req_headers:get(":path") or "", stream.connection.version, req_headers:get("referer") or "-", req_headers:get("user-agent") or "-" ))) -- Build response headers local res_headers = new_headers() res_headers:append(":status", nil) res_headers:append("server", default_server) res_headers:append("date", http_util.imf_date()) if req_method ~= "GET" and req_method ~= "HEAD" then res_headers:upsert(":status", "405") assert(stream:write_headers(res_headers, true)) return end local path = req_headers:get(":path") local uri_t = assert(uri_reference:match(path), "invalid path") path = http_util.resolve_relative_path("/", uri_t.path) local real_path = dir .. 
path local file_type = lfs.attributes(real_path, "mode") if file_type == "directory" then -- directory listing path = path:gsub("/+$", "") .. "/" res_headers:upsert(":status", "200") res_headers:append("content-type", "text/html; charset=utf-8") assert(stream:write_headers(res_headers, req_method == "HEAD")) if req_method ~= "HEAD" then assert(stream:write_chunk(string.format([[ Index of %s

Index of %s

]], xml_escape(path), xml_escape(path)), false)) -- lfs doesn't provide a way to get an errno for attempting to open a directory -- See https://github.com/keplerproject/luafilesystem/issues/87 for filename in lfs.dir(real_path) do if not (filename == ".." and path == "/") then -- Exclude parent directory entry listing from top level local stats = lfs.attributes(real_path .. "/" .. filename) if stats.mode == "directory" then filename = filename .. "/" end assert(stream:write_chunk(string.format("\t\t\t\n", xml_escape(stats.mode:gsub("%s", "-")), xml_escape(http_util.encodeURI(path .. filename)), xml_escape(filename), stats.size, xml_escape(human(stats.size)), xml_escape(os.date("!%Y-%m-%d %X", stats.modification)) ), false)) end end assert(stream:write_chunk([[
File NameSizeModified
%s%s
]], true)) end elseif file_type == "file" then local fd, err, errno = io.open(real_path, "rb") local code if not fd then if errno == ce.ENOENT then code = "404" elseif errno == ce.EACCES then code = "403" else code = "503" end res_headers:upsert(":status", code) res_headers:append("content-type", "text/plain") assert(stream:write_headers(res_headers, req_method == "HEAD")) if req_method ~= "HEAD" then assert(stream:write_body_from_string("Fail!\n"..err.."\n")) end else res_headers:upsert(":status", "200") local mime_type = mdb and mdb:file(real_path) or "application/octet-stream" res_headers:append("content-type", mime_type) assert(stream:write_headers(res_headers, req_method == "HEAD")) if req_method ~= "HEAD" then assert(stream:write_body_from_file(fd)) end end elseif file_type == nil then res_headers:upsert(":status", "404") assert(stream:write_headers(res_headers, true)) else res_headers:upsert(":status", "403") assert(stream:write_headers(res_headers, true)) end end local myserver = assert(http_server.listen { host = "localhost"; port = port; max_concurrent = 100; onstream = reply; onerror = function(myserver, context, op, err, errno) -- luacheck: ignore 212 local msg = op .. " on " .. tostring(context) .. " failed" if err then msg = msg .. ": " .. tostring(err) end assert(io.stderr:write(msg, "\n")) end; }) -- Manually call :listen() so that we are bound before calling :localname() assert(myserver:listen()) do local bound_port = select(3, myserver:localname()) assert(io.stderr:write(string.format("Now listening on port %d\n", bound_port))) end -- Start the main server loop assert(myserver:loop()) lua-http-0.4/examples/server_hello.lua000077500000000000000000000035011400726324600201330ustar00rootroot00000000000000#!/usr/bin/env lua --[[ A simple HTTP server If a request is not a HEAD method, then reply with "Hello world!" Usage: lua examples/server_hello.lua [] ]] local port = arg[1] or 0 -- 0 means pick one at random local http_server = require "http.server" local http_headers = require "http.headers" local function reply(myserver, stream) -- luacheck: ignore 212 -- Read in headers local req_headers = assert(stream:get_headers()) local req_method = req_headers:get ":method" -- Log request to stdout assert(io.stdout:write(string.format('[%s] "%s %s HTTP/%g" "%s" "%s"\n', os.date("%d/%b/%Y:%H:%M:%S %z"), req_method or "", req_headers:get(":path") or "", stream.connection.version, req_headers:get("referer") or "-", req_headers:get("user-agent") or "-" ))) -- Build response headers local res_headers = http_headers.new() res_headers:append(":status", "200") res_headers:append("content-type", "text/plain") -- Send headers to client; end the stream immediately if this was a HEAD request assert(stream:write_headers(res_headers, req_method == "HEAD")) if req_method ~= "HEAD" then -- Send body, ending the stream assert(stream:write_chunk("Hello world!\n", true)) end end local myserver = assert(http_server.listen { host = "localhost"; port = port; onstream = reply; onerror = function(myserver, context, op, err, errno) -- luacheck: ignore 212 local msg = op .. " on " .. tostring(context) .. " failed" if err then msg = msg .. ": " .. 
tostring(err) end assert(io.stderr:write(msg, "\n")) end; }) -- Manually call :listen() so that we are bound before calling :localname() assert(myserver:listen()) do local bound_port = select(3, myserver:localname()) assert(io.stderr:write(string.format("Now listening on port %d\n", bound_port))) end -- Start the main server loop assert(myserver:loop()) lua-http-0.4/examples/server_sent_events.lua000077500000000000000000000060741400726324600213750ustar00rootroot00000000000000#!/usr/bin/env lua --[[ A server that responds with an infinite server-side-events format. https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format Usage: lua examples/server_sent_events.lua [] ]] local port = arg[1] or 0 -- 0 means pick one at random local cqueues = require "cqueues" local http_server = require "http.server" local http_headers = require "http.headers" local myserver = assert(http_server.listen { host = "localhost"; port = port; onstream = function(myserver, stream) -- luacheck: ignore 212 -- Read in headers local req_headers = assert(stream:get_headers()) local req_method = req_headers:get ":method" -- Build response headers local res_headers = http_headers.new() if req_method ~= "GET" and req_method ~= "HEAD" then res_headers:upsert(":status", "405") assert(stream:write_headers(res_headers, true)) return end if req_headers:get ":path" == "/" then res_headers:append(":status", "200") res_headers:append("content-type", "text/html") -- Send headers to client; end the stream immediately if this was a HEAD request assert(stream:write_headers(res_headers, req_method == "HEAD")) if req_method ~= "HEAD" then assert(stream:write_chunk([[ EventSource demo

This page uses server-sent_events to show the live server time:

]], true)) end elseif req_headers:get ":path" == "/event-stream" then res_headers:append(":status", "200") res_headers:append("content-type", "text/event-stream") -- Send headers to client; end the stream immediately if this was a HEAD request assert(stream:write_headers(res_headers, req_method == "HEAD")) if req_method ~= "HEAD" then -- Start a loop that sends the current time to the client each second while true do local msg = string.format("data: The time is now %s.\n\n", os.date()) assert(stream:write_chunk(msg, false)) cqueues.sleep(1) -- yield the current thread for a second. end end else res_headers:append(":status", "404") assert(stream:write_headers(res_headers, true)) end end; onerror = function(myserver, context, op, err, errno) -- luacheck: ignore 212 local msg = op .. " on " .. tostring(context) .. " failed" if err then msg = msg .. ": " .. tostring(err) end assert(io.stderr:write(msg, "\n")) end; }) -- Manually call :listen() so that we are bound before calling :localname() assert(myserver:listen()) do local bound_port = select(3, myserver:localname()) assert(io.stderr:write(string.format("Now listening on port %d\nOpen http://localhost:%d/ in your browser\n", bound_port, bound_port))) end -- Start the main server loop assert(myserver:loop()) lua-http-0.4/examples/simple_request.lua000077500000000000000000000016721400726324600205120ustar00rootroot00000000000000#!/usr/bin/env lua --[[ Verbosely fetches an HTTP resource If a body is given, use a POST request Usage: lua examples/simple_request.lua [] ]] local uri = assert(arg[1], "URI needed") local req_body = arg[2] local req_timeout = 10 local request = require "http.request" local req = request.new_from_uri(uri) if req_body then req.headers:upsert(":method", "POST") req:set_body(req_body) end print("# REQUEST") print("## HEADERS") for k, v in req.headers:each() do print(k, v) end print() if req.body then print("## BODY") print(req.body) print() end print("# RESPONSE") local headers, stream = req:go(req_timeout) if headers == nil then io.stderr:write(tostring(stream), "\n") os.exit(1) end print("## HEADERS") for k, v in headers:each() do print(k, v) end print() print("## BODY") local body, err = stream:get_body_as_string() if not body and err then io.stderr:write(tostring(err), "\n") os.exit(1) end print(body) print() lua-http-0.4/examples/websocket_client.lua000077500000000000000000000010311400726324600207620ustar00rootroot00000000000000#!/usr/bin/env lua --[[ Example of websocket client usage - Connects to the gdax market data feed. Documentation of feed: https://docs.gdax.com/#websocket-feed - Sends a subscribe message - Prints off 5 messages - Close the socket and clean up. 
]] local websocket = require "http.websocket" local ws = websocket.new_from_uri("wss://ws-feed.gdax.com") assert(ws:connect()) assert(ws:send([[{"type": "subscribe", "product_id": "BTC-USD"}]])) for _=1, 5 do local data = assert(ws:receive()) print(data) end assert(ws:close()) lua-http-0.4/http-0.4-0.rockspec000066400000000000000000000032101400726324600162610ustar00rootroot00000000000000package = "http" version = "0.4-0" description = { summary = "HTTP library for Lua"; homepage = "https://github.com/daurnimator/lua-http"; license = "MIT"; } source = { url = "https://github.com/daurnimator/lua-http/archive/v0.4.zip"; dir = "lua-http-0.4"; } dependencies = { "lua >= 5.1"; "compat53 >= 0.3"; -- Only if lua < 5.3 "bit32"; -- Only if lua == 5.1 "cqueues >= 20161214"; "luaossl >= 20161208"; "basexx >= 0.2.0"; "lpeg"; "lpeg_patterns >= 0.5"; "binaryheap >= 0.3"; "fifo"; -- "psl"; -- Optional } build = { type = "builtin"; modules = { ["http.bit"] = "http/bit.lua"; ["http.client"] = "http/client.lua"; ["http.connection_common"] = "http/connection_common.lua"; ["http.cookie"] = "http/cookie.lua"; ["http.h1_connection"] = "http/h1_connection.lua"; ["http.h1_reason_phrases"] = "http/h1_reason_phrases.lua"; ["http.h1_stream"] = "http/h1_stream.lua"; ["http.h2_connection"] = "http/h2_connection.lua"; ["http.h2_error"] = "http/h2_error.lua"; ["http.h2_stream"] = "http/h2_stream.lua"; ["http.headers"] = "http/headers.lua"; ["http.hpack"] = "http/hpack.lua"; ["http.hsts"] = "http/hsts.lua"; ["http.proxies"] = "http/proxies.lua"; ["http.request"] = "http/request.lua"; ["http.server"] = "http/server.lua"; ["http.socks"] = "http/socks.lua"; ["http.stream_common"] = "http/stream_common.lua"; ["http.tls"] = "http/tls.lua"; ["http.util"] = "http/util.lua"; ["http.version"] = "http/version.lua"; ["http.websocket"] = "http/websocket.lua"; ["http.zlib"] = "http/zlib.lua"; ["http.compat.prosody"] = "http/compat/prosody.lua"; ["http.compat.socket"] = "http/compat/socket.lua"; }; } lua-http-0.4/http/000077500000000000000000000000001400726324600140765ustar00rootroot00000000000000lua-http-0.4/http/bit.lua000066400000000000000000000024671400726324600153700ustar00rootroot00000000000000--[[ This module smooths over all the various lua bit libraries The bit operations are only done - on bytes (8 bits), - with quantities <= LONG_MAX (0x7fffffff) - band with 0x80000000 that is subsequently compared with 0 This means we can ignore the differences between bit libraries. ]] -- Lua 5.1 didn't have `load` or bitwise operators, just let it fall through. if _VERSION ~= "Lua 5.1" then -- Lua 5.3+ has built-in bit operators, wrap them in a function. 
-- Use debug.getinfo to get correct file+line numbers for loaded snippet local info = debug.getinfo(1, "Sl") local has_bitwise, bitwise = pcall(load(("\n"):rep(info.currentline+1)..[[return { band = function(a, b) return a & b end; bor = function(a, b) return a | b end; bxor = function(a, b) return a ~ b end; }]], info.source)) if has_bitwise then return bitwise end end -- The "bit" library that comes with luajit -- also available for lua 5.1 as "luabitop": http://bitop.luajit.org/ local has_bit, bit = pcall(require, "bit") if has_bit then return { band = bit.band; bor = bit.bor; bxor = bit.bxor; } end -- The "bit32" library shipped with lua 5.2 local has_bit32, bit32 = pcall(require, "bit32") if has_bit32 then return { band = bit32.band; bor = bit32.bor; bxor = bit32.bxor; } end error("Please install a bit library") lua-http-0.4/http/bit.tld000066400000000000000000000001611400726324600153570ustar00rootroot00000000000000band: (integer, integer) -> (integer) bor: (integer, integer) -> (integer) bxor: (integer, integer) -> (integer) lua-http-0.4/http/client.lua000066400000000000000000000061351400726324600160640ustar00rootroot00000000000000local ca = require "cqueues.auxlib" local cs = require "cqueues.socket" local http_tls = require "http.tls" local http_util = require "http.util" local connection_common = require "http.connection_common" local onerror = connection_common.onerror local new_h1_connection = require "http.h1_connection".new local new_h2_connection = require "http.h2_connection".new local openssl_ssl = require "openssl.ssl" local openssl_ctx = require "openssl.ssl.context" local openssl_verify_param = require "openssl.x509.verify_param" -- Create a shared 'default' TLS context local default_ctx = http_tls.new_client_context() local function negotiate(s, options, timeout) s:onerror(onerror) local tls = options.tls local version = options.version if tls then local ctx = options.ctx or default_ctx local ssl = openssl_ssl.new(ctx) local host = options.host local host_is_ip = host and http_util.is_ip(host) local sendname = options.sendname if sendname == nil and not host_is_ip and host then sendname = host end if sendname then -- false indicates no sendname wanted ssl:setHostName(sendname) end if http_tls.has_alpn then if version == nil then ssl:setAlpnProtos({"h2", "http/1.1"}) elseif version == 1.1 then ssl:setAlpnProtos({"http/1.1"}) elseif version == 2 then ssl:setAlpnProtos({"h2"}) end end if version == 2 then ssl:setOptions(openssl_ctx.OP_NO_TLSv1 + openssl_ctx.OP_NO_TLSv1_1) end if host and http_tls.has_hostname_validation then local params = openssl_verify_param.new() if host_is_ip then params:setIP(host) else params:setHost(host) end -- Allow user defined params to override local old = ssl:getParam() old:inherit(params) ssl:setParam(old) end local ok, err, errno = s:starttls(ssl, timeout) if not ok then return nil, err, errno end end if version == nil then local ssl = s:checktls() if ssl then if http_tls.has_alpn and ssl:getAlpnSelected() == "h2" then version = 2 else version = 1.1 end else -- TODO: attempt upgrading http1 to http2 version = 1.1 end end if version < 2 then return new_h1_connection(s, "client", version) elseif version == 2 then return new_h2_connection(s, "client", options.h2_settings) else error("Unknown HTTP version: " .. 
tostring(version)) end end local function connect(options, timeout) local bind = options.bind if bind ~= nil then assert(type(bind) == "string") local bind_address, bind_port = bind:match("^(.-):(%d+)$") if bind_address then bind_port = tonumber(bind_port, 10) else bind_address = bind end local ipv6 = bind_address:match("^%[([:%x]+)%]$") if ipv6 then bind_address = ipv6 end bind = { address = bind_address; port = bind_port; } end local s, err, errno = ca.fileresult(cs.connect { family = options.family; host = options.host; port = options.port; path = options.path; bind = bind; sendname = false; v6only = options.v6only; nodelay = true; }) if s == nil then return nil, err, errno end return negotiate(s, options, timeout) end return { negotiate = negotiate; connect = connect; } lua-http-0.4/http/compat/000077500000000000000000000000001400726324600153615ustar00rootroot00000000000000lua-http-0.4/http/compat/prosody.lua000066400000000000000000000045311400726324600175660ustar00rootroot00000000000000--[[ Compatibility module for prosody's net.http Documentation: https://prosody.im/doc/developers/net/http This has a few key differences: - `compat.prosody.request` must be called from within a running cqueue - The callback will be called from a different thread in the cqueue - The returned "request" object will be a lua-http request object - Same request object is passed to the callback on errors and as the 4th argument on success - The user-agent will be from lua-http - lua-http features (such as HTTP2) will be used where possible ]] local new_from_uri = require "http.request".new_from_uri local cqueues = require "cqueues" local function do_request(self, callback) local headers, stream = self:go() if headers == nil then -- `stream` is error message callback(stream, 0, self) return end local response_body, err = stream:get_body_as_string() stream:shutdown() if response_body == nil then callback(err, 0, self) return end -- code might not be convertible to a number in http2, so need `or` case local code = headers:get(":status") code = tonumber(code, 10) or code -- convert headers to table with comma separated values local headers_as_kv = {} for key, value in headers:each() do if key ~= ":status" then local old = headers_as_kv[key] if old then headers_as_kv[key] = old .. "," .. 
value else headers_as_kv[key] = value end end end local response = { code = code; httpversion = stream.peer_version; headers = headers_as_kv; body = response_body; } callback(response_body, code, response, self) end local function new_prosody(url, ex, callback) local cq = assert(cqueues.running(), "must be running inside a cqueue") local ok, req = pcall(new_from_uri, url) if not ok then callback(nil, 0, req) return nil, "invalid-url" end req.follow_redirects = false -- prosody doesn't follow redirects if ex then if ex.body then req.headers:upsert(":method", "POST") req:set_body(ex.body) req.headers:append("content-type", "application/x-www-form-urlencoded") end if ex.method then req.headers:upsert(":method", ex.method) end if ex.headers then for k, v in pairs(ex.headers) do req.headers:upsert(k:lower(), v) end end if ex.sslctx then req.ctx = ex.sslctx end end cq:wrap(do_request, req, callback) return req end return { request = new_prosody; } lua-http-0.4/http/compat/socket.lua000066400000000000000000000115141400726324600173560ustar00rootroot00000000000000--[[ Compatibility layer with luasocket's socket.http module Documentation: http://w3.impa.br/~diego/software/luasocket/http.html This module a few key differences: - The `.create` member is not supported - The user-agent will be from lua-http - lua-http features (such as HTTPS and HTTP2) will be used where possible - trailers are currently discarded - error messages are different ]] local monotime = require "cqueues".monotime local ce = require "cqueues.errno" local request = require "http.request" local version = require "http.version" local reason_phrases = require "http.h1_reason_phrases" local M = { PROXY = nil; -- default proxy used for connections TIMEOUT = 60; -- timeout for all I/O operations -- default user agent reported to server. 
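-- Example (illustrative sketch; the URL is a placeholder): using this module
-- as a near drop-in replacement for luasocket's socket.http simple form:
--
--   local http = require "http.compat.socket"
--   local body, code, headers, status = http.request("http://example.com/")
--   if body then
--     print(code, status)             -- e.g. 200   OK
--     print(headers["content-type"])  -- header names are lowercased
--   end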
USERAGENT = string.format("%s/%s (luasocket compatibility layer)", version.name, version.version); } local function ltn12_pump_step(src, snk) local chunk, src_err = src() local ret, snk_err = snk(chunk, src_err) if chunk and ret then return 1 else return nil, src_err or snk_err end end local function get_body_as_string(stream, deadline) local body, err, errno = stream:get_body_as_string(deadline and deadline-monotime()) if not body then if err == nil then return nil elseif errno == ce.ETIMEDOUT then return nil, "timeout" else return nil, err end end return body end local function returns_1() return 1 end function M.request(reqt, b) local deadline = M.TIMEOUT and monotime()+M.TIMEOUT local req, proxy, user_headers, get_body if type(reqt) == "string" then req = request.new_from_uri(reqt) proxy = M.PROXY if b ~= nil then assert(type(b) == "string", "body must be nil or string") req.headers:upsert(":method", "POST") req:set_body(b) req.headers:upsert("content-type", "application/x-www-form-urlencoded") end get_body = get_body_as_string else assert(reqt.create == nil, "'create' option not supported") req = request.new_from_uri(reqt.url) proxy = reqt.proxy or M.PROXY if reqt.host ~= nil then req.host = reqt.host end if reqt.port ~= nil then req.port = reqt.port end if reqt.method ~= nil then assert(type(reqt.method) == "string", "'method' option must be nil or string") req.headers:upsert(":method", reqt.method) end if reqt.redirect == false then req.follow_redirects = false else req.max_redirects = 5 - (reqt.nredirects or 0) end user_headers = reqt.headers local step = reqt.step or ltn12_pump_step local src = reqt.source if src ~= nil then local co = coroutine.create(function() while true do assert(step(src, coroutine.yield)) end end) req:set_body(function() -- Pass true through to coroutine to indicate success of last write local ok, chunk, err = coroutine.resume(co, true) if not ok then error(chunk) elseif err then error(err) else return chunk end end) end local sink = reqt.sink -- luasocket returns `1` when using a request table if sink ~= nil then get_body = function(stream, deadline) -- luacheck: ignore 431 local function res_body_source() local chunk, err, errno = stream:get_next_chunk(deadline and deadline-monotime()) if not chunk then if err == nil then return nil elseif errno == ce.EPIPE then return nil, "closed" elseif errno == ce.ETIMEDOUT then return nil, "timeout" else return nil, err end end return chunk end -- This loop is the same as ltn12.pump.all while true do local ok, err = step(res_body_source, sink) if not ok then if err then return nil, err else return 1 end end end end else get_body = returns_1 end end req.headers:upsert("user-agent", M.USERAGENT) req.proxy = proxy or false if user_headers then for name, field in pairs(user_headers) do name = name:lower() field = "" .. field .. 
"" -- force coercion in same style as luasocket if name == "host" then req.headers:upsert(":authority", field) else req.headers:append(name, field) end end end local res_headers, stream, errno = req:go(deadline and deadline-monotime()) if not res_headers then if errno == ce.EPIPE or stream == nil then return nil, "closed" elseif errno == ce.ETIMEDOUT then return nil, "timeout" else return nil, stream end end local code = res_headers:get(":status") local status = reason_phrases[code] -- In luasocket, status codes are returned as numbers code = tonumber(code, 10) or code local headers = {} for name in res_headers:each() do if name ~= ":status" and headers[name] == nil then headers[name] = res_headers:get_comma_separated(name) end end local body, err = get_body(stream, deadline) stream:shutdown() if not body then return nil, err end return body, code, headers, status end return M lua-http-0.4/http/connection_common.lua000066400000000000000000000041571400726324600203170ustar00rootroot00000000000000local cqueues = require "cqueues" local ca = require "cqueues.auxlib" local ce = require "cqueues.errno" local connection_methods = {} local function onerror(socket, op, why, lvl) -- luacheck: ignore 212 local err = string.format("%s: %s", op, ce.strerror(why)) if op == "starttls" then local ssl = socket:checktls() if ssl and ssl.getVerifyResult then local code, msg = ssl:getVerifyResult() if code ~= 0 then err = err .. ":" .. msg end end end if why == ce.ETIMEDOUT then if op == "fill" or op == "read" then socket:clearerr("r") elseif op == "flush" then socket:clearerr("w") end end return err, why end function connection_methods:pollfd() if self.socket == nil then return nil end return self.socket:pollfd() end function connection_methods:events() if self.socket == nil then return nil end return self.socket:events() end function connection_methods:timeout() if self.socket == nil then return nil end return self.socket:timeout() end function connection_methods:onidle_() -- luacheck: ignore 212 end function connection_methods:onidle(...) local old_handler = self.onidle_ if select("#", ...) > 0 then self.onidle_ = ... 
end return old_handler end function connection_methods:connect(timeout) if self.socket == nil then return nil end local ok, err, errno = self.socket:connect(timeout) if not ok then return nil, err, errno end return true end function connection_methods:checktls() if self.socket == nil then return nil end return self.socket:checktls() end function connection_methods:localname() if self.socket == nil then return nil end return ca.fileresult(self.socket:localname()) end function connection_methods:peername() if self.socket == nil then return nil end return ca.fileresult(self.socket:peername()) end -- Primarily used for testing function connection_methods:flush(timeout) return self.socket:flush("n", timeout) end function connection_methods:close() self:shutdown() if self.socket then cqueues.poll() cqueues.poll() self.socket:close() end return true end return { onerror = onerror; methods = connection_methods; } lua-http-0.4/http/connection_common.tld000066400000000000000000000020741400726324600203150ustar00rootroot00000000000000interface connection -- implements cqueues polling interface const pollfd: (self) -> (nil)|(integer) -- TODO: cqueues condition const events: (self) -> (nil)|(string|integer) const timeout: (self) -> (nil)|(number) const checktls: (self) -> (nil)|(any) -- TODO: luaossl SSL object const localname: (self) -> (integer, string, integer?)|(nil)|(nil, string, number) const peername: (self) -> (integer, string, integer?)|(nil)|(nil, string, number) const onidle: (self, (connection)->()) -> ((connection)->()) const connect: (self) -> (true)|(nil)|(nil, string, number) const flush: (self, number) -> (true)|(nil, string, number) const close: (self) -> (true) -- Not in connection_common.lua const version: integer -- XXX: needs circular require https://github.com/andremm/typedlua/issues/120 -- const new_stream: (self) -> (stream)|(nil) -- Note: in http2 this takes optional id argument -- const get_next_incoming_stream: (self, number?) 
-> (stream)|(nil)|(nil, string, number) const shutdown: (self) -> (true) end lua-http-0.4/http/cookie.lua000066400000000000000000000612551400726324600160630ustar00rootroot00000000000000--[[ Data structures useful for Cookies RFC 6265 ]] local http_patts = require "lpeg_patterns.http" local binaryheap = require "binaryheap" local http_util = require "http.util" local has_psl, psl = pcall(require, "psl") local EOF = require "lpeg".P(-1) local sane_cookie_date = http_patts.IMF_fixdate * EOF local Cookie = http_patts.Cookie * EOF local Set_Cookie = http_patts.Set_Cookie * EOF local function bake(name, value, expiry_time, domain, path, secure_only, http_only, same_site) -- This function is optimised to only do one concat operation at the end local cookie = { name, "=", value } local n = 3 if expiry_time and expiry_time ~= (1e999) then -- Prefer Expires over Max-age unless it is a deletion request if expiry_time == (-1e999) then n = n + 1 cookie[n] = "; Max-Age=0" else n = n + 2 cookie[n-1] = "; Expires=" cookie[n] = http_util.imf_date(expiry_time) end end if domain then n = n + 2 cookie[n-1] = "; Domain=" cookie[n] = domain end if path then n = n + 2 cookie[n-1] = "; Path=" cookie[n] = http_util.encodeURI(path) end if secure_only then n = n + 1 cookie[n] = "; Secure" end if http_only then n = n + 1 cookie[n] = "; HttpOnly" end -- https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-02#section-5.2 if same_site then local v if same_site == "strict" then v = "; SameSite=Strict" elseif same_site == "lax" then v = "; SameSite=Lax" else error('invalid value for same_site, expected "strict" or "lax"') end n = n + 1 cookie[n] = v end return table.concat(cookie, "", 1, n) end local function parse_cookie(cookie_header) return Cookie:match(cookie_header) end local function parse_cookies(req_headers) local cookie_headers = req_headers:get_as_sequence("cookie") local cookies for i=1, cookie_headers.n do local header_cookies = parse_cookie(cookie_headers[i]) if header_cookies then if cookies then for k, v in pairs(header_cookies) do cookies[k] = v end else cookies = header_cookies end end end return cookies or {} end local function parse_setcookie(setcookie_header) return Set_Cookie:match(setcookie_header) end local canonicalise_host if has_psl then canonicalise_host = psl.str_to_utf8lower else canonicalise_host = function(str) -- fail on non-ascii chars if str:find("[^%p%w]") then return nil end return str:lower() end end --[[ A string domain-matches a given domain string if at least one of the following conditions hold: - The domain string and the string are identical. (Note that both the domain string and the string will have been canonicalized to lower case at this point.) - All of the following conditions hold: - The domain string is a suffix of the string. - The last character of the string that is not included in the domain string is a %x2E (".") character. - The string is a host name (i.e., not an IP address). ]] local function domain_match(domain_string, str) return str == domain_string or ( str:sub(-#domain_string) == domain_string and str:sub(-#domain_string-1, -#domain_string-1) == "." and not http_util.is_ip(str) ) end --[[ A request-path path-matches a given cookie-path if at least one of the following conditions holds: - The cookie-path and the request-path are identical. - The cookie-path is a prefix of the request-path, and the last character of the cookie-path is %x2F ("/"). 
- The cookie-path is a prefix of the request-path, and the first character of the request-path that is not included in the cookie-path is a %x2F ("/") character. ]] local function path_match(path, req_path) if path == req_path then return true elseif path == req_path:sub(1, #path) then if path:sub(-1, -1) == "/" then return true elseif req_path:sub(#path + 1, #path + 1) == "/" then return true end end return false end local cookie_methods = {} local cookie_mt = { __name = "http.cookie.cookie"; __index = cookie_methods; } function cookie_methods:netscape_format() return string.format("%s%s\t%s\t%s\t%s\t%d\t%s\t%s\n", self.http_only and "#HttpOnly_" or "", self.domain or "unknown", self.host_only and "TRUE" or "FALSE", self.path, self.secure_only and "TRUE" or "FALSE", math.max(0, math.min(2147483647, self.expiry_time)), self.name, self.value) end local default_psl if has_psl and psl.latest then default_psl = psl.latest() elseif has_psl then default_psl = psl.builtin() end local store_methods = { psl = default_psl; time = function() return os.time() end; max_cookie_length = (1e999); max_cookies = (1e999); max_cookies_per_domain = (1e999); } local store_mt = { __name = "http.cookie.store"; __index = store_methods; } local function new_store() return setmetatable({ domains = {}; expiry_heap = binaryheap.minUnique(); n_cookies = 0; n_cookies_per_domain = {}; }, store_mt) end local function add_to_store(self, cookie, req_is_http, now) if cookie.expiry_time < now then -- This was all just a trigger to delete the old cookie self:remove(cookie.domain, cookie.path, cookie.name) else local name = cookie.name local cookie_length = #name + 1 + #cookie.value if cookie_length > self.max_cookie_length then return false end local domain = cookie.domain local domain_cookies = self.domains[domain] local path_cookies local old_cookie if domain_cookies ~= nil then path_cookies = domain_cookies[cookie.path] if path_cookies ~= nil then old_cookie = path_cookies[name] end end -- If the cookie store contains a cookie with the same name, -- domain, and path as the newly created cookie: if old_cookie then -- If the newly created cookie was received from a "non-HTTP" -- API and the old-cookie's http-only-flag is set, abort these -- steps and ignore the newly created cookie entirely. if not req_is_http and old_cookie.http_only then return false end -- Update the creation-time of the newly created cookie to -- match the creation-time of the old-cookie. cookie.creation_time = old_cookie.creation_time -- Remove the old-cookie from the cookie store. 
self.expiry_heap:remove(old_cookie) else if self.n_cookies >= self.max_cookies or self.max_cookies_per_domain < 1 then return false end -- Cookie will be added if domain_cookies == nil then path_cookies = {} domain_cookies = { [cookie.path] = path_cookies; } self.domains[domain] = domain_cookies self.n_cookies_per_domain[domain] = 1 else local n_cookies_per_domain = self.n_cookies_per_domain[domain] if n_cookies_per_domain >= self.max_cookies_per_domain then return false end path_cookies = domain_cookies[cookie.path] if path_cookies == nil then path_cookies = {} domain_cookies[cookie.path] = path_cookies end self.n_cookies_per_domain[domain] = n_cookies_per_domain end self.n_cookies = self.n_cookies + 1 end path_cookies[name] = cookie self.expiry_heap:insert(cookie.expiry_time, cookie) end return true end function store_methods:store(req_domain, req_path, req_is_http, req_is_secure, req_site_for_cookies, name, value, params) assert(type(req_domain) == "string") assert(type(req_path) == "string") assert(type(name) == "string") assert(type(value) == "string") assert(type(params) == "table") local now = self.time() req_domain = assert(canonicalise_host(req_domain), "invalid request domain") -- Clean now so that we can assume there are no expired cookies in store self:clean() -- RFC 6265 Section 5.3 local cookie = setmetatable({ name = name; value = value; expiry_time = (1e999); domain = req_domain; path = nil; creation_time = now; last_access_time = now; persistent = false; host_only = true; secure_only = not not params.secure; http_only = not not params.httponly; same_site = nil; }, cookie_mt) -- If a cookie has both the Max-Age and the Expires attribute, the Max- -- Age attribute has precedence and controls the expiration date of the -- cookie. local max_age = params["max-age"] if max_age and max_age:find("^%-?[0-9]+$") then max_age = tonumber(max_age, 10) cookie.persistent = true if max_age <= 0 then cookie.expiry_time = (-1e999) else cookie.expiry_time = now + max_age end elseif params.expires then local date = sane_cookie_date:match(params.expires) if date then cookie.persistent = true cookie.expiry_time = os.time(date) end end local domain = params.domain or ""; -- If the first character of the attribute-value string is %x2E ("."): -- Let cookie-domain be the attribute-value without the leading %x2E (".") character. if domain:sub(1, 1) == "." then domain = domain:sub(2) end -- Convert the cookie-domain to lower case. domain = canonicalise_host(domain) if not domain then return false end -- If the user agent is configured to reject "public suffixes" and -- the domain-attribute is a public suffix: if domain ~= "" and self.psl and self.psl:is_public_suffix(domain) then -- If the domain-attribute is identical to the canonicalized request-host: if domain == req_domain then -- Let the domain-attribute be the empty string. domain = "" else -- Ignore the cookie entirely and abort these steps. return false end end -- If the domain-attribute is non-empty: if domain ~= "" then -- If the canonicalized request-host does not domain-match the -- domain-attribute: if not domain_match(domain, req_domain) then -- Ignore the cookie entirely and abort these steps. return false else -- Set the cookie's host-only-flag to false. cookie.host_only = false -- Set the cookie's domain to the domain-attribute. 
cookie.domain = domain end end -- RFC 6265 Section 5.2.4 -- If the attribute-value is empty or if the first character of the -- attribute-value is not %x2F ("/") local path = params.path or "" if path:sub(1, 1) ~= "/" then -- Let cookie-path be the default-path. local default_path -- RFC 6265 Section 5.1.4 -- Let uri-path be the path portion of the request-uri if such a -- portion exists (and empty otherwise). For example, if the -- request-uri contains just a path (and optional query string), -- then the uri-path is that path (without the %x3F ("?") character -- or query string), and if the request-uri contains a full -- absoluteURI, the uri-path is the path component of that URI. -- If the uri-path is empty or if the first character of the uri- -- path is not a %x2F ("/") character, output %x2F ("/") and skip -- the remaining steps. -- If the uri-path contains no more than one %x2F ("/") character, -- output %x2F ("/") and skip the remaining step. if req_path:sub(1, 1) ~= "/" or not req_path:find("/", 2, true) then default_path = "/" else -- Output the characters of the uri-path from the first character up -- to, but not including, the right-most %x2F ("/"). default_path = req_path:match("^([^?]*)/") end cookie.path = default_path else cookie.path = path end -- If the scheme component of the request-uri does not denote a -- "secure" protocol (as defined by the user agent), and the -- cookie's secure-only-flag is true, then abort these steps and -- ignore the cookie entirely. if not req_is_secure and cookie.secure_only then return false end -- If the cookie was received from a "non-HTTP" API and the -- cookie's http-only-flag is set, abort these steps and ignore the -- cookie entirely. if not req_is_http and cookie.http_only then return false end -- If the cookie's secure-only-flag is not set, and the scheme -- component of request-uri does not denote a "secure" protocol, if not req_is_secure and not cookie.secure_only then -- then abort these steps and ignore the cookie entirely if the -- cookie store contains one or more cookies that meet all of the -- following criteria: for d, domain_cookies in pairs(self.domains) do -- See '3' below if domain_match(cookie.domain, d) or domain_match(d, cookie.domain) then for p, path_cookies in pairs(domain_cookies) do local cmp_cookie = path_cookies[name] -- 1. Their name matches the name of the newly-created cookie. if cmp_cookie -- 2. Their secure-only-flag is true. and cmp_cookie.secure_only -- 3. Their domain domain-matches the domain of the newly-created -- cookie, or vice-versa. -- Note: already checked above in domain_match -- 4. The path of the newly-created cookie path-matches the path -- of the existing cookie. and path_match(p, cookie.path) then return false end end end end end -- If the cookie-attribute-list contains an attribute with an -- attribute-name of "SameSite", set the cookie's same-site-flag to -- attribute-value (i.e. either "Strict" or "Lax"). Otherwise, set -- the cookie's same-site-flag to "None". local same_site = params.samesite if same_site then same_site = same_site:lower() if same_site == "lax" or same_site == "strict" then -- If the cookie's "same-site-flag" is not "None", and the cookie -- is being set from a context whose "site for cookies" is not an -- exact match for request-uri's host's registered domain, then -- abort these steps and ignore the newly created cookie entirely. 
if req_domain ~= req_site_for_cookies then return false end cookie.same_site = same_site end end -- If the cookie-name begins with a case-sensitive match for the -- string "__Secure-", abort these steps and ignore the cookie -- entirely unless the cookie's secure-only-flag is true. if not cookie.secure_only and name:sub(1, 9) == "__Secure-" then return false end -- If the cookie-name begins with a case-sensitive match for the -- string "__Host-", abort these steps and ignore the cookie -- entirely unless the cookie meets all the following criteria: -- 1. The cookie's secure-only-flag is true. -- 2. The cookie's host-only-flag is true. -- 3. The cookie-attribute-list contains an attribute with an -- attribute-name of "Path", and the cookie's path is "/". if not (cookie.secure_only and cookie.host_only and cookie.path == "/") and name:sub(1, 7) == "__Host-" then return false end return add_to_store(self, cookie, req_is_http, now) end function store_methods:store_from_request(req_headers, resp_headers, req_host, req_site_for_cookies) local set_cookies = resp_headers:get_as_sequence("set-cookie") local n = set_cookies.n if n == 0 then return true end local req_scheme = req_headers:get(":scheme") local req_authority = req_headers:get(":authority") local req_domain if req_authority then req_domain = http_util.split_authority(req_authority, req_scheme) else -- :authority can be missing for HTTP/1.0 requests; fall back to req_host req_domain = req_host end local req_path = req_headers:get(":path") local req_is_secure = req_scheme == "https" for i=1, n do local name, value, params = parse_setcookie(set_cookies[i]) if name then self:store(req_domain, req_path, true, req_is_secure, req_site_for_cookies, name, value, params) end end return true end function store_methods:get(domain, path, name) assert(type(domain) == "string") assert(type(path) == "string") assert(type(name) == "string") -- Clean now so that we can assume there are no expired cookies in store self:clean() local domain_cookies = self.domains[domain] if domain_cookies then local path_cookies = domain_cookies[path] if path_cookies then local cookie = path_cookies[name] if cookie then return cookie.value end end end return nil end function store_methods:remove(domain, path, name) assert(type(domain) == "string") assert(type(path) == "string" or (path == nil and name == nil)) assert(type(name) == "string" or name == nil) local domain_cookies = self.domains[domain] if not domain_cookies then return end local n_cookies = self.n_cookies if path == nil then -- Delete whole domain for _, path_cookies in pairs(domain_cookies) do for _, cookie in pairs(path_cookies) do self.expiry_heap:remove(cookie) n_cookies = n_cookies - 1 end end self.domains[domain] = nil self.n_cookies_per_domain[domain] = nil else local path_cookies = domain_cookies[path] if path_cookies then if name == nil then -- Delete all names at path local domains_deleted = 0 for _, cookie in pairs(path_cookies) do self.expiry_heap:remove(cookie) domains_deleted = domains_deleted + 1 end domain_cookies[path] = nil n_cookies = n_cookies - domains_deleted if next(domain_cookies) == nil then self.domains[domain] = nil self.n_cookies_per_domain[domain] = nil else self.n_cookies_per_domain[domain] = self.n_cookies_per_domain[domain] - domains_deleted end else -- Delete singular cookie local cookie = path_cookies[name] if cookie then self.expiry_heap:remove(cookie) n_cookies = n_cookies - 1 self.n_cookies_per_domain[domain] = self.n_cookies_per_domain[domain] - 1 path_cookies[name] = nil 
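-- Example (illustrative sketch; the domain, path and cookie values are
-- placeholders): a typical round trip through the public store API of this
-- module:
--
--   local http_cookie = require "http.cookie"
--   local store = http_cookie.new_store()
--   local name, value, params = http_cookie.parse_setcookie("id=abc; Path=/; HttpOnly")
--   store:store("example.com", "/", true, true, nil, name, value, params)
--   print(store:get("example.com", "/", "id"))  --> abc
--   -- build a Cookie header value for a request to https://example.com/index
--   print(store:lookup("example.com", "/index", true, true, true, nil, true))  --> id=abc
--   store:remove("example.com", "/", "id")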
if next(path_cookies) == nil then domain_cookies[path] = nil if next(domain_cookies) == nil then self.domains[domain] = nil self.n_cookies_per_domain[domain] = nil end end end end end end self.n_cookies = n_cookies end --[[ The user agent SHOULD sort the cookie-list in the following order: - Cookies with longer paths are listed before cookies with shorter paths. - Among cookies that have equal-length path fields, cookies with earlier creation-times are listed before cookies with later creation-times. ]] local function cookie_cmp(a, b) if #a.path ~= #b.path then return #a.path > #b.path end if a.creation_time ~= b.creation_time then return a.creation_time < b.creation_time end -- Now order doesn't matter, but have to be consistent for table.sort: -- use the fields that make a cookie unique if a.domain ~= b.domain then return a.domain < b.domain end return a.name < b.name end local function cookie_match(cookie, req_domain, req_is_http, req_is_secure, req_is_safe_method, req_site_for_cookies, req_is_top_level) -- req_domain should be already canonicalized if cookie.host_only then -- Either: -- The cookie's host-only-flag is true and the canonicalized -- request-host is identical to the cookie's domain. if cookie.domain ~= req_domain then return false end end -- Or: -- The cookie's host-only-flag is false and the canonicalized -- request-host domain-matches the cookie's domain. -- already done domain_match and path_match -- If the cookie's http-only-flag is true, then exclude the -- cookie if the cookie-string is being generated for a "non- -- HTTP" API (as defined by the user agent). if cookie.http_only and not req_is_http then return false end if cookie.secure_only and not req_is_secure then return false end -- If the cookie's same-site-flag is not "None", and the HTTP -- request is cross-site (as defined in Section 5.2) then exclude -- the cookie unless all of the following statements hold: if cookie.same_site and req_site_for_cookies ~= req_domain and not ( -- 1. The same-site-flag is "Lax" cookie.same_site == "lax" -- 2. The HTTP request's method is "safe". and req_is_safe_method -- 3. The HTTP request's target browsing context is a top-level browsing context. and req_is_top_level ) then return false end return true end function store_methods:lookup(req_domain, req_path, req_is_http, req_is_secure, req_is_safe_method, req_site_for_cookies, req_is_top_level, max_cookie_length) req_domain = assert(type(req_domain) == "string" and canonicalise_host(req_domain), "invalid request domain") assert(type(req_path) == "string") if max_cookie_length ~= nil then assert(type(max_cookie_length) == "number") else max_cookie_length = self.max_cookie_length end local now = self.time() -- Clean now so that we can assume there are no expired cookies in store self:clean() local list = {} local n = 0 for domain, domain_cookies in pairs(self.domains) do if domain_match(domain, req_domain) then for path, path_cookies in pairs(domain_cookies) do if path_match(path, req_path) then for _, cookie in pairs(path_cookies) do if cookie_match(cookie, req_domain, req_is_http, req_is_secure, req_is_safe_method, req_site_for_cookies, req_is_top_level) then cookie.last_access_time = now n = n + 1 list[n] = cookie end end end end end end table.sort(list, cookie_cmp) local cookie_length = -2 -- length of separator ("; ") for i=1, n do local cookie = list[i] -- TODO: validate? local cookie_pair = cookie.name .. "=" .. 
cookie.value local new_length = cookie_length + #cookie_pair + 2 if new_length > max_cookie_length then break end list[i] = cookie_pair cookie_length = new_length end return table.concat(list, "; ", 1, n) end function store_methods:lookup_for_request(req_headers, req_host, req_site_for_cookies, req_is_top_level, max_cookie_length) local req_method = req_headers:get(":method") if req_method == "CONNECT" then return "" end local req_scheme = req_headers:get(":scheme") local req_authority = req_headers:get(":authority") local req_domain if req_authority then req_domain = http_util.split_authority(req_authority, req_scheme) else -- :authority can be missing for HTTP/1.0 requests; fall back to req_host req_domain = req_host end local req_path = req_headers:get(":path") local req_is_secure = req_scheme == "https" local req_is_safe_method = http_util.is_safe_method(req_method) return self:lookup(req_domain, req_path, true, req_is_secure, req_is_safe_method, req_site_for_cookies, req_is_top_level, max_cookie_length) end function store_methods:clean_due() local next_expiring = self.expiry_heap:peek() if not next_expiring then return (1e999) end return next_expiring.expiry_time end function store_methods:clean() local now = self.time() while self:clean_due() < now do local cookie = self.expiry_heap:pop() self.n_cookies = self.n_cookies - 1 local domain = cookie.domain local domain_cookies = self.domains[domain] if domain_cookies then self.n_cookies_per_domain[domain] = self.n_cookies_per_domain[domain] - 1 local path_cookies = domain_cookies[cookie.path] if path_cookies then path_cookies[cookie.name] = nil if next(path_cookies) == nil then domain_cookies[cookie.path] = nil if next(domain_cookies) == nil then self.domains[domain] = nil self.n_cookies_per_domain[domain] = nil end end end end end return true end -- Files in 'netscape format' -- curl's lib/cookie.c is best reference for the format local function parse_netscape_format(line, now) if line == "" then return end local i = 1 local http_only = false if line:sub(1, 1) == "#" then if line:sub(1, 10) == "#HttpOnly_" then http_only = true i = 11 else return end end local domain, host_only, path, secure_only, expiry, name, value = line:match("^%.?([^\t]+)\t([^\t]+)\t([^\t]+)\t([^\t]+)\t(%d+)\t([^\t]+)\t(.+)", i) if not domain then return end domain = canonicalise_host(domain) if domain == nil then return end if host_only == "TRUE" then host_only = true elseif host_only == "FALSE" then host_only = false else return end if secure_only == "TRUE" then secure_only = true elseif secure_only == "FALSE" then secure_only = false else return end expiry = tonumber(expiry, 10) return setmetatable({ name = name; value = value; expiry_time = expiry; domain = domain; path = path; creation_time = now; last_access_time = now; persistent = expiry == 0; host_only = host_only; secure_only = secure_only; http_only = http_only; same_site = nil; }, cookie_mt) end function store_methods:load_from_file(file) local now = self.time() -- Clean now so that we don't hit storage limits self:clean() local cookies = {} local n = 0 while true do local line, err, errno = file:read() if not line then if err ~= nil then return nil, err, errno end break end local cookie = parse_netscape_format(line, now) if cookie then n = n + 1 cookies[n] = cookie end end for i=1, n do local cookie = cookies[i] add_to_store(self, cookie, cookie.http_only, now) end return true end function store_methods:save_to_file(file) do -- write a preamble local ok, err, errno = file:write [[ # Netscape HTTP 
Cookie File # This file was generated by lua-http ]] if not ok then return nil, err, errno end end for _, domain_cookies in pairs(self.domains) do for _, path_cookies in pairs(domain_cookies) do for _, cookie in pairs(path_cookies) do local ok, err, errno = file:write(cookie:netscape_format()) if not ok then return nil, err, errno end end end end return true end return { bake = bake; parse_cookie = parse_cookie; parse_cookies = parse_cookies; parse_setcookie = parse_setcookie; new_store = new_store; store_mt = store_mt; store_methods = store_methods; } lua-http-0.4/http/cookie.tld000066400000000000000000000022561400726324600160610ustar00rootroot00000000000000require "http.headers" bake: (string, string, number?, string?, string?, true?, true?, string?) -> (string) parse_cookie: (string) -> ({string:string}) parse_cookies: (headers) -> ({{string:string}}) parse_setcookie: (string) -> (string, string, {string:string}) interface cookie_store psl: any|false -- TODO: use psl type time: () -> (number) max_cookie_length: number max_cookies: number max_cookies_per_domain: number const store: (self, string, string, boolean, boolean, string?, string, string, {string:string}) -> (boolean) const store_from_request: (self, headers, headers, string, string?) -> (boolean) const get: (self, string, string, string) -> (string) const remove: (self, string, string?, string?) -> () const lookup: (self, string, string, boolean?, boolean?, boolean?, string?, boolean?, integer?) -> () const lookup_for_request: (self, headers, string, string?, boolean?, integer?) -> () const clean_due: (self) -> (number) const clean: (self) -> (boolean) const load_from_file: (self, file) -> (true) | (nil, string, integer) const save_to_file: (self, file) -> (true) | (nil, string, integer) end new_store: () -> (cookie_store) lua-http-0.4/http/h1_connection.lua000066400000000000000000000321621400726324600173340ustar00rootroot00000000000000-- This module implements the socket level functionality needed for an HTTP 1 connection local cqueues = require "cqueues" local monotime = cqueues.monotime local ca = require "cqueues.auxlib" local cc = require "cqueues.condition" local ce = require "cqueues.errno" local connection_common = require "http.connection_common" local onerror = connection_common.onerror local h1_stream = require "http.h1_stream" local new_fifo = require "fifo" local connection_methods = {} for k,v in pairs(connection_common.methods) do connection_methods[k] = v end local connection_mt = { __name = "http.h1_connection"; __index = connection_methods; } function connection_mt:__tostring() return string.format("http.h1_connection{type=%q;version=%.1f}", self.type, self.version) end -- assumes ownership of the socket local function new_connection(socket, conn_type, version) assert(socket, "must provide a socket") if conn_type ~= "client" and conn_type ~= "server" then error('invalid connection type. 
must be "client" or "server"') end assert(version == 1 or version == 1.1, "unsupported version") local self = setmetatable({ socket = socket; type = conn_type; version = version; -- for server: streams waiting to go out -- for client: streams waiting for a response pipeline = new_fifo(); -- pipeline condition is stored in stream itself -- for server: held while request being read -- for client: held while writing request req_locked = nil; -- signaled when unlocked req_cond = cc.new(); -- A function that will be called if the connection becomes idle onidle_ = nil; }, connection_mt) socket:setvbuf("full", math.huge) -- 'infinite' buffering; no write locks needed socket:setmode("b", "bf") socket:onerror(onerror) return self end function connection_methods:setmaxline(read_length) if self.socket == nil then return nil end self.socket:setmaxline(read_length) return true end function connection_methods:clearerr(...) if self.socket == nil then return nil end return self.socket:clearerr(...) end function connection_methods:error(...) if self.socket == nil then return nil end return self.socket:error(...) end function connection_methods:take_socket() local s = self.socket if s == nil then -- already taken return nil end self.socket = nil -- Shutdown *after* taking away socket so shutdown handlers can't effect the socket self:shutdown() -- Reset socket to some defaults s:onerror(nil) return s end function connection_methods:shutdown(dir) if dir == nil or dir:match("w") then while self.pipeline:length() > 0 do local stream = self.pipeline:peek() stream:shutdown() end end if self.socket then return ca.fileresult(self.socket:shutdown(dir)) else return true end end function connection_methods:new_stream() assert(self.type == "client") if self.socket == nil or self.socket:eof("w") then return nil end local stream = h1_stream.new(self) return stream end -- this function *should never throw* function connection_methods:get_next_incoming_stream(timeout) assert(self.type == "server") -- Make sure we don't try and read before the previous request has been fully read if self.req_locked then local deadline = timeout and monotime()+timeout assert(cqueues.running(), "cannot wait for condition if not within a cqueues coroutine") if cqueues.poll(self.req_cond, timeout) == timeout then return nil, ce.strerror(ce.ETIMEDOUT), ce.ETIMEDOUT end timeout = deadline and deadline-monotime() assert(self.req_locked == nil) end if self.socket == nil then return nil end -- Wait for at least one byte local ok, err, errno = self.socket:fill(1, 0) if not ok then if errno == ce.ETIMEDOUT then local deadline = timeout and monotime()+timeout if cqueues.poll(self.socket, timeout) ~= timeout then return self:get_next_incoming_stream(deadline and deadline-monotime()) end end return nil, err, errno end local stream = h1_stream.new(self) self.pipeline:push(stream) self.req_locked = stream return stream end function connection_methods:read_request_line(timeout) local deadline = timeout and (monotime()+timeout) local preline local line, err, errno = self.socket:xread("*L", timeout) if line == "\r\n" then -- RFC 7230 3.5: a server that is expecting to receive and parse a request-line -- SHOULD ignore at least one empty line (CRLF) received prior to the request-line. 
preline = line line, err, errno = self.socket:xread("*L", deadline and (deadline-monotime())) end if line == nil then if preline then local ok, errno2 = self.socket:unget(preline) if not ok then return nil, onerror(self.socket, "unget", errno2) end end return nil, err, errno end local method, target, httpversion = line:match("^(%w+) (%S+) HTTP/(1%.[01])\r\n$") if not method then self.socket:seterror("r", ce.EILSEQ) local ok, errno2 = self.socket:unget(line) if not ok then return nil, onerror(self.socket, "unget", errno2) end if preline then ok, errno2 = self.socket:unget(preline) if not ok then return nil, onerror(self.socket, "unget", errno2) end end return nil, onerror(self.socket, "read_request_line", ce.EILSEQ) end httpversion = httpversion == "1.0" and 1.0 or 1.1 -- Avoid tonumber() due to locale issues return method, target, httpversion end function connection_methods:read_status_line(timeout) local line, err, errno = self.socket:xread("*L", timeout) if line == nil then return nil, err, errno end local httpversion, status_code, reason_phrase = line:match("^HTTP/(1%.[01]) (%d%d%d) (.*)\r\n$") if not httpversion then self.socket:seterror("r", ce.EILSEQ) local ok, errno2 = self.socket:unget(line) if not ok then return nil, onerror(self.socket, "unget", errno2) end return nil, onerror(self.socket, "read_status_line", ce.EILSEQ) end httpversion = httpversion == "1.0" and 1.0 or 1.1 -- Avoid tonumber() due to locale issues return httpversion, status_code, reason_phrase end function connection_methods:read_header(timeout) local line, err, errno = self.socket:xread("*h", timeout) if line == nil then -- Note: the *h read returns *just* nil when data is a non-mime compliant header if err == nil then local pending_bytes = self.socket:pending() -- check if we're at end of headers if pending_bytes >= 2 then local peek = assert(self.socket:xread(2, "b", 0)) local ok, errno2 = self.socket:unget(peek) if not ok then return nil, onerror(self.socket, "unget", errno2) end if peek == "\r\n" then return nil end end if pending_bytes > 0 then self.socket:seterror("r", ce.EILSEQ) return nil, onerror(self.socket, "read_header", ce.EILSEQ) end end return nil, err, errno end -- header fields can have optional surrounding whitespace --[[ RFC 7230 3.2.4: No whitespace is allowed between the header field-name and colon. In the past, differences in the handling of such whitespace have led to security vulnerabilities in request routing and response handling. A server MUST reject any received request message that contains whitespace between a header field-name and colon with a response code of 400 (Bad Request). 
A proxy MUST remove any such whitespace from a response message before forwarding the message downstream.]] local key, val = line:match("^([^%s:]+):[ \t]*(.-)[ \t]*$") if not key then self.socket:seterror("r", ce.EILSEQ) local ok, errno2 = self.socket:unget(line) if not ok then return nil, onerror(self.socket, "unget", errno2) end return nil, onerror(self.socket, "read_header", ce.EILSEQ) end return key, val end function connection_methods:read_headers_done(timeout) local crlf, err, errno = self.socket:xread(2, timeout) if crlf == "\r\n" then return true elseif crlf ~= nil or (err == nil and self.socket:pending() > 0) then self.socket:seterror("r", ce.EILSEQ) if crlf then local ok, errno2 = self.socket:unget(crlf) if not ok then return nil, onerror(self.socket, "unget", errno2) end end return nil, onerror(self.socket, "read_headers_done", ce.EILSEQ) else return nil, err, errno end end -- pass a negative length for *up to* that number of bytes function connection_methods:read_body_by_length(len, timeout) assert(type(len) == "number") return self.socket:xread(len, timeout) end function connection_methods:read_body_till_close(timeout) return self.socket:xread("*a", timeout) end function connection_methods:read_body_chunk(timeout) local deadline = timeout and (monotime()+timeout) local chunk_header, err, errno = self.socket:xread("*L", timeout) if chunk_header == nil then return nil, err, errno end local chunk_size, chunk_ext = chunk_header:match("^(%x+) *(.-)\r\n") if chunk_size == nil then self.socket:seterror("r", ce.EILSEQ) local unget_ok1, unget_errno1 = self.socket:unget(chunk_header) if not unget_ok1 then return nil, onerror(self.socket, "unget", unget_errno1) end return nil, onerror(self.socket, "read_body_chunk", ce.EILSEQ) elseif #chunk_size > 8 then self.socket:seterror("r", ce.E2BIG) return nil, onerror(self.socket, "read_body_chunk", ce.E2BIG) end chunk_size = tonumber(chunk_size, 16) if chunk_ext == "" then chunk_ext = nil end if chunk_size == 0 then -- you MUST read trailers after this! return false, chunk_ext else local ok, err2, errno2 = self.socket:fill(chunk_size+2, 0) if not ok then local unget_ok1, unget_errno1 = self.socket:unget(chunk_header) if not unget_ok1 then return nil, onerror(self.socket, "unget", unget_errno1) end if errno2 == ce.ETIMEDOUT then timeout = deadline and deadline-monotime() if cqueues.poll(self.socket, timeout) ~= timeout then -- retry return self:read_body_chunk(deadline and deadline-monotime()) end elseif err2 == nil then self.socket:seterror("r", ce.EILSEQ) return nil, onerror(self.socket, "read_body_chunk", ce.EILSEQ) end return nil, err2, errno2 end -- if `fill` succeeded these shouldn't be able to fail local chunk_data = assert(self.socket:xread(chunk_size, "b", 0)) local crlf = assert(self.socket:xread(2, "b", 0)) if crlf ~= "\r\n" then self.socket:seterror("r", ce.EILSEQ) local unget_ok3, unget_errno3 = self.socket:unget(crlf) if not unget_ok3 then return nil, onerror(self.socket, "unget", unget_errno3) end local unget_ok2, unget_errno2 = self.socket:unget(chunk_data) if not unget_ok2 then return nil, onerror(self.socket, "unget", unget_errno2) end local unget_ok1, unget_errno1 = self.socket:unget(chunk_header) if not unget_ok1 then return nil, onerror(self.socket, "unget", unget_errno1) end return nil, onerror(self.socket, "read_body_chunk", ce.EILSEQ) end -- Success! 
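-- Example (illustrative): wire input such as "b\r\nhello world\r\n" yields
-- chunk_data == "hello world" and chunk_ext == nil, while the terminating
-- "0\r\n" chunk makes this method return false so that the caller knows to
-- read the trailers next.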
return chunk_data, chunk_ext end end function connection_methods:write_request_line(method, target, httpversion, timeout) assert(method:match("^[^ \r\n]+$")) assert(target:match("^[^ \r\n]+$")) assert(httpversion == 1.0 or httpversion == 1.1) local line = string.format("%s %s HTTP/%s\r\n", method, target, httpversion == 1.0 and "1.0" or "1.1") local ok, err, errno = self.socket:xwrite(line, "f", timeout) if not ok then return nil, err, errno end return true end function connection_methods:write_status_line(httpversion, status_code, reason_phrase, timeout) assert(httpversion == 1.0 or httpversion == 1.1) assert(status_code:match("^[1-9]%d%d$"), "invalid status code") assert(type(reason_phrase) == "string" and reason_phrase:match("^[^\r\n]*$"), "invalid reason phrase") local line = string.format("HTTP/%s %s %s\r\n", httpversion == 1.0 and "1.0" or "1.1", status_code, reason_phrase) local ok, err, errno = self.socket:xwrite(line, "f", timeout) if not ok then return nil, err, errno end return true end function connection_methods:write_header(k, v, timeout) assert(type(k) == "string" and k:match("^[^:\r\n]+$"), "field name invalid") assert(type(v) == "string" and v:sub(-1, -1) ~= "\n" and not v:match("\n[^ ]"), "field value invalid") local ok, err, errno = self.socket:xwrite(k..": "..v.."\r\n", "f", timeout) if not ok then return nil, err, errno end return true end function connection_methods:write_headers_done(timeout) -- flushes write buffer local ok, err, errno = self.socket:xwrite("\r\n", "n", timeout) if not ok then return nil, err, errno end return true end function connection_methods:write_body_chunk(chunk, chunk_ext, timeout) assert(chunk_ext == nil, "chunk extensions not supported") local data = string.format("%x\r\n", #chunk) .. chunk .. "\r\n" -- flushes write buffer local ok, err, errno = self.socket:xwrite(data, "n", timeout) if not ok then return nil, err, errno end return true end function connection_methods:write_body_last_chunk(chunk_ext, timeout) assert(chunk_ext == nil, "chunk extensions not supported") -- no flush; writing trailers (via write_headers_done) will do that local ok, err, errno = self.socket:xwrite("0\r\n", "f", timeout) if not ok then return nil, err, errno end return true end function connection_methods:write_body_plain(body, timeout) -- flushes write buffer local ok, err, errno = self.socket:xwrite(body, "n", timeout) if not ok then return nil, err, errno end return true end return { new = new_connection; methods = connection_methods; mt = connection_mt; } lua-http-0.4/http/h1_reason_phrases.lua000066400000000000000000000041131400726324600202040ustar00rootroot00000000000000-- This list should be kept in sync with IANA. 
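-- Example (illustrative) lookups, including the metatable fallback for codes
-- without an assigned phrase:
--
--   local reason_phrases = require "http.h1_reason_phrases"
--   print(reason_phrases["404"])  --> Not Found
--   print(reason_phrases["499"])  --> Unassigned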
-- http://www.iana.org/assignments/http-status-codes local reason_phrases = setmetatable({ ["100"] = "Continue"; ["101"] = "Switching Protocols"; ["102"] = "Processing"; ["103"] = "Early Hints"; ["200"] = "OK"; ["201"] = "Created"; ["202"] = "Accepted"; ["203"] = "Non-Authoritative Information"; ["204"] = "No Content"; ["205"] = "Reset Content"; ["206"] = "Partial Content"; ["207"] = "Multi-Status"; ["208"] = "Already Reported"; ["226"] = "IM Used"; ["300"] = "Multiple Choices"; ["301"] = "Moved Permanently"; ["302"] = "Found"; ["303"] = "See Other"; ["304"] = "Not Modified"; ["305"] = "Use Proxy"; ["307"] = "Temporary Redirect"; ["308"] = "Permanent Redirect"; ["400"] = "Bad Request"; ["401"] = "Unauthorized"; ["402"] = "Payment Required"; ["403"] = "Forbidden"; ["404"] = "Not Found"; ["405"] = "Method Not Allowed"; ["406"] = "Not Acceptable"; ["407"] = "Proxy Authentication Required"; ["408"] = "Request Timeout"; ["409"] = "Conflict"; ["410"] = "Gone"; ["411"] = "Length Required"; ["412"] = "Precondition Failed"; ["413"] = "Request Entity Too Large"; ["414"] = "Request-URI Too Long"; ["415"] = "Unsupported Media Type"; ["416"] = "Requested Range Not Satisfiable"; ["417"] = "Expectation Failed"; ["418"] = "I'm a teapot"; -- not in IANA registry ["421"] = "Misdirected Request"; ["422"] = "Unprocessable Entity"; ["423"] = "Locked"; ["424"] = "Failed Dependency"; ["426"] = "Upgrade Required"; ["428"] = "Precondition Required"; ["429"] = "Too Many Requests"; ["431"] = "Request Header Fields Too Large"; ["451"] = "Unavailable For Legal Reasons"; ["500"] = "Internal Server Error"; ["501"] = "Not Implemented"; ["502"] = "Bad Gateway"; ["503"] = "Service Unavailable"; ["504"] = "Gateway Timeout"; ["505"] = "HTTP Version Not Supported"; ["506"] = "Variant Also Negotiates"; ["507"] = "Insufficient Storage"; ["508"] = "Loop Detected"; ["510"] = "Not Extended"; ["511"] = "Network Authentication Required"; }, {__index = function() return "Unassigned" end}) return reason_phrases lua-http-0.4/http/h1_reason_phrases.tld000066400000000000000000000000401400726324600202010ustar00rootroot00000000000000reason_phrases: {string:string} lua-http-0.4/http/h1_stream.lua000066400000000000000000001014171400726324600164700ustar00rootroot00000000000000local cqueues = require "cqueues" local monotime = cqueues.monotime local cc = require "cqueues.condition" local ce = require "cqueues.errno" local new_fifo = require "fifo" local lpeg = require "lpeg" local http_patts = require "lpeg_patterns.http" local new_headers = require "http.headers".new local reason_phrases = require "http.h1_reason_phrases" local stream_common = require "http.stream_common" local util = require "http.util" local has_zlib, zlib = pcall(require, "http.zlib") --[[ Maximum amount of data to read during shutdown before giving up on a clean stream shutdown 500KB seems is a round number that is: - larger than most bandwidth-delay products - larger than most dynamically generated http documents]] local clean_shutdown_limit = 500*1024 local EOF = lpeg.P(-1) local Connection = lpeg.Ct(http_patts.Connection) * EOF local Content_Encoding = lpeg.Ct(http_patts.Content_Encoding) * EOF local Transfer_Encoding = lpeg.Ct(http_patts.Transfer_Encoding) * EOF local TE = lpeg.Ct(http_patts.TE) * EOF local function has(list, val) if list then for i=1, #list do if list[i] == val then return true end end end return false end local function has_any(list, val, ...) if has(list, val) then return true elseif (...) then return has(list, ...) 
else return false end end local stream_methods = { use_zlib = has_zlib; max_header_lines = 100; } for k,v in pairs(stream_common.methods) do stream_methods[k] = v end local stream_mt = { __name = "http.h1_stream"; __index = stream_methods; } function stream_mt:__tostring() return string.format("http.h1_stream{connection=%s;state=%q}", tostring(self.connection), self.state) end local function new_stream(connection) local self = setmetatable({ connection = connection; type = connection.type; state = "idle"; stats_sent = 0; stats_recv = 0; pipeline_cond = cc.new(); -- signalled when stream reaches front of pipeline req_method = nil; -- string peer_version = nil; -- 1.0 or 1.1 has_main_headers = false; headers_in_progress = nil; headers_fifo = new_fifo(); headers_cond = cc.new(); chunk_fifo = new_fifo(); chunk_cond = cc.new(); body_write_type = nil; -- "closed", "chunked", "length" or "missing" body_write_left = nil; -- integer: only set when body_write_type == "length" body_write_deflate_encoding = nil; body_write_deflate = nil; -- nil or stateful deflate closure body_read_type = nil; body_read_inflate = nil; close_when_done = nil; -- boolean }, stream_mt) return self end local valid_states = { ["idle"] = 1; -- initial ["open"] = 2; -- have sent or received headers; haven't sent body yet ["half closed (local)"] = 3; -- have sent whole body ["half closed (remote)"] = 3; -- have received whole body ["closed"] = 4; -- complete } function stream_methods:set_state(new) local new_order = assert(valid_states[new]) local old = self.state if new_order <= valid_states[old] then error("invalid state progression ('"..old.."' to '"..new.."')") end local have_lock, want_no_lock local blocking_pipeline, notify_pipeline if self.type == "server" then -- If we have just finished reading the request then remove our read lock have_lock = old == "idle" or old == "open" or old == "half closed (local)" want_no_lock = new == "half closed (remote)" or new == "closed" -- If we have just finished writing the response blocking_pipeline = old == "idle" or old == "open" or old == "half closed (remote)" notify_pipeline = blocking_pipeline and (new == "half closed (local)" or new == "closed") else -- client -- If we have just finished writing the request then remove our write lock have_lock = old == "open" or old == "half closed (remote)" want_no_lock = new == "half closed (local)" or new == "closed" -- If we have just finished reading the response; blocking_pipeline = old == "idle" or old == "open" or old == "half closed (local)" notify_pipeline = blocking_pipeline and (new == "half closed (remote)" or new == "closed") end self.state = new if have_lock then assert(self.connection.req_locked == self) if want_no_lock then self.connection.req_locked = nil self.connection.req_cond:signal(1) end end local pipeline_empty if notify_pipeline then assert(self.connection.pipeline:pop() == self) local next_stream = self.connection.pipeline:peek() if next_stream then pipeline_empty = false next_stream.pipeline_cond:signal() else pipeline_empty = true end else pipeline_empty = not blocking_pipeline end if self.close_when_done then if new == "half closed (remote)" then self.connection:shutdown("r") elseif new == "half closed (local)" and self.type == "server" then -- NOTE: Do not shutdown("w") the socket when a client moves to -- "half closed (local)", many servers will close a connection -- immediately if a client closes their write stream self.connection:shutdown("w") elseif new == "closed" then self.connection:shutdown() end end if 
want_no_lock and pipeline_empty then self.connection:onidle()(self.connection) end end local bad_request_headers = new_headers() bad_request_headers:append(":status", "400") local server_error_headers = new_headers() server_error_headers:append(":status", "503") function stream_methods:shutdown() if self.state == "idle" then self:set_state("closed") else if self.type == "server" and (self.state == "open" or self.state == "half closed (remote)") then -- Make sure we're at the front of the pipeline if self.connection.pipeline:peek() ~= self then -- FIXME: shouldn't have time-taking operation here self.pipeline_cond:wait() -- wait without a timeout should never fail assert(self.connection.pipeline:peek() == self) end if not self.body_write_type then -- Can send an automatic error response local error_headers if self.connection:error("r") == ce.EILSEQ then error_headers = bad_request_headers else error_headers = server_error_headers end self:write_headers(error_headers, true, 0) end end -- read any remaining available response and get out of the way local start = self.stats_recv while (self.state == "open" or self.state == "half closed (local)") and (self.stats_recv - start) < clean_shutdown_limit do if not self:step(0) then break end end if self.state ~= "closed" then -- This is a bad situation: we are trying to shutdown a connection that has the body partially sent -- Especially in the case of Connection: close, where closing indicates EOF, -- this will result in a client only getting a partial response. -- Could also end up here if a client sending headers fails. if self.connection.socket then self.connection.socket:shutdown() end self:set_state("closed") end end return true end function stream_methods:step(timeout) if self.state == "open" or self.state == "half closed (local)" or (self.state == "idle" and self.type == "server") then if self.connection.socket == nil then return nil, ce.strerror(ce.EPIPE), ce.EPIPE end if not self.has_main_headers then local headers, err, errno = self:read_headers(timeout) if headers == nil then return nil, err, errno end self.headers_fifo:push(headers) self.headers_cond:signal(1) return true end if self.body_read_left ~= 0 then local chunk, err, errno = self:read_next_chunk(timeout) if chunk == nil then if err == nil then return true end return nil, err, errno end self.chunk_fifo:push(chunk) self.chunk_cond:signal() return true end if self.body_read_type == "chunked" then local trailers, err, errno = self:read_headers(timeout) if trailers == nil then return nil, err, errno end self.headers_fifo:push(trailers) self.headers_cond:signal(1) return true end end if self.state == "half closed (remote)" then return nil, ce.strerror(ce.EIO), ce.EIO end return true end -- read_headers may be called more than once for a stream -- e.g. 
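--[==[ Editor's sketch (not part of the module): set_state only permits forward
progression through the numeric ordering in valid_states. A simplified,
self-contained restatement of that check:

	local order = {
		["idle"] = 1;
		["open"] = 2;
		["half closed (local)"] = 3;
		["half closed (remote)"] = 3;
		["closed"] = 4;
	}
	local function may_progress(from, to)
		return order[to] > order[from]
	end
	assert(may_progress("idle", "open"))
	assert(may_progress("open", "half closed (remote)"))
	assert(not may_progress("closed", "open")) -- set_state would raise an error here
]==]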
for 100 Continue -- this function *should never throw* under normal operation function stream_methods:read_headers(timeout) local deadline = timeout and (monotime()+timeout) if self.state == "closed" or self.state == "half closed (remote)" then return nil end local status_code local is_trailers = self.body_read_type == "chunked" local headers = self.headers_in_progress if not headers then if is_trailers then headers = new_headers() elseif self.type == "server" then if self.state == "half closed (local)" then return nil end local method, target, httpversion = self.connection:read_request_line(0) if method == nil then if httpversion == ce.ETIMEDOUT then timeout = deadline and deadline-monotime() if cqueues.poll(self.connection.socket, timeout) ~= timeout then return self:read_headers(deadline and deadline-monotime()) end end return nil, target, httpversion end self.req_method = method self.peer_version = httpversion headers = new_headers() headers:append(":method", method) if method == "CONNECT" then headers:append(":authority", target) else headers:append(":path", target) end headers:append(":scheme", self:checktls() and "https" or "http") self:set_state("open") else -- client -- Make sure we're at front of connection pipeline if self.connection.pipeline:peek() ~= self then assert(cqueues.running(), "cannot wait for condition if not within a cqueues coroutine") if cqueues.poll(self.pipeline_cond, timeout) == timeout then return nil, ce.strerror(ce.ETIMEDOUT), ce.ETIMEDOUT end assert(self.connection.pipeline:peek() == self) end local httpversion, reason_phrase httpversion, status_code, reason_phrase = self.connection:read_status_line(0) if httpversion == nil then if reason_phrase == ce.ETIMEDOUT then timeout = deadline and deadline-monotime() if cqueues.poll(self.connection.socket, timeout) ~= timeout then return self:read_headers(deadline and deadline-monotime()) end elseif status_code == nil then return nil, ce.strerror(ce.EPIPE), ce.EPIPE end return nil, status_code, reason_phrase end self.peer_version = httpversion headers = new_headers() headers:append(":status", status_code) -- reason phase intentionally does not exist in HTTP2; discard for consistency end self.headers_in_progress = headers else if not is_trailers and self.type == "client" then status_code = headers:get(":status") end end -- Use while loop for lua 5.1 compatibility while true do if headers:len() >= self.max_header_lines then return nil, ce.strerror(ce.E2BIG), ce.E2BIG end local k, v, errno = self.connection:read_header(0) if k == nil then if v ~= nil then if errno == ce.ETIMEDOUT then timeout = deadline and deadline-monotime() if cqueues.poll(self.connection.socket, timeout) ~= timeout then return self:read_headers(deadline and deadline-monotime()) end end return nil, v, errno end break -- Success: End of headers. 
end k = k:lower() -- normalise to lower case if k == "host" and not is_trailers then k = ":authority" end headers:append(k, v) end do local ok, err, errno = self.connection:read_headers_done(0) if ok == nil then if errno == ce.ETIMEDOUT then timeout = deadline and deadline-monotime() if cqueues.poll(self.connection.socket, timeout) ~= timeout then return self:read_headers(deadline and deadline-monotime()) end elseif err == nil then return nil, ce.strerror(ce.EPIPE), ce.EPIPE end return nil, err, errno end self.headers_in_progress = nil self.has_main_headers = status_code == nil or status_code:sub(1,1) ~= "1" or status_code == "101" end do -- if client is sends `Connection: close`, server knows it can close at end of response local h = headers:get_comma_separated("connection") if h then local connection_header = Connection:match(h) if connection_header and has(connection_header, "close") then self.close_when_done = true end end end -- Now guess if there's a body... -- RFC 7230 Section 3.3.3 local no_body if is_trailers then -- there cannot be a body after trailers no_body = true elseif self.type == "client" and ( self.req_method == "HEAD" or status_code == "204" or status_code == "304" ) then no_body = true elseif self.type == "client" and ( status_code:sub(1,1) == "1" ) then -- note: different to spec: -- we don't want to go into body reading mode; -- we want to stay in header modes no_body = false if status_code == "101" then self.body_read_type = "close" end elseif headers:has("transfer-encoding") then no_body = false local transfer_encoding = Transfer_Encoding:match(headers:get_comma_separated("transfer-encoding")) local n = #transfer_encoding local last_transfer_encoding = transfer_encoding[n][1] if last_transfer_encoding == "chunked" then self.body_read_type = "chunked" n = n - 1 if n == 0 then last_transfer_encoding = nil else last_transfer_encoding = transfer_encoding[n][1] end else self.body_read_type = "close" end if last_transfer_encoding == "gzip" or last_transfer_encoding == "deflate" or last_transfer_encoding == "x-gzip" then self.body_read_inflate = zlib.inflate() n = n - 1 end if n > 0 then return nil, "unknown transfer-encoding" end elseif headers:has("content-length") then local cl = tonumber(headers:get("content-length"), 10) if cl == nil then return nil, "invalid content-length" end if cl == 0 then no_body = true else no_body = false self.body_read_type = "length" self.body_read_left = cl end elseif self.type == "server" then -- A request defaults to no body no_body = true else -- client no_body = false self.body_read_type = "close" end if self.use_zlib and self.type == "server" and self.state == "open" and not is_trailers and headers:has("te") then local te = TE:match(headers:get_comma_separated("te")) for _, v in ipairs(te) do local tcoding = v[1] if (tcoding == "gzip" or tcoding == "x-gzip" or tcoding == "deflate") and v.q ~= 0 then v.q = nil self.body_write_deflate_encoding = v self.body_write_deflate = zlib.deflate() break end end end if no_body then if self.state == "open" then self:set_state("half closed (remote)") else -- self.state == "half closed (local)" self:set_state("closed") end end return headers end function stream_methods:get_headers(timeout) if self.headers_fifo:length() > 0 then return self.headers_fifo:pop() else if self.state == "closed" or self.state == "half closed (remote)" then return nil end local deadline = timeout and monotime()+timeout local ok, err, errno = self:step(timeout) if not ok then return nil, err, errno end return 
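--[==[ Editor's sketch (not part of the module): a simplified, standalone
restatement of the body-framing precedence applied above (RFC 7230 Section 3.3.3):
a final "chunked" transfer-encoding wins, then content-length, and otherwise a
server assumes no request body while a client reads the response until the
connection closes:

	local function guess_body_framing(is_server, has_chunked, content_length)
		if has_chunked then
			return "chunked"
		elseif content_length then
			return "length", content_length
		elseif is_server then
			return "none" -- requests default to no body
		else
			return "close" -- responses are delimited by EOF
		end
	end
	assert(guess_body_framing(false, true) == "chunked")
	assert(select(2, guess_body_framing(false, false, 42)) == 42)
	assert(guess_body_framing(true, false) == "none")
]==]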
self:get_headers(deadline and deadline-monotime()) end end local ignore_fields = { [":authority"] = true; [":method"] = true; [":path"] = true; [":scheme"] = true; [":status"] = true; [":protocol"] = true; -- from RFC 8441 -- fields written manually in :write_headers ["connection"] = true; ["content-length"] = true; ["transfer-encoding"] = true; } -- Writes the given headers to the stream; optionally ends the stream at end of headers -- -- We're free to insert any of the "Hop-by-hop" headers (as listed in RFC 2616 Section 13.5.1) -- Do this by directly writing the headers, rather than adding them to the passed headers object, -- as we don't want to modify the caller owned object. -- Note from RFC 7230 Appendix 2: -- "hop-by-hop" header fields are required to appear in the Connection header field; -- just because they're defined as hop-by-hop doesn't exempt them. function stream_methods:write_headers(headers, end_stream, timeout) local deadline = timeout and (monotime()+timeout) assert(headers, "missing argument: headers") -- Validate up front local connection_header do local h = headers:get_comma_separated("connection") if h then connection_header = Connection:match(h) if not connection_header then error("invalid connection header") end else connection_header = {} end end local transfer_encoding_header do local h = headers:get_comma_separated("transfer-encoding") if h then transfer_encoding_header = Transfer_Encoding:match(h) if not transfer_encoding_header then error("invalid transfer-encoding header") end end end assert(type(end_stream) == "boolean", "'end_stream' MUST be a boolean") if self.state == "closed" or self.state == "half closed (local)" or self.connection.socket == nil then return nil, ce.strerror(ce.EPIPE), ce.EPIPE end local status_code, method local is_trailers if self.body_write_type == "chunked" then -- we are writing trailers; close off body is_trailers = true local ok, err, errno = self.connection:write_body_last_chunk(nil, 0) if not ok then return nil, err, errno end elseif self.type == "server" then if self.state == "idle" then error("cannot write headers when stream is idle") end status_code = headers:get(":status") -- RFC 7231 Section 6.2: -- Since HTTP/1.0 did not define any 1xx status codes, a server MUST NOT send a 1xx response to an HTTP/1.0 client. 
if status_code and status_code:sub(1,1) == "1" and self.peer_version < 1.1 then error("a server MUST NOT send a 1xx response to an HTTP/1.0 client") end -- Make sure we're at the front of the pipeline if self.connection.pipeline:peek() ~= self then assert(cqueues.running(), "cannot wait for condition if not within a cqueues coroutine") headers = headers:clone() -- don't want user to edit it and send wrong headers if cqueues.poll(self.pipeline_cond, timeout) == timeout then return nil, ce.strerror(ce.ETIMEDOUT), ce.ETIMEDOUT end assert(self.connection.pipeline:peek() == self) end if status_code then -- Should send status line local reason_phrase = reason_phrases[status_code] local version = math.min(self.connection.version, self.peer_version) local ok, err, errno = self.connection:write_status_line(version, status_code, reason_phrase, 0) if not ok then return nil, err, errno end end else -- client if self.state == "idle" then method = assert(headers:get(":method"), "missing method") self.req_method = method local target if method == "CONNECT" then target = assert(headers:get(":authority"), "missing authority") assert(not headers:has(":path"), "CONNECT requests should not have a path") else -- RFC 7230 Section 5.4: A client MUST send a Host header field in all HTTP/1.1 request messages. assert(self.connection.version < 1.1 or headers:has(":authority"), "missing authority") target = assert(headers:get(":path"), "missing path") end if self.connection.req_locked then -- Wait until previous request has been fully written assert(cqueues.running(), "cannot wait for condition if not within a cqueues coroutine") headers = headers:clone() -- don't want user to edit it and send wrong headers if cqueues.poll(self.connection.req_cond, timeout) == timeout then return nil, ce.strerror(ce.ETIMEDOUT), ce.ETIMEDOUT end assert(self.connection.req_locked == nil) end self.connection.pipeline:push(self) self.connection.req_locked = self -- write request line local ok, err, errno = self.connection:write_request_line(method, target, self.connection.version, 0) if not ok then return nil, err, errno end self:set_state("open") else assert(self.state == "open") end end local cl = headers:get("content-length") -- ignore subsequent content-length values local add_te_gzip = false if self.req_method == "CONNECT" and (self.type == "client" or status_code == "200") then -- successful CONNECT requests always continue until the connection is closed self.body_write_type = "close" self.close_when_done = true if self.type == "server" and (cl or transfer_encoding_header) then -- RFC 7231 Section 4.3.6: -- A server MUST NOT send any Transfer-Encoding or Content-Length header -- fields in a 2xx (Successful) response to CONNECT. error("Content-Length and Transfer-Encoding not allowed with successful CONNECT response") end elseif self.type == "server" and status_code and status_code:sub(1, 1) == "1" then assert(not end_stream, "cannot end stream directly after 1xx status code") -- A server MUST NOT send a Content-Length header field in any response -- with a status code of 1xx (Informational) or 204 (No Content) if cl then error("Content-Length not allowed in response with 1xx status code") end if status_code == "101" then self.body_write_type = "switched protocol" end elseif not self.body_write_type then -- only figure out how to send the body if we haven't figured it out yet... 
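--[==[ Editor's sketch (not part of the module): on the client side, write_headers
expects the pseudo-header form used throughout lua-http. Assuming `stream` was
obtained from a client http.h1_connection via :new_stream() (error handling
elided):

	local new_headers = require "http.headers".new
	local req_headers = new_headers()
	req_headers:append(":method", "GET")
	req_headers:append(":scheme", "http")
	req_headers:append(":authority", "example.com")
	req_headers:append(":path", "/")
	req_headers:append("user-agent", "lua-http-sketch")
	assert(stream:write_headers(req_headers, true)) -- end_stream=true: no request body
]==]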
TODO: use better check if self.close_when_done == nil then if self.connection.version == 1.0 or (self.type == "server" and self.peer_version == 1.0) then self.close_when_done = not has(connection_header, "keep-alive") else self.close_when_done = has(connection_header, "close") end end if cl then -- RFC 7230 Section 3.3.2: -- A sender MUST NOT send a Content-Length header field in any -- message that contains a Transfer-Encoding header field. if transfer_encoding_header then error("Content-Length not allowed in message with a transfer-encoding") elseif self.type == "server" then -- A server MUST NOT send a Content-Length header field in any response -- with a status code of 1xx (Informational) or 204 (No Content) if status_code == "204" then error("Content-Length not allowed in response with 204 status code") end end end if end_stream then -- Make sure 'end_stream' is respected if self.type == "server" and (self.req_method == "HEAD" or status_code == "304") then self.body_write_type = "missing" elseif transfer_encoding_header then if transfer_encoding_header[#transfer_encoding_header][1] == "chunked" then -- Set body type to chunked so that we know how to end the stream self.body_write_type = "chunked" else error("unknown transfer-encoding") end else -- By adding `content-length: 0` we can be sure that our peer won't wait for a body -- This is somewhat suggested in RFC 7231 section 8.1.2 if cl then -- might already have content-length: 0 assert(cl:match("^ *0+ *$"), "cannot end stream after headers if you have a non-zero content-length") elseif self.type ~= "client" or (method ~= "GET" and method ~= "HEAD") then cl = "0" end self.body_write_type = "length" self.body_write_left = 0 end else -- The order of these checks matter: -- chunked must be checked first, as it totally changes the body format -- content-length is next -- closing the connection is ordered after length -- this potentially means an early EOF can be caught if a connection -- closure occurs before body size reaches the specified length -- for HTTP/1.1, we can fall-back to a chunked encoding -- chunked is mandatory to implement in HTTP/1.1 -- this requires amending the transfer-encoding header -- for an HTTP/1.0 server, we fall-back to closing the connection at the end of the stream -- else is an HTTP/1.0 client with `connection: keep-alive` but no other header indicating the body form. -- this cannot be reasonably handled, so throw an error. if transfer_encoding_header and transfer_encoding_header[#transfer_encoding_header][1] == "chunked" then self.body_write_type = "chunked" elseif cl then self.body_write_type = "length" self.body_write_left = assert(tonumber(cl, 10), "invalid content-length") elseif self.close_when_done then -- ordered after length delimited self.body_write_type = "close" elseif self.connection.version == 1.1 and (self.type == "client" or self.peer_version == 1.1) then self.body_write_type = "chunked" -- transfer-encodings are ordered. 
we need to make sure we place "chunked" last if not transfer_encoding_header then transfer_encoding_header = {nil} -- preallocate end table.insert(transfer_encoding_header, {"chunked"}) elseif self.type == "server" then -- default for servers if they don't send a particular header self.body_write_type = "close" self.close_when_done = true else error("a client cannot send a body with connection: keep-alive without indicating body delimiter in headers") end end -- Add 'Connection: close' header if we're going to close after if self.close_when_done and not has(connection_header, "close") then table.insert(connection_header, "close") end if self.use_zlib then if self.type == "client" then -- If we support zlib; add a "te" header indicating we support the gzip transfer-encoding add_te_gzip = true else -- server -- Whether to use transfer-encoding: gzip if self.body_write_deflate -- only use if client sent the TE header allowing it and not cl -- not allowed to use both content-length *and* transfer-encoding and not end_stream -- no point encoding body if there isn't one and not has_any(Content_Encoding:match(headers:get_comma_separated("content-encoding") or ""), "gzip", "x-gzip", "deflate") -- don't bother if content-encoding is already gzip/deflate -- TODO: need to take care of quality suffixes ("deflate; q=0.5") then if transfer_encoding_header then local n = #transfer_encoding_header -- Possibly need to insert before "chunked" if transfer_encoding_header[n][1] == "chunked" then transfer_encoding_header[n+1] = transfer_encoding_header[n] transfer_encoding_header[n] = self.body_write_deflate_encoding else transfer_encoding_header[n+1] = self.body_write_deflate_encoding end else transfer_encoding_header = {self.body_write_deflate_encoding} end else -- discard the encoding context (if there was one) self.body_write_deflate_encoding = nil self.body_write_deflate = nil end end end end for name, value in headers:each() do if not ignore_fields[name] then local ok, err, errno = self.connection:write_header(name, value, 0) if not ok then return nil, err, errno end elseif name == ":authority" then -- for CONNECT requests, :authority is the path if self.req_method ~= "CONNECT" then -- otherwise it's the Host header local ok, err, errno = self.connection:write_header("host", value, 0) if not ok then return nil, err, errno end end end end if add_te_gzip then -- Doesn't matter if it gets added more than once. if not has(connection_header, "te") then table.insert(connection_header, "te") end local ok, err, errno = self.connection:write_header("te", "gzip, deflate", 0) if not ok then return nil, err, errno end end -- Write transfer-encoding, content-length and connection headers separately if transfer_encoding_header and transfer_encoding_header[1] then -- Add to connection header if not has(connection_header, "transfer-encoding") then table.insert(connection_header, "transfer-encoding") end local value = {} for i, v in ipairs(transfer_encoding_header) do local params = {v[1]} for k, vv in pairs(v) do if type(k) == "string" then params[#params+1] = k .. "=" .. 
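--[==[ Editor's sketch (not part of the module): the transfer-encoding list is
ordered and "chunked" must stay last, which is why the gzip coding is spliced in
*before* it above. A standalone illustration of serialising a list shaped like
transfer_encoding_header:

	local codings = { {"gzip"}, {"chunked"} }
	local parts = {}
	for i, v in ipairs(codings) do
		parts[i] = v[1]
	end
	assert(table.concat(parts, ",") == "gzip,chunked")
]==]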
util.maybe_quote(vv) end end value[i] = table.concat(params, ";") end value = table.concat(value, ",") local ok, err, errno = self.connection:write_header("transfer-encoding", value, 0) if not ok then return nil, err, errno end elseif cl then local ok, err, errno = self.connection:write_header("content-length", cl, 0) if not ok then return nil, err, errno end end if connection_header and connection_header[1] then local value = table.concat(connection_header, ",") local ok, err, errno = self.connection:write_header("connection", value, 0) if not ok then return nil, err, errno end end do local ok, err, errno = self.connection:write_headers_done(deadline and (deadline-monotime())) if not ok then return nil, err, errno end end if end_stream then if is_trailers then if self.state == "half closed (remote)" then self:set_state("closed") else self:set_state("half closed (local)") end else local ok, err, errno = self:write_chunk("", true) if not ok then return nil, err, errno end end end return true end function stream_methods:read_next_chunk(timeout) if self.state == "closed" or self.state == "half closed (remote)" then return nil end local end_stream local chunk, err, errno if self.body_read_type == "chunked" then local deadline = timeout and (monotime()+timeout) if self.body_read_left == 0 then chunk = false else chunk, err, errno = self.connection:read_body_chunk(timeout) end if chunk == false then -- last chunk, :read_headers should be called to get trailers self.body_read_left = 0 -- for API compat: attempt to read trailers local ok ok, err, errno = self:step(deadline and deadline-monotime()) if not ok then return nil, err, errno end return nil else end_stream = false if chunk == nil and err == nil then return nil, ce.strerror(ce.EPIPE), ce.EPIPE end end elseif self.body_read_type == "length" then local length_n = self.body_read_left if length_n > 0 then -- Read *upto* length_n bytes -- This function only has to read chunks; not the whole body chunk, err, errno = self.connection:read_body_by_length(-length_n, timeout) if chunk ~= nil then self.body_read_left = length_n - #chunk end_stream = (self.body_read_left == 0) end elseif length_n == 0 then chunk = "" end_stream = true else error("invalid length: "..tostring(length_n)) end elseif self.body_read_type == "close" then -- Use a big negative number instead of *a. 
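--[==[ Editor's sketch (not part of the module): for a chunked body, end_stream is
implemented by write_chunk("", true), which emits the terminating "0" chunk and an
empty trailer section via write_headers. Assuming `stream` is a server-side
h1_stream whose response headers were already written without end_stream:

	assert(stream:write_chunk("hello ", false))
	assert(stream:write_chunk("world\n", false))
	assert(stream:write_chunk("", true)) -- last-chunk plus empty trailers
]==]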
see https://github.com/wahern/cqueues/issues/89 chunk, err, errno = self.connection:read_body_by_length(-0x80000000, timeout) end_stream = chunk == nil and err == nil elseif self.body_read_type == nil then -- Might get here if haven't read headers yet, or if only headers so far have been 1xx codes local deadline = timeout and (monotime()+timeout) local headers headers, err, errno = self:read_headers(timeout) if not headers then return nil, err, errno end self.headers_fifo:push(headers) self.headers_cond:signal(1) return self:get_next_chunk(deadline and deadline-monotime()) else error("unknown body read type") end if chunk then if self.body_read_inflate then chunk = self.body_read_inflate(chunk, end_stream) end self.stats_recv = self.stats_recv + #chunk end if end_stream then if self.state == "half closed (local)" then self:set_state("closed") else self:set_state("half closed (remote)") end end return chunk, err, errno end function stream_methods:get_next_chunk(timeout) if self.chunk_fifo:length() > 0 then return self.chunk_fifo:pop() end return self:read_next_chunk(timeout) end function stream_methods:unget(str) self.chunk_fifo:insert(1, str) self.chunk_cond:signal() return true end local empty_headers = new_headers() function stream_methods:write_chunk(chunk, end_stream, timeout) if self.state == "idle" then error("cannot write chunk when stream is " .. self.state) elseif self.state == "closed" or self.state == "half closed (local)" or self.connection.socket == nil then return nil, ce.strerror(ce.EPIPE), ce.EPIPE elseif self.body_write_type == nil then error("cannot write body before headers") end if self.type == "client" then assert(self.connection.req_locked == self) else assert(self.connection.pipeline:peek() == self) end local orig_size = #chunk if self.body_write_deflate then chunk = self.body_write_deflate(chunk, end_stream) end if #chunk > 0 then if self.body_write_type == "chunked" then local deadline = timeout and monotime()+timeout local ok, err, errno = self.connection:write_body_chunk(chunk, nil, timeout) if not ok then return nil, err, errno end timeout = deadline and (deadline-monotime()) elseif self.body_write_type == "length" then assert(self.body_write_left >= #chunk, "invalid content-length") local ok, err, errno = self.connection:write_body_plain(chunk, timeout) if not ok then return nil, err, errno end self.body_write_left = self.body_write_left - #chunk elseif self.body_write_type == "close" then local ok, err, errno = self.connection:write_body_plain(chunk, timeout) if not ok then return nil, err, errno end elseif self.body_write_type ~= "missing" then error("unknown body writing method") end end self.stats_sent = self.stats_sent + orig_size if end_stream then if self.body_write_type == "chunked" then return self:write_headers(empty_headers, true, timeout) elseif self.body_write_type == "length" then assert(self.body_write_left == 0, "invalid content-length") end if self.state == "half closed (remote)" then self:set_state("closed") else self:set_state("half closed (local)") end end return true end return { new = new_stream; methods = stream_methods; mt = stream_mt; } lua-http-0.4/http/h2_connection.lua000066400000000000000000000433141400726324600173360ustar00rootroot00000000000000local cqueues = require "cqueues" local monotime = cqueues.monotime local cc = require "cqueues.condition" local ce = require "cqueues.errno" local rand = require "openssl.rand" local new_fifo = require "fifo" local band = require "http.bit".band local connection_common = require 
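--[==[ Editor's sketch (not part of the module): regardless of whether the peer
used chunked, content-length or connection-close framing, callers read the decoded
body through get_next_chunk until it returns nil. Assuming `stream` is an
h1_stream whose headers have already been received:

	local body = {}
	while true do
		local chunk, err, errno = stream:get_next_chunk()
		if chunk == nil then
			if err ~= nil then error(err) end -- a real error, not end-of-body
			break
		end
		body[#body+1] = chunk
	end
	body = table.concat(body)
]==]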
"http.connection_common" local onerror = connection_common.onerror local h2_error = require "http.h2_error" local h2_stream = require "http.h2_stream" local known_settings = h2_stream.known_settings local hpack = require "http.hpack" local h2_banned_ciphers = require "http.tls".banned_ciphers local spack = string.pack or require "compat53.string".pack -- luacheck: ignore 143 local sunpack = string.unpack or require "compat53.string".unpack -- luacheck: ignore 143 local assert = assert if _VERSION:match("%d+%.?%d*") < "5.3" then assert = require "compat53.module".assert end local function xor(a, b) return (a and b) or not (a or b) end local preface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" local default_settings = { [known_settings.HEADER_TABLE_SIZE] = 4096; [known_settings.ENABLE_PUSH] = true; [known_settings.MAX_CONCURRENT_STREAMS] = math.huge; [known_settings.INITIAL_WINDOW_SIZE] = 65535; [known_settings.MAX_FRAME_SIZE] = 16384; [known_settings.MAX_HEADER_LIST_SIZE] = math.huge; [known_settings.SETTINGS_ENABLE_CONNECT_PROTOCOL] = false; [known_settings.TLS_RENEG_PERMITTED] = 0; } local function merge_settings(tbl, new) for i=0x1, 0x6 do local v = new[i] if v ~= nil then tbl[i] = v end end end local connection_methods = {} for k,v in pairs(connection_common.methods) do connection_methods[k] = v end local connection_mt = { __name = "http.h2_connection"; __index = connection_methods; } function connection_mt:__tostring() return string.format("http.h2_connection{type=%q}", self.type) end -- Read bytes from the given socket looking for the http2 connection preface -- optionally ungets the bytes in case of failure local function socket_has_preface(socket, unget, timeout) local deadline = timeout and (monotime()+timeout) local bytes = "" local is_h2 = true while #bytes < #preface do -- read *up to* number of bytes left in preface local ok, err, errno = socket:xread(#bytes-#preface, deadline and (deadline-monotime())) if ok == nil then if err == nil then if #bytes == 0 then -- client immediately closed return end is_h2 = false break else return nil, err, errno end end bytes = bytes .. ok if bytes ~= preface:sub(1, #bytes) then is_h2 = false break end end if unget then local ok, errno = socket:unget(bytes) if not ok then return nil, onerror(socket, "unget", errno, 2) end end return is_h2 end local function new_connection(socket, conn_type, settings) if conn_type ~= "client" and conn_type ~= "server" then error('invalid connection type. must be "client" or "server"') end local ssl = socket:checktls() if ssl then local cipher = ssl:getCipherInfo() if h2_banned_ciphers[cipher.name] then h2_error.errors.INADEQUATE_SECURITY("bad cipher: " .. 
cipher.name) end end local self = setmetatable({ socket = socket; type = conn_type; version = 2; -- for compat with h1_connection streams = setmetatable({}, {__mode="kv"}); n_active_streams = 0; onidle_ = nil; stream0 = nil; -- store separately with a strong reference has_confirmed_preface = false; has_first_settings = false; had_eagain = false; -- For continuations need_continuation = nil; -- stream promised_stream = nil; -- stream recv_headers_end_stream = nil; recv_headers_buffer = nil; recv_headers_buffer_pos = nil; recv_headers_buffer_pad_len = nil; recv_headers_buffer_items = nil; recv_headers_buffer_length = nil; highest_odd_stream = -1; highest_odd_non_idle_stream = -1; highest_even_stream = -2; highest_even_non_idle_stream = -2; send_goaway_lowest = nil; recv_goaway_lowest = nil; recv_goaway = cc.new(); new_streams = new_fifo(); new_streams_cond = cc.new(); peer_settings = {}; peer_settings_cond = cc.new(); -- signaled when the peer has changed their settings acked_settings = {}; send_settings = {n = 0}; send_settings_ack_cond = cc.new(); -- for when server ACKs our settings send_settings_acked = 0; peer_flow_credits = 65535; -- 5.2.1 peer_flow_credits_change = cc.new(); encoding_context = nil; decoding_context = nil; pongs = {}; -- pending pings we've sent. keyed by opaque 8 byte payload }, connection_mt) self:new_stream(0) merge_settings(self.peer_settings, default_settings) merge_settings(self.acked_settings, default_settings) self.encoding_context = hpack.new(default_settings[known_settings.HEADER_TABLE_SIZE]) self.decoding_context = hpack.new(default_settings[known_settings.HEADER_TABLE_SIZE]) socket:setvbuf("full", math.huge) -- 'infinite' buffering; no write locks needed socket:setmode("b", "bna") -- writes that don't explicitly buffer will now flush the buffer. autoflush on socket:onerror(onerror) if self.type == "client" then assert(socket:xwrite(preface, "f", 0)) end assert(self.stream0:write_settings_frame(false, settings or {}, 0, "f")) -- note that the buffer is *not* flushed right now return self end function connection_methods:timeout() if not self.had_eagain then return 0 end return connection_common.methods.timeout(self) end local function handle_frame(self, typ, flag, streamid, payload, deadline) if self.need_continuation and (typ ~= 0x9 or self.need_continuation.id ~= streamid) then return nil, h2_error.errors.PROTOCOL_ERROR:new_traceback("CONTINUATION frame expected"), ce.EILSEQ end local handler = h2_stream.frame_handlers[typ] -- http2 spec section 4.1: -- Implementations MUST ignore and discard any frame that has a type that is unknown. if handler then local stream = self.streams[streamid] if stream == nil then if xor(streamid % 2 == 1, self.type == "client") then return nil, h2_error.errors.PROTOCOL_ERROR:new_traceback("Streams initiated by a client MUST use odd-numbered stream identifiers; those initiated by the server MUST use even-numbered stream identifiers"), ce.EILSEQ end -- TODO: check MAX_CONCURRENT_STREAMS stream = self:new_stream(streamid) --[[ http2 spec section 6.8 the sender will ignore frames sent on streams initiated by the receiver if the stream has an identifier higher than the included last stream identifier ... After sending a GOAWAY frame, the sender can discard frames for streams initiated by the receiver with identifiers higher than the identified last stream. However, any frames that alter connection state cannot be completely ignored. 
For instance, HEADERS, PUSH_PROMISE, and CONTINUATION frames MUST be minimally processed to ensure the state maintained for header compression is consistent (see Section 4.3); similarly, DATA frames MUST be counted toward the connection flow-control window. Failure to process these frames can cause flow control or header compression state to become unsynchronized.]] -- If we haven't seen this stream before, and we should be discarding frames from it, -- then don't push it into the new_streams fifo if self.send_goaway_lowest == nil or streamid <= self.send_goaway_lowest then self.new_streams:push(stream) self.new_streams_cond:signal(1) end end local ok, err, errno = handler(stream, flag, payload, deadline) if not ok then if h2_error.is(err) and err.stream_error and streamid ~= 0 and stream.state ~= "idle" then local ok2, err2, errno2 = stream:rst_stream(err, deadline and deadline-monotime()) if not ok2 then return nil, err2, errno2 end else -- connection error or unknown error return nil, err, errno end end end return true end function connection_methods:step(timeout) local deadline = timeout and monotime()+timeout if not self.has_confirmed_preface and self.type == "server" then local ok, err, errno = socket_has_preface(self.socket, false, timeout) self.had_eagain = false if ok == nil then if errno == ce.ETIMEDOUT then self.had_eagain = true return true end return nil, err, errno end if not ok then return nil, h2_error.errors.PROTOCOL_ERROR:new_traceback("invalid connection preface. not an http2 client?"), ce.EILSEQ end self.has_confirmed_preface = true end local ok, connection_error, errno local typ, flag, streamid, payload = self:read_http2_frame(deadline and deadline-monotime()) if typ == nil then -- flag might be `nil` on EOF ok, connection_error, errno = nil, flag, streamid elseif not self.has_first_settings and typ ~= 0x4 then -- XXX: Should this be more strict? e.g. what if it's an ACK? 
ok, connection_error, errno = false, h2_error.errors.PROTOCOL_ERROR:new_traceback("A SETTINGS frame MUST be the first frame sent in an HTTP/2 connection"), ce.EILSEQ else ok, connection_error, errno = handle_frame(self, typ, flag, streamid, payload, deadline) if ok then self.has_first_settings = true end end if not ok and connection_error and errno ~= ce.ETIMEDOUT then if not self.socket:eof("w") then local code, message if h2_error.is(connection_error) then code, message = connection_error.code, connection_error.message else code = h2_error.errors.INTERNAL_ERROR.code end -- ignore write failure here; there's nothing that can be done self:write_goaway_frame(nil, code, message, deadline and deadline-monotime()) end if errno == nil and h2_error.is(connection_error) and connection_error.code == h2_error.errors.PROTOCOL_ERROR.code then errno = ce.EILSEQ end return nil, connection_error, errno end return true end function connection_methods:empty() return self.socket:eof("r") end function connection_methods:loop(timeout) local deadline = timeout and monotime()+timeout while not self:empty() do local ok, err, errno = self:step(deadline and deadline-monotime()) if not ok then return nil, err, errno end end return true end function connection_methods:shutdown() local ok, err, errno if self.send_goaway_lowest then ok = true else ok, err, errno = self:write_goaway_frame(nil, h2_error.errors.NO_ERROR.code, "connection closed", 0) if not ok and errno == ce.EPIPE then -- other end already closed ok, err, errno = true, nil, nil end end for _, stream in pairs(self.streams) do stream:shutdown() end self.socket:shutdown("r") return ok, err, errno end function connection_methods:new_stream(id) if id and self.streams[id] ~= nil then error("stream id already in use") end local stream = h2_stream.new(self) if id then stream:pick_id(id) end return stream end -- this function *should never throw* function connection_methods:get_next_incoming_stream(timeout) local deadline = timeout and (monotime()+timeout) while self.new_streams:length() == 0 do if self.recv_goaway_lowest or self.socket:eof("r") then -- TODO? clarification required: can the sender of a GOAWAY subsequently start streams? -- (with a lower stream id than they sent in the GOAWAY) -- For now, assume not. 
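--[==[ Editor's sketch (not part of the module): get_next_incoming_stream both
waits for new peer-initiated streams and pumps :step() while waiting, so a minimal
server can be driven from a single accept loop. A rough pattern, assuming `conn`
is a server-side h2_connection (real servers such as http.server add timeouts and
error handling):

	local cqueues = require "cqueues"
	local cq = cqueues.new()
	cq:wrap(function()
		while true do
			local stream = conn:get_next_incoming_stream()
			if stream == nil then break end -- peer sent GOAWAY or closed
			cq:wrap(function()
				local headers = assert(stream:get_headers())
				-- ... inspect headers, write a response, then stream:shutdown()
			end)
		end
	end)
	assert(cq:loop())
]==]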
return nil end local which = cqueues.poll(self.new_streams_cond, self.recv_goaway, self, timeout) if which == self then local ok, err, errno = self:step(0) if not ok then return nil, err, errno end elseif which == timeout then return nil, onerror(self.socket, "get_next_incoming_stream", ce.ETIMEDOUT) end timeout = deadline and (deadline-monotime()) end local stream = self.new_streams:pop() return stream end -- On success, returns type, flags, stream id and payload -- If the socket has been shutdown for reading, and there is no data left unread, returns nil -- safe to retry on error function connection_methods:read_http2_frame(timeout) local deadline = timeout and (monotime()+timeout) local frame_header, err, errno = self.socket:xread(9, timeout) self.had_eagain = false if frame_header == nil then if errno == ce.ETIMEDOUT then self.had_eagain = true return nil, err, errno elseif err == nil then if self.socket:pending() > 0 then self.socket:seterror("r", ce.EILSEQ) return nil, onerror(self.socket, "read_http2_frame", ce.EILSEQ) end return nil else return nil, err, errno end end local size, typ, flags, streamid = sunpack(">I3 B B I4", frame_header) if size > self.acked_settings[known_settings.MAX_FRAME_SIZE] then local ok, errno2 = self.socket:unget(frame_header) if not ok then return nil, onerror(self.socket, "unget", errno2, 2) end return nil, h2_error.errors.FRAME_SIZE_ERROR:new_traceback("frame too large"), ce.E2BIG end local payload, err2, errno2 = self.socket:xread(size, 0) self.had_eagain = false if payload and #payload < size then -- hit EOF local ok, errno4 = self.socket:unget(payload) if not ok then return nil, onerror(self.socket, "unget", errno4, 2) end payload = nil end if payload == nil then -- put frame header back into socket so a retry will work local ok, errno3 = self.socket:unget(frame_header) if not ok then return nil, onerror(self.socket, "unget", errno3, 2) end if errno2 == ce.ETIMEDOUT then self.had_eagain = true timeout = deadline and deadline-monotime() if cqueues.poll(self.socket, timeout) ~= timeout then return self:read_http2_frame(deadline and deadline-monotime()) end elseif err2 == nil then self.socket:seterror("r", ce.EILSEQ) return nil, onerror(self.socket, "read_http2_frame", ce.EILSEQ) end return nil, err2, errno2 end -- reserved bit MUST be ignored by receivers streamid = band(streamid, 0x7fffffff) return typ, flags, streamid, payload end -- If this times out, it was the flushing; not the write itself -- hence it's not always total failure. -- It's up to the caller to take some action (e.g. 
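--[==[ Editor's sketch (not part of the module): the 9-octet HTTP/2 frame header
parsed above is length(24) | type(8) | flags(8) | R+stream-id(32), and the reserved
bit is masked off with band(streamid, 0x7fffffff). A standalone round-trip using
the same pack format string (string.pack is Lua 5.3+; the module falls back to
compat53, as here):

	local spack = string.pack or require "compat53.string".pack
	local sunpack = string.unpack or require "compat53.string".unpack
	local FRAME_DATA, FLAG_END_STREAM = 0x0, 0x1
	local payload = "hello"
	local header = spack(">I3 B B I4", #payload, FRAME_DATA, FLAG_END_STREAM, 1)
	local size, typ, flags, streamid = sunpack(">I3 B B I4", header)
	assert(size == 5 and typ == FRAME_DATA and flags == FLAG_END_STREAM and streamid == 1)
]==]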
closing) rather than doing it here function connection_methods:write_http2_frame(typ, flags, streamid, payload, timeout, flush) if #payload > self.peer_settings[known_settings.MAX_FRAME_SIZE] then return nil, h2_error.errors.FRAME_SIZE_ERROR:new_traceback("frame too large"), ce.E2BIG end local header = spack(">I3 B B I4", #payload, typ, flags, streamid) local ok, err, errno = self.socket:xwrite(header, "f", 0) if not ok then return nil, err, errno end return self.socket:xwrite(payload, flush, timeout) end function connection_methods:ping(timeout) local deadline = timeout and (monotime()+timeout) local payload -- generate a random, unique payload repeat -- keep generating until we don't have a collision payload = rand.bytes(8) until self.pongs[payload] == nil local cond = cc.new() self.pongs[payload] = cond assert(self.stream0:write_ping_frame(false, payload, timeout)) while self.pongs[payload] do timeout = deadline and (deadline-monotime()) local which = cqueues.poll(cond, self, timeout) if which == self then local ok, err, errno = self:step(0) if not ok then return nil, err, errno end elseif which == timeout then return nil, onerror(self.socket, "ping", ce.ETIMEDOUT) end end return true end function connection_methods:write_window_update(...) return self.stream0:write_window_update(...) end function connection_methods:write_goaway_frame(last_stream_id, err_code, debug_msg, timeout) if last_stream_id == nil then last_stream_id = math.max(self.highest_odd_stream, self.highest_even_stream) end return self.stream0:write_goaway_frame(last_stream_id, err_code, debug_msg, timeout) end function connection_methods:set_peer_settings(peer_settings) --[[ 6.9.2: In addition to changing the flow-control window for streams that are not yet active, a SETTINGS frame can alter the initial flow-control window size for streams with active flow-control windows (that is, streams in the "open" or "half-closed (remote)" state). When the value of SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST adjust the size of all stream flow-control windows that it maintains by the difference between the new value and the old value. A change to SETTINGS_INITIAL_WINDOW_SIZE can cause the available space in a flow-control window to become negative. 
A sender MUST track the negative flow-control window and MUST NOT send new flow- controlled frames until it receives WINDOW_UPDATE frames that cause the flow-control window to become positive.]] local new_window_size = peer_settings[known_settings.INITIAL_WINDOW_SIZE] if new_window_size then local old_windows_size = self.peer_settings[known_settings.INITIAL_WINDOW_SIZE] local delta = new_window_size - old_windows_size if delta ~= 0 then for _, stream in pairs(self.streams) do stream.peer_flow_credits = stream.peer_flow_credits + delta stream.peer_flow_credits_change:signal() end end end merge_settings(self.peer_settings, peer_settings) self.peer_settings_cond:signal() end function connection_methods:ack_settings() local n = self.send_settings_acked + 1 self.send_settings_acked = n local acked_settings = self.send_settings[n] if acked_settings then self.send_settings[n] = nil merge_settings(self.acked_settings, acked_settings) end self.send_settings_ack_cond:signal() end function connection_methods:settings(tbl, timeout) local deadline = timeout and monotime()+timeout local n, err, errno = self.stream0:write_settings_frame(false, tbl, timeout) if not n then return nil, err, errno end -- Now wait for ACK while self.send_settings_acked < n do timeout = deadline and (deadline-monotime()) local which = cqueues.poll(self.send_settings_ack_cond, self, timeout) if which == self then local ok2, err2, errno2 = self:step(0) if not ok2 then return nil, err2, errno2 end elseif which == timeout then self:write_goaway_frame(nil, h2_error.errors.SETTINGS_TIMEOUT.code, "timeout exceeded", 0) return nil, onerror(self.socket, "settings", ce.ETIMEDOUT) end end return true end return { preface = preface; socket_has_preface = socket_has_preface; new = new_connection; methods = connection_methods; mt = connection_mt; } lua-http-0.4/http/h2_error.lua000066400000000000000000000057271400726324600163360ustar00rootroot00000000000000--[[ This module implements an error object that can encapsulate the data required of an HTTP2 error. This data is: - the error 'name' - the error 'code' - the error 'description' - an optional error message Additionally, there is a field for a traceback. ]] local errors = {} local http_error_methods = {} local http_error_mt = { __name = "http.h2_error"; __index = http_error_methods; } function http_error_mt:__tostring() local s = string.format("%s(0x%x): %s", self.name, self.code, self.description) if self.message then s = s .. ": " .. self.message end if self.traceback then s = s .. "\n" .. self.traceback end return s end function http_error_methods:new(ob) return setmetatable({ name = ob.name or self.name; code = ob.code or self.code; description = ob.description or self.description; message = ob.message; traceback = ob.traceback; stream_error = ob.stream_error or false; }, http_error_mt) end function http_error_methods:new_traceback(message, stream_error, lvl) if lvl == nil then lvl = 2 elseif lvl ~= 0 then lvl = lvl + 1 end local e = { message = message; stream_error = stream_error; } if lvl ~= 0 then -- COMPAT: should be passing `nil` message (not the empty string) -- see https://github.com/keplerproject/lua-compat-5.3/issues/16 e.traceback = debug.traceback("", lvl) end return self:new(e) end function http_error_methods:error(...) error(self:new_traceback(...), 0) end http_error_mt.__call = http_error_methods.error function http_error_methods:assert(cond, ...) if cond then return cond, ... else local message = ... 
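--[==[ Editor's sketch (not part of the module): when the peer changes
SETTINGS_INITIAL_WINDOW_SIZE, every existing stream's flow-control window is
adjusted by the difference, which may legitimately drive a window negative until
WINDOW_UPDATE frames arrive. The arithmetic, stated standalone:

	local old_initial, new_initial = 65535, 16384
	local delta = new_initial - old_initial -- -49151
	local stream_credits = 1000
	stream_credits = stream_credits + delta
	assert(stream_credits == -48151) -- negative: no DATA may be sent until it recovers
]==]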
self:error(message, 2) -- don't tail call, as error levels aren't well defined end end local function is(ob) return getmetatable(ob) == http_error_mt end local function add_error(name, code, description) local e = setmetatable({ name = name; code = code; description = description; }, http_error_mt) errors[name] = e errors[code] = e end -- Taken from https://http2.github.io/http2-spec/#iana-errors add_error("NO_ERROR", 0x0, "Graceful shutdown") add_error("PROTOCOL_ERROR", 0x1, "Protocol error detected") add_error("INTERNAL_ERROR", 0x2, "Implementation fault") add_error("FLOW_CONTROL_ERROR", 0x3, "Flow control limits exceeded") add_error("SETTINGS_TIMEOUT", 0x4, "Settings not acknowledged") add_error("STREAM_CLOSED", 0x5, "Frame received for closed stream") add_error("FRAME_SIZE_ERROR", 0x6, "Frame size incorrect") add_error("REFUSED_STREAM", 0x7, "Stream not processed") add_error("CANCEL", 0x8, "Stream cancelled") add_error("COMPRESSION_ERROR", 0x9, "Compression state not updated") add_error("CONNECT_ERROR", 0xa, "TCP connection error for CONNECT method") add_error("ENHANCE_YOUR_CALM", 0xb, "Processing capacity exceeded") add_error("INADEQUATE_SECURITY", 0xc, "Negotiated TLS parameters not acceptable") add_error("HTTP_1_1_REQUIRED", 0xd, "Use HTTP/1.1 for the request") return { errors = errors; is = is; } lua-http-0.4/http/h2_error.tld000066400000000000000000000005701400726324600163270ustar00rootroot00000000000000interface h2_error const new: (self, { "name": string?, "code": integer?, "description": string?, "message": string?, "traceback": string?, "stream_error": boolean? }) -> (h2_error) const new_traceback: (self, string, boolean, integer?) -> (h2_error) const error: (self, string, boolean, integer?) -> (void) end errors: {any:h2_error} is: (any) -> (boolean) lua-http-0.4/http/h2_stream.lua000066400000000000000000001363731400726324600165020ustar00rootroot00000000000000local cqueues = require "cqueues" local monotime = cqueues.monotime local cc = require "cqueues.condition" local ce = require "cqueues.errno" local new_fifo = require "fifo" local band = require "http.bit".band local bor = require "http.bit".bor local h2_error = require "http.h2_error" local h2_errors = h2_error.errors local stream_common = require "http.stream_common" local spack = string.pack or require "compat53.string".pack -- luacheck: ignore 143 local sunpack = string.unpack or require "compat53.string".unpack -- luacheck: ignore 143 local unpack = table.unpack or unpack -- luacheck: ignore 113 143 local assert = assert if _VERSION:match("%d+%.?%d*") < "5.3" then assert = require "compat53.module".assert end local MAX_HEADER_BUFFER_SIZE = 400*1024 -- 400 KB is max size in h2o local known_settings = {} for i, s in pairs({ [0x1] = "HEADER_TABLE_SIZE"; [0x2] = "ENABLE_PUSH"; [0x3] = "MAX_CONCURRENT_STREAMS"; [0x4] = "INITIAL_WINDOW_SIZE"; [0x5] = "MAX_FRAME_SIZE"; [0x6] = "MAX_HEADER_LIST_SIZE"; [0x8] = "SETTINGS_ENABLE_CONNECT_PROTOCOL"; [0x10] = "TLS_RENEG_PERMITTED"; }) do known_settings[i] = s known_settings[s] = i end local frame_types = { [0x0] = "DATA"; [0x1] = "HEADERS"; [0x2] = "PRIORITY"; [0x3] = "RST_STREAM"; [0x4] = "SETTING"; [0x5] = "PUSH_PROMISE"; [0x6] = "PING"; [0x7] = "GOAWAY"; [0x8] = "WINDOW_UPDATE"; [0x9] = "CONTINUATION"; [0xa] = "ALTSVC"; [0xc] = "ORIGIN"; } for i=0x0, 0x9 do frame_types[frame_types[i]] = i end local frame_handlers = {} local stream_methods = {} for k, v in pairs(stream_common.methods) do stream_methods[k] = v end local stream_mt = { __name = "http.h2_stream"; __index = 
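--[==[ Editor's sketch (not part of the module): h2_error objects are callable and
raisable; new_traceback captures a traceback, and the second argument marks the
error as a stream error (handled with RST_STREAM) rather than a connection error.
Typical use:

	local h2_error = require "http.h2_error"
	local ok, err = pcall(h2_error.errors.PROTOCOL_ERROR, "example failure", true)
	assert(not ok)
	assert(h2_error.is(err))
	assert(err.code == 0x1 and err.stream_error == true)
]==]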
stream_methods; } function stream_mt:__tostring() local dependee_list = {} for s in pairs(self.dependees) do dependee_list[#dependee_list+1] = string.format("%d", s.id) end table.sort(dependee_list) dependee_list = table.concat(dependee_list, ",") return string.format("http.h2_stream{connection=%s;id=%s;state=%q;parent=%s;dependees={%s}}", tostring(self.connection), tostring(self.id), self.state, (self.parent and tostring(self.parent.id) or "nil"), dependee_list) end local function new_stream(connection) local self = setmetatable({ connection = connection; type = connection.type; state = "idle"; id = nil; peer_flow_credits = 0; peer_flow_credits_change = cc.new(); parent = nil; dependees = setmetatable({}, {__mode="kv"}); weight = 16; -- http2 spec, section 5.3.5 rst_stream_error = nil; stats_sent_headers = 0; -- number of header blocks sent stats_recv_headers = 0; -- number of header blocks received stats_sent = 0; -- #bytes sent in DATA blocks stats_recv = 0; -- #bytes received in DATA blocks recv_headers_fifo = new_fifo(); recv_headers_cond = cc.new(); chunk_fifo = new_fifo(); chunk_cond = cc.new(); end_stream_after_continuation = nil; content_length = nil; }, stream_mt) return self end function stream_methods:pick_id(id) assert(self.id == nil) if id == nil then if self.connection.recv_goaway_lowest then h2_error.errors.PROTOCOL_ERROR("Receivers of a GOAWAY frame MUST NOT open additional streams on the connection") end if self.type == "client" then -- Pick next free odd number id = self.connection.highest_odd_stream + 2 self.connection.highest_odd_stream = id else -- Pick next free even number id = self.connection.highest_even_stream + 2 self.connection.highest_even_stream = id end self.id = id else assert(type(id) == "number" and id >= 0 and id <= 0x7fffffff and id % 1 == 0, "invalid stream id") assert(self.connection.streams[id] == nil) self.id = id if id % 2 == 0 then if id > self.connection.highest_even_stream then self.connection.highest_even_stream = id end -- stream 'already' existed but was possibly collected. see http2 spec 5.1.1 if id <= self.connection.highest_even_non_idle_stream then self:set_state("closed") end else if id > self.connection.highest_odd_stream then self.connection.highest_odd_stream = id end -- stream 'already' existed but was possibly collected. see http2 spec 5.1.1 if id <= self.connection.highest_odd_non_idle_stream then self:set_state("closed") end end end -- TODO: check MAX_CONCURRENT_STREAMS self.connection.streams[id] = self if id == 0 then self.connection.stream0 = self else self.peer_flow_credits = self.connection.peer_settings[known_settings.INITIAL_WINDOW_SIZE] self.peer_flow_credits_change:signal() -- Add dependency on stream 0. 
http2 spec, 5.3.1 self.connection.stream0:reprioritise(self) end return true end local valid_states = { ["idle"] = 1; -- initial ["open"] = 2; -- have sent or received headers; haven't sent body yet ["reserved (local)"] = 2; -- have sent a PUSH_PROMISE ["reserved (remote)"] = 2; -- have received a PUSH_PROMISE ["half closed (local)"] = 3; -- have sent whole body ["half closed (remote)"] = 3; -- have received whole body ["closed"] = 4; -- complete } function stream_methods:set_state(new) local new_order = assert(valid_states[new]) local old = self.state if new_order <= valid_states[old] then error("invalid state progression ('"..old.."' to '"..new.."')") end if new ~= "closed" then assert(self.id) end self.state = new if new == "closed" or new == "half closed (remote)" then self.recv_headers_cond:signal() self.chunk_cond:signal() end if old == "idle" then if self.id % 2 == 0 then if self.id > self.connection.highest_even_non_idle_stream then self.connection.highest_even_non_idle_stream = self.id end else if self.id > self.connection.highest_odd_non_idle_stream then self.connection.highest_odd_non_idle_stream = self.id end end end if old == "idle" and new ~= "closed" then self.connection.n_active_streams = self.connection.n_active_streams + 1 elseif old ~= "idle" and new == "closed" then local n_active_streams = self.connection.n_active_streams - 1 self.connection.n_active_streams = n_active_streams if n_active_streams == 0 then self.connection:onidle()(self.connection) end end end function stream_methods:write_http2_frame(typ, flags, payload, timeout, flush) local stream_id = assert(self.id, "stream has unset id") return self.connection:write_http2_frame(typ, flags, stream_id, payload, timeout, flush) end function stream_methods:reprioritise(child, exclusive) assert(child) assert(child.id) assert(child.id ~= 0) -- cannot reprioritise stream 0 if self == child then -- http2 spec, section 5.3.1 return nil, h2_errors.PROTOCOL_ERROR:new_traceback("A stream cannot depend on itself", true), ce.EILSEQ end do -- Check if the child is an ancestor local ancestor = self.parent while ancestor do if ancestor == child then -- Break the loop. 
http spec, section 5.3.3 local ok, err, errno = child.parent:reprioritise(self, false) if not ok then return nil, err, errno end break end ancestor = ancestor.parent end end -- Remove old parent if child.parent then child.parent.dependees[child] = nil end -- We are now the parent child.parent = self if exclusive then -- All the parent's deps are now the child's for s, v in pairs(self.dependees) do s.parent = child child.dependees[s] = v self.dependees[s] = nil end else self.dependees[child] = true end return true end local chunk_methods = {} local chunk_mt = { __name = "http.h2_stream.chunk"; __index = chunk_methods; } local function new_chunk(original_length, data) return setmetatable({ original_length = original_length; acked = false; data = data; }, chunk_mt) end function chunk_methods:ack() if self.acked then return 0 else self.acked = true return self.original_length end end frame_handlers[frame_types.DATA] = function(stream, flags, payload, deadline) -- luacheck: ignore 212 if stream.id == 0 then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'DATA' framess MUST be associated with a stream"), ce.EILSEQ end if stream.state == "idle" or stream.state == "reserved (remote)" then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'DATA' frames not allowed in 'idle' state"), ce.EILSEQ elseif stream.state ~= "open" and stream.state ~= "half closed (local)" then return nil, h2_errors.STREAM_CLOSED:new_traceback("'DATA' frames not allowed in '" .. stream.state .. "' state"), ce.EILSEQ end local end_stream = band(flags, 0x1) ~= 0 local padded = band(flags, 0x8) ~= 0 local original_length = #payload if padded then local pad_len = sunpack("> B", payload) if pad_len >= #payload then -- >= will take care of the pad_len itself return nil, h2_errors.PROTOCOL_ERROR:new_traceback("length of the padding is the length of the frame payload or greater"), ce.EILSEQ elseif payload:match("[^%z]", -pad_len) then -- 6.1: A receiver is not obligated to verify padding but MAY treat non-zero padding as a connection error of type PROTOCOL_ERROR. return nil, h2_errors.PROTOCOL_ERROR:new_traceback("padding not null bytes"), ce.EILSEQ end payload = payload:sub(2, -pad_len-1) end local stats_recv = stream.stats_recv + #payload if stream.content_length and stats_recv > stream.content_length then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("content-length exceeded", true), ce.EILSEQ end local chunk = new_chunk(original_length, payload) stream.chunk_fifo:push(chunk) stream.stats_recv = stats_recv if end_stream then stream.chunk_fifo:push(nil) -- chunk_cond gets signaled by :set_state if stream.state == "half closed (local)" then stream:set_state("closed") else stream:set_state("half closed (remote)") end else stream.chunk_cond:signal() end return true end function stream_methods:write_data_frame(payload, end_stream, padded, timeout, flush) if self.id == 0 then h2_errors.PROTOCOL_ERROR("'DATA' frames MUST be associated with a stream") end if self.state ~= "open" and self.state ~= "half closed (remote)" then h2_errors.STREAM_CLOSED("'DATA' frame not allowed in '" .. self.state .. "' state") end local pad_len, padding = "", "" local flags = 0 if end_stream then flags = bor(flags, 0x1) end if padded then flags = bor(flags, 0x8) pad_len = spack("> B", padded) padding = ("\0"):rep(padded) end payload = pad_len .. payload .. 
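--[=[ Illustrative sketch (not part of the module): how the padded DATA
payload is laid out by the concatenation below, assuming `padded == 4`
and a 5-octet body. The whole result, including the Pad Length octet and
the padding itself, is what gets charged against flow control.

  local spack = string.pack or require "compat53.string".pack
  local body = "hello"
  local frame_payload = spack("> B", 4) .. body .. ("\0"):rep(4)
  assert(#frame_payload == 1 + #body + 4) -- 10 octets on the wire
]=]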
padding -- The entire DATA frame payload is included in flow control, -- including Pad Length and Padding fields if present local new_stream_peer_flow_credits = self.peer_flow_credits - #payload local new_connection_peer_flow_credits = self.connection.peer_flow_credits - #payload if new_stream_peer_flow_credits < 0 or new_connection_peer_flow_credits < 0 then h2_errors.FLOW_CONTROL_ERROR("not enough flow credits") end local ok, err, errno = self:write_http2_frame(frame_types.DATA, flags, payload, timeout, flush) if not ok then return nil, err, errno end self.peer_flow_credits = new_stream_peer_flow_credits self.connection.peer_flow_credits = new_connection_peer_flow_credits self.stats_sent = self.stats_sent + #payload if end_stream then if self.state == "half closed (remote)" then self:set_state("closed") else self:set_state("half closed (local)") end end return ok end -- Map from header name to whether it belongs in a request (vs a response) local valid_pseudo_headers = { [":method"] = true; [":scheme"] = true; [":path"] = true; [":authority"] = true; [":status"] = false; } local function validate_headers(headers, is_request, nth_header, ended_stream) -- Section 8.1.2: A request or response containing uppercase header field names MUST be treated as malformed for name in headers:each() do if name:lower() ~= name then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("header field names MUST be lowercase", true), ce.EINVAL end end do -- Section 8.1.2.1: Validate that all colon fields are before other ones local seen_non_colon = false for name, value in headers:each() do if name:sub(1,1) == ":" then --[[ Pseudo-header fields are only valid in the context in which they are defined. Pseudo-header fields defined for requests MUST NOT appear in responses; pseudo-header fields defined for responses MUST NOT appear in requests. Pseudo-header fields MUST NOT appear in trailers. Endpoints MUST treat a request or response that contains undefined or invalid pseudo-header fields as malformed]] if (is_request and nth_header ~= 1) or valid_pseudo_headers[name] ~= is_request then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("Pseudo-header fields are only valid in the context in which they are defined", true), ce.EILSEQ end if seen_non_colon then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("All pseudo-header fields MUST appear in the header block before regular header fields", true), ce.EILSEQ end else seen_non_colon = true end if type(value) ~= "string" then return nil, "invalid header field", ce.EINVAL end end end if headers:has("connection") then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("An endpoint MUST NOT generate an HTTP/2 message containing connection-specific header fields", true), ce.EILSEQ end local te = headers:get_as_sequence("te") if te.n > 0 and (te[1] ~= "trailers" or te.n ~= 1) then return nil, h2_errors.PROTOCOL_ERROR:new_traceback([[The TE header field, which MAY be present in an HTTP/2 request; when it is, it MUST NOT contain any value other than "trailers"]], true), ce.EILSEQ end if is_request then if nth_header == 1 then --[[ All HTTP/2 requests MUST include exactly one valid value for the :method, :scheme, and :path pseudo-header fields, unless it is a CONNECT request (Section 8.3). 
An HTTP request that omits mandatory pseudo-header fields is malformed (Section 8.1.2.6).]] local methods = headers:get_as_sequence(":method") if methods.n ~= 1 then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("requests MUST include exactly one valid value for the :method, :scheme, and :path pseudo-header fields, unless it is a CONNECT request", true), ce.EILSEQ elseif methods[1] ~= "CONNECT" then local scheme = headers:get_as_sequence(":scheme") local path = headers:get_as_sequence(":path") if scheme.n ~= 1 or path.n ~= 1 then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("requests MUST include exactly one valid value for the :method, :scheme, and :path pseudo-header fields, unless it is a CONNECT request", true), ce.EILSEQ end if path[1] == "" and (scheme[1] == "http" or scheme[1] == "https") then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("The :path pseudo-header field MUST NOT be empty for http or https URIs", true), ce.EILSEQ end else -- is CONNECT method -- Section 8.3 if headers:has(":scheme") or headers:has(":path") then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("For a CONNECT request, the :scheme and :path pseudo-header fields MUST be omitted", true), ce.EILSEQ end end elseif nth_header == 2 then if not ended_stream then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("Trailers MUST be at end of stream", true), ce.EILSEQ end elseif nth_header > 2 then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("An HTTP request consists of maximum 2 HEADER blocks", true), ce.EILSEQ end else --[[ For HTTP/2 responses, a single :status pseudo-header field is defined that carries the HTTP status code field (RFC7231, Section 6). This pseudo-header field MUST be included in all responses; otherwise, the response is malformed (Section 8.1.2.6)]] if not headers:has(":status") then return nil, h2_errors.PROTOCOL_ERROR:new_traceback(":status pseudo-header field MUST be included in all responses", true), ce.EILSEQ end end return true end local function process_end_headers(stream, end_stream, pad_len, pos, promised_stream, payload) if pad_len > 0 then if pad_len + pos - 1 > #payload then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("length of the padding is the length of the frame payload or greater"), ce.EILSEQ elseif payload:match("[^%z]", -pad_len) then -- 6.2: Padding fields and flags are identical to those defined for DATA frames -- 6.1: A receiver is not obligated to verify padding but MAY treat non-zero padding as a connection error of type PROTOCOL_ERROR. 
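--[=[ Worked example (illustrative only) of the padding checks around here:
`pos` already points past the Pad Length octet (and, for HEADERS, the
priority field), and the final `pad_len` octets of `payload` are the
padding. With #payload == 10 and pos == 2:
  * pad_len == 10 gives 10 + 2 - 1 == 11 > 10, so the declared padding
    cannot fit and the frame is rejected with PROTOCOL_ERROR;
  * pad_len == 9 consumes everything after the Pad Length octet, leaving
    an empty header-block fragment;
  * a non-NUL byte anywhere in those final pad_len octets may likewise be
    treated as PROTOCOL_ERROR (RFC 7540 Sections 6.1/6.2).
]=]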
return nil, h2_errors.PROTOCOL_ERROR:new_traceback("padding not null bytes"), ce.EILSEQ end payload = payload:sub(1, -pad_len-1) end local headers, newpos, errno = stream.connection.decoding_context:decode_headers(payload, nil, pos) if not headers then return nil, newpos, errno end if newpos ~= #payload + 1 then return nil, h2_errors.COMPRESSION_ERROR:new_traceback("incomplete header fragment"), ce.EILSEQ end if not promised_stream then stream.stats_recv_headers = stream.stats_recv_headers + 1 local validate_ok, validate_err, errno2 = validate_headers(headers, stream.type ~= "client", stream.stats_recv_headers, end_stream) if not validate_ok then return nil, validate_err, errno2 end if headers:has("content-length") then stream.content_length = tonumber(headers:get("content-length"), 10) end stream.recv_headers_fifo:push(headers) if end_stream then stream.chunk_fifo:push(nil) -- recv_headers_cond and chunk_cond get signaled by :set_state if stream.state == "half closed (local)" then stream:set_state("closed") else stream:set_state("half closed (remote)") end else stream.recv_headers_cond:signal() if stream.state == "idle" then stream:set_state("open") end end else local validate_ok, validate_err, errno2 = validate_headers(headers, true, 1, false) if not validate_ok then return nil, validate_err, errno2 end promised_stream:set_state("reserved (remote)") promised_stream.recv_headers_fifo:push(headers) promised_stream.recv_headers_cond:signal() -- If we have sent a haven't seen this stream before, and we should be discarding frames from it, -- then don't push it into the new_streams fifo if stream.connection.send_goaway_lowest == nil or promised_stream.id <= stream.connection.send_goaway_lowest then stream.connection.new_streams:push(promised_stream) stream.connection.new_streams_cond:signal(1) end end return true end frame_handlers[frame_types.HEADERS] = function(stream, flags, payload, deadline) -- luacheck: ignore 212 if stream.id == 0 then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'HEADERS' frames MUST be associated with a stream"), ce.EILSEQ end if stream.state ~= "idle" and stream.state ~= "open" and stream.state ~= "half closed (local)" and stream.state ~= "reserved (remote)" then return nil, h2_errors.STREAM_CLOSED:new_traceback("'HEADERS' frame not allowed in '" .. stream.state .. "' state"), ce.EILSEQ end local end_stream = band(flags, 0x1) ~= 0 local end_headers = band(flags, 0x04) ~= 0 local padded = band(flags, 0x8) ~= 0 local priority = band(flags, 0x20) ~= 0 -- index where payload body starts local pos = 1 local pad_len if padded then pad_len = sunpack("> B", payload, pos) pos = 2 else pad_len = 0 end if priority then local exclusive, stream_dep, weight local tmp tmp, weight = sunpack(">I4 B", payload, pos) exclusive = band(tmp, 0x80000000) ~= 0 stream_dep = band(tmp, 0x7fffffff) weight = weight + 1 pos = pos + 5 local new_parent = stream.connection.streams[stream_dep] -- 5.3.1. Stream Dependencies -- A dependency on a stream that is not currently in the tree -- results in that stream being given a default priority if new_parent then local ok, err, errno = new_parent:reprioritise(stream, exclusive) if not ok then return nil, err, errno end stream.weight = weight end end local len = #payload - pos + 1 -- TODO: minus pad_len? 
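--[=[ Illustrative sketch (not part of the module) of the priority-field
decode performed just above when the PRIORITY flag (0x20) is set: the first
4 octets carry the exclusive bit plus the 31-bit stream dependency, and the
5th octet carries weight minus one. The byte string here is hypothetical.

  local sunpack = string.unpack or require "compat53.string".unpack
  local band = require "http.bit".band
  local tmp, w = sunpack(">I4 B", "\128\0\0\11\15")
  assert(band(tmp, 0x80000000) ~= 0) -- exclusive dependency
  assert(band(tmp, 0x7fffffff) == 11) -- depends on stream 11
  assert(w + 1 == 16) -- wire weight 15 means weight 16
]=]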
if len > MAX_HEADER_BUFFER_SIZE then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("headers too large"), ce.E2BIG end if end_headers then return process_end_headers(stream, end_stream, pad_len, pos, nil, payload) else stream.connection.need_continuation = stream stream.connection.recv_headers_end_stream = end_stream stream.connection.recv_headers_buffer = { payload } stream.connection.recv_headers_buffer_pos = pos stream.connection.recv_headers_buffer_pad_len = pad_len stream.connection.recv_headers_buffer_items = 1 stream.connection.recv_headers_buffer_length = len return true end end function stream_methods:write_headers_frame(payload, end_stream, end_headers, padded, exclusive, stream_dep, weight, timeout, flush) assert(self.state ~= "closed" and self.state ~= "half closed (local)") if self.id == nil then self:pick_id() end local pad_len, pri, padding = "", "", "" local flags = 0 if end_stream then flags = bor(flags, 0x1) end if end_headers then flags = bor(flags, 0x4) end if padded then flags = bor(flags, 0x8) pad_len = spack("> B", padded) padding = ("\0"):rep(padded) end if weight or stream_dep then flags = bor(flags, 0x20) assert(stream_dep <= 0x7fffffff) local tmp = stream_dep if exclusive then tmp = bor(tmp, 0x80000000) end weight = weight and weight - 1 or 0 pri = spack("> I4 B", tmp, weight) end payload = pad_len .. pri .. payload .. padding local ok, err, errno = self:write_http2_frame(frame_types.HEADERS, flags, payload, timeout, flush) if ok == nil then return nil, err, errno end self.stats_sent_headers = self.stats_sent_headers + 1 if end_headers then if end_stream then if self.state == "half closed (remote)" or self.state == "reserved (local)" then self:set_state("closed") else self:set_state("half closed (local)") end else if self.state == "idle" then self:set_state("open") elseif self.state == "reserved (local)" then self:set_state("half closed (remote)") end end else self.end_stream_after_continuation = end_stream end return ok end frame_handlers[frame_types.PRIORITY] = function(stream, flags, payload) -- luacheck: ignore 212 if stream.id == 0 then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'PRIORITY' frames MUST be associated with a stream"), ce.EILSEQ end if #payload ~= 5 then return nil, h2_errors.FRAME_SIZE_ERROR:new_traceback("'PRIORITY' frames must be 5 bytes", true), ce.EILSEQ end local exclusive, stream_dep, weight local tmp tmp, weight = sunpack(">I4 B", payload) weight = weight + 1 exclusive = band(tmp, 0x80000000) ~= 0 stream_dep = band(tmp, 0x7fffffff) -- 5.3.1. 
Stream Dependencies -- A dependency on a stream that is not currently in the tree -- results in that stream being given a default priority local new_parent = stream.connection.streams[stream_dep] if new_parent then local ok, err, errno = new_parent:reprioritise(stream, exclusive) if not ok then return nil, err, errno end stream.weight = weight end return true end function stream_methods:write_priority_frame(exclusive, stream_dep, weight, timeout, flush) assert(stream_dep <= 0x7fffffff) if self.id == nil then self:pick_id() end local tmp = stream_dep if exclusive then tmp = bor(tmp, 0x80000000) end weight = weight and weight - 1 or 0 local payload = spack("> I4 B", tmp, weight) return self:write_http2_frame(frame_types.PRIORITY, 0, payload, timeout, flush) end frame_handlers[frame_types.RST_STREAM] = function(stream, flags, payload, deadline) -- luacheck: ignore 212 if stream.id == 0 then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'RST_STREAM' frames MUST be associated with a stream"), ce.EILSEQ end if #payload ~= 4 then return nil, h2_errors.FRAME_SIZE_ERROR:new_traceback("'RST_STREAM' frames must be 4 bytes"), ce.EILSEQ end if stream.state == "idle" then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'RST_STREAM' frames MUST NOT be sent for a stream in the 'idle' state"), ce.EILSEQ elseif stream.state == "closed" then -- probably a delayed RST_STREAM, ignore return true end local err_code = sunpack(">I4", payload) stream.rst_stream_error = (h2_errors[err_code] or h2_errors.INTERNAL_ERROR):new { message = string.format("'RST_STREAM' on stream #%d (code=0x%x)", stream.id, err_code); stream_error = true; } stream:set_state("closed") return true end function stream_methods:write_rst_stream_frame(err_code, timeout, flush) if self.id == 0 then h2_errors.PROTOCOL_ERROR("'RST_STREAM' frames MUST be associated with a stream") end if self.state == "idle" then h2_errors.PROTOCOL_ERROR([['RST_STREAM' frames MUST NOT be sent for a stream in the "idle" state]]) end local flags = 0 local payload = spack(">I4", err_code) local ok, err, errno = self:write_http2_frame(frame_types.RST_STREAM, flags, payload, timeout, flush) if not ok then return nil, err, errno end if self.state ~= "closed" then self:set_state("closed") end self:shutdown() return ok end function stream_methods:rst_stream(err, timeout) local code if err == nil then code = 0 elseif h2_error.is(err) then code = err.code else err = h2_errors.INTERNAL_ERROR:new { message = tostring(err); stream_error = true; } code = err.code end if self.rst_stream_error == nil then self.rst_stream_error = err end return self:write_rst_stream_frame(code, timeout) end frame_handlers[frame_types.SETTING] = function(stream, flags, payload, deadline) if stream.id ~= 0 then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("stream identifier for a 'SETTINGS' frame MUST be zero"), ce.EILSEQ end local ack = band(flags, 0x1) ~= 0 if ack then -- server is ACK-ing our settings if #payload ~= 0 then return nil, h2_errors.FRAME_SIZE_ERROR:new_traceback("Receipt of a 'SETTINGS' frame with the ACK flag set and a length field value other than 0"), ce.EILSEQ end stream.connection:ack_settings() return true else -- settings from server if #payload % 6 ~= 0 then return nil, h2_errors.FRAME_SIZE_ERROR:new_traceback("'SETTINGS' frame with a length other than a multiple of 6 octets"), ce.EILSEQ end local peer_settings = {} for i=1, #payload, 6 do local id, val = sunpack(">I2 I4", payload, i) if id == known_settings.HEADER_TABLE_SIZE then 
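--[=[ Illustrative sketch (not part of the module): the enclosing loop walks
the SETTINGS payload in 6-octet entries, a 16-bit identifier followed by a
32-bit value, which is why a length that is not a multiple of 6 was rejected
above. The entry bytes here are hypothetical.

  local sunpack = string.unpack or require "compat53.string".unpack
  local id, val = sunpack(">I2 I4", "\0\4\0\1\0\0")
  assert(id == 0x4) -- SETTINGS_INITIAL_WINDOW_SIZE
  assert(val == 65536)
]=]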
stream.connection.encoding_context:set_max_dynamic_table_size(val) -- Add a 'max size' element to the next outgoing header stream.connection.encoding_context:encode_max_size(val) elseif id == known_settings.ENABLE_PUSH then -- Convert to boolean if val == 0 then val = false elseif val == 1 then val = true else return nil, h2_errors.PROTOCOL_ERROR:new_traceback("invalid value for boolean"), ce.EILSEQ end if val and stream.type == "client" then -- Clients MUST reject any attempt to change the SETTINGS_ENABLE_PUSH -- setting to a value other than 0 by treating the message as a connection -- error of type PROTOCOL_ERROR. return nil, h2_errors.PROTOCOL_ERROR:new_traceback("SETTINGS_ENABLE_PUSH not allowed for clients"), ce.EILSEQ end elseif id == known_settings.INITIAL_WINDOW_SIZE then if val >= 2^31 then return nil, h2_errors.FLOW_CONTROL_ERROR:new_traceback("SETTINGS_INITIAL_WINDOW_SIZE must be less than 2^31"), ce.EILSEQ end elseif id == known_settings.MAX_FRAME_SIZE then if val < 16384 then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("SETTINGS_MAX_FRAME_SIZE must be greater than or equal to 16384"), ce.EILSEQ elseif val >= 2^24 then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("SETTINGS_MAX_FRAME_SIZE must be less than 2^24"), ce.EILSEQ end end peer_settings[id] = val end stream.connection:set_peer_settings(peer_settings) -- Ack server's settings local ok, err, errno = stream:write_settings_frame(true, nil, 0, "f") if not ok then return ok, err, errno end -- ignore :flush failure stream.connection:flush(deadline and deadline-monotime()) return true end end local function pack_settings_payload(settings) local i = 0 local a = {} local function append(k, v) a[i*2+1] = k a[i*2+2] = v i = i + 1 end local HEADER_TABLE_SIZE = settings[0x1] if HEADER_TABLE_SIZE == nil then HEADER_TABLE_SIZE = settings.HEADER_TABLE_SIZE end if HEADER_TABLE_SIZE ~= nil then append(0x1, HEADER_TABLE_SIZE) end local ENABLE_PUSH = settings[0x2] if ENABLE_PUSH == nil then ENABLE_PUSH = settings.ENABLE_PUSH end if ENABLE_PUSH ~= nil then if type(ENABLE_PUSH) == "boolean" then ENABLE_PUSH = ENABLE_PUSH and 1 or 0 end append(0x2, ENABLE_PUSH) ENABLE_PUSH = ENABLE_PUSH ~= 0 end local MAX_CONCURRENT_STREAMS = settings[0x3] if MAX_CONCURRENT_STREAMS == nil then MAX_CONCURRENT_STREAMS = settings.MAX_CONCURRENT_STREAMS end if MAX_CONCURRENT_STREAMS ~= nil then append(0x3, MAX_CONCURRENT_STREAMS) end local INITIAL_WINDOW_SIZE = settings[0x4] if INITIAL_WINDOW_SIZE == nil then INITIAL_WINDOW_SIZE = settings.INITIAL_WINDOW_SIZE end if INITIAL_WINDOW_SIZE ~= nil then if INITIAL_WINDOW_SIZE >= 2^31 then h2_errors.FLOW_CONTROL_ERROR("SETTINGS_INITIAL_WINDOW_SIZE must be less than 2^31") end append(0x4, INITIAL_WINDOW_SIZE) end local MAX_FRAME_SIZE = settings[0x5] if MAX_FRAME_SIZE == nil then MAX_FRAME_SIZE = settings.MAX_FRAME_SIZE end if MAX_FRAME_SIZE ~= nil then if MAX_FRAME_SIZE < 16384 then h2_errors.PROTOCOL_ERROR("SETTINGS_MAX_FRAME_SIZE must be greater than or equal to 16384") elseif MAX_FRAME_SIZE >= 2^24 then h2_errors.PROTOCOL_ERROR("SETTINGS_MAX_FRAME_SIZE must be less than 2^24") end append(0x5, MAX_FRAME_SIZE) end local MAX_HEADER_LIST_SIZE = settings[0x6] if MAX_HEADER_LIST_SIZE == nil then MAX_HEADER_LIST_SIZE = settings.MAX_HEADER_LIST_SIZE end if MAX_HEADER_LIST_SIZE ~= nil then append(0x6, MAX_HEADER_LIST_SIZE) end local settings_to_merge = { HEADER_TABLE_SIZE; ENABLE_PUSH; MAX_CONCURRENT_STREAMS; INITIAL_WINDOW_SIZE; MAX_FRAME_SIZE; MAX_HEADER_LIST_SIZE; } return spack(">" .. 
("I2 I4"):rep(i), unpack(a, 1, i*2)), settings_to_merge end function stream_methods:write_settings_frame(ACK, settings, timeout, flush) if self.id ~= 0 then h2_errors.PROTOCOL_ERROR("'SETTINGS' frames must be on stream id 0") end local flags, payload, settings_to_merge if ACK then if settings ~= nil then h2_errors.PROTOCOL_ERROR("'SETTINGS' ACK cannot have new settings") end flags = 0x1 payload = "" else flags = 0 payload, settings_to_merge = pack_settings_payload(settings) end local ok, err, errno = self:write_http2_frame(frame_types.SETTING, flags, payload, timeout, flush) if ok and not ACK then local n = self.connection.send_settings.n + 1 self.connection.send_settings.n = n self.connection.send_settings[n] = settings_to_merge ok = n end return ok, err, errno end frame_handlers[frame_types.PUSH_PROMISE] = function(stream, flags, payload, deadline) -- luacheck: ignore 212 if not stream.connection.acked_settings[known_settings.ENABLE_PUSH] then -- An endpoint that has both set this parameter to 0 and had it acknowledged MUST -- treat the receipt of a PUSH_PROMISE frame as a connection error of type PROTOCOL_ERROR. return nil, h2_errors.PROTOCOL_ERROR:new_traceback("SETTINGS_ENABLE_PUSH is 0"), ce.EILSEQ elseif stream.type == "server" then -- A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE -- frame as a connection error of type PROTOCOL_ERROR. return nil, h2_errors.PROTOCOL_ERROR:new_traceback("A client cannot push"), ce.EILSEQ end if stream.id == 0 then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'PUSH_PROMISE' frames MUST be associated with a stream"), ce.EILSEQ end local end_headers = band(flags, 0x04) ~= 0 local padded = band(flags, 0x8) ~= 0 -- index where payload body starts local pos = 1 local pad_len if padded then pad_len = sunpack("> B", payload, pos) pos = 2 else pad_len = 0 end local tmp = sunpack(">I4", payload, pos) local promised_stream_id = band(tmp, 0x7fffffff) pos = pos + 4 local len = #payload - pos + 1 -- TODO: minus pad_len? if len > MAX_HEADER_BUFFER_SIZE then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("headers too large"), ce.EILSEQ end local promised_stream = stream.connection:new_stream(promised_stream_id) stream:reprioritise(promised_stream) if end_headers then return process_end_headers(stream, false, pad_len, pos, promised_stream, payload) else stream.connection.need_continuation = stream stream.connection.promised_stream = promised_stream stream.connection.recv_headers_end_stream = false stream.connection.recv_headers_buffer = { payload } stream.connection.recv_headers_buffer_pos = pos stream.connection.recv_headers_buffer_pad_len = pad_len stream.connection.recv_headers_buffer_items = 1 stream.connection.recv_headers_buffer_length = len return true end end function stream_methods:write_push_promise_frame(promised_stream_id, payload, end_headers, padded, timeout, flush) assert(self.state == "open" or self.state == "half closed (remote)") assert(self.id ~= 0) local promised_stream = self.connection.streams[promised_stream_id] assert(promised_stream and promised_stream.state == "idle") -- 8.2.1: PUSH_PROMISE frames MUST NOT be sent by the client. assert(self.type == "server" and promised_stream.id % 2 == 0) local pad_len, padding = "", "" local flags = 0 if end_headers then flags = bor(flags, 0x4) end if padded then flags = bor(flags, 0x8) pad_len = spack("> B", padded) padding = ("\0"):rep(padded) end promised_stream_id = spack(">I4", promised_stream_id) payload = pad_len .. promised_stream_id .. payload .. 
padding local ok, err, errno = self:write_http2_frame(frame_types.PUSH_PROMISE, flags, payload, 0, "f") if ok == nil then return nil, err, errno end if end_headers then promised_stream:set_state("reserved (local)") else promised_stream.end_stream_after_continuation = false end if flush ~= "f" then return self.connection:flush(timeout) else return true end end frame_handlers[frame_types.PING] = function(stream, flags, payload, deadline) if stream.id ~= 0 then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'PING' must be on stream id 0"), ce.EILSEQ end if #payload ~= 8 then return nil, h2_errors.FRAME_SIZE_ERROR:new_traceback("'PING' frames must be 8 bytes"), ce.EILSEQ end local ack = band(flags, 0x1) ~= 0 if ack then local cond = stream.connection.pongs[payload] if cond then cond:signal() stream.connection.pongs[payload] = nil end return true else return stream:write_ping_frame(true, payload, deadline and deadline-monotime()) end end function stream_methods:write_ping_frame(ACK, payload, timeout, flush) if self.id ~= 0 then h2_errors.PROTOCOL_ERROR("'PING' frames must be on stream id 0") end if #payload ~= 8 then h2_errors.FRAME_SIZE_ERROR("'PING' frames must have 8 byte payload") end local flags = ACK and 0x1 or 0 return self:write_http2_frame(frame_types.PING, flags, payload, timeout, flush) end frame_handlers[frame_types.GOAWAY] = function(stream, flags, payload, deadline) -- luacheck: ignore 212 if stream.id ~= 0 then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'GOAWAY' frames must be on stream id 0"), ce.EILSEQ end if #payload < 8 then return nil, h2_errors.FRAME_SIZE_ERROR:new_traceback("'GOAWAY' frames must be at least 8 bytes"), ce.EILSEQ end local last_streamid = sunpack(">I4 I4", payload) if stream.connection.recv_goaway_lowest == nil or last_streamid < stream.connection.recv_goaway_lowest then stream.connection.recv_goaway_lowest = last_streamid stream.connection.recv_goaway:signal() end return true end function stream_methods:write_goaway_frame(last_streamid, err_code, debug_msg, timeout, flush) if self.id ~= 0 then h2_errors.PROTOCOL_ERROR("'GOAWAY' frames MUST be on stream 0") end if self.connection.send_goaway_lowest and last_streamid > self.connection.send_goaway_lowest then h2_errors.PROTOCOL_ERROR("Endpoints MUST NOT increase the value they send in the last stream identifier") end local flags = 0 local payload = spack(">I4 I4", last_streamid, err_code) if debug_msg then payload = payload .. 
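--[=[ Illustrative sketch (not part of the module): a GOAWAY payload is a
4-octet last-stream-id and a 4-octet error code packed big-endian, optionally
followed by opaque debug data, matching the concatenation here. The values
below are hypothetical.

  local spack = string.pack or require "compat53.string".pack
  local goaway = spack(">I4 I4", 5, 0x0) .. "shutting down"
  assert(#goaway == 8 + #"shutting down")
]=]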
debug_msg end local ok, err, errno = self:write_http2_frame(frame_types.GOAWAY, flags, payload, 0, "f") if not ok then return nil, err, errno end self.connection.send_goaway_lowest = last_streamid if flush ~= "f" then return self.connection:flush(timeout) else return true end end frame_handlers[frame_types.WINDOW_UPDATE] = function(stream, flags, payload, deadline) -- luacheck: ignore 212 if #payload ~= 4 then return nil, h2_errors.FRAME_SIZE_ERROR:new_traceback("'WINDOW_UPDATE' frames must be 4 bytes"), ce.EILSEQ end if stream.id ~= 0 and stream.state == "idle" then return nil, h2_errors.PROTOCOL_ERROR:new_traceback([['WINDOW_UPDATE' frames not allowed in "idle" state]]), ce.EILSEQ end local tmp = sunpack(">I4", payload) if band(tmp, 0x80000000) ~= 0 then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'WINDOW_UPDATE' reserved bit set"), ce.EILSEQ end local increment = band(tmp, 0x7fffffff) if increment == 0 then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'WINDOW_UPDATE' MUST not have an increment of 0", stream.id ~= 0), ce.EILSEQ end local ob if stream.id == 0 then -- for connection ob = stream.connection else ob = stream end local newval = ob.peer_flow_credits + increment if newval > 2^31-1 then return nil, h2_errors.FLOW_CONTROL_ERROR:new_traceback("A sender MUST NOT allow a flow-control window to exceed 2^31-1 octets", stream.id ~= 0), ce.EILSEQ end ob.peer_flow_credits = newval ob.peer_flow_credits_change:signal() return true end function stream_methods:write_window_update_frame(inc, timeout, flush) local flags = 0 if self.id ~= 0 and self.state == "idle" then h2_errors.PROTOCOL_ERROR([['WINDOW_UPDATE' frames not allowed in "idle" state]]) end if inc > 0x7fffffff or inc <= 0 then h2_errors.PROTOCOL_ERROR("invalid window update increment", true) end local payload = spack(">I4", inc) return self:write_http2_frame(frame_types.WINDOW_UPDATE, flags, payload, timeout, flush) end function stream_methods:write_window_update(inc, timeout) while inc > 0x7fffffff do local ok, err, errno = self:write_window_update_frame(0x7fffffff, 0, "f") if not ok then return nil, err, errno end inc = inc - 0x7fffffff end return self:write_window_update_frame(inc, timeout) end frame_handlers[frame_types.CONTINUATION] = function(stream, flags, payload, deadline) -- luacheck: ignore 212 if stream.id == 0 then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'CONTINUATION' frames MUST be associated with a stream"), ce.EILSEQ end if not stream.connection.need_continuation then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("'CONTINUATION' frames MUST be preceded by a 'HEADERS', 'PUSH_PROMISE' or 'CONTINUATION' frame without the 'END_HEADERS' flag set"), ce.EILSEQ end local end_headers = band(flags, 0x04) ~= 0 local len = stream.connection.recv_headers_buffer_length + #payload if len > MAX_HEADER_BUFFER_SIZE then return nil, h2_errors.PROTOCOL_ERROR:new_traceback("headers too large"), ce.E2BIG end table.insert(stream.connection.recv_headers_buffer, payload) stream.connection.recv_headers_buffer_items = stream.connection.recv_headers_buffer_items + 1 stream.connection.recv_headers_buffer_length = len if end_headers then local promised_stream = stream.connection.promised_stream local pad_len = stream.connection.recv_headers_buffer_pad_len local pos = stream.connection.recv_headers_buffer_pos local end_stream = stream.connection.recv_headers_end_stream payload = table.concat(stream.connection.recv_headers_buffer, "", 1, stream.connection.recv_headers_buffer_items) 
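--[=[ Note (illustrative, not part of the module): a header block that does
not fit in a single HEADERS or PUSH_PROMISE frame arrives as, for example,
  HEADERS (END_HEADERS unset) -> CONTINUATION -> CONTINUATION (END_HEADERS set)
with no other frame permitted in between. The fragments are buffered on the
connection (bounded by MAX_HEADER_BUFFER_SIZE), concatenated above, and only
then handed to the HPACK decoder, because HPACK state is per-connection and
must see each header block exactly once and in order.
]=]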
stream.connection.recv_headers_end_stream = nil stream.connection.recv_headers_buffer = nil stream.connection.recv_headers_buffer_pos = nil stream.connection.recv_headers_buffer_pad_len = nil stream.connection.recv_headers_buffer_items = nil stream.connection.recv_headers_buffer_length = nil stream.connection.promised_stream = nil stream.connection.need_continuation = nil return process_end_headers(stream, end_stream, pad_len, pos, promised_stream, payload) else return true end end function stream_methods:write_continuation_frame(payload, end_headers, timeout, flush) assert(self.state ~= "closed" and self.state ~= "half closed (local)") local flags = 0 if end_headers then flags = bor(flags, 0x4) end local ok, err, errno = self:write_http2_frame(frame_types.CONTINUATION, flags, payload, timeout, flush) if ok == nil then return nil, err, errno end if end_headers then if self.end_stream_after_continuation then if self.state == "half closed (remote)" or self.state == "reserved (local)" then self:set_state("closed") else self:set_state("half closed (local)") end else if self.state == "idle" then self:set_state("open") elseif self.state == "reserved (local)" then self:set_state("half closed (remote)") end end else self.end_stream_after_continuation = nil end return ok end ------------------------------------------- function stream_methods:shutdown() if self.state ~= "idle" and self.state ~= "closed" and self.id ~= 0 then self:rst_stream(nil, 0) -- ignore result end local len = 0 for i=1, self.chunk_fifo:length() do local chunk = self.chunk_fifo:peek(i) if chunk ~= nil then len = len + chunk:ack() end end if len > 0 then self.connection:write_window_update(len, 0) end return true end -- this function *should never throw* function stream_methods:get_headers(timeout) local deadline = timeout and (monotime()+timeout) while self.recv_headers_fifo:length() < 1 do if self.state == "closed" then return nil, self.rst_stream_error end local which = cqueues.poll(self.recv_headers_cond, self.connection, timeout) if which == self.connection then local ok, err, errno = self.connection:step(0) if not ok then return nil, err, errno end elseif which == timeout then return nil, ce.strerror(ce.ETIMEDOUT), ce.ETIMEDOUT end timeout = deadline and (deadline-monotime()) end local headers = self.recv_headers_fifo:pop() return headers end function stream_methods:get_next_chunk(timeout) local deadline = timeout and (monotime()+timeout) while self.chunk_fifo:length() == 0 do if self.state == "closed" or self.state == "half closed (remote)" then return nil, self.rst_stream_error end local which = cqueues.poll(self.chunk_cond, self.connection, timeout) if which == self.connection then local ok, err, errno = self.connection:step(0) if not ok then return nil, err, errno end elseif which == timeout then return nil, ce.strerror(ce.ETIMEDOUT), ce.ETIMEDOUT end timeout = deadline and (deadline-monotime()) end local chunk = self.chunk_fifo:pop() if chunk == nil then return nil else local data = chunk.data local len = chunk:ack() if len > 0 then -- if they don't get flushed now they will get flushed on next read or write self:write_window_update(len, 0) self.connection:write_window_update(len, 0) end return data end end function stream_methods:unget(str) local chunk = new_chunk(0, str) self.chunk_fifo:insert(1, chunk) self.chunk_cond:signal() return true end local function write_headers(self, func, headers, extra_frame_data_len, timeout) local deadline = timeout and (monotime()+timeout) local SETTINGS_MAX_FRAME_SIZE = 
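--[=[ Worked example (illustrative only) of how this helper splits an encoded
header block across frames: with a peer MAX_FRAME_SIZE of 16384, an
extra_frame_data_len of 0 and a 40000-octet HPACK payload, `func` sends the
first 16384 octets in the opening frame (END_HEADERS unset), then one
CONTINUATION frame of 16384 octets, then a final CONTINUATION of 7232 octets
with END_HEADERS set: 16384 + 16384 + 7232 == 40000.
]=]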
self.connection.peer_settings[known_settings.MAX_FRAME_SIZE] local first_frame_max_size = SETTINGS_MAX_FRAME_SIZE - extra_frame_data_len assert(first_frame_max_size >= 0) local encoding_context = self.connection.encoding_context encoding_context:encode_headers(headers) local payload = encoding_context:render_data() encoding_context:clear_data() if #payload <= first_frame_max_size then local ok, err, errno = func(payload, true, deadline) if not ok then return ok, err, errno end else do local partial = payload:sub(1, first_frame_max_size) local ok, err, errno = func(partial, false, deadline) if not ok then return ok, err, errno end end local sent = first_frame_max_size local max = #payload-SETTINGS_MAX_FRAME_SIZE while sent < max do local partial = payload:sub(sent+1, sent+SETTINGS_MAX_FRAME_SIZE) local ok, err, errno = self:write_continuation_frame(partial, false, deadline and deadline-monotime()) if not ok then return ok, err, errno end sent = sent + SETTINGS_MAX_FRAME_SIZE end do local partial = payload:sub(sent+1) local ok, err, errno = self:write_continuation_frame(partial, true, deadline and deadline-monotime()) if not ok then return ok, err, errno end end end return true end function stream_methods:write_headers(headers, end_stream, timeout) assert(headers, "missing argument: headers") assert(validate_headers(headers, self.type == "client", self.stats_sent_headers+1, end_stream)) assert(type(end_stream) == "boolean", "'end_stream' MUST be a boolean") local padded, exclusive, stream_dep, weight = nil, nil, nil, nil return write_headers(self, function(payload, end_headers, deadline) return self:write_headers_frame(payload, end_stream, end_headers, padded, exclusive, stream_dep, weight, deadline and deadline-monotime()) end, headers, 0, timeout) end function stream_methods:push_promise(headers, timeout) assert(self.type == "server") assert(headers, "missing argument: headers") assert(validate_headers(headers, true, 1, false)) assert(headers:has(":authority"), "PUSH_PROMISE must have an :authority") local promised_stream = self.connection:new_stream() promised_stream:pick_id() self:reprioritise(promised_stream) local padded = nil local ok, err, errno = write_headers(self, function(payload, end_headers, deadline) return self:write_push_promise_frame(promised_stream.id, payload, end_headers, padded, deadline) end, headers, 4, timeout) -- 4 is size of promised stream id if not ok then return nil, err, errno end promised_stream.recv_headers_fifo:push(headers) promised_stream.recv_headers_cond:signal() return promised_stream end function stream_methods:write_chunk(payload, end_stream, timeout) local deadline = timeout and (monotime()+timeout) local sent = 0 while true do while self.peer_flow_credits <= 0 do local which = cqueues.poll(self.peer_flow_credits_change, self.connection, timeout) if which == self.connection then local ok, err, errno = self.connection:step(0) if not ok then return nil, err, errno end elseif which == timeout then return nil, ce.strerror(ce.ETIMEDOUT), ce.ETIMEDOUT end timeout = deadline and (deadline-monotime()) end while self.connection.peer_flow_credits <= 0 do local which = cqueues.poll(self.connection.peer_flow_credits_change, self.connection, timeout) if which == self.connection then local ok, err, errno = self.connection:step(0) if not ok then return nil, err, errno end elseif which == timeout then return nil, ce.strerror(ce.ETIMEDOUT), ce.ETIMEDOUT end timeout = deadline and (deadline-monotime()) end local SETTINGS_MAX_FRAME_SIZE = 
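--[=[ Worked example (illustrative only) of the flow-control bound computed
just below: with stream credits of 100, connection credits of 65535 and a
peer MAX_FRAME_SIZE of 16384, max_available is min(100, 65535, 16384) == 100,
so at most a 100-octet DATA frame goes out now; both windows then shrink by
100 and the loop waits on peer_flow_credits_change (signalled by incoming
WINDOW_UPDATE frames) before sending the remainder.
]=]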
self.connection.peer_settings[known_settings.MAX_FRAME_SIZE] local max_available = math.min(self.peer_flow_credits, self.connection.peer_flow_credits, SETTINGS_MAX_FRAME_SIZE) if max_available < (#payload - sent) then if max_available > 0 then -- send partial payload local ok, err, errno = self:write_data_frame(payload:sub(sent+1, sent+max_available), false, false, timeout) if not ok then return nil, err, errno end sent = sent + max_available end else break end timeout = deadline and (deadline-monotime()) end local ok, err, errno = self:write_data_frame(payload:sub(sent+1), end_stream, false, timeout) if not ok then return nil, err, errno end return true end return { new = new_stream; methods = stream_methods; mt = stream_mt; known_settings = known_settings; frame_types = frame_types; frame_handlers = frame_handlers; pack_settings_payload = pack_settings_payload; } lua-http-0.4/http/headers.lua000066400000000000000000000116101400726324600162130ustar00rootroot00000000000000--[[ HTTP Header data structure/type Design criteria: - the same header field is allowed more than once - must be able to fetch separate occurences (important for some headers e.g. Set-Cookie) - optionally available as comma separated list - http2 adds flag to headers that they should never be indexed - header order should be recoverable I chose to implement headers as an array of entries. An index of field name => array indices is kept. ]] local unpack = table.unpack or unpack -- luacheck: ignore 113 143 local entry_methods = {} local entry_mt = { __name = "http.headers.entry"; __index = entry_methods; } local never_index_defaults = { authorization = true; ["proxy-authorization"] = true; cookie = true; ["set-cookie"] = true; } local function new_entry(name, value, never_index) if never_index == nil then never_index = never_index_defaults[name] or false end return setmetatable({ name = name; value = value; never_index = never_index; }, entry_mt) end function entry_methods:modify(value, never_index) self.value = value if never_index == nil then never_index = never_index_defaults[self.name] or false end self.never_index = never_index end function entry_methods:unpack() return self.name, self.value, self.never_index end function entry_methods:clone() return new_entry(self.name, self.value, self.never_index) end local headers_methods = {} local headers_mt = { __name = "http.headers"; __index = headers_methods; } local function new_headers() return setmetatable({ _n = 0; _data = {}; _index = {}; }, headers_mt) end function headers_methods:len() return self._n end headers_mt.__len = headers_methods.len function headers_mt:__tostring() return string.format("http.headers{%d headers}", self._n) end local function add_to_index(_index, name, i) local dex = _index[name] if dex == nil then dex = {n=1, i} _index[name] = dex else local n = dex.n + 1 dex[n] = i dex.n = n end end local function rebuild_index(self) local index = {} for i=1, self._n do local entry = self._data[i] add_to_index(index, entry.name, i) end self._index = index end function headers_methods:clone() local index, new_data = {}, {} for i=1, self._n do local entry = self._data[i] new_data[i] = entry:clone() add_to_index(index, entry.name, i) end return setmetatable({ _n = self._n; _data = new_data; _index = index; }, headers_mt) end function headers_methods:append(name, ...) local n = self._n + 1 self._data[n] = new_entry(name, ...) 
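--[=[ Usage sketch (illustrative, not part of the module) for this headers
container: entries keep insertion order in `_data` while `_index` maps each
field name to the array positions holding it, which keeps repeated fields
(e.g. set-cookie) and ordered iteration both cheap.

  local new_headers = require "http.headers".new
  local h = new_headers()
  h:append(":status", "200")
  h:append("set-cookie", "a=1")
  h:append("set-cookie", "b=2")
  assert(h:get_as_sequence("set-cookie").n == 2)
  assert(h:get_comma_separated("set-cookie") == "a=1,b=2")
  h:upsert("content-type", "text/plain")
  assert(select(3, h:geti(2)) == true) -- set-cookie defaults to never_index
]=]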
add_to_index(self._index, name, n) self._n = n end function headers_methods:each() local i = 0 return function(self) -- luacheck: ignore 432 if i >= self._n then return end i = i + 1 local entry = self._data[i] return entry:unpack() end, self end headers_mt.__pairs = headers_methods.each function headers_methods:has(name) local dex = self._index[name] return dex ~= nil end function headers_methods:delete(name) local dex = self._index[name] if dex then local n = dex.n for i=n, 1, -1 do table.remove(self._data, dex[i]) end self._n = self._n - n rebuild_index(self) return true else return false end end function headers_methods:geti(i) local e = self._data[i] if e == nil then return nil end return e:unpack() end function headers_methods:get_as_sequence(name) local dex = self._index[name] if dex == nil then return { n = 0; } end local r = { n = dex.n; } for i=1, r.n do r[i] = self._data[dex[i]].value end return r end function headers_methods:get(name) local r = self:get_as_sequence(name) return unpack(r, 1, r.n) end function headers_methods:get_comma_separated(name) local r = self:get_as_sequence(name) if r.n == 0 then return nil else return table.concat(r, ",", 1, r.n) end end function headers_methods:modifyi(i, ...) local e = self._data[i] if e == nil then error("invalid index") end e:modify(...) end function headers_methods:upsert(name, ...) local dex = self._index[name] if dex == nil then self:append(name, ...) else assert(dex[2] == nil, "Cannot upsert multi-valued field") self:modifyi(dex[1], ...) end end local function default_cmp(a, b) if a.name ~= b.name then -- Things with a colon *must* be before others local a_is_colon = a.name:sub(1,1) == ":" local b_is_colon = b.name:sub(1,1) == ":" if a_is_colon and not b_is_colon then return true elseif not a_is_colon and b_is_colon then return false else return a.name < b.name end end if a.value ~= b.value then return a.value < b.value end return a.never_index end function headers_methods:sort() table.sort(self._data, default_cmp) rebuild_index(self) end function headers_methods:dump(file, prefix) file = file or io.stderr prefix = prefix or "" for name, value in self:each() do assert(file:write(string.format("%s%s: %s\n", prefix, name, value))) end assert(file:flush()) end return { new = new_headers; methods = headers_methods; mt = headers_mt; } lua-http-0.4/http/headers.tld000066400000000000000000000012571400726324600162230ustar00rootroot00000000000000interface headers const clone: (self) -> (headers) const append: (self, string, string, boolean?) -> () const each: (self) -> ((self) -> (string, string, boolean)) const has: (self, string) -> (boolean) const delete: (self, string) -> (boolean) const geti: (self, integer) -> (string, string, boolean) const get_as_sequence: (self, string) -> ({"n": integer, integer:string}) const get: (self, string) -> (string*) const get_comma_separated: (self, string) -> (string|nil) const modifyi: (self, integer, string, boolean?) -> () const upsert: (self, string, string, boolean?) -> () const sort: (self) -> () const dump: (self, file?, string?) 
-> () end new : () -> (headers) lua-http-0.4/http/hpack.lua000066400000000000000000000644631400726324600157040ustar00rootroot00000000000000-- This module implements HPACK - Header Compression for HTTP/2 -- Reference documentation: https://http2.github.io/http2-spec/compression.html local schar = string.char local spack = string.pack or require "compat53.string".pack -- luacheck: ignore 143 local sunpack = string.unpack or require "compat53.string".unpack -- luacheck: ignore 143 local band = require "http.bit".band local bor = require "http.bit".bor local new_headers = require "http.headers".new local unpack = table.unpack or unpack -- luacheck: ignore 113 143 local h2_errors = require "http.h2_error".errors -- Section 5.1 local function encode_integer(i, prefix_len, mask) assert(i >= 0 and i % 1 == 0) assert(prefix_len >= 0 and prefix_len <= 8 and prefix_len % 1 == 0) assert(mask >= 0 and mask <= 256 and mask % 1 == 0) if i < 2^prefix_len then return schar(bor(mask, i)) else local prefix_mask = 2^prefix_len-1 local chars = { bor(prefix_mask, mask); } local j = 2 i = i - prefix_mask while i >= 128 do chars[j] = i % 128 + 128 j = j + 1 i = math.floor(i / 128) end chars[j] = i return schar(unpack(chars, 1, j)) end end local function decode_integer(str, prefix_len, pos) pos = pos or 1 local prefix_mask = 2^prefix_len-1 if pos > #str then return end local I = band(prefix_mask, str:byte(pos, pos)) if I == prefix_mask then local M = 0 repeat pos = pos + 1 if pos > #str then return end local B = str:byte(pos, pos) I = I + band(B, 127) * 2^M M = M + 7 until band(B, 128) ~= 128 end return I, pos+1 end local huffman_decode, huffman_encode do local huffman_codes = { [ 0] = "1111111111000"; [ 1] = "11111111111111111011000"; [ 2] = "1111111111111111111111100010"; [ 3] = "1111111111111111111111100011"; [ 4] = "1111111111111111111111100100"; [ 5] = "1111111111111111111111100101"; [ 6] = "1111111111111111111111100110"; [ 7] = "1111111111111111111111100111"; [ 8] = "1111111111111111111111101000"; [ 9] = "111111111111111111101010"; [ 10] = "111111111111111111111111111100"; [ 11] = "1111111111111111111111101001"; [ 12] = "1111111111111111111111101010"; [ 13] = "111111111111111111111111111101"; [ 14] = "1111111111111111111111101011"; [ 15] = "1111111111111111111111101100"; [ 16] = "1111111111111111111111101101"; [ 17] = "1111111111111111111111101110"; [ 18] = "1111111111111111111111101111"; [ 19] = "1111111111111111111111110000"; [ 20] = "1111111111111111111111110001"; [ 21] = "1111111111111111111111110010"; [ 22] = "111111111111111111111111111110"; [ 23] = "1111111111111111111111110011"; [ 24] = "1111111111111111111111110100"; [ 25] = "1111111111111111111111110101"; [ 26] = "1111111111111111111111110110"; [ 27] = "1111111111111111111111110111"; [ 28] = "1111111111111111111111111000"; [ 29] = "1111111111111111111111111001"; [ 30] = "1111111111111111111111111010"; [ 31] = "1111111111111111111111111011"; [ 32] = "010100"; [ 33] = "1111111000"; [ 34] = "1111111001"; [ 35] = "111111111010"; [ 36] = "1111111111001"; [ 37] = "010101"; [ 38] = "11111000"; [ 39] = "11111111010"; [ 40] = "1111111010"; [ 41] = "1111111011"; [ 42] = "11111001"; [ 43] = "11111111011"; [ 44] = "11111010"; [ 45] = "010110"; [ 46] = "010111"; [ 47] = "011000"; [ 48] = "00000"; [ 49] = "00001"; [ 50] = "00010"; [ 51] = "011001"; [ 52] = "011010"; [ 53] = "011011"; [ 54] = "011100"; [ 55] = "011101"; [ 56] = "011110"; [ 57] = "011111"; [ 58] = "1011100"; [ 59] = "11111011"; [ 60] = "111111111111100"; [ 61] = "100000"; [ 62] = "111111111011"; [ 
63] = "1111111100"; [ 64] = "1111111111010"; [ 65] = "100001"; [ 66] = "1011101"; [ 67] = "1011110"; [ 68] = "1011111"; [ 69] = "1100000"; [ 70] = "1100001"; [ 71] = "1100010"; [ 72] = "1100011"; [ 73] = "1100100"; [ 74] = "1100101"; [ 75] = "1100110"; [ 76] = "1100111"; [ 77] = "1101000"; [ 78] = "1101001"; [ 79] = "1101010"; [ 80] = "1101011"; [ 81] = "1101100"; [ 82] = "1101101"; [ 83] = "1101110"; [ 84] = "1101111"; [ 85] = "1110000"; [ 86] = "1110001"; [ 87] = "1110010"; [ 88] = "11111100"; [ 89] = "1110011"; [ 90] = "11111101"; [ 91] = "1111111111011"; [ 92] = "1111111111111110000"; [ 93] = "1111111111100"; [ 94] = "11111111111100"; [ 95] = "100010"; [ 96] = "111111111111101"; [ 97] = "00011"; [ 98] = "100011"; [ 99] = "00100"; [100] = "100100"; [101] = "00101"; [102] = "100101"; [103] = "100110"; [104] = "100111"; [105] = "00110"; [106] = "1110100"; [107] = "1110101"; [108] = "101000"; [109] = "101001"; [110] = "101010"; [111] = "00111"; [112] = "101011"; [113] = "1110110"; [114] = "101100"; [115] = "01000"; [116] = "01001"; [117] = "101101"; [118] = "1110111"; [119] = "1111000"; [120] = "1111001"; [121] = "1111010"; [122] = "1111011"; [123] = "111111111111110"; [124] = "11111111100"; [125] = "11111111111101"; [126] = "1111111111101"; [127] = "1111111111111111111111111100"; [128] = "11111111111111100110"; [129] = "1111111111111111010010"; [130] = "11111111111111100111"; [131] = "11111111111111101000"; [132] = "1111111111111111010011"; [133] = "1111111111111111010100"; [134] = "1111111111111111010101"; [135] = "11111111111111111011001"; [136] = "1111111111111111010110"; [137] = "11111111111111111011010"; [138] = "11111111111111111011011"; [139] = "11111111111111111011100"; [140] = "11111111111111111011101"; [141] = "11111111111111111011110"; [142] = "111111111111111111101011"; [143] = "11111111111111111011111"; [144] = "111111111111111111101100"; [145] = "111111111111111111101101"; [146] = "1111111111111111010111"; [147] = "11111111111111111100000"; [148] = "111111111111111111101110"; [149] = "11111111111111111100001"; [150] = "11111111111111111100010"; [151] = "11111111111111111100011"; [152] = "11111111111111111100100"; [153] = "111111111111111011100"; [154] = "1111111111111111011000"; [155] = "11111111111111111100101"; [156] = "1111111111111111011001"; [157] = "11111111111111111100110"; [158] = "11111111111111111100111"; [159] = "111111111111111111101111"; [160] = "1111111111111111011010"; [161] = "111111111111111011101"; [162] = "11111111111111101001"; [163] = "1111111111111111011011"; [164] = "1111111111111111011100"; [165] = "11111111111111111101000"; [166] = "11111111111111111101001"; [167] = "111111111111111011110"; [168] = "11111111111111111101010"; [169] = "1111111111111111011101"; [170] = "1111111111111111011110"; [171] = "111111111111111111110000"; [172] = "111111111111111011111"; [173] = "1111111111111111011111"; [174] = "11111111111111111101011"; [175] = "11111111111111111101100"; [176] = "111111111111111100000"; [177] = "111111111111111100001"; [178] = "1111111111111111100000"; [179] = "111111111111111100010"; [180] = "11111111111111111101101"; [181] = "1111111111111111100001"; [182] = "11111111111111111101110"; [183] = "11111111111111111101111"; [184] = "11111111111111101010"; [185] = "1111111111111111100010"; [186] = "1111111111111111100011"; [187] = "1111111111111111100100"; [188] = "11111111111111111110000"; [189] = "1111111111111111100101"; [190] = "1111111111111111100110"; [191] = "11111111111111111110001"; [192] = "11111111111111111111100000"; [193] = 
"11111111111111111111100001"; [194] = "11111111111111101011"; [195] = "1111111111111110001"; [196] = "1111111111111111100111"; [197] = "11111111111111111110010"; [198] = "1111111111111111101000"; [199] = "1111111111111111111101100"; [200] = "11111111111111111111100010"; [201] = "11111111111111111111100011"; [202] = "11111111111111111111100100"; [203] = "111111111111111111111011110"; [204] = "111111111111111111111011111"; [205] = "11111111111111111111100101"; [206] = "111111111111111111110001"; [207] = "1111111111111111111101101"; [208] = "1111111111111110010"; [209] = "111111111111111100011"; [210] = "11111111111111111111100110"; [211] = "111111111111111111111100000"; [212] = "111111111111111111111100001"; [213] = "11111111111111111111100111"; [214] = "111111111111111111111100010"; [215] = "111111111111111111110010"; [216] = "111111111111111100100"; [217] = "111111111111111100101"; [218] = "11111111111111111111101000"; [219] = "11111111111111111111101001"; [220] = "1111111111111111111111111101"; [221] = "111111111111111111111100011"; [222] = "111111111111111111111100100"; [223] = "111111111111111111111100101"; [224] = "11111111111111101100"; [225] = "111111111111111111110011"; [226] = "11111111111111101101"; [227] = "111111111111111100110"; [228] = "1111111111111111101001"; [229] = "111111111111111100111"; [230] = "111111111111111101000"; [231] = "11111111111111111110011"; [232] = "1111111111111111101010"; [233] = "1111111111111111101011"; [234] = "1111111111111111111101110"; [235] = "1111111111111111111101111"; [236] = "111111111111111111110100"; [237] = "111111111111111111110101"; [238] = "11111111111111111111101010"; [239] = "11111111111111111110100"; [240] = "11111111111111111111101011"; [241] = "111111111111111111111100110"; [242] = "11111111111111111111101100"; [243] = "11111111111111111111101101"; [244] = "111111111111111111111100111"; [245] = "111111111111111111111101000"; [246] = "111111111111111111111101001"; [247] = "111111111111111111111101010"; [248] = "111111111111111111111101011"; [249] = "1111111111111111111111111110"; [250] = "111111111111111111111101100"; [251] = "111111111111111111111101101"; [252] = "111111111111111111111101110"; [253] = "111111111111111111111101111"; [254] = "111111111111111111111110000"; [255] = "11111111111111111111101110"; EOS = "111111111111111111111111111111"; } local function bit_string_to_byte(bitstring) return string.char(tonumber(bitstring, 2)) end huffman_encode = function(s) -- [TODO]: optimize local t = { s:byte(1, -1) } for i=1, #s do t[i] = huffman_codes[t[i]] end local bitstring = table.concat(t) -- round up to next octet bitstring = bitstring .. ("1"):rep(7 - (#bitstring - 1) % 8) local bytes = bitstring:gsub("........", bit_string_to_byte) return bytes end -- Build tree for huffman decoder local huffman_tree = {} for k, v in pairs(huffman_codes) do local prev_node local node = huffman_tree local lr for j=1, #v do lr = v:sub(j, j) prev_node = node node = prev_node[lr] if node == nil then node = {} prev_node[lr] = node end end prev_node[lr] = k end local byte_to_bitstring = {} for i=0, 255 do local val = "" for j=7, 0, -1 do val = val .. 
(band(i, 2^j) ~= 0 and "1" or "0") end byte_to_bitstring[string.char(i)] = val end local EOS_length = #huffman_codes.EOS huffman_decode = function(s) local bitstring = s:gsub(".", byte_to_bitstring) local node = huffman_tree local output = {} for c in bitstring:gmatch(".") do node = node[c] local nt = type(node) if nt == "number" then table.insert(output, node) node = huffman_tree elseif node == "EOS" then -- 5.2: A Huffman encoded string literal containing the EOS symbol MUST be treated as a decoding error. return nil, h2_errors.COMPRESSION_ERROR:new_traceback("invalid huffman code (EOS)") elseif nt ~= "table" then return nil, h2_errors.COMPRESSION_ERROR:new_traceback("invalid huffman code") end end --[[ Ensure that any left over bits are all one. Section 5.2: A padding not corresponding to the most significant bits of the code for the EOS symbol MUST be treated as a decoding error]] if node ~= huffman_tree then -- We check this by continuing through on the '1' branch and ensure that we end up at EOS local n_padding = EOS_length while type(node) == "table" do node = node["1"] n_padding = n_padding - 1 end if node ~= "EOS" then return nil, h2_errors.COMPRESSION_ERROR:new_traceback("invalid huffman padding: expected most significant bits to match EOS") end -- Section 5.2: A padding strictly longer than 7 bits MUST be treated as a decoding error if n_padding < 0 or n_padding >= 8 then return nil, h2_errors.COMPRESSION_ERROR:new_traceback("invalid huffman padding: too much padding") end end return string.char(unpack(output)) end end --[[ Section 5.2, String Literal Representation Huffman is a tristate. - true: always use huffman encoding - false: never use huffman encoding - nil: don't care ]] local function encode_string(s, huffman) -- For now we default to huffman off -- In future: encode with huffman, if the string is shorter, use it. if huffman then s = huffman_encode(s) return encode_integer(#s, 7, 0x80) .. s else return encode_integer(#s, 7, 0) .. 
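--[=[ Worked example (illustrative only) of the prefix-integer scheme used by
the module-local encode_integer/encode_string helpers in this block
(RFC 7541 Section 5.1): values smaller than 2^prefix_len fit in the prefix
octet, larger values spill into continuation octets of 7 bits each.

  assert(encode_integer(2, 7, 0) == "\2") -- fits in the 7-bit prefix
  assert(encode_integer(1337, 5, 0) == "\31\154\10") -- 31, then 1306 in 7-bit groups
  -- A non-Huffman string literal is its length as a 7-bit prefix integer
  -- (H bit clear) followed by the raw octets:
  assert(encode_string("hi", false) == "\2hi")
]=]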
s end end local function decode_string(str, pos) pos = pos or 1 if pos > #str then return end local first_byte = str:byte(pos, pos) local huffman = band(first_byte, 0x80) ~= 0 local len len, pos = decode_integer(str, 7, pos) if len == nil then return end local newpos = pos + len if newpos > #str+1 then return end local val = str:sub(pos, newpos-1) if huffman then local err val, err = huffman_decode(val) if not val then return nil, err end end return val, newpos end local function compound_key(name, value) return spack("s4s4", name, value) end local function uncompound_key(key) return sunpack("s4s4", key) end -- Section 4.1 local function dynamic_table_entry_size(k) return 32 - 8 + #k -- 8 is number of bytes of overhead introduced by compound_key end local static_names_to_index = {} local static_pairs = {} local max_static_index do -- We prefer earlier indexes as examples in spec are like that local function p(i, name, value) if not static_names_to_index[name] then static_names_to_index[name] = i end local k = compound_key(name, value or "") static_pairs[k] = i static_pairs[i] = k end p( 1, ":authority") p( 2, ":method", "GET") p( 3, ":method", "POST") p( 4, ":path", "/") p( 5, ":path", "/index.html") p( 6, ":scheme", "http") p( 7, ":scheme", "https") p( 8, ":status", "200") p( 9, ":status", "204") p(10, ":status", "206") p(11, ":status", "304") p(12, ":status", "400") p(13, ":status", "404") p(14, ":status", "500") p(15, "accept-charset") p(16, "accept-encoding", "gzip, deflate") p(17, "accept-language") p(18, "accept-ranges") p(19, "accept") p(20, "access-control-allow-origin") p(21, "age") p(22, "allow") p(23, "authorization") p(24, "cache-control") p(25, "content-disposition") p(26, "content-encoding") p(27, "content-language") p(28, "content-length") p(29, "content-location") p(30, "content-range") p(31, "content-type") p(32, "cookie") p(33, "date") p(34, "etag") p(35, "expect") p(36, "expires") p(37, "from") p(38, "host") p(39, "if-match") p(40, "if-modified-since") p(41, "if-none-match") p(42, "if-range") p(43, "if-unmodified-since") p(44, "last-modified") p(45, "link") p(46, "location") p(47, "max-forwards") p(48, "proxy-authenticate") p(49, "proxy-authorization") p(50, "range") p(51, "referer") p(52, "refresh") p(53, "retry-after") p(54, "server") p(55, "set-cookie") p(56, "strict-transport-security") p(57, "transfer-encoding") p(58, "user-agent") p(59, "vary") p(60, "via") p(61, "www-authenticate") max_static_index = 61 end -- Section 6.1 local function encode_indexed_header(index) assert(index > 0) return encode_integer(index, 7, 0x80) end -- Section 6.2.1 local function encode_literal_header_indexed(index, value, huffman) return encode_integer(index, 6, 0x40) .. encode_string(value, huffman) end local function encode_literal_header_indexed_new(name, value, huffman) return "\64" .. encode_string(name, huffman) .. encode_string(value, huffman) end -- Section 6.2.2 local function encode_literal_header_none(index, value, huffman) return encode_integer(index, 4, 0) .. encode_string(value, huffman) end local function encode_literal_header_none_new(name, value, huffman) return "\0" .. encode_string(name, huffman) .. encode_string(value, huffman) end -- Section 6.2.3 local function encode_literal_header_never(index, value, huffman) return encode_integer(index, 4, 0x10) .. encode_string(value, huffman) end local function encode_literal_header_never_new(name, value, huffman) return "\16" .. encode_string(name, huffman) .. 
encode_string(value, huffman) end -- Section 6.3 local function encode_max_size(n) return encode_integer(n, 5, 0x20) end --[[ "class" to represent an encoding/decoding context This object encapulates a dynamic table The FIFO implementation uses an ever growing head/tail; with the exception that when empty, the indexes are reset. This requires indexes to be corrected, as in the specification the 'newest' item is always just after the static section. ]] local methods = {} local mt = { __name = "http.hpack"; __index = methods; } local function new(SETTINGS_HEADER_TABLE_SIZE) local self = { dynamic_names_to_indexes = {}; dynamic_pairs = {}; dynamic_index_head = 1; dynamic_index_tail = 0; dynamic_current_size = 0; dynamic_max = nil; -- filled in below total_max = SETTINGS_HEADER_TABLE_SIZE or 0; data = {}; } self.dynamic_max = self.total_max; return setmetatable(self, mt) end function methods:append_data(val) table.insert(self.data, val) return self end function methods:render_data() return table.concat(self.data) end function methods:clear_data() self.data = {} return true end -- Returns a boolean indicating if an entry was successfully removed function methods:evict_from_dynamic_table() local old_head = self.dynamic_index_head if old_head > self.dynamic_index_tail then return false end local pair = self.dynamic_pairs[old_head] if self.dynamic_pairs[pair] == old_head then -- don't want to evict a duplicate entry (2.3.2) self.dynamic_pairs[pair] = nil end self.dynamic_pairs[old_head] = nil local name = self.dynamic_names_to_indexes[old_head] if name ~= nil then if self.dynamic_names_to_indexes[name] == old_head then self.dynamic_names_to_indexes[name] = nil end self.dynamic_names_to_indexes[old_head] = nil end local old_entry_size = dynamic_table_entry_size(pair) self.dynamic_current_size = self.dynamic_current_size - old_entry_size if self.dynamic_current_size == 0 then -- [Premature Optimisation]: reset to head at 1 and tail at 0 self.dynamic_index_head = 1 self.dynamic_index_tail = 0 else self.dynamic_index_head = old_head + 1 end return true end -- Returns a string in the format of the examples in the spec function methods:dynamic_table_tostring() local r = {} local size = 0 for i=self.dynamic_index_tail, self.dynamic_index_head, -1 do local pair = self.dynamic_pairs[i] local name, value = uncompound_key(pair) local entry_size = dynamic_table_entry_size(pair) local j = self.dynamic_index_tail - i + 1 local line = string.format("[%3i] (s =%4d) %s: %s", j, entry_size, name, value) line = line:gsub(("."):rep(68), "%0\\\n ") -- Wrap lines size = size + entry_size table.insert(r, line) end table.insert(r, string.format(" Table size:%4d", size)) return table.concat(r, "\n") end function methods:set_max_dynamic_table_size(SETTINGS_HEADER_TABLE_SIZE) self.total_max = SETTINGS_HEADER_TABLE_SIZE return true end function methods:encode_max_size(val) self:append_data(encode_max_size(val)) return true end -- Section 4.3 function methods:resize_dynamic_table(new_size) assert(new_size >= 0) if new_size > self.total_max then return nil, h2_errors.COMPRESSION_ERROR:new_traceback("Dynamic Table size update new maximum size MUST be lower than or equal to the limit") end while new_size < self.dynamic_current_size do assert(self:evict_from_dynamic_table()) end self.dynamic_max = new_size return true end function methods:add_to_dynamic_table(name, value, k) -- luacheck: ignore 212 -- Early exit if we can't fit into dynamic table if self.dynamic_max == 0 then return true end local new_entry_size = 
dynamic_table_entry_size(k) -- Evict old entries until we can fit, Section 4.4 while self.dynamic_current_size + new_entry_size > self.dynamic_max do if not self:evict_from_dynamic_table() then --[[It is not an error to attempt to add an entry that is larger than the maximum size; an attempt to add an entry larger than the maximum size causes the table to be emptied of all existing entries, and results in an empty table.]] return true end end -- Increment current index local index = self.dynamic_index_tail + 1 self.dynamic_index_tail = index -- Add to dynamic table self.dynamic_pairs[k] = index self.dynamic_pairs[index] = k -- [Premature Optimisation]: Don't both putting it in dynamic table if it's in static table if static_names_to_index[name] == nil then self.dynamic_names_to_indexes[index] = name self.dynamic_names_to_indexes[name] = index -- This intentionally overwrites to keep up to date end self.dynamic_current_size = self.dynamic_current_size + new_entry_size return true end function methods:dynamic_table_id_to_index(id) return max_static_index + self.dynamic_index_tail - id + 1 end methods.dynamic_index_to_table_id = methods.dynamic_table_id_to_index function methods:lookup_pair_index(k) local pair_static_index = static_pairs[k] if pair_static_index ~= nil then return pair_static_index end local pair_dynamic_id = self.dynamic_pairs[k] if pair_dynamic_id then return self:dynamic_table_id_to_index(pair_dynamic_id) end return nil end function methods:lookup_name_index(name) local name_static_index = static_names_to_index[name] if name_static_index then return name_static_index end local name_dynamic_id = self.dynamic_names_to_indexes[name] if name_dynamic_id then return self:dynamic_table_id_to_index(name_dynamic_id) end return nil end function methods:lookup_index(index) if index <= max_static_index then local k = static_pairs[index] if k then return uncompound_key(k) end else -- Dynamic? 
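--[[ Dynamic entries share a single index space with the static table: the most
recently inserted pair is addressed just past the last static index. As a worked
example (the numbers are illustrative, not taken from the source): with
max_static_index of 61 and dynamic_index_tail of 3, internal id 3 maps to index
61 + 3 - 3 + 1 = 62, while the oldest surviving entry gets the largest index. ]]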
local id = self:dynamic_index_to_table_id(index) local k = self.dynamic_pairs[id] if k then return uncompound_key(k) end end return end function methods:add_header_indexed(name, value, huffman) local k = compound_key(name, value) local pair_index = self:lookup_pair_index(k) if pair_index then local data = encode_indexed_header(pair_index) return self:append_data(data) end local name_index = self:lookup_name_index(name) if name_index then local data = encode_literal_header_indexed(name_index, value, huffman) self:add_to_dynamic_table(name, value, k) return self:append_data(data) end -- Never before seen name local data = encode_literal_header_indexed_new(name, value, huffman) self:add_to_dynamic_table(name, value, k) return self:append_data(data) end function methods:add_header_never_indexed(name, value, huffman) local name_index = self:lookup_name_index(name) if name_index then local data = encode_literal_header_never(name_index, value, huffman) return self:append_data(data) end -- Never before seen name local data = encode_literal_header_never_new(name, value, huffman) return self:append_data(data) end function methods:encode_headers(headers) for name, value, never_index in headers:each() do if never_index then self:add_header_never_indexed(name, value) else self:add_header_indexed(name, value) end end return true end local function decode_header_helper(self, payload, prefix_len, pos) local index, name, value index, pos = decode_integer(payload, prefix_len, pos) if index == nil then return index, pos end if index == 0 then name, pos = decode_string(payload, pos) if name == nil then return name, pos end value, pos = decode_string(payload, pos) if value == nil then return value, pos end else name = self:lookup_index(index) if name == nil then return nil, h2_errors.COMPRESSION_ERROR:new_traceback(string.format("index %d not found in table", index)) end value, pos = decode_string(payload, pos) if value == nil then return value, pos end end return name, value, pos end function methods:decode_headers(payload, header_list, pos) header_list = header_list or new_headers() pos = pos or 1 while pos <= #payload do local first_byte = payload:byte(pos, pos) if band(first_byte, 0x80) ~= 0 then -- Section 6.1 -- indexed header local index, newpos = decode_integer(payload, 7, pos) if index == nil then break end pos = newpos local name, value = self:lookup_index(index) if name == nil then return nil, h2_errors.COMPRESSION_ERROR:new_traceback(string.format("index %d not found in table", index)) end header_list:append(name, value, false) elseif band(first_byte, 0x40) ~= 0 then -- Section 6.2.1 local name, value, newpos = decode_header_helper(self, payload, 6, pos) if name == nil then if value == nil then break -- EOF end return nil, value end pos = newpos self:add_to_dynamic_table(name, value, compound_key(name, value)) header_list:append(name, value, false) elseif band(first_byte, 0x20) ~= 0 then -- Section 6.3 --[[ Section 4.2 This dynamic table size update MUST occur at the beginning of the first header block following the change to the dynamic table size. 
In HTTP/2, this follows a settings acknowledgment.]] if header_list:len() > 0 then return nil, h2_errors.COMPRESSION_ERROR:new_traceback("dynamic table size update MUST occur at the beginning of a header block") end local size, newpos = decode_integer(payload, 5, pos) if size == nil then break end pos = newpos local ok, err = self:resize_dynamic_table(size) if not ok then return nil, err end else -- Section 6.2.2 and 6.2.3 local never_index = band(first_byte, 0x10) ~= 0 local name, value, newpos = decode_header_helper(self, payload, 4, pos) if name == nil then if value == nil then break -- EOF end return nil, value end pos = newpos header_list:append(name, value, never_index) end end return header_list, pos end return { new = new; methods = methods; mt = mt; encode_integer = encode_integer; decode_integer = decode_integer; encode_string = encode_string; decode_string = decode_string; encode_indexed_header = encode_indexed_header; encode_literal_header_indexed = encode_literal_header_indexed; encode_literal_header_indexed_new = encode_literal_header_indexed_new; encode_literal_header_none = encode_literal_header_none; encode_literal_header_none_new = encode_literal_header_none_new; encode_literal_header_never = encode_literal_header_never; encode_literal_header_never_new = encode_literal_header_never_new; encode_max_size = encode_max_size; } lua-http-0.4/http/hsts.lua000066400000000000000000000057301400726324600155670ustar00rootroot00000000000000--[[ Data structures useful for HSTS (HTTP Strict Transport Security) HSTS is described in RFC 6797 ]] local binaryheap = require "binaryheap" local http_util = require "http.util" local store_methods = { time = function() return os.time() end; max_items = (1e999); } local store_mt = { __name = "http.hsts.store"; __index = store_methods; } local store_item_methods = {} local store_item_mt = { __name = "http.hsts.store_item"; __index = store_item_methods; } local function new_store() return setmetatable({ domains = {}; expiry_heap = binaryheap.minUnique(); n_items = 0; }, store_mt) end function store_methods:clone() local r = new_store() r.time = rawget(self, "time") r.n_items = rawget(self, "n_items") r.expiry_heap = binaryheap.minUnique() for host, item in pairs(self.domains) do r.domains[host] = item r.expiry_heap:insert(item.expires, item) end return r end function store_methods:store(host, directives) local now = self.time() local max_age = directives["max-age"] if max_age == nil then return nil, "max-age directive is required" elseif type(max_age) ~= "string" or max_age:match("[^0-9]") then return nil, "max-age directive does not match grammar" else max_age = tonumber(max_age, 10) end -- Clean now so that we can assume there are no expired items in store self:clean() if max_age == 0 then return self:remove(host) else if http_util.is_ip(host) then return false end -- add to store local old_item = self.domains[host] if old_item then self.expiry_heap:remove(old_item) else local n_items = self.n_items if n_items >= self.max_items then return false end self.n_items = n_items + 1 end local expires = now + max_age local item = setmetatable({ host = host; includeSubdomains = directives.includeSubdomains; expires = expires; }, store_item_mt) self.domains[host] = item self.expiry_heap:insert(expires, item) end return true end function store_methods:remove(host) local item = self.domains[host] if item then self.expiry_heap:remove(item) self.domains[host] = nil self.n_items = self.n_items - 1 end return true end function store_methods:check(host) if 
http_util.is_ip(host) then return false end -- Clean now so that we can assume there are no expired items in store self:clean() local h = host repeat local item = self.domains[h] if item then if host == h or item.includeSubdomains then return true end end local n h, n = h:gsub("^[^%.]+%.", "", 1) until n == 0 return false end function store_methods:clean_due() local next_expiring = self.expiry_heap:peek() if not next_expiring then return (1e999) end return next_expiring.expires end function store_methods:clean() local now = self.time() while self:clean_due() < now do local item = self.expiry_heap:pop() self.domains[item.host] = nil self.n_items = self.n_items - 1 end return true end return { new_store = new_store; store_mt = store_mt; store_methods = store_methods; } lua-http-0.4/http/hsts.tld000066400000000000000000000005341400726324600155660ustar00rootroot00000000000000interface hsts_store time: () -> (number) max_items: number clone: (self) -> (hsts_store) store: (self, string, {string:string}) -> (boolean) remove: (self, string) -> (boolean) check: (self, hsts_store) -> (boolean) const clean_due: (self) -> (number) const clean: (self) -> (boolean) end new_store: () -> (hsts_store) lua-http-0.4/http/proxies.lua000066400000000000000000000037431400726324600163010ustar00rootroot00000000000000-- Proxy from e.g. environmental variables. local proxies_methods = {} local proxies_mt = { __name = "http.proxies"; __index = proxies_methods; } local function new() return setmetatable({ http_proxy = nil; https_proxy = nil; all_proxy = nil; no_proxy = nil; }, proxies_mt) end function proxies_methods:update(getenv) if getenv == nil then getenv = os.getenv end -- prefers lower case over upper case; except for http_proxy where no upper case if getenv "GATEWAY_INTERFACE" then -- Mitigate httpoxy. see https://httpoxy.org/ self.http_proxy = getenv "CGI_HTTP_PROXY" else self.http_proxy = getenv "http_proxy" end self.https_proxy = getenv "https_proxy" or getenv "HTTPS_PROXY"; self.all_proxy = getenv "all_proxy" or getenv "ALL_PROXY"; self.no_proxy = getenv "no_proxy" or getenv "NO_PROXY"; return self end -- Finds the correct proxy for a given scheme/host function proxies_methods:choose(scheme, host) if self.no_proxy == "*" then return nil elseif self.no_proxy then -- cache no_proxy_set by overwriting self.no_proxy if type(self.no_proxy) == "string" then local no_proxy_set = {} -- wget allows domains in no_proxy list to be prefixed by "." -- e.g. no_proxy=.mit.edu for host_suffix in self.no_proxy:gmatch("%.?([^,]+)") do no_proxy_set[host_suffix] = true end self.no_proxy = no_proxy_set end -- From curl docs: -- matched as either a domain which contains the hostname, or the -- hostname itself. For example local.com would match local.com, -- local.com:80, and www.local.com, but not www.notlocal.com. for pos in host:gmatch("%f[^%z%.]()") do local host_suffix = host:sub(pos, -1) if self.no_proxy[host_suffix] then return nil end end end if scheme == "http" then if self.http_proxy then return self.http_proxy end elseif scheme == "https" then if self.https_proxy then return self.https_proxy end end return self.all_proxy end return { new = new; methods = proxies_methods; mt = proxies_mt; } lua-http-0.4/http/proxies.tld000066400000000000000000000002161400726324600162730ustar00rootroot00000000000000interface proxies const update: (self, (string)->(string?))->(self) const choose: (self, string, string)->(string?) 
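-- choose(scheme, host) returns the proxy URI string to use for the request, or nil
-- when the host is covered by no_proxy; for example (values assumed purely for
-- illustration) with no_proxy="local.com", both "local.com" and "www.local.com"
-- bypass the proxy while "www.notlocal.com" does not, matching curl's suffix rules.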
end new: proxies lua-http-0.4/http/request.lua000066400000000000000000000467451400726324600163110ustar00rootroot00000000000000local lpeg = require "lpeg" local http_patts = require "lpeg_patterns.http" local uri_patts = require "lpeg_patterns.uri" local basexx = require "basexx" local client = require "http.client" local new_headers = require "http.headers".new local http_cookie = require "http.cookie" local http_hsts = require "http.hsts" local http_socks = require "http.socks" local http_proxies = require "http.proxies" local http_util = require "http.util" local http_version = require "http.version" local monotime = require "cqueues".monotime local ce = require "cqueues.errno" local default_user_agent = string.format("%s/%s", http_version.name, http_version.version) local default_hsts_store = http_hsts.new_store() local default_proxies = http_proxies.new():update() local default_cookie_store = http_cookie.new_store() local default_h2_settings = { ENABLE_PUSH = false; } local request_methods = { hsts = default_hsts_store; proxies = default_proxies; cookie_store = default_cookie_store; is_top_level = true; site_for_cookies = nil; expect_100_timeout = 1; follow_redirects = true; max_redirects = 5; post301 = false; post302 = false; } local request_mt = { __name = "http.request"; __index = request_methods; } local EOF = lpeg.P(-1) local sts_patt = http_patts.Strict_Transport_Security * EOF local uri_patt = uri_patts.uri * EOF local uri_ref = uri_patts.uri_reference * EOF local function new_from_uri(uri_t, headers) if type(uri_t) == "string" then uri_t = assert(uri_patt:match(uri_t), "invalid URI") else assert(type(uri_t) == "table") end local scheme = assert(uri_t.scheme, "URI missing scheme") assert(scheme == "https" or scheme == "http" or scheme == "ws" or scheme == "wss", "scheme not valid") local host = assert(uri_t.host, "URI must include a host") local port = uri_t.port or http_util.scheme_to_port[scheme] local is_connect -- CONNECT requests are a bit special, see http2 spec section 8.3 if headers == nil then headers = new_headers() headers:append(":method", "GET") is_connect = false else is_connect = headers:get(":method") == "CONNECT" end if is_connect then assert(uri_t.path == nil or uri_t.path == "", "CONNECT requests cannot have a path") assert(uri_t.query == nil, "CONNECT requests cannot have a query") assert(headers:has(":authority"), ":authority required for CONNECT requests") else headers:upsert(":authority", http_util.to_authority(host, port, scheme)) local path = uri_t.path if path == nil or path == "" then path = "/" end if uri_t.query then path = path .. "?" .. uri_t.query end headers:upsert(":path", path) if scheme == "wss" then scheme = "https" elseif scheme == "ws" then scheme = "http" end headers:upsert(":scheme", scheme) end if uri_t.userinfo then local field if is_connect then field = "proxy-authorization" else field = "authorization" end local userinfo = http_util.decodeURIComponent(uri_t.userinfo) -- XXX: this doesn't seem right, but it's the same behaviour as curl headers:upsert(field, "basic " .. 
basexx.to_base64(userinfo), true) end if not headers:has("user-agent") then headers:append("user-agent", default_user_agent) end return setmetatable({ host = host; port = port; tls = (scheme == "https"); headers = headers; body = nil; }, request_mt) end local function new_connect(uri, connect_authority) local headers = new_headers() headers:append(":authority", connect_authority) headers:append(":method", "CONNECT") return new_from_uri(uri, headers) end function request_methods:clone() return setmetatable({ host = self.host; port = self.port; bind = self.bind; tls = self.tls; ctx = self.ctx; sendname = self.sendname; version = self.version; proxy = self.proxy; headers = self.headers:clone(); body = self.body; hsts = rawget(self, "hsts"); proxies = rawget(self, "proxies"); cookie_store = rawget(self, "cookie_store"); is_top_level = rawget(self, "is_top_level"); site_for_cookies = rawget(self, "site_for_cookies"); expect_100_timeout = rawget(self, "expect_100_timeout"); follow_redirects = rawget(self, "follow_redirects"); max_redirects = rawget(self, "max_redirects"); post301 = rawget(self, "post301"); post302 = rawget(self, "post302"); }, request_mt) end function request_methods:to_uri(with_userinfo) local scheme = self.headers:get(":scheme") local method = self.headers:get(":method") local path if scheme == nil then scheme = self.tls and "https" or "http" end local authority local authorization_field if method == "CONNECT" then authorization_field = "proxy-authorization" path = "" else path = self.headers:get(":path") local path_t if method == "OPTIONS" and path == "*" then path = "" else path_t = uri_ref:match(path) assert(path_t, "path not a valid uri reference") end if path_t and path_t.host then -- path was a full URI. This is used for proxied requests. scheme = path_t.scheme or scheme path = path_t.path or "" if path_t.query then path = path .. "?" .. path_t.query end authority = http_util.to_authority(path_t.host, path_t.port, scheme) else authority = self.headers:get(":authority") -- TODO: validate authority can fit in a url end authorization_field = "authorization" end if authority == nil then authority = http_util.to_authority(self.host, self.port, scheme) end if with_userinfo and self.headers:has(authorization_field) then local authorization = self.headers:get(authorization_field) local auth_type, userinfo = authorization:match("^%s*(%S+)%s+(%S+)%s*$") if auth_type and auth_type:lower() == "basic" then userinfo = basexx.from_base64(userinfo) userinfo = http_util.encodeURI(userinfo) authority = userinfo .. "@" .. authority else error("authorization cannot be converted to uri") end end return scheme .. "://" .. authority .. 
path end function request_methods:handle_redirect(orig_headers) local max_redirects = self.max_redirects if max_redirects <= 0 then return nil, "maximum redirects exceeded", ce.ELOOP end local location = orig_headers:get("location") if not location then return nil, "missing location header for redirect", ce.EINVAL end local uri_t = uri_ref:match(location) if not uri_t then return nil, "invalid URI in location header", ce.EINVAL end local new_req = self:clone() new_req.max_redirects = max_redirects - 1 local method = new_req.headers:get(":method") local is_connect = method == "CONNECT" local new_scheme = uri_t.scheme if new_scheme then if not is_connect then new_req.headers:upsert(":scheme", new_scheme) end if new_scheme == "https" then new_req.tls = true elseif new_scheme == "http" then new_req.tls = false else return nil, "unknown scheme", ce.EINVAL end else if not is_connect then new_scheme = new_req.headers:get(":scheme") end if new_scheme == nil then new_scheme = self.tls and "https" or "http" end end local orig_target local target_authority if not is_connect then orig_target = self.headers:get(":path") orig_target = uri_ref:match(orig_target) if orig_target and orig_target.host then -- was originally a proxied request local new_authority if uri_t.host then -- we have a new host new_authority = http_util.to_authority(uri_t.host, uri_t.port, new_scheme) new_req.headers:upsert(":authority", new_authority) else new_authority = self.headers:get(":authority") end if new_authority == nil then new_authority = http_util.to_authority(self.host, self.port, new_scheme) end -- prefix for new target target_authority = new_scheme .. "://" .. new_authority end end if target_authority == nil and uri_t.host then -- we have a new host and it wasn't placed into :authority new_req.host = uri_t.host if not is_connect then new_req.headers:upsert(":authority", http_util.to_authority(uri_t.host, uri_t.port, new_scheme)) end new_req.port = uri_t.port or http_util.scheme_to_port[new_scheme] new_req.sendname = nil end -- otherwise same host as original request; don't need change anything if is_connect then if uri_t.path ~= nil and uri_t.path ~= "" then return nil, "CONNECT requests cannot have a path", ce.EINVAL elseif uri_t.query ~= nil then return nil, "CONNECT requests cannot have a query", ce.EINVAL end else local new_path if uri_t.path == nil or uri_t.path == "" then new_path = "/" else new_path = uri_t.path if new_path:sub(1, 1) ~= "/" then -- relative path if not orig_target then return nil, "base path not valid for relative redirect", ce.EINVAL end local orig_path = orig_target.path or "/" new_path = http_util.resolve_relative_path(orig_path, new_path) end end if uri_t.query then new_path = new_path .. "?" .. uri_t.query end if target_authority then new_path = target_authority .. new_path end new_req.headers:upsert(":path", new_path) end if uri_t.userinfo then local field if is_connect then field = "proxy-authorization" else field = "authorization" end new_req.headers:upsert(field, "basic " .. basexx.to_base64(uri_t.userinfo), true) end if not new_req.tls and self.tls then --[[ RFC 7231 5.5.2: A user agent MUST NOT send a Referer header field in an unsecured HTTP request if the referring page was received with a secure protocol.]] new_req.headers:delete("referer") else new_req.headers:upsert("referer", self:to_uri(false)) end -- Change POST requests to a body-less GET on redirect? 
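--[[ Behaviour sketch: a 303 response always rewrites a redirected POST into a
body-less GET, while 301 and 302 only do so when the request's post301/post302
fields are left at their default of false. A caller that wants to keep the POST
method (and its body) across a 301 could set, for example (the request variable
name is hypothetical):
  req.post301 = true
]]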
local orig_status = orig_headers:get(":status") if (orig_status == "303" or (orig_status == "301" and not self.post301) or (orig_status == "302" and not self.post302) ) and method == "POST" then new_req.headers:upsert(":method", "GET") -- Remove headers that don't make sense without a body -- Headers that require a body new_req.headers:delete("transfer-encoding") new_req.headers:delete("content-length") -- Representation Metadata from RFC 7231 Section 3.1 new_req.headers:delete("content-encoding") new_req.headers:delete("content-language") new_req.headers:delete("content-location") new_req.headers:delete("content-type") -- Other... local expect = new_req.headers:get("expect") if expect and expect:lower() == "100-continue" then new_req.headers:delete("expect") end new_req.body = nil end return new_req end function request_methods:set_body(body) self.body = body local length if type(self.body) == "string" then length = #body end if length then self.headers:upsert("content-length", string.format("%d", #body)) end if not length or length > 1024 then self.headers:append("expect", "100-continue") end return true end local function non_final_status(status) return status:sub(1, 1) == "1" and status ~= "101" end function request_methods:go(timeout) local deadline = timeout and (monotime()+timeout) local cloned_headers = false -- only clone headers when we need to local request_headers = self.headers local host = self.host local port = self.port local tls = self.tls local version = self.version -- RFC 6797 Section 8.3 if not tls and self.hsts and self.hsts:check(host) then tls = true if request_headers:get(":scheme") == "http" then -- The UA MUST replace the URI scheme with "https" if not cloned_headers then request_headers = request_headers:clone() cloned_headers = true end request_headers:upsert(":scheme", "https") end -- if the URI contains an explicit port component of "80", then -- the UA MUST convert the port component to be "443", or -- if the URI contains an explicit port component that is not -- equal to "80", the port component value MUST be preserved if port == 80 then port = 443 end end if self.cookie_store then local cookie_header = self.cookie_store:lookup_for_request(request_headers, host, self.site_for_cookies, self.is_top_level) if cookie_header ~= "" then if not cloned_headers then request_headers = request_headers:clone() cloned_headers = true end -- Append rather than upsert: user may have added their own cookies request_headers:append("cookie", cookie_header) end end local connection local proxy = self.proxy if proxy == nil and self.proxies then assert(getmetatable(self.proxies) == http_proxies.mt, "proxies property should be an http.proxies object") local scheme = tls and "https" or "http" -- rather than :scheme proxy = self.proxies:choose(scheme, host) end if proxy then if type(proxy) == "string" then proxy = assert(uri_patt:match(proxy), "invalid proxy URI") proxy.path = nil -- ignore proxy.path component else assert(type(proxy) == "table" and getmetatable(proxy) == nil and proxy.scheme, "invalid proxy URI") proxy = { scheme = proxy.scheme; userinfo = proxy.userinfo; host = proxy.host; port = proxy.port; -- ignore proxy.path component } end if proxy.scheme == "http" or proxy.scheme == "https" then if tls then -- Proxy via a CONNECT request local authority = http_util.to_authority(host, port, nil) local connect_request = new_connect(proxy, authority) connect_request.proxy = false connect_request.version = 1.1 -- TODO: CONNECT over HTTP/2 if connect_request.tls then error("NYI: 
TLS over TLS") end -- Perform CONNECT request local headers, stream, errno = connect_request:go(deadline and deadline-monotime()) if not headers then return nil, stream, errno end -- RFC 7231 Section 4.3.6: -- Any 2xx (Successful) response indicates that the sender (and all -- inbound proxies) will switch to tunnel mode local status_reply = headers:get(":status") if status_reply:sub(1, 1) ~= "2" then stream:shutdown() return nil, ce.strerror(ce.ECONNREFUSED), ce.ECONNREFUSED end local sock = stream.connection:take_socket() local err, errno2 connection, err, errno2 = client.negotiate(sock, { host = host; tls = tls; ctx = self.ctx; sendname = self.sendname; version = version; h2_settings = default_h2_settings; }, deadline and deadline-monotime()) if connection == nil then sock:close() return nil, err, errno2 end else if request_headers:get(":method") == "CONNECT" then error("cannot use HTTP Proxy with CONNECT method") end -- TODO: Check if :path already has authority? local old_url = self:to_uri(false) host = assert(proxy.host, "proxy is missing host") port = proxy.port or http_util.scheme_to_port[proxy.scheme] -- proxy requests get a uri that includes host as their path if not cloned_headers then request_headers = request_headers:clone() cloned_headers = true -- luacheck: ignore 311 end request_headers:upsert(":path", old_url) if proxy.userinfo then request_headers:upsert("proxy-authorization", "basic " .. basexx.to_base64(proxy.userinfo), true) end end elseif proxy.scheme:match "^socks" then local socks = http_socks.connect(proxy) local ok, err, errno = socks:negotiate(host, port, deadline and deadline-monotime()) if not ok then return nil, err, errno end local sock = socks:take_socket() connection, err, errno = client.negotiate(sock, { tls = tls; ctx = self.ctx; sendname = self.sendname ~= nil and self.sendname or host; version = version; h2_settings = default_h2_settings; }, deadline and deadline-monotime()) if connection == nil then sock:close() return nil, err, errno end else error(string.format("unsupported proxy type (%s)", proxy.scheme)) end end if not connection then local err, errno connection, err, errno = client.connect({ host = host; port = port; bind = self.bind; tls = tls; ctx = self.ctx; sendname = self.sendname; version = version; h2_settings = default_h2_settings; }, deadline and deadline-monotime()) if connection == nil then return nil, err, errno end -- Close the connection (and free resources) when done connection:onidle(connection.close) end local stream do local err, errno stream, err, errno = connection:new_stream() if stream == nil then return nil, err, errno end end local body = self.body do -- Write outgoing headers local ok, err, errno = stream:write_headers(request_headers, body == nil, deadline and deadline-monotime()) if not ok then stream:shutdown() return nil, err, errno end end local headers if body then local expect = request_headers:get("expect") if expect and expect:lower() == "100-continue" then -- Try to wait for 100-continue before proceeding if deadline then local err, errno headers, err, errno = stream:get_headers(math.min(self.expect_100_timeout, deadline-monotime())) if headers == nil and (errno ~= ce.ETIMEDOUT or monotime() > deadline) then stream:shutdown() if err == nil then return nil, ce.strerror(ce.EPIPE), ce.EPIPE end return nil, err, errno end else local err, errno headers, err, errno = stream:get_headers(self.expect_100_timeout) if headers == nil and errno ~= ce.ETIMEDOUT then stream:shutdown() if err == nil then return nil, 
ce.strerror(ce.EPIPE), ce.EPIPE end return nil, err, errno end end if headers and headers:get(":status") ~= "100" then -- Don't send body body = nil end end if body then local ok, err, errno if type(body) == "string" then ok, err, errno = stream:write_body_from_string(body, deadline and deadline-monotime()) elseif io.type(body) == "file" then ok, err, errno = body:seek("set") if ok then ok, err, errno = stream:write_body_from_file(body, deadline and deadline-monotime()) end elseif type(body) == "function" then -- call function to get body segments while true do local chunk = body() if chunk then ok, err, errno = stream:write_chunk(chunk, false, deadline and deadline-monotime()) if not ok then break end else ok, err, errno = stream:write_chunk("", true, deadline and deadline-monotime()) break end end end if not ok then stream:shutdown() return nil, err, errno end end end if not headers or non_final_status(headers:get(":status")) then -- Skip through 1xx informational headers. -- From RFC 7231 Section 6.2: "A user agent MAY ignore unexpected 1xx responses" repeat local err, errno headers, err, errno = stream:get_headers(deadline and (deadline-monotime())) if headers == nil then stream:shutdown() if err == nil then return nil, ce.strerror(ce.EPIPE), ce.EPIPE end return nil, err, errno end until not non_final_status(headers:get(":status")) end -- RFC 6797 Section 8.1 if tls and self.hsts and headers:has("strict-transport-security") then -- If a UA receives more than one STS header field in an HTTP -- response message over secure transport, then the UA MUST process -- only the first such header field. local sts = headers:get("strict-transport-security") sts = sts_patt:match(sts) -- The UA MUST ignore any STS header fields not conforming to the grammar specified. if sts then self.hsts:store(self.host, sts) end end if self.cookie_store then self.cookie_store:store_from_request(request_headers, headers, self.host, self.site_for_cookies) end if self.follow_redirects and headers:get(":status"):sub(1,1) == "3" then stream:shutdown() local new_req, err2, errno2 = self:handle_redirect(headers) if not new_req then return nil, err2, errno2 end return new_req:go(deadline and (deadline-monotime())) end return headers, stream end return { new_from_uri = new_from_uri; new_connect = new_connect; methods = request_methods; mt = request_mt; } lua-http-0.4/http/request.tld000066400000000000000000000013621400726324600162750ustar00rootroot00000000000000require "http.cookie" require "http.hsts" require "http.proxies" require "http.stream_common" interface request hsts: hsts_store|false proxies: proxies|false cookie_store: cookie_store|false is_top_level: boolean site_for_cookies: string? expect_100_timeout: integer follow_redirects: boolean max_redirects: integer post301: boolean post302: boolean headers: headers const clone: (self) -> (request) const to_uri: (self, boolean?) -> (string) const handle_redirect: (self, headers) -> (request)|(nil, string, integer) const set_body: (self, string|file|()->(string?)) -> () const go: (self, number) -> (headers, stream)|(nil, string, integer) end new_from_uri: (string, headers?) 
-> (request) new_connect: (string, string) -> (request) lua-http-0.4/http/server.lua000066400000000000000000000330511400726324600161110ustar00rootroot00000000000000local cqueues = require "cqueues" local monotime = cqueues.monotime local ca = require "cqueues.auxlib" local cc = require "cqueues.condition" local ce = require "cqueues.errno" local cs = require "cqueues.socket" local connection_common = require "http.connection_common" local onerror = connection_common.onerror local h1_connection = require "http.h1_connection" local h2_connection = require "http.h2_connection" local http_tls = require "http.tls" local http_util = require "http.util" local openssl_bignum = require "openssl.bignum" local pkey = require "openssl.pkey" local openssl_rand = require "openssl.rand" local openssl_ssl = require "openssl.ssl" local openssl_ctx = require "openssl.ssl.context" local x509 = require "openssl.x509" local name = require "openssl.x509.name" local altname = require "openssl.x509.altname" local hang_timeout = 0.03 -- Sense for TLS or SSL client hello -- returns `true`, `false` or `nil, err` local function is_tls_client_hello(socket, timeout) -- reading for 6 bytes should be safe, as no HTTP version -- has a valid client request shorter than 6 bytes local first_bytes, err, errno = socket:xread(6, timeout) if first_bytes == nil then return nil, err or ce.EPIPE, errno end local use_tls = not not ( first_bytes:match("^[\21\22]\3[\1\2\3]..\1") or -- TLS first_bytes:match("^[\128-\255][\9-\255]\1") -- SSLv2 ) local ok ok, errno = socket:unget(first_bytes) if not ok then return nil, onerror(socket, "unget", errno, 2) end return use_tls end -- Wrap a bare cqueues socket in an HTTP connection of a suitable version -- Starts TLS if necessary -- this function *should never throw* local function wrap_socket(self, socket, timeout) local deadline = timeout and monotime()+timeout socket:setmode("b", "b") socket:onerror(onerror) local version = self.version local use_tls = self.tls if use_tls == nil then local err, errno use_tls, err, errno = is_tls_client_hello(socket, deadline and (deadline-monotime())) if use_tls == nil then return nil, err, errno end end if use_tls then local ok, err, errno = socket:starttls(self.ctx, deadline and (deadline-monotime())) if not ok then return nil, err, errno end local ssl = assert(socket:checktls()) if http_tls.has_alpn then local proto = ssl:getAlpnSelected() if proto then if proto == "h2" and (version == nil or version == 2) then version = 2 elseif proto == "http/1.1" and (version == nil or version < 2) then version = 1.1 elseif proto == "http/1.0" and (version == nil or version == 1.0) then version = 1.0 else return nil, "unexpected ALPN protocol: " .. 
proto, ce.EILSEQNOSUPPORT end end end end -- Still not sure if incoming connection is an HTTP1 or HTTP2 connection -- Need to sniff for the h2 connection preface to find out for sure if version == nil then local is_h2, err, errno = h2_connection.socket_has_preface(socket, true, deadline and (deadline-monotime())) if is_h2 == nil then return nil, err or ce.EPIPE, errno end version = is_h2 and 2 or 1.1 end local conn, err, errno if version == 2 then conn, err, errno = h2_connection.new(socket, "server", nil) else conn, err, errno = h1_connection.new(socket, "server", version) end if not conn then return nil, err, errno end return conn end local function server_loop(self) while self.socket do if self.paused then cqueues.poll(self.pause_cond) elseif self.n_connections >= self.max_concurrent then cqueues.poll(self.connection_done) else local socket, accept_errno = self.socket:accept({nodelay = true;}, 0) if socket == nil then if accept_errno == ce.ETIMEDOUT then -- Yield this thread until a client arrives cqueues.poll(self.socket, self.pause_cond) elseif accept_errno == ce.EMFILE then -- Wait for another request to finish if cqueues.poll(self.connection_done, hang_timeout) == hang_timeout then -- If we're stuck waiting, run a garbage collection sweep -- This can prevent a hang collectgarbage() end else self:onerror()(self, self, "accept", ce.strerror(accept_errno), accept_errno) end else self:add_socket(socket) end end end end local function handle_socket(self, socket) local error_operation, error_context local conn, err, errno = wrap_socket(self, socket, self.connection_setup_timeout) if not conn then socket:close() if err ~= ce.EPIPE -- client closed connection and errno ~= ce.ETIMEDOUT -- an operation timed out and errno ~= ce.ECONNRESET then error_operation = "wrap" error_context = socket end else local cond = cc.new() local idle = true local deadline conn:onidle(function() idle = true deadline = self.intra_stream_timeout + monotime() cond:signal(1) end) while true do local timeout = deadline and deadline-monotime() or self.intra_stream_timeout local stream stream, err, errno = conn:get_next_incoming_stream(timeout) if stream == nil then if (err ~= nil -- client closed connection and errno ~= ce.ECONNRESET and errno ~= ce.ENOTCONN and errno ~= ce.ETIMEDOUT) then error_operation = "get_next_incoming_stream" error_context = conn break elseif errno ~= ce.ETIMEDOUT or not idle or (deadline and deadline <= monotime()) then -- want to go around loop again if deadline not hit break end else idle = false deadline = nil self:add_stream(stream) end end -- wait for streams to complete if not idle then cond:wait() end conn:close() end self.n_connections = self.n_connections - 1 self.connection_done:signal(1) if error_operation then self:onerror()(self, error_context, error_operation, err, errno) end end local function handle_stream(self, stream) local ok, err = http_util.yieldable_pcall(self.onstream, self, stream) stream:shutdown() if not ok then self:onerror()(self, stream, "onstream", err) end end -- Prefer whichever comes first local function alpn_select(ssl, protos, version) for _, proto in ipairs(protos) do if proto == "h2" and (version == nil or version == 2) then -- HTTP2 only allows >= TLSv1.2 -- allow override via version if ssl:getVersion() >= openssl_ssl.TLS1_2_VERSION or version == 2 then return proto end elseif (proto == "http/1.1" and (version == nil or version == 1.1)) or (proto == "http/1.0" and (version == nil or version == 1.0)) then return proto end end return nil end -- create a 
new self signed cert local function new_ctx(host, version) local ctx = http_tls.new_server_context() if http_tls.has_alpn then ctx:setAlpnSelect(alpn_select, version) end if version == 2 then ctx:setOptions(openssl_ctx.OP_NO_TLSv1 + openssl_ctx.OP_NO_TLSv1_1) end local crt = x509.new() crt:setVersion(3) -- serial needs to be unique or browsers will show uninformative error messages crt:setSerial(openssl_bignum.fromBinary(openssl_rand.bytes(16))) -- use the host we're listening on as canonical name local dn = name.new() dn:add("CN", host) crt:setSubject(dn) crt:setIssuer(dn) -- should match subject for a self-signed local alt = altname.new() alt:add("DNS", host) crt:setSubjectAlt(alt) -- lasts for 10 years crt:setLifetime(os.time(), os.time()+86400*3650) -- can't be used as a CA crt:setBasicConstraints{CA=false} crt:setBasicConstraintsCritical(true) -- generate a new private/public key pair local key = pkey.new({bits=2048}) crt:setPublicKey(key) crt:sign(key) assert(ctx:setPrivateKey(key)) assert(ctx:setCertificate(crt)) return ctx end local server_methods = { version = nil; max_concurrent = math.huge; connection_setup_timeout = 10; intra_stream_timeout = 10; } local server_mt = { __name = "http.server"; __index = server_methods; } function server_mt:__tostring() return string.format("http.server{socket=%s;n_connections=%d}", tostring(self.socket), self.n_connections) end --[[ Creates a new server object Takes a table of options: - `.cq` (optional): A cqueues controller to use - `.socket` (optional): A cqueues socket object to accept() from - `.onstream`: function to call back for each stream read - `.onerror`: function that will be called when an error occurs (default: throw an error) - `.tls`: `nil`: allow both tls and non-tls connections - `true`: allows tls connections only - `false`: allows non-tls connections only - `.ctx`: an `openssl.ssl.context` object to use for tls connections - ` `nil`: a self-signed context will be generated - `.version`: the http version to allow to connect (default: any) - `.max_concurrent`: Maximum number of connections to allow live at a time (default: infinity) - `.connection_setup_timeout`: Timeout (in seconds) to wait for client to send first bytes and/or complete TLS handshake (default: 10) - `.intra_stream_timeout`: Timeout (in seoncds) to wait between start of client streams (default: 10) ]] local function new_server(tbl) local cq = tbl.cq if cq == nil then cq = cqueues.new() else assert(cqueues.type(cq) == "controller", "optional cq field should be a cqueue controller") end local socket = tbl.socket if socket ~= nil then assert(cs.type(socket), "optional socket field should be a cqueues socket") end local onstream = assert(tbl.onstream, "missing 'onstream'") if tbl.ctx == nil and tbl.tls ~= false then error("OpenSSL context required if .tls isn't false") end local self = setmetatable({ cq = cq; socket = socket; onstream = onstream; onerror_ = tbl.onerror; tls = tbl.tls; ctx = tbl.ctx; version = tbl.version; max_concurrent = tbl.max_concurrent; n_connections = 0; pause_cond = cc.new(); paused = false; connection_done = cc.new(); -- signalled when connection has been closed connection_setup_timeout = tbl.connection_setup_timeout; intra_stream_timeout = tbl.intra_stream_timeout; }, server_mt) if socket then -- Return errors rather than throwing socket:onerror(function(socket, op, why, lvl) -- luacheck: ignore 431 212 return why end) cq:wrap(server_loop, self) end return self end --[[ Extra options: - `.family`: protocol family - `.host`: address to bind 
to (required if not `.path`) - `.port`: port to bind to (optional if tls isn't `nil`, in which case defaults to 80 for `.tls == false` or 443 if `.tls == true`) - `.path`: path to UNIX socket (required if not `.host`) - `.v6only`: allow ipv6 only (no ipv4-mapped-ipv6) - `.mode`: fchmod or chmod socket after creating UNIX domain socket - `.mask`: set and restore umask when binding UNIX domain socket - `.unlink`: unlink socket path before binding? - `.reuseaddr`: turn on SO_REUSEADDR flag? - `.reuseport`: turn on SO_REUSEPORT flag? ]] local function listen(tbl) local tls = tbl.tls local host = tbl.host local path = tbl.path assert(host or path, "need host or path") local port = tbl.port if host and port == nil then if tls == true then port = "443" elseif tls == false then port = "80" else error("need port") end end local ctx = tbl.ctx if ctx == nil and tls ~= false then if host then ctx = new_ctx(host, tbl.version) else error("Custom OpenSSL context required when using a UNIX domain socket") end end local s, err, errno = ca.fileresult(cs.listen { family = tbl.family; host = host; port = port; path = path; mode = tbl.mode; mask = tbl.mask; unlink = tbl.unlink; reuseaddr = tbl.reuseaddr; reuseport = tbl.reuseport; v6only = tbl.v6only; }) if not s then return nil, err, errno end return new_server { cq = tbl.cq; socket = s; onstream = tbl.onstream; onerror = tbl.onerror; tls = tls; ctx = ctx; version = tbl.version; max_concurrent = tbl.max_concurrent; connection_setup_timeout = tbl.connection_setup_timeout; intra_stream_timeout = tbl.intra_stream_timeout; } end function server_methods:onerror_(context, op, err, errno) -- luacheck: ignore 212 local msg = op if err then msg = msg .. ": " .. tostring(err) end error(msg, 2) end function server_methods:onerror(...) local old_handler = self.onerror_ if select("#", ...) > 0 then self.onerror_ = ... end return old_handler end -- Actually wait for and *do* the binding -- Don't *need* to call this, as if not it will be done lazily function server_methods:listen(timeout) if self.socket then local ok, err, errno = ca.fileresult(self.socket:listen(timeout)) if not ok then return nil, err, errno end end return true end function server_methods:localname() if self.socket == nil then return end return ca.fileresult(self.socket:localname()) end function server_methods:pause() self.paused = true self.pause_cond:signal() return true end function server_methods:resume() self.paused = false self.pause_cond:signal() return true end function server_methods:close() if self.cq then cqueues.cancel(self.cq:pollfd()) cqueues.poll() cqueues.poll() self.cq = nil end if self.socket then self.socket:close() self.socket = nil end self.pause_cond:signal() self.connection_done:signal() return true end function server_methods:pollfd() return self.cq:pollfd() end function server_methods:events() return self.cq:events() end function server_methods:timeout() return self.cq:timeout() end function server_methods:empty() return self.cq:empty() end function server_methods:step(...) return self.cq:step(...) end function server_methods:loop(...) return self.cq:loop(...) 
end function server_methods:add_socket(socket) self.n_connections = self.n_connections + 1 self.cq:wrap(handle_socket, self, socket) return true end function server_methods:add_stream(stream) self.cq:wrap(handle_stream, self, stream) return true end return { new = new_server; listen = listen; mt = server_mt; } lua-http-0.4/http/socks.lua000066400000000000000000000252021400726324600157240ustar00rootroot00000000000000--[[ This module implements a subset of SOCKS as defined in RFC 1928. SOCKS5 has different authentication mechanisms, currently this code only supports username+password auth (defined in RFC 1929). URI format is taken from curl: - socks5:// is SOCKS5, resolving the authority locally - socks5h:// is SOCKS5, but let the proxy resolve the hostname ]] local cqueues = require "cqueues" local monotime = cqueues.monotime local ca = require "cqueues.auxlib" local ce = require "cqueues.errno" local cs = require "cqueues.socket" local spack = string.pack or require "compat53.string".pack -- luacheck: ignore 143 local sunpack = string.unpack or require "compat53.string".unpack -- luacheck: ignore 143 local IPv4 = require "lpeg_patterns.IPv4" local IPv6 = require "lpeg_patterns.IPv6" local uri_patts = require "lpeg_patterns.uri" local http_util = require "http.util" local EOF = require "lpeg".P(-1) local IPv4address = require "lpeg_patterns.IPv4".IPv4address local IPv6address = require "lpeg_patterns.IPv6".IPv6address local IPaddress = (IPv4address + IPv6address) * EOF local socks_methods = {} local socks_mt = { __name = "http.socks"; __index = socks_methods; } local function onerror(socket, op, why, lvl) -- luacheck: ignore 212 return string.format("%s: %s", op, ce.strerror(why)), why end local function new() return setmetatable({ version = 5; socket = nil; family = nil; host = nil; port = nil; needs_resolve = false; available_auth_methods = { "\0", ["\0"] = true; }; username = nil; password = nil; dst_family = nil; dst_host = nil; dst_port = nil; }, socks_mt) end local function connect(socks_uri) if type(socks_uri) == "string" then socks_uri = assert(uri_patts.uri:match(socks_uri), "invalid URI") end local self = new() if socks_uri.scheme == "socks5" then self.needs_resolve = true elseif socks_uri.scheme ~= "socks5h" then error("only SOCKS5 proxys supported") end assert(socks_uri.path == nil, "path not expected") local username, password if socks_uri.userinfo then username, password = socks_uri.userinfo:match("^([^:]*):(.*)$") if username == nil then error("invalid username/password format") end end self.host = socks_uri.host self.port = socks_uri.port or 1080 if username then self:add_username_password_auth(username, password) end return self end local function fdopen(socket) local self = new() socket:onerror(onerror) self.socket = socket return self end function socks_methods:clone() if self.socket then error("cannot clone live http.socks object") end local clone = new() clone.family = self.family clone.host = self.host clone.port = self.port clone.needs_resolve = self.needs_resolve if self.username then clone:add_username_password_auth(self.username, self.password) end return clone end function socks_methods:add_username_password_auth(username, password) self.username = http_util.decodeURIComponent(username) self.password = http_util.decodeURIComponent(password) if not self.available_auth_methods["\2"] then table.insert(self.available_auth_methods, "\2") self.available_auth_methods["\2"] = true end return true end -- RFC 1929 local function username_password_auth(self, deadline) do 
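--[[ RFC 1929 sub-negotiation: the request is one version byte (0x01) followed by
the username and the password, each encoded as a 1-byte-length-prefixed string,
which is what spack("Bs1s1", 1, ...) below produces. The reply is two bytes, the
auth version and a status byte where 0x00 means success. ]]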
local data = spack("Bs1s1", 1, self.username, self.password) local ok, err, errno = self.socket:xwrite(data, "bn", deadline and deadline-monotime()) if not ok then return nil, err, errno end end do local version, err, errno = self.socket:xread(1, "b", deadline and deadline-monotime()) if not version then if err == nil then return nil, "username_password_auth: "..ce.strerror(ce.EPIPE), ce.EPIPE end return nil, err, errno end if version ~= "\1" then return nil, "username_password_auth: invalid username/password auth version", ce.EILSEQ end end do local ok, err, errno = self.socket:xread(1, "b", deadline and deadline-monotime()) if not ok then if err == nil then return nil, "username_password_auth: "..ce.strerror(ce.EPIPE), ce.EPIPE end return nil, err, errno end if ok ~= "\0" then return nil, "username_password_auth: "..ce.strerror(ce.EACCES), ce.EACCES end end return true end function socks_methods:negotiate(host, port, timeout) local deadline = timeout and monotime()+timeout assert(host, "host expected") port = assert(tonumber(port), "numeric port expected") if self.socket == nil then assert(self.host) local socket, err, errno = ca.fileresult(cs.connect { family = self.family; host = self.host; port = self.port; sendname = false; nodelay = true; }) if socket == nil then return nil, err, errno end socket:onerror(onerror) self.socket = socket end local ip = IPaddress:match(host) if self.needs_resolve and not ip then -- Waiting on https://github.com/wahern/cqueues/issues/164 error("NYI: need to resolve locally") end do local data = "\5"..string.char(#self.available_auth_methods)..table.concat(self.available_auth_methods) local ok, err, errno = self.socket:xwrite(data, "bn", deadline and deadline-monotime()) if not ok then return nil, err, errno end end do local byte, err, errno = self.socket:xread(1, "b", deadline and deadline-monotime()) if not byte then if err == nil then return nil, "socks:negotiate: "..ce.strerror(ce.EPIPE), ce.EPIPE end return nil, err, errno elseif byte ~= "\5" then return nil, "socks:negotiate: not SOCKS5", ce.EILSEQ end end local auth_method do local err, errno auth_method, err, errno = self.socket:xread(1, "b", deadline and deadline-monotime()) if not auth_method then if err == nil then return nil, "socks:negotiate: "..ce.strerror(ce.EPIPE), ce.EPIPE end return nil, err, errno end if self.available_auth_methods[auth_method] == nil then return nil, "socks:negotiate: unknown authentication method", ce.EILSEQ end end if auth_method == "\0" then -- luacheck: ignore 542 -- do nothing elseif auth_method == "\2" then local ok, err, errno = username_password_auth(self, deadline) if not ok then return nil, err, errno end else error("unreachable") -- implies `available_auth_methods` was edited while this was in progress end do local data if getmetatable(ip) == IPv4.IPv4_mt then data = spack(">BBx Bc4I2", 5, 1, 1, ip:binary(), port) elseif getmetatable(ip) == IPv6.IPv6_mt then data = spack(">BBx Bc16I2", 5, 1, 4, ip:binary(), port) else -- domain name data = spack(">BBx Bs1I2", 5, 1, 3, host, port) end local ok, err, errno = self.socket:xwrite(data, "bn", deadline and deadline-monotime()) if not ok then return nil, err, errno end end do local byte, err, errno = self.socket:xread(1, "b", deadline and deadline-monotime()) if not byte then if err == nil then return nil, "socks:negotiate: "..ce.strerror(ce.EPIPE), ce.EPIPE end return nil, err, errno elseif byte ~= "\5" then return nil, "socks:negotiate: not SOCKS5", ce.EILSEQ end end do local code, err, errno = self.socket:xread(1, 
"b", deadline and deadline-monotime()) if not code then if err == nil then return nil, "socks:negotiate: "..ce.strerror(ce.EPIPE), ce.EPIPE end return nil, err, errno elseif code ~= "\0" then local num_code = code:byte() if num_code == 1 then err = "general SOCKS server failure" elseif num_code == 2 then err = "connection not allowed by ruleset" errno = ce.EACCES elseif num_code == 3 then err = "Network unreachable" errno = ce.ENETUNREACH elseif num_code == 4 then err = "Host unreachable" errno = ce.EHOSTUNREACH elseif num_code == 5 then err = "Connection refused" errno = ce.ECONNREFUSED elseif num_code == 6 then err = "TTL expired" errno = ce.ETIMEDOUT elseif num_code == 7 then err = "Command not supported" errno = ce.EOPNOTSUPP elseif num_code == 8 then err = "Address type not supported" errno = ce.EAFNOSUPPORT else err = "Unknown code" errno = ce.PROTO end return nil, string.format("socks:negotiate: remote error %d: %s", num_code, err), errno end end do local byte, err, errno = self.socket:xread(1, "b", deadline and deadline-monotime()) if not byte then if err == nil then return nil, "socks:negotiate: "..ce.strerror(ce.EPIPE), ce.EPIPE end return nil, err, errno elseif byte ~= "\0" then return nil, "socks:negotiate: reserved field set to non-zero", ce.EILSEQ end end local dst_family, dst_host, dst_port do local atype, err, errno = self.socket:xread(1, "b", deadline and deadline-monotime()) if not atype then if err == nil then return nil, "socks:negotiate: "..ce.strerror(ce.EPIPE), ce.EPIPE end return nil, err, errno end if atype == "\1" then local ipv4 ipv4, err, errno = self.socket:xread(4, "b", deadline and deadline-monotime()) if not ipv4 or #ipv4 < 4 then if err == nil then return nil, "socks:negotiate: "..ce.strerror(ce.EPIPE), ce.EPIPE end return nil, err, errno end dst_family = cs.AF_INET dst_host = string.format("%d.%d.%d.%d", ipv4:byte(1, 4)) elseif atype == "\4" then local ipv6 ipv6, err, errno = self.socket:xread(16, "b", deadline and deadline-monotime()) if not ipv6 or #ipv6 < 16 then if err == nil then return nil, "socks:negotiate: "..ce.strerror(ce.EPIPE), ce.EPIPE end return nil, err, errno end dst_family = cs.AF_INET6 dst_host = string.format("%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", ipv6:byte(1, 16)) elseif atype == "\3" then local len len, err, errno = self.socket:xread(1, "b", deadline and deadline-monotime()) if not len then if err == nil then return nil, "socks:negotiate: "..ce.strerror(ce.EPIPE), ce.EPIPE end return nil, err, errno end dst_family = cs.AF_UNSPEC len = string.byte(len) dst_host, err, errno = self.socket:xread(len, "b", deadline and deadline-monotime()) if not dst_host or #dst_host < len then if err == nil then return nil, "socks:negotiate: "..ce.strerror(ce.EPIPE), ce.EPIPE end return nil, err, errno end else return nil, "socks:negotiate: unknown address type", ce.EAFNOSUPPORT end end do local dst_port_bin, err, errno = self.socket:xread(2, "b", deadline and deadline-monotime()) if not dst_port_bin then return nil, err or ce.EPIPE, errno end dst_port = sunpack(">I2", dst_port_bin) end self.dst_family = dst_family self.dst_host = dst_host self.dst_port = dst_port return true end function socks_methods:close() if self.socket then self.socket:close() end end function socks_methods:take_socket() local s = self.socket if s == nil then -- already taken return nil end self.socket = nil return s end return { connect = connect; fdopen = fdopen; } 
lua-http-0.4/http/stream_common.lua000066400000000000000000000105771400726324600174560ustar00rootroot00000000000000--[[ This module provides common functions for HTTP streams no matter the underlying protocol version. This is usually an internal module, and should be used by adding the `methods` exposed to your own HTTP stream objects. ]] local cqueues = require "cqueues" local monotime = cqueues.monotime local new_headers = require "http.headers".new local CHUNK_SIZE = 2^20 -- write in 1MB chunks local stream_methods = {} function stream_methods:checktls() return self.connection:checktls() end function stream_methods:localname() return self.connection:localname() end function stream_methods:peername() return self.connection:peername() end -- 100-Continue response local continue_headers = new_headers() continue_headers:append(":status", "100") function stream_methods:write_continue(timeout) return self:write_headers(continue_headers, false, timeout) end -- need helper to discard 'last' argument -- (which would otherwise end up going in 'timeout') local function each_chunk_helper(self) return self:get_next_chunk() end function stream_methods:each_chunk() return each_chunk_helper, self end function stream_methods:get_body_as_string(timeout) local deadline = timeout and (monotime()+timeout) local body, i = {}, 0 while true do local chunk, err, errno = self:get_next_chunk(timeout) if chunk == nil then if err == nil then break else return nil, err, errno end end i = i + 1 body[i] = chunk timeout = deadline and (deadline-monotime()) end return table.concat(body, "", 1, i) end function stream_methods:get_body_chars(n, timeout) local deadline = timeout and (monotime()+timeout) local body, i, len = {}, 0, 0 while len < n do local chunk, err, errno = self:get_next_chunk(timeout) if chunk == nil then if err == nil then break else return nil, err, errno end end i = i + 1 body[i] = chunk len = len + #chunk timeout = deadline and (deadline-monotime()) end if i == 0 then return nil end local r = table.concat(body, "", 1, i) if n < len then self:unget(r:sub(n+1, -1)) r = r:sub(1, n) end return r end function stream_methods:get_body_until(pattern, plain, include_pattern, timeout) local deadline = timeout and (monotime()+timeout) local body while true do local chunk, err, errno = self:get_next_chunk(timeout) if chunk == nil then if err == nil then return body, err else return nil, err, errno end end if body then body = body .. 
chunk else body = chunk end local s, e = body:find(pattern, 1, plain) if s then if e < #body then self:unget(body:sub(e+1, -1)) end if include_pattern then return body:sub(1, e) else return body:sub(1, s-1) end end timeout = deadline and (deadline-monotime()) end end function stream_methods:save_body_to_file(file, timeout) local deadline = timeout and (monotime()+timeout) while true do local chunk, err, errno = self:get_next_chunk(timeout) if chunk == nil then if err == nil then break else return nil, err, errno end end assert(file:write(chunk)) timeout = deadline and (deadline-monotime()) end return true end function stream_methods:get_body_as_file(timeout) local file = assert(io.tmpfile()) local ok, err, errno = self:save_body_to_file(file, timeout) if not ok then return nil, err, errno end assert(file:seek("set")) return file end function stream_methods:write_body_from_string(str, timeout) return self:write_chunk(str, true, timeout) end function stream_methods:write_body_from_file(options, timeout) local deadline = timeout and (monotime()+timeout) local file, count if io.type(options) then -- lua-http <= 0.2 took a file handle file = options else file = options.file count = options.count end if count == nil then count = math.huge elseif type(count) ~= "number" or count < 0 or count % 1 ~= 0 then error("invalid .count parameter (expected positive integer)") end while count > 0 do local chunk, err = file:read(math.min(CHUNK_SIZE, count)) if chunk == nil then if err then error(err) elseif count ~= math.huge and count > 0 then error("unexpected EOF") end break end local ok, err2, errno2 = self:write_chunk(chunk, false, deadline and (deadline-monotime())) if not ok then return nil, err2, errno2 end count = count - #chunk end return self:write_chunk("", true, deadline and (deadline-monotime())) end return { methods = stream_methods; } lua-http-0.4/http/stream_common.tld000066400000000000000000000030201400726324600174410ustar00rootroot00000000000000require "http.connection_common" interface stream const checktls: (self) -> (nil)|(any) const localname: (self) -> (integer, string, integer?)|(nil)|(nil, string, number) const peername: (self) -> (integer, string, integer?)|(nil)|(nil, string, number) const write_continue: (self, number?) -> (true)|(nil, string, number) const each_chunk: (self) -> ((stream)->(string)|(nil)|(nil, string, number), self) const get_body_as_string: (self, number?) -> (string)|(nil, string, number) const get_body_chars: (self, integer, number?) -> (string)|(nil, string, number) const get_body_until: (self, string, boolean, boolean, number?) -> (string)|(nil, string, number) const save_body_to_file: (self, file, number?) -> (true)|(nil, string, number) const get_body_as_file: (self, number?) -> (file)|(nil, string, number) const write_body_from_string: (self, string, number?) -> (true)|(nil, string, number) const write_body_from_file: (self, {"file":file, "count": integer?}|file, number?) -> (true)|(nil, string, number) -- Not in stream_common.lua const connection: connection const get_headers: (self, number?) -> (headers)|(nil)|(nil, string, number) const get_next_chunk: (self, number?) -> (string)|(nil)|(nil, string, number) const write_headers: (self, headers, boolean, number?) -> (true)|(nil, string, number) const write_chunk: (self, string, boolean, number?) 
-> (true)|(nil, string, number) const unget: (self, string) -> (true) const shutdown: (self) -> (true) end lua-http-0.4/http/tls.lua000066400000000000000000001000471400726324600154050ustar00rootroot00000000000000local openssl_ctx = require "openssl.ssl.context" local openssl_pkey = require "openssl.pkey" local openssl_verify_param = require "openssl.x509.verify_param" -- Detect if openssl was compiled with ALPN enabled local has_alpn = openssl_ctx.new().setAlpnSelect ~= nil -- OpenSSL did not always have hostname validation. local has_hostname_validation = openssl_verify_param.new().setHost ~= nil -- Creates a cipher list suitable for passing to `setCipherList` local function cipher_list(arr) return table.concat(arr, ":") end -- Cipher lists from Mozilla. -- https://wiki.mozilla.org/Security/Server_Side_TLS -- This list of ciphers should be kept up to date. -- "Modern" cipher list local modern_cipher_list = cipher_list { "ECDHE-ECDSA-AES256-GCM-SHA384"; "ECDHE-RSA-AES256-GCM-SHA384"; "ECDHE-ECDSA-CHACHA20-POLY1305"; "ECDHE-RSA-CHACHA20-POLY1305"; "ECDHE-ECDSA-AES128-GCM-SHA256"; "ECDHE-RSA-AES128-GCM-SHA256"; "ECDHE-ECDSA-AES256-SHA384"; "ECDHE-RSA-AES256-SHA384"; "ECDHE-ECDSA-AES128-SHA256"; "ECDHE-RSA-AES128-SHA256"; } -- "Intermediate" cipher list local intermediate_cipher_list = cipher_list { "ECDHE-ECDSA-CHACHA20-POLY1305"; "ECDHE-RSA-CHACHA20-POLY1305"; "ECDHE-ECDSA-AES128-GCM-SHA256"; "ECDHE-RSA-AES128-GCM-SHA256"; "ECDHE-ECDSA-AES256-GCM-SHA384"; "ECDHE-RSA-AES256-GCM-SHA384"; "DHE-RSA-AES128-GCM-SHA256"; "DHE-RSA-AES256-GCM-SHA384"; "ECDHE-ECDSA-AES128-SHA256"; "ECDHE-RSA-AES128-SHA256"; "ECDHE-ECDSA-AES128-SHA"; "ECDHE-RSA-AES256-SHA384"; "ECDHE-RSA-AES128-SHA"; "ECDHE-ECDSA-AES256-SHA384"; "ECDHE-ECDSA-AES256-SHA"; "ECDHE-RSA-AES256-SHA"; "DHE-RSA-AES128-SHA256"; "DHE-RSA-AES128-SHA"; "DHE-RSA-AES256-SHA256"; "DHE-RSA-AES256-SHA"; "ECDHE-ECDSA-DES-CBC3-SHA"; "ECDHE-RSA-DES-CBC3-SHA"; "EDH-RSA-DES-CBC3-SHA"; "AES128-GCM-SHA256"; "AES256-GCM-SHA384"; "AES128-SHA256"; "AES256-SHA256"; "AES128-SHA"; "AES256-SHA"; "DES-CBC3-SHA"; "!DSS"; } -- "Old" cipher list local old_cipher_list = cipher_list { "ECDHE-ECDSA-CHACHA20-POLY1305"; "ECDHE-RSA-CHACHA20-POLY1305"; "ECDHE-RSA-AES128-GCM-SHA256"; "ECDHE-ECDSA-AES128-GCM-SHA256"; "ECDHE-RSA-AES256-GCM-SHA384"; "ECDHE-ECDSA-AES256-GCM-SHA384"; "DHE-RSA-AES128-GCM-SHA256"; "DHE-DSS-AES128-GCM-SHA256"; "kEDH+AESGCM"; "ECDHE-RSA-AES128-SHA256"; "ECDHE-ECDSA-AES128-SHA256"; "ECDHE-RSA-AES128-SHA"; "ECDHE-ECDSA-AES128-SHA"; "ECDHE-RSA-AES256-SHA384"; "ECDHE-ECDSA-AES256-SHA384"; "ECDHE-RSA-AES256-SHA"; "ECDHE-ECDSA-AES256-SHA"; "DHE-RSA-AES128-SHA256"; "DHE-RSA-AES128-SHA"; "DHE-DSS-AES128-SHA256"; "DHE-RSA-AES256-SHA256"; "DHE-DSS-AES256-SHA"; "DHE-RSA-AES256-SHA"; "ECDHE-RSA-DES-CBC3-SHA"; "ECDHE-ECDSA-DES-CBC3-SHA"; "EDH-RSA-DES-CBC3-SHA"; "AES128-GCM-SHA256"; "AES256-GCM-SHA384"; "AES128-SHA256"; "AES256-SHA256"; "AES128-SHA"; "AES256-SHA"; "AES"; "DES-CBC3-SHA"; "HIGH"; "SEED"; "!aNULL"; "!eNULL"; "!EXPORT"; "!DES"; "!RC4"; "!MD5"; "!PSK"; "!RSAPSK"; "!aDH"; "!aECDH"; "!EDH-DSS-DES-CBC3-SHA"; "!KRB5-DES-CBC3-SHA"; "!SRP"; } -- A map from the cipher identifiers used in specifications to -- the identifiers used by OpenSSL. 
local spec_to_openssl = { -- SSL cipher suites SSL_DH_DSS_WITH_3DES_EDE_CBC_SHA = "DH-DSS-DES-CBC3-SHA"; SSL_DH_RSA_WITH_3DES_EDE_CBC_SHA = "DH-RSA-DES-CBC3-SHA"; SSL_DHE_DSS_WITH_3DES_EDE_CBC_SHA = "DHE-DSS-DES-CBC3-SHA"; SSL_DHE_RSA_WITH_3DES_EDE_CBC_SHA = "DHE-RSA-DES-CBC3-SHA"; SSL_DH_anon_WITH_RC4_128_MD5 = "ADH-RC4-MD5"; SSL_DH_anon_WITH_3DES_EDE_CBC_SHA = "ADH-DES-CBC3-SHA"; -- TLS v1.0 cipher suites. TLS_RSA_WITH_NULL_MD5 = "NULL-MD5"; TLS_RSA_WITH_NULL_SHA = "NULL-SHA"; TLS_RSA_WITH_RC4_128_MD5 = "RC4-MD5"; TLS_RSA_WITH_RC4_128_SHA = "RC4-SHA"; TLS_RSA_WITH_IDEA_CBC_SHA = "IDEA-CBC-SHA"; TLS_RSA_WITH_DES_CBC_SHA = "DES-CBC-SHA"; TLS_RSA_WITH_3DES_EDE_CBC_SHA = "DES-CBC3-SHA"; TLS_DH_DSS_WITH_DES_CBC_SHA = "DH-DSS-DES-CBC-SHA"; TLS_DH_RSA_WITH_DES_CBC_SHA = "DH-RSA-DES-CBC-SHA"; TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA = "DH-DSS-DES-CBC3-SHA"; TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA = "DH-RSA-DES-CBC3-SHA"; TLS_DHE_DSS_WITH_DES_CBC_SHA = "EDH-DSS-DES-CBC-SHA"; TLS_DHE_RSA_WITH_DES_CBC_SHA = "EDH-RSA-DES-CBC-SHA"; TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA = "DHE-DSS-DES-CBC3-SHA"; TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA = "DHE-RSA-DES-CBC3-SHA"; TLS_DH_anon_WITH_RC4_128_MD5 = "ADH-RC4-MD5"; TLS_DH_anon_WITH_DES_CBC_SHA = "ADH-DES-CBC-SHA"; TLS_DH_anon_WITH_3DES_EDE_CBC_SHA = "ADH-DES-CBC3-SHA"; -- AES ciphersuites from RFC3268, extending TLS v1.0 TLS_RSA_WITH_AES_128_CBC_SHA = "AES128-SHA"; TLS_RSA_WITH_AES_256_CBC_SHA = "AES256-SHA"; TLS_DH_DSS_WITH_AES_128_CBC_SHA = "DH-DSS-AES128-SHA"; TLS_DH_DSS_WITH_AES_256_CBC_SHA = "DH-DSS-AES256-SHA"; TLS_DH_RSA_WITH_AES_128_CBC_SHA = "DH-RSA-AES128-SHA"; TLS_DH_RSA_WITH_AES_256_CBC_SHA = "DH-RSA-AES256-SHA"; TLS_DHE_DSS_WITH_AES_128_CBC_SHA = "DHE-DSS-AES128-SHA"; TLS_DHE_DSS_WITH_AES_256_CBC_SHA = "DHE-DSS-AES256-SHA"; TLS_DHE_RSA_WITH_AES_128_CBC_SHA = "DHE-RSA-AES128-SHA"; TLS_DHE_RSA_WITH_AES_256_CBC_SHA = "DHE-RSA-AES256-SHA"; TLS_DH_anon_WITH_AES_128_CBC_SHA = "ADH-AES128-SHA"; TLS_DH_anon_WITH_AES_256_CBC_SHA = "ADH-AES256-SHA"; -- Camellia ciphersuites from RFC4132, extending TLS v1.0 TLS_RSA_WITH_CAMELLIA_128_CBC_SHA = "CAMELLIA128-SHA"; TLS_RSA_WITH_CAMELLIA_256_CBC_SHA = "CAMELLIA256-SHA"; TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA = "DH-DSS-CAMELLIA128-SHA"; TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA = "DH-DSS-CAMELLIA256-SHA"; TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA = "DH-RSA-CAMELLIA128-SHA"; TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA = "DH-RSA-CAMELLIA256-SHA"; TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA = "DHE-DSS-CAMELLIA128-SHA"; TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA = "DHE-DSS-CAMELLIA256-SHA"; TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA = "DHE-RSA-CAMELLIA128-SHA"; TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA = "DHE-RSA-CAMELLIA256-SHA"; TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA = "ADH-CAMELLIA128-SHA"; TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA = "ADH-CAMELLIA256-SHA"; -- SEED ciphersuites from RFC4162, extending TLS v1.0 TLS_RSA_WITH_SEED_CBC_SHA = "SEED-SHA"; TLS_DH_DSS_WITH_SEED_CBC_SHA = "DH-DSS-SEED-SHA"; TLS_DH_RSA_WITH_SEED_CBC_SHA = "DH-RSA-SEED-SHA"; TLS_DHE_DSS_WITH_SEED_CBC_SHA = "DHE-DSS-SEED-SHA"; TLS_DHE_RSA_WITH_SEED_CBC_SHA = "DHE-RSA-SEED-SHA"; TLS_DH_anon_WITH_SEED_CBC_SHA = "ADH-SEED-SHA"; -- GOST ciphersuites from draft-chudov-cryptopro-cptls, extending TLS v1.0 TLS_GOSTR341094_WITH_28147_CNT_IMIT = "GOST94-GOST89-GOST89"; TLS_GOSTR341001_WITH_28147_CNT_IMIT = "GOST2001-GOST89-GOST89"; TLS_GOSTR341094_WITH_NULL_GOSTR3411 = "GOST94-NULL-GOST94"; TLS_GOSTR341001_WITH_NULL_GOSTR3411 = "GOST2001-NULL-GOST94"; -- Additional Export 1024 and other cipher suites 
TLS_DHE_DSS_WITH_RC4_128_SHA = "DHE-DSS-RC4-SHA"; -- Elliptic curve cipher suites. TLS_ECDH_RSA_WITH_NULL_SHA = "ECDH-RSA-NULL-SHA"; TLS_ECDH_RSA_WITH_RC4_128_SHA = "ECDH-RSA-RC4-SHA"; TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA = "ECDH-RSA-DES-CBC3-SHA"; TLS_ECDH_RSA_WITH_AES_128_CBC_SHA = "ECDH-RSA-AES128-SHA"; TLS_ECDH_RSA_WITH_AES_256_CBC_SHA = "ECDH-RSA-AES256-SHA"; TLS_ECDH_ECDSA_WITH_NULL_SHA = "ECDH-ECDSA-NULL-SHA"; TLS_ECDH_ECDSA_WITH_RC4_128_SHA = "ECDH-ECDSA-RC4-SHA"; TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA = "ECDH-ECDSA-DES-CBC3-SHA"; TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA = "ECDH-ECDSA-AES128-SHA"; TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA = "ECDH-ECDSA-AES256-SHA"; TLS_ECDHE_RSA_WITH_NULL_SHA = "ECDHE-RSA-NULL-SHA"; TLS_ECDHE_RSA_WITH_RC4_128_SHA = "ECDHE-RSA-RC4-SHA"; TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA = "ECDHE-RSA-DES-CBC3-SHA"; TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = "ECDHE-RSA-AES128-SHA"; TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = "ECDHE-RSA-AES256-SHA"; TLS_ECDHE_ECDSA_WITH_NULL_SHA = "ECDHE-ECDSA-NULL-SHA"; TLS_ECDHE_ECDSA_WITH_RC4_128_SHA = "ECDHE-ECDSA-RC4-SHA"; TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA = "ECDHE-ECDSA-DES-CBC3-SHA"; TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = "ECDHE-ECDSA-AES128-SHA"; TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = "ECDHE-ECDSA-AES256-SHA"; TLS_ECDH_anon_WITH_NULL_SHA = "AECDH-NULL-SHA"; TLS_ECDH_anon_WITH_RC4_128_SHA = "AECDH-RC4-SHA"; TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA = "AECDH-DES-CBC3-SHA"; TLS_ECDH_anon_WITH_AES_128_CBC_SHA = "AECDH-AES128-SHA"; TLS_ECDH_anon_WITH_AES_256_CBC_SHA = "AECDH-AES256-SHA"; -- TLS v1.2 cipher suites TLS_RSA_WITH_NULL_SHA256 = "NULL-SHA256"; TLS_RSA_WITH_AES_128_CBC_SHA256 = "AES128-SHA256"; TLS_RSA_WITH_AES_256_CBC_SHA256 = "AES256-SHA256"; TLS_RSA_WITH_AES_128_GCM_SHA256 = "AES128-GCM-SHA256"; TLS_RSA_WITH_AES_256_GCM_SHA384 = "AES256-GCM-SHA384"; TLS_DH_RSA_WITH_AES_128_CBC_SHA256 = "DH-RSA-AES128-SHA256"; TLS_DH_RSA_WITH_AES_256_CBC_SHA256 = "DH-RSA-AES256-SHA256"; TLS_DH_RSA_WITH_AES_128_GCM_SHA256 = "DH-RSA-AES128-GCM-SHA256"; TLS_DH_RSA_WITH_AES_256_GCM_SHA384 = "DH-RSA-AES256-GCM-SHA384"; TLS_DH_DSS_WITH_AES_128_CBC_SHA256 = "DH-DSS-AES128-SHA256"; TLS_DH_DSS_WITH_AES_256_CBC_SHA256 = "DH-DSS-AES256-SHA256"; TLS_DH_DSS_WITH_AES_128_GCM_SHA256 = "DH-DSS-AES128-GCM-SHA256"; TLS_DH_DSS_WITH_AES_256_GCM_SHA384 = "DH-DSS-AES256-GCM-SHA384"; TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = "DHE-RSA-AES128-SHA256"; TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = "DHE-RSA-AES256-SHA256"; TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = "DHE-RSA-AES128-GCM-SHA256"; TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = "DHE-RSA-AES256-GCM-SHA384"; TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = "DHE-DSS-AES128-SHA256"; TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = "DHE-DSS-AES256-SHA256"; TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = "DHE-DSS-AES128-GCM-SHA256"; TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = "DHE-DSS-AES256-GCM-SHA384"; TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 = "ECDH-RSA-AES128-SHA256"; TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 = "ECDH-RSA-AES256-SHA384"; TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 = "ECDH-RSA-AES128-GCM-SHA256"; TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 = "ECDH-RSA-AES256-GCM-SHA384"; TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 = "ECDH-ECDSA-AES128-SHA256"; TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 = "ECDH-ECDSA-AES256-SHA384"; TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 = "ECDH-ECDSA-AES128-GCM-SHA256"; TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 = "ECDH-ECDSA-AES256-GCM-SHA384"; TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = "ECDHE-RSA-AES128-SHA256"; TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = "ECDHE-RSA-AES256-SHA384"; 
TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = "ECDHE-RSA-AES128-GCM-SHA256"; TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = "ECDHE-RSA-AES256-GCM-SHA384"; TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = "ECDHE-ECDSA-AES128-SHA256"; TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = "ECDHE-ECDSA-AES256-SHA384"; TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = "ECDHE-ECDSA-AES128-GCM-SHA256"; TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = "ECDHE-ECDSA-AES256-GCM-SHA384"; TLS_DH_anon_WITH_AES_128_CBC_SHA256 = "ADH-AES128-SHA256"; TLS_DH_anon_WITH_AES_256_CBC_SHA256 = "ADH-AES256-SHA256"; TLS_DH_anon_WITH_AES_128_GCM_SHA256 = "ADH-AES128-GCM-SHA256"; TLS_DH_anon_WITH_AES_256_GCM_SHA384 = "ADH-AES256-GCM-SHA384"; TLS_RSA_WITH_AES_128_CCM = "AES128-CCM"; TLS_RSA_WITH_AES_256_CCM = "AES256-CCM"; TLS_DHE_RSA_WITH_AES_128_CCM = "DHE-RSA-AES128-CCM"; TLS_DHE_RSA_WITH_AES_256_CCM = "DHE-RSA-AES256-CCM"; TLS_RSA_WITH_AES_128_CCM_8 = "AES128-CCM8"; TLS_RSA_WITH_AES_256_CCM_8 = "AES256-CCM8"; TLS_DHE_RSA_WITH_AES_128_CCM_8 = "DHE-RSA-AES128-CCM8"; TLS_DHE_RSA_WITH_AES_256_CCM_8 = "DHE-RSA-AES256-CCM8"; TLS_ECDHE_ECDSA_WITH_AES_128_CCM = "ECDHE-ECDSA-AES128-CCM"; TLS_ECDHE_ECDSA_WITH_AES_256_CCM = "ECDHE-ECDSA-AES256-CCM"; TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 = "ECDHE-ECDSA-AES128-CCM8"; TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 = "ECDHE-ECDSA-AES256-CCM8"; -- Camellia HMAC-Based ciphersuites from RFC6367, extending TLS v1.2 TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 = "ECDHE-ECDSA-CAMELLIA128-SHA256"; TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 = "ECDHE-ECDSA-CAMELLIA256-SHA384"; TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 = "ECDH-ECDSA-CAMELLIA128-SHA256"; TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 = "ECDH-ECDSA-CAMELLIA256-SHA384"; TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 = "ECDHE-RSA-CAMELLIA128-SHA256"; TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 = "ECDHE-RSA-CAMELLIA256-SHA384"; TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 = "ECDH-RSA-CAMELLIA128-SHA256"; TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 = "ECDH-RSA-CAMELLIA256-SHA384"; -- Pre shared keying (PSK) ciphersuites TLS_PSK_WITH_NULL_SHA = "PSK-NULL-SHA"; TLS_DHE_PSK_WITH_NULL_SHA = "DHE-PSK-NULL-SHA"; TLS_RSA_PSK_WITH_NULL_SHA = "RSA-PSK-NULL-SHA"; TLS_PSK_WITH_RC4_128_SHA = "PSK-RC4-SHA"; TLS_PSK_WITH_3DES_EDE_CBC_SHA = "PSK-3DES-EDE-CBC-SHA"; TLS_PSK_WITH_AES_128_CBC_SHA = "PSK-AES128-CBC-SHA"; TLS_PSK_WITH_AES_256_CBC_SHA = "PSK-AES256-CBC-SHA"; TLS_DHE_PSK_WITH_RC4_128_SHA = "DHE-PSK-RC4-SHA"; TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA = "DHE-PSK-3DES-EDE-CBC-SHA"; TLS_DHE_PSK_WITH_AES_128_CBC_SHA = "DHE-PSK-AES128-CBC-SHA"; TLS_DHE_PSK_WITH_AES_256_CBC_SHA = "DHE-PSK-AES256-CBC-SHA"; TLS_RSA_PSK_WITH_RC4_128_SHA = "RSA-PSK-RC4-SHA"; TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA = "RSA-PSK-3DES-EDE-CBC-SHA"; TLS_RSA_PSK_WITH_AES_128_CBC_SHA = "RSA-PSK-AES128-CBC-SHA"; TLS_RSA_PSK_WITH_AES_256_CBC_SHA = "RSA-PSK-AES256-CBC-SHA"; TLS_PSK_WITH_AES_128_GCM_SHA256 = "PSK-AES128-GCM-SHA256"; TLS_PSK_WITH_AES_256_GCM_SHA384 = "PSK-AES256-GCM-SHA384"; TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 = "DHE-PSK-AES128-GCM-SHA256"; TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 = "DHE-PSK-AES256-GCM-SHA384"; TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 = "RSA-PSK-AES128-GCM-SHA256"; TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 = "RSA-PSK-AES256-GCM-SHA384"; TLS_PSK_WITH_AES_128_CBC_SHA256 = "PSK-AES128-CBC-SHA256"; TLS_PSK_WITH_AES_256_CBC_SHA384 = "PSK-AES256-CBC-SHA384"; TLS_PSK_WITH_NULL_SHA256 = "PSK-NULL-SHA256"; TLS_PSK_WITH_NULL_SHA384 = "PSK-NULL-SHA384"; TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 = "DHE-PSK-AES128-CBC-SHA256"; 
TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 = "DHE-PSK-AES256-CBC-SHA384"; TLS_DHE_PSK_WITH_NULL_SHA256 = "DHE-PSK-NULL-SHA256"; TLS_DHE_PSK_WITH_NULL_SHA384 = "DHE-PSK-NULL-SHA384"; TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 = "RSA-PSK-AES128-CBC-SHA256"; TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 = "RSA-PSK-AES256-CBC-SHA384"; TLS_RSA_PSK_WITH_NULL_SHA256 = "RSA-PSK-NULL-SHA256"; TLS_RSA_PSK_WITH_NULL_SHA384 = "RSA-PSK-NULL-SHA384"; TLS_ECDHE_PSK_WITH_RC4_128_SHA = "ECDHE-PSK-RC4-SHA"; TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA = "ECDHE-PSK-3DES-EDE-CBC-SHA"; TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA = "ECDHE-PSK-AES128-CBC-SHA"; TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA = "ECDHE-PSK-AES256-CBC-SHA"; TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 = "ECDHE-PSK-AES128-CBC-SHA256"; TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 = "ECDHE-PSK-AES256-CBC-SHA384"; TLS_ECDHE_PSK_WITH_NULL_SHA = "ECDHE-PSK-NULL-SHA"; TLS_ECDHE_PSK_WITH_NULL_SHA256 = "ECDHE-PSK-NULL-SHA256"; TLS_ECDHE_PSK_WITH_NULL_SHA384 = "ECDHE-PSK-NULL-SHA384"; TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 = "PSK-CAMELLIA128-SHA256"; TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 = "PSK-CAMELLIA256-SHA384"; TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 = "DHE-PSK-CAMELLIA128-SHA256"; TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 = "DHE-PSK-CAMELLIA256-SHA384"; TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 = "RSA-PSK-CAMELLIA128-SHA256"; TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 = "RSA-PSK-CAMELLIA256-SHA384"; TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 = "ECDHE-PSK-CAMELLIA128-SHA256"; TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 = "ECDHE-PSK-CAMELLIA256-SHA384"; TLS_PSK_WITH_AES_128_CCM = "PSK-AES128-CCM"; TLS_PSK_WITH_AES_256_CCM = "PSK-AES256-CCM"; TLS_DHE_PSK_WITH_AES_128_CCM = "DHE-PSK-AES128-CCM"; TLS_DHE_PSK_WITH_AES_256_CCM = "DHE-PSK-AES256-CCM"; TLS_PSK_WITH_AES_128_CCM_8 = "PSK-AES128-CCM8"; TLS_PSK_WITH_AES_256_CCM_8 = "PSK-AES256-CCM8"; TLS_DHE_PSK_WITH_AES_128_CCM_8 = "DHE-PSK-AES128-CCM8"; TLS_DHE_PSK_WITH_AES_256_CCM_8 = "DHE-PSK-AES256-CCM8"; -- Export ciphers TLS_RSA_EXPORT_WITH_RC4_40_MD5 = "EXP-RC4-MD5"; TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 = "EXP-RC2-CBC-MD5"; TLS_RSA_EXPORT_WITH_DES40_CBC_SHA = "EXP-DES-CBC-SHA"; TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA = "EXP-ADH-DES-CBC-SHA"; TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 = "EXP-ADH-RC4-MD5"; TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA = "EXP-EDH-RSA-DES-CBC-SHA"; TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA = "EXP-EDH-DSS-DES-CBC-SHA"; TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA = "EXP-DH-DSS-DES-CBC-SHA"; TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA = "EXP-DH-RSA-DES-CBC-SHA"; -- KRB5 TLS_KRB5_WITH_DES_CBC_SHA = "KRB5-DES-CBC-SHA"; TLS_KRB5_WITH_3DES_EDE_CBC_SHA = "KRB5-DES-CBC3-SHA"; TLS_KRB5_WITH_RC4_128_SHA = "KRB5-RC4-SHA"; TLS_KRB5_WITH_IDEA_CBC_SHA = "KRB5-IDEA-CBC-SHA"; TLS_KRB5_WITH_DES_CBC_MD5 = "KRB5-DES-CBC-MD5"; TLS_KRB5_WITH_3DES_EDE_CBC_MD5 = "KRB5-DES-CBC3-MD5"; TLS_KRB5_WITH_RC4_128_MD5 = "KRB5-RC4-MD5"; TLS_KRB5_WITH_IDEA_CBC_MD5 = "KRB5-IDEA-CBC-MD5"; TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA = "EXP-KRB5-DES-CBC-SHA"; TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA = "EXP-KRB5-RC2-CBC-SHA"; TLS_KRB5_EXPORT_WITH_RC4_40_SHA = "EXP-KRB5-RC4-SHA"; TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 = "EXP-KRB5-DES-CBC-MD5"; TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 = "EXP-KRB5-RC2-CBC-MD5"; TLS_KRB5_EXPORT_WITH_RC4_40_MD5 = "EXP-KRB5-RC4-MD5"; -- SRP5 TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA = "SRP-3DES-EDE-CBC-SHA"; TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA = "SRP-RSA-3DES-EDE-CBC-SHA"; TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA = "SRP-DSS-3DES-EDE-CBC-SHA"; TLS_SRP_SHA_WITH_AES_128_CBC_SHA = "SRP-AES-128-CBC-SHA"; 
TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA = "SRP-RSA-AES-128-CBC-SHA"; TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA = "SRP-DSS-AES-128-CBC-SHA"; TLS_SRP_SHA_WITH_AES_256_CBC_SHA = "SRP-AES-256-CBC-SHA"; TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA = "SRP-RSA-AES-256-CBC-SHA"; TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA = "SRP-DSS-AES-256-CBC-SHA"; -- CHACHA20+POLY1305 TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = "ECDHE-RSA-CHACHA20-POLY1305"; TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = "ECDHE-ECDSA-CHACHA20-POLY1305"; TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = "DHE-RSA-CHACHA20-POLY1305"; TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 = "PSK-CHACHA20-POLY1305"; TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 = "ECDHE-PSK-CHACHA20-POLY1305"; TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 = "DHE-PSK-CHACHA20-POLY1305"; TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 = "RSA-PSK-CHACHA20-POLY1305"; } -- Banned ciphers from https://http2.github.io/http2-spec/#BadCipherSuites local banned_ciphers = {} for _, v in ipairs { "TLS_NULL_WITH_NULL_NULL"; "TLS_RSA_WITH_NULL_MD5"; "TLS_RSA_WITH_NULL_SHA"; "TLS_RSA_EXPORT_WITH_RC4_40_MD5"; "TLS_RSA_WITH_RC4_128_MD5"; "TLS_RSA_WITH_RC4_128_SHA"; "TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5"; "TLS_RSA_WITH_IDEA_CBC_SHA"; "TLS_RSA_EXPORT_WITH_DES40_CBC_SHA"; "TLS_RSA_WITH_DES_CBC_SHA"; "TLS_RSA_WITH_3DES_EDE_CBC_SHA"; "TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA"; "TLS_DH_DSS_WITH_DES_CBC_SHA"; "TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA"; "TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA"; "TLS_DH_RSA_WITH_DES_CBC_SHA"; "TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA"; "TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA"; "TLS_DHE_DSS_WITH_DES_CBC_SHA"; "TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA"; "TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA"; "TLS_DHE_RSA_WITH_DES_CBC_SHA"; "TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA"; "TLS_DH_anon_EXPORT_WITH_RC4_40_MD5"; "TLS_DH_anon_WITH_RC4_128_MD5"; "TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA"; "TLS_DH_anon_WITH_DES_CBC_SHA"; "TLS_DH_anon_WITH_3DES_EDE_CBC_SHA"; "TLS_KRB5_WITH_DES_CBC_SHA"; "TLS_KRB5_WITH_3DES_EDE_CBC_SHA"; "TLS_KRB5_WITH_RC4_128_SHA"; "TLS_KRB5_WITH_IDEA_CBC_SHA"; "TLS_KRB5_WITH_DES_CBC_MD5"; "TLS_KRB5_WITH_3DES_EDE_CBC_MD5"; "TLS_KRB5_WITH_RC4_128_MD5"; "TLS_KRB5_WITH_IDEA_CBC_MD5"; "TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA"; "TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA"; "TLS_KRB5_EXPORT_WITH_RC4_40_SHA"; "TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5"; "TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5"; "TLS_KRB5_EXPORT_WITH_RC4_40_MD5"; "TLS_PSK_WITH_NULL_SHA"; "TLS_DHE_PSK_WITH_NULL_SHA"; "TLS_RSA_PSK_WITH_NULL_SHA"; "TLS_RSA_WITH_AES_128_CBC_SHA"; "TLS_DH_DSS_WITH_AES_128_CBC_SHA"; "TLS_DH_RSA_WITH_AES_128_CBC_SHA"; "TLS_DHE_DSS_WITH_AES_128_CBC_SHA"; "TLS_DHE_RSA_WITH_AES_128_CBC_SHA"; "TLS_DH_anon_WITH_AES_128_CBC_SHA"; "TLS_RSA_WITH_AES_256_CBC_SHA"; "TLS_DH_DSS_WITH_AES_256_CBC_SHA"; "TLS_DH_RSA_WITH_AES_256_CBC_SHA"; "TLS_DHE_DSS_WITH_AES_256_CBC_SHA"; "TLS_DHE_RSA_WITH_AES_256_CBC_SHA"; "TLS_DH_anon_WITH_AES_256_CBC_SHA"; "TLS_RSA_WITH_NULL_SHA256"; "TLS_RSA_WITH_AES_128_CBC_SHA256"; "TLS_RSA_WITH_AES_256_CBC_SHA256"; "TLS_DH_DSS_WITH_AES_128_CBC_SHA256"; "TLS_DH_RSA_WITH_AES_128_CBC_SHA256"; "TLS_DHE_DSS_WITH_AES_128_CBC_SHA256"; "TLS_RSA_WITH_CAMELLIA_128_CBC_SHA"; "TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA"; "TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA"; "TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA"; "TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA"; "TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA"; "TLS_DHE_RSA_WITH_AES_128_CBC_SHA256"; "TLS_DH_DSS_WITH_AES_256_CBC_SHA256"; "TLS_DH_RSA_WITH_AES_256_CBC_SHA256"; "TLS_DHE_DSS_WITH_AES_256_CBC_SHA256"; "TLS_DHE_RSA_WITH_AES_256_CBC_SHA256"; 
"TLS_DH_anon_WITH_AES_128_CBC_SHA256"; "TLS_DH_anon_WITH_AES_256_CBC_SHA256"; "TLS_RSA_WITH_CAMELLIA_256_CBC_SHA"; "TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA"; "TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA"; "TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA"; "TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA"; "TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA"; "TLS_PSK_WITH_RC4_128_SHA"; "TLS_PSK_WITH_3DES_EDE_CBC_SHA"; "TLS_PSK_WITH_AES_128_CBC_SHA"; "TLS_PSK_WITH_AES_256_CBC_SHA"; "TLS_DHE_PSK_WITH_RC4_128_SHA"; "TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA"; "TLS_DHE_PSK_WITH_AES_128_CBC_SHA"; "TLS_DHE_PSK_WITH_AES_256_CBC_SHA"; "TLS_RSA_PSK_WITH_RC4_128_SHA"; "TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA"; "TLS_RSA_PSK_WITH_AES_128_CBC_SHA"; "TLS_RSA_PSK_WITH_AES_256_CBC_SHA"; "TLS_RSA_WITH_SEED_CBC_SHA"; "TLS_DH_DSS_WITH_SEED_CBC_SHA"; "TLS_DH_RSA_WITH_SEED_CBC_SHA"; "TLS_DHE_DSS_WITH_SEED_CBC_SHA"; "TLS_DHE_RSA_WITH_SEED_CBC_SHA"; "TLS_DH_anon_WITH_SEED_CBC_SHA"; "TLS_RSA_WITH_AES_128_GCM_SHA256"; "TLS_RSA_WITH_AES_256_GCM_SHA384"; "TLS_DH_RSA_WITH_AES_128_GCM_SHA256"; "TLS_DH_RSA_WITH_AES_256_GCM_SHA384"; "TLS_DH_DSS_WITH_AES_128_GCM_SHA256"; "TLS_DH_DSS_WITH_AES_256_GCM_SHA384"; "TLS_DH_anon_WITH_AES_128_GCM_SHA256"; "TLS_DH_anon_WITH_AES_256_GCM_SHA384"; "TLS_PSK_WITH_AES_128_GCM_SHA256"; "TLS_PSK_WITH_AES_256_GCM_SHA384"; "TLS_RSA_PSK_WITH_AES_128_GCM_SHA256"; "TLS_RSA_PSK_WITH_AES_256_GCM_SHA384"; "TLS_PSK_WITH_AES_128_CBC_SHA256"; "TLS_PSK_WITH_AES_256_CBC_SHA384"; "TLS_PSK_WITH_NULL_SHA256"; "TLS_PSK_WITH_NULL_SHA384"; "TLS_DHE_PSK_WITH_AES_128_CBC_SHA256"; "TLS_DHE_PSK_WITH_AES_256_CBC_SHA384"; "TLS_DHE_PSK_WITH_NULL_SHA256"; "TLS_DHE_PSK_WITH_NULL_SHA384"; "TLS_RSA_PSK_WITH_AES_128_CBC_SHA256"; "TLS_RSA_PSK_WITH_AES_256_CBC_SHA384"; "TLS_RSA_PSK_WITH_NULL_SHA256"; "TLS_RSA_PSK_WITH_NULL_SHA384"; "TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256"; "TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256"; "TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256"; "TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256"; "TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256"; "TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256"; "TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256"; "TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256"; "TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256"; "TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256"; "TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256"; "TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256"; "TLS_EMPTY_RENEGOTIATION_INFO_SCSV"; "TLS_ECDH_ECDSA_WITH_NULL_SHA"; "TLS_ECDH_ECDSA_WITH_RC4_128_SHA"; "TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA"; "TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA"; "TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA"; "TLS_ECDHE_ECDSA_WITH_NULL_SHA"; "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA"; "TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA"; "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA"; "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA"; "TLS_ECDH_RSA_WITH_NULL_SHA"; "TLS_ECDH_RSA_WITH_RC4_128_SHA"; "TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA"; "TLS_ECDH_RSA_WITH_AES_128_CBC_SHA"; "TLS_ECDH_RSA_WITH_AES_256_CBC_SHA"; "TLS_ECDHE_RSA_WITH_NULL_SHA"; "TLS_ECDHE_RSA_WITH_RC4_128_SHA"; "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA"; "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA"; "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA"; "TLS_ECDH_anon_WITH_NULL_SHA"; "TLS_ECDH_anon_WITH_RC4_128_SHA"; "TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA"; "TLS_ECDH_anon_WITH_AES_128_CBC_SHA"; "TLS_ECDH_anon_WITH_AES_256_CBC_SHA"; "TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA"; "TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA"; "TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA"; "TLS_SRP_SHA_WITH_AES_128_CBC_SHA"; "TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA"; "TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA"; "TLS_SRP_SHA_WITH_AES_256_CBC_SHA"; "TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA"; 
"TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA"; "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"; "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384"; "TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256"; "TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384"; "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"; "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384"; "TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256"; "TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384"; "TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256"; "TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384"; "TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256"; "TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384"; "TLS_ECDHE_PSK_WITH_RC4_128_SHA"; "TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA"; "TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA"; "TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA"; "TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256"; "TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384"; "TLS_ECDHE_PSK_WITH_NULL_SHA"; "TLS_ECDHE_PSK_WITH_NULL_SHA256"; "TLS_ECDHE_PSK_WITH_NULL_SHA384"; "TLS_RSA_WITH_ARIA_128_CBC_SHA256"; "TLS_RSA_WITH_ARIA_256_CBC_SHA384"; "TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256"; "TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384"; "TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256"; "TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384"; "TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256"; "TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384"; "TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256"; "TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384"; "TLS_DH_anon_WITH_ARIA_128_CBC_SHA256"; "TLS_DH_anon_WITH_ARIA_256_CBC_SHA384"; "TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256"; "TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384"; "TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256"; "TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384"; "TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256"; "TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384"; "TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256"; "TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384"; "TLS_RSA_WITH_ARIA_128_GCM_SHA256"; "TLS_RSA_WITH_ARIA_256_GCM_SHA384"; "TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256"; "TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384"; "TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256"; "TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384"; "TLS_DH_anon_WITH_ARIA_128_GCM_SHA256"; "TLS_DH_anon_WITH_ARIA_256_GCM_SHA384"; "TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256"; "TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384"; "TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256"; "TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384"; "TLS_PSK_WITH_ARIA_128_CBC_SHA256"; "TLS_PSK_WITH_ARIA_256_CBC_SHA384"; "TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256"; "TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384"; "TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256"; "TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384"; "TLS_PSK_WITH_ARIA_128_GCM_SHA256"; "TLS_PSK_WITH_ARIA_256_GCM_SHA384"; "TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256"; "TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384"; "TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256"; "TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384"; "TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256"; "TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384"; "TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256"; "TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384"; "TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256"; "TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384"; "TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256"; "TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384"; "TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256"; "TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384"; "TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256"; "TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384"; "TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256"; "TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384"; "TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256"; "TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384"; "TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256"; "TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384"; "TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256"; "TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384"; "TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256"; 
"TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384"; "TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256"; "TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384"; "TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256"; "TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384"; "TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256"; "TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384"; "TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256"; "TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384"; "TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256"; "TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384"; "TLS_RSA_WITH_AES_128_CCM"; "TLS_RSA_WITH_AES_256_CCM"; "TLS_RSA_WITH_AES_128_CCM_8"; "TLS_RSA_WITH_AES_256_CCM_8"; "TLS_PSK_WITH_AES_128_CCM"; "TLS_PSK_WITH_AES_256_CCM"; "TLS_PSK_WITH_AES_128_CCM_8"; "TLS_PSK_WITH_AES_256_CCM_8"; } do local openssl_cipher_name = spec_to_openssl[v] if openssl_cipher_name then banned_ciphers[openssl_cipher_name] = true end end local default_tls_options = openssl_ctx.OP_NO_COMPRESSION + openssl_ctx.OP_SINGLE_ECDH_USE + openssl_ctx.OP_NO_SSLv2 + openssl_ctx.OP_NO_SSLv3 local function new_client_context() local ctx = openssl_ctx.new("TLS", false) ctx:setCipherList(intermediate_cipher_list) ctx:setOptions(default_tls_options) if ctx.setGroups then ctx:setGroups("P-521:P-384:P-256") else ctx:setEphemeralKey(openssl_pkey.new{ type = "EC", curve = "prime256v1" }) end local store = ctx:getStore() store:addDefaults() ctx:setVerify(openssl_ctx.VERIFY_PEER) return ctx end local function new_server_context() local ctx = openssl_ctx.new("TLS", true) ctx:setCipherList(intermediate_cipher_list) ctx:setOptions(default_tls_options) if ctx.setGroups then ctx:setGroups("P-521:P-384:P-256") else ctx:setEphemeralKey(openssl_pkey.new{ type = "EC", curve = "prime256v1" }) end return ctx end return { has_alpn = has_alpn; has_hostname_validation = has_hostname_validation; modern_cipher_list = modern_cipher_list; intermediate_cipher_list = intermediate_cipher_list; old_cipher_list = old_cipher_list; banned_ciphers = banned_ciphers; new_client_context = new_client_context; new_server_context = new_server_context; } lua-http-0.4/http/tls.tld000066400000000000000000000003701400726324600154050ustar00rootroot00000000000000has_alpn: boolean has_hostname_validation: boolean modern_cipher_list: string intermediate_cipher_list: string old_cipher_list: string banned_ciphers: {string: true} -- TODO: luaossl SSL context type new_client_context: any new_server_context: any lua-http-0.4/http/util.lua000066400000000000000000000167011400726324600155630ustar00rootroot00000000000000local lpeg = require "lpeg" local http_patts = require "lpeg_patterns.http" local IPv4_patts = require "lpeg_patterns.IPv4" local IPv6_patts = require "lpeg_patterns.IPv6" local EOF = lpeg.P(-1) -- Encodes a character as a percent encoded string local function char_to_pchar(c) return string.format("%%%02X", c:byte(1,1)) end -- encodeURI replaces all characters except the following with the appropriate UTF-8 escape sequences: -- ; , / ? : @ & = + $ -- alphabetic, decimal digits, - _ . ! ~ * ' ( ) -- # local function encodeURI(str) return (str:gsub("[^%;%,%/%?%:%@%&%=%+%$%w%-%_%.%!%~%*%'%(%)%#]", char_to_pchar)) end -- encodeURIComponent escapes all characters except the following: alphabetic, decimal digits, - _ . ! 
~ * ' ( ) local function encodeURIComponent(str) return (str:gsub("[^%w%-_%.%!%~%*%'%(%)]", char_to_pchar)) end -- decodeURI unescapes url encoded characters -- excluding characters that are special in urls local decodeURI do local decodeURI_blacklist = {} for char in ("#$&+,/:;=?@"):gmatch(".") do decodeURI_blacklist[string.byte(char)] = true end local function decodeURI_helper(str) local x = tonumber(str, 16) if not decodeURI_blacklist[x] then return string.char(x) end -- return nothing; gsub will not perform the replacement end function decodeURI(str) return (str:gsub("%%(%x%x)", decodeURI_helper)) end end -- Converts a hex string to a character local function pchar_to_char(str) return string.char(tonumber(str, 16)) end -- decodeURIComponent unescapes *all* url encoded characters local function decodeURIComponent(str) return (str:gsub("%%(%x%x)", pchar_to_char)) end -- An iterator over query segments (delimited by "&") as key/value pairs -- if a query segment has no '=', the value will be `nil` local function query_args(str) local iter, state, first = str:gmatch("([^=&]+)(=?)([^&]*)&?") return function(state, last) -- luacheck: ignore 431 local name, equals, value = iter(state, last) if name == nil then return nil end name = decodeURIComponent(name) if equals == "" then value = nil else value = decodeURIComponent(value) end return name, value end, state, first end -- Converts a dictionary (string keys, string values) to an encoded query string local function dict_to_query(form) local r, i = {}, 0 for name, value in pairs(form) do i = i + 1 r[i] = encodeURIComponent(name).."="..encodeURIComponent(value) end return table.concat(r, "&", 1, i) end -- Resolves a relative path local function resolve_relative_path(orig_path, relative_path) local t, i = {}, 0 local is_abs if relative_path:sub(1,1) == "/" then -- "relative" argument is actually absolute. ignore orig_path argument is_abs = true else is_abs = orig_path:sub(1,1) == "/" -- this will skip empty path components due to + -- the / on the end ignores trailing component for segment in orig_path:gmatch("([^/]+)/") do i = i + 1 t[i] = segment end end for segment in relative_path:gmatch("([^/]+)") do if segment == ".." then -- if we're at the root, do nothing if i > 0 then -- discard a component i = i - 1 end elseif segment ~= "." then i = i + 1 t[i] = segment end end -- Make sure leading slash is kept local s if is_abs then if i == 0 then return "/" end t[0] = "" s = 0 else s = 1 end -- Make sure trailing slash is kept if relative_path:sub(-1, -1) == "/" then i = i + 1 t[i] = "" end return table.concat(t, "/", s, i) end local safe_methods = { -- RFC 7231 Section 4.2.1: -- Of the request methods defined by this specification, the GET, HEAD, -- OPTIONS, and TRACE methods are defined to be safe. 
GET = true; HEAD = true; OPTIONS = true; TRACE = true; } local function is_safe_method(method) return safe_methods[method] or false end local IPaddress = (IPv4_patts.IPv4address + IPv6_patts.IPv6addrz) * EOF local function is_ip(str) return IPaddress:match(str) ~= nil end local scheme_to_port = { http = 80; ws = 80; https = 443; wss = 443; } -- Splits a :authority header (same as Host) into host and port local function split_authority(authority, scheme) local host, port local h, p = authority:match("^[ \t]*(.-):(%d+)[ \t]*$") if p then authority = h port = tonumber(p, 10) else -- when port missing from host header, it defaults to the default for that scheme port = scheme_to_port[scheme] if port == nil then return nil, "unknown scheme" end end local ipv6 = authority:match("^%[([:%x]+)%]$") if ipv6 then host = ipv6 else host = authority end return host, port end -- Reverse of `split_authority`: converts a host, port and scheme -- into a string suitable for an :authority header. local function to_authority(host, port, scheme) local authority = host if host:match("^[%x:]+:[%x:]*$") then -- IPv6 authority = "[" .. authority .. "]" end local default_port = scheme_to_port[scheme] if default_port == port then port = nil end if port then authority = string.format("%s:%d", authority, port) end return authority end -- HTTP prefered date format -- See RFC 7231 section 7.1.1.1 local function imf_date(time) return os.date("!%a, %d %b %Y %H:%M:%S GMT", time) end -- This pattern checks if its argument is a valid token, if so, it returns it as is. -- Otherwise, it returns it as a quoted string (with any special characters escaped) local maybe_quote do local patt = http_patts.token * EOF + lpeg.Cs(lpeg.Cc'"' * ((lpeg.S"\\\"") / "\\%0" + http_patts.qdtext)^0 * lpeg.Cc'"') * EOF maybe_quote = function (s) return patt:match(s) end end -- A pcall-alike function that can be yielded over even in PUC 5.1 local yieldable_pcall --[[ If pcall can already yield, then we want to use that. However, we can't do the feature check straight away, Openresty breaks coroutine.wrap in some contexts. See #98 Openresty nominally only supports LuaJIT, which always supports a yieldable pcall, so we short-circuit the feature check by checking if the 'ngx' library is loaded, plus that jit.version_num indicates LuaJIT 2.0. This combination ensures that we don't take the wrong branch if: - lua-http is being used to mock the openresty environment - openresty is compiled with something other than LuaJIT ]] if ( package.loaded.ngx and type(package.loaded.jit) == "table" and type(package.loaded.jit.version_num) == "number" and package.loaded.jit.version_num >= 20000 ) -- See if pcall can be yielded over or coroutine.wrap(function() return pcall(coroutine.yield, true) end )() then yieldable_pcall = pcall else local function handle_resume(co, ok, ...) if not ok then return false, ... elseif coroutine.status(co) == "dead" then return true, ... end return handle_resume(co, coroutine.resume(co, coroutine.yield(...))) end yieldable_pcall = function(func, ...) if type(func) ~= "function" or debug.getinfo(func, "S").what == "C" then local C_func = func -- Can't give C functions to coroutine.create func = function(...) return C_func(...) 
end end local co = coroutine.create(func) return handle_resume(co, coroutine.resume(co, ...)) end end return { encodeURI = encodeURI; encodeURIComponent = encodeURIComponent; decodeURI = decodeURI; decodeURIComponent = decodeURIComponent; query_args = query_args; dict_to_query = dict_to_query; resolve_relative_path = resolve_relative_path; is_safe_method = is_safe_method; is_ip = is_ip; scheme_to_port = scheme_to_port; split_authority = split_authority; to_authority = to_authority; imf_date = imf_date; maybe_quote = maybe_quote; yieldable_pcall = yieldable_pcall; } lua-http-0.4/http/util.tld000066400000000000000000000012301400726324600155540ustar00rootroot00000000000000encodeURI: (string) -> (string) encodeURIComponent: (string) -> (string) decodeURI: (string) -> (string) decodeURIComponent: (string) -> (string) query_args: (string) -> ((any) -> (string, string), any, any) dict_to_query: ({string:string}) -> (string) resolve_relative_path: (orig_path, relative_path) -> (string) is_safe_method: (method) -> (boolean) is_ip: (string) -> (boolean) scheme_to_port: {string:integer} split_authority: (string, string) -> (string, integer)|(nil, string) to_authority: (string, integer, string|nil) -> (string) imf_date: (time) -> (string) maybe_quote: (string) -> (string) yieldable_pcall: ((any*) -> (any*), any*) -> (boolean, any*) lua-http-0.4/http/version.lua000066400000000000000000000002271400726324600162670ustar00rootroot00000000000000--[[ This file contains the lua-http release. It should be updated as part of the release process ]] return { name = "lua-http"; version = "0.4"; } lua-http-0.4/http/version.tld000066400000000000000000000000351400726324600162660ustar00rootroot00000000000000name: string version: string lua-http-0.4/http/websocket.lua000066400000000000000000000623661400726324600166040ustar00rootroot00000000000000--[[ WebSocket module Specified in RFC-6455 This code is partially based on MIT/X11 code Copyright (C) 2012 Florian Zeitz Design criteria: - Client API must work without an event loop - Borrow from the Browser Javascript WebSocket API when sensible - server-side API should mirror client-side API - avoid reading data from the socket when the application doesn't want it (and loosing our TCP provided backpressure) ## Notes on websocket pings: - You MAY not receive a pong for every ping you send. - You MAY receive extra pongs These two facts together mean that you can't track pings. Hence pings are only useful to know if the peer is still connected. If the peer is sending *anything*, then you know they are still connected. 
]] local basexx = require "basexx" local spack = string.pack or require "compat53.string".pack -- luacheck: ignore 143 local sunpack = string.unpack or require "compat53.string".unpack -- luacheck: ignore 143 local unpack = table.unpack or unpack -- luacheck: ignore 113 143 local utf8 = utf8 or require "compat53.utf8" -- luacheck: ignore 113 local cqueues = require "cqueues" local monotime = cqueues.monotime local ce = require "cqueues.errno" local lpeg = require "lpeg" local http_patts = require "lpeg_patterns.http" local rand = require "openssl.rand" local digest = require "openssl.digest" local bit = require "http.bit" local onerror = require "http.connection_common".onerror local new_headers = require "http.headers".new local http_request = require "http.request" local EOF = lpeg.P(-1) local Connection = lpeg.Ct(http_patts.Connection) * EOF local Sec_WebSocket_Protocol_Client = lpeg.Ct(http_patts.Sec_WebSocket_Protocol_Client) * EOF local Sec_WebSocket_Extensions = lpeg.Ct(http_patts.Sec_WebSocket_Extensions) * EOF local websocket_methods = { -- Max seconds to wait after sending close frame until closing connection close_timeout = 3; } local websocket_mt = { __name = "http.websocket"; __index = websocket_methods; } function websocket_mt:__tostring() return string.format("http.websocket{type=%q;readyState=%d}", self.type, self.readyState) end local magic = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" -- a nonce consisting of a randomly selected 16-byte value that has been base64-encoded local function new_key() return basexx.to_base64(rand.bytes(16)) end local function base64_sha1(str) return basexx.to_base64(digest.new("sha1"):final(str)) end -- trim12 from http://lua-users.org/wiki/StringTrim local function trim(s) local from = s:match"^%s*()" return from > #s and "" or s:match(".*%S", from) end --[[ this value MUST be non-empty strings with characters in the range U+0021 to U+007E not including separator characters as defined in [RFC2616] ]] local function validate_protocol(p) return p:match("^[\33\35-\39\42\43\45\46\48-\57\65-\90\94-\122\124\126\127]+$") end local function validate_utf8(s) local ok, pos = utf8.len(s) if not ok then return nil, pos end -- UTF-16 surrogates not allowed for p, c in utf8.codes(s) do if c >= 0xD800 and c <= 0xDFFF then return nil, p end end return true end -- XORs the string `str` with a 32bit key local function apply_mask(str, key) assert(#key == 4) local data = {} for i = 1, #str do local key_index = (i-1)%4 + 1 data[i] = string.char(bit.bxor(key[key_index], str:byte(i))) end return table.concat(data, "", 1, #str) end local function build_frame(desc) local data = desc.data or "" assert(desc.opcode and desc.opcode >= 0 and desc.opcode <= 0xF, "Invalid WebSocket opcode") if desc.opcode >= 0x8 then -- RFC 6455 5.5 assert(#data <= 125, "WebSocket control frames MUST have a payload length of 125 bytes or less.") end local b1 = desc.opcode if desc.FIN then b1 = bit.bor(b1, 0x80) end if desc.RSV1 then b1 = bit.bor(b1, 0x40) end if desc.RSV2 then b1 = bit.bor(b1, 0x20) end if desc.RSV3 then b1 = bit.bor(b1, 0x10) end local b2 = #data local length_extra if b2 <= 125 then -- 7-bit length length_extra = "" elseif b2 <= 0xFFFF then -- 2-byte length b2 = 126 length_extra = spack(">I2", #data) else -- 8-byte length b2 = 127 length_extra = spack(">I8", #data) end local key = "" if desc.MASK then local key_a = desc.key if key_a then key = string.char(unpack(key_a, 1, 4)) else key = rand.bytes(4) key_a = {key:byte(1,4)} end b2 = bit.bor(b2, 0x80) data = apply_mask(data, 
key_a) end return string.char(b1, b2) .. length_extra .. key .. data end local function build_close(code, message, mask) local data if code then data = spack(">I2", code) if message then assert(#message<=123, "Close reason must be <=123 bytes") data = data .. message end else data = "" end return { opcode = 0x8; FIN = true; MASK = mask; data = data; } end local function read_frame(sock, deadline) local frame, first_2 do local err, errno first_2, err, errno = sock:xread(2, "b", deadline and (deadline-monotime())) if not first_2 then return nil, err, errno elseif #first_2 ~= 2 then sock:seterror("r", ce.EILSEQ) local ok, errno2 = sock:unget(first_2) if not ok then return nil, onerror(sock, "unget", errno2) end return nil, onerror(sock, "read_frame", ce.EILSEQ) end local byte1, byte2 = first_2:byte(1, 2) frame = { FIN = bit.band(byte1, 0x80) ~= 0; RSV1 = bit.band(byte1, 0x40) ~= 0; RSV2 = bit.band(byte1, 0x20) ~= 0; RSV3 = bit.band(byte1, 0x10) ~= 0; opcode = bit.band(byte1, 0x0F); MASK = bit.band(byte2, 0x80) ~= 0; length = bit.band(byte2, 0x7F); data = nil; } end local fill_length = frame.length if fill_length == 126 then fill_length = 2 elseif fill_length == 127 then fill_length = 8 end if frame.MASK then fill_length = fill_length + 4 end do local ok, err, errno = sock:fill(fill_length, 0) if not ok then local unget_ok1, unget_errno1 = sock:unget(first_2) if not unget_ok1 then return nil, onerror(sock, "unget", unget_errno1) end if errno == ce.ETIMEDOUT then local timeout = deadline and deadline-monotime() if cqueues.poll(sock, timeout) ~= timeout then -- retry return read_frame(sock, deadline) end elseif err == nil then sock:seterror("r", ce.EILSEQ) return nil, onerror(sock, "read_frame", ce.EILSEQ) end return nil, err, errno end end -- if `fill` succeeded these shouldn't be able to fail local extra_fill_unget if frame.length == 126 then extra_fill_unget = assert(sock:xread(2, "b", 0)) frame.length = sunpack(">I2", extra_fill_unget) fill_length = fill_length - 2 elseif frame.length == 127 then extra_fill_unget = assert(sock:xread(8, "b", 0)) frame.length = sunpack(">I8", extra_fill_unget) fill_length = fill_length - 8 + frame.length end if extra_fill_unget then local ok, err, errno = sock:fill(fill_length, 0) if not ok then local unget_ok1, unget_errno1 = sock:unget(extra_fill_unget) if not unget_ok1 then return nil, onerror(sock, "unget", unget_errno1) end local unget_ok2, unget_errno2 = sock:unget(first_2) if not unget_ok2 then return nil, onerror(sock, "unget", unget_errno2) end if errno == ce.ETIMEDOUT then local timeout = deadline and deadline-monotime() if cqueues.poll(sock, timeout) ~= timeout then -- retry return read_frame(sock, deadline) end elseif err == nil then sock:seterror("r", ce.EILSEQ) return nil, onerror(sock, "read_frame", ce.EILSEQ) end return nil, err, errno end end if frame.MASK then local key = assert(sock:xread(4, "b", 0)) frame.key = { key:byte(1, 4) } end do local data = assert(sock:xread(frame.length, "b", 0)) if frame.MASK then frame.data = apply_mask(data, frame.key) else frame.data = data end end return frame end local function parse_close(data) local code, message if #data >= 2 then code = sunpack(">I2", data) if #data > 2 then message = data:sub(3) end end return code, message end function websocket_methods:send_frame(frame, timeout) if self.readyState < 1 then return nil, onerror(self.socket, "send_frame", ce.ENOTCONN) elseif self.readyState > 2 then return nil, onerror(self.socket, "send_frame", ce.EPIPE) end local ok, err, errno = 
self.socket:xwrite(build_frame(frame), "bn", timeout) if not ok then return nil, err, errno end if frame.opcode == 0x8 then self.readyState = 2 end return true end function websocket_methods:send(data, opcode, timeout) assert(type(data) == "string") if opcode == "text" or opcode == nil then opcode = 0x1 elseif opcode == "binary" then opcode = 0x2; end return self:send_frame({ FIN = true; --[[ RFC 6455 5.1: A server MUST NOT mask any frames that it sends to the client 6.1.5: If the data is being sent by the client, the frame(s) MUST be masked]] MASK = self.type == "client"; opcode = opcode; data = data; }, timeout) end local function close_helper(self, code, reason, deadline) if self.readyState < 1 then self.request = nil self.stream = nil self.readyState = 3 -- return value doesn't matter; this branch cannot be called from anywhere that uses it return nil, ce.strerror(ce.ENOTCONN), ce.ENOTCONN elseif self.readyState == 3 then return nil, ce.strerror(ce.EPIPE), ce.EPIPE end if self.readyState < 2 then local close_frame = build_close(code, reason, self.type == "client") -- ignore failure self:send_frame(close_frame, deadline and deadline-monotime()) end if code ~= 1002 and not self.got_close_code and self.readyState == 2 then -- Do not close socket straight away, wait for acknowledgement from server local read_deadline = monotime() + self.close_timeout if deadline then read_deadline = math.min(read_deadline, deadline) end repeat if not self:receive(read_deadline-monotime()) then break end until self.got_close_code end if self.readyState < 3 then self.socket:shutdown() self.readyState = 3 cqueues.poll() cqueues.poll() self.socket:close() end return nil, reason, code end function websocket_methods:close(code, reason, timeout) local deadline = timeout and (monotime()+timeout) code = code or 1000 close_helper(self, code, reason, deadline) return true end function websocket_methods:receive(timeout) if self.readyState < 1 then return nil, onerror(self.socket, "receive", ce.ENOTCONN) elseif self.readyState > 2 then return nil, onerror(self.socket, "receive", ce.EPIPE) end local deadline = timeout and (monotime()+timeout) while true do local frame, err, errno = read_frame(self.socket, deadline) if frame == nil then return nil, err, errno end -- Error cases if frame.RSV1 or frame.RSV2 or frame.RSV3 then -- Reserved bits non zero return close_helper(self, 1002, "Reserved bits not zero", deadline) end if frame.opcode < 0x8 then if frame.opcode == 0x0 then -- Continuation frames if not self.databuffer then return close_helper(self, 1002, "Unexpected continuation frame", deadline) end self.databuffer[#self.databuffer+1] = frame.data elseif frame.opcode == 0x1 or frame.opcode == 0x2 then -- Text or Binary frame if self.databuffer then return close_helper(self, 1002, "Continuation frame expected", deadline) end self.databuffer = { frame.data } self.databuffer_type = frame.opcode else return close_helper(self, 1002, "Reserved opcode", deadline) end if frame.FIN then local databuffer_type = self.databuffer_type local databuffer = table.concat(self.databuffer) if databuffer_type == 0x1 then databuffer_type = "text" --[[ RFC 6455 8.1 When an endpoint is to interpret a byte stream as UTF-8 but finds that the byte stream is not, in fact, a valid UTF-8 stream, that endpoint MUST _Fail the WebSocket Connection_.]] local valid_utf8, err_pos = validate_utf8(databuffer) if not valid_utf8 then return close_helper(self, 1007, string.format("invalid utf-8 at position %d", err_pos)) end elseif databuffer_type == 0x2 then 
databuffer_type = "binary" end self.databuffer_type, self.databuffer = nil, nil return databuffer, databuffer_type end else -- Control frame if frame.length > 125 then -- Control frame with too much payload return close_helper(self, 1002, "Payload too large", deadline) elseif not frame.FIN then -- Fragmented control frame return close_helper(self, 1002, "Fragmented control frame", deadline) end if frame.opcode == 0x8 then -- Close request if frame.length == 1 then return close_helper(self, 1002, "Close frame with payload, but too short for status code", deadline) end local status_code, message = parse_close(frame.data) if status_code == nil then --[[ RFC 6455 7.4.1 1005 is a reserved value and MUST NOT be set as a status code in a Close control frame by an endpoint. It is designated for use in applications expecting a status code to indicate that no status code was actually present.]] self.got_close_code = 1005 status_code = 1000 elseif status_code < 1000 then self.got_close_code = true return close_helper(self, 1002, "Closed with invalid status code", deadline) elseif ((status_code > 1003 and status_code < 1007) or status_code > 1011) and status_code < 3000 then self.got_close_code = true return close_helper(self, 1002, "Closed with reserved status code", deadline) else self.got_close_code = status_code if message then local valid_utf8, err_pos = validate_utf8(message) if not valid_utf8 then return close_helper(self, 1007, string.format("invalid utf-8 at position %d", err_pos)) end self.got_close_message = message end end --[[ RFC 6455 5.5.1 When sending a Close frame in response, the endpoint typically echos the status code it received.]] return close_helper(self, status_code, message, deadline) elseif frame.opcode == 0x9 then -- Ping frame local ok, err2 = self:send_pong(frame.data, deadline and (deadline-monotime())) if not ok and err2 ~= ce.EPIPE then return close_helper(self, 1002, "Pong failed", deadline) end elseif frame.opcode == 0xA then -- luacheck: ignore 542 -- Received unexpected pong frame else return close_helper(self, 1002, "Reserved opcode", deadline) end end end end function websocket_methods:each() return function(self) -- luacheck: ignore 432 return self:receive() end, self end function websocket_methods:send_ping(data, timeout) return self:send_frame({ FIN = true; --[[ RFC 6455 5.1: A server MUST NOT mask any frames that it sends to the client 6.1.5: If the data is being sent by the client, the frame(s) MUST be masked]] MASK = self.type == "client"; opcode = 0x9; data = data; }, timeout) end --[[ RFC 6455 Section 5.5.3: A Pong frame MAY be sent unsolicited. This serves as a unidirectional heartbeat. 
A response to an unsolicited Pong frame is not expected.]] function websocket_methods:send_pong(data, timeout) return self:send_frame({ FIN = true; --[[ RFC 6455 5.1: A server MUST NOT mask any frames that it sends to the client 6.1.5: If the data is being sent by the client, the frame(s) MUST be masked]] MASK = self.type == "client"; opcode = 0xA; data = data; }, timeout) end local function new(type) assert(type == "client" or type == "server") local self = setmetatable({ socket = nil; type = type; readyState = 0; databuffer = nil; databuffer_type = nil; got_close_code = nil; got_close_reason = nil; key = nil; protocol = nil; protocols = nil; -- only used by client: request = nil; headers = nil; -- only used by server: stream = nil; }, websocket_mt) return self end local function new_from_uri(uri, protocols) local request = http_request.new_from_uri(uri) local self = new("client") self.request = request self.request.version = 1.1 self.request.headers:append("upgrade", "websocket") self.request.headers:append("connection", "upgrade") self.key = new_key() self.request.headers:append("sec-websocket-key", self.key, true) self.request.headers:append("sec-websocket-version", "13") if protocols then --[[ The request MAY include a header field with the name Sec-WebSocket-Protocol. If present, this value indicates one or more comma-separated subprotocol the client wishes to speak, ordered by preference. The elements that comprise this value MUST be non-empty strings with characters in the range U+0021 to U+007E not including separator characters as defined in [RFC2616] and MUST all be unique strings.]] local n_protocols = #protocols -- Copy the passed 'protocols' array so that caller is allowed to modify local protocols_copy = {} for i=1, n_protocols do local v = protocols[i] if protocols_copy[v] then error("duplicate protocol") end assert(validate_protocol(v), "invalid protocol") protocols_copy[v] = true protocols_copy[i] = v end self.protocols = protocols_copy self.request.headers:append("sec-websocket-protocol", table.concat(protocols_copy, ",", 1, n_protocols)) end return self end --[[ Takes a response to a websocket upgrade request, and attempts to complete a websocket connection]] local function handle_websocket_response(self, headers, stream) assert(self.type == "client" and self.readyState == 0) if stream.connection.version < 1 or stream.connection.version >= 2 then return nil, "websockets only supported with HTTP 1.x", ce.EINVAL end --[[ If the status code received from the server is not 101, the client handles the response per HTTP [RFC2616] procedures. 
In particular, the client might perform authentication if it receives a 401 status code; the server might redirect the client using a 3xx status code (but clients are not required to follow them), etc.]] if headers:get(":status") ~= "101" then return nil, "status code not 101", ce.EINVAL end --[[ If the response lacks an Upgrade header field or the Upgrade header field contains a value that is not an ASCII case- insensitive match for the value "websocket", the client MUST Fail the WebSocket Connection]] local upgrade = headers:get("upgrade") if not upgrade or upgrade:lower() ~= "websocket" then return nil, "upgrade header not websocket", ce.EINVAL end --[[ If the response lacks a Connection header field or the Connection header field doesn't contain a token that is an ASCII case-insensitive match for the value "Upgrade", the client MUST Fail the WebSocket Connection]] do local has_connection_upgrade = false local h = headers:get_comma_separated("connection") if not h then return nil, "invalid connection header", ce.EINVAL end local connection_header = Connection:match(h) for i=1, #connection_header do if connection_header[i] == "upgrade" then has_connection_upgrade = true break end end if not has_connection_upgrade then return nil, "connection header doesn't contain upgrade", ce.EINVAL end end --[[ If the response lacks a Sec-WebSocket-Accept header field or the Sec-WebSocket-Accept contains a value other than the base64-encoded SHA-1 of the concatenation of the Sec-WebSocket- Key (as a string, not base64-decoded) with the string "258EAFA5- E914-47DA-95CA-C5AB0DC85B11" but ignoring any leading and trailing whitespace, the client MUST Fail the WebSocket Connection]] local sec_websocket_accept = headers:get("sec-websocket-accept") if sec_websocket_accept == nil or trim(sec_websocket_accept) ~= base64_sha1(self.key .. magic) then return nil, "sec-websocket-accept header incorrect", ce.EINVAL end --[[ If the response includes a Sec-WebSocket-Extensions header field and this header field indicates the use of an extension that was not present in the client's handshake (the server has indicated an extension not requested by the client), the client MUST Fail the WebSocket Connection]] do -- For now, we don't support any extensions local h = headers:get_comma_separated("sec-websocket-extensions") if h then local extensions = Sec_WebSocket_Extensions:match(h) if not extensions then return nil, "invalid sec-websocket-extensions header", ce.EINVAL end return nil, "extensions not supported", ce.EINVAL end end --[[ If the response includes a Sec-WebSocket-Protocol header field and this header field indicates the use of a subprotocol that was not present in the client's handshake (the server has indicated a subprotocol not requested by the client), the client MUST Fail the WebSocket Connection]] local protocol = headers:get("sec-websocket-protocol") if protocol then local has_matching_protocol = self.protocols and self.protocols[protocol] if not has_matching_protocol then return nil, "unexpected protocol", ce.EINVAL end end -- Success! 
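--[[ Illustrative sketch: typical client-side use of this module's public API,
assuming a hypothetical echo endpoint at ws://example.com/echo. Wrapped in a
long comment so that nothing here executes.

	local websocket = require "http.websocket"
	local ws = assert(websocket.new_from_uri("ws://example.com/echo"))
	assert(ws:connect(10))           -- perform the HTTP/1.1 upgrade handshake (10 second timeout)
	assert(ws:send("hello", "text")) -- the opcode argument defaults to "text"
	local data, opcode = ws:receive(10)
	print(opcode, data)              -- opcode is "text" or "binary"
	assert(ws:close())               -- send a close frame and wait for the peer's acknowledgement
]]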
assert(self.socket == nil, "websocket:connect called twice") self.socket = assert(stream.connection:take_socket()) self.socket:onerror(onerror) self.request = nil self.headers = headers self.readyState = 1 self.protocol = protocol return true end function websocket_methods:connect(timeout) assert(self.type == "client" and self.readyState == 0) local headers, stream, errno = self.request:go(timeout) if not headers then return nil, stream, errno end return handle_websocket_response(self, headers, stream) end -- Given an incoming HTTP1 request, attempts to upgrade it to a websocket connection local function new_from_stream(stream, headers) assert(stream.connection.type == "server") if stream.connection.version < 1 or stream.connection.version >= 2 then return nil, "websockets only supported with HTTP 1.x", ce.EINVAL end --[[ RFC 7230: A server MUST ignore an Upgrade header field that is received in an HTTP/1.0 request]] if stream.peer_version == 1.0 then return nil, "upgrade headers MUST be ignored in HTTP 1.0", ce.EINVAL end local upgrade = headers:get("upgrade") if not upgrade or upgrade:lower() ~= "websocket" then return nil, "upgrade header not websocket", ce.EINVAL end do local has_connection_upgrade = false local h = headers:get_comma_separated("connection") if not h then return nil, "invalid connection header", ce.EINVAL end local connection_header = Connection:match(h) for i=1, #connection_header do if connection_header[i] == "upgrade" then has_connection_upgrade = true break end end if not has_connection_upgrade then return nil, "connection header doesn't contain upgrade", ce.EINVAL end end local key = headers:get("sec-websocket-key") if not key then return nil, "missing sec-websocket-key", ce.EINVAL end key = trim(key) if headers:get("sec-websocket-version") ~= "13" then return nil, "unsupported sec-websocket-version", ce.EINVAL end local protocols_available if headers:has("sec-websocket-protocol") then local h = headers:get_comma_separated("sec-websocket-protocol") local client_protocols = Sec_WebSocket_Protocol_Client:match(h) if not client_protocols then return nil, "invalid sec-websocket-protocol header", ce.EINVAL end --[[ The request MAY include a header field with the name Sec-WebSocket-Protocol. If present, this value indicates one or more comma-separated subprotocol the client wishes to speak, ordered by preference. The elements that comprise this value MUST be non-empty strings with characters in the range U+0021 to U+007E not including separator characters as defined in [RFC2616] and MUST all be unique strings.]] protocols_available = {} for i, protocol in ipairs(client_protocols) do protocol = trim(protocol) if protocols_available[protocol] then return nil, "duplicate protocol", ce.EINVAL end if not validate_protocol(protocol) then return nil, "invalid protocol", ce.EINVAL end protocols_available[protocol] = true protocols_available[i] = protocol end end local self = new("server") self.key = key self.protocols = protocols_available self.stream = stream return self end function websocket_methods:accept(options, timeout) assert(self.type == "server" and self.readyState == 0) options = options or {} local response_headers if options.headers then response_headers = options.headers:clone() else response_headers = new_headers() end response_headers:upsert(":status", "101") response_headers:upsert("upgrade", "websocket") response_headers:upsert("connection", "upgrade") response_headers:upsert("sec-websocket-accept", base64_sha1(self.key .. 
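--[[ Illustrative sketch: the server-side counterpart, using new_from_stream and
:accept from above inside an http.server onstream handler (the "chat" subprotocol
name is hypothetical). Wrapped in a long comment so that nothing here executes.

	local websocket = require "http.websocket"
	local function onstream(server, stream)
		local headers = assert(stream:get_headers())
		local ws = websocket.new_from_stream(stream, headers)
		if not ws then
			return -- not a websocket upgrade; handle as a normal HTTP request instead
		end
		assert(ws:accept({protocols = {"chat"}}, 10))
		local data = ws:receive()
		if data then
			assert(ws:send(data)) -- echo the message back
		end
		assert(ws:close())
	end
]]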
magic)) local chosen_protocol if self.protocols and options.protocols then --[[ The |Sec-WebSocket-Protocol| request-header field can be used to indicate what subprotocols (application-level protocols layered over the WebSocket Protocol) are acceptable to the client. The server selects one or none of the acceptable protocols and echoes that value in its handshake to indicate that it has selected that protocol.]] for _, protocol in ipairs(options.protocols) do if self.protocols[protocol] then response_headers:upsert("sec-websocket-protocol", protocol) chosen_protocol = protocol break end end end do local ok, err, errno = self.stream:write_headers(response_headers, false, timeout) if not ok then return ok, err, errno end end self.socket = assert(self.stream.connection:take_socket()) self.socket:onerror(onerror) self.stream = nil self.readyState = 1 self.protocol = chosen_protocol return true end return { new_from_uri = new_from_uri; new_from_stream = new_from_stream; methods = websocket_methods; mt = websocket_mt; new = new; build_frame = build_frame; read_frame = read_frame; build_close = build_close; parse_close = parse_close; } lua-http-0.4/http/zlib.lua000066400000000000000000000034611400726324600155450ustar00rootroot00000000000000-- Two different lua libraries claim the require string "zlib": -- lua-zlib and lzlib. -- They have very different APIs, but both provide the raw functionality we need. -- This module serves to normalise them to a single API local zlib = require "zlib" local _M = {} if zlib._VERSION:match "^lua%-zlib" then _M.engine = "lua-zlib" function _M.inflate() local stream = zlib.inflate() local end_of_gzip = false return function(chunk, end_stream) -- at end of file, end_of_gzip should have been set on the previous iteration assert(not end_of_gzip, "stream closed") chunk, end_of_gzip = stream(chunk) if end_stream then assert(end_of_gzip, "invalid stream") end return chunk end end function _M.deflate() local stream = zlib.deflate() return function(chunk, end_stream) local deflated = stream(chunk, end_stream and "finish" or "sync") return deflated end end elseif zlib._VERSION:match "^lzlib" then _M.engine = "lzlib" function _M.inflate() -- the function may get called multiple times local tmp local stream = zlib.inflate(function() local chunk = tmp tmp = nil return chunk end) return function(chunk, end_stream) -- lzlib doesn't report end of string tmp = chunk local data = assert(stream:read("*a")) if end_stream then stream:close() end return data end end function _M.deflate() local buf, n = {}, 0 local stream = zlib.deflate(function(chunk) n = n + 1 buf[n] = chunk end) return function(chunk, end_stream) stream:write(chunk) stream:flush() if end_stream then -- close performs a "finish" flush stream:close() end if n == 0 then return "" else local s = table.concat(buf, "", 1, n) buf, n = {}, 0 return s end end end else error("unknown zlib library") end return _M lua-http-0.4/http/zlib.tld000066400000000000000000000001361400726324600155430ustar00rootroot00000000000000inflate: () -> ((string, boolean) -> (string)) deflate: () -> ((string, boolean) -> (string)) lua-http-0.4/spec/000077500000000000000000000000001400726324600140515ustar00rootroot00000000000000lua-http-0.4/spec/client_spec.lua000066400000000000000000000100761400726324600170500ustar00rootroot00000000000000describe("http.client module", function() local client = require "http.client" local http_connection_common = require "http.connection_common" local http_h1_connection = require "http.h1_connection" local 
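--[[ Illustrative sketch: a round-trip through the http.zlib wrappers defined above.
Both deflate() and inflate() return a closure taking (chunk, end_stream) and
returning a string, whichever underlying zlib binding is installed. Wrapped in a
long comment so that nothing here executes.

	local zlib = require "http.zlib"
	local compress = zlib.deflate()
	local decompress = zlib.inflate()
	local compressed = compress("hello world", true)       -- true marks the end of the stream
	assert(decompress(compressed, true) == "hello world")  -- round-trips back to the original input
]]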
http_h2_connection = require "http.h2_connection" local http_headers = require "http.headers" local http_tls = require "http.tls" local cqueues = require "cqueues" local ca = require "cqueues.auxlib" local cs = require "cqueues.socket" local openssl_pkey = require "openssl.pkey" local openssl_ctx = require "openssl.ssl.context" local openssl_x509 = require "openssl.x509" it("invalid network parameters return nil, err, errno", function() -- Invalid network parameters will return nil, err, errno local ok, err, errno = client.connect{host="127.0.0.1", port="invalid"} assert.same(nil, ok) assert.same("string", type(err)) assert.same("number", type(errno)) end) local function test_pair(client_options, server_func) local s, c = ca.assert(cs.pair()) local cq = cqueues.new(); cq:wrap(function() local conn = assert(client.negotiate(c, client_options)) local stream = conn:new_stream() local req_headers = http_headers.new() req_headers:append(":authority", "myauthority") req_headers:append(":method", "GET") req_headers:append(":path", "/") req_headers:append(":scheme", client_options.tls and "https" or "http") assert(stream:write_headers(req_headers, true)) local res_headers = assert(stream:get_headers()) assert.same("200", res_headers:get(":status")) end) cq:wrap(function() s = server_func(s) if not s then return end if client_options.tls then local ssl = s:checktls() assert.same(client_options.sendname, ssl:getHostName()) end local stream = assert(s:get_next_incoming_stream()) assert(stream:get_headers()) local res_headers = http_headers.new() res_headers:append(":status", "200") assert(stream:write_headers(res_headers, true)) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) c:close() s:close() end local function new_server_ctx() local key = openssl_pkey.new() local crt = openssl_x509.new() crt:setPublicKey(key) crt:sign(key) local ctx = http_tls.new_server_context() assert(ctx:setPrivateKey(key)) assert(ctx:setCertificate(crt)) return ctx end it("works with an http/1.1 server", function() test_pair({}, function(s) return http_h1_connection.new(s, "server", 1.1) end) end) it("works with an http/2 server", function() test_pair({ version = 2; }, function(s) return http_h2_connection.new(s, "server", {}) end) end) it("fails with unknown http version", function() assert.has.error(function() test_pair({ version = 5; }, function() end) end) end) it("works with an https/1.1 server", function() local client_ctx = http_tls.new_client_context() client_ctx:setVerify(openssl_ctx.VERIFY_NONE) test_pair({ tls = true; ctx = client_ctx; sendname = "mysendname"; }, function(s) assert(s:starttls(new_server_ctx())) return http_h1_connection.new(s, "server", 1.1) end) end) -- pending as older openssl (used by e.g. 
travis-ci) doesn't have any non-disallowed ciphers pending("works with an https/2 server", function() local client_ctx = http_tls.new_client_context() client_ctx:setVerify(openssl_ctx.VERIFY_NONE) test_pair({ tls = true; ctx = client_ctx; sendname = "mysendname"; version = 2; }, function(s) assert(s:starttls(new_server_ctx())) return http_h2_connection.new(s, "server", {}) end) end) it("reports errors from :starttls", function() -- default settings should fail as it should't allow self-signed local s, c = ca.assert(cs.pair()) local cq = cqueues.new(); cq:wrap(function() local ok, err = client.negotiate(c, { tls = true; }) assert.falsy(ok) assert.truthy(err:match("starttls: ")) end) cq:wrap(function() s:onerror(http_connection_common.onerror) local ok, err = s:starttls() assert.falsy(ok) assert.truthy(err:match("starttls: ")) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) c:close() s:close() end) end) lua-http-0.4/spec/compat_prosody_spec.lua000066400000000000000000000066101400726324600206330ustar00rootroot00000000000000describe("http.compat.prosody module", function() local cqueues = require "cqueues" local request = require "http.compat.prosody".request local new_headers = require "http.headers".new local server = require "http.server" it("invalid uris fail", function() local s = spy.new(function() end) assert(cqueues.new():wrap(function() assert.same({nil, "invalid-url"}, {request("this is not a url", {}, s)}) end):loop()) assert.spy(s).was.called() end) it("can construct a request from a uri", function() -- Only step; not loop. use `error` as callback as it should never be called assert(cqueues.new():wrap(function() assert(request("http://example.com", {}, error)) end):step()) assert(cqueues.new():wrap(function() local r = assert(request("http://example.com/something", { method = "PUT"; body = '{}'; headers = { ["content-type"] = "application/json"; } }, error)) assert.same("PUT", r.headers:get(":method")) assert.same("application/json", r.headers:get("content-type")) assert.same("2", r.headers:get("content-length")) assert.same("{}", r.body) end):step()) end) it("can perform a GET request", function() local cq = cqueues.new() local s = server.listen { host = "localhost"; port = 0; onstream = function(s, stream) local h = assert(stream:get_headers()) assert.same("http", h:get ":scheme") assert.same("GET", h:get ":method") assert.same("/", h:get ":path") local headers = new_headers() headers:append(":status", "200") headers:append("connection", "close") assert(stream:write_headers(headers, false)) assert(stream:write_chunk("success!", true)) stream:shutdown() stream.connection:shutdown() s:close() end; } assert(s:listen()) local _, host, port = s:localname() cq:wrap(function() assert_loop(s) end) cq:wrap(function() request(string.format("http://%s:%d", host, port), {}, function(b, c) assert.same(200, c) assert.same("success!", b) end) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("can perform a POST request", function() local cq = cqueues.new() local s = server.listen { host = "localhost"; port = 0; onstream = function(s, stream) local h = assert(stream:get_headers()) assert.same("http", h:get ":scheme") assert.same("POST", h:get ":method") assert.same("/path", h:get ":path") assert.same("text/plain", h:get "content-type") local b = assert(stream:get_body_as_string()) assert.same("this is a body", b) local headers = new_headers() headers:append(":status", "201") headers:append("connection", "close") -- send duplicate headers 
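--[[ Illustrative sketch: the http.compat.prosody interface exercised by these tests.
request() takes a URL, an options table and a callback receiving (body, status_code,
response), and is intended to be called from inside a cqueues-managed coroutine, as
these tests do. The URL below is hypothetical. Wrapped in a long comment so that
nothing here executes.

	local request = require "http.compat.prosody".request
	request("http://example.com/", {}, function(body, code, response)
		print(code, body, response.headers["content-type"])
	end)
]]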
headers:append("someheader", "foo") headers:append("someheader", "bar") assert(stream:write_headers(headers, false)) assert(stream:write_chunk("success!", true)) stream:shutdown() stream.connection:shutdown() s:close() end; } assert(s:listen()) local _, host, port = s:localname() cq:wrap(function() assert_loop(s) end) cq:wrap(function() request(string.format("http://%s:%d/path", host, port), { headers = { ["content-type"] = "text/plain"; }; body = "this is a body"; }, function(b, c, r) assert.same(201, c) assert.same("success!", b) assert.same("foo,bar", r.headers.someheader) end) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) end) lua-http-0.4/spec/compat_socket_spec.lua000066400000000000000000000160751400726324600204320ustar00rootroot00000000000000describe("http.compat.socket module", function() local http = require "http.compat.socket" local new_headers = require "http.headers".new local server = require "http.server" local util = require "http.util" local cqueues = require "cqueues" it("fails safely on an invalid host", function() -- in the luasocket example they use 'wrong.host', but 'host' is now a valid TLD. -- use 'wrong.invalid' instead for this test. local r, e = http.request("http://wrong.invalid/") assert.same(nil, r) -- in luasocket, the error is documented as "host not found", but we allow something else assert.same("string", type(e)) end) it("works against builtin server with GET request", function() local cq = cqueues.new() local authority local s = server.listen { host = "localhost"; port = 0; onstream = function(s, stream) local request_headers = assert(stream:get_headers()) assert.same("http", request_headers:get ":scheme") assert.same("GET", request_headers:get ":method") assert.same("/foo", request_headers:get ":path") assert.same(authority, request_headers:get ":authority") local headers = new_headers() headers:append(":status", "200") headers:append("connection", "close") assert(stream:write_headers(headers, false)) assert(stream:write_chunk("hello world", true)) s:close() end; } assert(s:listen()) local _, host, port = s:localname() authority = util.to_authority(host, port, "http") cq:wrap(function() assert_loop(s) end) cq:wrap(function() local r, e = http.request("http://"..authority.."/foo") assert.same("hello world", r) assert.same(200, e) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("works against builtin server with POST request", function() local cq = cqueues.new() local authority local s = server.listen { host = "localhost"; port = 0; onstream = function(s, stream) local request_headers = assert(stream:get_headers()) assert.same("http", request_headers:get ":scheme") assert.same("POST", request_headers:get ":method") assert.same("/foo", request_headers:get ":path") assert.same(authority, request_headers:get ":authority") local body = assert(stream:get_body_as_string()) assert.same("a body", body) local headers = new_headers() headers:append(":status", "201") headers:append("connection", "close") assert(stream:write_headers(headers, false)) assert(stream:write_chunk("hello world", true)) s:close() end; } assert(s:listen()) local _, host, port = s:localname() authority = util.to_authority(host, port, "http") cq:wrap(function() assert_loop(s) end) cq:wrap(function() local r, e = http.request("http://"..authority.."/foo", "a body") assert.same("hello world", r) assert.same(201, e) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("works against builtin server with complex request", function() local cq 
= cqueues.new() local s = server.listen { host = "localhost"; port = 0; onstream = function(s, stream) local a, b = stream:get_headers() local request_headers = assert(a,b) assert.same("http", request_headers:get ":scheme") assert.same("PUT", request_headers:get ":method") assert.same("/path?query", request_headers:get ":path") assert.same("otherhost.com:8080", request_headers:get ":authority") assert.same("fun", request_headers:get "myheader") assert.same("normalised", request_headers:get "camelcase") assert(stream:write_continue()) local body = assert(stream:get_body_as_string()) assert.same("a body", body) local headers = new_headers() headers:append(":status", "404") headers:append("connection", "close") assert(stream:write_headers(headers, false)) assert(stream:write_chunk("hello world", true)) s:close() end; } assert(s:listen()) cq:wrap(function() assert_loop(s) end) cq:wrap(function() local _, host, port = s:localname() local r, e = assert(http.request { url = "http://example.com/path?query"; host = host; port = port; method = "PUT"; headers = { host = "otherhost.com:8080"; myheader = "fun"; CamelCase = "normalised"; }; source = coroutine.wrap(function() coroutine.yield("a body") end); sink = coroutine.wrap(function(b) assert.same("hello world", b) assert.same(nil, coroutine.yield(true)) end); }) assert.same(1, r) assert.same(404, e) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("returns nil, 'timeout' on timeout", function() local cq = cqueues.new() local authority local s = server.listen { host = "localhost"; port = 0; onstream = function(s, stream) assert(stream:get_headers()) cqueues.sleep(0.2) stream:shutdown() s:close() end; } assert(s:listen()) local _, host, port = s:localname() authority = util.to_authority(host, port, "http") cq:wrap(function() assert_loop(s) end) cq:wrap(function() local old_TIMEOUT = http.TIMEOUT http.TIMEOUT = 0.01 local r, e = http.request("http://"..authority.."/") http.TIMEOUT = old_TIMEOUT assert.same(nil, r) assert.same("timeout", e) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("handles timeouts in complex form", function() local cq = cqueues.new() local s = server.listen { host = "localhost"; port = 0; onstream = function(s, stream) local a, b = stream:get_headers() local request_headers = assert(a,b) assert.same("http", request_headers:get ":scheme") assert.same("GET", request_headers:get ":method") assert.same("/path?query", request_headers:get ":path") assert.same("example.com", request_headers:get ":authority") cqueues.sleep(0.2) s:close() end; } assert(s:listen()) cq:wrap(function() assert_loop(s) end) cq:wrap(function() local _, host, port = s:localname() local old_TIMEOUT = http.TIMEOUT http.TIMEOUT = 0.01 local r, e = http.request { url = "http://example.com/path?query"; host = host; port = port; } http.TIMEOUT = old_TIMEOUT assert.same(nil, r) assert.same("timeout", e) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("coerces numeric header values to strings", function() local cq = cqueues.new() local s = server.listen { host = "localhost"; port = 0; onstream = function(s, stream) local request_headers = assert(stream:get_headers()) assert.truthy(request_headers:has("myheader")) local headers = new_headers() headers:append(":status", "200") headers:append("connection", "close") assert(stream:write_headers(headers, true)) s:close() end; } assert(s:listen()) cq:wrap(function() assert_loop(s) end) cq:wrap(function() local _, host, port = s:localname() local r, e = 
assert(http.request { url = "http://anything/"; host = host; port = port; headers = { myheader = 2; }; }) assert.same(1, r) assert.same(200, e) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) end) lua-http-0.4/spec/cookie_spec.lua000066400000000000000000000700721400726324600170450ustar00rootroot00000000000000describe("cookie module", function() local http_cookie = require "http.cookie" local http_headers = require "http.headers" describe(".parse_cookies", function() it("can parse a request with a single cookie headers", function() local h = http_headers.new() h:append("cookie", "foo=FOO; bar=BAR") assert.same({ foo = "FOO"; bar = "BAR"; }, http_cookie.parse_cookies(h)) end) it("can parse a request with a multiple cookie headers", function() local h = http_headers.new() h:append("cookie", "foo=FOO; bar=BAR") h:append("cookie", "baz=BAZ; bar=BAR2") h:append("cookie", "qux=QUX") assert.same({ foo = "FOO"; bar = "BAR2"; -- last occurence should win baz = "BAZ"; qux = "QUX"; }, http_cookie.parse_cookies(h)) end) end) it(":get works", function() local s = http_cookie.new_store() assert.same(nil, s:get("mysite.com", "/", "lang")) local key, value, params = http_cookie.parse_setcookie("lang=en-US; Expires=Wed, 09 Jun 2021 10:18:14 GMT") assert(s:store("mysite.com", "/", true, true, nil, key, value, params)) assert.same("en-US", s:get("mysite.com", "/", "lang")) assert.same(nil, s:get("other.com", "/", "lang")) assert.same(nil, s:get("mysite.com", "/other", "lang")) assert.same(nil, s:get("mysite.com", "/", "other")) end) describe("examples from spec", function() it("can handle basic cookie without parameters", function() local s = http_cookie.new_store() assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("SID=31d4d96e407aad42"))) assert.same("SID=31d4d96e407aad42", s:lookup("example.com", "/", true, true)) assert.same("SID=31d4d96e407aad42", s:lookup("example.com", "/other", true, true)) assert.same("", s:lookup("subdomain.example.com", "/", true, true)) assert.same("", s:lookup("other.com", "/", true, true)) end) it("can handle cookie with Path and Domain parameters", function() local s = http_cookie.new_store() assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("SID=31d4d96e407aad42; Path=/; Domain=example.com"))) assert.same("SID=31d4d96e407aad42", s:lookup("example.com", "/", true, true)) assert.same("SID=31d4d96e407aad42", s:lookup("example.com", "/other", true, true)) assert.same("SID=31d4d96e407aad42", s:lookup("subdomain.example.com", "/", true, true)) assert.same("", s:lookup("other.com", "/", true, true)) end) it("can handle two cookies with different names and parameters", function() local s = http_cookie.new_store() assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("SID=31d4d96e407aad42; Path=/; Secure; HttpOnly"))) assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("lang=en-US; Path=/; Domain=example.com"))) assert.same("SID=31d4d96e407aad42; lang=en-US", s:lookup("example.com", "/other", true, true)) assert.same("lang=en-US", s:lookup("subdomain.example.com", "/", true, true)) assert.same("lang=en-US", s:lookup("example.com", "/", true, false)) assert.same("lang=en-US", s:lookup("example.com", "/", false, true)) assert.same("", s:lookup("other.com", "/", true, true)) end) it("can expire a cookie", function() local s = http_cookie.new_store() s.time = function() return 1234567890 end -- set time to something before the expiry -- in spec this is kept 
from previous example. assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("SID=31d4d96e407aad42; Path=/; Secure; HttpOnly"))) assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("lang=en-US; Expires=Wed, 09 Jun 2021 10:18:14 GMT"))) assert.same("SID=31d4d96e407aad42; lang=en-US", s:lookup("example.com", "/", true, true)) s.time = function() return 9234567890 end -- set time to something after the expiry assert.same("SID=31d4d96e407aad42", s:lookup("example.com", "/", true, true)) end) end) describe(":store uses correct domain", function() it("ignores leading '.' in domain", function() local s = http_cookie.new_store() assert.truthy(s:store("subdomain.example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Domain=.example.com"))) assert.same("bar", s:get("example.com", "/", "foo")) end) ;(http_cookie.store_methods.psl and it or pending)("checks against public suffix list", function() assert(not http_cookie.store_methods.psl:is_cookie_domain_acceptable("foo.com", "com")) local s = http_cookie.new_store() assert.falsy(s:store("foo.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Domain=com"))) end) ;(http_cookie.store_methods.psl and it or pending)("allows explicit domains even when on the public suffix list", function() assert(http_cookie.store_methods.psl:is_public_suffix("hashbang.sh")) local s = http_cookie.new_store() assert.truthy(s:store("hashbang.sh", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Domain=hashbang.sh"))) -- And check that host_only flag has been set to true assert.same("foo=bar", s:lookup("hashbang.sh", "/", true, true)) assert.same("", s:lookup("sub.hashbang.sh", "/", true, true)) end) it("doesn't domain-match a completely different domain", function() local s = http_cookie.new_store() assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Domain=other.example.com"))) end) it("doesn't domain-match a subdomain when request is at super-domain", function() local s = http_cookie.new_store() assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Domain=subdomain.example.com"))) end) it("doesn't domain-match a partial ip", function() local s = http_cookie.new_store() assert.falsy(s:store("127.0.0.1", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Domain=0.0.1"))) end) end) describe("domain-match on lookup", function() it("matches domains correctly when host_only flag is true", function() local s = http_cookie.new_store() assert.truthy(s:store("s.example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar"))) assert.same("bar", s:get("s.example.com", "/", "foo")) assert.same("foo=bar", s:lookup("s.example.com", "/", true, true)) assert.same("", s:lookup("s.s.example.com", "/", true, true)) assert.same("", s:lookup("s.s.s.example.com", "/", true, true)) assert.same("", s:lookup("com", "/", true, true)) assert.same("", s:lookup("example.com", "/", true, true)) assert.same("", s:lookup("other.com", "/", true, true)) assert.same("", s:lookup("s.other.com", "/", true, true)) end) it("matches domains correctly when host_only flag is false", function() local s = http_cookie.new_store() assert.truthy(s:store("s.example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Domain=s.example.com"))) assert.same("bar", s:get("s.example.com", "/", "foo")) assert.same("foo=bar", s:lookup("s.example.com", "/", true, true)) assert.same("foo=bar", 
s:lookup("s.s.example.com", "/", true, true)) assert.same("foo=bar", s:lookup("s.s.s.example.com", "/", true, true)) assert.same("", s:lookup("com", "/", true, true)) assert.same("", s:lookup("example.com", "/", true, true)) assert.same("", s:lookup("other.com", "/", true, true)) assert.same("", s:lookup("s.other.com", "/", true, true)) end) end) describe(":store uses correct path", function() it("handles absolute set-cookie header", function() local s = http_cookie.new_store() assert.truthy(s:store("example.com", "/absolute/path", true, true, nil, http_cookie.parse_setcookie("foo=bar; Path=/different/absolute/path"))) assert.same("bar", s:get("example.com", "/different/absolute/path", "foo")) end) it("handles relative set-cookie path", function() local s = http_cookie.new_store() assert.truthy(s:store("example.com", "/absolute/path", true, true, nil, http_cookie.parse_setcookie("foo=bar; Path=relative/path"))) -- should trim off last component assert.same("bar", s:get("example.com", "/absolute", "foo")) end) it("handles relative set-cookie path with no request path", function() local s = http_cookie.new_store() assert.truthy(s:store("example.com", "?", true, true, nil, http_cookie.parse_setcookie("foo=bar; Path=relative/path"))) -- should default to / assert.same("bar", s:get("example.com", "/", "foo")) end) it("handles absolute set-cookie path with relative request path", function() local s = http_cookie.new_store() assert.truthy(s:store("example.com", "relative/path", true, true, nil, http_cookie.parse_setcookie("foo=bar; Path=/absolute/path"))) assert.same("bar", s:get("example.com", "/absolute/path", "foo")) end) it("handles relative request path and relative set-cookie header", function() local s = http_cookie.new_store() assert.truthy(s:store("example.com", "relative/path", true, true, nil, http_cookie.parse_setcookie("foo=bar; Path=different/relative/path"))) assert.same("bar", s:get("example.com", "/", "foo")) end) end) it("matches paths correctly", function() local s = http_cookie.new_store() assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Path=/path/subpath"))) assert.same("foo=bar", s:lookup("example.com", "/path/subpath/foo", true, true)) assert.same("foo=bar", s:lookup("example.com", "/path/subpath/bar", true, true)) assert.same("foo=bar", s:lookup("example.com", "/path/subpath", true, true)) assert.same("", s:lookup("example.com", "/", true, true)) assert.same("", s:lookup("example.com", "/path", true, true)) assert.same("", s:lookup("example.com", "/path/otherpath/", true, true)) assert.same("", s:lookup("example.com", "/path/otherpath/things", true, true)) end) it("prefers max-age over expires", function() local s = http_cookie.new_store() assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; max-age=50; Expires=Thu, 01 Jan 1970 00:00:00 GMT"))) assert.truthy(s:get("example.com", "/", "foo")) assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; max-age=0; Expires=Tue, 19 Jan 2038 03:14:07 GMT"))) assert.falsy(s:get("example.com", "/", "foo")) end) it("supports HttpOnly attribute", function() local s = http_cookie.new_store() assert.falsy(s:store("example.com", "/", false, true, nil, http_cookie.parse_setcookie("foo=bar; HttpOnly"))) assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; HttpOnly"))) assert.same("", s:lookup("example.com", "/", false, true)) assert.same("foo=bar", s:lookup("example.com", "/", 
true, true)) -- Now try and overwrite it with non-http :store assert.falsy(s:store("example.com", "/", false, true, nil, http_cookie.parse_setcookie("foo=bar"))) end) it("supports Secure attribute", function() local s = http_cookie.new_store() assert.falsy(s:store("example.com", "/", true, false, nil, http_cookie.parse_setcookie("foo=bar; Secure"))) assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=bar; Secure"))) assert.same("", s:lookup("example.com", "/", true, false)) assert.same("foo=bar", s:lookup("example.com", "/", true, true)) end) describe("tough cookies", function() it("enforces __Secure- prefix", function() local s = http_cookie.new_store() assert.falsy(s:store("example.com", "/", true, false, nil, http_cookie.parse_setcookie("__Secure-foo=bar; Secure"))) assert.falsy(s:store("example.com", "/", true, false, nil, http_cookie.parse_setcookie("__Secure-foo=bar"))) assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("__Secure-foo=bar;"))) assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("__Secure-foo=bar; Secure"))) end) it("enforces __Host- prefix", function() local s = http_cookie.new_store() -- Checks secure flag assert.falsy(s:store("example.com", "/", true, false, nil, http_cookie.parse_setcookie("__Host-foo=bar; Secure"))) assert.falsy(s:store("example.com", "/", true, false, nil, http_cookie.parse_setcookie("__Host-foo=bar"))) assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("__Host-foo=bar;"))) -- Checks for host only flag assert.falsy(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("__Host-foo=bar; Secure; Domain=example.com"))) -- Checks that path is / assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("__Host-foo=bar; Secure; Path=/path"))) -- Success case assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("__Host-foo=bar; Secure"))) end) end) describe("cookie fixing mitigation", function() it("ignores already existing path", function() local s = http_cookie.new_store() assert.truthy(s:store("example.com", "/path/subpath/foo", true, true, nil, http_cookie.parse_setcookie("foo=bar; Path=/path; Secure"))) assert.falsy(s:store("example.com", "/path/subpath/foo", true, false, nil, http_cookie.parse_setcookie("foo=bar; Path=/path"))) end) end) describe("SameSite attribute", function() it("fails to store if domain and site_for_cookies don't match", function() local s = http_cookie.new_store() assert.falsy(s:store("example.com", "/", true, true, "other.com", http_cookie.parse_setcookie("foo=foo; SameSite=Strict"))) end) it("implements SameSite=Strict", function() local s = http_cookie.new_store() assert(s:store("example.com", "/", true, true, "example.com", http_cookie.parse_setcookie("foo=foo; SameSite=Strict"))) assert.same("foo=foo", s:lookup("example.com", "/", true, true, true, "example.com")) assert.same("", s:lookup("example.com", "/", true, true, true, "other.com")) end) it("implements SameSite=Lax", function() local s = http_cookie.new_store() assert(s:store("example.com", "/", true, true, "example.com", http_cookie.parse_setcookie("foo=foo; SameSite=Lax"))) assert.same("foo=foo", s:lookup("example.com", "/", true, true, true, "example.com", true)) assert.same("foo=foo", s:lookup("example.com", "/", true, true, true, "other.com", true)) assert.same("", s:lookup("example.com", "/", true, true, false, "other.com", true)) 
assert.same("", s:lookup("example.com", "/", true, true, true, "other.com", false)) assert.same("", s:lookup("example.com", "/", true, true, false, "other.com", false)) end) end) it("cleans up", function() local s = http_cookie.new_store() assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo; Expires=Wed, 09 Jun 2021 10:18:14 GMT"))) assert.same("foo", s:get("example.com", "/", "foo")) s.time = function() return 9876543210 end -- set time to something after the expiry s:clean() assert.same(nil, s:get("example.com", "/", "foo")) end) describe(":remove()", function() it("can remove cookies by domain", function() local s = http_cookie.new_store() -- Try remove on empty store s:remove("example.com") assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo"))) assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=other; Path=/subpath"))) assert.truthy(s:store("other.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=bar"))) assert.same("foo", s:get("example.com", "/", "foo")) assert.same("other", s:get("example.com", "/subpath", "foo")) assert.same("bar", s:get("other.com", "/", "bar")) s:remove("example.com") assert.same(nil, s:get("example.com", "/", "foo")) assert.same(nil, s:get("example.com", "/subpath", "foo")) assert.same("bar", s:get("other.com", "/", "bar")) end) it("can remove cookies by path", function() local s = http_cookie.new_store() -- Try remove on empty store s:remove("example.com", "/") assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo"))) assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=other; Path=/subpath"))) assert.truthy(s:store("other.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=bar"))) assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("qux=qux"))) assert.same("foo", s:get("example.com", "/", "foo")) assert.same("other", s:get("example.com", "/subpath", "foo")) assert.same("bar", s:get("other.com", "/", "bar")) assert.same("qux", s:get("example.com", "/", "qux")) -- Remove all names under "/" path s:remove("example.com", "/") assert.same(nil, s:get("example.com", "/", "foo")) assert.same("other", s:get("example.com", "/subpath", "foo")) assert.same("bar", s:get("other.com", "/", "bar")) assert.same(nil, s:get("example.com", "/", "qux")) -- Remove last path in domain (making domain empty) s:remove("example.com", "/subpath") assert.same(nil, s:get("example.com", "/", "foo")) assert.same(nil, s:get("example.com", "/subpath", "foo")) assert.same("bar", s:get("other.com", "/", "bar")) assert.same(nil, s:get("example.com", "/", "qux")) end) it("can remove cookies by name", function() local s = http_cookie.new_store() -- Try remove on empty store s:remove("example.com", "/", "foo") assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo"))) assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=other; Path=/subpath"))) assert.truthy(s:store("other.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=bar"))) assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("qux=qux"))) assert.same("foo", s:get("example.com", "/", "foo")) assert.same("other", s:get("example.com", "/subpath", "foo")) assert.same("bar", s:get("other.com", "/", "bar")) assert.same("qux", s:get("example.com", "/", "qux")) -- 
Remove just one name s:remove("example.com", "/", "foo") assert.same(nil, s:get("example.com", "/", "foo")) assert.same("other", s:get("example.com", "/subpath", "foo")) assert.same("bar", s:get("other.com", "/", "bar")) assert.same("qux", s:get("example.com", "/", "qux")) -- Remove last name in path (making path empty) s:remove("example.com", "/", "qux") assert.same(nil, s:get("example.com", "/", "foo")) assert.same("other", s:get("example.com", "/subpath", "foo")) assert.same("bar", s:get("other.com", "/", "bar")) assert.same(nil, s:get("example.com", "/", "qux")) -- Remove last name in domain (making domain empty) s:remove("example.com", "/subpath", "foo") assert.same(nil, s:get("example.com", "/", "foo")) assert.same(nil, s:get("example.com", "/subpath", "foo")) assert.same("bar", s:get("other.com", "/", "bar")) assert.same(nil, s:get("example.com", "/", "qux")) end) end) describe("cookie order", function() it("returns in order for simple cookies", function() -- used as assumed base case for future tests in this section local s = http_cookie.new_store() assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=basic"))) assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=basic"))) assert.same("bar=basic; foo=basic", s:lookup("example.com", "/", true, true)) end) it("returns in order for domain differing cookies", function() -- spec doesn't care about this case local s = http_cookie.new_store() assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=domain; Domain=sub.example.com"))) assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=domain; Domain=example.com"))) assert.same("bar=domain; foo=domain", s:lookup("sub.example.com", "/", true, true)) end) it("returns in order for different length paths", function() local s = http_cookie.new_store() assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=path; Path=/path/longerpath"))) assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=path; Path=/path/"))) assert.same("foo=path; bar=path", s:lookup("example.com", "/path/longerpath", true, true)) end) it("returns in order for different creation times", function() local s = http_cookie.new_store() s.time = function() return 0 end assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=time"))) s.time = function() return 50 end assert(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=time"))) assert.same("foo=time; bar=time", s:lookup("example.com", "/path/longerpath", true, true)) end) it("returns in order when all together!", function() local s = http_cookie.new_store() assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=basic"))) assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=basic"))) assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=path; Path=/path/longerpath"))) assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=path; Path=/path/"))) -- foo=domain case would get overridden below assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=domain; Domain=example.com"))) s.time = function() return 0 end assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=time"))) s.time = function() return 50 end 
assert(s:store("sub.example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=time"))) assert.same("foo=path; bar=path; bar=domain; bar=time; foo=time", s:lookup("sub.example.com", "/path/longerpath", true, true)) end) end) it("can store cookies from a request+response", function() local s = http_cookie.new_store() local req_headers = http_headers.new() req_headers:append(":scheme", "http") req_headers:append(":method", "GET") req_headers:append(":path", "/") local resp_headers = http_headers.new() resp_headers:append(":status", "200") resp_headers:append("set-cookie", http_cookie.bake("foo", "FOO")) resp_headers:append("set-cookie", http_cookie.bake("bar", "BAR", 0)) assert.truthy(s:store_from_request(req_headers, resp_headers, "my.host", "my.host")) assert.same("FOO", s:get("my.host", "/", "foo")) assert.same(nil, s:get("my.host", "/", "bar")) -- Now with an :authority header req_headers:append(":authority", "my.host") resp_headers:append("set-cookie", http_cookie.bake("baz", "BAZ")) assert.truthy(s:store_from_request(req_headers, resp_headers, "my.host", "my.host")) assert.same("FOO", s:get("my.host", "/", "foo")) assert.same(nil, s:get("my.host", "/", "bar")) assert.same("BAZ", s:get("my.host", "/", "baz")) end) it("enforces store.max_cookie_length", function() local s = http_cookie.new_store() s.max_cookie_length = 3 assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo"))) s.max_cookie_length = 8 assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo"))) assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=longervalue"))) end) it("enforces store.max_cookies", function() local s = http_cookie.new_store() s.max_cookies = 0 assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo"))) s.max_cookies = 1 assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo"))) assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=bar"))) s:remove("example.com", "/", "foo") assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=bar"))) end) it("enforces store.max_cookies_per_domain", function() local s = http_cookie.new_store() s.max_cookies_per_domain = 0 assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo"))) s.max_cookies_per_domain = 1 assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("foo=foo"))) assert.falsy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=bar"))) assert.truthy(s:store("other.com", "/", true, true, nil, http_cookie.parse_setcookie("baz=baz"))) s:remove("example.com", "/", "foo") assert.truthy(s:store("example.com", "/", true, true, nil, http_cookie.parse_setcookie("bar=bar"))) end) it("can bake cookies", function() assert.same("foo=bar", http_cookie.bake("foo", "bar")) assert.same("foo=bar; Max-Age=0", http_cookie.bake("foo", "bar", -math.huge)) assert.same("foo=bar; Expires=Thu, 01 Jan 1970 00:00:00 GMT", http_cookie.bake("foo", "bar", 0)) assert.same("foo=bar; Max-Age=0; Domain=example.com; Path=/path; Secure; HttpOnly; SameSite=Strict", http_cookie.bake("foo", "bar", -math.huge, "example.com", "/path", true, true, "strict")) assert.same("foo=bar; Max-Age=0; Domain=example.com; Path=/path; Secure; HttpOnly; SameSite=Lax", http_cookie.bake("foo", "bar", -math.huge, "example.com", "/path", true, 
true, "lax")) assert.has.errors(function() http_cookie.bake("foo", "bar", -math.huge, "example.com", "/path", true, true, "somethingelse") end, [[invalid value for same_site, expected "strict" or "lax"]]) end) it("can dump a netscape format cookiejar", function() local s = http_cookie.new_store() assert(s:store("example.com", "/", true, true, "example.com", http_cookie.parse_setcookie("foo=FOO;"))) assert(s:store("example.com", "/", true, true, "example.com", http_cookie.parse_setcookie("bar=BAR; HttpOnly"))) assert(s:store("example.com", "/", true, true, "example.com", http_cookie.parse_setcookie("baz=BAZ; Path=/someplace"))) assert(s:store("sub.example.com", "/", true, true, "sub.example.com", http_cookie.parse_setcookie("subdomain=matched; Domain=sub.example.com"))) assert(s:store("example.com", "/", true, true, "example.com", http_cookie.parse_setcookie("qux=QUX; SameSite=Lax"))) assert(s:store("other.com", "/", true, true, "other.com", http_cookie.parse_setcookie("foo=somethingelse; HttpOnly"))) local file = io.tmpfile() assert(s:save_to_file(file)) assert(file:seek("set")) -- preamble assert.truthy(assert(file:read("*l")):match"^#.*HTTP Cookie File") assert.truthy(assert(file:read("*l")):match"^#") assert.same("", assert(file:read("*l"))) local lines = {} for line in file:lines() do table.insert(lines, line) end table.sort(lines) assert.same({ "#HttpOnly_example.com TRUE / FALSE 2147483647 bar BAR"; "#HttpOnly_other.com TRUE / FALSE 2147483647 foo somethingelse"; "example.com TRUE / FALSE 2147483647 foo FOO"; "example.com TRUE / FALSE 2147483647 qux QUX"; "example.com TRUE /someplace FALSE 2147483647 baz BAZ"; "sub.example.com FALSE / FALSE 2147483647 subdomain matched"; }, lines) end) it("can load a netscape format cookiejar", function() local s = http_cookie.new_store() local file = io.tmpfile() assert(file:write([[ # Netscape HTTP Cookie File # https://curl.haxx.se/docs/http-cookies.html # This file was generated by libcurl! Edit at your own risk. 
#HttpOnly_other.com TRUE / FALSE 2147483647 foo somethingelse sub.example.com FALSE / FALSE 2147483647 subdomain matched example.com TRUE / TRUE 2147483647 qux QUX #HttpOnly_example.com TRUE / FALSE 2147483647 bar BAR example.com TRUE / FALSE 2147483647 foo FOO example.com TRUE /someplace FALSE 2147483647 baz BAZ ]])) assert(file:seek("set")) assert(s:load_from_file(file)) assert.same("bar=BAR; foo=FOO; qux=QUX", s:lookup("example.com", "/", true, true)) end) it("can load a netscape format cookiejar with invalid lines", function() local s = http_cookie.new_store() local file = io.tmpfile() assert(file:write([[ example.com TRUE / TRUE 2147483647 qux QUX not a valid line example.com INVALID_BOOLEAN / FALSE 2147483647 should fail example.com TRUE / INVALID_BOOLEAN 2147483647 should fail example.com TRUE / FALSE not_a_number should fail #HttpOnly_example.com TRUE / FALSE 2147483647 bar BAR example.com TRUE / FALSE 2147483647 foo FOO ]])) assert(file:seek("set")) assert(s:load_from_file(file)) assert.same("bar=BAR; foo=FOO; qux=QUX", s:lookup("example.com", "/", true, true)) end) end) lua-http-0.4/spec/h1_connection_spec.lua000066400000000000000000000467771400726324600203420ustar00rootroot00000000000000describe("low level http 1 connection operations", function() local h1_connection = require "http.h1_connection" local cqueues = require "cqueues" local ca = require "cqueues.auxlib" local cs = require "cqueues.socket" local ce = require "cqueues.errno" it("cannot construct with invalid type", function() local s, c = ca.assert(cs.pair()) assert.has.errors(function() h1_connection.new(s, nil, 1.1) end) assert.has.errors(function() h1_connection.new(s, "", 1.1) end) assert.has.errors(function() h1_connection.new(s, "invalid", 1.1) end) s:close() c:close() end) it("__tostring works", function() local s, c = ca.assert(cs.pair()) local h = h1_connection.new(c, "client", 1.1) assert.same("http.h1_connection{", tostring(h):match("^.-%{")) s:close() h:close() end) local function new_pair(version) local s, c = ca.assert(cs.pair()) s = h1_connection.new(s, "server", version) c = h1_connection.new(c, "client", version) return s, c end it(":take_socket works", function() local s, c = new_pair(1.1) local sock = s:take_socket() assert.same("socket", cs.type(sock)) -- 2nd time it should return nil assert.same(nil, s:take_socket()) sock:close() c:close() end) it(":localname and :peername work", function() do local s, c = new_pair(1.1) -- these are unnamed sockets; so 2nd return should be `nil` assert.same({cs.AF_UNIX, nil}, {s:localname()}) assert.same({cs.AF_UNIX, nil}, {s:peername()}) assert.same({cs.AF_UNIX, nil}, {c:localname()}) assert.same({cs.AF_UNIX, nil}, {c:peername()}) s:close() c:close() end do local s, c = new_pair(1.1) s:take_socket():close() -- take out socket (and discard) c:close() assert.same({nil}, {s:localname()}) assert.same({nil}, {s:peername()}) end end) -- Pending as ECONNRESET behaviour is unportable pending("persists errors (except ETIMEDOUT) until cleared", function() local s, c = new_pair(1.1) assert.same(ce.ETIMEDOUT, select(3, s:read_request_line(0))) assert(s:write_status_line(1.0, "100", "continue", TEST_TIMEOUT)) assert(s:flush(TEST_TIMEOUT)) c:close() assert.same(ce.ECONNRESET, select(3, s:read_request_line(0))) assert.same(ce.ECONNRESET, select(3, s:read_request_line(0))) s:clearerr() assert.same({nil, nil}, {s:read_request_line(0)}) s:close() end) it(":clearerr doesn't throw when socket is gone", function() local s, c = new_pair(1.1) c:close() s:take_socket():close() -- take 
out socket (and discard) s:clearerr() end) it("persisted errors don't leave socket as readable", function() local s, c = new_pair(1.1) c = c:take_socket() assert(c:xwrite("INVALID REQUEST\r\n", "n", TEST_TIMEOUT)) local first_stream = assert(s:get_next_incoming_stream(TEST_TIMEOUT)) assert.same(ce.EILSEQ, select(3, first_stream:get_headers(TEST_TIMEOUT))) first_stream:shutdown() assert.same(ce.EILSEQ, select(3, s:get_next_incoming_stream(TEST_TIMEOUT))) assert.same(ce.EILSEQ, select(3, s:read_request_line(TEST_TIMEOUT))) s:close() c:close() end) it("request line should round trip", function() local function test(req_method, req_path, req_version) local s, c = new_pair(req_version) assert(c:write_request_line(req_method, req_path, req_version)) assert(c:flush()) local res_method, res_path, res_version = assert(s:read_request_line()) assert.same(req_method, res_method) assert.same(req_path, res_path) assert.same(req_version, res_version) s:close() c:close() end test("GET", "/", 1.1) test("POST", "/foo", 1.0) test("OPTIONS", "*", 1.1) end) it(":write_request_line parameters should be validated", function() local s, c = new_pair(1.1) assert.has.errors(function() s:write_request_line("", "/foo", 1.0) end) assert.has.errors(function() s:write_request_line("GET", "", 1.0) end) assert.has.errors(function() s:write_request_line("GET", "/", 0) end) assert.has.errors(function() s:write_request_line("GET", "/", 2) end) s:close() c:close() end) it(":read_request_line should fail on invalid request", function() local function test(chunk) local s, c = new_pair(1.1) s = s:take_socket() assert(s:xwrite(chunk, "n", TEST_TIMEOUT)) s:close() assert.same(ce.EILSEQ, select(3, c:read_request_line(TEST_TIMEOUT))) c:close() end test("GET") -- no \r\n test("\r\nGET") -- no \r\n with preceeding \r\n test("invalid request line\r\n") test(" / HTTP/1.1\r\n") test("\r\n / HTTP/1.1\r\n") test("HTTP/1.1\r\n") test("GET HTTP/1.0\r\n") test("GET HTTP/1.0\r\n") test("GET HTTP/1.0\r\n") test("GET / HTP/1.1\r\n") test("GET / HTTP 1.1\r\n") test("GET / HTTP/1\r\n") test("GET / HTTP/2.0\r\n") test("GET / HTTP/1.1\nHeader: value\r\n") -- missing \r end) it(":read_request_line should allow a leading CRLF", function() local function test(chunk) local s, c = new_pair(1.1) s = s:take_socket() assert(s:xwrite(chunk, "n")) assert(c:read_request_line()) s:close() c:close() end test("\r\nGET / HTTP/1.1\r\n") end) describe("overlong lines", function() it(":read_request_line", function() local s, c = new_pair(1.1) s = s:take_socket() assert(s:xwrite(("a"):rep(10000), "n")) assert.same(ce.EILSEQ, select(3, c:read_request_line(TEST_TIMEOUT))) s:close() c:close() end) it(":read_status_line", function() local s, c = new_pair(1.1) s = s:take_socket() assert(s:xwrite(("a"):rep(10000), "n")) assert.same(ce.EILSEQ, select(3, c:read_status_line(TEST_TIMEOUT))) s:close() c:close() end) it(":read_header", function() local s, c = new_pair(1.1) s = s:take_socket() assert(s:xwrite(("a"):rep(10000), "n")) assert.same(ce.EILSEQ, select(3, c:read_header(TEST_TIMEOUT))) s:close() c:close() end) it(":read_body_chunk", function() local s, c = new_pair(1.1) s = s:take_socket() assert(s:xwrite(("a"):rep(10000), "n")) assert.same(ce.EILSEQ, select(3, c:read_body_chunk(TEST_TIMEOUT))) s:close() c:close() end) end) it("status line should round trip", function() local function test(req_version, req_status, req_reason) local s, c = new_pair(req_version) assert(s:write_status_line(req_version, req_status, req_reason)) assert(s:flush()) local res_version, res_status, 
res_reason = assert(c:read_status_line()) assert.same(req_version, res_version) assert.same(req_status, res_status) assert.same(req_reason, res_reason) s:close() c:close() end test(1.1, "200", "OK") test(1.0, "404", "Not Found") test(1.1, "200", "") test(1.1, "999", "weird\1\127and wonderful\4bytes") end) it(":write_status_line parameters should be validated", function() local s, c = new_pair(1.1) assert.has.errors(function() s:write_status_line(nil, "200", "OK") end) assert.has.errors(function() s:write_status_line(0, "200", "OK") end) assert.has.errors(function() s:write_status_line(2, "200", "OK") end) assert.has.errors(function() s:write_status_line(math.huge, "200", "OK") end) assert.has.errors(function() s:write_status_line("not a number", "200", "OK") end) assert.has.errors(function() s:write_status_line(1.1, "", "OK") end) assert.has.errors(function() s:write_status_line(1.1, "1000", "OK") end) assert.has.errors(function() s:write_status_line(1.1, 200, "OK") end) assert.has.errors(function() s:write_status_line(1.1, "200", "new lines\r\n") end) s:close() c:close() end) it(":read_status_line should return EILSEQ on invalid status line", function() local function test(chunk) local s, c = new_pair(1.1) s = s:take_socket() assert(s:write(chunk, "\r\n")) assert(s:flush()) assert.same(ce.EILSEQ, select(3, c:read_status_line())) s:close() c:close() end test("invalid status line") test("HTTP/0 200 OK") test("HTTP/0.0 200 OK") test("HTTP/2.0 200 OK") test("HTTP/1 200 OK") test("HTTP/.1 200 OK") test("HTP/1.1 200 OK") test("1.1 200 OK") test(" 200 OK") test("200 OK") test("HTTP/1.1 0 OK") test("HTTP/1.1 1000 OK") test("HTTP/1.1 OK") test("HTTP/1.1 OK") test("HTTP/1.1 200") test("HTTP/1.1 200 OK\nHeader: value") -- missing \r end) it(":read_status_line should return nil on EOF", function() local s, c = new_pair(1.1) s:close() assert.same({nil, nil}, {c:read_status_line()}) c:close() end) it("headers should round trip", function() local function test(input) local s, c = new_pair(1.1) assert(c:write_request_line("GET", "/", 1.1)) for _, t in ipairs(input) do assert(c:write_header(t[1], t[2])) end assert(c:write_headers_done()) assert(s:read_request_line()) for _, t in ipairs(input) do local k, v = assert(s:read_header()) assert.same(t[1], k) assert.same(t[2], v) end assert(s:read_headers_done()) s:close() c:close() end test{} test{ {"foo", "bar"}; } test{ {"Host", "example.com"}; {"User-Agent", "some user/agent"}; {"Accept", "*/*"}; } end) it(":read_header works in exotic conditions", function() do -- trailing whitespace local s, c = new_pair(1.1) c = c:take_socket() assert(c:xwrite("foo: bar \r\n\r\n", "bn")) c:close() assert.same({"foo", "bar"}, {s:read_header()}) s:close() end do -- continuation local s, c = new_pair(1.1) c = c:take_socket() assert(c:xwrite("foo: bar\r\n qux\r\n\r\n", "bn")) c:close() assert.same({"foo", "bar qux"}, {s:read_header()}) s:close() end do -- not a continuation, but only partial next header local s, c = new_pair(1.1) c = c:take_socket() assert(c:xwrite("foo: bar\r\npartial", "bn")) c:close() assert.same({"foo", "bar"}, {s:read_header()}) s:close() end do -- not a continuation as gets a single byte of EOH local s, c = new_pair(1.1) c = c:take_socket() assert(c:xwrite("foo: bar\r\n\r", "bn")) c:close() assert.same({"foo", "bar"}, {s:read_header()}) s:close() end do -- trickle local s, c = new_pair(1.1) c = c:take_socket() local cq = cqueues.new(); cq:wrap(function() for char in ("foo: bar\r\n\r\n"):gmatch(".") do assert(c:xwrite(char, "bn")) cqueues.sleep(0.01) end 
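			-- The loop above trickles the header block one byte at a time with a short
			-- sleep between writes, so the :read_header call in the other cqueues
			-- thread (below) must repeatedly block on partial input and resume without
			-- losing or duplicating any bytes.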
end) cq:wrap(function() assert.same({"foo", "bar"}, {s:read_header()}) end) assert(cq:loop()) s:close() c:close() end end) describe(":read_header failure conditions", function() it("handles no data", function() local s, c = new_pair(1.1) c:close() assert.same({nil, nil}, {s:read_header()}) s:close() end) it("handles sudden connection close", function() local s, c = new_pair(1.1) c = c:take_socket() assert(c:xwrite("foo", "bn")) c:close() assert.same(ce.EILSEQ, select(3, s:read_header())) s:close() end) it("handles sudden connection close after field name", function() local s, c = new_pair(1.1) c = c:take_socket() assert(c:xwrite("foo:", "bn")) c:close() assert.same(ce.EILSEQ, select(3, s:read_header())) s:close() end) it("handles sudden connection close after :", function() local s, c = new_pair(1.1) c = c:take_socket() assert(c:xwrite("foo: ba", "bn")) c:close() assert.same(ce.EILSEQ, select(3, s:read_header())) s:close() end) it("handles has carriage return but no new line", function() -- unknown if it was going to be a header continuation or not local s, c = new_pair(1.1) c = c:take_socket() assert(c:xwrite("foo: bar\r", "bn")) c:close() assert.same(ce.EILSEQ, select(3, s:read_header())) s:close() end) it("handles closed after new line", function() -- unknown if it was going to be a header continuation or not local s, c = new_pair(1.1) c = c:take_socket() assert(c:xwrite("foo: bar\r\n", "bn")) c:close() assert.same(ce.EILSEQ, select(3, s:read_header())) s:close() end) it("handles timeout", function() local s, c = new_pair(1.1) assert.same(ce.ETIMEDOUT, select(3, s:read_header(0.01))) s:close() c:close() end) -- Pending as ECONNRESET behaviour is unportable pending("handles connection reset", function() local s, c = new_pair(1.1) assert(s:write_body_plain("something that flushes")) c:close() assert.same({nil, "read: Connection reset by peer", ce.ECONNRESET}, {s:read_header()}) s:close() end) it("disallows whitespace before :", function() local s, c = new_pair(1.1) c = c:take_socket() assert(c:xwrite("foo : bar\r\n\r\n", "bn")) c:close() assert.same(ce.EILSEQ, select(3, s:read_header())) s:close() end) it("handles no field name", function() local s, c = new_pair(1.1) c = c:take_socket() assert(c:xwrite(": fs\r\n\r\n", "bn")) c:close() assert.same(ce.EILSEQ, select(3, s:read_header())) s:close() end) it("handles no colon", function() local s, c = new_pair(1.1) c = c:take_socket() assert(c:xwrite("foo bar\r\n\r\n", "bn")) c:close() assert.same(ce.EILSEQ, select(3, s:read_header())) s:close() end) end) describe(":read_headers_done should handle failure conditions", function() it("no data", function() local s, c = new_pair(1.1) c:close() assert.same({nil, nil}, {s:read_headers_done()}) s:close() end) it("sudden connection close", function() local s, c = new_pair(1.1) c = c:take_socket() assert(c:xwrite("\r", "bn")) c:close() assert.same(ce.EILSEQ, select(3, s:read_headers_done())) s:close() end) it("timeout", function() local s, c = new_pair(1.1) assert.same(ce.ETIMEDOUT, select(3, s:read_headers_done(0.01))) s:close() c:close() end) -- Pending as ECONNRESET behaviour is unportable pending("connection reset", function() local s, c = new_pair(1.1) assert(s:write_body_plain("something that flushes")) c:close() assert.same({nil, "read: Connection reset by peer", ce.ECONNRESET}, {s:read_headers_done()}) s:close() end) it("wrong byte", function() local s, c = new_pair(1.1) c = c:take_socket() assert(c:xwrite("\0", "bn")) c:close() assert.same(ce.EILSEQ, select(3, s:read_headers_done())) s:close() 
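			-- A minimal success-case sketch for contrast with the failure cases in this
			-- block (added for illustration, not part of the upstream suite): a bare CRLF
			-- is the header-block terminator that :read_headers_done looks for, assuming
			-- it only needs to see the terminator and not a preceding request line.
			do
				local h1c = require "http.h1_connection"
				local cs2 = require "cqueues.socket"
				local s2, c2 = require "cqueues.auxlib".assert(cs2.pair())
				local conn = h1c.new(s2, "server", 1.1)
				assert(c2:xwrite("\r\n", "bn")) -- the blank line that ends a header block
				c2:close()
				assert(conn:read_headers_done(TEST_TIMEOUT))
				conn:close()
			end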
		end)
		it("wrong bytes", function()
			local s, c = new_pair(1.1)
			c = c:take_socket()
			assert(c:xwrite("hi", "bn"))
			c:close()
			assert.same(ce.EILSEQ, select(3, s:read_headers_done()))
			s:close()
		end)
	end)
	it(":write_header accepts odd fields", function()
		local s, c = new_pair(1.1)
		assert(s:write_header("foo", "bar"))
		assert(s:write_header("foo", " bar"))
		assert(s:write_header("foo", "bar "))
		assert(s:write_header("foo", "bar: stuff"))
		assert(s:write_header("foo", "bar, stuff"))
		assert(s:write_header("foo", "bar\n continuation"))
		assert(s:write_header("foo", "bar\r\n continuation"))
		assert(s:write_header("foo", "bar\r\n continuation: with colon"))
		c:close()
		s:close()
	end)
	it(":write_header rejects invalid headers", function()
		local s, c = new_pair(1.1)
		assert.has.errors(function() s:write_header() end)
		-- odd field names
		assert.has.errors(function() s:write_header(nil, "bar") end)
		assert.has.errors(function() s:write_header(":", "bar") end)
		assert.has.errors(function() s:write_header("\n", "bar") end)
		assert.has.errors(function() s:write_header("foo\r\n", "bar") end)
		assert.has.errors(function() s:write_header("f\r\noo", "bar") end)
		-- odd values
		assert.has.errors(function() s:write_header("foo") end)
		assert.has.errors(function() s:write_header("foo", "bar\r\n") end)
		assert.has.errors(function() s:write_header("foo", "bar\r\n\r\n") end)
		assert.has.errors(function() s:write_header("foo", "bar\nbad continuation") end)
		assert.has.errors(function() s:write_header("foo", "bar\r\nbad continuation") end)
		s:close()
		c:close()
	end)
	it("chunks round trip", function()
		local s, c = new_pair(1.1)
		assert(c:write_request_line("POST", "/", 1.1))
		assert(c:write_header("Transfer-Encoding", "chunked"))
		assert(c:write_headers_done())
		assert(c:write_body_chunk("this is a chunk"))
		assert(c:write_body_chunk("this is another chunk"))
		assert(c:write_body_last_chunk())
		assert(c:write_headers_done())
		assert(s:read_request_line())
		assert(s:read_header())
		assert(s:read_headers_done())
		assert.same("this is a chunk", s:read_body_chunk())
		assert.same("this is another chunk", s:read_body_chunk())
		assert.same(false, s:read_body_chunk())
		assert(s:read_headers_done())
		s:close()
		c:close()
	end)
	it(":read_body_chunk doesn't consume input on failure", function()
		local s, c = new_pair(1.1)
		c = c:take_socket()
		assert(c:xwrite("6", "n"))
		assert.same(ce.ETIMEDOUT, select(3, s:read_body_chunk(0.01)))
		s:clearerr()
		assert(c:xwrite("\r\nfoo", "n"))
		assert.same(ce.ETIMEDOUT, select(3, s:read_body_chunk(0.01)))
		s:clearerr()
		assert(c:xwrite("bar\r\n", "n"))
		assert.same({"foobar"}, {s:read_body_chunk(0.001)})
		assert(c:xwrite("0", "n"))
		assert.same(ce.ETIMEDOUT, select(3, s:read_body_chunk(0.01)))
		s:clearerr()
		assert(c:xwrite("\r", "n"))
		assert.same(ce.ETIMEDOUT, select(3, s:read_body_chunk(0.01)))
		s:clearerr()
		assert(c:xwrite("\n", "n"))
		assert.same({false}, {s:read_body_chunk(0.001)})
		s:close()
		c:close()
	end)
	it(":read_body_chunk fails on invalid chunk", function()
		local function test(chunk, expected_errno)
			local s, c = new_pair(1.1)
			s = s:take_socket()
			assert(s:xwrite(chunk, "n", TEST_TIMEOUT))
			s:close()
			local data, _, errno = c:read_body_chunk(TEST_TIMEOUT)
			assert.same(nil, data)
			assert.same(expected_errno, errno)
			c:close()
		end
		test("", nil)
		test("5", ce.EILSEQ)
		test("5\r", ce.EILSEQ)
		test("fffffffffffffff\r\n", ce.E2BIG)
		test("not a number\r\n", ce.EILSEQ)
		test("4\r\n1", ce.EILSEQ)
		test("4\r\nfour\n", ce.EILSEQ)
		test("4\r\nlonger than four", ce.EILSEQ)
		test("4\r\nfour\nmissing \r", ce.EILSEQ)
	end)
	it(":read_body_chunk is cqueues thread-safe",
function() local s, c = new_pair(1.1) s = s:take_socket() local cq = cqueues.new() cq:wrap(function() local chunk = assert(c:read_body_chunk()) assert.same("bytes", chunk) end) cq:wrap(function() assert(s:xwrite("5\r\n", "bn")) cqueues.sleep(0.001) -- let other thread block on reading chunk body assert(s:xwrite("chars\r\n", "bn")) local chunk = assert(c:read_body_chunk()) assert.same("chars", chunk) -- send a 2nd frame assert(s:xwrite("5\r\nbytes\r\n", "bn")) s:close() end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) c:close() end) end) describe("high level http1 connection operations", function() local h1_connection = require "http.h1_connection" local ca = require "cqueues.auxlib" local ce = require "cqueues.errno" local cs = require "cqueues.socket" local function new_pair(version) local s, c = ca.assert(cs.pair()) s = h1_connection.new(s, "server", version) c = h1_connection.new(c, "client", version) return s, c end it(":shutdown('r') shouldn't shutdown streams that have been read", function() local s, c = new_pair(1.1) -- luacheck: ignore 211 assert(c:write_request_line("GET", "/", 1.0)) assert(c:write_headers_done()) assert(c:write_request_line("GET", "/", 1.0)) assert(c:write_headers_done()) local stream1 = assert(s:get_next_incoming_stream()) assert(stream1:read_headers()) local stream2 = assert(s:get_next_incoming_stream()) assert.same("idle", stream2.state) s:shutdown("r") assert.same("idle", stream2.state) s:close() c:close() end) it(":get_next_incoming_stream times out", function() local s, c = new_pair(1.1) -- luacheck: ignore 211 assert.same(ce.ETIMEDOUT, select(3, s:get_next_incoming_stream(0.05))) s:close() c:close() end) it(":get_next_incoming_stream returns nil when no data", function() local s, c = new_pair(1.1) c:close() -- perform a read operation so we note the EOF assert.same({nil, nil}, {s:read_status_line()}) -- now waiting for a stream should also return EOF assert.same({nil, nil}, {s:get_next_incoming_stream()}) s:close() end) end) lua-http-0.4/spec/h1_stream_spec.lua000066400000000000000000000411131400726324600174510ustar00rootroot00000000000000describe("http1 stream", function() local h1_connection = require "http.h1_connection" local new_headers = require "http.headers".new local cqueues = require "cqueues" local ca = require "cqueues.auxlib" local cc = require "cqueues.condition" local ce = require "cqueues.errno" local cs = require "cqueues.socket" local function new_pair(version) local s, c = ca.assert(cs.pair()) s = h1_connection.new(s, "server", version) c = h1_connection.new(c, "client", version) return s, c end it("allows resuming :read_headers", function() local server, client = new_pair(1.1) client = client:take_socket() assert(client:xwrite("GET / HTTP/1.1\r\n", "n")) local stream = server:get_next_incoming_stream() assert.same(ce.ETIMEDOUT, select(3, stream:read_headers(0.001))) assert(client:xwrite("Foo: bar\r\n", "n")) assert.same(ce.ETIMEDOUT, select(3, stream:read_headers(0.001))) assert(client:xwrite("\r\n", "n")) local h = assert(stream:read_headers(0.01)) assert.same("/", h:get(":path")) assert.same("bar", h:get("foo")) end) it("Writing to a shutdown connection returns EPIPE", function() local server, client = new_pair(1.1) local stream = client:new_stream() client:shutdown() local headers = new_headers() headers:append(":method", "GET") headers:append(":scheme", "http") headers:append(":authority", "myauthority") headers:append(":path", "/a") assert.same(ce.EPIPE, select(3, stream:write_headers(headers, true))) client:close() 
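		-- Illustrative aside (not from the upstream suite): the EPIPE compared above is
		-- the plain integer from cqueues.errno; when debugging, it can be rendered as a
		-- readable message, assuming cqueues.errno's strerror as used below.
		assert.same("string", type(ce.strerror(ce.EPIPE)))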
server:close() end) it("shutdown of an open server stream sends an automatic 503", function() local server, client = new_pair(1.1) local cq = cqueues.new() cq:wrap(function() local stream = client:new_stream() local req_headers = new_headers() req_headers:append(":method", "GET") req_headers:append(":scheme", "http") req_headers:append(":authority", "myauthority") req_headers:append(":path", "/a") assert(stream:write_headers(req_headers, true)) local res_headers = assert(stream:get_headers()) assert.same("503", res_headers:get(":status")) end) cq:wrap(function() local stream = server:get_next_incoming_stream() assert(stream:get_headers()) stream:shutdown() end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) server:close() client:close() end) it("shutdown of an open server stream with client protocol errors sends an automatic 400", function() local server, client = new_pair(1.1) local cq = cqueues.new() cq:wrap(function() assert(client:write_request_line("GET", "/", 1.1)) assert(client.socket:xwrite(":not a valid header\r\n", "bn")) local _, status_code = assert(client:read_status_line()) assert.same("400", status_code) end) cq:wrap(function() local stream = assert(server:get_next_incoming_stream()) assert.same(ce.EILSEQ, select(3, stream:get_headers())) stream:shutdown() end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) server:close() client:close() end) it(":unget returns truthy value on success", function() local server, client = new_pair(1.1) local stream = client:new_stream() assert.truthy(stream:unget("foo")) assert.same("foo", stream:get_next_chunk()) client:close() server:close() end) it("doesn't hang when :shutdown is called when waiting for headers", function() local server, client = new_pair(1.1) local stream = client:new_stream() local headers = new_headers() headers:append(":method", "GET") headers:append(":scheme", "http") headers:append(":authority", "myauthority") headers:append(":path", "/a") assert(stream:write_headers(headers, true)) local cq = cqueues.new():wrap(function() stream:shutdown() end) assert_loop(cq, 0.01) assert.truthy(cq:empty()) server:close() client:close() end) it("inserts connection: close if the connection is going to be closed afterwards", function() local server, client = new_pair(1.0) local cq = cqueues.new() cq:wrap(function() local stream = client:new_stream() local req_headers = new_headers() req_headers:append(":method", "GET") req_headers:append(":scheme", "http") req_headers:append(":authority", "myauthority") req_headers:append(":path", "/a") assert(stream:write_headers(req_headers, true)) local res_headers = assert(stream:get_headers()) assert.same("close", res_headers:get("connection")) assert.same({}, {stream:get_next_chunk()}) end) cq:wrap(function() local stream = server:get_next_incoming_stream() assert(stream:get_headers()) local res_headers = new_headers() res_headers:append(":status", "200") assert(stream:write_headers(res_headers, true)) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) server:close() client:close() end) it("returns multiple chunks on slow 'connection: close' bodies", function() local server, client = new_pair(1.1) local cq = cqueues.new() cq:wrap(function() local stream = client:new_stream() local req_headers = new_headers() req_headers:append(":method", "GET") req_headers:append(":scheme", "http") req_headers:append(":authority", "myauthority") req_headers:append(":path", "/a") assert(stream:write_headers(req_headers, true)) assert(stream:get_headers()) assert.same("foo", 
stream:get_next_chunk()) assert.same("bar", stream:get_next_chunk()) assert.same({}, {stream:get_next_chunk()}) end) cq:wrap(function() local stream = server:get_next_incoming_stream() assert(stream:get_headers()) local res_headers = new_headers() res_headers:append(":status", "200") res_headers:append("connection", "close") assert(stream:write_headers(res_headers, false)) assert(stream:write_chunk("foo", false)) cqueues.sleep(0.1) assert(stream:write_chunk("bar", true)) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) server:close() client:close() end) it("queues up trailers and returns them from :get_headers", function() local server, client = new_pair(1.1) local cq = cqueues.new() cq:wrap(function() local stream = client:new_stream() local headers = new_headers() headers:append(":method", "GET") headers:append(":scheme", "http") headers:append(":authority", "myauthority") headers:append(":path", "/a") headers:append("transfer-encoding", "chunked") assert(stream:write_headers(headers, false)) local trailers = new_headers() trailers:append("foo", "bar") assert(stream:write_headers(trailers, true)) end) cq:wrap(function() local stream = server:get_next_incoming_stream() assert(stream:get_headers()) assert.same("", assert(stream:get_body_as_string())) -- check remote end has completed (and hence the following :get_headers won't be reading from socket) assert.same("half closed (remote)", stream.state) local trailers = assert(stream:get_headers()) assert.same("bar", trailers:get("foo")) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) server:close() client:close() end) it("doesn't return from last get_next_chunk until trailers are read", function() local server, client = new_pair(1.1) assert(client:write_request_line("GET", "/a", client.version, TEST_TIMEOUT)) assert(client:write_header("transfer-encoding", "chunked", TEST_TIMEOUT)) assert(client:write_headers_done(TEST_TIMEOUT)) assert(client:write_body_chunk("foo", nil, TEST_TIMEOUT)) assert(client:write_body_last_chunk(nil, TEST_TIMEOUT)) assert(client:write_header("sometrailer", "bar", TEST_TIMEOUT)) assert(client:flush(TEST_TIMEOUT)) local server_stream = server:get_next_incoming_stream(0.01) assert(server_stream:get_headers(0.01)) assert.same("foo", server_stream:get_next_chunk(0.01)) -- Shouldn't return `nil` (indicating EOF) until trailers are completely read. 
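		-- (Descriptive note) At this point the client has written, roughly:
		--   GET /a HTTP/1.1\r\n
		--   transfer-encoding: chunked\r\n\r\n
		--   3\r\nfoo\r\n          (one data chunk)
		--   0\r\n                 (last-chunk marker)
		--   sometrailer: bar\r\n  (a trailer field)
		-- but not yet the final blank line that ends the trailer section, which is why
		-- the reads below time out instead of reporting end-of-body.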
assert.same(ce.ETIMEDOUT, select(3, server_stream:get_next_chunk(0.01))) assert.same(ce.ETIMEDOUT, select(3, server_stream:get_headers(0.01))) assert(client:write_headers_done(TEST_TIMEOUT)) assert.same({}, {server_stream:get_next_chunk(0.01)}) local trailers = assert(server_stream:get_headers(0)) assert.same("bar", trailers:get("sometrailer")) server:close() client:close() end) it("waits for trailers when :get_headers is run in a second thread", function() local server, client = new_pair(1.1) local cq = cqueues.new() cq:wrap(function() local stream = client:new_stream() local headers = new_headers() headers:append(":method", "GET") headers:append(":scheme", "http") headers:append(":authority", "myauthority") headers:append(":path", "/a") headers:append("transfer-encoding", "chunked") assert(stream:write_headers(headers, false)) local trailers = new_headers() trailers:append("foo", "bar") assert(stream:write_headers(trailers, true)) end) cq:wrap(function() local stream = server:get_next_incoming_stream() assert(stream:get_headers()) cqueues.running():wrap(function() local trailers = assert(stream:get_headers()) assert.same("bar", trailers:get("foo")) end) cqueues.sleep(0.1) assert.same("", assert(stream:get_body_as_string())) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) server:close() client:close() end) it("Can read content-length delimited stream", function() local server, client = new_pair(1.1) local cq = cqueues.new() cq:wrap(function() do local stream = client:new_stream() local headers = new_headers() headers:append(":method", "GET") headers:append(":scheme", "http") headers:append(":authority", "myauthority") headers:append(":path", "/a") headers:append("content-length", "100") assert(stream:write_headers(headers, false)) assert(stream:write_chunk(("b"):rep(100), true)) end do local stream = client:new_stream() local headers = new_headers() headers:append(":method", "GET") headers:append(":scheme", "http") headers:append(":authority", "myauthority") headers:append(":path", "/b") headers:append("content-length", "0") assert(stream:write_headers(headers, true)) end end) cq:wrap(function() do local stream = server:get_next_incoming_stream() local headers = assert(stream:read_headers()) local body = assert(stream:get_body_as_string()) assert.same(100, tonumber(headers:get("content-length"))) assert.same(100, #body) end do local stream = server:get_next_incoming_stream() local headers = assert(stream:read_headers()) local body = assert(stream:get_body_as_string()) assert.same(0, tonumber(headers:get("content-length"))) assert.same(0, #body) end end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) server:close() client:close() end) it("allows pipelining", function() local server, client = new_pair(1.1) local cq = cqueues.new() local streams = {} cq:wrap(function() local x = server:get_next_incoming_stream() local xh = assert(x:read_headers()) while x:get_next_chunk() do end streams[xh:get(":path")] = x end) cq:wrap(function() local y = server:get_next_incoming_stream() local yh = assert(y:read_headers()) while y:get_next_chunk() do end streams[yh:get(":path")] = y end) cq:wrap(function() local z = server:get_next_incoming_stream() local zh = assert(z:read_headers()) while z:get_next_chunk() do end streams[zh:get(":path")] = z end) local client_sync = cc.new() cq:wrap(function() if client_sync then client_sync:wait() end local a = client:new_stream() local ah = new_headers() ah:append(":method", "GET") ah:append(":scheme", "http") ah:append(":authority", 
"myauthority") ah:append(":path", "/a") assert(a:write_headers(ah, true)) end) cq:wrap(function() client_sync:signal(); client_sync = nil; local b = client:new_stream() local bh = new_headers() bh:append(":method", "POST") bh:append(":scheme", "http") bh:append(":authority", "myauthority") bh:append(":path", "/b") assert(b:write_headers(bh, false)) cqueues.sleep(0.01) assert(b:write_chunk("this is some POST data", true)) end) cq:wrap(function() local c = client:new_stream() local ch = new_headers() ch:append(":method", "GET") ch:append(":scheme", "http") ch:append(":authority", "myauthority") ch:append(":path", "/c") assert(c:write_headers(ch, true)) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) -- All requests read; now for responses -- Don't want /a to be first. local server_sync = cc.new() cq:wrap(function() if server_sync then server_sync:wait() end local h = new_headers() h:append(":status", "200") assert(streams["/a"]:write_headers(h, true)) end) cq:wrap(function() server_sync:signal(); server_sync = nil; local h = new_headers() h:append(":status", "200") assert(streams["/b"]:write_headers(h, true)) end) cq:wrap(function() if server_sync then server_sync:wait() end local h = new_headers() h:append(":status", "200") assert(streams["/c"]:write_headers(h, true)) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) server:close() client:close() end) it("modifying pipelined headers doesn't affect what's sent", function() local server, client = new_pair(1.1) local cq = cqueues.new() cq:wrap(function() local a = client:new_stream() local b = client:new_stream() local c = client:new_stream() do local h = new_headers() h:append(":method", "POST") h:append(":scheme", "http") h:append(":authority", "myauthority") h:append(":path", "/") h:upsert("id", "a") assert(a:write_headers(h, false)) cq:wrap(function() cq:wrap(function() cq:wrap(function() assert(a:write_chunk("a", true)) end) h:upsert("id", "c") assert(c:write_headers(h, false)) assert(c:write_chunk("c", true)) end) h:upsert("id", "b") assert(b:write_headers(h, false)) assert(b:write_chunk("b", true)) end) end do local h = assert(a:get_headers()) assert.same("a", h:get "id") end do local h = assert(b:get_headers()) assert.same("b", h:get "id") end do local h = assert(c:get_headers()) assert.same("c", h:get "id") end end) cq:wrap(function() local h = new_headers() h:append(":status", "200") local a = assert(server:get_next_incoming_stream()) assert.same("a", assert(a:get_headers()):get "id") assert.same("a", a:get_body_as_string()) cq:wrap(function() h:upsert("id", "a") assert(a:write_headers(h, true)) end) local b = assert(server:get_next_incoming_stream()) assert.same("b", assert(b:get_headers()):get "id") assert.same("b", b:get_body_as_string()) h:upsert("id", "b") assert(b:write_headers(h, true)) local c = assert(server:get_next_incoming_stream()) assert.same("c", assert(c:get_headers()):get "id") assert.same("c", c:get_body_as_string()) assert(c:get_headers()) h:upsert("id", "c") assert(c:write_headers(h, true)) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) server:close() client:close() end) it("allows 100 continue", function() local server, client = new_pair(1.1) local cq = cqueues.new() cq:wrap(function() local a = client:new_stream() local h = new_headers() h:append(":method", "POST") h:append(":scheme", "http") h:append(":authority", "myauthority") h:append(":path", "/a") h:append("expect", "100-continue") assert(a:write_headers(h, false)) assert(assert(a:get_headers()):get(":status") == 
"100") assert(a:write_chunk("body", true)) assert(assert(a:get_headers()):get(":status") == "200") assert(a:get_next_chunk() == "done") assert.same({}, {a:get_next_chunk()}) end) cq:wrap(function() local b = assert(server:get_next_incoming_stream()) assert(b:get_headers()) assert(b:write_continue()) assert(b:get_next_chunk() == "body") assert.same({}, {b:get_next_chunk()}) local h = new_headers() h:append(":status", "200") assert(b:write_headers(h, false)) assert(b:write_chunk("done", true)) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) server:close() client:close() end) it("doesn't allow sending body before headers", function() local server, client = new_pair(1.1) local cq = cqueues.new() cq:wrap(function() local a = client:new_stream() local h = new_headers() h:append(":method", "GET") h:append(":scheme", "http") h:append(":authority", "myauthority") h:append(":path", "/") assert(a:write_headers(h, true)) end) cq:wrap(function() local b = assert(server:get_next_incoming_stream()) b.use_zlib = false assert(b:get_headers()) assert.has.errors(function() b:write_chunk("", true) end) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) server:close() client:close() end) end) lua-http-0.4/spec/h2_connection_spec.lua000066400000000000000000000213461400726324600203240ustar00rootroot00000000000000describe("http2 connection", function() local h2_connection = require "http.h2_connection" local new_headers = require "http.headers".new local cqueues = require "cqueues" local ca = require "cqueues.auxlib" local cc = require "cqueues.condition" local ce = require "cqueues.errno" local cs = require "cqueues.socket" local function new_pair() local s, c = ca.assert(cs.pair()) s = assert(h2_connection.new(s, "server")) c = assert(h2_connection.new(c, "client")) return s, c end it("has a pretty __tostring", function() do local s, c = new_pair() local stream = c:new_stream() assert.same("http.h2_stream{", tostring(stream):match("^.-%{")) assert.same("http.h2_connection{", tostring(c):match("^.-%{")) c:close() s:close() end do -- Start an actual connection so that the tostring shows dependant streams local s, c = new_pair() local stream = c:new_stream() assert.same("http.h2_stream{", tostring(stream):match("^.-%{")) assert.same("http.h2_connection{", tostring(c):match("^.-%{")) stream:shutdown() assert(c:close()) assert(s:close()) end end) it("Rejects invalid #preface", function() local function test_preface(text) local s, c = ca.assert(cs.pair()) local cq = cqueues.new() cq:wrap(function() s = assert(h2_connection.new(s, "server")) local ok, err = s:step() assert.same(nil, ok) assert.same("invalid connection preface. 
not an http2 client?", err.message) end) cq:wrap(function() assert(c:xwrite(text, "n")) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) c:close() s:close() end test_preface("invalid preface") test_preface("PRI * HTTP/2.0\r\n\r\nSM\r\n\r") -- missing last \n test_preface(("long string"):rep(1000)) end) it("Doesn't busy-loop looking for #preface", function() local s, c = ca.assert(cs.pair()) s = assert(h2_connection.new(s, "server")) assert(s:step(0)) assert.not_same(0, (s:timeout())) c:close() s:close() end) it("read_http2_frame fails with EILSEQ on corrupt frame", function() local spack = string.pack or require "compat53.string".pack -- luacheck: ignore 143 local s, c = ca.assert(cs.pair()) local cq = cqueues.new() cq:wrap(function() c = assert(h2_connection.new(c, "client")) assert.same(ce.EILSEQ, select(3, c:read_http2_frame())) c:close() end) cq:wrap(function() assert(s:xwrite(spack(">I3 B B I4", 100, 0x6, 0, 0), "bf")) assert(s:xwrite("not 100 bytes", "bn")) s:close() end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("read_http2_frame is cqueues thread-safe", function() local spack = string.pack or require "compat53.string".pack -- luacheck: ignore 143 local s, c = ca.assert(cs.pair()) c = assert(h2_connection.new(c, "client")) local cq = cqueues.new() cq:wrap(function() local typ, flags, id, payload = assert(c:read_http2_frame()) assert.same(0, typ) assert.same(0, flags) assert.same(0, id) assert.same("ninebytes", payload) end) cq:wrap(function() local frame_header = spack(">I3 B B I4", 9, 0, 0, 0) assert(s:xwrite(frame_header .. "nine", "bn")) cqueues.sleep(0.001) -- let other thread block on reading frame body assert(s:xwrite("chars", "bn")) local typ, flags, id, payload = assert(c:read_http2_frame()) assert.same(0, typ) assert.same(0, flags) assert.same(0, id) assert.same("ninechars", payload) -- send a 2nd frame assert(s:xwrite(frame_header .. 
"ninebytes", "bn")) s:close() end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) c:close() end) it("Can #ping back and forth", function() local s, c = new_pair() local cq = cqueues.new() cq:wrap(function() cq:wrap(function() for _=1, 10 do assert(c:ping()) end assert(c:shutdown()) end) assert_loop(c) assert(c:close()) end) cq:wrap(function() cq:wrap(function() assert(s:ping()) end) assert_loop(s) assert(s:close()) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("Can #ping without a driving loop", function() local s, c = new_pair() local cq = cqueues.new() cq:wrap(function() for _=1, 10 do assert(c:ping()) end assert(c:close()) end) cq:wrap(function() assert_loop(s) assert(s:close()) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("streams used out of order", function() local s, c = new_pair() local cq = cqueues.new() cq:wrap(function() local client_stream1 = c:new_stream() local client_stream2 = c:new_stream() local req_headers = new_headers() req_headers:append(":method", "GET") req_headers:append(":scheme", "http") req_headers:append(":path", "/2") assert(client_stream2:write_headers(req_headers, true)) req_headers:upsert(":path", "/1") assert(client_stream1:write_headers(req_headers, true)) assert(c:close()) end) cq:wrap(function() for i=1, 2 do local stream = assert(s:get_next_incoming_stream()) local headers = assert(stream:get_headers()) assert(string.format("/%d", i), headers:get(":path")) end assert(s:close()) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("waits for peer flow #credits", function() local s, c = new_pair() local cq = cqueues.new() local client_stream cq:wrap(function() client_stream = c:new_stream() local req_headers = new_headers() req_headers:append(":method", "GET") req_headers:append(":scheme", "http") req_headers:append(":path", "/") assert(client_stream:write_headers(req_headers, false)) local ok, cond = 0, cc.new() cq:wrap(function() ok = ok + 1 if ok == 2 then cond:signal() end assert(c.peer_flow_credits_change:wait(TEST_TIMEOUT/2), "no connection credits") end) cq:wrap(function() ok = ok + 1 if ok == 2 then cond:signal() end assert(client_stream.peer_flow_credits_change:wait(TEST_TIMEOUT/2), "no stream credits") end) cond:wait() -- wait for above threads to get scheduled assert(client_stream:write_chunk(("really long string"):rep(1e4), true)) assert_loop(c) assert(c:close()) end) local len = 0 cq:wrap(function() local stream = assert(s:get_next_incoming_stream()) while true do local chunk, err = stream:get_next_chunk() if chunk == nil then if err == nil then break else error(err) end end len = len + #chunk end assert(s:close()) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) assert.same(client_stream.stats_sent, len) end) describe("priority", function() it("allows sending priority frames", function() local s, c = new_pair() local cq = cqueues.new() cq:wrap(function() local parent_stream = c:new_stream() assert(parent_stream:write_priority_frame(false, 0, 201)) parent_stream:shutdown() assert(c:close()) end) cq:wrap(function() local stream = assert(s:get_next_incoming_stream()) assert.same(201, stream.weight) stream:shutdown() assert(s:close()) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("sets default priority for streams with missing parent", function() local cq = cqueues.new() local s, c = new_pair() cq:wrap(function() local client_stream = c:new_stream() local req_headers = new_headers() req_headers:append(":method", "GET") 
req_headers:append(":scheme", "http") req_headers:append(":path", "/") -- Encode HEADER payload and send with dependency on missing stream c.encoding_context:encode_headers(req_headers) local payload = c.encoding_context:render_data() c.encoding_context:clear_data() assert(client_stream:write_headers_frame(payload, true, true, nil, nil, 99, 99)) client_stream:shutdown() assert(c:close()) end) cq:wrap(function() local stream = assert(s:get_next_incoming_stream()) -- Check if set to default priority instead of missing parent assert.is_not.same(stream.weight, 99) stream:shutdown() assert(s:close()) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) end) describe("settings", function() it("correctly handles odd frame sizes", function() local s, c = ca.assert(cs.pair()) -- should error if < 16384 assert.has.errors(function() h2_connection.new(c, "client", {[0x5]=1}, TEST_TIMEOUT) end) assert.has.errors(function() h2_connection.new(c, "client", {[0x5]=16383}, TEST_TIMEOUT) end) -- should error if > 2^24 assert.has.errors(function() h2_connection.new(c, "client", {[0x5]=2^24}, TEST_TIMEOUT) end) assert.has.errors(function() h2_connection.new(c, "client", {[0x5]=2^32}, TEST_TIMEOUT) end) assert.has.errors(function() h2_connection.new(c, "client", {[0x5]=math.huge}, TEST_TIMEOUT) end) s:close() c:close() end) end) end) lua-http-0.4/spec/h2_error_spec.lua000066400000000000000000000030701400726324600173100ustar00rootroot00000000000000describe("", function() local h2_error = require "http.h2_error" it("has the registered errors", function() for i=0, 0xd do -- indexed by code assert.same(i, h2_error.errors[i].code) -- and indexed by name assert.same(h2_error.errors[i], h2_error.errors[h2_error.errors[i].name]) end end) it("has a nice tostring", function() local e = h2_error.errors[0]:new{ message = "oops"; traceback = "some traceback"; } assert.same("NO_ERROR(0x0): Graceful shutdown: oops\nsome traceback", tostring(e)) end) it("`is` function works", function() assert.truthy(h2_error.is(h2_error.errors[0])) assert.falsy(h2_error.is({})) assert.falsy(h2_error.is("string")) assert.falsy(h2_error.is(1)) assert.falsy(h2_error.is(coroutine.create(function()end))) assert.falsy(h2_error.is(io.stdin)) end) it("throws errors when called", function() assert.has.errors(function() h2_error.errors[0]("oops", false, 0) end, { name = "NO_ERROR"; code = 0; description = "Graceful shutdown"; message = "oops"; stream_error = false; }) end) it("adds a traceback field", function() local ok, err = pcall(h2_error.errors[0]) assert.falsy(ok) assert.truthy(err.traceback) end) it(":assert works", function() assert.falsy(pcall(h2_error.errors[0].assert, h2_error.errors[0], false)) assert.truthy(pcall(h2_error.errors[0].assert, h2_error.errors[0], true)) end) it(":assert adds a traceback field", function() local ok, err = pcall(h2_error.errors[0].assert, h2_error.errors[0], false) assert.falsy(ok) assert.truthy(err.traceback) end) end) lua-http-0.4/spec/h2_stream_spec.lua000066400000000000000000000223541400726324600174600ustar00rootroot00000000000000describe("http.h2_stream", function() local h2_connection = require "http.h2_connection" local h2_error = require "http.h2_error" local new_headers = require "http.headers".new local cqueues = require "cqueues" local ca = require "cqueues.auxlib" local cs = require "cqueues.socket" local function new_pair() local s, c = ca.assert(cs.pair()) s = assert(h2_connection.new(s, "server")) c = assert(h2_connection.new(c, "client")) return s, c end it("rejects header fields 
with uppercase characters", function() local s, c = new_pair() local client_stream = c:new_stream() local req_headers = new_headers() req_headers:append(":method", "GET") req_headers:append(":scheme", "http") req_headers:append(":path", "/") req_headers:append("Foo", "bar") assert.has.errors(function() client_stream:write_headers(req_headers, false, 0) end) c:close() s:close() end) it("breaks up a large header block into continuation frames", function() local s, c = new_pair() local cq = cqueues.new() local req_headers = new_headers() req_headers:append(":method", "GET") req_headers:append(":scheme", "http") req_headers:append(":path", "/") req_headers:append("unknown", ("a"):rep(16384*3)) -- at least 3 frames worth cq:wrap(function() local client_stream = c:new_stream() assert(client_stream:write_headers(req_headers, true)) assert(c:close()) end) cq:wrap(function() local stream = assert(s:get_next_incoming_stream()) local response_headers = assert(stream:get_headers()) assert.same(req_headers, response_headers) assert(s:close()) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("can send a body", function() local s, c = new_pair() local cq = cqueues.new() cq:wrap(function() local client_stream = c:new_stream() local req_headers = new_headers() req_headers:append(":method", "GET") req_headers:append(":scheme", "http") req_headers:append(":path", "/") -- use non-integer timeouts to catch errors with integer vs number assert(client_stream:write_headers(req_headers, false, 1.1)) assert(client_stream:write_chunk("some body", false, 1.1)) assert(client_stream:write_chunk("more body", true, 1.1)) assert(c:close()) end) cq:wrap(function() local stream = assert(s:get_next_incoming_stream()) local body = assert(stream:get_body_as_string(1.1)) assert.same("some bodymore body", body) assert(s:close()) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("errors if content-length is exceeded", function() local s, c = new_pair() local cq = cqueues.new() cq:wrap(function() local client_stream = c:new_stream() local req_headers = new_headers() req_headers:append(":method", "GET") req_headers:append(":scheme", "http") req_headers:append(":path", "/") req_headers:append("content-length", "2") assert(client_stream:write_headers(req_headers, false)) assert(client_stream:write_chunk("body longer than 2 bytes", true)) end) cq:wrap(function() local stream = assert(s:get_next_incoming_stream()) local ok, err = stream:get_body_as_string() assert.falsy(ok) assert.truthy(h2_error.is(err)) assert.same(h2_error.errors.PROTOCOL_ERROR.code, err.code) assert.same("content-length exceeded", err.message) assert(s:close()) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) c:close() end) describe("correct state transitions", function() it("closes a stream when writing headers to a half-closed stream", function() local s, c = new_pair() local cq = cqueues.new() cq:wrap(function() local client_stream = c:new_stream() local req_headers = new_headers() req_headers:append(":method", "GET") req_headers:append(":scheme", "http") req_headers:append(":path", "/") req_headers:append(":authority", "example.com") assert(client_stream:write_headers(req_headers, false)) assert(client_stream:get_headers()) assert(c:close()) end) cq:wrap(function() local stream = assert(s:get_next_incoming_stream()) assert(stream:get_headers()) local res_headers = new_headers() res_headers:append(":status", "200") assert(stream:write_headers(res_headers, true)) assert("closed", stream.state) assert(s:close()) 
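			-- (Descriptive note) Per RFC 7540 §5.1 a stream only reaches "closed" once
			-- both peers have sent a frame with END_STREAM set (or a RST_STREAM is
			-- exchanged); writing the response headers with end_stream=true above is
			-- the server's half of that handshake.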
end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("ignores delayed RST_STREAM on already closed stream", function() local s, c = new_pair() local cq = cqueues.new() cq:wrap(function() local client_stream = c:new_stream() local req_headers = new_headers() req_headers:append(":method", "GET") req_headers:append(":scheme", "http") req_headers:append(":path", "/") req_headers:append(":authority", "example.com") assert(client_stream:write_headers(req_headers, true)) assert(client_stream:get_headers()) assert("closed", client_stream.state) -- both sides now have stream in closed state -- send server a RST_STREAM: it should get ignored assert(client_stream:rst_stream("post-closed rst_stream")) assert(c:close()) end) cq:wrap(function() local stream = assert(s:get_next_incoming_stream()) assert(stream:get_headers()) local res_headers = new_headers() res_headers:append(":status", "200") assert(stream:write_headers(res_headers, true)) -- both sides now have stream in closed state assert("closed", stream.state) -- process incoming frames until EOF (i.e. drain RST_STREAM) -- the RST_STREAM frame should be ignored. assert(s:loop()) assert(s:close()) end) cq:wrap(function() assert(s:loop()) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) end) describe("push_promise", function() it("permits a simple push promise from server => client", function() local s, c = new_pair() local cq = cqueues.new() cq:wrap(function() local client_stream = c:new_stream() local req_headers = new_headers() req_headers:append(":method", "GET") req_headers:append(":scheme", "http") req_headers:append(":path", "/") req_headers:append(":authority", "example.com") assert(client_stream:write_headers(req_headers, true)) local pushed_stream = assert(c:get_next_incoming_stream()) do local h = assert(pushed_stream:get_headers()) assert.same("GET", h:get(":method")) assert.same("http", h:get(":scheme")) assert.same("/foo", h:get(":path")) assert.same(req_headers:get(":authority"), h:get(":authority")) assert.same(nil, pushed_stream:get_next_chunk()) end assert(c:close()) end) cq:wrap(function() local stream = assert(s:get_next_incoming_stream()) do local h = assert(stream:get_headers()) assert.same("GET", h:get(":method")) assert.same("http", h:get(":scheme")) assert.same("/", h:get(":path")) assert.same("example.com", h:get(":authority")) assert.same(nil, stream:get_next_chunk()) end local pushed_stream do local req_headers = new_headers() req_headers:append(":method", "GET") req_headers:append(":scheme", "http") req_headers:append(":path", "/foo") req_headers:append(":authority", "example.com") pushed_stream = assert(stream:push_promise(req_headers)) end do local req_headers = new_headers() req_headers:append(":status", "200") assert(pushed_stream:write_headers(req_headers, true)) end assert(s:close()) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("handles large header blocks", function() local s, c = new_pair() local cq = cqueues.new() cq:wrap(function() local client_stream = c:new_stream() local req_headers = new_headers() req_headers:append(":method", "GET") req_headers:append(":scheme", "http") req_headers:append(":path", "/") req_headers:append(":authority", "example.com") assert(client_stream:write_headers(req_headers, true)) local pushed_stream = assert(c:get_next_incoming_stream()) do local h = assert(pushed_stream:get_headers()) assert.same("GET", h:get(":method")) assert.same("http", h:get(":scheme")) assert.same("/foo", h:get(":path")) 
assert.same(req_headers:get(":authority"), h:get(":authority")) assert.same(nil, pushed_stream:get_next_chunk()) end assert(c:close()) end) cq:wrap(function() local stream = assert(s:get_next_incoming_stream()) do local h = assert(stream:get_headers()) assert.same("GET", h:get(":method")) assert.same("http", h:get(":scheme")) assert.same("/", h:get(":path")) assert.same("example.com", h:get(":authority")) assert.same(nil, stream:get_next_chunk()) end local pushed_stream do local req_headers = new_headers() req_headers:append(":method", "GET") req_headers:append(":scheme", "http") req_headers:append(":path", "/foo") req_headers:append(":authority", "example.com") req_headers:append("unknown", ("a"):rep(16384*3)) -- at least 3 frames worth pushed_stream = assert(stream:push_promise(req_headers)) end do local req_headers = new_headers() req_headers:append(":status", "200") assert(pushed_stream:write_headers(req_headers, true)) end assert(s:close()) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) end) end) lua-http-0.4/spec/headers_spec.lua000066400000000000000000000102201400726324600171740ustar00rootroot00000000000000describe("http.headers module", function() local headers = require "http.headers" it("__tostring works", function() local h = headers.new() assert.same("http.headers{", tostring(h):match("^.-%{")) end) it("multiple values can be added for same key", function() local h = headers.new() h:append("a", "a", false) h:append("a", "b", false) h:append("foo", "bar", true) h:append("a", "c", false) h:append("a", "a", true) local iter, state = h:each() assert.same({"a", "a", false}, {iter(state)}) assert.same({"a", "b", false}, {iter(state)}) assert.same({"foo", "bar", true}, {iter(state)}) assert.same({"a", "c", false}, {iter(state)}) assert.same({"a", "a", true}, {iter(state)}) end) it("entries are kept in order", function() local h = headers.new() h:append("a", "a", false) h:append("b", "b", true) h:append("c", "c", false) h:append("d", "d", true) h:append("d", "d", true) -- twice h:append("e", "e", false) local iter, state = h:each() assert.same({"a", "a", false}, {iter(state)}) assert.same({"b", "b", true}, {iter(state)}) assert.same({"c", "c", false}, {iter(state)}) assert.same({"d", "d", true}, {iter(state)}) assert.same({"d", "d", true}, {iter(state)}) assert.same({"e", "e", false}, {iter(state)}) end) it(":clone works", function() local h = headers.new() h:append("a", "a", false) h:append("b", "b", true) h:append("c", "c", false) local j = h:clone() assert.same(h, j) end) it(":has works", function() local h = headers.new() assert.same(h:has("a"), false) h:append("a", "a") assert.same(h:has("a"), true) assert.same(h:has("b"), false) end) it(":delete works", function() local h = headers.new() assert.falsy(h:delete("a")) h:append("a", "a") assert.truthy(h:has("a")) assert.truthy(h:delete("a")) assert.falsy(h:has("a")) assert.falsy(h:delete("a")) end) it(":get_comma_separated works", function() local h = headers.new() assert.same(nil, h:get_comma_separated("a")) h:append("a", "a") h:append("a", "b") h:append("a", "c") assert.same("a,b,c", h:get_comma_separated("a")) end) it(":modifyi works", function() local h = headers.new() h:append("key", "val") assert.same("val", h:get("key")) h:modifyi(1, "val") assert.same("val", h:get("key")) h:modifyi(1, "val2") assert.same("val2", h:get("key")) assert.has.errors(function() h:modifyi(2, "anything") end) end) it(":upsert works", function() local h = headers.new() h:append("a", "a", false) h:append("b", "b", true) h:append("c", 
"c", false) assert.same(3, h:len()) h:upsert("b", "foo", false) assert.same(3, h:len()) assert.same("foo", h:get("b")) h:upsert("d", "d", false) assert.same(4, h:len()) local iter, state = h:each() assert.same({"a", "a", false}, {iter(state)}) assert.same({"b", "foo", false}, {iter(state)}) assert.same({"c", "c", false}, {iter(state)}) assert.same({"d", "d", false}, {iter(state)}) end) it(":upsert fails on multi-valued field", function() local h = headers.new() h:append("a", "a") h:append("a", "b") assert.has.errors(function() h:upsert("a", "something else") end) end) it("never_index defaults to sensible boolean", function() local h = headers.new() h:append("content-type", "application/json") h:append("authorization", "supersecret") assert.same({"content-type", "application/json", false}, {h:geti(1)}) assert.same({"authorization", "supersecret", true}, {h:geti(2)}) h:upsert("authorization", "different secret") assert.same({"authorization", "different secret", true}, {h:geti(2)}) end) it(":sort works", function() -- should sort first by field name (':' first), then value, then never_index local h = headers.new() h:append("z", "1") h:append("b", "3") h:append("z", "2") h:append(":special", "!") h:append("a", "5") h:append("z", "6", true) for _=1, 2 do -- do twice to ensure consistency h:sort() assert.same({":special", "!", false}, {h:geti(1)}) assert.same({"a", "5", false}, {h:geti(2)}) assert.same({"b", "3", false}, {h:geti(3)}) assert.same({"z", "1", false}, {h:geti(4)}) assert.same({"z", "2", false}, {h:geti(5)}) assert.same({"z", "6", true }, {h:geti(6)}) end end) end) lua-http-0.4/spec/helper.lua000066400000000000000000000015361400726324600160400ustar00rootroot00000000000000TEST_TIMEOUT = 10 function assert_loop(cq, timeout) local ok, err, _, thd = cq:loop(timeout) if not ok then if thd then err = debug.traceback(thd, err) end error(err, 2) end end -- Solves https://github.com/keplerproject/luacov/issues/38 local cqueues = require "cqueues" local has_luacov, luacov_runner = pcall(require, "luacov.runner") if has_luacov then local wrap; wrap = cqueues.interpose("wrap", function(self, func, ...) func = luacov_runner.with_luacov(func) return wrap(self, func, ...) end) end -- Allow tests to pick up configured locale local locale = os.getenv("LOCALE") if locale then os.setlocale(locale) if locale ~= os.setlocale(locale) then print("Locale " .. locale .. " is not available.") os.exit(1) -- busted doesn't fail if helper script throws errors: https://github.com/Olivine-Labs/busted/issues/549 end end lua-http-0.4/spec/hpack_spec.lua000066400000000000000000000247741400726324600166720ustar00rootroot00000000000000describe("Correctly implements all examples in spec.", function() local hpack = require "http.hpack" local new_headers = require "http.headers".new local function xxd_escape(s) return (s :gsub(".", function(c) return string.format("%02x", c:byte(1,1)) end) :gsub("....", "%0 ") :gsub(" $", "") :gsub("(.......................................) 
", "%1\n") ) end local function xxd_unescape(s) return (s :gsub("[^%x]+", "") :gsub("%x%x", function(c) return string.char(tonumber(c, 16)) end) ) end it("Example C.1.1", function() assert.same("0a", xxd_escape(hpack.encode_integer(10, 5, 0))) assert.same(10, (hpack.decode_integer(xxd_unescape("0a"), 5))) end) it("Example C.1.2", function() assert.same("1f9a 0a", xxd_escape(hpack.encode_integer(1337, 5, 0))) assert.same(1337, (hpack.decode_integer(xxd_unescape("1f9a 0a"), 5))) end) it("Example C.1.3", function() assert.same("2a", xxd_escape(hpack.encode_integer(42, 8, 0))) assert.same(42, (hpack.decode_integer(xxd_unescape("2a"), 8))) end) it("Example C.2.1", function() local encoded = hpack.encode_literal_header_indexed_new("custom-key", "custom-header") assert.same("@\10custom-key\13custom-header", encoded) local h = new_headers() h:append("custom-key", "custom-header", false) assert.same(h, hpack.new():decode_headers(encoded)) end) it("Example C.2.2", function() local encoded = hpack.encode_literal_header_none(4, "/sample/path") assert.same("\04\12/sample/path", encoded) local h = new_headers() h:append(":path", "/sample/path", false) assert.same(h, hpack.new():decode_headers(encoded)) end) it("Example C.2.3", function() local encoded = hpack.encode_literal_header_never_new("password", "secret") assert.same("\16\8password\6secret", encoded) local h = new_headers() h:append("password", "secret", true) assert.same(h, hpack.new():decode_headers(encoded)) end) it("Example C.2.4", function() local encoded = hpack.encode_indexed_header(2) assert.same("\130", encoded) local h = new_headers() h:append(":method", "GET", false) assert.same(h, hpack.new():decode_headers(encoded)) end) local function check_request(enc_ctx, dec_ctx, headers, dyn_table, xxd_req) for _, v in ipairs(headers) do enc_ctx:add_header_indexed(v[1], v[2], v[3]) end assert.same(dyn_table, enc_ctx:dynamic_table_tostring()) local raw = enc_ctx:render_data() assert.same(xxd_req, xxd_escape(raw)) enc_ctx:clear_data() local decoded = dec_ctx:decode_headers(raw) assert.same(dyn_table, dec_ctx:dynamic_table_tostring()) for i, input in ipairs(headers) do local name, val = decoded:geti(i) assert.same(input[1], name) assert.same(input[2], val) end end it("Example C.3", function() local enc_ctx = hpack.new(math.huge) local dec_ctx = hpack.new(math.huge) -- C.3.1 check_request(enc_ctx, dec_ctx, { { ":method", "GET", false }; { ":scheme", "http", false }; { ":path", "/", false }; { ":authority", "www.example.com", false }; }, [[ [ 1] (s = 57) :authority: www.example.com Table size: 57]], [[ 8286 8441 0f77 7777 2e65 7861 6d70 6c65 2e63 6f6d]]) -- C.3.2 check_request(enc_ctx, dec_ctx, { { ":method", "GET", false }; { ":scheme", "http", false }; { ":path", "/", false }; { ":authority", "www.example.com", false }; { "cache-control", "no-cache", false }; }, [[ [ 1] (s = 53) cache-control: no-cache [ 2] (s = 57) :authority: www.example.com Table size: 110]], [[ 8286 84be 5808 6e6f 2d63 6163 6865]]) -- C.3.3 check_request(enc_ctx, dec_ctx, { { ":method", "GET", false }; { ":scheme", "https", false }; { ":path", "/index.html", false }; { ":authority", "www.example.com", false }; { "custom-key", "custom-value", false }; }, [[ [ 1] (s = 54) custom-key: custom-value [ 2] (s = 53) cache-control: no-cache [ 3] (s = 57) :authority: www.example.com Table size: 164]], [[ 8287 85bf 400a 6375 7374 6f6d 2d6b 6579 0c63 7573 746f 6d2d 7661 6c75 65]]) end) it("Example C.4 #huffman", function() local enc_ctx = hpack.new(math.huge) local dec_ctx = 
hpack.new(math.huge) -- C.4.1 check_request(enc_ctx, dec_ctx, { { ":method", "GET", true }; { ":scheme", "http", true }; { ":path", "/", true }; { ":authority", "www.example.com", true }; }, [[ [ 1] (s = 57) :authority: www.example.com Table size: 57]], [[ 8286 8441 8cf1 e3c2 e5f2 3a6b a0ab 90f4 ff]]) -- C.4.2 check_request(enc_ctx, dec_ctx, { { ":method", "GET", true }; { ":scheme", "http", true }; { ":path", "/", true }; { ":authority", "www.example.com", true }; { "cache-control", "no-cache", true }; }, [[ [ 1] (s = 53) cache-control: no-cache [ 2] (s = 57) :authority: www.example.com Table size: 110]], [[ 8286 84be 5886 a8eb 1064 9cbf]]) -- C.4.3 check_request(enc_ctx, dec_ctx, { { ":method", "GET", true }; { ":scheme", "https", true }; { ":path", "/index.html", true }; { ":authority", "www.example.com", true }; { "custom-key", "custom-value", true }; }, [[ [ 1] (s = 54) custom-key: custom-value [ 2] (s = 53) cache-control: no-cache [ 3] (s = 57) :authority: www.example.com Table size: 164]], [[ 8287 85bf 4088 25a8 49e9 5ba9 7d7f 8925 a849 e95b b8e8 b4bf]]) end) it("Example C.5", function() local enc_ctx = hpack.new(256) local dec_ctx = hpack.new(256) -- C.5.1 check_request(enc_ctx, dec_ctx, { { ":status", "302", false }; { "cache-control", "private", false }; { "date", "Mon, 21 Oct 2013 20:13:21 GMT", false }; { "location", "https://www.example.com", false }; }, [[ [ 1] (s = 63) location: https://www.example.com [ 2] (s = 65) date: Mon, 21 Oct 2013 20:13:21 GMT [ 3] (s = 52) cache-control: private [ 4] (s = 42) :status: 302 Table size: 222]], [[ 4803 3330 3258 0770 7269 7661 7465 611d 4d6f 6e2c 2032 3120 4f63 7420 3230 3133 2032 303a 3133 3a32 3120 474d 546e 1768 7474 7073 3a2f 2f77 7777 2e65 7861 6d70 6c65 2e63 6f6d]]) -- C.5.2 check_request(enc_ctx, dec_ctx, { { ":status", "307", false }; { "cache-control", "private", false }; { "date", "Mon, 21 Oct 2013 20:13:21 GMT", false }; { "location", "https://www.example.com", false }; }, [[ [ 1] (s = 42) :status: 307 [ 2] (s = 63) location: https://www.example.com [ 3] (s = 65) date: Mon, 21 Oct 2013 20:13:21 GMT [ 4] (s = 52) cache-control: private Table size: 222]], [[ 4803 3330 37c1 c0bf]]) -- C.5.3 check_request(enc_ctx, dec_ctx, { { ":status", "200", false }; { "cache-control", "private", false }; { "date", "Mon, 21 Oct 2013 20:13:22 GMT", false }; { "location", "https://www.example.com", false }; { "content-encoding", "gzip", false }; { "set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1", false }; }, [[ [ 1] (s = 98) set-cookie: foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age\ =3600; version=1 [ 2] (s = 52) content-encoding: gzip [ 3] (s = 65) date: Mon, 21 Oct 2013 20:13:22 GMT Table size: 215]], [[ 88c1 611d 4d6f 6e2c 2032 3120 4f63 7420 3230 3133 2032 303a 3133 3a32 3220 474d 54c0 5a04 677a 6970 7738 666f 6f3d 4153 444a 4b48 514b 425a 584f 5157 454f 5049 5541 5851 5745 4f49 553b 206d 6178 2d61 6765 3d33 3630 303b 2076 6572 7369 6f6e 3d31]]) end) it("Example C.6 #huffman", function() local enc_ctx = hpack.new(256) local dec_ctx = hpack.new(256) -- C.6.1 check_request(enc_ctx, dec_ctx, { { ":status", "302", true }; { "cache-control", "private", true }; { "date", "Mon, 21 Oct 2013 20:13:21 GMT", true }; { "location", "https://www.example.com", true }; }, [[ [ 1] (s = 63) location: https://www.example.com [ 2] (s = 65) date: Mon, 21 Oct 2013 20:13:21 GMT [ 3] (s = 52) cache-control: private [ 4] (s = 42) :status: 302 Table size: 222]], [[ 4882 6402 5885 aec3 771a 4b61 96d0 7abe 9410 54d4 44a8 2005 9504 0b81 66e0 82a6 2d1b 
ff6e 919d 29ad 1718 63c7 8f0b 97c8 e9ae 82ae 43d3]]) -- C.6.2 check_request(enc_ctx, dec_ctx, { { ":status", "307", true }; { "cache-control", "private", true }; { "date", "Mon, 21 Oct 2013 20:13:21 GMT", true }; { "location", "https://www.example.com", true }; }, [[ [ 1] (s = 42) :status: 307 [ 2] (s = 63) location: https://www.example.com [ 3] (s = 65) date: Mon, 21 Oct 2013 20:13:21 GMT [ 4] (s = 52) cache-control: private Table size: 222]], [[ 4883 640e ffc1 c0bf]]) -- C.6.3 check_request(enc_ctx, dec_ctx, { { ":status", "200", true }; { "cache-control", "private", true }; { "date", "Mon, 21 Oct 2013 20:13:22 GMT", true }; { "location", "https://www.example.com", true }; { "content-encoding", "gzip", true }; { "set-cookie", "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1", true }; }, [[ [ 1] (s = 98) set-cookie: foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age\ =3600; version=1 [ 2] (s = 52) content-encoding: gzip [ 3] (s = 65) date: Mon, 21 Oct 2013 20:13:22 GMT Table size: 215]], [[ 88c1 6196 d07a be94 1054 d444 a820 0595 040b 8166 e084 a62d 1bff c05a 839b d9ab 77ad 94e7 821d d7f2 e6c7 b335 dfdf cd5b 3960 d5af 2708 7f36 72c1 ab27 0fb5 291f 9587 3160 65c0 03ed 4ee5 b106 3d50 07]]) end) end) describe("Partial input is returned with correct offset", function() local hpack = require "http.hpack" it("decodes integers without errors", function() -- Empty string should return nil assert.is._nil(hpack.decode_integer("", 7, 1)) -- Encode a large number and trim off last character local s = hpack.encode_integer(2^20, 7, 0) s = s:sub(1, -2) assert.is._nil(hpack.decode_integer(s, 7, 1)) end) it("decodes strings without errors", function() -- Empty string should return nil assert.is._nil(hpack.decode_string("")) -- Encode a large string and trim off last character local s1 = hpack.encode_string("this is a test", false) s1 = s1:sub(1, -2) assert.is._nil(hpack.decode_string(s1)) -- with huffman local s2 = hpack.encode_string("this is a test", true) s2 = s2:sub(1, -2) assert.is._nil(hpack.decode_string(s2)) end) it("decodes partial headers without errors", function() local h = hpack.new() -- empty string should do nothing assert.same(1, select(2, h:decode_headers(""))) -- trim off last character local s1 do local e = hpack.new() e:add_header_indexed("foo", "bar") s1 = e:render_data() end assert.same(1, select(2, h:decode_headers(s1:sub(1, -2)))) -- try again but this time with two headers local s2 do local e = hpack.new() e:add_header_indexed("foo", "bar") e:add_header_indexed("baz", "qux") s2 = e:render_data() end assert.same(#s1+1, select(2, h:decode_headers(s2:sub(1, -2)))) end) end) lua-http-0.4/spec/hsts_spec.lua000066400000000000000000000070761400726324600165610ustar00rootroot00000000000000describe("hsts module", function() local http_hsts = require "http.hsts" it("doesn't store ip addresses", function() local s = http_hsts.new_store() assert.falsy(s:store("127.0.0.1", { ["max-age"] = "100"; })) assert.falsy(s:check("127.0.0.1")) end) it("can be cloned", function() local s = http_hsts.new_store() do local clone = s:clone() local old_heap = s.expiry_heap s.expiry_heap = nil clone.expiry_heap = nil assert.same(s, clone) s.expiry_heap = old_heap end assert.truthy(s:store("foo.example.com", { ["max-age"] = "100"; })) do local clone = s:clone() local old_heap = s.expiry_heap s.expiry_heap = nil clone.expiry_heap = nil assert.same(s, clone) s.expiry_heap = old_heap end local clone = s:clone() assert.truthy(s:check("foo.example.com")) assert.truthy(clone:check("foo.example.com")) end) it("rejects 
:store() when max-age directive is missing", function() local s = http_hsts.new_store() assert.falsy(s:store("foo.example.com", {})) assert.falsy(s:check("foo.example.com")) end) it("rejects :store() when max-age directive is invalid", function() local s = http_hsts.new_store() assert.falsy(s:store("foo.example.com", { ["max-age"] = "-1"; })) assert.falsy(s:check("foo.example.com")) end) it("erases on max-age == 0", function() local s = http_hsts.new_store() assert.truthy(s:store("foo.example.com", { ["max-age"] = "100"; })) assert.truthy(s:check("foo.example.com")) assert.truthy(s:store("foo.example.com", { ["max-age"] = "0"; })) assert.falsy(s:check("foo.example.com")) end) it("respects includeSubdomains", function() local s = http_hsts.new_store() assert(s:store("foo.example.com", { ["max-age"] = "100"; includeSubdomains = true; })) assert.truthy(s:check("foo.example.com")) assert.truthy(s:check("qaz.bar.foo.example.com")) assert.falsy(s:check("example.com")) assert.falsy(s:check("other.com")) end) it("removes expired entries on :clean()", function() local s = http_hsts.new_store() assert(s:store("foo.example.com", { ["max-age"] = "100"; })) assert(s:store("other.com", { ["max-age"] = "200"; })) assert(s:store("keep.me", { ["max-age"] = "100000"; })) -- Set clock forward local now = s.time() s.time = function() return now+1000 end assert.truthy(s:clean()) assert.falsy(s:check("qaz.bar.foo.example.com")) assert.falsy(s:check("foo.example.com")) assert.falsy(s:check("example.com")) assert.truthy(s:check("keep.me")) end) it("cleans out expired entries automatically", function() local s = http_hsts.new_store() assert(s:store("foo.example.com", { ["max-age"] = "100"; })) assert(s:store("other.com", { ["max-age"] = "200"; })) assert(s:store("keep.me", { ["max-age"] = "100000"; })) -- Set clock forward local now = s.time() s.time = function() return now+1000 end assert.falsy(s:check("qaz.bar.foo.example.com")) -- Set clock back to current; everything should have been cleaned out already. s.time = function() return now end assert.falsy(s:check("foo.example.com")) assert.falsy(s:check("example.com")) assert.truthy(s:check("keep.me")) end) it("enforces .max_items", function() local s = http_hsts.new_store() s.max_items = 0 assert.falsy(s:store("example.com", { ["max-age"] = "100"; })) s.max_items = 1 assert.truthy(s:store("example.com", { ["max-age"] = "100"; })) assert.falsy(s:store("other.com", { ["max-age"] = "100"; })) s:remove("example.com", "/", "foo") assert.truthy(s:store("other.com", { ["max-age"] = "100"; })) end) end) lua-http-0.4/spec/path_spec.lua000066400000000000000000000046431400726324600165310ustar00rootroot00000000000000describe("Relative path resolution", function() local resolve_relative_path = require "http.util".resolve_relative_path it("should resolve .. 
correctly", function() assert.same("/foo", resolve_relative_path("/", "foo")) assert.same("/foo", resolve_relative_path("/", "./foo")) assert.same("/foo", resolve_relative_path("/", "../foo")) assert.same("/foo", resolve_relative_path("/", "../foo/../foo")) assert.same("/foo", resolve_relative_path("/", "foo/bar/..")) assert.same("/foo/", resolve_relative_path("/", "foo/bar/../")) assert.same("/foo/", resolve_relative_path("/", "foo/bar/../")) assert.same("/", resolve_relative_path("/", "../..")) assert.same("/", resolve_relative_path("/", "../../")) assert.same("/bar", resolve_relative_path("/foo/", "../bar")) assert.same("bar", resolve_relative_path("foo/", "../bar")) assert.same("bar/", resolve_relative_path("foo/", "../bar/")) end) it("should ignore .", function() assert.same("/", resolve_relative_path("/", ".")) assert.same("/", resolve_relative_path("/", "./././.")) assert.same("/", resolve_relative_path("/", "././././")) assert.same("/foo/bar/", resolve_relative_path("/foo/", "bar/././././")) end) it("should keep leading and trailing /", function() assert.same("/foo/", resolve_relative_path("/foo/", "./")) assert.same("foo/", resolve_relative_path("foo/", "./")) assert.same("/foo", resolve_relative_path("/foo/", ".")) assert.same("foo", resolve_relative_path("foo/", ".")) end) it("an absolute path as 2nd arg should be resolved", function() assert.same("/foo", resolve_relative_path("ignored", "/foo")) assert.same("/foo", resolve_relative_path("ignored", "/foo/./.")) assert.same("/foo", resolve_relative_path("ignored", "/foo/bar/..")) assert.same("/foo", resolve_relative_path("ignored", "/foo/bar/qux/./../././..")) assert.same("/foo/", resolve_relative_path("ignored", "/foo/././")) end) it("cannot go above root level", function() assert.same("/bar", resolve_relative_path("/", "../bar")) assert.same("/bar", resolve_relative_path("/foo", "../../../../bar")) assert.same("/bar", resolve_relative_path("/foo", "./../../../../bar")) assert.same("/", resolve_relative_path("/foo", "./../../../../")) assert.same("/", resolve_relative_path("/", "..")) assert.same("", resolve_relative_path("", "..")) assert.same("", resolve_relative_path("", "./..")) assert.same("bar", resolve_relative_path("", "../bar")) end) end) lua-http-0.4/spec/proxies_spec.lua000066400000000000000000000043231400726324600172610ustar00rootroot00000000000000describe("http.proxies module", function() local http_proxies = require "http.proxies" it("works", function() local proxies = http_proxies.new():update(function(k) return ({ http_proxy = "http://http.proxy"; https_proxy = "http://https.proxy"; all_proxy = "http://all.proxy"; no_proxy = nil; })[k] end) assert.same({ http_proxy = "http://http.proxy"; https_proxy = "http://https.proxy"; all_proxy = "http://all.proxy"; no_proxy = nil; }, proxies) assert.same("http://http.proxy", proxies:choose("http", "myhost")) assert.same("http://https.proxy", proxies:choose("https", "myhost")) assert.same("http://all.proxy", proxies:choose("other", "myhost")) end) it("isn't vulnerable to httpoxy", function() assert.same({}, http_proxies.new():update(function(k) return ({ GATEWAY_INTERFACE = "CGI/1.1"; http_proxy = "vulnerable to httpoxy"; })[k] end)) end) it("works with no_proxy set to *", function() local proxies = http_proxies.new():update(function(k) return ({ http_proxy = "http://http.proxy"; https_proxy = "http://https.proxy"; all_proxy = "http://all.proxy"; no_proxy = "*"; })[k] end) -- Should return nil due to no_proxy being * assert.same(nil, proxies:choose("http", "myhost")) 
assert.same(nil, proxies:choose("https", "myhost")) assert.same(nil, proxies:choose("other", "myhost")) end) it("works with a no_proxy set", function() local proxies = http_proxies.new():update(function(k) return ({ http_proxy = "http://http.proxy"; no_proxy = "foo,bar.com,.extra.dot.com"; })[k] end) assert.same("http://http.proxy", proxies:choose("http", "myhost")) assert.is.table(proxies.no_proxy) assert.same(nil, proxies:choose("http", "foo")) assert.same(nil, proxies:choose("http", "bar.com")) assert.same(nil, proxies:choose("http", "subdomain.bar.com")) assert.same(nil, proxies:choose("http", "sub.sub.subdomain.bar.com")) assert.same(nil, proxies:choose("http", "someting.foo")) assert.same("http://http.proxy", proxies:choose("http", "else.com")) assert.same(nil, proxies:choose("http", "more.extra.dot.com")) assert.same(nil, proxies:choose("http", "extra.dot.com")) assert.same("http://http.proxy", proxies:choose("http", "dot.com")) end) end) lua-http-0.4/spec/request_spec.lua000066400000000000000000001141621400726324600172630ustar00rootroot00000000000000describe("http.request module", function() local request = require "http.request" local http_util = require "http.util" it("can construct a request from a uri", function() do -- http url; no path local req = request.new_from_uri("http://example.com") assert.same("example.com", req.host) assert.same(80, req.port) assert.falsy(req.tls) assert.same("example.com", req.headers:get ":authority") assert.same("GET", req.headers:get ":method") assert.same("/", req.headers:get ":path") assert.same("http", req.headers:get ":scheme") assert.same(nil, req.body) end do -- https local req = request.new_from_uri("https://example.com/path?query") assert.same("example.com", req.host) assert.same(443, req.port) assert.truthy(req.tls) assert.same("example.com", req.headers:get ":authority") assert.same("GET", req.headers:get ":method") assert.same("/path?query", req.headers:get ":path") assert.same("https", req.headers:get ":scheme") assert.same(nil, req.body) end do -- needs url normalisation local req = request.new_from_uri("HTTP://exaMple.com/1%323%2f45?foo=ba%26r&another=more") assert.same("example.com", req.host) assert.same(80, req.port) assert.falsy(req.tls) assert.same("example.com", req.headers:get ":authority") assert.same("GET", req.headers:get ":method") assert.same("/123%2F45?foo=ba%26r&another=more", req.headers:get ":path") assert.same("http", req.headers:get ":scheme") assert.same(nil, req.body) end do -- with userinfo section local basexx = require "basexx" local req = request.new_from_uri("https://user:password@example.com/") assert.same("example.com", req.host) assert.same(443, req.port) assert.truthy(req.tls) assert.same("example.com", req.headers:get ":authority") assert.same("GET", req.headers:get ":method") assert.same("/", req.headers:get ":path") assert.same("https", req.headers:get ":scheme") assert.same("user:password", basexx.from_base64(req.headers:get "authorization":match "^basic%s+(.*)")) assert.same(nil, req.body) end end) it("can construct a request with custom proxies object", function() local http_proxies = require "http.proxies" -- No proxies local proxies = http_proxies.new():update(function() end) local req = request.new_from_uri("http://example.com", nil, proxies) assert.same("example.com", req.host) assert.same(80, req.port) assert.falsy(req.tls) assert.same("example.com", req.headers:get ":authority") assert.same("GET", req.headers:get ":method") assert.same("/", req.headers:get ":path") assert.same("http", 
req.headers:get ":scheme") assert.same(nil, req.body) end) it("can construct a CONNECT request", function() do -- http url; no path local req = request.new_connect("http://example.com", "connect.me") assert.same("example.com", req.host) assert.same(80, req.port) assert.falsy(req.tls) assert.same("connect.me", req.headers:get ":authority") assert.same("CONNECT", req.headers:get ":method") assert.falsy(req.headers:has ":path") assert.falsy(req.headers:has ":scheme") assert.same(nil, req.body) end do -- https local req = request.new_connect("https://example.com", "connect.me:1234") assert.same("example.com", req.host) assert.same(443, req.port) assert.truthy(req.tls) assert.same("connect.me:1234", req.headers:get ":authority") assert.same("CONNECT", req.headers:get ":method") assert.falsy(req.headers:has ":path") assert.falsy(req.headers:has ":scheme") assert.same(nil, req.body) end do -- with userinfo section local basexx = require "basexx" local req = request.new_connect("https://user:password@example.com", "connect.me") assert.same("example.com", req.host) assert.same(443, req.port) assert.truthy(req.tls) assert.same("connect.me", req.headers:get ":authority") assert.same("CONNECT", req.headers:get ":method") assert.falsy(req.headers:has ":path") assert.falsy(req.headers:has ":scheme") assert.same("user:password", basexx.from_base64(req.headers:get "proxy-authorization":match "^basic%s+(.*)")) assert.same(nil, req.body) end do -- anything with a path should fail assert.has.errors(function() request.new_connect("http://example.com/") end) assert.has.errors(function() request.new_connect("http://example.com/path") end) end end) it("fails on invalid URIs", function() assert.has.errors(function() request.new_from_uri("not a URI") end) -- no scheme assert.has.errors(function() request.new_from_uri("example.com") end) -- trailing junk assert.has.errors(function() request.new_from_uri("example.com/foo junk.") end) end) it("can (sometimes) roundtrip via :to_uri()", function() local function test(uri) local req = request.new_from_uri(uri) assert.same(uri, req:to_uri(true)) end test("http://example.com/") test("https://example.com/") test("https://example.com:1234/") test("http://foo:bar@example.com:1234/path?query") test("https://fo%20o:ba%20r@example.com:1234/path%20spaces") end) it(":to_uri() throws on un-coerable authorization", function() assert.has.errors(function() local req = request.new_from_uri("http://example.com/") req.headers:upsert("authorization", "singletoken") req:to_uri(true) end) assert.has.errors(function() local req = request.new_from_uri("http://example.com/") req.headers:upsert("authorization", "can't go in a uri") req:to_uri(true) end) assert.has.errors(function() local req = request.new_from_uri("http://example.com/") req.headers:upsert("authorization", "basic trailing data") req:to_uri(true) end) assert.has.errors(function() local req = request.new_from_uri("http://example.com/") req.headers:upsert("authorization", "bearer data") req:to_uri(true) end) end) it("handles CONNECT requests in :to_uri()", function() local function test(uri) local req = request.new_connect(uri, "connect.me") assert.same(uri, req:to_uri(true)) end test("http://example.com") test("https://example.com") test("https://example.com:1234") test("https://foo:bar@example.com:1234") assert.has.errors(function() test("https://example.com/path") end) end) it(":set_body sets content-length for string arguments", function() local req = request.new_from_uri("http://example.com") 
assert.falsy(req.headers:has("content-length")) local str = "a string" req:set_body(str) assert.same(string.format("%d", #str), req.headers:get("content-length")) end) it(":set_body sets expect 100-continue for file arguments", function() local req = request.new_from_uri("http://example.com") assert.falsy(req.headers:has("expect")) req:set_body(io.tmpfile()) assert.same("100-continue", req.headers:get("expect")) end) describe(":handle_redirect method", function() local headers = require "http.headers" it("works", function() local orig_req = request.new_from_uri("http://example.com") local orig_headers = headers.new() orig_headers:append(":status", "301") orig_headers:append("location", "/foo") local new_req = orig_req:handle_redirect(orig_headers) -- same assert.same(orig_req.host, new_req.host) assert.same(orig_req.port, new_req.port) assert.same(orig_req.tls, new_req.tls) assert.same(orig_req.headers:get ":authority", new_req.headers:get ":authority") assert.same(orig_req.headers:get ":method", new_req.headers:get ":method") assert.same(orig_req.headers:get ":scheme", new_req.headers:get ":scheme") assert.same(orig_req.body, new_req.body) -- different assert.same("/foo", new_req.headers:get ":path") assert.same(orig_req.max_redirects-1, new_req.max_redirects) end) it("works with cross-scheme port-less uri", function() local orig_req = request.new_from_uri("http://example.com") local orig_headers = headers.new() orig_headers:append(":status", "302") orig_headers:append("location", "https://blah.com/example") local new_req = orig_req:handle_redirect(orig_headers) -- same assert.same(orig_req.headers:get ":method", new_req.headers:get ":method") assert.same(orig_req.body, new_req.body) -- different assert.same(false, orig_req.tls) assert.same(true, new_req.tls) assert.same("https", new_req.headers:get ":scheme") assert.same("blah.com", new_req.host) assert.same(80, orig_req.port) assert.same(443, new_req.port) assert.same("blah.com", new_req.headers:get ":authority") assert.same("/example", new_req.headers:get ":path") assert.same(orig_req.max_redirects-1, new_req.max_redirects) end) it("works with scheme relative uri with just domain", function() local orig_req = request.new_from_uri("http://example.com") local orig_headers = headers.new() orig_headers:append(":status", "302") orig_headers:append("location", "//blah.com") local new_req = orig_req:handle_redirect(orig_headers) -- same assert.same(orig_req.port, new_req.port) assert.same(orig_req.tls, new_req.tls) assert.same(orig_req.headers:get ":method", new_req.headers:get ":method") assert.same(orig_req.headers:get ":scheme", new_req.headers:get ":scheme") assert.same(orig_req.body, new_req.body) -- different assert.same("blah.com", new_req.host) assert.same("blah.com", new_req.headers:get ":authority") assert.same("/", new_req.headers:get ":path") assert.same(orig_req.max_redirects-1, new_req.max_redirects) end) it("works with scheme relative uri", function() local orig_req = request.new_from_uri("http://example.com") local orig_headers = headers.new() orig_headers:append(":status", "302") orig_headers:append("location", "//blah.com:1234/example") local new_req = orig_req:handle_redirect(orig_headers) -- same assert.same(orig_req.tls, new_req.tls) assert.same(orig_req.headers:get ":method", new_req.headers:get ":method") assert.same(orig_req.headers:get ":scheme", new_req.headers:get ":scheme") assert.same(orig_req.body, new_req.body) -- different assert.same("blah.com", new_req.host) assert.same(1234, new_req.port) 
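-- A scheme-relative location ("//host:port/path") keeps the original scheme (and hence the
-- tls flag) but replaces the whole authority, so host, port and :authority all change.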
assert.same("blah.com:1234", new_req.headers:get ":authority") assert.same("/example", new_req.headers:get ":path") assert.same(orig_req.max_redirects-1, new_req.max_redirects) end) it("adds authorization headers for redirects with userinfo", function() local basexx = require "basexx" local orig_req = request.new_from_uri("http://example.com") local orig_headers = headers.new() orig_headers:append(":status", "302") orig_headers:append("location", "http://user:passwd@blah.com/") local new_req = orig_req:handle_redirect(orig_headers) -- same assert.same(orig_req.port, new_req.port) assert.same(orig_req.tls, new_req.tls) assert.same(orig_req.headers:get ":method", new_req.headers:get ":method") assert.same(orig_req.headers:get ":scheme", new_req.headers:get ":scheme") assert.same(orig_req.body, new_req.body) -- different assert.same("blah.com", new_req.host) assert.same("blah.com", new_req.headers:get ":authority") assert.same("/", new_req.headers:get ":path") assert.same("basic " .. basexx.to_base64("user:passwd"), new_req.headers:get("authorization")) assert.same(orig_req.max_redirects-1, new_req.max_redirects) end) it("simplifies relative paths", function() local orig_req = request.new_from_uri("http://example.com/foo/test") local orig_headers = headers.new() orig_headers:append(":status", "302") orig_headers:append("location", "../bar") local new_req = orig_req:handle_redirect(orig_headers) -- same assert.same(orig_req.host, new_req.host) assert.same(orig_req.port, new_req.port) assert.same(orig_req.tls, new_req.tls) assert.same(orig_req.headers:get ":method", new_req.headers:get ":method") assert.same(orig_req.headers:get ":scheme", new_req.headers:get ":scheme") assert.same(orig_req.body, new_req.body) -- different assert.same("example.com", new_req.headers:get ":authority") assert.same("/bar", new_req.headers:get ":path") assert.same(orig_req.max_redirects-1, new_req.max_redirects) end) it("rejects relative redirects when base is invalid", function() local ce = require "cqueues.errno" local orig_req = request.new_from_uri("http://example.com") orig_req.headers:upsert(":path", "^") local orig_headers = headers.new() orig_headers:append(":status", "302") orig_headers:append("location", "../path") assert.same({nil, "base path not valid for relative redirect", ce.EINVAL}, {orig_req:handle_redirect(orig_headers)}) end) it("works with query in uri", function() local orig_req = request.new_from_uri("http://example.com/path?query") local orig_headers = headers.new() orig_headers:append(":status", "301") orig_headers:append("location", "/foo?anotherquery") local new_req = orig_req:handle_redirect(orig_headers) -- same assert.same(orig_req.host, new_req.host) assert.same(orig_req.port, new_req.port) assert.same(orig_req.tls, new_req.tls) assert.same(orig_req.headers:get ":authority", new_req.headers:get ":authority") assert.same(orig_req.headers:get ":method", new_req.headers:get ":method") assert.same(orig_req.headers:get ":scheme", new_req.headers:get ":scheme") assert.same(orig_req.body, new_req.body) -- different assert.same("/foo?anotherquery", new_req.headers:get ":path") assert.same(orig_req.max_redirects-1, new_req.max_redirects) end) it("detects maximum redirects exceeded", function() local ce = require "cqueues.errno" local orig_req = request.new_from_uri("http://example.com") orig_req.max_redirects = 0 local orig_headers = headers.new() orig_headers:append(":status", "302") orig_headers:append("location", "/") assert.same({nil, "maximum redirects exceeded", ce.ELOOP}, 
{orig_req:handle_redirect(orig_headers)}) end) it("detects missing location header", function() local ce = require "cqueues.errno" local orig_req = request.new_from_uri("http://example.com") local orig_headers = headers.new() orig_headers:append(":status", "302") assert.same({nil, "missing location header for redirect", ce.EINVAL}, {orig_req:handle_redirect(orig_headers)}) end) it("detects invalid location header", function() local ce = require "cqueues.errno" local orig_req = request.new_from_uri("http://example.com") local orig_headers = headers.new() orig_headers:append(":status", "302") orig_headers:append("location", "this isn't valid") assert.same({nil, "invalid URI in location header", ce.EINVAL}, {orig_req:handle_redirect(orig_headers)}) end) it("fails on unknown scheme", function() local ce = require "cqueues.errno" local orig_req = request.new_from_uri("http://example.com") local orig_headers = headers.new() orig_headers:append(":status", "302") orig_headers:append("location", "mycoolscheme://blah.com:1234/example") assert.same({nil, "unknown scheme", ce.EINVAL}, {orig_req:handle_redirect(orig_headers)}) end) it("detects POST => GET transformation", function() local orig_req = request.new_from_uri("http://example.com") orig_req.headers:upsert(":method", "POST") orig_req.headers:upsert("content-type", "text/plain") orig_req:set_body(("foo"):rep(1000)) -- make sure it's big enough to automatically add an "expect" header local orig_headers = headers.new() orig_headers:append(":status", "303") orig_headers:append("location", "/foo") local new_req = orig_req:handle_redirect(orig_headers) -- same assert.same(orig_req.host, new_req.host) assert.same(orig_req.port, new_req.port) assert.same(orig_req.tls, new_req.tls) assert.same(orig_req.headers:get ":authority", new_req.headers:get ":authority") assert.same(orig_req.headers:get ":scheme", new_req.headers:get ":scheme") -- different assert.same("GET", new_req.headers:get ":method") assert.same("/foo", new_req.headers:get ":path") assert.falsy(new_req.headers:get "expect") assert.falsy(new_req.headers:has "content-type") assert.same(nil, new_req.body) assert.same(orig_req.max_redirects-1, new_req.max_redirects) end) it("deletes keeps original custom host, port and sendname if relative", function() local orig_req = request.new_from_uri("http://example.com") orig_req.host = "other.com" orig_req.sendname = "something.else" local orig_headers = headers.new() orig_headers:append(":status", "301") orig_headers:append("location", "/foo") local new_req = orig_req:handle_redirect(orig_headers) -- same assert.same(orig_req.host, new_req.host) assert.same(orig_req.port, new_req.port) assert.same(orig_req.tls, new_req.tls) assert.same(orig_req.sendname, new_req.sendname) assert.same(orig_req.headers:get ":authority", new_req.headers:get ":authority") assert.same(orig_req.headers:get ":method", new_req.headers:get ":method") assert.same(orig_req.headers:get ":scheme", new_req.headers:get ":scheme") assert.same(orig_req.body, new_req.body) -- different assert.same("/foo", new_req.headers:get ":path") assert.same(orig_req.max_redirects-1, new_req.max_redirects) end) it("removes referer header on https => http redirect", function() local orig_req = request.new_from_uri("https://example.com") local orig_headers = headers.new() orig_headers:append(":status", "301") orig_headers:append("location", "http://blah.com/foo") local new_req = orig_req:handle_redirect(orig_headers) -- same assert.same(orig_req.headers:get ":method", new_req.headers:get ":method") 
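-- The manually overridden host and sendname (the TLS server name to send) are deliberately
-- preserved across a same-origin relative redirect; only :path and max_redirects change.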
assert.same(orig_req.body, new_req.body) -- different assert.same("blah.com", new_req.host) assert.same(80, new_req.port) assert.same(false, new_req.tls) assert.same("http", new_req.headers:get ":scheme") assert.same("blah.com", new_req.headers:get ":authority") assert.same("/foo", new_req.headers:get ":path") assert.falsy(new_req.headers:has "referer") assert.same(orig_req.max_redirects-1, new_req.max_redirects) end) it("doesn't attach userinfo to referer header", function() local orig_req = request.new_from_uri("http://user:passwd@example.com") local orig_headers = headers.new() orig_headers:append(":status", "301") orig_headers:append("location", "/foo") local new_req = orig_req:handle_redirect(orig_headers) -- same assert.same(orig_req.host, new_req.host) assert.same(orig_req.port, new_req.port) assert.same(orig_req.tls, new_req.tls) assert.same(orig_req.headers:get ":authority", new_req.headers:get ":authority") assert.same(orig_req.headers:get ":method", new_req.headers:get ":method") assert.same(orig_req.headers:get ":scheme", new_req.headers:get ":scheme") assert.same(orig_req.body, new_req.body) -- different assert.same("/foo", new_req.headers:get ":path") assert.same(orig_req.max_redirects-1, new_req.max_redirects) assert.same("http://example.com/", new_req.headers:get "referer") end) it("works with CONNECT requests", function() local orig_req = request.new_connect("http://example.com", "connect.me") local orig_headers = headers.new() orig_headers:append(":status", "302") orig_headers:append("location", "http://other.com") local new_req = orig_req:handle_redirect(orig_headers) -- same assert.same(orig_req.port, new_req.port) assert.same(orig_req.tls, new_req.tls) assert.falsy(new_req.headers:has ":path") assert.same(orig_req.headers:get ":authority", new_req.headers:get ":authority") assert.same(orig_req.headers:get ":method", new_req.headers:get ":method") assert.falsy(new_req.headers:has ":scheme") assert.same(nil, new_req.body) -- different assert.same("other.com", new_req.host) assert.same(orig_req.max_redirects-1, new_req.max_redirects) end) it("rejects invalid CONNECT redirects", function() local ce = require "cqueues.errno" local orig_req = request.new_connect("http://example.com", "connect.me") local orig_headers = headers.new() orig_headers:append(":status", "302") orig_headers:append("location", "/path") assert.same({nil, "CONNECT requests cannot have a path", ce.EINVAL}, {orig_req:handle_redirect(orig_headers)}) orig_headers:upsert("location", "?query") assert.same({nil, "CONNECT requests cannot have a query", ce.EINVAL}, {orig_req:handle_redirect(orig_headers)}) end) end) describe(":go method", function() local cqueues = require "cqueues" local server = require "http.server" local new_headers = require "http.headers".new local http_tls = require "http.tls" local openssl_ctx = require "openssl.ssl.context" local non_verifying_tls_context = http_tls.new_client_context() non_verifying_tls_context:setVerify(openssl_ctx.VERIFY_NONE) local function test(server_cb, client_cb) local cq = cqueues.new() local s = assert(server.listen { host = "localhost"; port = 0; onstream = function(s, stream) local keep_going = server_cb(stream, s) stream:shutdown() stream.connection:shutdown() if not keep_going then s:close() end end; }) assert(s:listen()) local _, host, port = s:localname() cq:wrap(function() assert_loop(s) end) cq:wrap(function() local req = request.new_from_uri { scheme = "http"; host = host; port = port; } req.ctx = non_verifying_tls_context; client_cb(req) end) 
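-- Run the server and client coroutines to completion; assert_loop (from spec/helper.lua)
-- re-raises any error from inside the loop with a traceback so busted reports the failure.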
assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end it("works with local server", function() test(function(stream) assert(stream:get_headers()) local resp_headers = new_headers() resp_headers:append(":status", "200") assert(stream:write_headers(resp_headers, false)) assert(stream:write_chunk("hello world", true)) end, function(req) local headers, stream = assert(req:go()) assert.same("200", headers:get(":status")) assert.same("hello world", assert(stream:get_body_as_string())) stream:shutdown() end) end) it("waits for 100-continue before sending body", function() local has_sent_continue = false test(function(stream) assert(stream:get_headers()) cqueues.sleep(0.1) assert(stream:write_continue()) has_sent_continue = true assert.same("foo", assert(stream:get_body_as_string())) local resp_headers = new_headers() resp_headers:append(":status", "204") assert(stream:write_headers(resp_headers, true)) end, function(req) req:set_body(coroutine.wrap(function() assert.truthy(has_sent_continue) coroutine.yield("foo") end)) local headers, stream = assert(req:go()) assert.same("204", headers:get(":status")) stream:shutdown() end) end) it("continues (eventually) if there is no 100-continue", function() test(function(stream) assert(stream:get_headers()) assert.same("foo", assert(stream:get_body_as_string())) local resp_headers = new_headers() resp_headers:append(":status", "204") assert(stream:write_headers(resp_headers, true)) end, function(req) req.expect_100_timeout = 0.2 req:set_body(coroutine.wrap(function() coroutine.yield("foo") end)) local headers, stream = assert(req:go()) assert.same("204", headers:get(":status")) stream:shutdown() end) end) it("skips sending body if expect set and no 100 received", function() test(function(stream) assert(stream:get_headers()) local resp_headers = new_headers() resp_headers:append(":status", "500") assert(stream:write_headers(resp_headers, true)) end, function(req) local body = spy.new(function() end) req:set_body(body) local headers, stream = assert(req:go()) assert.same("500", headers:get(":status")) assert.spy(body).was_not.called() stream:shutdown() end) end) it("works with file body", function() local file = assert(io.tmpfile()) assert(file:write("hello world")) test(function(stream) assert(stream:get_headers()) assert(stream:write_continue()) assert.same("hello world", assert(stream:get_body_as_string())) local resp_headers = new_headers() resp_headers:append(":status", "200") assert(stream:write_headers(resp_headers, false)) assert(stream:write_chunk("goodbye world", true)) end, function(req) req:set_body(file) local headers, stream = assert(req:go()) assert.same("200", headers:get(":status")) assert.same("goodbye world", assert(stream:get_body_as_string())) stream:shutdown() end) end) it("follows redirects", function() local n = 0 test(function(stream) n = n + 1 if n == 1 then local h = assert(stream:get_headers()) assert.same("/", h:get(":path")) local resp_headers = new_headers() resp_headers:append(":status", "302") resp_headers:append("location", "/foo") assert(stream:write_headers(resp_headers, true)) return true elseif n == 2 then local h = assert(stream:get_headers()) assert.same("/foo", h:get(":path")) local resp_headers = new_headers() resp_headers:append(":status", "200") assert(stream:write_headers(resp_headers, false)) assert(stream:write_chunk("hello world", true)) end end, function(req) local headers, stream = assert(req:go()) assert.same("200", headers:get(":status")) assert.same("hello world", assert(stream:get_body_as_string())) 
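-- req:go() handles the 302 internally (bounded by req.max_redirects), so the client side of
-- this test only ever sees the final 200 response.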
stream:shutdown() end) end) it("works with a proxy server", function() test(function(stream) local h = assert(stream:get_headers()) local _, host, port = stream:localname() local authority = http_util.to_authority(host, port, "http") assert.same(authority, h:get ":authority") assert.same("http://" .. authority .. "/", h:get(":path")) local resp_headers = new_headers() resp_headers:append(":status", "200") assert(stream:write_headers(resp_headers, false)) assert(stream:write_chunk("hello world", true)) end, function(req) req.proxy = { scheme = "http"; host = req.host; port = req.port; } local headers, stream = assert(req:go()) assert.same("200", headers:get(":status")) assert.same("hello world", assert(stream:get_body_as_string())) stream:shutdown() end) end) it("works with a proxy server with a path component", function() test(function(stream) local h = assert(stream:get_headers()) local _, host, port = stream:localname() local authority = http_util.to_authority(host, port, "http") assert.same(authority, h:get ":authority") assert.same("http://" .. authority .. "/", h:get(":path")) local resp_headers = new_headers() resp_headers:append(":status", "200") assert(stream:write_headers(resp_headers, false)) assert(stream:write_chunk("hello world", true)) end, function(req) req.proxy = { scheme = "http"; host = req.host; port = req.port; path = "/path"; } local headers, stream = assert(req:go()) assert.same("200", headers:get(":status")) assert.same("hello world", assert(stream:get_body_as_string())) stream:shutdown() end) end) it("works with http proxies on OPTIONS requests", function() test(function(stream) local h = assert(stream:get_headers()) assert.same("OPTIONS", h:get ":method") local _, host, port = stream:localname() assert.same("http://" .. http_util.to_authority(host, port, "http"), h:get(":path")) stream:shutdown() end, function(req) req.headers:upsert(":method", "OPTIONS") req.headers:upsert(":path", "*") req.proxy = { scheme = "http"; host = req.host; port = req.port; } local _, stream = assert(req:go()) stream:shutdown() end) end) it("adds proxy-authorization header", function() local basexx = require "basexx" test(function(stream) local h = assert(stream:get_headers()) assert.same("basic " ..basexx.to_base64("user:pass"), h:get "proxy-authorization") stream:shutdown() end, function(req) req.proxy = { scheme = "http"; host = req.host; port = req.port; userinfo = "user:pass"; } local _, stream = assert(req:go()) stream:shutdown() end) end) it(":handle_redirect doesn't drop proxy use within a domain", function() test(function(stream) local h = assert(stream:get_headers()) local _, host, port = stream:localname() local authority = http_util.to_authority(host, port, "http") assert.same(authority, h:get ":authority") assert.same("http://" .. authority .. 
"/foo", h:get(":path")) stream:shutdown() end, function(req) req.proxy = { scheme = "http"; host = req.host; port = req.port; userinfo = "user:pass"; } local orig_headers = new_headers() orig_headers:append(":status", "302") orig_headers:append("location", "/foo") local new_req = req:handle_redirect(orig_headers) local _, stream = assert(new_req:go()) stream:shutdown() end) end) it("CONNECT proxy", function() test(function(stream, s) local h = assert(stream:get_headers()) local resp_headers = new_headers() resp_headers:append(":status", "200") assert(stream:write_headers(resp_headers, false)) if h:get(":method") == "CONNECT" then assert(stream.connection.version < 2) local sock = assert(stream.connection:take_socket()) s:add_socket(sock) return true else assert(stream:write_chunk("hello world", true)) end end, function(req) req.tls = true req.proxy = { scheme = "http"; host = req.host; port = req.port; userinfo = "user:pass"; } local headers, stream = assert(req:go()) assert.same("200", headers:get(":status")) assert.same("hello world", assert(stream:get_body_as_string())) stream:shutdown() end) end) it("CONNECT proxy with path component", function() test(function(stream, s) local h = assert(stream:get_headers()) local resp_headers = new_headers() resp_headers:append(":status", "200") assert(stream:write_headers(resp_headers, false)) if h:get(":method") == "CONNECT" then assert(stream.connection.version < 2) local sock = assert(stream.connection:take_socket()) s:add_socket(sock) return true else assert(stream:write_chunk("hello world", true)) end end, function(req) req.tls = true req.proxy = { scheme = "http"; host = req.host; port = req.port; path = "/path"; } local headers, stream = assert(req:go()) assert.same("200", headers:get(":status")) assert.same("hello world", assert(stream:get_body_as_string())) stream:shutdown() end) end) it("fails correctly on non CONNECT proxy", function() test(function(stream) local h = assert(stream:get_headers()) assert.same("CONNECT", h:get(":method")) local sock = stream.connection:take_socket() assert(sock:write("foo")) sock:close() end, function(req) req.tls = true req.proxy = { scheme = "http"; host = req.host; port = req.port; userinfo = "user:pass"; } local ok = req:go() assert.falsy(ok) end) end) it("fails correctly on failed CONNECT proxy attempt", function() test(function(stream) local h = assert(stream:get_headers()) assert.same("CONNECT", h:get(":method")) local resp_headers = new_headers() resp_headers:append(":status", "400") assert(stream:write_headers(resp_headers, true)) end, function(req) req.tls = true req.proxy = { scheme = "http"; host = req.host; port = req.port; userinfo = "user:pass"; } local ok = req:go() assert.falsy(ok) end) end) it("can make request via SOCKS proxy", function() local ca = require "cqueues.auxlib" local cs = require "cqueues.socket" local socks_server = ca.assert(cs.listen { family = cs.AF_INET; host = "localhost"; port = 0; }) assert(socks_server:listen()) local _, socks_host, socks_port = socks_server:localname() local s = assert(server.listen { host = "localhost"; port = 0; onstream = function(s, stream) assert(stream:get_headers()) local resp_headers = new_headers() resp_headers:append(":status", "200") assert(stream:write_headers(resp_headers, false)) assert(stream:write_chunk("hello world", true)) stream:shutdown() stream.connection:shutdown() s:close() end; }) assert(s:listen()) local _, host, port = s:localname() local cq = cqueues.new() cq:wrap(function() assert_loop(s) end) cq:wrap(function() local req 
= request.new_from_uri { scheme = "http"; host = host; port = port; } req.ctx = non_verifying_tls_context; req.proxy = { scheme = "socks5h"; host = socks_host; port = socks_port; } local headers, stream = assert(req:go()) assert.same("200", headers:get(":status")) assert.same("hello world", assert(stream:get_body_as_string())) stream:shutdown() end) cq:wrap(function() -- SOCKS server local sock = socks_server:accept() sock:setmode("b", "b") assert.same("\5", sock:read(1)) local n = assert(sock:read(1)):byte() local available_auth = assert(sock:read(n)) assert.same("\0", available_auth) assert(sock:xwrite("\5\0", "n")) assert.same("\5\1\0\1", sock:read(4)) assert(sock:read(6)) -- ip + port assert(sock:xwrite("\5\0\0\3\4test\4\210", "n")) s:add_socket(sock) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) socks_server:close() end) it("pays attention to HSTS", function() local cq = cqueues.new() local n = 0 local s = assert(server.listen { host = "localhost"; port = 0; onstream = function(s, stream) assert(stream:get_headers()) n = n + 1 local resp_headers = new_headers() resp_headers:append(":status", "200") if n < 3 then resp_headers:append("strict-transport-security", "max-age=10") else resp_headers:append("strict-transport-security", "max-age=0") assert.truthy(stream:checktls()) end assert(stream:write_headers(resp_headers, false)) assert(stream:write_chunk("hello world", true)) if n == 3 then s:close() end end; }) assert(s:listen()) local _, _, port = s:localname() cq:wrap(function() assert_loop(s) end) cq:wrap(function() -- new store so we don't test with the default one (which will outlive tests) local hsts_store = require "http.hsts".new_store() do -- first an http request that *shouldn't* fill in the store local req = request.new_from_uri { scheme = "http"; host = "localhost"; port = port; } req.ctx = non_verifying_tls_context; req.hsts = hsts_store local headers, stream = assert(req:go()) assert.same("200", headers:get(":status")) assert.same("max-age=10", headers:get("strict-transport-security")) assert.same("hello world", assert(stream:get_body_as_string())) assert.falsy(hsts_store:check("localhost")) stream:shutdown() end do -- now an https request that *will* fill in the store local req = request.new_from_uri { scheme = "https"; host = "localhost"; port = port; } req.ctx = non_verifying_tls_context; req.hsts = hsts_store local headers, stream = assert(req:go()) assert.same("200", headers:get(":status")) assert.same("max-age=10", headers:get("strict-transport-security")) assert.same("hello world", assert(stream:get_body_as_string())) assert.truthy(hsts_store:check("localhost")) stream:shutdown() end do -- http request will be converted to https. max-age=0 should remove from store. 
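-- "localhost" is now in the HSTS store, so :go() upgrades this nominally plain-http request
-- to https before connecting; that is why the server can assert checktls() for this request.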
local req = request.new_from_uri { scheme = "http"; host = "localhost"; port = port; } req.ctx = non_verifying_tls_context; req.hsts = hsts_store local headers, stream = assert(req:go()) assert.same("200", headers:get(":status")) assert.same("max-age=0", headers:get("strict-transport-security")) assert.same("hello world", assert(stream:get_body_as_string())) assert.falsy(hsts_store:check("localhost")) stream:shutdown() end end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("handles HSTS corner case: max-age missing value", function() test(function(stream) assert(stream:get_headers()) local resp_headers = new_headers() resp_headers:append(":status", "200") resp_headers:append("strict-transport-security", "max-age") assert(stream:write_headers(resp_headers, false)) assert(stream:write_chunk("hello world", true)) end, function(req) -- new store so we don't test with the default one (which will outlive tests) local hsts_store = require "http.hsts".new_store() req.host = "localhost" req.tls = true req.hsts = hsts_store local headers, stream = assert(req:go()) assert.same("200", headers:get(":status")) assert.same("max-age", headers:get("strict-transport-security")) assert.falsy(hsts_store:check("localhost")) stream:shutdown() end) test(function(stream) assert(stream:get_headers()) local resp_headers = new_headers() resp_headers:append(":status", "200") resp_headers:append("strict-transport-security", "max-age=") assert(stream:write_headers(resp_headers, false)) assert(stream:write_chunk("hello world", true)) end, function(req) -- new store so we don't test with the default one (which will outlive tests) local hsts_store = require "http.hsts".new_store() req.host = "localhost" req.tls = true req.hsts = hsts_store local headers, stream = assert(req:go()) assert.same("200", headers:get(":status")) assert.same("max-age=", headers:get("strict-transport-security")) assert.falsy(hsts_store:check("localhost")) stream:shutdown() end) end) it("handles HSTS corner case: 'preload' parameter", function() test(function(stream) assert(stream:get_headers()) local resp_headers = new_headers() resp_headers:append(":status", "200") resp_headers:append("strict-transport-security", "max-age=10; preload") assert(stream:write_headers(resp_headers, false)) assert(stream:write_chunk("hello world", true)) end, function(req) -- new store so we don't test with the default one (which will outlive tests) local hsts_store = require "http.hsts".new_store() req.host = "localhost" req.tls = true req.hsts = hsts_store local headers, stream = assert(req:go()) assert.same("200", headers:get(":status")) assert.same("max-age=10; preload", headers:get("strict-transport-security")) assert.truthy(hsts_store:check("localhost")) stream:shutdown() end) end) end) end) lua-http-0.4/spec/require-all.lua000066400000000000000000000012341400726324600167760ustar00rootroot00000000000000-- This file is used for linting .tld files with typedlua require "http.bit" require "http.client" require "http.connection_common" require "http.cookie" require "http.h1_connection" require "http.h1_reason_phrases" require "http.h1_stream" require "http.h2_connection" require "http.h2_error" require "http.h2_stream" require "http.headers" require "http.hpack" require "http.hsts" require "http.proxies" require "http.request" require "http.server" require "http.socks" require "http.stream_common" require "http.tls" require "http.util" require "http.version" require "http.websocket" require "http.zlib" require "http.compat.prosody" require 
"http.compat.socket" lua-http-0.4/spec/server_spec.lua000066400000000000000000000242311400726324600170760ustar00rootroot00000000000000describe("http.server module", function() local http_server = require "http.server" local http_client = require "http.client" local http_tls = require "http.tls" local http_headers = require "http.headers" local cqueues = require "cqueues" local ca = require "cqueues.auxlib" local ce = require "cqueues.errno" local cs = require "cqueues.socket" local openssl_ctx = require "openssl.ssl.context" local non_verifying_tls_context = http_tls.new_client_context() non_verifying_tls_context:setVerify(openssl_ctx.VERIFY_NONE) it("rejects missing 'ctx' field", function() local s, c = ca.assert(cs.pair()) assert.has.errors(function() http_server.new { socket = s; onstream = error; } end) s:close() c:close() end) it("rejects invalid 'cq' field", function() local s, c = ca.assert(cs.pair()) assert.has.errors(function() http_server.new { socket = s; tls = false; onstream = error; cq = 5; } end) s:close() c:close() end) it("__tostring works", function() local s, c = ca.assert(cs.pair()) s = http_server.new { socket = s; tls = false; onstream = error; } assert.same("http.server{", tostring(s):match("^.-%{")) s:close() c:close() end) it(":onerror with no arguments doesn't clear", function() local s, c = ca.assert(cs.pair()) s = http_server.new { socket = s; tls = false; onstream = error; } local onerror = s:onerror() assert.same("function", type(onerror)) assert.same(onerror, s:onerror()) s:close() c:close() end) local function simple_test(family, tls, client_version, server_version) local cq = cqueues.new() local options = { family = family; tls = tls; version = server_version; } if family == cs.AF_UNIX then local socket_path = os.tmpname() finally(function() os.remove(socket_path) end) options.path = socket_path options.unlink = true else options.host = "localhost" options.port = 0 end local onstream = spy.new(function(s, stream) stream:get_headers() stream:shutdown() s:close() end) options.onstream = onstream local s = assert(http_server.listen(options)) assert(s:listen()) cq:wrap(function() assert_loop(s) end) cq:wrap(function() local client_path local client_family, client_host, client_port = s:localname() if client_family == cs.AF_UNIX then client_path = client_host client_host = nil end local client_options = { family = client_family; host = client_host; port = client_port; path = client_path; tls = tls; ctx = non_verifying_tls_context; version = client_version; } local conn = assert(http_client.connect(client_options)) local stream = conn:new_stream() local headers = http_headers.new() headers:append(":authority", "myauthority") headers:append(":method", "GET") headers:append(":path", "/") headers:append(":scheme", "http") assert(stream:write_headers(headers, true)) stream:get_headers() if server_version then if conn.version == 1.1 then -- 1.1 client might have 1.0 server assert.same(server_version, stream.peer_version) else assert.same(server_version, conn.version) end end conn:close() end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) assert.spy(onstream).was.called() end it("works with plain http 1.1 using IP", function() simple_test(cs.AF_INET, false, 1.1) end) it("works with https 1.1 using IP", function() simple_test(cs.AF_INET, true, 1.1) end) it("works with plain http 2.0 using IP", function() simple_test(cs.AF_INET, false, 2.0) end); (http_tls.has_alpn and it or pending)("works with https 2.0 using IP", function() simple_test(cs.AF_INET, true, 2.0) 
end) --[[ TLS tests are pending for now as UNIX sockets don't automatically generate a TLS context ]] it("works with plain http 1.1 using UNIX socket", function() simple_test(cs.AF_UNIX, false, 1.1) end) pending("works with https 1.1 using UNIX socket", function() simple_test(cs.AF_UNIX, true, 1.1) end) it("works with plain http 2.0 using UNIX socket", function() simple_test(cs.AF_UNIX, false, 2.0) end); pending("works with https 2.0 using UNIX socket", function() simple_test(cs.AF_UNIX, true, 2.0) end) describe("pin server version", function() it("works when set to http 1.0 without TLS", function() simple_test(cs.AF_INET, false, nil, 1.0) end) it("works when set to http 1.1 without TLS", function() simple_test(cs.AF_INET, false, nil, 1.1) end) it("works when set to http 1.0 with TLS", function() simple_test(cs.AF_INET, true, nil, 1.0) end) it("works when set to http 1.1 with TLS", function() simple_test(cs.AF_INET, true, nil, 1.1) end) -- This test doesn't seem to work on travis pending("works when set to http 2.0 with TLS", function() simple_test(cs.AF_INET, true, nil, 2.0) end) end); (http_tls.has_alpn and it or pending)("works to set server version when alpn proto is not a normal http one", function() local ctx = http_tls.new_client_context() ctx:setAlpnProtos { "foo" } simple_test(cs.AF_INET, ctx, nil, nil) simple_test(cs.AF_INET, ctx, nil, 1.1) simple_test(cs.AF_INET, ctx, 2.0, 2.0) end) it("taking socket from underlying connection is handled well by server", function() local cq = cqueues.new() local onstream = spy.new(function(server, stream) local sock = stream.connection:take_socket() server:close() assert.same("test", sock:read("*a")) sock:close() end); local server = assert(http_server.new { tls = false; onstream = onstream; }) local s, c = ca.assert(cs.pair()) server:add_socket(s) cq:wrap(function() assert_loop(server) end) cq:wrap(function() assert(c:write("test")) assert(c:flush()) c:close() end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) assert.spy(onstream).was.called() end) it("an idle http2 stream doesn't block the server", function() local server = assert(http_server.new { tls = false; version = 2; onstream = function(_, stream) if stream.id == 1 then stream:get_next_chunk() else assert.same(3, stream.id) assert.same({}, {stream:get_next_chunk()}) local headers = http_headers.new() headers:append(":status", "200") assert(stream:write_headers(headers, true)) end end; }) local s, c = ca.assert(cs.pair()) server:add_socket(s) local cq = cqueues.new() cq:wrap(function() assert_loop(server) end) cq:wrap(function() local conn = assert(http_client.negotiate(c, { version = 2; })) local headers = http_headers.new() headers:append(":authority", "myauthority") headers:append(":method", "GET") headers:append(":path", "/") headers:append(":scheme", "http") local stream1 = assert(conn:new_stream()) assert(stream1:write_headers(headers, false)) local stream2 = assert(conn:new_stream()) assert(stream2:write_headers(headers, true)) assert(stream2:get_headers()) conn:close() server:close() end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("times out clients if intra_stream_timeout is exceeded", function() local server = assert(http_server.new { tls = false; onstream = function(_, stream) assert(stream:get_headers()) local headers = http_headers.new() headers:append(":status", "200") assert(stream:write_headers(headers, true)) end; intra_stream_timeout = 0.1; }) local s, c = ca.assert(cs.pair()) server:add_socket(s) local cq = cqueues.new() 
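-- intra_stream_timeout is the longest the server keeps an idle keep-alive connection open
-- between requests; the third request below arrives after that window and should fail with
-- EPIPE because the server has already shut the connection down.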
cq:wrap(function() assert_loop(server) end) cq:wrap(function() local conn = assert(http_client.negotiate(c, { version = 1.1; })) local headers = http_headers.new() headers:append(":method", "GET") headers:append(":scheme", "http") headers:append(":path", "/") headers:append(":authority", "foo") -- Normal request local stream1 = conn:new_stream() assert(stream1:write_headers(headers, true)) assert(stream1:get_headers()) -- Wait for less than intra_stream_timeout: should work as normal cqueues.sleep(0.05) local stream2 = conn:new_stream() assert(stream2:write_headers(headers, true)) assert(stream2:get_headers()) -- Wait for more then intra_stream_timeout: server should have closed connection cqueues.sleep(0.2) local stream3 = conn:new_stream() assert.same(ce.EPIPE, select(3, stream3:write_headers(headers, true))) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("allows pausing+resuming the server", function() local s = assert(http_server.listen { host = "localhost"; port = 0; onstream = function(_, stream) assert(stream:get_headers()) local headers = http_headers.new() headers:append(":status", "200") assert(stream:write_headers(headers, true)) end; }) assert(s:listen()) local client_family, client_host, client_port = s:localname() local client_options = { family = client_family; host = client_host; port = client_port; } local headers = http_headers.new() headers:append(":authority", "myauthority") headers:append(":method", "GET") headers:append(":path", "/") headers:append(":scheme", "http") local cq = cqueues.new() cq:wrap(function() assert_loop(s) end) local function do_req(timeout) local conn = assert(http_client.connect(client_options)) local stream = assert(conn:new_stream()) assert(stream:write_headers(headers, true)) local ok, err, errno = stream:get_headers(timeout) conn:close() return ok, err, errno end cq:wrap(function() s:pause() assert.same(ce.ETIMEDOUT, select(3, do_req(0.1))) s:resume() assert.truthy(do_req()) s:pause() assert.same(ce.ETIMEDOUT, select(3, do_req(0.1))) s:resume() assert.truthy(do_req()) s:close() end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("shouldn't throw an error calling :listen() after :close()", function() local s = assert(http_server.listen { host = "localhost"; port = 0; onstream = function() end; }) s:close() s:listen() end) it("shouldn't throw an error calling :localname() after :close()", function() local s = assert(http_server.listen { host = "localhost"; port = 0; onstream = function() end; }) s:close() s:localname() end) end) lua-http-0.4/spec/socks_spec.lua000066400000000000000000000132021400726324600167060ustar00rootroot00000000000000local TEST_TIMEOUT = 2 describe("http.socks module", function() local http_socks = require "http.socks" local cqueues = require "cqueues" local ca = require "cqueues.auxlib" local ce = require "cqueues.errno" local cs = require "cqueues.socket" it("works with connect constructor", function() assert(http_socks.connect("socks5://127.0.0.1")) assert(http_socks.connect("socks5h://username:password@127.0.0.1")) end) it("fails on unknown protocols", function() assert.has.errors(function() http_socks.connect("socks3://host") end) end) it("fails when userinfo is missing password", function() assert.has.errors(function() http_socks.connect("socks5h://user@host") end) end) it("has a working :clone", function() local socks = http_socks.connect("socks5://127.0.0.1") assert.same(socks, socks:clone()) end) it("has a working :clone when userinfo present", function() local socks = 
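-- Client-side shape of the http.socks API exercised below: build a proxy
-- handle from a socks5:// or socks5h:// URI, optionally register
-- username/password auth, then negotiate a tunnel to the real destination.
-- The proxy address, credentials and destination are illustrative and a
-- reachable SOCKS5 server is assumed, so the sketch stays inside a comment.
--[==[
local socks = assert(http_socks.connect("socks5h://127.0.0.1:1080"))
assert(socks:add_username_password_auth("open", "sesame")) -- optional
assert(socks:negotiate("example.com", 443))
local sock = socks:take_socket() -- the tunnelled cqueues socket
]==]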
http_socks.connect("socks5://user:pass@127.0.0.1") assert.same(socks, socks:clone()) end) it("can negotiate a IPv4 connection with no auth", function() local s, c = ca.assert(cs.pair()) local cq = cqueues.new() cq:wrap(function() assert(http_socks.fdopen(c):negotiate("127.0.0.1", 123)) end) cq:wrap(function() assert.same("\5", s:read(1)) local n = assert(s:read(1)):byte() local available_auth = assert(s:read(n)) assert.same("\0", available_auth) assert(s:xwrite("\5\0", "n")) assert.same("\5\1\0\1\127\0\0\1\0\123", s:read(10)) assert(s:xwrite("\5\0\0\1\127\0\0\1\12\34", "n")) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) s:close() c:close() end) it("can negotiate a IPv6 connection with username+password auth", function() local s, c = ca.assert(cs.pair()) local cq = cqueues.new() cq:wrap(function() c = http_socks.fdopen(c) assert(c:add_username_password_auth("open", "sesame")) assert(c:negotiate("::1", 123)) c:close() end) cq:wrap(function() assert.same("\5", s:read(1)) local n = assert(s:read(1)):byte() local available_auth = assert(s:read(n)) assert.same("\0\2", available_auth) assert(s:xwrite("\5\2", "n")) assert.same("\1\4open\6sesame", s:read(13)) assert(s:xwrite("\1\0", "n")) assert.same("\5\1\0\4\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1\0\123", s:read(22)) assert(s:xwrite("\5\0\0\4\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1\12\34", "n")) s:close() end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("can negotiate a connection where peername is a domain", function() local s, c = ca.assert(cs.pair()) local cq = cqueues.new() cq:wrap(function() c = http_socks.fdopen(c) assert(c:negotiate("127.0.0.1", 123)) assert.same(cs.AF_UNSPEC, c.dst_family) assert.same("test", c.dst_host) assert.same(1234, c.dst_port) c:close() end) cq:wrap(function() assert.same("\5", s:read(1)) local n = assert(s:read(1)):byte() local available_auth = assert(s:read(n)) assert.same("\0", available_auth) assert(s:xwrite("\5\0", "n")) assert.same("\5\1\0\1\127\0\0\1\0\123", s:read(10)) assert(s:xwrite("\5\0\0\3\4test\4\210", "n")) s:close() end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("fails incorrect username+password with EACCES", function() local s, c = ca.assert(cs.pair()) local cq = cqueues.new() cq:wrap(function() c = http_socks.fdopen(c) assert(c:add_username_password_auth("open", "sesame")) assert.same(ce.EACCES, select(3, c:negotiate("unused", 123))) c:close() end) cq:wrap(function() assert.same("\5", s:read(1)) local n = assert(s:read(1)):byte() local available_auth = assert(s:read(n)) assert.same("\0\2", available_auth) assert(s:xwrite("\5\2", "n")) assert.same("\1\4open\6sesame", s:read(13)) assert(s:xwrite("\1\1", "n")) s:close() end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("fails with correct error messages", function() for i, correct_errno in ipairs({ false; ce.EACCES; ce.ENETUNREACH; ce.EHOSTUNREACH; ce.ECONNREFUSED; ce.ETIMEDOUT; ce.EOPNOTSUPP; ce.EAFNOSUPPORT; }) do local c, s = ca.assert(cs.pair()) local cq = cqueues.new() cq:wrap(function() c = http_socks.fdopen(c) local ok, _, errno = c:negotiate("127.0.0.1", 123) assert.falsy(ok) if correct_errno then assert.same(correct_errno, errno) end c:close() end) cq:wrap(function() assert.same("\5", s:read(1)) local n = assert(s:read(1)):byte() local available_auth = assert(s:read(n)) assert.same("\0", available_auth) assert(s:xwrite("\5\0", "n")) assert.same("\5\1\0\1\127\0\0\1\0\123", s:read(10)) assert(s:xwrite("\5" .. 
string.char(i), "n")) s:close() end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end end) it("fails with EAFNOSUPPORT on unknown address type", function() local s, c = ca.assert(cs.pair()) local cq = cqueues.new() cq:wrap(function() c = http_socks.fdopen(c) local ok, _, errno = c:negotiate("127.0.0.1", 123) assert.falsy(ok) assert.same(ce.EAFNOSUPPORT, errno) c:close() end) cq:wrap(function() assert.same("\5", s:read(1)) local n = assert(s:read(1)):byte() local available_auth = assert(s:read(n)) assert.same("\0", available_auth) assert(s:xwrite("\5\0", "n")) assert.same("\5\1\0\1\127\0\0\1\0\123", s:read(10)) assert(s:xwrite("\5\0\0\5", "n")) s:close() end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("has a working :take_socket", function() local s, c = ca.assert(cs.pair()) local socks = http_socks.fdopen(c) assert.same(c, socks:take_socket()) assert.same(nil, socks:take_socket()) s:close() c:close() end) end) lua-http-0.4/spec/stream_common_spec.lua000066400000000000000000000141501400726324600204320ustar00rootroot00000000000000describe("http.stream_common", function() local h1_connection = require "http.h1_connection" local new_headers = require "http.headers".new local cqueues = require "cqueues" local ca = require "cqueues.auxlib" local cs = require "cqueues.socket" local function new_pair(version) local s, c = ca.assert(cs.pair()) s = h1_connection.new(s, "server", version) c = h1_connection.new(c, "client", version) return s, c end local function new_request_headers() local headers = new_headers() headers:append(":method", "GET") headers:append(":scheme", "http") headers:append(":authority", "myauthority") headers:append(":path", "/") return headers end it("Can read a number of characters", function() local server, client = new_pair(1.1) local cq = cqueues.new() cq:wrap(function() local stream = client:new_stream() assert(stream:write_headers(new_request_headers(), false)) assert(stream:write_chunk("foo", false)) assert(stream:write_chunk("\nb", false)) assert(stream:write_chunk("ar\n", true)) end) cq:wrap(function() local stream = server:get_next_incoming_stream() -- same size as next chunk assert.same("foo", stream:get_body_chars(3)) -- less than chunk assert.same("\n", stream:get_body_chars(1)) -- crossing chunks assert.same("bar", stream:get_body_chars(3)) -- more than available assert.same("\n", stream:get_body_chars(8)) -- when none available assert.same(nil, stream:get_body_chars(8)) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) client:close() server:close() end) it("Can read a line", function() local server, client = new_pair(1.1) local cq = cqueues.new() cq:wrap(function() local stream = client:new_stream() assert(stream:write_headers(new_request_headers(), false)) assert(stream:write_chunk("foo", false)) assert(stream:write_chunk("\nb", false)) assert(stream:write_chunk("ar\n", true)) end) cq:wrap(function() local stream = server:get_next_incoming_stream() assert.same("foo", stream:get_body_until("\n", true, false)) assert.same("bar", stream:get_body_until("\n", true, false)) assert.same(nil, stream:get_body_until("\n", true, false)) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) client:close() server:close() end) it("can read into a temporary file", function() local server, client = new_pair(1.1) local cq = cqueues.new() cq:wrap(function() local stream = client:new_stream() assert(stream:write_headers(new_request_headers(), false)) assert(stream:write_chunk("hello world!", true)) end) cq:wrap(function() local 
stream = assert(server:get_next_incoming_stream()) local file = assert(stream:get_body_as_file()) assert.same("hello world!", file:read"*a") end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) client:close() server:close() end) describe("write_body_from_file", function() it("works with a temporary file", function() local server, client = new_pair(1.1) local cq = cqueues.new() cq:wrap(function() local file = io.tmpfile() assert(file:write("hello world!")) assert(file:seek("set")) local stream = client:new_stream() assert(stream:write_headers(new_request_headers(), false)) assert(stream:write_body_from_file(file)) end) cq:wrap(function() local stream = assert(server:get_next_incoming_stream()) assert.same("hello world!", assert(stream:get_body_as_string())) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) client:close() server:close() end) it("works using the options form", function() local server, client = new_pair(1.1) local cq = cqueues.new() cq:wrap(function() local file = io.tmpfile() assert(file:write("hello world!")) assert(file:seek("set")) local stream = client:new_stream() assert(stream:write_headers(new_request_headers(), false)) assert(stream:write_body_from_file({ file = file; })) end) cq:wrap(function() local stream = assert(server:get_next_incoming_stream()) assert.same("hello world!", assert(stream:get_body_as_string())) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) client:close() server:close() end) it("validates .count option", function() local server, client = new_pair(1.1) local cq = cqueues.new() cq:wrap(function() local stream = client:new_stream() assert(stream:write_headers(new_request_headers(), false)) assert.has_error(function() stream:write_body_from_file({ file = io.tmpfile(); count = "invalid count field"; }) end) end) cq:wrap(function() assert(server:get_next_incoming_stream()) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) client:close() server:close() end) it("limits number of bytes when using .count option", function() local server, client = new_pair(1.1) local cq = cqueues.new() cq:wrap(function() local file = io.tmpfile() assert(file:write("hello world!")) assert(file:seek("set")) local stream = client:new_stream() assert(stream:write_headers(new_request_headers(), false)) assert(stream:write_body_from_file({ file = file; count = 5; })) end) cq:wrap(function() local stream = assert(server:get_next_incoming_stream()) assert.same("hello", assert(stream:get_body_as_string())) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) client:close() server:close() end) it("reports an error on early EOF", function() local server, client = new_pair(1.1) local cq = cqueues.new() cq:wrap(function() local file = io.tmpfile() assert(file:write("hello world!")) assert(file:seek("set")) local stream = client:new_stream() assert(stream:write_headers(new_request_headers(), false)) assert.has_error(function() assert(stream:write_body_from_file({ file = file; count = 50; -- longer than the file })) end) end) cq:wrap(function() assert(server:get_next_incoming_stream()) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) client:close() server:close() end) end) end) lua-http-0.4/spec/tls_spec.lua000066400000000000000000000031361400726324600163730ustar00rootroot00000000000000describe("http.tls module", function() local tls = require "http.tls" local cqueues = require "cqueues" local ca = require "cqueues.auxlib" local cs = require "cqueues.socket" local openssl_ctx = require "openssl.ssl.context" local openssl_pkey 
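-- http.tls provides ready-made client/server contexts: server_spec builds a
-- non-verifying client context this way for its self-signed test
-- certificates, and the constructors themselves are exercised at the end of
-- this file. Minimal sketch (verification disabled, so only suitable for
-- tests):
--[==[
local http_tls = require "http.tls"
local openssl_ctx = require "openssl.ssl.context"
local client_ctx = http_tls.new_client_context()
client_ctx:setVerify(openssl_ctx.VERIFY_NONE)
local server_ctx = http_tls.new_server_context()
]==]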
= require "openssl.pkey" local openssl_x509 = require "openssl.x509" it("banned ciphers list denies a negotiated banned cipher", function() local banned_cipher_list do local t = {} for cipher in pairs(tls.banned_ciphers) do table.insert(t, cipher) end banned_cipher_list = table.concat(t, ":") end local s, c = ca.assert(cs.pair()) local cq = cqueues.new() cq:wrap(function() local ctx = openssl_ctx.new("TLSv1", false) assert(c:starttls(ctx)) local ssl = assert(s:checktls()) local cipher = ssl:getCipherInfo() assert(tls.banned_ciphers[cipher.name]) end) cq:wrap(function() local ctx = openssl_ctx.new("TLSv1", true) ctx:setCipherList(banned_cipher_list) ctx:setEphemeralKey(openssl_pkey.new{ type = "EC", curve = "prime256v1" }) local crt = openssl_x509.new() local key = openssl_pkey.new() crt:setPublicKey(key) crt:sign(key) assert(ctx:setPrivateKey(key)) assert(ctx:setCertificate(crt)) assert(s:starttls(ctx)) local ssl = assert(s:checktls()) local cipher = ssl:getCipherInfo() assert(tls.banned_ciphers[cipher.name]) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) s:close() c:close() end) it("can create a new client context", function() tls.new_client_context() end) it("can create a new server context", function() tls.new_server_context() end) end) lua-http-0.4/spec/util_spec.lua000066400000000000000000000130471400726324600165500ustar00rootroot00000000000000describe("http.util module", function() local unpack = table.unpack or unpack -- luacheck: ignore 113 143 local util = require "http.util" it("decodeURI works", function() assert.same("Encoded string", util.decodeURI("Encoded%20string")) end) it("decodeURI doesn't decode blacklisted characters", function() assert.same("%24", util.decodeURI("%24")) local s = util.encodeURIComponent("#$&+,/:;=?@") assert.same(s, util.decodeURI(s)) end) it("decodeURIComponent round-trips with encodeURIComponent", function() local allchars do local t = {} for i=0, 255 do t[i] = i end allchars = string.char(unpack(t, 0, 255)) end assert.same(allchars, util.decodeURIComponent(util.encodeURIComponent(allchars))) end) it("query_args works", function() do local iter, state, first = util.query_args("foo=bar") assert.same({"foo", "bar"}, {iter(state, first)}) assert.same(nil, iter(state, first)) end do local iter, state, first = util.query_args("foo=bar&baz=qux&foo=somethingelse") assert.same({"foo", "bar"}, {iter(state, first)}) assert.same({"baz", "qux"}, {iter(state, first)}) assert.same({"foo", "somethingelse"}, {iter(state, first)}) assert.same(nil, iter(state, first)) end do local iter, state, first = util.query_args("%3D=%26") assert.same({"=", "&"}, {iter(state, first)}) assert.same(nil, iter(state, first)) end do local iter, state, first = util.query_args("foo=bar&noequals") assert.same({"foo", "bar"}, {iter(state, first)}) assert.same({"noequals", nil}, {iter(state, first)}) assert.same(nil, iter(state, first)) end end) it("dict_to_query works", function() assert.same("foo=bar", util.dict_to_query{foo = "bar"}) assert.same("foo=%CE%BB", util.dict_to_query{foo = "λ"}) do local t = {foo = "bar"; baz = "qux"} local r = {} for k, v in util.query_args(util.dict_to_query(t)) do r[k] = v end assert.same(t, r) end end) it("is_safe_method works", function() assert.same(true, util.is_safe_method "GET") assert.same(true, util.is_safe_method "HEAD") assert.same(true, util.is_safe_method "OPTIONS") assert.same(true, util.is_safe_method "TRACE") assert.same(false, util.is_safe_method "POST") assert.same(false, util.is_safe_method "PUT") end) it("is_ip works", 
function() assert.same(true, util.is_ip "127.0.0.1") assert.same(true, util.is_ip "192.168.1.1") assert.same(true, util.is_ip "::") assert.same(true, util.is_ip "::1") assert.same(true, util.is_ip "2001:0db8:85a3:0042:1000:8a2e:0370:7334") assert.same(true, util.is_ip "::FFFF:204.152.189.116") assert.same(false, util.is_ip "not an ip") assert.same(false, util.is_ip "0x80") assert.same(false, util.is_ip "::FFFF:0.0.0") end) it("split_authority works", function() assert.same({"example.com", 80}, {util.split_authority("example.com", "http")}) assert.same({"example.com", 8000}, {util.split_authority("example.com:8000", "http")}) assert.falsy(util.split_authority("example.com", "madeupscheme")) -- IPv6 assert.same({"::1", 443}, {util.split_authority("[::1]", "https")}) assert.same({"::1", 8000}, {util.split_authority("[::1]:8000", "https")}) end) it("to_authority works", function() assert.same("example.com", util.to_authority("example.com", 80, "http")) assert.same("example.com:8000", util.to_authority("example.com", 8000, "http")) -- IPv6 assert.same("[::1]", util.to_authority("::1", 443, "https")) assert.same("[::1]:8000", util.to_authority("::1", 8000, "https")) end) it("generates correct looking Date header format", function() assert.same("Fri, 13 Feb 2009 23:31:30 GMT", util.imf_date(1234567890)) end) describe("maybe_quote", function() it("makes acceptable tokens or quoted-string", function() assert.same([[foo]], util.maybe_quote([[foo]])) assert.same([["with \" quote"]], util.maybe_quote([[with " quote]])) end) it("escapes all bytes correctly", function() local http_patts = require "lpeg_patterns.http" local s do -- Make a string containing every byte allowed in a quoted string local t = {"\t"} -- tab for i=32, 126 do t[#t+1] = string.char(i) end for i=128, 255 do t[#t+1] = string.char(i) end s = table.concat(t) end assert.same(s, http_patts.quoted_string:match(util.maybe_quote(s))) end) it("returns nil on invalid input", function() local function check(s) assert.same(nil, util.maybe_quote(s)) end for i=0, 8 do check(string.char(i)) end -- skip tab for i=10, 31 do check(string.char(i)) end check("\127") end) end) describe("yieldable_pcall", function() it("returns multiple return values", function() assert.same({true, 1, 2, 3, 4, nil, nil, nil, nil, nil, nil, "foo"}, {util.yieldable_pcall(function() return 1, 2, 3, 4, nil, nil, nil, nil, nil, nil, "foo" end)}) end) it("protects from errors", function() assert.falsy(util.yieldable_pcall(error)) end) it("returns error objects", function() local err = {"myerror"} local ok, err2 = util.yieldable_pcall(error, err) assert.falsy(ok) assert.equal(err, err2) end) it("works on all levels", function() local f = coroutine.wrap(function() return util.yieldable_pcall(coroutine.yield, true) end) assert.truthy(f()) -- 'true' that was yielded assert.truthy(f()) -- 'true' from the pcall assert.has.errors(f) -- cannot resume dead coroutine end) it("works with __call objects", function() local done = false local o = setmetatable({}, { __call=function() done = true end; }) util.yieldable_pcall(o) assert.truthy(done) end) end) end) lua-http-0.4/spec/websocket_spec.lua000066400000000000000000000330571400726324600175640ustar00rootroot00000000000000describe("http.websocket module's internal functions work", function() local websocket = require "http.websocket" it("build_frame works for simple cases", function() -- Examples from RFC 6455 Section 5.7 -- A single-frame unmasked text message assert.same(string.char(0x81,0x05,0x48,0x65,0x6c,0x6c,0x6f), 
websocket.build_frame { FIN = true; MASK = false; opcode = 0x1; data = "Hello"; }) -- A single-frame masked text message assert.same(string.char(0x81,0x85,0x37,0xfa,0x21,0x3d,0x7f,0x9f,0x4d,0x51,0x58), websocket.build_frame { FIN = true; MASK = true; key = {0x37,0xfa,0x21,0x3d}; opcode = 0x1; data = "Hello"; }) end) it("build_frame validates opcode", function() assert.has.errors(function() websocket.build_frame { opcode = -1; } end) assert.has.errors(function() websocket.build_frame { opcode = 16; } end) end) it("build_frame validates data length", function() assert.has.errors(function() websocket.build_frame { opcode = 0x8; data = ("f"):rep(200); } end) end) it("build_close works for common case", function() assert.same({ opcode = 0x8; FIN = true; MASK = false; data = "\3\232"; }, websocket.build_close(1000, nil, false)) assert.same({ opcode = 0x8; FIN = true; MASK = false; data = "\3\232error"; }, websocket.build_close(1000, "error", false)) end) it("build_close validates string length", function() assert.has.errors(function() websocket.build_close(1000, ("f"):rep(200), false) end) end) it("build_close can generate frames without a code", function() assert.same({ opcode = 0x8; FIN = true; MASK = false; data = ""; }, websocket.build_close(nil, nil, false)) end) it("parse_close works", function() assert.same({nil, nil}, {websocket.parse_close ""}) assert.same({1000, nil}, {websocket.parse_close "\3\232"}) assert.same({1000, "error"}, {websocket.parse_close "\3\232error"}) end) end) describe("http.websocket", function() local websocket = require "http.websocket" it("__tostring works", function() local ws = websocket.new_from_uri("wss://example.com") assert.same("http.websocket{", tostring(ws):match("^.-%{")) end) it("close on a new websocket doesn't throw an error", function() local ws = websocket.new_from_uri("wss://example.com") ws:close() -- this shouldn't throw end) describe("new_from_stream", function() local ca = require "cqueues.auxlib" local cs = require "cqueues.socket" local ce = require "cqueues.errno" local h1_connection = require "http.h1_connection" local http_headers = require "http.headers" local function new_connection_pair(version) local s, c = ca.assert(cs.pair()) s = h1_connection.new(s, "server", version) c = h1_connection.new(c, "client", version) return s, c end local correct_headers = http_headers.new() correct_headers:append(":method", "GET") correct_headers:append(":scheme", "http") correct_headers:append(":authority", "example.com") correct_headers:append(":path", "/") correct_headers:append("upgrade", "websocket") correct_headers:append("connection", "upgrade") correct_headers:append("sec-websocket-key", "foo", true) correct_headers:append("sec-websocket-version", "13") it("works with correct parameters", function() local s, c = new_connection_pair(1.1) local c_stream = c:new_stream() c_stream:write_headers(correct_headers, false) local s_stream = assert(s:get_next_incoming_stream(TEST_TIMEOUT)) local s_headers = assert(s_stream:get_headers(TEST_TIMEOUT)) local ws = assert(websocket.new_from_stream(s_stream, s_headers)) s:close() ws:close() end) it("rejects client streams", function() local s, c = new_connection_pair(1.1) local c_stream = c:new_stream() assert.has.errors(function() websocket.new_from_stream(c_stream, correct_headers) end) s:close() c:close() end) it("rejects non-1.0 connections", function() local s, c = new_connection_pair(1.0) local c_stream = c:new_stream() c_stream:write_headers(correct_headers, false) local s_stream = 
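-- Server-side upgrade path that these new_from_stream tests cover: the
-- request headers must carry "upgrade: websocket", "connection: upgrade",
-- a sec-websocket-key and sec-websocket-version 13 (the correct_headers
-- table above). Inside an onstream handler the flow looks roughly like this
-- (sketch only; `stream` is the incoming server stream):
--[==[
local headers = assert(stream:get_headers())
local ws, err, errno = websocket.new_from_stream(stream, headers)
if not ws then
	-- not a websocket upgrade; fall back to a normal HTTP response
	return nil, err, errno
end
assert(ws:accept())
assert(ws:send("hello over websocket"))
assert(ws:close())
]==]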
assert(s:get_next_incoming_stream(TEST_TIMEOUT)) local s_headers = assert(s_stream:get_headers(TEST_TIMEOUT)) assert.same({nil, "upgrade headers MUST be ignored in HTTP 1.0", ce.EINVAL}, {websocket.new_from_stream(s_stream, s_headers)}) s:close() c:close() end) local function test_invalid_headers(test_name, cb, err) it(test_name, function() local s, c = new_connection_pair(1.1) local c_stream = c:new_stream() local headers = correct_headers:clone() cb(headers) c_stream:write_headers(headers, false) local s_stream = assert(s:get_next_incoming_stream(TEST_TIMEOUT)) local s_headers = assert(s_stream:get_headers(TEST_TIMEOUT)) assert.same({nil, err, ce.EINVAL}, {websocket.new_from_stream(s_stream, s_headers)}) s:close() c:close() end) end test_invalid_headers("rejects missing upgrade header", function(headers) headers:delete("upgrade") end, "upgrade header not websocket") test_invalid_headers("rejects non-websocket upgrade header", function(headers) headers:upsert("upgrade", "notwebsocket") end, "upgrade header not websocket") test_invalid_headers("rejects missing connection header", function(headers) headers:delete("connection") end, "connection header doesn't contain upgrade") test_invalid_headers("rejects upgrade missing from connection header", function(headers) headers:upsert("connection", "other") end, "connection header doesn't contain upgrade") test_invalid_headers("rejects missing Sec-Websocket-Key header", function(headers) headers:delete("sec-websocket-key") end, "missing sec-websocket-key") test_invalid_headers("rejects missing Sec-Websocket-Version header", function(headers) headers:delete("sec-websocket-version") end, "unsupported sec-websocket-version") test_invalid_headers("rejects unknown Sec-Websocket-Version header", function(headers) headers:upsert("sec-websocket-version", "123456") end, "unsupported sec-websocket-version") test_invalid_headers("rejects invalid Sec-Websocket-Protocol header", function(headers) headers:upsert("sec-websocket-protocol", "invalid@protocol") end, "invalid sec-websocket-protocol header") test_invalid_headers("rejects duplicate Sec-Websocket-Protocol", function(headers) headers:upsert("sec-websocket-protocol", "foo, foo") end, "duplicate protocol") end) end) describe("http.websocket module two sided tests", function() local onerror = require "http.connection_common".onerror local server = require "http.server" local util = require "http.util" local websocket = require "http.websocket" local cqueues = require "cqueues" local ca = require "cqueues.auxlib" local ce = require "cqueues.errno" local cs = require "cqueues.socket" local function new_pair() local s, c = ca.assert(cs.pair()) s:onerror(onerror) c:onerror(onerror) local ws_server = websocket.new("server") ws_server.socket = s ws_server.readyState = 1 local ws_client = websocket.new("client") ws_client.socket = c ws_client.readyState = 1 return ws_client, ws_server end it("works with a socketpair", function() local cq = cqueues.new() local c, s = new_pair() cq:wrap(function() assert(c:send("hello")) assert.same("world", c:receive()) assert(c:close()) end) cq:wrap(function() assert.same("hello", s:receive()) assert(s:send("world")) assert(s:close()) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("timeouts return nil, err, errno", function() local cq = cqueues.new() local c, s = new_pair() local ok, _, errno = c:receive(0) assert.same(nil, ok) assert.same(ce.ETIMEDOUT, errno) -- Check it still works afterwards cq:wrap(function() assert(c:send("hello")) assert.same("world", 
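-- Client-side counterpart used later in this file: connect from a ws:// URI,
-- exchange text frames, then close. The URI is illustrative and needs a
-- listening server, so the sketch is commented out.
--[==[
local ws = assert(websocket.new_from_uri("ws://localhost:8000"))
assert(ws:connect())
assert(ws:send("hello"))
print(ws:receive())      -- text payload sent back by the server
assert(ws:close())
]==]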
c:receive()) assert(c:close()) end) cq:wrap(function() assert.same("hello", s:receive()) assert(s:send("world")) assert(s:close()) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("doesn't fail when data contains a \\r\\n", function() local cq = cqueues.new() local c, s = new_pair() cq:wrap(function() assert(c:send("hel\r\nlo")) assert.same("wor\r\nld", c:receive()) assert(c:close()) end) cq:wrap(function() assert.same("hel\r\nlo", s:receive()) assert(s:send("wor\r\nld")) assert(s:close()) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) local function send_receive_test(name, data, data_type) it(name, function() data_type = data_type or "text" local cq = cqueues.new() local c, s = new_pair() cq:wrap(function() assert(c:send(data, data_type)) assert.same({data, data_type}, {assert(c:receive())}) assert(c:close()) end) cq:wrap(function() assert.same({data, data_type}, {assert(s:receive())}) assert(s:send(data, data_type)) assert(s:close()) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) end send_receive_test("works with small size frames", "f") send_receive_test("works with medium size frames", ("f"):rep(200)) send_receive_test("works with large size frames", ("f"):rep(100000)) send_receive_test("works with binary frames", "\0\1\127\255", "binary") it("fails when text isn't valid utf8", function() local cq = cqueues.new() local c, s = new_pair() cq:wrap(function() assert(c:send("\230", "text")) local ok, _, errno = c:receive() assert.same(nil, ok) assert.same(1007, errno) assert(c:close()) end) cq:wrap(function() local ok, _, errno = s:receive() assert.same(nil, ok) assert.same(1007, errno) assert(s:close()) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("fails when text isn't valid utf8 (utf16 surrogates)", function() local cq = cqueues.new() local c, s = new_pair() cq:wrap(function() assert(c:send("\237\160\128", "text")) local ok, _, errno = c:receive() assert.same(nil, ok) assert.same(1007, errno) assert(c:close()) end) cq:wrap(function() local ok, _, errno = s:receive() assert.same(nil, ok) assert.same(1007, errno) assert(s:close()) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("doesn't allow invalid utf8 in close messages", function() local cq = cqueues.new() local c, s = new_pair() cq:wrap(function() assert(c:close(1000, "\237\160\128")) end) cq:wrap(function() local ok, _, errno = s:receive() assert.same(nil, ok) assert.same(1007, errno) assert(s:close()) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) for _, flag in ipairs{"RSV1", "RSV2", "RSV3"} do it("fails correctly on "..flag.." 
flag set", function() local cq = cqueues.new() local c, s = new_pair() cq:wrap(function() assert(c:send_frame({ opcode = 1; [flag] = true; })) assert(c:close()) end) cq:wrap(function() local ok, _, errno = s:receive() assert.same(nil, ok) assert.same(1002, errno) assert(s:close()) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) end it("doesn't blow up when given pings", function() local cq = cqueues.new() local c, s = new_pair() cq:wrap(function() assert(c:send_ping()) assert(c:send("test")) assert(c:close()) end) cq:wrap(function() assert.same("test", s:receive()) assert(s:close()) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("ignores unsolicited pongs", function() local cq = cqueues.new() local c, s = new_pair() cq:wrap(function() assert(c:send_pong()) assert(c:send("test")) assert(c:close()) end) cq:wrap(function() assert.same("test", s:receive()) assert(s:close()) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("works when using uri string constructor", function() local cq = cqueues.new() local s = server.listen { host = "localhost"; port = 0; onstream = function(s, stream) local headers = assert(stream:get_headers()) assert.same("http", headers:get(":scheme")) local ws = websocket.new_from_stream(stream, headers) assert(ws:accept()) assert(ws:close()) s:close() end; } assert(s:listen()) local _, host, port = s:localname() cq:wrap(function() assert_loop(s) end) cq:wrap(function() local ws = websocket.new_from_uri("ws://"..util.to_authority(host, port, "ws")); assert(ws:connect()) assert(ws:close()) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) it("works when using uri table constructor and protocols", function() local new_headers = require "http.headers".new local cq = cqueues.new() local s = server.listen { host = "localhost"; port = 0; onstream = function(s, stream) local headers = assert(stream:get_headers()) local ws = websocket.new_from_stream(stream, headers) local response_headers = new_headers() response_headers:upsert(":status", "101") response_headers:upsert("server", "lua-http websocket test") assert(ws:accept { headers = response_headers; protocols = {"my awesome-protocol", "foo"}; }) -- Should prefer client protocol preference assert.same("foo", ws.protocol) assert(ws:close()) s:close() end; } assert(s:listen()) local _, host, port = s:localname() cq:wrap(function() assert_loop(s) end) cq:wrap(function() local ws = websocket.new_from_uri({ scheme = "ws"; host = host; port = port; }, {"foo", "my-awesome-protocol", "bar"}) assert(ws:connect()) assert.same("foo", ws.protocol) assert.same("lua-http websocket test", ws.headers:get("server")) assert(ws:close()) end) assert_loop(cq, TEST_TIMEOUT) assert.truthy(cq:empty()) end) end) lua-http-0.4/spec/zlib_spec.lua000066400000000000000000000040761400726324600165350ustar00rootroot00000000000000local ok, http_zlib = pcall(require, "http.zlib"); (ok and describe or pending)("zlib compat layer", function() it("round trips", function() local function test(str) local compressor = http_zlib.deflate() local decompressor = http_zlib.inflate() local z = compressor(str, true) assert.same(str, decompressor(z, true)) end test "foo" test "hi" test(("az"):rep(100000)) end) it("streaming round trips", function() local function test(...) 
			local compressor = http_zlib.deflate()
			local decompressor = http_zlib.inflate()
			local t = {...}
			local out = {}
			for i=1, #t do
				local z = compressor(t[i], false)
				out[i] = decompressor(z, false) or ""
			end
			out[#t+1] = decompressor(compressor("", true), true)
			assert.same(table.concat(t), table.concat(out))
		end
		test(
			"short string",
			("foo"):rep(100000),
			"middle",
			("bar"):rep(100000),
			"end"
		)
	end)
	it("decompressor errors on invalid input", function()
		local decompressor = http_zlib.inflate()
		assert.has.errors(function()
			decompressor("asdfghjk", false)
		end)
	end)
	it("decompresses over multiple sections", function()
		-- for whatever reason for certain input streams, zlib will not consume it in one go
		local decompressor = http_zlib.inflate()
		decompressor("\31\139\8\0\0\0\0\0\0\3\237\93\235\142\35\199\117\254\61" ..
			"\122\138\50\39\22\103\34\178\73\206\117\119\110\182\44\217\177" ..
			"\16\43\82\188\107\27\182\32\44\154\205\34\217\59\205\110\170\47" ..
			"\195\161\101\1\190\4\200\15\7\206\143\188\72\18\196\129\99\195" ..
			"\242\43\204\190\66\158\36\223\57\167\170\187\154\108\114\102" ..
			"\163\93\95\96\105\177\34\217\93\85\231\212\185\87\157\170\179\23", false)
	end);
	-- lzlib doesn't report a missing end of string in inflate
	(http_zlib.engine == "lzlib" and pending or it)("decompressor fails on incorrect end_stream flag", function()
		local compressor = http_zlib.deflate()
		local decompressor = http_zlib.inflate()
		local z = compressor(("foo"):rep(100000), false)
		assert(#z > 0)
		assert.has.errors(function()
			decompressor(z, true)
		end)
	end)
end)