okio-okio-parent-1.14.0/ (source archive of commit 9d69b0ed2689d5a10dbd464a92a23bd6f541fa1b)

okio-okio-parent-1.14.0/.buildscript/deploy_snapshot.sh
#!/bin/bash
#
# Deploy a jar, source jar, and javadoc jar to Sonatype's snapshot repo.
#
# Adapted from https://coderwall.com/p/9b_lfq and
# http://benlimmer.com/2013/12/26/automatically-publish-javadoc-to-gh-pages-with-travis-ci/
SLUG="square/okio"
JDK="oraclejdk8"
BRANCH="master"
set -e
if [ "$TRAVIS_REPO_SLUG" != "$SLUG" ]; then
echo "Skipping snapshot deployment: wrong repository. Expected '$SLUG' but was '$TRAVIS_REPO_SLUG'."
elif [ "$TRAVIS_JDK_VERSION" != "$JDK" ]; then
echo "Skipping snapshot deployment: wrong JDK. Expected '$JDK' but was '$TRAVIS_JDK_VERSION'."
elif [ "$TRAVIS_PULL_REQUEST" != "false" ]; then
echo "Skipping snapshot deployment: was pull request."
elif [ "$TRAVIS_BRANCH" != "$BRANCH" ]; then
echo "Skipping snapshot deployment: wrong branch. Expected '$BRANCH' but was '$TRAVIS_BRANCH'."
else
echo "Deploying snapshot..."
mvn clean source:jar javadoc:jar deploy --settings=".buildscript/settings.xml" -Dmaven.test.skip=true
echo "Snapshot deployed!"
fi
okio-okio-parent-1.14.0/.buildscript/settings.xml
<settings>
  <servers>
    <server>
      <id>sonatype-nexus-snapshots</id>
      <username>${env.CI_DEPLOY_USERNAME}</username>
      <password>${env.CI_DEPLOY_PASSWORD}</password>
    </server>
  </servers>
</settings>
okio-okio-parent-1.14.0/.gitignore
.classpath
.project
.settings
eclipsebin
bin
gen
build
out
lib
target
pom.xml.*
release.properties
.idea
*.iml
*.ipr
*.iws
classes
obj
.DS_Store
okio-okio-parent-1.14.0/.travis.yml
language: java
jdk:
  - oraclejdk8

addons:
  apt:
    packages:
      - oracle-java8-installer # Updates JDK 8 to the latest available.

after_success:
  - .buildscript/deploy_snapshot.sh

env:
  global:
    - secure: "gpdzVacMwUxhoHU1Ettfowgx0axV/L12bjoR8O4iKbRskE3Wr8AgM2GXmFMjoMVDr7vy45YhtOatuSlSKkwZRfgNIcAcTv8axjaFFt7xnPozXXPTU+pkIfaw5DnHzCwJlOo29mmY767Hz4CLomJi8znqKl5VguPAqXo/I8BqKwc="
    - secure: "YMh3xjsPik4U9DORSFzyTlGMTGc5kXMeuhyu0/757g7fhG4fgwcuCFMm0hBJTUtBu5dV8Ao3EDqZj/hC7muiy7MODHPd3C/Q3LXolZViPVvu6cqaYFN4NGZpZY/fnxF7tz59lODE+OhvomI0RJHS34MdUDscnCAdYWjdkgmH8Y4="

branches:
  except:
    - gh-pages

notifications:
  email: false

sudo: false

cache:
  directories:
    - $HOME/.m2
okio-okio-parent-1.14.0/BUG-BOUNTY.md
Serious about security
======================
Square recognizes the important contributions the security research community
can make. We therefore encourage reporting security issues with the code
contained in this repository.
If you believe you have discovered a security vulnerability, please follow the
guidelines at https://hackerone.com/square-open-source
okio-okio-parent-1.14.0/CHANGELOG.md
Change Log
==========
## Version 1.14.0
_2018-02-11_
* New: `Buffer.UnsafeCursor` provides direct access to Okio internals. This API
is like Okio's version of Java reflection: it's a very powerful API that can
be used for great things and dangerous things alike. The documentation is
extensive and anyone using it should review it carefully before proceeding!
* New: Change `BufferedSource` to implement `java.nio.ReadableByteChannel` and
`BufferedSink` to implement `java.nio.WritableByteChannel`. Now it's a little
easier to interop between Okio and NIO.
* New: Automatic module name of `okio` for use with the Java Platform Module
System.
* New: Optimize `Buffer.getByte()` to search backwards when doing so will be
more efficient.
* Fix: Honor the requested byte count in `InflaterSource`. Previously this
class could return more bytes than requested.
* Fix: Improve a performance bug in `AsyncTimeout.sink().write()`.
## Version 1.13.0
_2017-05-12_
* **Okio now uses `@Nullable` to annotate all possibly-null values.** We've
added a compile-time dependency on the JSR 305 annotations. This is a
[provided][maven_provided] dependency and does not need to be included in
your build configuration, `.jar` file, or `.apk`. We use
`@ParametersAreNonnullByDefault` and all parameters and return types are
never null unless explicitly annotated `@Nullable`.
* **Warning: this release is source-incompatible for Kotlin users.**
Nullability was previously ambiguous and lenient but now the compiler will
enforce strict null checks.
## Version 1.12.0
_2017-04-11_
* **Fix: Change Pipe's sink.flush() to not block.** Previously closing a pipe's
sink would block until the source had been exhausted. In practice this
blocked the caller for no benefit.
* **Fix: Change `writeUtf8CodePoint()` to emit `?` for partial surrogates.**
The previous behavior was inconsistent: given a malformed string with a
partial surrogate, `writeUtf8()` emitted `?` but `writeUtf8CodePoint()` threw
an `IllegalArgumentException`. Most applications will never encounter partial
surrogates, but for those that do this behavior was unexpected.
* New: Allow length of `readUtf8LineStrict()` to be limited.
* New: `Utf8.size()` method to get the number of bytes required to encode a
string as UTF-8. This may be useful for length-prefixed encodings.
* New: SHA-512 hash and HMAC APIs.
## Version 1.11.0
_2016-10-11_
* **Fix: The four-argument overload of `Buffer.writeString()` had a major bug
where it didn't respect offsets if the specified charset was UTF-8.** This
was because our short-circuit optimization omitted necessary offset
parameters.
* New: HMAC support in `HashingSource`, `HashingSink`, `ByteString`, and
`Buffer`. This makes it easy to create a keyed-hash message authentication
code (HMAC) wherever your data is. Unlike the other hashes, HMAC uses a
`ByteString` secret key for authentication.
* New: `ByteString.of(ByteBuffer)` makes it easier to mix NIO with Okio.
## Version 1.10.0
_2016-08-28_
* Fix: Support reading files larger than 2 GiB with `GzipSource`. Previously
attempting to decompress such files would fail due to an overflow when
validating the total length.
* Fix: Exit the watchdog thread after being idle for 60 seconds. This should
make it possible for class unloaders to fully unload Okio.
* New: `Okio.blackhole()` returns a sink where all bytes written are discarded.
This is Okio's equivalent of `/dev/null`.
* New: Encode a string with any charset using `ByteString.encodeString()` and
decode strings in any charset using `ByteString.string()`. Most applications
should prefer `ByteString.encodeUtf8()` and `ByteString.utf8()` unless it's
necessary to support a legacy charset.
* New: `GzipSink.deflater()` makes it possible to configure the compression
level.
## Version 1.9.0
_2016-07-01_
* New: `Pipe` makes it easy to connect a producer thread to a consumer thread.
Reads block until data is available to read. Writes block if the pipe's buffer is
full. Both sources and sinks support timeouts.
* New: `BufferedSource.rangeEquals()` makes it easy to compare a range in a
stream to an expected value. This does the right thing: it blocks to load
the data required to return a definitive result. But it won't block
unnecessarily.
* New: `Timeout.waitUntilNotified()` makes it possible to use nice timeout
abstractions on Java's built-in wait/notify primitives.
* Fix: Don't return incorrect results when `HashingSource` does large reads.
There was a bug where it wasn't traversing through the segments of the buffer
being hashed. This means that `HashingSource` was returning incorrect answers
for any writes that spanned multiple segment boundaries.
## Version 1.8.0
_2016-05-02_
* New: `BufferedSource.select(Options)` API for reading one of a set of
expected values.
* New: Make `ByteString.toString()` and `Buffer.toString()` friendlier.
These methods return text if the byte string is valid UTF-8.
* New: APIs to match byte strings: `indexOf()`, `startsWith()`, and
`endsWith()`.
## Version 1.7.0
_2016-04-10_
* New: Change the segment size to 8 KiB. This has been reported to dramatically
improve performance in some applications.
* New: `md5()`, `sha1()`, and `sha256()` methods on `Buffer`. Also add a
`sha1()` method on `ByteString` for symmetry.
* New: `HashingSource` and `HashingSink`. These classes are Okio’s equivalent
to the JDK’s `DigestInputStream` and `DigestOutputStream`. They offer
convenient `md5()`, `sha1()`, and `sha256()` factory methods to avoid an
impossible `NoSuchAlgorithmException`.
* New: `ByteString.asByteBuffer()`.
* Fix: Limit snapshot byte strings to requested size.
* Fix: Change write timeouts to have a maximum write size. Previously large
writes could easily suffer timeouts because the entire write was subject to a
single timeout.
* Fix: Recover from EBADF failures, which could be triggered by asynchronously
closing a stream on older versions of Android.
* Fix: Don't share segments if doing so only saves a small copy. This should
improve performance for all applications.
* Fix: Optimize `BufferedSource.indexOfElement()` and `indexOf(ByteString)`.
Previously this method had a bug that caused it to be very slow on large
buffers.
## Version 1.6.0
_2015-08-25_
* New: `BufferedSource.indexOf(ByteString)` searches a source for the next
occurrence of a byte string.
* Fix: Recover from unexpected `AssertionError` thrown on Android 4.2.2 and
earlier when asynchronously closing a socket.
## Version 1.5.0
_2015-06-19_
* Sockets streams now throw `SocketTimeoutException`. This builds on new
extension point in `AsyncTimeout` to customize the exception when a timeout
occurs.
* New: `ByteString` now implements `Comparable`. The comparison sorts bytes as
unsigned: `ff` sorts after `00`.
## Version 1.4.0
_2015-05-16_
* **Timeout exception changed.** Previously `Timeout.throwIfReached()` would
throw `InterruptedIOException` on thread interruption, and `IOException` if
the deadline was reached. Now it throws `InterruptedIOException` in both
cases.
* Fix: throw `EOFException` when attempting to read digits from an empty
source. Previously this would crash with an unchecked exception.
* New: APIs to read and write UTF-8 code points without allocating strings.
* New: `BufferedSink` can now write substrings directly, potentially saving an
allocation for some callers.
* New: `ForwardingTimeout` class.
## Version 1.3.0
_2015-03-16_
* New: Read and write signed decimal and unsigned hexadecimal values in
`BufferedSource` and `BufferedSink`. Unlike the alternatives, these methods
don’t do any memory allocations!
* New: Segment sharing. This improves the runtime of operations like
`Buffer.clone()` and `Buffer.copyTo()` by sharing underlying segments between
buffers.
* New: `Buffer.snapshot()` returns an immutable snapshot of a buffer as a
`ByteString`. This builds on segment sharing so that snapshots are shallow,
immutable copies.
* New: `ByteString.rangeEquals()`.
* New: `ByteString.md5()` and `ByteString.sha256()`.
* New: `ByteString.base64Url()` returns URL-safe Base64. The existing
decoding method has been extended to support URL-safe Base64 input.
* New: `ByteString.substring()` returns a prefix, infix, or suffix.
* New: `Sink` now implements `java.io.Flushable`.
* Fix: `Buffer.write(Source, long)` now always writes fully. The previous
behavior would return as soon as any data had been written; this was
inconsistent with all other _write()_ methods in the API.
* Fix: don't leak empty segments in DeflaterSink and InflaterSource. (This was
unlikely to cause problems in practice.)
## Version 1.2.0
_2014-12-30_
* Fix: `Okio.buffer()` _always_ buffers for better predictability.
* Fix: Provide context when `readUtf8LineStrict()` throws.
* Fix: Buffers do not call through the `Source` on zero-byte writes.
## Version 1.1.0
_2014-12-11_
* Do UTF-8 encoding natively for a performance increase, particularly on Android.
* New APIs: `BufferedSink.emit()`, `BufferedSource.request()` and `BufferedSink.indexOfElement()`.
* Fixed a performance bug in `Buffer.indexOf()`
## Version 1.0.1
_2014-08-08_
* Added `read(byte[])`, `read(byte[], offset, byteCount)`, and
`void readFully(byte[])` to `BufferedSource`.
* Refined declared checked exceptions on `Buffer` methods.
## Version 1.0.0
_2014-05-23_
* Bumped release version. No other changes!
## Version 0.9.0
_2014-05-03_
* Use 0 as a sentinel for no timeout.
* Make AsyncTimeout public.
* Remove checked exception from Buffer.readByteArray.
## Version 0.8.0
_2014-04-24_
* Eagerly verify preconditions on public APIs.
* Quick return on Buffer instance equivalence.
* Add delegate types for Sink and Source.
* Small changes to the way deadlines are managed.
* Add append variant of Okio.sink for File.
* Methods to exhaust BufferedSource to byte[] and ByteString.
## Version 0.7.0
_2014-04-18_
* Don't use getters in timeout.
* Use the watchdog to interrupt sockets that have reached deadlines.
* Add java.io and java.nio file source/sink helpers.
## Version 0.6.1
_2014-04-17_
* Methods to read a buffered source fully in UTF-8 or supplied charset.
* API to read a byte[] directly.
* New methods to move all data from a source to a sink.
* Fix a bug on input stream exhaustion.
## Version 0.6.0
_2014-04-15_
* Make ByteString serializable.
* New API: `ByteString.of(byte[] data, int offset, int byteCount)`
* New API: stream-based copy, write, and read helpers.
## Version 0.5.0
_2014-04-08_
* Initial public release.
* Imported from OkHttp.
[maven_provided]: https://maven.apache.org/guides/introduction/introduction-to-dependency-mechanism.html
okio-okio-parent-1.14.0/CONTRIBUTING.md
Contributing
============
If you would like to contribute code to Okio you can do so through GitHub by
forking the repository and sending a pull request.
When submitting code, please make every effort to follow existing conventions
and style in order to keep the code as readable as possible. Please also make
sure your code compiles by running `mvn clean verify`. Checkstyle failures
during compilation indicate errors in your style and can be viewed in the
`checkstyle-result.xml` file.
Before your code can be accepted into the project you must also sign the
[Individual Contributor License Agreement (CLA)][1].
[1]: https://spreadsheets.google.com/spreadsheet/viewform?formkey=dDViT2xzUHAwRkI3X3k5Z0lQM091OGc6MQ&ndplr=1
okio-okio-parent-1.14.0/LICENSE.txt
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
okio-okio-parent-1.14.0/README.md
Okio
====
Okio is a library that complements `java.io` and `java.nio` to make it much
easier to access, store, and process your data.
ByteStrings and Buffers
-----------------------
Okio is built around two types that pack a lot of capability into a
straightforward API:
* [**ByteString**][3] is an immutable sequence of bytes. For character data, `String`
is fundamental. `ByteString` is String's long-lost brother, making it easy to
treat binary data as a value. This class is ergonomic: it knows how to encode
and decode itself as hex, base64, and UTF-8.
* [**Buffer**][4] is a mutable sequence of bytes. Like `ArrayList`, you don't need
to size your buffer in advance. You read and write buffers as a queue: write
data to the end and read it from the front. There's no obligation to manage
positions, limits, or capacities.
Internally, `ByteString` and `Buffer` do some clever things to save CPU and
memory. If you encode a UTF-8 string as a `ByteString`, it caches a reference to
that string so that if you decode it later, there's no work to do.
`Buffer` is implemented as a linked list of segments. When you move data from
one buffer to another, it _reassigns ownership_ of the segments rather than
copying the data across. This approach is particularly helpful for multithreaded
programs: a thread that talks to the network can exchange data with a worker
thread without any copying or ceremony.
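To make those two types concrete, here is a small, illustrative sketch (the class name and the sample strings are ours, not part of Okio) that encodes a `ByteString` and uses a `Buffer` as a byte queue:
```java
import java.io.IOException;
import okio.Buffer;
import okio.ByteString;

public final class ByteStringAndBufferExample {
  public static void main(String[] args) throws IOException {
    // An immutable value: encode once, then render as hex or base64 on demand.
    ByteString bytes = ByteString.encodeUtf8("Okio");
    System.out.println(bytes.hex());    // 4f6b696f
    System.out.println(bytes.base64()); // T2tpbw==

    // A mutable queue of bytes: write to the end, read from the front.
    Buffer buffer = new Buffer();
    buffer.writeUtf8("hello, ");
    buffer.writeUtf8("buffer");
    System.out.println(buffer.readUtf8(5)); // hello
    System.out.println(buffer.size());      // 8 bytes are still queued
  }
}
```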
Sources and Sinks
-----------------
An elegant part of the `java.io` design is how streams can be layered for
transformations like encryption and compression. Okio includes its own stream
types called [`Source`][5] and [`Sink`][6] that work like `InputStream` and
`OutputStream`, but with some key differences:
* **Timeouts.** The streams provide access to the timeouts of the underlying
I/O mechanism. Unlike the `java.io` socket streams, both `read()` and
`write()` calls honor timeouts.
* **Easy to implement.** `Source` declares three methods: `read()`, `close()`,
and `timeout()`. There are no hazards like `available()` or single-byte reads
that cause correctness and performance surprises.
* **Easy to use.** Although _implementations_ of `Source` and `Sink` have only
three methods to write, _callers_ are given a rich API with the
[`BufferedSource`][7] and [`BufferedSink`][8] interfaces. These interfaces give you
everything you need in one place.
* **No artificial distinction between byte streams and char streams.** It's all
data. Read and write it as bytes, UTF-8 strings, big-endian 32-bit integers,
little-endian shorts; whatever you want. No more `InputStreamReader`!
* **Easy to test.** The `Buffer` class implements both `BufferedSource` and
`BufferedSink` so your test code is simple and clear.
Sources and sinks interoperate with `InputStream` and `OutputStream`. You can
view any `Source` as an `InputStream`, and you can view any `InputStream` as a
`Source`. Similarly for `Sink` and `OutputStream`.
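As a minimal sketch of that interop (the file names are placeholders, not from this README), the following views a `FileInputStream` as a `Source`, a `FileOutputStream` as a `Sink`, and drains one into the other:
```java
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import okio.BufferedSink;
import okio.BufferedSource;
import okio.Okio;

public final class StreamInteropExample {
  public static void copy() throws IOException {
    // Any InputStream can be wrapped as a Source and any OutputStream as a Sink.
    try (BufferedSource source = Okio.buffer(Okio.source(new FileInputStream("in.dat")));
         BufferedSink sink = Okio.buffer(Okio.sink(new FileOutputStream("out.dat")))) {
      sink.writeAll(source); // Moves every byte from the source to the sink.
    }
  }
}
```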
Dependable
----------
Okio started as a component of [OkHttp][1], the capable HTTP+SPDY client
included in Android. It's well-exercised and ready to solve new problems.
Example: a PNG decoder
----------------------
Decoding the chunks of a PNG file demonstrates Okio in practice.
```java
private static final ByteString PNG_HEADER = ByteString.decodeHex("89504e470d0a1a0a");
public void decodePng(InputStream in) throws IOException {
try (BufferedSource pngSource = Okio.buffer(Okio.source(in))) {
ByteString header = pngSource.readByteString(PNG_HEADER.size());
if (!header.equals(PNG_HEADER)) {
throw new IOException("Not a PNG.");
}
while (true) {
Buffer chunk = new Buffer();
// Each chunk is a length, type, data, and CRC offset.
int length = pngSource.readInt();
String type = pngSource.readUtf8(4);
pngSource.readFully(chunk, length);
int crc = pngSource.readInt();
decodeChunk(type, chunk);
if (type.equals("IEND")) break;
}
}
}
private void decodeChunk(String type, Buffer chunk) {
if (type.equals("IHDR")) {
int width = chunk.readInt();
int height = chunk.readInt();
System.out.printf("%08x: %s %d x %d%n", chunk.size(), type, width, height);
} else {
System.out.printf("%08x: %s%n", chunk.size(), type);
}
}
```
Download
--------
Download [the latest JAR][2] or grab via Maven:
```xml
<dependency>
  <groupId>com.squareup.okio</groupId>
  <artifactId>okio</artifactId>
  <version>1.14.0</version>
</dependency>
```
or Gradle:
```groovy
compile 'com.squareup.okio:okio:1.14.0'
```
Snapshots of the development version are available in [Sonatype's `snapshots` repository][snap].
ProGuard
--------
If you are using ProGuard you might need to add the following option:
```
-dontwarn okio.**
```
License
--------
Copyright 2013 Square, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
[1]: https://github.com/square/okhttp
[2]: https://search.maven.org/remote_content?g=com.squareup.okio&a=okio&v=LATEST
[3]: http://square.github.io/okio/1.x/okio/okio/ByteString.html
[4]: http://square.github.io/okio/1.x/okio/okio/Buffer.html
[5]: http://square.github.io/okio/1.x/okio/okio/Source.html
[6]: http://square.github.io/okio/1.x/okio/okio/Sink.html
[7]: http://square.github.io/okio/1.x/okio/okio/BufferedSource.html
[8]: http://square.github.io/okio/1.x/okio/okio/BufferedSink.html
[snap]: https://oss.sonatype.org/content/repositories/snapshots/
okio-okio-parent-1.14.0/benchmarks/README.md
Okio Benchmarks
------------
This module contains microbenchmarks that can be used to measure various aspects of performance for Okio buffers. Okio benchmarks are written using JMH (version 1.4.1 at this time) and require Java 7.
Running Locally
-------------
To run benchmarks locally, first build and package the project modules:
```
$ mvn clean package
```
This should create a `benchmarks.jar` file in the `target` directory, which is a typical JMH benchmark JAR:
```
$ java -jar benchmarks/target/benchmarks.jar -l
Benchmarks:
com.squareup.okio.benchmarks.BufferPerformanceBench.cold
com.squareup.okio.benchmarks.BufferPerformanceBench.threads16hot
com.squareup.okio.benchmarks.BufferPerformanceBench.threads1hot
com.squareup.okio.benchmarks.BufferPerformanceBench.threads2hot
com.squareup.okio.benchmarks.BufferPerformanceBench.threads32hot
com.squareup.okio.benchmarks.BufferPerformanceBench.threads4hot
com.squareup.okio.benchmarks.BufferPerformanceBench.threads8hot
```
More help is available using the `-h` option. A typical run on Mac OS X looks like:
```
$ /usr/libexec/java_home -v 1.7 --exec java -jar benchmarks/target/benchmarks.jar \
"cold" -prof gc,hs_rt,stack -r 60 -t 4 \
-jvmArgsPrepend "-Xms1G -Xmx1G -XX:+HeapDumpOnOutOfMemoryError"
```
This executes the "cold" buffer usage benchmark, using the default number of measurement and warm-up iterations, forks, and threads; it adjusts the thread count to 4, iteration time to 60 seconds, fixes the heap size at 1GB and profiles the benchmark using JMH's GC, Hotspot runtime and stack sampling profilers.
okio-okio-parent-1.14.0/benchmarks/pom.xml
<project>
  <parent>
    <artifactId>okio-parent</artifactId>
    <groupId>com.squareup.okio</groupId>
    <version>1.14.0</version>
  </parent>
  <modelVersion>4.0.0</modelVersion>

  <artifactId>benchmarks</artifactId>
  <packaging>jar</packaging>
  <name>Okio Performance Benchmarks</name>

  <dependencies>
    <dependency>
      <groupId>org.openjdk.jmh</groupId>
      <artifactId>jmh-core</artifactId>
    </dependency>
    <dependency>
      <groupId>org.openjdk.jmh</groupId>
      <artifactId>jmh-generator-annprocess</artifactId>
      <scope>provided</scope>
    </dependency>
    <dependency>
      <groupId>com.squareup.okio</groupId>
      <artifactId>okio</artifactId>
      <version>${project.version}</version>
    </dependency>
  </dependencies>

  <build>
    <finalName>benchmarks</finalName>
    <plugins>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-compiler-plugin</artifactId>
      </plugin>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-shade-plugin</artifactId>
        <version>2.2</version>
        <executions>
          <execution>
            <phase>package</phase>
            <goals>
              <goal>shade</goal>
            </goals>
            <configuration>
              <finalName>${uberjar.name}</finalName>
              <transformers>
                <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
                  <mainClass>org.openjdk.jmh.Main</mainClass>
                </transformer>
              </transformers>
              <filters>
                <filter>
                  <artifact>*:*</artifact>
                  <excludes>
                    <exclude>META-INF/*.SF</exclude>
                    <exclude>META-INF/*.DSA</exclude>
                    <exclude>META-INF/*.RSA</exclude>
                  </excludes>
                </filter>
              </filters>
            </configuration>
          </execution>
        </executions>
      </plugin>
    </plugins>
  </build>
</project>
okio-okio-parent-1.14.0/benchmarks/src/main/java/com/squareup/okio/benchmarks/BufferCursorSeekBenchmark.java
/*
* Copyright (C) 2018 Square, Inc. and others.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.squareup.okio.benchmarks;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import okio.Buffer;
import org.openjdk.jmh.Main;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.runner.RunnerException;
@Fork(1)
@Warmup(iterations = 5, time = 2)
@Measurement(iterations = 5, time = 2)
@State(Scope.Benchmark)
@BenchmarkMode(Mode.SampleTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public class BufferCursorSeekBenchmark {
Buffer buffer;
Buffer.UnsafeCursor cursor;
@Param({ "2097152" })
int bufferSize; // 2 MB = 256 Segments
@Setup
public void setup() throws IOException {
byte[] source = new byte[8192];
buffer = new Buffer();
while (buffer.size() < bufferSize) {
buffer.write(source);
}
cursor = new Buffer.UnsafeCursor();
}
@Benchmark
public void seekBeginning() {
buffer.readUnsafe(cursor);
try {
cursor.seek(0);
} finally {
cursor.close();
}
}
@Benchmark
public void seekEnd() {
buffer.readUnsafe(cursor);
try {
cursor.seek(buffer.size() - 1);
} finally {
cursor.close();
}
}
@Benchmark
public void seekForward() {
buffer.readUnsafe(cursor);
try {
cursor.seek(0);
cursor.seek(1);
} finally {
cursor.close();
}
}
@Benchmark
public void seekBackward() {
buffer.readUnsafe(cursor);
try {
cursor.seek(buffer.size() - 1);
cursor.seek(buffer.size() - 2);
} finally {
cursor.close();
}
}
public static void main(String[] args) throws IOException, RunnerException {
Main.main(new String[] {
BufferCursorSeekBenchmark.class.getName()
});
}
}
okio-okio-parent-1.14.0/benchmarks/src/main/java/com/squareup/okio/benchmarks/BufferPerformanceBenchmark.java
/*
* Copyright (C) 2014 Square, Inc. and others.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.squareup.okio.benchmarks;
import java.io.EOFException;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.TimeUnit;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Group;
import org.openjdk.jmh.annotations.GroupThreads;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.annotations.Threads;
import org.openjdk.jmh.annotations.Warmup;
import okio.Buffer;
import okio.BufferedSource;
import okio.Okio;
import okio.Sink;
import okio.Timeout;
import static java.util.Objects.requireNonNull;
@Fork(1)
@Warmup(iterations = 10, time = 10)
@Measurement(iterations = 10, time = 10)
@State(Scope.Benchmark)
@BenchmarkMode(Mode.Throughput)
@OutputTimeUnit(TimeUnit.SECONDS)
public class BufferPerformanceBenchmark {
public static final File OriginPath =
new File(System.getProperty("okio.bench.origin.path", "/dev/urandom"));
/* Test Workload
*
* Each benchmark thread maintains three buffers; a receive buffer, a process buffer
* and a send buffer. At every operation:
*
* - We fill up the receive buffer using the origin, write the request to the process
* buffer, and consume the process buffer.
* - We fill up the process buffer using the origin, write the response to the send
* buffer, and consume the send buffer.
*
* We use an "origin" source that serves as a preexisting sequence of bytes we can read
* from the file system. The request and response bytes are initialized in the beginning
* and reused throughout the benchmark in order to eliminate GC effects.
*
* Typically, we simulate the usage of small reads and large writes. Requests and
* responses are satisfied with precomputed buffers to eliminate GC effects on
* results.
*
* There are two types of benchmark tests; hot tests are "pedal to the metal" and
* use all CPU they can take. These are useful to magnify performance effects of
* changes but are not realistic use cases that should drive optimization efforts.
* Cold tests introduce think time between the receiving of the request and sending
* of the response. They are more useful as a reasonably realistic workload where
* buffers can be read from and written to during request/response handling but
* may hide subtle effects of most changes on performance. Prefer to look at the cold
* benchmarks first to decide if a bottleneck is worth pursuing, then use the hot
* benchmarks to fine tune optimization efforts.
*
* Benchmark threads do not explicitly communicate between each other (except to sync
* iterations as needed by JMH).
*
* We simulate think time for each benchmark thread by parking the thread for a
* configurable number of microseconds (1000 by default).
*/
@Benchmark
@Threads(1)
public void threads1hot(HotBuffers buffers) throws IOException {
readWriteRecycle(buffers);
}
@Benchmark
@Threads(2)
public void threads2hot(HotBuffers buffers) throws IOException {
readWriteRecycle(buffers);
}
@Benchmark
@Threads(4)
public void threads4hot(HotBuffers buffers) throws IOException {
readWriteRecycle(buffers);
}
@Benchmark
@Threads(8)
public void threads8hot(HotBuffers buffers) throws IOException {
readWriteRecycle(buffers);
}
@Benchmark
@Threads(16)
public void threads16hot(HotBuffers buffers) throws IOException {
readWriteRecycle(buffers);
}
@Benchmark
@Threads(32)
public void threads32hot(HotBuffers buffers) throws IOException {
readWriteRecycle(buffers);
}
@Benchmark
@GroupThreads(1)
@Group("cold")
public void thinkReadHot(HotBuffers buffers) throws IOException {
buffers.receive(requestBytes).readAll(NullSink);
}
@Benchmark
@GroupThreads(3)
@Group("cold")
public void thinkWriteCold(ColdBuffers buffers) throws IOException {
buffers.transmit(responseBytes).readAll(NullSink);
}
private void readWriteRecycle(HotBuffers buffers) throws IOException {
buffers.receive(requestBytes).readAll(NullSink);
buffers.transmit(responseBytes).readAll(NullSink);
}
@Param({ "1000" })
int maxThinkMicros = 1000;
@Param({ "1024" })
int maxReadBytes = 1024;
@Param({ "1024" })
int maxWriteBytes = 1024;
@Param({ "2048" })
int requestSize = 2048;
@Param({ "1" })
int responseFactor = 1;
byte[] requestBytes;
byte[] responseBytes;
@Setup(Level.Trial)
public void storeRequestResponseData() throws IOException {
checkOrigin(OriginPath);
requestBytes = storeSourceData(new byte[requestSize]);
responseBytes = storeSourceData(new byte[requestSize * responseFactor]);
}
private byte[] storeSourceData(byte[] dest) throws IOException {
requireNonNull(dest, "dest == null");
try (BufferedSource source = Okio.buffer(Okio.source(OriginPath))) {
source.readFully(dest);
}
return dest;
}
private void checkOrigin(File path) throws IOException {
requireNonNull(path, "path == null");
if (!path.canRead()) {
throw new IllegalArgumentException("can not access: " + path);
}
try (InputStream in = new FileInputStream(path)) {
int available = in.read();
if (available < 0) {
throw new IllegalArgumentException("can not read: " + path);
}
}
}
/*
* The state class hierarchy is larger than it needs to be due to a JMH
* issue where states inheriting setup methods depending on another state
* do not get initialized correctly from benchmark methods making use
* of groups. To work around, we leave the common setup and teardown code
* in superclasses and move the setup method depending on the bench state
* to subclasses. Without the workaround, it would have been enough for
* `ColdBuffers` to inherit from `HotBuffers`.
*/
@State(Scope.Thread)
public static class ColdBuffers extends BufferSetup {
@Setup(Level.Trial)
public void setupBench(BufferPerformanceBenchmark bench) {
super.bench = bench;
}
@Setup(Level.Invocation)
public void lag() throws InterruptedException {
TimeUnit.MICROSECONDS.sleep(bench.maxThinkMicros);
}
}
@State(Scope.Thread)
public static class HotBuffers extends BufferSetup {
@Setup(Level.Trial)
public void setupBench(BufferPerformanceBenchmark bench) {
super.bench = bench;
}
}
@State(Scope.Thread)
public abstract static class BufferSetup extends BufferState {
BufferPerformanceBenchmark bench;
public BufferedSource receive(byte[] bytes) throws IOException {
return super.receive(bytes, bench.maxReadBytes);
}
public BufferedSource transmit(byte[] bytes) throws IOException {
return super.transmit(bytes, bench.maxWriteBytes);
}
@TearDown
public void dispose() throws IOException {
releaseBuffers();
}
}
public static class BufferState {
@SuppressWarnings("resource")
final Buffer received = new Buffer();
@SuppressWarnings("resource")
final Buffer sent = new Buffer();
@SuppressWarnings("resource")
final Buffer process = new Buffer();
public void releaseBuffers() throws IOException {
received.clear();
sent.clear();
process.clear();
}
/**
* Fills up the receive buffer, hands off to process buffer and returns it for consuming.
* Expects receive and process buffers to be empty. Leaves the receive buffer empty and
* process buffer full.
*/
protected Buffer receive(byte[] bytes, int maxChunkSize) throws IOException {
writeChunked(received, bytes, maxChunkSize).readAll(process);
return process;
}
/**
* Fills up the process buffer, hands off to send buffer and returns it for consuming.
* Expects process and sent buffers to be empty. Leaves the process buffer empty and
* sent buffer full.
*/
protected BufferedSource transmit(byte[] bytes, int maxChunkSize) throws IOException {
writeChunked(process, bytes, maxChunkSize).readAll(sent);
return sent;
}
private BufferedSource writeChunked(Buffer buffer, byte[] bytes, final int chunkSize) {
int remaining = bytes.length;
int offset = 0;
while (remaining > 0) {
int bytesToWrite = Math.min(remaining, chunkSize);
buffer.write(bytes, offset, bytesToWrite);
remaining -= bytesToWrite;
offset += bytesToWrite;
}
return buffer;
}
}
@SuppressWarnings("resource")
private static final Sink NullSink = new Sink() {
@Override public void write(Buffer source, long byteCount) throws EOFException {
source.skip(byteCount);
}
@Override public void flush() {
// nothing
}
@Override public Timeout timeout() {
return Timeout.NONE;
}
@Override public void close() {
// nothing
}
@Override public String toString() {
return "NullSink{}";
}
};
}
okio-okio-parent-1.14.0/benchmarks/src/main/java/com/squareup/okio/benchmarks/GetByteBenchmark.java
/*
* Copyright (C) 2018 Square, Inc. and others.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.squareup.okio.benchmarks;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import okio.Buffer;
import org.openjdk.jmh.Main;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.runner.RunnerException;
@Fork(1)
@Warmup(iterations = 5, time = 2)
@Measurement(iterations = 5, time = 2)
@State(Scope.Benchmark)
@BenchmarkMode(Mode.SampleTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public class GetByteBenchmark {
Buffer buffer;
@Param({ "2097152" })
int bufferSize; // 2 MB = 256 Segments
@Setup
public void setup() throws IOException {
buffer = new Buffer();
while (buffer.size() < bufferSize) {
buffer.write(new byte[8192]);
}
}
@Benchmark
public void getByteBeginning() {
buffer.getByte(0);
}
@Benchmark
public void getByteEnd() {
buffer.getByte(buffer.size() - 1);
}
@Benchmark
public void getByteMiddle() {
buffer.getByte(buffer.size() / 2);
}
public static void main(String[] args) throws IOException, RunnerException {
Main.main(new String[] {
GetByteBenchmark.class.getName()
});
}
}
okio-okio-parent-1.14.0/benchmarks/src/main/java/com/squareup/okio/benchmarks/IndexOfElementBenchmark.java
/*
* Copyright (C) 2016 Square, Inc. and others.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.squareup.okio.benchmarks;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import okio.Buffer;
import okio.ByteString;
import org.openjdk.jmh.Main;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.runner.RunnerException;
@Fork(1)
@Warmup(iterations = 5, time = 2)
@Measurement(iterations = 5, time = 2)
@State(Scope.Benchmark)
@BenchmarkMode(Mode.SampleTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public class IndexOfElementBenchmark {
ByteString byteString = ByteString.encodeUtf8("abcd");
Buffer buffer;
@Param({ "32768" })
int bufferSize;
@Setup
public void setup() throws IOException {
buffer = new Buffer()
.write(new byte[bufferSize / 2])
.write(byteString)
.write(new byte[(bufferSize / 2) - byteString.size()]);
}
@Benchmark
public void indexOfByte() throws IOException {
buffer.indexOf((byte) 'b', 0L);
}
@Benchmark
public void indexOfByteString() throws IOException {
buffer.indexOf(byteString, 0L);
}
@Benchmark
public void indexOfElement() throws IOException {
buffer.indexOfElement(byteString, 0L);
}
public static void main(String[] args) throws IOException, RunnerException {
Main.main(new String[] {
IndexOfElementBenchmark.class.getName()
});
}
}
okio-okio-parent-1.14.0/checkstyle.xml
[Checkstyle configuration XML; the file's content is not recoverable from this dump.]
okio-okio-parent-1.14.0/deploy_javadoc.sh
#!/bin/bash
set -ex
REPO="git@github.com:square/okio.git"
GROUP_ID="com.squareup.okio"
ARTIFACT_ID="okio"
DIR=temp-clone
# Delete any existing temporary website clone
rm -rf $DIR
# Clone the current repo into temp folder
git clone $REPO $DIR
# Move working directory into temp folder
cd $DIR
# Checkout and track the gh-pages branch
git checkout -t origin/gh-pages
# Delete everything
rm -rf *
# Download the latest javadoc
curl -L "https://search.maven.org/remote_content?g=$GROUP_ID&a=$ARTIFACT_ID&v=LATEST&c=javadoc" > javadoc.zip
unzip javadoc.zip
rm javadoc.zip
# Stage all files in git and create a commit
git add .
git add -u
git commit -m "Website at $(date)"
# Push the new files up to GitHub
git push origin gh-pages
# Delete our temp folder
cd ..
rm -rf $DIR
okio-okio-parent-1.14.0/okio/pom.xml
<project>
  <modelVersion>4.0.0</modelVersion>

  <parent>
    <groupId>com.squareup.okio</groupId>
    <artifactId>okio-parent</artifactId>
    <version>1.14.0</version>
  </parent>

  <artifactId>okio</artifactId>
  <name>Okio</name>

  <dependencies>
    <dependency>
      <groupId>com.google.code.findbugs</groupId>
      <artifactId>jsr305</artifactId>
      <scope>provided</scope>
    </dependency>
    <dependency>
      <groupId>org.codehaus.mojo</groupId>
      <artifactId>animal-sniffer-annotations</artifactId>
      <optional>true</optional>
    </dependency>
    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <scope>test</scope>
    </dependency>
  </dependencies>

  <build>
    <plugins>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-jar-plugin</artifactId>
        <configuration>
          <archive>
            <manifestEntries>
              <Automatic-Module-Name>okio</Automatic-Module-Name>
            </manifestEntries>
          </archive>
        </configuration>
      </plugin>
      <plugin>
        <groupId>org.codehaus.mojo</groupId>
        <artifactId>animal-sniffer-maven-plugin</artifactId>
        <version>${animal.sniffer.version}</version>
        <executions>
          <execution>
            <phase>test</phase>
            <goals>
              <goal>check</goal>
            </goals>
          </execution>
        </executions>
        <configuration>
          <signature>
            <groupId>org.codehaus.mojo.signature</groupId>
            <artifactId>java16</artifactId>
            <version>1.1</version>
          </signature>
        </configuration>
      </plugin>
    </plugins>
  </build>
</project>
okio-okio-parent-1.14.0/okio/src/main/java/okio/AsyncTimeout.java
/*
* Copyright (C) 2014 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nullable;
import static okio.Util.checkOffsetAndCount;
/**
* This timeout uses a background thread to take action exactly when the timeout occurs. Use this to
* implement timeouts where they aren't supported natively, such as to sockets that are blocked on
* writing.
*
*
* <p>Subclasses should override {@link #timedOut} to take action when a timeout occurs. This method
* will be invoked by the shared watchdog thread so it should not do any long-running operations.
* Otherwise we risk starving other timeouts from being triggered.
*
*
* <p>Use {@link #sink} and {@link #source} to apply this timeout to a stream. The returned value
* will apply the timeout to each operation on the wrapped stream.
*
*
* <p>Callers should call {@link #enter} before doing work that is subject to timeouts, and {@link
* #exit} afterwards. The return value of {@link #exit} indicates whether a timeout was triggered.
* Note that the call to {@link #timedOut} is asynchronous, and may be called after {@link #exit}.
*/
public class AsyncTimeout extends Timeout {
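// Illustrative usage sketch (not part of the original source). The socket and the
// blocking call below are hypothetical stand-ins for whatever resource a subclass guards:
//
//   AsyncTimeout timeout = new AsyncTimeout() {
//     @Override protected void timedOut() {
//       closeSocketQuietly(); // hypothetical: unblock the stuck operation
//     }
//   };
//   timeout.timeout(5, TimeUnit.SECONDS);
//   timeout.enter();
//   try {
//     blockingWrite();        // hypothetical work that may hang
//   } finally {
//     if (timeout.exit()) throw new InterruptedIOException("timeout");
//   }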
/**
* Don't write more than 64 KiB of data at a time, give or take a segment. Otherwise slow
* connections may suffer timeouts even when they're making (slow) progress. Without this, writing
* a single 1 MiB buffer may never succeed on a sufficiently slow connection.
*/
private static final int TIMEOUT_WRITE_SIZE = 64 * 1024;
/** Duration for the watchdog thread to be idle before it shuts itself down. */
private static final long IDLE_TIMEOUT_MILLIS = TimeUnit.SECONDS.toMillis(60);
private static final long IDLE_TIMEOUT_NANOS = TimeUnit.MILLISECONDS.toNanos(IDLE_TIMEOUT_MILLIS);
/**
* The watchdog thread processes a linked list of pending timeouts, sorted in the order to be
* triggered. This class synchronizes on AsyncTimeout.class. This lock guards the queue.
*
*
* <p>Head's 'next' points to the first element of the linked list. The first element is the next
* node to time out, or null if the queue is empty. The head is null until the watchdog thread is
* started and also after being idle for {@link #IDLE_TIMEOUT_MILLIS}.
*/
static @Nullable AsyncTimeout head;
/** True if this node is currently in the queue. */
private boolean inQueue;
/** The next node in the linked list. */
private @Nullable AsyncTimeout next;
/** If scheduled, this is the time that the watchdog should time this out. */
private long timeoutAt;
public final void enter() {
if (inQueue) throw new IllegalStateException("Unbalanced enter/exit");
long timeoutNanos = timeoutNanos();
boolean hasDeadline = hasDeadline();
if (timeoutNanos == 0 && !hasDeadline) {
return; // No timeout and no deadline? Don't bother with the queue.
}
inQueue = true;
scheduleTimeout(this, timeoutNanos, hasDeadline);
}
private static synchronized void scheduleTimeout(
AsyncTimeout node, long timeoutNanos, boolean hasDeadline) {
// Start the watchdog thread and create the head node when the first timeout is scheduled.
if (head == null) {
head = new AsyncTimeout();
new Watchdog().start();
}
long now = System.nanoTime();
if (timeoutNanos != 0 && hasDeadline) {
// Compute the earliest event; either timeout or deadline. Because nanoTime can wrap around,
// Math.min() is undefined for absolute values, but meaningful for relative ones.
node.timeoutAt = now + Math.min(timeoutNanos, node.deadlineNanoTime() - now);
} else if (timeoutNanos != 0) {
node.timeoutAt = now + timeoutNanos;
} else if (hasDeadline) {
node.timeoutAt = node.deadlineNanoTime();
} else {
throw new AssertionError();
}
// Insert the node in sorted order.
long remainingNanos = node.remainingNanos(now);
for (AsyncTimeout prev = head; true; prev = prev.next) {
if (prev.next == null || remainingNanos < prev.next.remainingNanos(now)) {
node.next = prev.next;
prev.next = node;
if (prev == head) {
AsyncTimeout.class.notify(); // Wake up the watchdog when inserting at the front.
}
break;
}
}
}
/** Returns true if the timeout occurred. */
public final boolean exit() {
if (!inQueue) return false;
inQueue = false;
return cancelScheduledTimeout(this);
}
/** Returns true if the timeout occurred. */
private static synchronized boolean cancelScheduledTimeout(AsyncTimeout node) {
// Remove the node from the linked list.
for (AsyncTimeout prev = head; prev != null; prev = prev.next) {
if (prev.next == node) {
prev.next = node.next;
node.next = null;
return false;
}
}
// The node wasn't found in the linked list: it must have timed out!
return true;
}
/**
* Returns the amount of time left until the time out. This will be negative if the timeout has
* elapsed and the timeout should occur immediately.
*/
private long remainingNanos(long now) {
return timeoutAt - now;
}
/**
* Invoked by the watchdog thread when the time between calls to {@link #enter()} and {@link
* #exit()} has exceeded the timeout.
*/
protected void timedOut() {
}
/**
* Returns a new sink that delegates to {@code sink}, using this to implement timeouts. This works
* best if {@link #timedOut} is overridden to interrupt {@code sink}'s current operation.
*/
public final Sink sink(final Sink sink) {
return new Sink() {
@Override public void write(Buffer source, long byteCount) throws IOException {
checkOffsetAndCount(source.size, 0, byteCount);
while (byteCount > 0L) {
// Count how many bytes to write. This loop guarantees we split on a segment boundary.
long toWrite = 0L;
for (Segment s = source.head; toWrite < TIMEOUT_WRITE_SIZE; s = s.next) {
int segmentSize = s.limit - s.pos;
toWrite += segmentSize;
if (toWrite >= byteCount) {
toWrite = byteCount;
break;
}
}
// Emit one write. Only this section is subject to the timeout.
boolean throwOnTimeout = false;
enter();
try {
sink.write(source, toWrite);
byteCount -= toWrite;
throwOnTimeout = true;
} catch (IOException e) {
throw exit(e);
} finally {
exit(throwOnTimeout);
}
}
}
@Override public void flush() throws IOException {
boolean throwOnTimeout = false;
enter();
try {
sink.flush();
throwOnTimeout = true;
} catch (IOException e) {
throw exit(e);
} finally {
exit(throwOnTimeout);
}
}
@Override public void close() throws IOException {
boolean throwOnTimeout = false;
enter();
try {
sink.close();
throwOnTimeout = true;
} catch (IOException e) {
throw exit(e);
} finally {
exit(throwOnTimeout);
}
}
@Override public Timeout timeout() {
return AsyncTimeout.this;
}
@Override public String toString() {
return "AsyncTimeout.sink(" + sink + ")";
}
};
}
/**
* Returns a new source that delegates to {@code source}, using this to implement timeouts. This
* works best if {@link #timedOut} is overridden to interrupt {@code sink}'s current operation.
*/
public final Source source(final Source source) {
return new Source() {
@Override public long read(Buffer sink, long byteCount) throws IOException {
boolean throwOnTimeout = false;
enter();
try {
long result = source.read(sink, byteCount);
throwOnTimeout = true;
return result;
} catch (IOException e) {
throw exit(e);
} finally {
exit(throwOnTimeout);
}
}
@Override public void close() throws IOException {
boolean throwOnTimeout = false;
try {
source.close();
throwOnTimeout = true;
} catch (IOException e) {
throw exit(e);
} finally {
exit(throwOnTimeout);
}
}
@Override public Timeout timeout() {
return AsyncTimeout.this;
}
@Override public String toString() {
return "AsyncTimeout.source(" + source + ")";
}
};
}
/**
* Throws an IOException if {@code throwOnTimeout} is {@code true} and a timeout occurred. See
* {@link #newTimeoutException(java.io.IOException)} for the type of exception thrown.
*/
final void exit(boolean throwOnTimeout) throws IOException {
boolean timedOut = exit();
if (timedOut && throwOnTimeout) throw newTimeoutException(null);
}
/**
* Returns either {@code cause} or an IOException that's caused by {@code cause} if a timeout
* occurred. See {@link #newTimeoutException(java.io.IOException)} for the type of exception
* returned.
*/
final IOException exit(IOException cause) throws IOException {
if (!exit()) return cause;
return newTimeoutException(cause);
}
/**
* Returns an {@link IOException} to represent a timeout. By default this method returns {@link
* java.io.InterruptedIOException}. If {@code cause} is non-null it is set as the cause of the
* returned exception.
*/
protected IOException newTimeoutException(@Nullable IOException cause) {
InterruptedIOException e = new InterruptedIOException("timeout");
if (cause != null) {
e.initCause(cause);
}
return e;
}
private static final class Watchdog extends Thread {
Watchdog() {
super("Okio Watchdog");
setDaemon(true);
}
public void run() {
while (true) {
try {
AsyncTimeout timedOut;
synchronized (AsyncTimeout.class) {
timedOut = awaitTimeout();
// Didn't find a node to interrupt. Try again.
if (timedOut == null) continue;
// The queue is completely empty. Let this thread exit and let another watchdog thread
// get created on the next call to scheduleTimeout().
if (timedOut == head) {
head = null;
return;
}
}
// Close the timed out node.
timedOut.timedOut();
} catch (InterruptedException ignored) {
}
}
}
}
/**
* Removes and returns the node at the head of the list, waiting for it to time out if necessary.
* This returns {@link #head} if there was no node at the head of the list when starting, and
* there continues to be no node after waiting {@code IDLE_TIMEOUT_NANOS}. It returns null if a
* new node was inserted while waiting. Otherwise this returns the node being waited on that has
* been removed.
*/
static @Nullable AsyncTimeout awaitTimeout() throws InterruptedException {
// Get the next eligible node.
AsyncTimeout node = head.next;
// The queue is empty. Wait until either something is enqueued or the idle timeout elapses.
if (node == null) {
long startNanos = System.nanoTime();
AsyncTimeout.class.wait(IDLE_TIMEOUT_MILLIS);
return head.next == null && (System.nanoTime() - startNanos) >= IDLE_TIMEOUT_NANOS
? head // The idle timeout elapsed.
: null; // The situation has changed.
}
long waitNanos = node.remainingNanos(System.nanoTime());
// The head of the queue hasn't timed out yet. Await that.
if (waitNanos > 0) {
// Waiting is made complicated by the fact that we work in nanoseconds,
// but the API wants (millis, nanos) in two arguments.
long waitMillis = waitNanos / 1000000L;
waitNanos -= (waitMillis * 1000000L);
AsyncTimeout.class.wait(waitMillis, (int) waitNanos);
return null;
}
// The head of the queue has timed out. Remove it.
head.next = node.next;
node.next = null;
return node;
}
}
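// --- Illustrative usage sketch, not part of the original file. It shows the enter()/exit()
// contract and the sink() wrapper described in the class Javadoc above. The socket, the
// 10-second timeout, and the class name are hypothetical.
final class AsyncTimeoutUsageSketch {
static Sink sinkWithTimeout(Sink rawSink, final java.net.Socket socket) {
AsyncTimeout timeout = new AsyncTimeout() {
@Override protected void timedOut() {
// Runs on the shared watchdog thread; keep it short. Closing the socket unblocks a
// stalled write so the wrapped sink fails with an InterruptedIOException.
try {
socket.close();
} catch (IOException ignored) {
}
}
};
timeout.timeout(10, TimeUnit.SECONDS); // Hypothetical per-operation timeout.
return timeout.sink(rawSink); // enter()/exit() wrap each write, flush, and close.
}
}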
okio-okio-parent-1.14.0/okio/src/main/java/okio/Base64.java 0000664 0000000 0000000 00000012705 13240174456 0023224 0 ustar 00root root 0000000 0000000 /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @author Alexander Y. Kleymenov
*/
package okio;
import java.io.UnsupportedEncodingException;
final class Base64 {
private Base64() {
}
public static byte[] decode(String in) {
// Ignore trailing '=' padding and whitespace from the input.
int limit = in.length();
for (; limit > 0; limit--) {
char c = in.charAt(limit - 1);
if (c != '=' && c != '\n' && c != '\r' && c != ' ' && c != '\t') {
break;
}
}
// If the input includes whitespace, this output array will be longer than necessary.
byte[] out = new byte[(int) (limit * 6L / 8L)];
int outCount = 0;
int inCount = 0;
int word = 0;
for (int pos = 0; pos < limit; pos++) {
char c = in.charAt(pos);
int bits;
if (c >= 'A' && c <= 'Z') {
// char ASCII value
// A 65 0
// Z 90 25 (ASCII - 65)
bits = c - 65;
} else if (c >= 'a' && c <= 'z') {
// char ASCII value
// a 97 26
// z 122 51 (ASCII - 71)
bits = c - 71;
} else if (c >= '0' && c <= '9') {
// char ASCII value
// 0 48 52
// 9 57 61 (ASCII + 4)
bits = c + 4;
} else if (c == '+' || c == '-') {
bits = 62;
} else if (c == '/' || c == '_') {
bits = 63;
} else if (c == '\n' || c == '\r' || c == ' ' || c == '\t') {
continue;
} else {
return null;
}
// Append this char's 6 bits to the word.
word = (word << 6) | (byte) bits;
// For every 4 chars of input, we accumulate 24 bits of output. Emit 3 bytes.
inCount++;
if (inCount % 4 == 0) {
out[outCount++] = (byte) (word >> 16);
out[outCount++] = (byte) (word >> 8);
out[outCount++] = (byte) word;
}
}
int lastWordChars = inCount % 4;
if (lastWordChars == 1) {
// We read 1 char followed by "===". But 6 bits is a truncated byte! Fail.
return null;
} else if (lastWordChars == 2) {
// We read 2 chars followed by "==". Emit 1 byte with 8 of those 12 bits.
word = word << 12;
out[outCount++] = (byte) (word >> 16);
} else if (lastWordChars == 3) {
// We read 3 chars, followed by "=". Emit 2 bytes for 16 of those 18 bits.
word = word << 6;
out[outCount++] = (byte) (word >> 16);
out[outCount++] = (byte) (word >> 8);
}
// If we sized our out array perfectly, we're done.
if (outCount == out.length) return out;
// Copy the decoded bytes to a new, right-sized array.
byte[] prefix = new byte[outCount];
System.arraycopy(out, 0, prefix, 0, outCount);
return prefix;
}
private static final byte[] MAP = new byte[] {
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S',
'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',
'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4',
'5', '6', '7', '8', '9', '+', '/'
};
private static final byte[] URL_MAP = new byte[] {
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S',
'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',
'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4',
'5', '6', '7', '8', '9', '-', '_'
};
public static String encode(byte[] in) {
return encode(in, MAP);
}
public static String encodeUrl(byte[] in) {
return encode(in, URL_MAP);
}
private static String encode(byte[] in, byte[] map) {
int length = (in.length + 2) / 3 * 4;
byte[] out = new byte[length];
int index = 0, end = in.length - in.length % 3;
for (int i = 0; i < end; i += 3) {
out[index++] = map[(in[i] & 0xff) >> 2];
out[index++] = map[((in[i] & 0x03) << 4) | ((in[i + 1] & 0xff) >> 4)];
out[index++] = map[((in[i + 1] & 0x0f) << 2) | ((in[i + 2] & 0xff) >> 6)];
out[index++] = map[(in[i + 2] & 0x3f)];
}
switch (in.length % 3) {
case 1:
out[index++] = map[(in[end] & 0xff) >> 2];
out[index++] = map[(in[end] & 0x03) << 4];
out[index++] = '=';
out[index++] = '=';
break;
case 2:
out[index++] = map[(in[end] & 0xff) >> 2];
out[index++] = map[((in[end] & 0x03) << 4) | ((in[end + 1] & 0xff) >> 4)];
out[index++] = map[((in[end + 1] & 0x0f) << 2)];
out[index++] = '=';
break;
}
try {
return new String(out, "US-ASCII");
} catch (UnsupportedEncodingException e) {
throw new AssertionError(e);
}
}
}
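// --- Illustrative round-trip sketch, not part of the original file: 2 input bytes become
// 3 base64 characters plus one '=' pad, and decode() skips that padding, as the code above
// documents. The class name and sample bytes are arbitrary.
final class Base64Sketch {
static void roundTrip() {
byte[] data = { 'O', 'k' };               // 0x4F 0x6B -> bit groups 010011 110110 1011(00).
String encoded = Base64.encode(data);     // "T2s=".
byte[] decoded = Base64.decode(encoded);  // Padding and whitespace are ignored while decoding.
assert java.util.Arrays.equals(data, decoded);
}
}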
okio-okio-parent-1.14.0/okio/src/main/java/okio/Buffer.java 0000664 0000000 0000000 00000217234 13240174456 0023415 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2014 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import java.io.Closeable;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.ByteChannel;
import java.nio.charset.Charset;
import java.security.InvalidKeyException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import javax.annotation.Nullable;
import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;
import static okio.Util.checkOffsetAndCount;
import static okio.Util.reverseBytesLong;
/**
* A collection of bytes in memory.
*
* <p>Moving data from one buffer to another is fast. Instead
* of copying bytes from one place in memory to another, this class just changes
* ownership of the underlying byte arrays.
*
* <p>This buffer grows with your data. Just like ArrayList,
* each buffer starts small. It consumes only the memory it needs to.
*
* <p>This buffer pools its byte arrays. When you allocate a
* byte array in Java, the runtime must zero-fill the requested array before
* returning it to you. Even if you're going to write over that space anyway.
* This class avoids zero-fill and GC churn by pooling byte arrays.
*/
public final class Buffer implements BufferedSource, BufferedSink, Cloneable, ByteChannel {
private static final byte[] DIGITS =
{ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };
static final int REPLACEMENT_CHARACTER = '\ufffd';
@Nullable Segment head;
long size;
public Buffer() {
}
/** Returns the number of bytes currently in this buffer. */
public long size() {
return size;
}
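// Illustrative sketch, not part of the original source: Buffer as a growable byte queue, per
// the class Javadoc above. The sample text is arbitrary.
private static void basicUsageSketch() throws EOFException {
Buffer buffer = new Buffer();
buffer.writeUtf8("hello");       // Appends to the tail; the buffer grows as needed.
assert buffer.size() == 5;
String s = buffer.readUtf8(3);   // Consumes from the head.
assert s.equals("hel") && buffer.size() == 2;
}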
@Override public Buffer buffer() {
return this;
}
@Override public OutputStream outputStream() {
return new OutputStream() {
@Override public void write(int b) {
writeByte((byte) b);
}
@Override public void write(byte[] data, int offset, int byteCount) {
Buffer.this.write(data, offset, byteCount);
}
@Override public void flush() {
}
@Override public void close() {
}
@Override public String toString() {
return Buffer.this + ".outputStream()";
}
};
}
@Override public Buffer emitCompleteSegments() {
return this; // Nowhere to emit to!
}
@Override public BufferedSink emit() {
return this; // Nowhere to emit to!
}
@Override public boolean exhausted() {
return size == 0;
}
@Override public void require(long byteCount) throws EOFException {
if (size < byteCount) throw new EOFException();
}
@Override public boolean request(long byteCount) {
return size >= byteCount;
}
@Override public InputStream inputStream() {
return new InputStream() {
@Override public int read() {
if (size > 0) return readByte() & 0xff;
return -1;
}
@Override public int read(byte[] sink, int offset, int byteCount) {
return Buffer.this.read(sink, offset, byteCount);
}
@Override public int available() {
return (int) Math.min(size, Integer.MAX_VALUE);
}
@Override public void close() {
}
@Override public String toString() {
return Buffer.this + ".inputStream()";
}
};
}
/** Copy the contents of this to {@code out}. */
public Buffer copyTo(OutputStream out) throws IOException {
return copyTo(out, 0, size);
}
/**
* Copy {@code byteCount} bytes from this, starting at {@code offset}, to
* {@code out}.
*/
public Buffer copyTo(OutputStream out, long offset, long byteCount) throws IOException {
if (out == null) throw new IllegalArgumentException("out == null");
checkOffsetAndCount(size, offset, byteCount);
if (byteCount == 0) return this;
// Skip segments that we aren't copying from.
Segment s = head;
for (; offset >= (s.limit - s.pos); s = s.next) {
offset -= (s.limit - s.pos);
}
// Copy from one segment at a time.
for (; byteCount > 0; s = s.next) {
int pos = (int) (s.pos + offset);
int toCopy = (int) Math.min(s.limit - pos, byteCount);
out.write(s.data, pos, toCopy);
byteCount -= toCopy;
offset = 0;
}
return this;
}
/** Copy {@code byteCount} bytes from this, starting at {@code offset}, to {@code out}. */
public Buffer copyTo(Buffer out, long offset, long byteCount) {
if (out == null) throw new IllegalArgumentException("out == null");
checkOffsetAndCount(size, offset, byteCount);
if (byteCount == 0) return this;
out.size += byteCount;
// Skip segments that we aren't copying from.
Segment s = head;
for (; offset >= (s.limit - s.pos); s = s.next) {
offset -= (s.limit - s.pos);
}
// Copy one segment at a time.
for (; byteCount > 0; s = s.next) {
Segment copy = s.sharedCopy();
copy.pos += offset;
copy.limit = Math.min(copy.pos + (int) byteCount, copy.limit);
if (out.head == null) {
out.head = copy.next = copy.prev = copy;
} else {
out.head.prev.push(copy);
}
byteCount -= copy.limit - copy.pos;
offset = 0;
}
return this;
}
/** Write the contents of this to {@code out}. */
public Buffer writeTo(OutputStream out) throws IOException {
return writeTo(out, size);
}
/** Write {@code byteCount} bytes from this to {@code out}. */
public Buffer writeTo(OutputStream out, long byteCount) throws IOException {
if (out == null) throw new IllegalArgumentException("out == null");
checkOffsetAndCount(size, 0, byteCount);
Segment s = head;
while (byteCount > 0) {
int toCopy = (int) Math.min(byteCount, s.limit - s.pos);
out.write(s.data, s.pos, toCopy);
s.pos += toCopy;
size -= toCopy;
byteCount -= toCopy;
if (s.pos == s.limit) {
Segment toRecycle = s;
head = s = toRecycle.pop();
SegmentPool.recycle(toRecycle);
}
}
return this;
}
/** Read and exhaust bytes from {@code in} to this. */
public Buffer readFrom(InputStream in) throws IOException {
readFrom(in, Long.MAX_VALUE, true);
return this;
}
/** Read {@code byteCount} bytes from {@code in} to this. */
public Buffer readFrom(InputStream in, long byteCount) throws IOException {
if (byteCount < 0) throw new IllegalArgumentException("byteCount < 0: " + byteCount);
readFrom(in, byteCount, false);
return this;
}
private void readFrom(InputStream in, long byteCount, boolean forever) throws IOException {
if (in == null) throw new IllegalArgumentException("in == null");
while (byteCount > 0 || forever) {
Segment tail = writableSegment(1);
int maxToCopy = (int) Math.min(byteCount, Segment.SIZE - tail.limit);
int bytesRead = in.read(tail.data, tail.limit, maxToCopy);
if (bytesRead == -1) {
if (forever) return;
throw new EOFException();
}
tail.limit += bytesRead;
size += bytesRead;
byteCount -= bytesRead;
}
}
/**
* Returns the number of bytes in segments that are not writable. This is the
* number of bytes that can be flushed immediately to an underlying sink
* without harming throughput.
*/
public long completeSegmentByteCount() {
long result = size;
if (result == 0) return 0;
// Omit the tail if it's still writable.
Segment tail = head.prev;
if (tail.limit < Segment.SIZE && tail.owner) {
result -= tail.limit - tail.pos;
}
return result;
}
@Override public byte readByte() {
if (size == 0) throw new IllegalStateException("size == 0");
Segment segment = head;
int pos = segment.pos;
int limit = segment.limit;
byte[] data = segment.data;
byte b = data[pos++];
size -= 1;
if (pos == limit) {
head = segment.pop();
SegmentPool.recycle(segment);
} else {
segment.pos = pos;
}
return b;
}
/** Returns the byte at {@code pos}. */
public byte getByte(long pos) {
checkOffsetAndCount(size, pos, 1);
if (size - pos > pos) {
for (Segment s = head; true; s = s.next) {
int segmentByteCount = s.limit - s.pos;
if (pos < segmentByteCount) return s.data[s.pos + (int) pos];
pos -= segmentByteCount;
}
} else {
pos -= size;
for (Segment s = head.prev; true; s = s.prev) {
pos += s.limit - s.pos;
if (pos >= 0) return s.data[s.pos + (int) pos];
}
}
}
@Override public short readShort() {
if (size < 2) throw new IllegalStateException("size < 2: " + size);
Segment segment = head;
int pos = segment.pos;
int limit = segment.limit;
// If the short is split across multiple segments, delegate to readByte().
if (limit - pos < 2) {
int s = (readByte() & 0xff) << 8
| (readByte() & 0xff);
return (short) s;
}
byte[] data = segment.data;
int s = (data[pos++] & 0xff) << 8
| (data[pos++] & 0xff);
size -= 2;
if (pos == limit) {
head = segment.pop();
SegmentPool.recycle(segment);
} else {
segment.pos = pos;
}
return (short) s;
}
@Override public int readInt() {
if (size < 4) throw new IllegalStateException("size < 4: " + size);
Segment segment = head;
int pos = segment.pos;
int limit = segment.limit;
// If the int is split across multiple segments, delegate to readByte().
if (limit - pos < 4) {
return (readByte() & 0xff) << 24
| (readByte() & 0xff) << 16
| (readByte() & 0xff) << 8
| (readByte() & 0xff);
}
byte[] data = segment.data;
int i = (data[pos++] & 0xff) << 24
| (data[pos++] & 0xff) << 16
| (data[pos++] & 0xff) << 8
| (data[pos++] & 0xff);
size -= 4;
if (pos == limit) {
head = segment.pop();
SegmentPool.recycle(segment);
} else {
segment.pos = pos;
}
return i;
}
@Override public long readLong() {
if (size < 8) throw new IllegalStateException("size < 8: " + size);
Segment segment = head;
int pos = segment.pos;
int limit = segment.limit;
// If the long is split across multiple segments, delegate to readInt().
if (limit - pos < 8) {
return (readInt() & 0xffffffffL) << 32
| (readInt() & 0xffffffffL);
}
byte[] data = segment.data;
long v = (data[pos++] & 0xffL) << 56
| (data[pos++] & 0xffL) << 48
| (data[pos++] & 0xffL) << 40
| (data[pos++] & 0xffL) << 32
| (data[pos++] & 0xffL) << 24
| (data[pos++] & 0xffL) << 16
| (data[pos++] & 0xffL) << 8
| (data[pos++] & 0xffL);
size -= 8;
if (pos == limit) {
head = segment.pop();
SegmentPool.recycle(segment);
} else {
segment.pos = pos;
}
return v;
}
@Override public short readShortLe() {
return Util.reverseBytesShort(readShort());
}
@Override public int readIntLe() {
return Util.reverseBytesInt(readInt());
}
@Override public long readLongLe() {
return Util.reverseBytesLong(readLong());
}
@Override public long readDecimalLong() {
if (size == 0) throw new IllegalStateException("size == 0");
// This value is always built negatively in order to accommodate Long.MIN_VALUE.
long value = 0;
int seen = 0;
boolean negative = false;
boolean done = false;
long overflowZone = Long.MIN_VALUE / 10;
long overflowDigit = (Long.MIN_VALUE % 10) + 1;
do {
Segment segment = head;
byte[] data = segment.data;
int pos = segment.pos;
int limit = segment.limit;
for (; pos < limit; pos++, seen++) {
byte b = data[pos];
if (b >= '0' && b <= '9') {
int digit = '0' - b;
// Detect when the digit would cause an overflow.
if (value < overflowZone || value == overflowZone && digit < overflowDigit) {
Buffer buffer = new Buffer().writeDecimalLong(value).writeByte(b);
if (!negative) buffer.readByte(); // Skip negative sign.
throw new NumberFormatException("Number too large: " + buffer.readUtf8());
}
value *= 10;
value += digit;
} else if (b == '-' && seen == 0) {
negative = true;
overflowDigit -= 1;
} else {
if (seen == 0) {
throw new NumberFormatException(
"Expected leading [0-9] or '-' character but was 0x" + Integer.toHexString(b));
}
// Set a flag to stop iteration. We still need to run through segment updating below.
done = true;
break;
}
}
if (pos == limit) {
head = segment.pop();
SegmentPool.recycle(segment);
} else {
segment.pos = pos;
}
} while (!done && head != null);
size -= seen;
return negative ? value : -value;
}
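// Illustrative sketch, not part of the original source: because the value above is accumulated
// negatively, Long.MIN_VALUE parses without overflowing. The trailing space is what stops the scan.
private static void readDecimalLongSketch() {
Buffer b = new Buffer().writeUtf8("-9223372036854775808 ");
assert b.readDecimalLong() == Long.MIN_VALUE;
assert b.readByte() == ' '; // The non-digit terminator is left in the buffer.
}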
@Override public long readHexadecimalUnsignedLong() {
if (size == 0) throw new IllegalStateException("size == 0");
long value = 0;
int seen = 0;
boolean done = false;
do {
Segment segment = head;
byte[] data = segment.data;
int pos = segment.pos;
int limit = segment.limit;
for (; pos < limit; pos++, seen++) {
int digit;
byte b = data[pos];
if (b >= '0' && b <= '9') {
digit = b - '0';
} else if (b >= 'a' && b <= 'f') {
digit = b - 'a' + 10;
} else if (b >= 'A' && b <= 'F') {
digit = b - 'A' + 10; // We never write uppercase, but we support reading it.
} else {
if (seen == 0) {
throw new NumberFormatException(
"Expected leading [0-9a-fA-F] character but was 0x" + Integer.toHexString(b));
}
// Set a flag to stop iteration. We still need to run through segment updating below.
done = true;
break;
}
// Detect when the shift will overflow.
if ((value & 0xf000000000000000L) != 0) {
Buffer buffer = new Buffer().writeHexadecimalUnsignedLong(value).writeByte(b);
throw new NumberFormatException("Number too large: " + buffer.readUtf8());
}
value <<= 4;
value |= digit;
}
if (pos == limit) {
head = segment.pop();
SegmentPool.recycle(segment);
} else {
segment.pos = pos;
}
} while (!done && head != null);
size -= seen;
return value;
}
@Override public ByteString readByteString() {
return new ByteString(readByteArray());
}
@Override public ByteString readByteString(long byteCount) throws EOFException {
return new ByteString(readByteArray(byteCount));
}
@Override public int select(Options options) {
Segment s = head;
if (s == null) return options.indexOf(ByteString.EMPTY);
ByteString[] byteStrings = options.byteStrings;
for (int i = 0, listSize = byteStrings.length; i < listSize; i++) {
ByteString b = byteStrings[i];
if (size >= b.size() && rangeEquals(s, s.pos, b, 0, b.size())) {
try {
skip(b.size());
return i;
} catch (EOFException e) {
throw new AssertionError(e);
}
}
}
return -1;
}
/**
* Returns the index of a value in {@code options} that is either the prefix of this buffer, or
* that this buffer is a prefix of. Unlike {@link #select} this never consumes the value, even
* if it is found in full.
*/
int selectPrefix(Options options) {
Segment s = head;
ByteString[] byteStrings = options.byteStrings;
for (int i = 0, listSize = byteStrings.length; i < listSize; i++) {
ByteString b = byteStrings[i];
int bytesLimit = (int) Math.min(size, b.size());
if (bytesLimit == 0 || rangeEquals(s, s.pos, b, 0, bytesLimit)) {
return i;
}
}
return -1;
}
@Override public void readFully(Buffer sink, long byteCount) throws EOFException {
if (size < byteCount) {
sink.write(this, size); // Exhaust ourselves.
throw new EOFException();
}
sink.write(this, byteCount);
}
@Override public long readAll(Sink sink) throws IOException {
long byteCount = size;
if (byteCount > 0) {
sink.write(this, byteCount);
}
return byteCount;
}
@Override public String readUtf8() {
try {
return readString(size, Util.UTF_8);
} catch (EOFException e) {
throw new AssertionError(e);
}
}
@Override public String readUtf8(long byteCount) throws EOFException {
return readString(byteCount, Util.UTF_8);
}
@Override public String readString(Charset charset) {
try {
return readString(size, charset);
} catch (EOFException e) {
throw new AssertionError(e);
}
}
@Override public String readString(long byteCount, Charset charset) throws EOFException {
checkOffsetAndCount(size, 0, byteCount);
if (charset == null) throw new IllegalArgumentException("charset == null");
if (byteCount > Integer.MAX_VALUE) {
throw new IllegalArgumentException("byteCount > Integer.MAX_VALUE: " + byteCount);
}
if (byteCount == 0) return "";
Segment s = head;
if (s.pos + byteCount > s.limit) {
// If the string spans multiple segments, delegate to readBytes().
return new String(readByteArray(byteCount), charset);
}
String result = new String(s.data, s.pos, (int) byteCount, charset);
s.pos += byteCount;
size -= byteCount;
if (s.pos == s.limit) {
head = s.pop();
SegmentPool.recycle(s);
}
return result;
}
@Override public @Nullable String readUtf8Line() throws EOFException {
long newline = indexOf((byte) '\n');
if (newline == -1) {
return size != 0 ? readUtf8(size) : null;
}
return readUtf8Line(newline);
}
@Override public String readUtf8LineStrict() throws EOFException {
return readUtf8LineStrict(Long.MAX_VALUE);
}
@Override public String readUtf8LineStrict(long limit) throws EOFException {
if (limit < 0) throw new IllegalArgumentException("limit < 0: " + limit);
long scanLength = limit == Long.MAX_VALUE ? Long.MAX_VALUE : limit + 1;
long newline = indexOf((byte) '\n', 0, scanLength);
if (newline != -1) return readUtf8Line(newline);
if (scanLength < size()
&& getByte(scanLength - 1) == '\r' && getByte(scanLength) == '\n') {
return readUtf8Line(scanLength); // The line was 'limit' UTF-8 bytes followed by \r\n.
}
Buffer data = new Buffer();
copyTo(data, 0, Math.min(32, size()));
throw new EOFException("\\n not found: limit=" + Math.min(size(), limit)
+ " content=" + data.readByteString().hex() + '…');
}
String readUtf8Line(long newline) throws EOFException {
if (newline > 0 && getByte(newline - 1) == '\r') {
// Read everything until '\r\n', then skip the '\r\n'.
String result = readUtf8((newline - 1));
skip(2);
return result;
} else {
// Read everything until '\n', then skip the '\n'.
String result = readUtf8(newline);
skip(1);
return result;
}
}
@Override public int readUtf8CodePoint() throws EOFException {
if (size == 0) throw new EOFException();
byte b0 = getByte(0);
int codePoint;
int byteCount;
int min;
if ((b0 & 0x80) == 0) {
// 0xxxxxxx.
codePoint = b0 & 0x7f;
byteCount = 1; // 7 bits (ASCII).
min = 0x0;
} else if ((b0 & 0xe0) == 0xc0) {
// 0x110xxxxx
codePoint = b0 & 0x1f;
byteCount = 2; // 11 bits (5 + 6).
min = 0x80;
} else if ((b0 & 0xf0) == 0xe0) {
// 0x1110xxxx
codePoint = b0 & 0x0f;
byteCount = 3; // 16 bits (4 + 6 + 6).
min = 0x800;
} else if ((b0 & 0xf8) == 0xf0) {
// 0x11110xxx
codePoint = b0 & 0x07;
byteCount = 4; // 21 bits (3 + 6 + 6 + 6).
min = 0x10000;
} else {
// We expected the first byte of a code point but got something else.
skip(1);
return REPLACEMENT_CHARACTER;
}
if (size < byteCount) {
throw new EOFException("size < " + byteCount + ": " + size
+ " (to read code point prefixed 0x" + Integer.toHexString(b0) + ")");
}
// Read the continuation bytes. If we encounter a non-continuation byte, the sequence consumed
// thus far is truncated and is decoded as the replacement character. That non-continuation byte
// is left in the stream for processing by the next call to readUtf8CodePoint().
for (int i = 1; i < byteCount; i++) {
byte b = getByte(i);
if ((b & 0xc0) == 0x80) {
// 0x10xxxxxx
codePoint <<= 6;
codePoint |= b & 0x3f;
} else {
skip(i);
return REPLACEMENT_CHARACTER;
}
}
skip(byteCount);
if (codePoint > 0x10ffff) {
return REPLACEMENT_CHARACTER; // Reject code points larger than the Unicode maximum.
}
if (codePoint >= 0xd800 && codePoint <= 0xdfff) {
return REPLACEMENT_CHARACTER; // Reject partial surrogates.
}
if (codePoint < min) {
return REPLACEMENT_CHARACTER; // Reject overlong code points.
}
return codePoint;
}
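// Illustrative sketch, not part of the original source: decoding one multi-byte code point as
// described above. U+20AC (the euro sign) is encoded as the 3-byte sequence 0xE2 0x82 0xAC.
private static void readUtf8CodePointSketch() throws EOFException {
Buffer b = new Buffer().writeUtf8("\u20ac");
assert b.readUtf8CodePoint() == 0x20ac;
assert b.exhausted(); // All 3 bytes were consumed.
}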
@Override public byte[] readByteArray() {
try {
return readByteArray(size);
} catch (EOFException e) {
throw new AssertionError(e);
}
}
@Override public byte[] readByteArray(long byteCount) throws EOFException {
checkOffsetAndCount(size, 0, byteCount);
if (byteCount > Integer.MAX_VALUE) {
throw new IllegalArgumentException("byteCount > Integer.MAX_VALUE: " + byteCount);
}
byte[] result = new byte[(int) byteCount];
readFully(result);
return result;
}
@Override public int read(byte[] sink) {
return read(sink, 0, sink.length);
}
@Override public void readFully(byte[] sink) throws EOFException {
int offset = 0;
while (offset < sink.length) {
int read = read(sink, offset, sink.length - offset);
if (read == -1) throw new EOFException();
offset += read;
}
}
@Override public int read(byte[] sink, int offset, int byteCount) {
checkOffsetAndCount(sink.length, offset, byteCount);
Segment s = head;
if (s == null) return -1;
int toCopy = Math.min(byteCount, s.limit - s.pos);
System.arraycopy(s.data, s.pos, sink, offset, toCopy);
s.pos += toCopy;
size -= toCopy;
if (s.pos == s.limit) {
head = s.pop();
SegmentPool.recycle(s);
}
return toCopy;
}
@Override public int read(ByteBuffer sink) throws IOException {
Segment s = head;
if (s == null) return -1;
int toCopy = Math.min(sink.remaining(), s.limit - s.pos);
sink.put(s.data, s.pos, toCopy);
s.pos += toCopy;
size -= toCopy;
if (s.pos == s.limit) {
head = s.pop();
SegmentPool.recycle(s);
}
return toCopy;
}
/**
* Discards all bytes in this buffer. Calling this method when you're done
* with a buffer will return its segments to the pool.
*/
public void clear() {
try {
skip(size);
} catch (EOFException e) {
throw new AssertionError(e);
}
}
/** Discards {@code byteCount} bytes from the head of this buffer. */
@Override public void skip(long byteCount) throws EOFException {
while (byteCount > 0) {
if (head == null) throw new EOFException();
int toSkip = (int) Math.min(byteCount, head.limit - head.pos);
size -= toSkip;
byteCount -= toSkip;
head.pos += toSkip;
if (head.pos == head.limit) {
Segment toRecycle = head;
head = toRecycle.pop();
SegmentPool.recycle(toRecycle);
}
}
}
@Override public Buffer write(ByteString byteString) {
if (byteString == null) throw new IllegalArgumentException("byteString == null");
byteString.write(this);
return this;
}
@Override public Buffer writeUtf8(String string) {
return writeUtf8(string, 0, string.length());
}
@Override public Buffer writeUtf8(String string, int beginIndex, int endIndex) {
if (string == null) throw new IllegalArgumentException("string == null");
if (beginIndex < 0) throw new IllegalArgumentException("beginIndex < 0: " + beginIndex);
if (endIndex < beginIndex) {
throw new IllegalArgumentException("endIndex < beginIndex: " + endIndex + " < " + beginIndex);
}
if (endIndex > string.length()) {
throw new IllegalArgumentException(
"endIndex > string.length: " + endIndex + " > " + string.length());
}
// Transcode a UTF-16 Java String to UTF-8 bytes.
for (int i = beginIndex; i < endIndex;) {
int c = string.charAt(i);
if (c < 0x80) {
Segment tail = writableSegment(1);
byte[] data = tail.data;
int segmentOffset = tail.limit - i;
int runLimit = Math.min(endIndex, Segment.SIZE - segmentOffset);
// Emit a 7-bit character with 1 byte.
data[segmentOffset + i++] = (byte) c; // 0xxxxxxx
// Fast-path contiguous runs of ASCII characters. This is ugly, but yields a ~4x performance
// improvement over independent calls to writeByte().
while (i < runLimit) {
c = string.charAt(i);
if (c >= 0x80) break;
data[segmentOffset + i++] = (byte) c; // 0xxxxxxx
}
int runSize = i + segmentOffset - tail.limit; // Equivalent to i - (previous i).
tail.limit += runSize;
size += runSize;
} else if (c < 0x800) {
// Emit a 11-bit character with 2 bytes.
writeByte(c >> 6 | 0xc0); // 110xxxxx
writeByte(c & 0x3f | 0x80); // 10xxxxxx
i++;
} else if (c < 0xd800 || c > 0xdfff) {
// Emit a 16-bit character with 3 bytes.
writeByte(c >> 12 | 0xe0); // 1110xxxx
writeByte(c >> 6 & 0x3f | 0x80); // 10xxxxxx
writeByte(c & 0x3f | 0x80); // 10xxxxxx
i++;
} else {
// c is a surrogate. Make sure it is a high surrogate & that its successor is a low
// surrogate. If not, the UTF-16 is invalid, in which case we emit a replacement character.
int low = i + 1 < endIndex ? string.charAt(i + 1) : 0;
if (c > 0xdbff || low < 0xdc00 || low > 0xdfff) {
writeByte('?');
i++;
continue;
}
// UTF-16 high surrogate: 110110xxxxxxxxxx (10 bits)
// UTF-16 low surrogate: 110111yyyyyyyyyy (10 bits)
// Unicode code point: 00010000000000000000 + xxxxxxxxxxyyyyyyyyyy (21 bits)
int codePoint = 0x010000 + ((c & ~0xd800) << 10 | low & ~0xdc00);
// Emit a 21-bit character with 4 bytes.
writeByte(codePoint >> 18 | 0xf0); // 11110xxx
writeByte(codePoint >> 12 & 0x3f | 0x80); // 10xxxxxx
writeByte(codePoint >> 6 & 0x3f | 0x80); // 10xxyyyy
writeByte(codePoint & 0x3f | 0x80); // 10yyyyyy
i += 2;
}
}
return this;
}
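// Illustrative sketch, not part of the original source: a valid UTF-16 surrogate pair is
// transcoded to a single 4-byte sequence, while an unpaired surrogate becomes '?', as the
// branches above show. The sample characters are arbitrary.
private static void writeUtf8SurrogateSketch() {
Buffer b = new Buffer().writeUtf8("\ud83d\ude00"); // U+1F600 as a high/low surrogate pair.
assert b.size() == 4;                              // One 4-byte UTF-8 sequence.
Buffer broken = new Buffer().writeUtf8("\ud83d");  // High surrogate with no low surrogate.
assert broken.size() == 1 && broken.getByte(0) == '?';
}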
@Override public Buffer writeUtf8CodePoint(int codePoint) {
if (codePoint < 0x80) {
// Emit a 7-bit code point with 1 byte.
writeByte(codePoint);
} else if (codePoint < 0x800) {
// Emit a 11-bit code point with 2 bytes.
writeByte(codePoint >> 6 | 0xc0); // 110xxxxx
writeByte(codePoint & 0x3f | 0x80); // 10xxxxxx
} else if (codePoint < 0x10000) {
if (codePoint >= 0xd800 && codePoint <= 0xdfff) {
// Emit a replacement character for a partial surrogate.
writeByte('?');
} else {
// Emit a 16-bit code point with 3 bytes.
writeByte(codePoint >> 12 | 0xe0); // 1110xxxx
writeByte(codePoint >> 6 & 0x3f | 0x80); // 10xxxxxx
writeByte(codePoint & 0x3f | 0x80); // 10xxxxxx
}
} else if (codePoint <= 0x10ffff) {
// Emit a 21-bit code point with 4 bytes.
writeByte(codePoint >> 18 | 0xf0); // 11110xxx
writeByte(codePoint >> 12 & 0x3f | 0x80); // 10xxxxxx
writeByte(codePoint >> 6 & 0x3f | 0x80); // 10xxxxxx
writeByte(codePoint & 0x3f | 0x80); // 10xxxxxx
} else {
throw new IllegalArgumentException(
"Unexpected code point: " + Integer.toHexString(codePoint));
}
return this;
}
@Override public Buffer writeString(String string, Charset charset) {
return writeString(string, 0, string.length(), charset);
}
@Override
public Buffer writeString(String string, int beginIndex, int endIndex, Charset charset) {
if (string == null) throw new IllegalArgumentException("string == null");
if (beginIndex < 0) throw new IllegalArgumentException("beginIndex < 0: " + beginIndex);
if (endIndex < beginIndex) {
throw new IllegalArgumentException("endIndex < beginIndex: " + endIndex + " < " + beginIndex);
}
if (endIndex > string.length()) {
throw new IllegalArgumentException(
"endIndex > string.length: " + endIndex + " > " + string.length());
}
if (charset == null) throw new IllegalArgumentException("charset == null");
if (charset.equals(Util.UTF_8)) return writeUtf8(string, beginIndex, endIndex);
byte[] data = string.substring(beginIndex, endIndex).getBytes(charset);
return write(data, 0, data.length);
}
@Override public Buffer write(byte[] source) {
if (source == null) throw new IllegalArgumentException("source == null");
return write(source, 0, source.length);
}
@Override public Buffer write(byte[] source, int offset, int byteCount) {
if (source == null) throw new IllegalArgumentException("source == null");
checkOffsetAndCount(source.length, offset, byteCount);
int limit = offset + byteCount;
while (offset < limit) {
Segment tail = writableSegment(1);
int toCopy = Math.min(limit - offset, Segment.SIZE - tail.limit);
System.arraycopy(source, offset, tail.data, tail.limit, toCopy);
offset += toCopy;
tail.limit += toCopy;
}
size += byteCount;
return this;
}
@Override public int write(ByteBuffer source) throws IOException {
if (source == null) throw new IllegalArgumentException("source == null");
int byteCount = source.remaining();
int remaining = byteCount;
while (remaining > 0) {
Segment tail = writableSegment(1);
int toCopy = Math.min(remaining, Segment.SIZE - tail.limit);
source.get(tail.data, tail.limit, toCopy);
remaining -= toCopy;
tail.limit += toCopy;
}
size += byteCount;
return byteCount;
}
@Override public long writeAll(Source source) throws IOException {
if (source == null) throw new IllegalArgumentException("source == null");
long totalBytesRead = 0;
for (long readCount; (readCount = source.read(this, Segment.SIZE)) != -1; ) {
totalBytesRead += readCount;
}
return totalBytesRead;
}
@Override public BufferedSink write(Source source, long byteCount) throws IOException {
while (byteCount > 0) {
long read = source.read(this, byteCount);
if (read == -1) throw new EOFException();
byteCount -= read;
}
return this;
}
@Override public Buffer writeByte(int b) {
Segment tail = writableSegment(1);
tail.data[tail.limit++] = (byte) b;
size += 1;
return this;
}
@Override public Buffer writeShort(int s) {
Segment tail = writableSegment(2);
byte[] data = tail.data;
int limit = tail.limit;
data[limit++] = (byte) ((s >>> 8) & 0xff);
data[limit++] = (byte) (s & 0xff);
tail.limit = limit;
size += 2;
return this;
}
@Override public Buffer writeShortLe(int s) {
return writeShort(Util.reverseBytesShort((short) s));
}
@Override public Buffer writeInt(int i) {
Segment tail = writableSegment(4);
byte[] data = tail.data;
int limit = tail.limit;
data[limit++] = (byte) ((i >>> 24) & 0xff);
data[limit++] = (byte) ((i >>> 16) & 0xff);
data[limit++] = (byte) ((i >>> 8) & 0xff);
data[limit++] = (byte) (i & 0xff);
tail.limit = limit;
size += 4;
return this;
}
@Override public Buffer writeIntLe(int i) {
return writeInt(Util.reverseBytesInt(i));
}
@Override public Buffer writeLong(long v) {
Segment tail = writableSegment(8);
byte[] data = tail.data;
int limit = tail.limit;
data[limit++] = (byte) ((v >>> 56L) & 0xff);
data[limit++] = (byte) ((v >>> 48L) & 0xff);
data[limit++] = (byte) ((v >>> 40L) & 0xff);
data[limit++] = (byte) ((v >>> 32L) & 0xff);
data[limit++] = (byte) ((v >>> 24L) & 0xff);
data[limit++] = (byte) ((v >>> 16L) & 0xff);
data[limit++] = (byte) ((v >>> 8L) & 0xff);
data[limit++] = (byte) (v & 0xff);
tail.limit = limit;
size += 8;
return this;
}
@Override public Buffer writeLongLe(long v) {
return writeLong(reverseBytesLong(v));
}
@Override public Buffer writeDecimalLong(long v) {
if (v == 0) {
// Both a shortcut and required since the following code can't handle zero.
return writeByte('0');
}
boolean negative = false;
if (v < 0) {
v = -v;
if (v < 0) { // Only true for Long.MIN_VALUE.
return writeUtf8("-9223372036854775808");
}
negative = true;
}
// Binary search for character width which favors matching lower numbers.
int width = //
v < 100000000L
? v < 10000L
? v < 100L
? v < 10L ? 1 : 2
: v < 1000L ? 3 : 4
: v < 1000000L
? v < 100000L ? 5 : 6
: v < 10000000L ? 7 : 8
: v < 1000000000000L
? v < 10000000000L
? v < 1000000000L ? 9 : 10
: v < 100000000000L ? 11 : 12
: v < 1000000000000000L
? v < 10000000000000L ? 13
: v < 100000000000000L ? 14 : 15
: v < 100000000000000000L
? v < 10000000000000000L ? 16 : 17
: v < 1000000000000000000L ? 18 : 19;
if (negative) {
++width;
}
Segment tail = writableSegment(width);
byte[] data = tail.data;
int pos = tail.limit + width; // We write backwards from right to left.
while (v != 0) {
int digit = (int) (v % 10);
data[--pos] = DIGITS[digit];
v /= 10;
}
if (negative) {
data[--pos] = '-';
}
tail.limit += width;
this.size += width;
return this;
}
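// Illustrative sketch, not part of the original source: the width computed above reserves
// exactly enough bytes, including one for the sign. The sample value is arbitrary.
private static void writeDecimalLongSketch() {
Buffer b = new Buffer().writeDecimalLong(-123456789L);
assert b.size() == 10;                    // 9 digits plus the leading '-'.
assert b.readUtf8().equals("-123456789");
}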
@Override public Buffer writeHexadecimalUnsignedLong(long v) {
if (v == 0) {
// Both a shortcut and required since the following code can't handle zero.
return writeByte('0');
}
int width = Long.numberOfTrailingZeros(Long.highestOneBit(v)) / 4 + 1;
Segment tail = writableSegment(width);
byte[] data = tail.data;
for (int pos = tail.limit + width - 1, start = tail.limit; pos >= start; pos--) {
data[pos] = DIGITS[(int) (v & 0xF)];
v >>>= 4;
}
tail.limit += width;
size += width;
return this;
}
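// Illustrative sketch, not part of the original source: the digit count above comes from the
// highest set bit, so no leading zeros (and no "0x" prefix) are written. The sample value is arbitrary.
private static void writeHexSketch() {
Buffer b = new Buffer().writeHexadecimalUnsignedLong(0xcafeL);
assert b.readUtf8().equals("cafe"); // 4 hex digits for a 16-bit value.
}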
/**
* Returns a tail segment that we can write at least {@code minimumCapacity}
* bytes to, creating it if necessary.
*/
Segment writableSegment(int minimumCapacity) {
if (minimumCapacity < 1 || minimumCapacity > Segment.SIZE) throw new IllegalArgumentException();
if (head == null) {
head = SegmentPool.take(); // Acquire a first segment.
return head.next = head.prev = head;
}
Segment tail = head.prev;
if (tail.limit + minimumCapacity > Segment.SIZE || !tail.owner) {
tail = tail.push(SegmentPool.take()); // Append a new empty segment to fill up.
}
return tail;
}
@Override public void write(Buffer source, long byteCount) {
// Move bytes from the head of the source buffer to the tail of this buffer
// while balancing two conflicting goals: don't waste CPU and don't waste
// memory.
//
//
// Don't waste CPU (ie. don't copy data around).
//
// Copying large amounts of data is expensive. Instead, we prefer to
// reassign entire segments from one buffer to the other.
//
//
// Don't waste memory.
//
// As an invariant, adjacent pairs of segments in a buffer should be at
// least 50% full, except for the head segment and the tail segment.
//
// The head segment cannot maintain the invariant because the application is
// consuming bytes from this segment, decreasing its level.
//
// The tail segment cannot maintain the invariant because the application is
// producing bytes, which may require new nearly-empty tail segments to be
// appended.
//
//
// Moving segments between buffers
//
// When writing one buffer to another, we prefer to reassign entire segments
// over copying bytes into their most compact form. Suppose we have a buffer
// with these segment levels [91%, 61%]. If we append a buffer with a
// single [72%] segment, that yields [91%, 61%, 72%]. No bytes are copied.
//
// Or suppose we have a buffer with these segment levels: [100%, 2%], and we
// want to append it to a buffer with these segment levels [99%, 3%]. This
// operation will yield the following segments: [100%, 2%, 99%, 3%]. That
// is, we do not spend time copying bytes around to achieve more efficient
// memory use like [100%, 100%, 4%].
//
// When combining buffers, we will compact adjacent buffers when their
// combined level doesn't exceed 100%. For example, when we start with
// [100%, 40%] and append [30%, 80%], the result is [100%, 70%, 80%].
//
//
// Splitting segments
//
// Occasionally we write only part of a source buffer to a sink buffer. For
// example, given a sink [51%, 91%], we may want to write the first 30% of
// a source [92%, 82%] to it. To simplify, we first transform the source to
// an equivalent buffer [30%, 62%, 82%] and then move the head segment,
// yielding sink [51%, 91%, 30%] and source [62%, 82%].
if (source == null) throw new IllegalArgumentException("source == null");
if (source == this) throw new IllegalArgumentException("source == this");
checkOffsetAndCount(source.size, 0, byteCount);
while (byteCount > 0) {
// Is a prefix of the source's head segment all that we need to move?
if (byteCount < (source.head.limit - source.head.pos)) {
Segment tail = head != null ? head.prev : null;
if (tail != null && tail.owner
&& (byteCount + tail.limit - (tail.shared ? 0 : tail.pos) <= Segment.SIZE)) {
// Our existing segments are sufficient. Move bytes from source's head to our tail.
source.head.writeTo(tail, (int) byteCount);
source.size -= byteCount;
size += byteCount;
return;
} else {
// We're going to need another segment. Split the source's head
// segment in two, then move the first of those two to this buffer.
source.head = source.head.split((int) byteCount);
}
}
// Remove the source's head segment and append it to our tail.
Segment segmentToMove = source.head;
long movedByteCount = segmentToMove.limit - segmentToMove.pos;
source.head = segmentToMove.pop();
if (head == null) {
head = segmentToMove;
head.next = head.prev = head;
} else {
Segment tail = head.prev;
tail = tail.push(segmentToMove);
tail.compact();
}
source.size -= movedByteCount;
size += movedByteCount;
byteCount -= movedByteCount;
}
}
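// Illustrative sketch, not part of the original source: writing one buffer to another moves
// whole segments where possible instead of copying bytes, as the long comment above explains.
// The sample text and byte count are arbitrary.
private static void segmentMoveSketch() {
Buffer source = new Buffer().writeUtf8("hello world");
Buffer sink = new Buffer();
sink.write(source, 5);                     // Take the first 5 bytes.
assert sink.readUtf8().equals("hello");
assert source.readUtf8().equals(" world"); // The remainder stays in the source buffer.
}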
@Override public long read(Buffer sink, long byteCount) {
if (sink == null) throw new IllegalArgumentException("sink == null");
if (byteCount < 0) throw new IllegalArgumentException("byteCount < 0: " + byteCount);
if (size == 0) return -1L;
if (byteCount > size) byteCount = size;
sink.write(this, byteCount);
return byteCount;
}
@Override public long indexOf(byte b) {
return indexOf(b, 0, Long.MAX_VALUE);
}
/**
* Returns the index of {@code b} in this at or beyond {@code fromIndex}, or
* -1 if this buffer does not contain {@code b} in that range.
*/
@Override public long indexOf(byte b, long fromIndex) {
return indexOf(b, fromIndex, Long.MAX_VALUE);
}
@Override public long indexOf(byte b, long fromIndex, long toIndex) {
if (fromIndex < 0 || toIndex < fromIndex) {
throw new IllegalArgumentException(
String.format("size=%s fromIndex=%s toIndex=%s", size, fromIndex, toIndex));
}
if (toIndex > size) toIndex = size;
if (fromIndex == toIndex) return -1L;
Segment s;
long offset;
// TODO(jwilson): extract this to a shared helper method when can do so without allocating.
findSegmentAndOffset: {
// Pick the first segment to scan. This is the first segment with offset <= fromIndex.
s = head;
if (s == null) {
// No segments to scan!
return -1L;
} else if (size - fromIndex < fromIndex) {
// We're scanning in the back half of this buffer. Find the segment starting at the back.
offset = size;
while (offset > fromIndex) {
s = s.prev;
offset -= (s.limit - s.pos);
}
} else {
// We're scanning in the front half of this buffer. Find the segment starting at the front.
offset = 0L;
for (long nextOffset; (nextOffset = offset + (s.limit - s.pos)) < fromIndex; ) {
s = s.next;
offset = nextOffset;
}
}
}
// Scan through the segments, searching for b.
while (offset < toIndex) {
byte[] data = s.data;
int limit = (int) Math.min(s.limit, s.pos + toIndex - offset);
int pos = (int) (s.pos + fromIndex - offset);
for (; pos < limit; pos++) {
if (data[pos] == b) {
return pos - s.pos + offset;
}
}
// Not in this segment. Try the next one.
offset += (s.limit - s.pos);
fromIndex = offset;
s = s.next;
}
return -1L;
}
@Override public long indexOf(ByteString bytes) throws IOException {
return indexOf(bytes, 0);
}
@Override public long indexOf(ByteString bytes, long fromIndex) throws IOException {
if (bytes.size() == 0) throw new IllegalArgumentException("bytes is empty");
if (fromIndex < 0) throw new IllegalArgumentException("fromIndex < 0");
Segment s;
long offset;
// TODO(jwilson): extract this to a shared helper method when can do so without allocating.
findSegmentAndOffset: {
// Pick the first segment to scan. This is the first segment with offset <= fromIndex.
s = head;
if (s == null) {
// No segments to scan!
return -1L;
} else if (size - fromIndex < fromIndex) {
// We're scanning in the back half of this buffer. Find the segment starting at the back.
offset = size;
while (offset > fromIndex) {
s = s.prev;
offset -= (s.limit - s.pos);
}
} else {
// We're scanning in the front half of this buffer. Find the segment starting at the front.
offset = 0L;
for (long nextOffset; (nextOffset = offset + (s.limit - s.pos)) < fromIndex; ) {
s = s.next;
offset = nextOffset;
}
}
}
// Scan through the segments, searching for the lead byte. Each time that is found, delegate to
// rangeEquals() to check for a complete match.
byte b0 = bytes.getByte(0);
int bytesSize = bytes.size();
long resultLimit = size - bytesSize + 1;
while (offset < resultLimit) {
// Scan through the current segment.
byte[] data = s.data;
int segmentLimit = (int) Math.min(s.limit, s.pos + resultLimit - offset);
for (int pos = (int) (s.pos + fromIndex - offset); pos < segmentLimit; pos++) {
if (data[pos] == b0 && rangeEquals(s, pos + 1, bytes, 1, bytesSize)) {
return pos - s.pos + offset;
}
}
// Not in this segment. Try the next one.
offset += (s.limit - s.pos);
fromIndex = offset;
s = s.next;
}
return -1L;
}
@Override public long indexOfElement(ByteString targetBytes) {
return indexOfElement(targetBytes, 0);
}
@Override public long indexOfElement(ByteString targetBytes, long fromIndex) {
if (fromIndex < 0) throw new IllegalArgumentException("fromIndex < 0");
Segment s;
long offset;
// TODO(jwilson): extract this to a shared helper method when can do so without allocating.
findSegmentAndOffset: {
// Pick the first segment to scan. This is the first segment with offset <= fromIndex.
s = head;
if (s == null) {
// No segments to scan!
return -1L;
} else if (size - fromIndex < fromIndex) {
// We're scanning in the back half of this buffer. Find the segment starting at the back.
offset = size;
while (offset > fromIndex) {
s = s.prev;
offset -= (s.limit - s.pos);
}
} else {
// We're scanning in the front half of this buffer. Find the segment starting at the front.
offset = 0L;
for (long nextOffset; (nextOffset = offset + (s.limit - s.pos)) < fromIndex; ) {
s = s.next;
offset = nextOffset;
}
}
}
// Special case searching for one of two bytes. This is a common case for tools like Moshi,
// which search for pairs of chars like `\r` and `\n` or {@code `"` and `\`. The impact of this
// optimization is a ~5x speedup for this case without a substantial cost to other cases.
if (targetBytes.size() == 2) {
// Scan through the segments, searching for either of the two bytes.
byte b0 = targetBytes.getByte(0);
byte b1 = targetBytes.getByte(1);
while (offset < size) {
byte[] data = s.data;
for (int pos = (int) (s.pos + fromIndex - offset), limit = s.limit; pos < limit; pos++) {
int b = data[pos];
if (b == b0 || b == b1) {
return pos - s.pos + offset;
}
}
// Not in this segment. Try the next one.
offset += (s.limit - s.pos);
fromIndex = offset;
s = s.next;
}
} else {
// Scan through the segments, searching for a byte that's also in the array.
byte[] targetByteArray = targetBytes.internalArray();
while (offset < size) {
byte[] data = s.data;
for (int pos = (int) (s.pos + fromIndex - offset), limit = s.limit; pos < limit; pos++) {
int b = data[pos];
for (byte t : targetByteArray) {
if (b == t) return pos - s.pos + offset;
}
}
// Not in this segment. Try the next one.
offset += (s.limit - s.pos);
fromIndex = offset;
s = s.next;
}
}
return -1L;
}
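// Illustrative sketch, not part of the original source: the two-byte fast path described above,
// scanning for whichever of '\r' or '\n' appears first. The sample line is arbitrary.
private static void indexOfElementSketch() {
Buffer b = new Buffer().writeUtf8("status: ok\r\nbody");
assert b.indexOfElement(ByteString.encodeUtf8("\r\n")) == 10; // Index of the '\r'.
}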
@Override public boolean rangeEquals(long offset, ByteString bytes) {
return rangeEquals(offset, bytes, 0, bytes.size());
}
@Override public boolean rangeEquals(
long offset, ByteString bytes, int bytesOffset, int byteCount) {
if (offset < 0
|| bytesOffset < 0
|| byteCount < 0
|| size - offset < byteCount
|| bytes.size() - bytesOffset < byteCount) {
return false;
}
for (int i = 0; i < byteCount; i++) {
if (getByte(offset + i) != bytes.getByte(bytesOffset + i)) {
return false;
}
}
return true;
}
/**
* Returns true if the range within this buffer starting at {@code segmentPos} in {@code segment}
* is equal to {@code bytes[bytesOffset..bytesLimit)}.
*/
private boolean rangeEquals(
Segment segment, int segmentPos, ByteString bytes, int bytesOffset, int bytesLimit) {
int segmentLimit = segment.limit;
byte[] data = segment.data;
for (int i = bytesOffset; i < bytesLimit; ) {
if (segmentPos == segmentLimit) {
segment = segment.next;
data = segment.data;
segmentPos = segment.pos;
segmentLimit = segment.limit;
}
if (data[segmentPos] != bytes.getByte(i)) {
return false;
}
segmentPos++;
i++;
}
return true;
}
@Override public void flush() {
}
@Override public boolean isOpen() {
return true;
}
@Override public void close() {
}
@Override public Timeout timeout() {
return Timeout.NONE;
}
/** For testing. This returns the sizes of the segments in this buffer. */
List<Integer> segmentSizes() {
if (head == null) return Collections.emptyList();
List<Integer> result = new ArrayList<>();
result.add(head.limit - head.pos);
for (Segment s = head.next; s != head; s = s.next) {
result.add(s.limit - s.pos);
}
return result;
}
/** Returns the 128-bit MD5 hash of this buffer. */
public ByteString md5() {
return digest("MD5");
}
/** Returns the 160-bit SHA-1 hash of this buffer. */
public ByteString sha1() {
return digest("SHA-1");
}
/** Returns the 256-bit SHA-256 hash of this buffer. */
public ByteString sha256() {
return digest("SHA-256");
}
/** Returns the 512-bit SHA-512 hash of this buffer. */
public ByteString sha512() {
return digest("SHA-512");
}
private ByteString digest(String algorithm) {
try {
MessageDigest messageDigest = MessageDigest.getInstance(algorithm);
if (head != null) {
messageDigest.update(head.data, head.pos, head.limit - head.pos);
for (Segment s = head.next; s != head; s = s.next) {
messageDigest.update(s.data, s.pos, s.limit - s.pos);
}
}
return ByteString.of(messageDigest.digest());
} catch (NoSuchAlgorithmException e) {
throw new AssertionError();
}
}
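// Illustrative sketch, not part of the original source: the digest methods above hash whatever
// the buffer currently holds without consuming it. The sample text is arbitrary.
private static void digestSketch() {
Buffer b = new Buffer().writeUtf8("hello");
ByteString hash = b.sha256();   // 32-byte digest of the current contents.
assert hash.size() == 32;
assert b.size() == 5;           // Hashing does not consume the buffer.
}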
/** Returns the 160-bit SHA-1 HMAC of this buffer. */
public ByteString hmacSha1(ByteString key) {
return hmac("HmacSHA1", key);
}
/** Returns the 256-bit SHA-256 HMAC of this buffer. */
public ByteString hmacSha256(ByteString key) {
return hmac("HmacSHA256", key);
}
/** Returns the 512-bit SHA-512 HMAC of this buffer. */
public ByteString hmacSha512(ByteString key) {
return hmac("HmacSHA512", key);
}
private ByteString hmac(String algorithm, ByteString key) {
try {
Mac mac = Mac.getInstance(algorithm);
mac.init(new SecretKeySpec(key.toByteArray(), algorithm));
if (head != null) {
mac.update(head.data, head.pos, head.limit - head.pos);
for (Segment s = head.next; s != head; s = s.next) {
mac.update(s.data, s.pos, s.limit - s.pos);
}
}
return ByteString.of(mac.doFinal());
} catch (NoSuchAlgorithmException e) {
throw new AssertionError();
} catch (InvalidKeyException e) {
throw new IllegalArgumentException(e);
}
}
@Override public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof Buffer)) return false;
Buffer that = (Buffer) o;
if (size != that.size) return false;
if (size == 0) return true; // Both buffers are empty.
Segment sa = this.head;
Segment sb = that.head;
int posA = sa.pos;
int posB = sb.pos;
for (long pos = 0, count; pos < size; pos += count) {
count = Math.min(sa.limit - posA, sb.limit - posB);
for (int i = 0; i < count; i++) {
if (sa.data[posA++] != sb.data[posB++]) return false;
}
if (posA == sa.limit) {
sa = sa.next;
posA = sa.pos;
}
if (posB == sb.limit) {
sb = sb.next;
posB = sb.pos;
}
}
return true;
}
@Override public int hashCode() {
Segment s = head;
if (s == null) return 0;
int result = 1;
do {
for (int pos = s.pos, limit = s.limit; pos < limit; pos++) {
result = 31 * result + s.data[pos];
}
s = s.next;
} while (s != head);
return result;
}
/**
* Returns a human-readable string that describes the contents of this buffer. Typically this
* is a string like {@code [text=Hello]} or {@code [hex=0000ffff]}.
*/
@Override public String toString() {
return snapshot().toString();
}
/** Returns a deep copy of this buffer. */
@Override public Buffer clone() {
Buffer result = new Buffer();
if (size == 0) return result;
result.head = head.sharedCopy();
result.head.next = result.head.prev = result.head;
for (Segment s = head.next; s != head; s = s.next) {
result.head.prev.push(s.sharedCopy());
}
result.size = size;
return result;
}
/** Returns an immutable copy of this buffer as a byte string. */
public ByteString snapshot() {
if (size > Integer.MAX_VALUE) {
throw new IllegalArgumentException("size > Integer.MAX_VALUE: " + size);
}
return snapshot((int) size);
}
/**
* Returns an immutable copy of the first {@code byteCount} bytes of this buffer as a byte string.
*/
public ByteString snapshot(int byteCount) {
if (byteCount == 0) return ByteString.EMPTY;
return new SegmentedByteString(this, byteCount);
}
public UnsafeCursor readUnsafe() {
return readUnsafe(new UnsafeCursor());
}
public UnsafeCursor readUnsafe(UnsafeCursor unsafeCursor) {
if (unsafeCursor.buffer != null) {
throw new IllegalStateException("already attached to a buffer");
}
unsafeCursor.buffer = this;
unsafeCursor.readWrite = false;
return unsafeCursor;
}
public UnsafeCursor readAndWriteUnsafe() {
return readAndWriteUnsafe(new UnsafeCursor());
}
public UnsafeCursor readAndWriteUnsafe(UnsafeCursor unsafeCursor) {
if (unsafeCursor.buffer != null) {
throw new IllegalStateException("already attached to a buffer");
}
unsafeCursor.buffer = this;
unsafeCursor.readWrite = true;
return unsafeCursor;
}
/**
* A handle to the underlying data in a buffer. This handle is unsafe because it does not enforce
* its own invariants. Instead, it assumes a careful user who has studied Okio's implementation
* details and their consequences.
*
*
* <h3>Buffer Internals</h3>
*
*
* <p>Most code should use {@code Buffer} as a black box: a class that holds 0 or more bytes of
* data with efficient APIs to append data to the end and to consume data from the front. Usually
* this is also the most efficient way to use buffers because it allows Okio to employ several
* optimizations, including:
*
*
* <ul>
*   <li><strong>Fast Allocation:</strong> Buffers use a shared pool of memory that is not
*       zero-filled before use.
*   <li><strong>Fast Resize:</strong> A buffer's capacity can change without copying its
*       contents.
*   <li><strong>Fast Move:</strong> Memory ownership can be reassigned from one buffer to
*       another.
*   <li><strong>Fast Copy:</strong> Multiple buffers can share the same underlying memory.
*   <li><strong>Fast Encoding and Decoding:</strong> Common operations like UTF-8 encoding and
*       decimal decoding do not require intermediate objects to be allocated.
* </ul>
*
* <p>These optimizations all leverage the way Okio stores data internally. Okio Buffers are
* implemented using a doubly-linked list of segments. Each segment is a contiguous range within an
* 8 KiB {@code byte[]}. Each segment has two indexes, {@code start}, the offset of the first
* byte of the array containing application data, and {@code end}, the offset of the first byte
* beyond {@code start} whose data is undefined.
*
*
* <p>New buffers are empty and have no segments:
*
*
* <pre>{@code
*
*   Buffer buffer = new Buffer();
* }</pre>
*
* We append 7 bytes of data to the end of our empty buffer. Internally, the buffer allocates a
* segment and writes its new data there. The lone segment has an 8 KiB byte array but only 7
* bytes of data:
*
*
*
* When we read 4 bytes of data from the buffer, it finds its first segment and returns that data
* to us. As bytes are read the data is consumed. The segment tracks this by adjusting its
* internal indices.
*
*
*
* As we write data into a buffer we fill up its internal segments. When a write doesn't fit into
* a buffer's last segment, additional segments are allocated and appended to the linked list of
* segments. Each segment has its own start and end indexes tracking where the user's data begins
* and ends.
*
*
*
* The start index is always inclusive and the end index is always
* exclusive. The data preceding the start index is undefined, and the data
* at and following the end index is undefined.
*
*
* <p>After the last byte of a segment has been read, that segment may be returned to an internal
* segment pool. In addition to reducing the need to do garbage collection, segment pooling also
* saves the JVM from needing to zero-fill byte arrays. Okio doesn't need to zero-fill its arrays
* because it always writes memory before it reads it. But if you look at a segment in a debugger
* you may see its effects. In this example, one of the "xoxo" segments above is reused in an
* unrelated buffer:
*
*
*
* There is an optimization in {@code Buffer.clone()} and other methods that allows two segments
* to share the same underlying byte array. Clones can't write to the shared byte array; instead
* they allocate a new (private) segment early.
*
*
*
* <p>Segments are not shared when the shared region is small (i.e. less than 1 KiB). This is intended
* to prevent fragmentation in sharing-heavy use cases.
*
*
* <h3>Unsafe Cursor API</h3>
*
*
* <p>This class exposes privileged access to the internal byte arrays of a buffer. A cursor
* either references the data of a single segment, it is before the first segment ({@code
* offset == -1}), or it is after the last segment ({@code offset == buffer.size}).
*
*
* <p>Call {@link #seek} to move the cursor to the segment that contains a specified offset. After
* seeking, {@link #data} references the segment's internal byte array, {@link #start} is the
* segment's start and {@link #end} is its end.
*
*
* <p>Call {@link #next} to advance the cursor to the next segment. This returns -1 if there are
* no further segments in the buffer.
*
*
* <p>Use {@link Buffer#readUnsafe} to create a cursor to read buffer data and {@link
* Buffer#readAndWriteUnsafe} to create a cursor to read and write buffer data. In either case,
* always call {@link #close} when done with a cursor. This is convenient with Java 7's
* try-with-resources syntax. In this example we read all of the bytes in a buffer into a byte
* array:
*
*
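* <p>A minimal sketch of that read (illustrative only; it assumes a {@code buffer} variable and
* is not taken verbatim from the original documentation):
*
* <pre>{@code
*
*   byte[] bytes = new byte[(int) buffer.size()];
*   try (UnsafeCursor cursor = buffer.readUnsafe()) {
*     while (cursor.next() != -1) {
*       // Copy the current segment's readable range into the output array.
*       System.arraycopy(cursor.data, cursor.start,
*           bytes, (int) cursor.offset, cursor.end - cursor.start);
*     }
*   }
* }</pre>
*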
* <p>Change the capacity of a buffer with {@link #resizeBuffer}. This is only permitted for
* read+write cursors. The buffer's size always changes from the end: shrinking it removes bytes
* from the end; growing it adds capacity to the end.
*
*
* <h3>Warnings</h3>
*
*
* <p>Most application developers should avoid this API. Those that must use this API should
* respect these warnings.
*
*
* <p><strong>Don't mutate a cursor.</strong> This class has public, non-final fields because that
* is convenient for low-level I/O frameworks. Never assign values to these fields; instead use
* the cursor API to adjust these.
*
*
* <p><strong>Never mutate {@code data} unless you have read+write access.</strong> You are on the
* honor system to never write the buffer in read-only mode. Read-only mode may be more efficient
* than read+write mode because it does not need to make private copies of shared segments.
*
*
* <p><strong>Only access data in {@code [start..end)}.</strong> Other data in the byte array
* is undefined! It may contain private or sensitive data from other parts of your process.
*
*
* <p><strong>Always fill the new capacity when you grow a buffer.</strong> New capacity is not
* zero-filled and may contain data from other parts of your process. Avoid leaking this
* information by always writing something to the newly-allocated capacity. Do not assume that
* new capacity will be filled with {@code 0}; it will not be.
*
*
* <p><strong>Do not access a buffer while it is being accessed by a cursor.</strong> Even simple
* read-only operations like {@link Buffer#clone} are unsafe because they mark segments as shared.
*
*
* <p><strong>Do not hard-code the segment size in your application.</strong> It is possible that
* segment sizes will change with advances in hardware. Future versions of Okio may even have
* heterogeneous segment sizes.
*
*
* <p>These warnings are intended to help you to use this API safely. It's here for developers
* that need absolutely the most throughput. Since that's you, here's one final performance tip.
* You can reuse instances of this class if you like. Use the overloads of {@link #readUnsafe} and
* {@link #readAndWriteUnsafe} that take a cursor and close it after use.
*/
public static final class UnsafeCursor implements Closeable {
public Buffer buffer;
public boolean readWrite;
private Segment segment;
public long offset = -1L;
public byte[] data;
public int start = -1;
public int end = -1;
/**
* Seeks to the next range of bytes, advancing the offset by {@code end - start}. Returns the
* size of the readable range (at least 1), or -1 if we have reached the end of the buffer and
* there are no more bytes to read.
*/
public int next() {
if (offset == buffer.size) throw new IllegalStateException();
if (offset == -1L) return seek(0L);
return seek(offset + (end - start));
}
/**
* Reposition the cursor so that the data at {@code offset} is readable at {@code data[start]}.
* Returns the number of bytes readable in {@code data} (at least 1), or -1 if there are no data
* to read.
*/
public int seek(long offset) {
if (offset < -1 || offset > buffer.size) {
throw new ArrayIndexOutOfBoundsException(
String.format("offset=%s > size=%s", offset, buffer.size));
}
if (offset == -1 || offset == buffer.size) {
this.segment = null;
this.offset = offset;
this.data = null;
this.start = -1;
this.end = -1;
return -1;
}
// Navigate to the segment that contains `offset`. Start from our current segment if possible.
long min = 0L;
long max = buffer.size;
Segment head = buffer.head;
Segment tail = buffer.head;
if (this.segment != null) {
long segmentOffset = this.offset - (this.start - this.segment.pos);
if (segmentOffset > offset) {
// Set the cursor segment to be the 'end'
max = segmentOffset;
tail = this.segment;
} else {
// Set the cursor segment to be the 'beginning'
min = segmentOffset;
head = this.segment;
}
}
Segment next;
long nextOffset;
if (max - offset > offset - min) {
// Start at the 'beginning' and search forwards
next = head;
nextOffset = min;
while (offset >= nextOffset + (next.limit - next.pos)) {
nextOffset += (next.limit - next.pos);
next = next.next;
}
} else {
// Start at the 'end' and search backwards
next = tail;
nextOffset = max;
while (nextOffset > offset) {
next = next.prev;
nextOffset -= (next.limit - next.pos);
}
}
// If we're going to write and our segment is shared, swap it for a read-write one.
if (readWrite && next.shared) {
Segment unsharedNext = next.unsharedCopy();
if (buffer.head == next) {
buffer.head = unsharedNext;
}
next = next.push(unsharedNext);
next.prev.pop();
}
// Update this cursor to the requested offset within the found segment.
this.segment = next;
this.offset = offset;
this.data = next.data;
this.start = next.pos + (int) (offset - nextOffset);
this.end = next.limit;
return end - start;
}
/**
* Change the size of the buffer so that it equals {@code newSize} by either adding new
* capacity at the end or truncating the buffer at the end. Newly added capacity may span
* multiple segments.
*
*
* <p>As a side-effect this cursor will {@link #seek seek}. If the buffer is being enlarged it
* will move {@link #offset} to the first byte of newly-added capacity. This is the size of the
* buffer prior to the {@code resizeBuffer()} call. If the buffer is being shrunk it will move
* {@link #offset} to the end of the buffer.
*
*
* <p><strong>Warning:</strong> it is the caller’s responsibility to write new data to every byte of the
* newly-allocated capacity. Failure to do so may cause serious security problems as the data
* in the returned buffers is not zero filled. Buffers may contain dirty pooled segments that
* hold very sensitive data from other parts of the current process.
*
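* <p>A brief usage sketch (illustrative; the size and fill value are arbitrary assumptions, not
* taken from the original documentation):
*
* <pre>{@code
*
*   Buffer buffer = new Buffer();
*   try (UnsafeCursor cursor = buffer.readAndWriteUnsafe()) {
*     cursor.resizeBuffer(1024);
*     // New capacity is not zero-filled; write every added byte before using it.
*     do {
*       Arrays.fill(cursor.data, cursor.start, cursor.end, (byte) 0);
*     } while (cursor.next() != -1);
*   }
* }</pre>
*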
* @return the previous size of the buffer.
*/
public long resizeBuffer(long newSize) {
if (buffer == null) {
throw new IllegalStateException("not attached to a buffer");
}
if (!readWrite) {
throw new IllegalStateException("resizeBuffer() only permitted for read/write buffers");
}
long oldSize = buffer.size;
if (newSize <= oldSize) {
if (newSize < 0) {
throw new IllegalArgumentException("newSize < 0: " + newSize);
}
// Shrink the buffer by either shrinking segments or removing them.
for (long bytesToSubtract = oldSize - newSize; bytesToSubtract > 0; ) {
Segment tail = buffer.head.prev;
int tailSize = tail.limit - tail.pos;
if (tailSize <= bytesToSubtract) {
buffer.head = tail.pop();
SegmentPool.recycle(tail);
bytesToSubtract -= tailSize;
} else {
tail.limit -= bytesToSubtract;
break;
}
}
// Seek to the end.
this.segment = null;
this.offset = newSize;
this.data = null;
this.start = -1;
this.end = -1;
} else if (newSize > oldSize) {
// Enlarge the buffer by either enlarging segments or adding them.
boolean needsToSeek = true;
for (long bytesToAdd = newSize - oldSize; bytesToAdd > 0; ) {
Segment tail = buffer.writableSegment(1);
int segmentBytesToAdd = (int) Math.min(bytesToAdd, Segment.SIZE - tail.limit);
tail.limit += segmentBytesToAdd;
bytesToAdd -= segmentBytesToAdd;
// If this is the first segment we're adding, seek to it.
if (needsToSeek) {
this.segment = tail;
this.offset = oldSize;
this.data = tail.data;
this.start = tail.limit - segmentBytesToAdd;
this.end = tail.limit;
needsToSeek = false;
}
}
}
buffer.size = newSize;
return oldSize;
}
/**
* Grow the buffer by adding a contiguous range of capacity in a single
* segment. This adds at least {@code minByteCount} bytes but may add up to a full segment of
* additional capacity.
*
*
* <p>As a side-effect this cursor will {@link #seek seek}. It will move {@link #offset} to the
* first byte of newly-added capacity. This is the size of the buffer prior to the {@code
* expandBuffer()} call.
*
*
* <p>If {@code minByteCount} bytes are available in the buffer's current tail segment that will
* be used; otherwise another segment will be allocated and appended. In either case this
* returns the number of bytes of capacity added to this buffer.
*
*
* <p><strong>Warning:</strong> it is the caller’s responsibility to either write new data to every byte of the
* newly-allocated capacity, or to {@link #resizeBuffer shrink} the buffer to the data written.
* Failure to do so may cause serious security problems as the data in the returned buffers is
* not zero filled. Buffers may contain dirty pooled segments that hold very sensitive data from
* other parts of the current process.
*
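* <p>A brief usage sketch that appends a large run of bytes (illustrative; the byte count and
* fill value are arbitrary assumptions):
*
* <pre>{@code
*
*   Buffer buffer = new Buffer();
*   try (UnsafeCursor cursor = buffer.readAndWriteUnsafe()) {
*     long bytesLeft = 1000000;
*     while (bytesLeft > 0) {
*       int added = (int) cursor.expandBuffer(8192);
*       int toFill = (int) Math.min(bytesLeft, added);
*       Arrays.fill(cursor.data, cursor.start, cursor.start + toFill, (byte) 'x');
*       bytesLeft -= toFill;
*     }
*     // Trim the capacity that was added but never written.
*     cursor.resizeBuffer(1000000);
*   }
* }</pre>
*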
* @param minByteCount the size of the contiguous capacity. Must be positive and not greater
* than the capacity size of a single segment (8 KiB).
* @return the number of bytes expanded by. Not less than {@code minByteCount}.
*/
public long expandBuffer(int minByteCount) {
if (minByteCount <= 0) {
throw new IllegalArgumentException("minByteCount <= 0: " + minByteCount);
}
if (minByteCount > Segment.SIZE) {
throw new IllegalArgumentException("minByteCount > Segment.SIZE: " + minByteCount);
}
if (buffer == null) {
throw new IllegalStateException("not attached to a buffer");
}
if (!readWrite) {
throw new IllegalStateException("expandBuffer() only permitted for read/write buffers");
}
long oldSize = buffer.size;
Segment tail = buffer.writableSegment(minByteCount);
int result = Segment.SIZE - tail.limit;
tail.limit = Segment.SIZE;
buffer.size = oldSize + result;
// Seek to the old size.
this.segment = tail;
this.offset = oldSize;
this.data = tail.data;
this.start = Segment.SIZE - result;
this.end = Segment.SIZE;
return result;
}
@Override public void close() {
// TODO(jwilson): use edit counts or other information to track unexpected changes?
if (buffer == null) {
throw new IllegalStateException("not attached to a buffer");
}
buffer = null;
segment = null;
offset = -1L;
data = null;
start = -1;
end = -1;
}
}
}
okio-okio-parent-1.14.0/okio/src/main/java/okio/BufferedSink.java 0000664 0000000 0000000 00000030146 13240174456 0024546 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2014 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.channels.WritableByteChannel;
import java.nio.charset.Charset;
/**
* A sink that keeps a buffer internally so that callers can do small writes
* without a performance penalty.
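*
* <p>One common way to obtain a {@code BufferedSink} is via {@link Okio#buffer(Sink)}. A brief
* sketch (the file name here is an arbitrary assumption):
*
* <pre>{@code
*
*   try (BufferedSink sink = Okio.buffer(Okio.sink(new File("hello.txt")))) {
*     sink.writeUtf8("Hello, Okio!\n");
*   }
* }</pre>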
*/
public interface BufferedSink extends Sink, WritableByteChannel {
/** Returns this sink's internal buffer. */
Buffer buffer();
BufferedSink write(ByteString byteString) throws IOException;
/**
* Like {@link OutputStream#write(byte[])}, this writes a complete byte array to
* this sink.
*/
BufferedSink write(byte[] source) throws IOException;
/**
* Like {@link OutputStream#write(byte[], int, int)}, this writes {@code byteCount}
* bytes of {@code source}, starting at {@code offset}.
*/
BufferedSink write(byte[] source, int offset, int byteCount) throws IOException;
/**
* Removes all bytes from {@code source} and appends them to this sink. Returns the
* number of bytes read which will be 0 if {@code source} is exhausted.
*/
long writeAll(Source source) throws IOException;
/** Removes {@code byteCount} bytes from {@code source} and appends them to this sink. */
BufferedSink write(Source source, long byteCount) throws IOException;
/**
* Encodes {@code string} in UTF-8 and writes it to this sink.
* <pre>{@code
*
* Buffer buffer = new Buffer();
* buffer.writeUtf8("Uh uh uh!");
* buffer.writeByte(' ');
* buffer.writeUtf8("You didn't say the magic word!");
*
* assertEquals("Uh uh uh! You didn't say the magic word!", buffer.readUtf8());
* }</pre>
*/
BufferedSink writeUtf8(String string) throws IOException;
/**
* Encodes the characters at {@code beginIndex} up to {@code endIndex} from {@code string} in
* UTF-8 and writes it to this sink.
* <pre>{@code
*
* Buffer buffer = new Buffer();
* buffer.writeUtf8("I'm a hacker!\n", 6, 12);
* buffer.writeByte(' ');
* buffer.writeUtf8("That's what I said: you're a nerd.\n", 29, 33);
* buffer.writeByte(' ');
* buffer.writeUtf8("I prefer to be called a hacker!\n", 24, 31);
*
* assertEquals("hacker nerd hacker!", buffer.readUtf8());
* }</pre>
*/
BufferedSink writeUtf8(String string, int beginIndex, int endIndex) throws IOException;
/** Encodes {@code codePoint} in UTF-8 and writes it to this sink. */
BufferedSink writeUtf8CodePoint(int codePoint) throws IOException;
/** Encodes {@code string} in {@code charset} and writes it to this sink. */
BufferedSink writeString(String string, Charset charset) throws IOException;
/**
* Encodes the characters at {@code beginIndex} up to {@code endIndex} from {@code string} in
* {@code charset} and writes it to this sink.
*/
BufferedSink writeString(String string, int beginIndex, int endIndex, Charset charset)
throws IOException;
/** Writes a byte to this sink. */
BufferedSink writeByte(int b) throws IOException;
/**
* Writes a long to this sink in hexadecimal form (i.e., as a string in base 16).
*/
BufferedSink writeHexadecimalUnsignedLong(long v) throws IOException;
/**
* Writes all buffered data to the underlying sink, if one exists. Then that sink is recursively
* flushed which pushes data as far as possible towards its ultimate destination. Typically that
* destination is a network socket or file.
*/
@Override void flush() throws IOException;
/**
* Writes all buffered data to the underlying sink, if one exists. Like {@link #flush}, but
* weaker. Call this before this buffered sink goes out of scope so that its data can reach its
* destination.
*/
BufferedSink emit() throws IOException;
/**
* Writes complete segments to the underlying sink, if one exists. Like {@link #flush}, but
* weaker. Use this to limit the memory held in the buffer to a single segment. Typically
* application code will not need to call this: it is only necessary when application code writes
* directly to this {@linkplain #buffer() sink's buffer}.
*/
BufferedSink emitCompleteSegments() throws IOException;
/** Returns an output stream that writes to this sink. */
OutputStream outputStream();
}
okio-okio-parent-1.14.0/okio/src/main/java/okio/BufferedSource.java 0000664 0000000 0000000 00000047417 13240174456 0025113 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2014 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import java.io.IOException;
import java.io.InputStream;
import java.nio.channels.ReadableByteChannel;
import java.nio.charset.Charset;
import javax.annotation.Nullable;
/**
* A source that keeps a buffer internally so that callers can do small reads without a performance
* penalty. It also allows clients to read ahead, buffering as much as necessary before consuming
* input.
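*
* <p>One common way to obtain a {@code BufferedSource} is via {@link Okio#buffer(Source)}. A
* brief sketch (the file name here is an arbitrary assumption):
*
* <pre>{@code
*
*   try (BufferedSource source = Okio.buffer(Okio.source(new File("hello.txt")))) {
*     String line = source.readUtf8Line();
*   }
* }</pre>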
*/
public interface BufferedSource extends Source, ReadableByteChannel {
/** Returns this source's internal buffer. */
Buffer buffer();
/**
* Returns true if there are no more bytes in this source. This will block until there are bytes
* to read or the source is definitely exhausted.
*/
boolean exhausted() throws IOException;
/**
* Returns when the buffer contains at least {@code byteCount} bytes. Throws an
* {@link java.io.EOFException} if the source is exhausted before the required bytes can be read.
*/
void require(long byteCount) throws IOException;
/**
* Returns true when the buffer contains at least {@code byteCount} bytes, expanding it as
* necessary. Returns false if the source is exhausted before the requested bytes can be read.
*/
boolean request(long byteCount) throws IOException;
/** Removes a byte from this source and returns it. */
byte readByte() throws IOException;
/**
* Removes eight bytes from this source and returns a little-endian long.
*/
long readLongLe() throws IOException;
/**
* Reads a long from this source in signed decimal form (i.e., as a string in base 10 with
* optional leading '-'). This will iterate until a non-digit character is found.
*
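* <p>A brief example (illustrative, in the spirit of the other examples in this interface):
*
* <pre>{@code
*
*   Buffer buffer = new Buffer()
*       .writeUtf8("8675309 -123 00001");
*
*   assertEquals(8675309L, buffer.readDecimalLong());
*   assertEquals(' ', buffer.readByte());
*   assertEquals(-123L, buffer.readDecimalLong());
*   assertEquals(' ', buffer.readByte());
*   assertEquals(1L, buffer.readDecimalLong());
* }</pre>
*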
* @throws NumberFormatException if the found digits do not fit into a {@code long} or a decimal
* number was not present.
*/
long readDecimalLong() throws IOException;
/**
* Reads a long from this source in hexadecimal form (i.e., as a string in base 16). This will
* iterate until a non-hexadecimal character is found.
*
* @throws NumberFormatException if the found hexadecimal does not fit into a {@code long} or
* hexadecimal was not found.
*/
long readHexadecimalUnsignedLong() throws IOException;
/**
* Reads and discards {@code byteCount} bytes from this source. Throws an
* {@link java.io.EOFException} if the source is exhausted before the
* requested bytes can be skipped.
*/
void skip(long byteCount) throws IOException;
/** Removes all bytes from this and returns them as a byte string. */
ByteString readByteString() throws IOException;
/** Removes {@code byteCount} bytes from this and returns them as a byte string. */
ByteString readByteString(long byteCount) throws IOException;
/**
* Finds the first string in {@code options} that is a prefix of this buffer, consumes it from
* this buffer, and returns its index. If no byte string in {@code options} is a prefix of this
* buffer this returns -1 and no bytes are consumed.
*
*
* <p>This can be used as an alternative to {@link #readByteString} or even {@link #readUtf8} if
* the set of expected values is known in advance.
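*
* <p>A brief sketch (the option values here are arbitrary assumptions):
*
* <pre>{@code
*
*   Options FIELDS = Options.of(
*       ByteString.encodeUtf8("depth="),
*       ByteString.encodeUtf8("height="),
*       ByteString.encodeUtf8("width="));
*
*   Buffer buffer = new Buffer().writeUtf8("width=640\n");
*
*   assertEquals(2, buffer.select(FIELDS)); // Consumes "width=".
*   assertEquals("640", buffer.readUtf8LineStrict());
* }</pre>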
*/
int select(Options options) throws IOException;
/** Removes all bytes from this and returns them as a byte array. */
byte[] readByteArray() throws IOException;
/** Removes {@code byteCount} bytes from this and returns them as a byte array. */
byte[] readByteArray(long byteCount) throws IOException;
/**
* Removes up to {@code sink.length} bytes from this and copies them into {@code sink}. Returns
* the number of bytes read, or -1 if this source is exhausted.
*/
int read(byte[] sink) throws IOException;
/**
* Removes exactly {@code sink.length} bytes from this and copies them into {@code sink}. Throws
* an {@link java.io.EOFException} if the requested number of bytes cannot be read.
*/
void readFully(byte[] sink) throws IOException;
/**
* Removes up to {@code byteCount} bytes from this and copies them into {@code sink} at {@code
* offset}. Returns the number of bytes read, or -1 if this source is exhausted.
*/
int read(byte[] sink, int offset, int byteCount) throws IOException;
/**
* Removes exactly {@code byteCount} bytes from this and appends them to {@code sink}. Throws an
* {@link java.io.EOFException} if the requested number of bytes cannot be read.
*/
void readFully(Buffer sink, long byteCount) throws IOException;
/**
* Removes all bytes from this and appends them to {@code sink}. Returns the total number of bytes
* written to {@code sink} which will be 0 if this is exhausted.
*/
long readAll(Sink sink) throws IOException;
/**
* Removes all bytes from this, decodes them as UTF-8, and returns the string. Returns the empty
* string if this source is empty.
* <pre>{@code
*
* Buffer buffer = new Buffer()
* .writeUtf8("Uh uh uh!")
* .writeByte(' ')
* .writeUtf8("You didn't say the magic word!");
*
* assertEquals("Uh uh uh! You didn't say the magic word!", buffer.readUtf8());
* assertEquals(0, buffer.size());
*
* assertEquals("", buffer.readUtf8());
* assertEquals(0, buffer.size());
* }</pre>
*/
String readUtf8() throws IOException;
/**
* Removes {@code byteCount} bytes from this, decodes them as UTF-8, and returns the string.
*
*/
String readUtf8(long byteCount) throws IOException;
/**
* Removes and returns characters up to but not including the next line break. A line break is
* either {@code "\n"} or {@code "\r\n"}; these characters are not included in the result.
*
* <pre>{@code
*
* Buffer buffer = new Buffer()
* .writeUtf8("I'm a hacker!\n")
* .writeUtf8("That's what I said: you're a nerd.\n")
* .writeUtf8("I prefer to be called a hacker!\n");
* assertEquals(81, buffer.size());
*
* assertEquals("I'm a hacker!", buffer.readUtf8Line());
* assertEquals(67, buffer.size());
*
* assertEquals("That's what I said: you're a nerd.", buffer.readUtf8Line());
* assertEquals(32, buffer.size());
*
* assertEquals("I prefer to be called a hacker!", buffer.readUtf8Line());
* assertEquals(0, buffer.size());
*
* assertEquals(null, buffer.readUtf8Line());
* assertEquals(0, buffer.size());
* }</pre>
*
*
* <p>On the end of the stream this method returns null, just like {@link
* java.io.BufferedReader}. If the source doesn't end with a line break then an implicit line
* break is assumed. Null is returned once the source is exhausted. Use this for human-generated
* data, where a trailing line break is optional.
*/
@Nullable String readUtf8Line() throws IOException;
/**
* Removes and returns characters up to but not including the next line break. A line break is
* either {@code "\n"} or {@code "\r\n"}; these characters are not included in the result.
*
*
* <p>On the end of the stream this method throws. Every call must consume either
* '\r\n' or '\n'. If these characters are absent in the stream, an {@link java.io.EOFException}
* is thrown. Use this for machine-generated data where a missing line break implies truncated
* input.
*/
String readUtf8LineStrict() throws IOException;
/**
* Like {@link #readUtf8LineStrict()}, except this allows the caller to specify the longest
* allowed match. Use this to protect against streams that may not include
* {@code "\n"} or {@code "\r\n"}.
*
*
* <p>The returned string will have at most {@code limit} UTF-8 bytes, and the maximum number
* of bytes scanned is {@code limit + 2}. If {@code limit == 0} this will always throw
* an {@code EOFException} because no bytes will be scanned.
*
*
* <p>This method is safe. No bytes are discarded if the match fails, and the caller is free
* to try another match:
* <pre>{@code
*
* Buffer buffer = new Buffer();
* buffer.writeUtf8("12345\r\n");
*
* // This will throw! There must be \r\n or \n at the limit or before it.
* buffer.readUtf8LineStrict(4);
*
* // No bytes have been consumed so the caller can retry.
* assertEquals("12345", buffer.readUtf8LineStrict(5));
* }</pre>
*/
String readUtf8LineStrict(long limit) throws IOException;
/**
* Removes and returns a single UTF-8 code point, reading between 1 and 4 bytes as necessary.
*
*
* <p>If this source is exhausted before a complete code point can be read, this throws an {@link
* java.io.EOFException} and consumes no input.
*
*
* <p>If this source doesn't start with a properly-encoded UTF-8 code point, this method will
* remove 1 or more non-UTF-8 bytes and return the replacement character ({@code U+FFFD}). This
* covers encoding problems (the input is not properly-encoded UTF-8), characters out of range
* (beyond the 0x10ffff limit of Unicode), code points for UTF-16 surrogates (U+d800..U+dfff) and
* overlong encodings (such as {@code 0xc080} for the NUL character in modified UTF-8).
*/
int readUtf8CodePoint() throws IOException;
/** Removes all bytes from this, decodes them as {@code charset}, and returns the string. */
String readString(Charset charset) throws IOException;
/**
* Removes {@code byteCount} bytes from this, decodes them as {@code charset}, and returns the
* string.
*/
String readString(long byteCount, Charset charset) throws IOException;
/** Equivalent to {@link #indexOf(byte, long) indexOf(b, 0)}. */
long indexOf(byte b) throws IOException;
/**
* Returns the index of the first {@code b} in the buffer at or after {@code fromIndex}. This
* expands the buffer as necessary until {@code b} is found. This reads an unbounded number of
* bytes into the buffer. Returns -1 if the stream is exhausted before the requested byte is
* found.
* <pre>{@code
*
* Buffer buffer = new Buffer();
* buffer.writeUtf8("Don't move! He can't see us if we don't move.");
*
* byte m = 'm';
* assertEquals(6, buffer.indexOf(m));
* assertEquals(40, buffer.indexOf(m, 12));
* }</pre>
*/
long indexOf(byte b, long fromIndex) throws IOException;
/**
* Returns the index of {@code b} if it is found in the range of {@code fromIndex} inclusive
* to {@code toIndex} exclusive. If {@code b} isn't found, or if {@code fromIndex == toIndex},
* then -1 is returned.
*
*
* <p>The scan terminates at either {@code toIndex} or the end of the buffer, whichever comes
* first. The maximum number of bytes scanned is {@code toIndex-fromIndex}.
*/
long indexOf(byte b, long fromIndex, long toIndex) throws IOException;
/** Equivalent to {@link #indexOf(ByteString, long) indexOf(bytes, 0)}. */
long indexOf(ByteString bytes) throws IOException;
/**
* Returns the index of the first match for {@code bytes} in the buffer at or after {@code
* fromIndex}. This expands the buffer as necessary until {@code bytes} is found. This reads an
* unbounded number of bytes into the buffer. Returns -1 if the stream is exhausted before the
* requested bytes are found.
* <pre>{@code
*
* ByteString MOVE = ByteString.encodeUtf8("move");
*
* Buffer buffer = new Buffer();
* buffer.writeUtf8("Don't move! He can't see us if we don't move.");
*
* assertEquals(6, buffer.indexOf(MOVE));
* assertEquals(40, buffer.indexOf(MOVE, 12));
* }</pre>
*/
long indexOf(ByteString bytes, long fromIndex) throws IOException;
/** Equivalent to {@link #indexOfElement(ByteString, long) indexOfElement(targetBytes, 0)}. */
long indexOfElement(ByteString targetBytes) throws IOException;
/**
* Returns the first index in this buffer that is at or after {@code fromIndex} and that contains
* any of the bytes in {@code targetBytes}. This expands the buffer as necessary until a target
* byte is found. This reads an unbounded number of bytes into the buffer. Returns -1 if the
* stream is exhausted before the requested byte is found.
* <pre>{@code
*
* ByteString ANY_VOWEL = ByteString.encodeUtf8("AEOIUaeoiu");
*
* Buffer buffer = new Buffer();
* buffer.writeUtf8("Dr. Alan Grant");
*
* assertEquals(4, buffer.indexOfElement(ANY_VOWEL)); // 'A' in 'Alan'.
* assertEquals(11, buffer.indexOfElement(ANY_VOWEL, 9)); // 'a' in 'Grant'.
* }</pre>
*/
long indexOfElement(ByteString targetBytes, long fromIndex) throws IOException;
/**
* Returns true if the bytes at {@code offset} in this source equal {@code bytes}. This expands
* the buffer as necessary until a byte does not match, all bytes are matched, or if the stream
* is exhausted before enough bytes could determine a match.
* <pre>{@code
*
* ByteString simonSays = ByteString.encodeUtf8("Simon says:");
*
* Buffer standOnOneLeg = new Buffer().writeUtf8("Simon says: Stand on one leg.");
* assertTrue(standOnOneLeg.rangeEquals(0, simonSays));
*
* Buffer payMeMoney = new Buffer().writeUtf8("Pay me $1,000,000.");
* assertFalse(payMeMoney.rangeEquals(0, simonSays));
* }</pre>
*/
boolean rangeEquals(long offset, ByteString bytes) throws IOException;
/**
* Returns true if {@code byteCount} bytes at {@code offset} in this source equal {@code bytes}
* at {@code bytesOffset}. This expands the buffer as necessary until a byte does not match, all
* bytes are matched, or if the stream is exhausted before enough bytes could determine a match.
*/
boolean rangeEquals(long offset, ByteString bytes, int bytesOffset, int byteCount)
throws IOException;
/** Returns an input stream that reads from this source. */
InputStream inputStream();
}
okio-okio-parent-1.14.0/okio/src/main/java/okio/ByteString.java 0000664 0000000 0000000 00000043724 13240174456 0024277 0 ustar 00root root 0000000 0000000 /*
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.OutputStream;
import java.io.Serializable;
import java.lang.reflect.Field;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.security.InvalidKeyException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
import javax.annotation.Nullable;
import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;
import static okio.Util.arrayRangeEquals;
import static okio.Util.checkOffsetAndCount;
/**
* An immutable sequence of bytes.
*
*
* <p>Byte strings compare lexicographically as a sequence of unsigned bytes. That
* is, the byte string {@code ff} sorts after {@code 00}. This is counter to the sort order of the
* corresponding bytes, where {@code -1} sorts before {@code 0}.
*
*
* <p><strong>Full disclosure:</strong> this class provides untrusted input and output streams with
* raw access to the underlying byte array. A hostile stream implementation could keep a reference
* to the mutable byte string, violating the immutable guarantee of this class. For this reason a
* byte string's immutability guarantee cannot be relied upon for security in applets and other
* environments that run both trusted and untrusted code in the same process.
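*
* <p>A brief sketch of typical use (illustrative only):
*
* <pre>{@code
*
*   ByteString hi = ByteString.encodeUtf8("hi");
*
*   assertEquals("6869", hi.hex());
*   assertEquals("aGk=", hi.base64());
*   assertEquals(hi, ByteString.decodeBase64("aGk="));
* }</pre>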
*/
public class ByteString implements Serializable, Comparable<ByteString> {
static final char[] HEX_DIGITS =
{ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };
private static final long serialVersionUID = 1L;
/** A singleton empty {@code ByteString}. */
public static final ByteString EMPTY = ByteString.of();
final byte[] data;
transient int hashCode; // Lazily computed; 0 if unknown.
transient String utf8; // Lazily computed.
ByteString(byte[] data) {
this.data = data; // Trusted internal constructor doesn't clone data.
}
/**
* Returns a new byte string containing a clone of the bytes of {@code data}.
*/
public static ByteString of(byte... data) {
if (data == null) throw new IllegalArgumentException("data == null");
return new ByteString(data.clone());
}
/**
* Returns a new byte string containing a copy of {@code byteCount} bytes of {@code data} starting
* at {@code offset}.
*/
public static ByteString of(byte[] data, int offset, int byteCount) {
if (data == null) throw new IllegalArgumentException("data == null");
checkOffsetAndCount(data.length, offset, byteCount);
byte[] copy = new byte[byteCount];
System.arraycopy(data, offset, copy, 0, byteCount);
return new ByteString(copy);
}
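/** Returns a new byte string containing a copy of the bytes of {@code data}. */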
public static ByteString of(ByteBuffer data) {
if (data == null) throw new IllegalArgumentException("data == null");
byte[] copy = new byte[data.remaining()];
data.get(copy);
return new ByteString(copy);
}
/** Returns a new byte string containing the {@code UTF-8} bytes of {@code s}. */
public static ByteString encodeUtf8(String s) {
if (s == null) throw new IllegalArgumentException("s == null");
ByteString byteString = new ByteString(s.getBytes(Util.UTF_8));
byteString.utf8 = s;
return byteString;
}
/** Returns a new byte string containing the {@code charset}-encoded bytes of {@code s}. */
public static ByteString encodeString(String s, Charset charset) {
if (s == null) throw new IllegalArgumentException("s == null");
if (charset == null) throw new IllegalArgumentException("charset == null");
return new ByteString(s.getBytes(charset));
}
/** Constructs a new {@code String} by decoding the bytes as {@code UTF-8}. */
public String utf8() {
String result = utf8;
// We don't care if we double-allocate in racy code.
return result != null ? result : (utf8 = new String(data, Util.UTF_8));
}
/** Constructs a new {@code String} by decoding the bytes using {@code charset}. */
public String string(Charset charset) {
if (charset == null) throw new IllegalArgumentException("charset == null");
return new String(data, charset);
}
/**
* Returns this byte string encoded as Base64. In violation of the
* RFC, the returned string does not wrap lines at 76 columns.
*/
public String base64() {
return Base64.encode(data);
}
/** Returns the 128-bit MD5 hash of this byte string. */
public ByteString md5() {
return digest("MD5");
}
/** Returns the 160-bit SHA-1 hash of this byte string. */
public ByteString sha1() {
return digest("SHA-1");
}
/** Returns the 256-bit SHA-256 hash of this byte string. */
public ByteString sha256() {
return digest("SHA-256");
}
/** Returns the 512-bit SHA-512 hash of this byte string. */
public ByteString sha512() {
return digest("SHA-512");
}
private ByteString digest(String algorithm) {
try {
return ByteString.of(MessageDigest.getInstance(algorithm).digest(data));
} catch (NoSuchAlgorithmException e) {
throw new AssertionError(e);
}
}
/** Returns the 160-bit SHA-1 HMAC of this byte string. */
public ByteString hmacSha1(ByteString key) {
return hmac("HmacSHA1", key);
}
/** Returns the 256-bit SHA-256 HMAC of this byte string. */
public ByteString hmacSha256(ByteString key) {
return hmac("HmacSHA256", key);
}
/** Returns the 512-bit SHA-512 HMAC of this byte string. */
public ByteString hmacSha512(ByteString key) {
return hmac("HmacSHA512", key);
}
private ByteString hmac(String algorithm, ByteString key) {
try {
Mac mac = Mac.getInstance(algorithm);
mac.init(new SecretKeySpec(key.toByteArray(), algorithm));
return ByteString.of(mac.doFinal(data));
} catch (NoSuchAlgorithmException e) {
throw new AssertionError(e);
} catch (InvalidKeyException e) {
throw new IllegalArgumentException(e);
}
}
/**
* Returns this byte string encoded as URL-safe
* Base64.
*/
public String base64Url() {
return Base64.encodeUrl(data);
}
/**
* Decodes the Base64-encoded bytes and returns their value as a byte string.
* Returns null if {@code base64} is not a Base64-encoded sequence of bytes.
*/
public static @Nullable ByteString decodeBase64(String base64) {
if (base64 == null) throw new IllegalArgumentException("base64 == null");
byte[] decoded = Base64.decode(base64);
return decoded != null ? new ByteString(decoded) : null;
}
/** Returns this byte string encoded in hexadecimal. */
public String hex() {
char[] result = new char[data.length * 2];
int c = 0;
for (byte b : data) {
result[c++] = HEX_DIGITS[(b >> 4) & 0xf];
result[c++] = HEX_DIGITS[b & 0xf];
}
return new String(result);
}
/** Decodes the hex-encoded bytes and returns their value as a byte string. */
public static ByteString decodeHex(String hex) {
if (hex == null) throw new IllegalArgumentException("hex == null");
if (hex.length() % 2 != 0) throw new IllegalArgumentException("Unexpected hex string: " + hex);
byte[] result = new byte[hex.length() / 2];
for (int i = 0; i < result.length; i++) {
int d1 = decodeHexDigit(hex.charAt(i * 2)) << 4;
int d2 = decodeHexDigit(hex.charAt(i * 2 + 1));
result[i] = (byte) (d1 + d2);
}
return of(result);
}
private static int decodeHexDigit(char c) {
if (c >= '0' && c <= '9') return c - '0';
if (c >= 'a' && c <= 'f') return c - 'a' + 10;
if (c >= 'A' && c <= 'F') return c - 'A' + 10;
throw new IllegalArgumentException("Unexpected hex digit: " + c);
}
/**
* Reads {@code byteCount} bytes from {@code in} and returns the result.
*
* @throws java.io.EOFException if {@code in} has fewer than {@code byteCount}
* bytes to read.
*/
public static ByteString read(InputStream in, int byteCount) throws IOException {
if (in == null) throw new IllegalArgumentException("in == null");
if (byteCount < 0) throw new IllegalArgumentException("byteCount < 0: " + byteCount);
byte[] result = new byte[byteCount];
for (int offset = 0, read; offset < byteCount; offset += read) {
read = in.read(result, offset, byteCount - offset);
if (read == -1) throw new EOFException();
}
return new ByteString(result);
}
/**
* Returns a byte string equal to this byte string, but with the bytes 'A'
* through 'Z' replaced with the corresponding byte in 'a' through 'z'.
* Returns this byte string if it contains no bytes in 'A' through 'Z'.
*/
public ByteString toAsciiLowercase() {
// Search for an uppercase character. If we don't find one, return this.
for (int i = 0; i < data.length; i++) {
byte c = data[i];
if (c < 'A' || c > 'Z') continue;
// If we reach this point, this string is not entirely ASCII lowercase. Create and
// return a new byte string.
byte[] lowercase = data.clone();
lowercase[i++] = (byte) (c - ('A' - 'a'));
for (; i < lowercase.length; i++) {
c = lowercase[i];
if (c < 'A' || c > 'Z') continue;
lowercase[i] = (byte) (c - ('A' - 'a'));
}
return new ByteString(lowercase);
}
return this;
}
/**
* Returns a byte string equal to this byte string, but with the bytes 'a'
* through 'z' replaced with the corresponding byte in 'A' through 'Z'.
* Returns this byte string if it contains no bytes in 'a' through 'z'.
*/
public ByteString toAsciiUppercase() {
// Search for a lowercase character. If we don't find one, return this.
for (int i = 0; i < data.length; i++) {
byte c = data[i];
if (c < 'a' || c > 'z') continue;
// If we reach this point, this string is not entirely ASCII uppercase. Create and
// return a new byte string.
byte[] lowercase = data.clone();
lowercase[i++] = (byte) (c - ('a' - 'A'));
for (; i < lowercase.length; i++) {
c = lowercase[i];
if (c < 'a' || c > 'z') continue;
lowercase[i] = (byte) (c - ('a' - 'A'));
}
return new ByteString(lowercase);
}
return this;
}
/**
* Returns a byte string that is a substring of this byte string, beginning at the specified
* index until the end of this string. Returns this byte string if {@code beginIndex} is 0.
*/
public ByteString substring(int beginIndex) {
return substring(beginIndex, data.length);
}
/**
* Returns a byte string that is a substring of this byte string, beginning at the specified
* {@code beginIndex} and ending at the specified {@code endIndex}. Returns this byte string if
* {@code beginIndex} is 0 and {@code endIndex} is the length of this byte string.
*/
public ByteString substring(int beginIndex, int endIndex) {
if (beginIndex < 0) throw new IllegalArgumentException("beginIndex < 0");
if (endIndex > data.length) {
throw new IllegalArgumentException("endIndex > length(" + data.length + ")");
}
int subLen = endIndex - beginIndex;
if (subLen < 0) throw new IllegalArgumentException("endIndex < beginIndex");
if ((beginIndex == 0) && (endIndex == data.length)) {
return this;
}
byte[] copy = new byte[subLen];
System.arraycopy(data, beginIndex, copy, 0, subLen);
return new ByteString(copy);
}
/** Returns the byte at {@code pos}. */
public byte getByte(int pos) {
return data[pos];
}
/**
* Returns the number of bytes in this ByteString.
*/
public int size() {
return data.length;
}
/**
* Returns a byte array containing a copy of the bytes in this {@code ByteString}.
*/
public byte[] toByteArray() {
return data.clone();
}
/** Returns the bytes of this string without a defensive copy. Do not mutate! */
byte[] internalArray() {
return data;
}
/**
* Returns a {@code ByteBuffer} view of the bytes in this {@code ByteString}.
*/
public ByteBuffer asByteBuffer() {
return ByteBuffer.wrap(data).asReadOnlyBuffer();
}
/** Writes the contents of this byte string to {@code out}. */
public void write(OutputStream out) throws IOException {
if (out == null) throw new IllegalArgumentException("out == null");
out.write(data);
}
/** Writes the contents of this byte string to {@code buffer}. */
void write(Buffer buffer) {
buffer.write(data, 0, data.length);
}
/**
* Returns true if the bytes of this in {@code [offset..offset+byteCount)} equal the bytes of
* {@code other} in {@code [otherOffset..otherOffset+byteCount)}. Returns false if either range is
* out of bounds.
*/
public boolean rangeEquals(int offset, ByteString other, int otherOffset, int byteCount) {
return other.rangeEquals(otherOffset, this.data, offset, byteCount);
}
/**
* Returns true if the bytes of this in {@code [offset..offset+byteCount)} equal the bytes of
* {@code other} in {@code [otherOffset..otherOffset+byteCount)}. Returns false if either range is
* out of bounds.
*/
public boolean rangeEquals(int offset, byte[] other, int otherOffset, int byteCount) {
return offset >= 0 && offset <= data.length - byteCount
&& otherOffset >= 0 && otherOffset <= other.length - byteCount
&& arrayRangeEquals(data, offset, other, otherOffset, byteCount);
}
public final boolean startsWith(ByteString prefix) {
return rangeEquals(0, prefix, 0, prefix.size());
}
public final boolean startsWith(byte[] prefix) {
return rangeEquals(0, prefix, 0, prefix.length);
}
public final boolean endsWith(ByteString suffix) {
return rangeEquals(size() - suffix.size(), suffix, 0, suffix.size());
}
public final boolean endsWith(byte[] suffix) {
return rangeEquals(size() - suffix.length, suffix, 0, suffix.length);
}
public final int indexOf(ByteString other) {
return indexOf(other.internalArray(), 0);
}
public final int indexOf(ByteString other, int fromIndex) {
return indexOf(other.internalArray(), fromIndex);
}
public final int indexOf(byte[] other) {
return indexOf(other, 0);
}
public int indexOf(byte[] other, int fromIndex) {
fromIndex = Math.max(fromIndex, 0);
for (int i = fromIndex, limit = data.length - other.length; i <= limit; i++) {
if (arrayRangeEquals(data, i, other, 0, other.length)) {
return i;
}
}
return -1;
}
public final int lastIndexOf(ByteString other) {
return lastIndexOf(other.internalArray(), size());
}
public final int lastIndexOf(ByteString other, int fromIndex) {
return lastIndexOf(other.internalArray(), fromIndex);
}
public final int lastIndexOf(byte[] other) {
return lastIndexOf(other, size());
}
public int lastIndexOf(byte[] other, int fromIndex) {
fromIndex = Math.min(fromIndex, data.length - other.length);
for (int i = fromIndex; i >= 0; i--) {
if (arrayRangeEquals(data, i, other, 0, other.length)) {
return i;
}
}
return -1;
}
@Override public boolean equals(Object o) {
if (o == this) return true;
return o instanceof ByteString
&& ((ByteString) o).size() == data.length
&& ((ByteString) o).rangeEquals(0, data, 0, data.length);
}
@Override public int hashCode() {
int result = hashCode;
return result != 0 ? result : (hashCode = Arrays.hashCode(data));
}
@Override public int compareTo(ByteString byteString) {
int sizeA = size();
int sizeB = byteString.size();
for (int i = 0, size = Math.min(sizeA, sizeB); i < size; i++) {
int byteA = getByte(i) & 0xff;
int byteB = byteString.getByte(i) & 0xff;
if (byteA == byteB) continue;
return byteA < byteB ? -1 : 1;
}
if (sizeA == sizeB) return 0;
return sizeA < sizeB ? -1 : 1;
}
/**
* Returns a human-readable string that describes the contents of this byte string. Typically this
* is a string like {@code [text=Hello]} or {@code [hex=0000ffff]}.
*/
@Override public String toString() {
if (data.length == 0) {
return "[size=0]";
}
String text = utf8();
int i = codePointIndexToCharIndex(text, 64);
if (i == -1) {
return data.length <= 64
? "[hex=" + hex() + "]"
: "[size=" + data.length + " hex=" + substring(0, 64).hex() + "…]";
}
String safeText = text.substring(0, i)
.replace("\\", "\\\\")
.replace("\n", "\\n")
.replace("\r", "\\r");
return i < text.length()
? "[size=" + data.length + " text=" + safeText + "…]"
: "[text=" + safeText + "]";
}
static int codePointIndexToCharIndex(String s, int codePointCount) {
for (int i = 0, j = 0, length = s.length(), c; i < length; i += Character.charCount(c)) {
if (j == codePointCount) {
return i;
}
c = s.codePointAt(i);
if ((Character.isISOControl(c) && c != '\n' && c != '\r')
|| c == Buffer.REPLACEMENT_CHARACTER) {
return -1;
}
j++;
}
return s.length();
}
private void readObject(ObjectInputStream in) throws IOException {
int dataLength = in.readInt();
ByteString byteString = ByteString.read(in, dataLength);
try {
Field field = ByteString.class.getDeclaredField("data");
field.setAccessible(true);
field.set(this, byteString.data);
} catch (NoSuchFieldException e) {
throw new AssertionError();
} catch (IllegalAccessException e) {
throw new AssertionError();
}
}
private void writeObject(ObjectOutputStream out) throws IOException {
out.writeInt(data.length);
out.write(data);
}
}
okio-okio-parent-1.14.0/okio/src/main/java/okio/DeflaterSink.java 0000664 0000000 0000000 00000011433 13240174456 0024550 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2014 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import java.io.IOException;
import java.util.zip.Deflater;
import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement;
import static okio.Util.checkOffsetAndCount;
/**
* A sink that uses DEFLATE to
* compress data written to another sink.
*
*
* <h3>Sync flush</h3>
* Aggressive flushing of this stream may result in reduced compression. Each
* call to {@link #flush} immediately compresses all currently-buffered data;
* this early compression may be less effective than compression performed
* without flushing.
*
*
* <p>This is equivalent to using {@link Deflater} with the sync flush option.
* This class does not offer any partial flush mechanism. For best performance,
* only call {@link #flush} when application behavior requires it.
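*
* <p>A brief usage sketch (illustrative; the file name and payload are arbitrary assumptions):
*
* <pre>{@code
*
*   Buffer data = new Buffer().writeUtf8("hello, deflate");
*   try (DeflaterSink deflaterSink =
*       new DeflaterSink(Okio.sink(new File("hello.deflate")), new Deflater())) {
*     deflaterSink.write(data, data.size());
*   }
* }</pre>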
*/
public final class DeflaterSink implements Sink {
private final BufferedSink sink;
private final Deflater deflater;
private boolean closed;
public DeflaterSink(Sink sink, Deflater deflater) {
this(Okio.buffer(sink), deflater);
}
/**
* This package-private constructor shares a buffer with its trusted caller.
* In general we can't share a buffer because the deflater holds input
* bytes until they are deflated.
*/
DeflaterSink(BufferedSink sink, Deflater deflater) {
if (sink == null) throw new IllegalArgumentException("source == null");
if (deflater == null) throw new IllegalArgumentException("inflater == null");
this.sink = sink;
this.deflater = deflater;
}
@Override public void write(Buffer source, long byteCount) throws IOException {
checkOffsetAndCount(source.size, 0, byteCount);
while (byteCount > 0) {
// Share bytes from the head segment of 'source' with the deflater.
Segment head = source.head;
int toDeflate = (int) Math.min(byteCount, head.limit - head.pos);
deflater.setInput(head.data, head.pos, toDeflate);
// Deflate those bytes into sink.
deflate(false);
// Mark those bytes as read.
source.size -= toDeflate;
head.pos += toDeflate;
if (head.pos == head.limit) {
source.head = head.pop();
SegmentPool.recycle(head);
}
byteCount -= toDeflate;
}
}
@IgnoreJRERequirement
private void deflate(boolean syncFlush) throws IOException {
Buffer buffer = sink.buffer();
while (true) {
Segment s = buffer.writableSegment(1);
// The 4-parameter overload of deflate() doesn't exist in the RI until
// Java 1.7, and is public (although with @hide) on Android since 2.3.
// The @hide tag means that this code won't compile against the Android
// 2.3 SDK, but it will run fine there.
int deflated = syncFlush
? deflater.deflate(s.data, s.limit, Segment.SIZE - s.limit, Deflater.SYNC_FLUSH)
: deflater.deflate(s.data, s.limit, Segment.SIZE - s.limit);
if (deflated > 0) {
s.limit += deflated;
buffer.size += deflated;
sink.emitCompleteSegments();
} else if (deflater.needsInput()) {
if (s.pos == s.limit) {
// We allocated a tail segment, but didn't end up needing it. Recycle!
buffer.head = s.pop();
SegmentPool.recycle(s);
}
return;
}
}
}
@Override public void flush() throws IOException {
deflate(true);
sink.flush();
}
void finishDeflate() throws IOException {
deflater.finish();
deflate(false);
}
@Override public void close() throws IOException {
if (closed) return;
// Emit deflated data to the underlying sink. If this fails, we still need
// to close the deflater and the sink; otherwise we risk leaking resources.
Throwable thrown = null;
try {
finishDeflate();
} catch (Throwable e) {
thrown = e;
}
try {
deflater.end();
} catch (Throwable e) {
if (thrown == null) thrown = e;
}
try {
sink.close();
} catch (Throwable e) {
if (thrown == null) thrown = e;
}
closed = true;
if (thrown != null) Util.sneakyRethrow(thrown);
}
@Override public Timeout timeout() {
return sink.timeout();
}
@Override public String toString() {
return "DeflaterSink(" + sink + ")";
}
}
okio-okio-parent-1.14.0/okio/src/main/java/okio/ForwardingSink.java 0000664 0000000 0000000 00000003037 13240174456 0025125 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2014 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import java.io.IOException;
/** A {@link Sink} which forwards calls to another. Useful for subclassing. */
public abstract class ForwardingSink implements Sink {
private final Sink delegate;
public ForwardingSink(Sink delegate) {
if (delegate == null) throw new IllegalArgumentException("delegate == null");
this.delegate = delegate;
}
/** {@link Sink} to which this instance is delegating. */
public final Sink delegate() {
return delegate;
}
@Override public void write(Buffer source, long byteCount) throws IOException {
delegate.write(source, byteCount);
}
@Override public void flush() throws IOException {
delegate.flush();
}
@Override public Timeout timeout() {
return delegate.timeout();
}
@Override public void close() throws IOException {
delegate.close();
}
@Override public String toString() {
return getClass().getSimpleName() + "(" + delegate.toString() + ")";
}
}
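// Illustrative sketch, not part of the original file: a typical ForwardingSink subclass that
// counts the bytes written through it. The class and field names are made up for this example.
//
//   final class CountingSink extends ForwardingSink {
//     long bytesWritten;
//
//     CountingSink(Sink delegate) {
//       super(delegate);
//     }
//
//     @Override public void write(Buffer source, long byteCount) throws IOException {
//       super.write(source, byteCount);
//       bytesWritten += byteCount;
//     }
//   }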
okio-okio-parent-1.14.0/okio/src/main/java/okio/ForwardingSource.java 0000664 0000000 0000000 00000002740 13240174456 0025461 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2014 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import java.io.IOException;
/** A {@link Source} which forwards calls to another. Useful for subclassing. */
public abstract class ForwardingSource implements Source {
private final Source delegate;
public ForwardingSource(Source delegate) {
if (delegate == null) throw new IllegalArgumentException("delegate == null");
this.delegate = delegate;
}
/** {@link Source} to which this instance is delegating. */
public final Source delegate() {
return delegate;
}
@Override public long read(Buffer sink, long byteCount) throws IOException {
return delegate.read(sink, byteCount);
}
@Override public Timeout timeout() {
return delegate.timeout();
}
@Override public void close() throws IOException {
delegate.close();
}
@Override public String toString() {
return getClass().getSimpleName() + "(" + delegate.toString() + ")";
}
}
okio-okio-parent-1.14.0/okio/src/main/java/okio/ForwardingTimeout.java 0000664 0000000 0000000 00000004111 13240174456 0025641 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2015 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
/** A {@link Timeout} which forwards calls to another. Useful for subclassing. */
public class ForwardingTimeout extends Timeout {
private Timeout delegate;
public ForwardingTimeout(Timeout delegate) {
if (delegate == null) throw new IllegalArgumentException("delegate == null");
this.delegate = delegate;
}
/** {@link Timeout} instance to which this instance is currently delegating. */
public final Timeout delegate() {
return delegate;
}
public final ForwardingTimeout setDelegate(Timeout delegate) {
if (delegate == null) throw new IllegalArgumentException("delegate == null");
this.delegate = delegate;
return this;
}
@Override public Timeout timeout(long timeout, TimeUnit unit) {
return delegate.timeout(timeout, unit);
}
@Override public long timeoutNanos() {
return delegate.timeoutNanos();
}
@Override public boolean hasDeadline() {
return delegate.hasDeadline();
}
@Override public long deadlineNanoTime() {
return delegate.deadlineNanoTime();
}
@Override public Timeout deadlineNanoTime(long deadlineNanoTime) {
return delegate.deadlineNanoTime(deadlineNanoTime);
}
@Override public Timeout clearTimeout() {
return delegate.clearTimeout();
}
@Override public Timeout clearDeadline() {
return delegate.clearDeadline();
}
@Override public void throwIfReached() throws IOException {
delegate.throwIfReached();
}
}
okio-okio-parent-1.14.0/okio/src/main/java/okio/GzipSink.java 0000664 0000000 0000000 00000011057 13240174456 0023735 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2014 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import java.io.IOException;
import java.util.zip.CRC32;
import java.util.zip.Deflater;
import static java.util.zip.Deflater.DEFAULT_COMPRESSION;
/**
* A sink that uses GZIP to
* compress written data to another sink.
*
* <h3>Sync flush</h3>
*
* Aggressive flushing of this stream may result in reduced compression. Each
* call to {@link #flush} immediately compresses all currently-buffered data;
* this early compression may be less effective than compression performed
* without flushing.
*
* <p>This is equivalent to using {@link Deflater} with the sync flush option.
* This class does not offer any partial flush mechanism. For best performance,
* only call {@link #flush} when application behavior requires it.
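*
* <p>For illustration only, a minimal usage sketch (the file name is made up for this example):
* <pre>   {@code
*
*   BufferedSink gzipSink = Okio.buffer(new GzipSink(Okio.sink(new File("hello.txt.gz"))));
*   gzipSink.writeUtf8("hello world");
*   gzipSink.close();
* }</pre>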
*/
public final class GzipSink implements Sink {
/** Sink into which the GZIP format is written. */
private final BufferedSink sink;
/** The deflater used to compress the body. */
private final Deflater deflater;
/**
* The deflater sink takes care of moving data between decompressed source and
* compressed sink buffers.
*/
private final DeflaterSink deflaterSink;
private boolean closed;
/** Checksum calculated for the compressed body. */
private final CRC32 crc = new CRC32();
public GzipSink(Sink sink) {
if (sink == null) throw new IllegalArgumentException("sink == null");
this.deflater = new Deflater(DEFAULT_COMPRESSION, true /* No wrap */);
this.sink = Okio.buffer(sink);
this.deflaterSink = new DeflaterSink(this.sink, deflater);
writeHeader();
}
@Override public void write(Buffer source, long byteCount) throws IOException {
if (byteCount < 0) throw new IllegalArgumentException("byteCount < 0: " + byteCount);
if (byteCount == 0) return;
updateCrc(source, byteCount);
deflaterSink.write(source, byteCount);
}
@Override public void flush() throws IOException {
deflaterSink.flush();
}
@Override public Timeout timeout() {
return sink.timeout();
}
@Override public void close() throws IOException {
if (closed) return;
// This method delegates to the DeflaterSink for finishing the deflate process
// but keeps responsibility for releasing the deflater's resources. This is
// necessary because writeFooter needs to query the processed byte count which
// only works when the deflater is still open.
Throwable thrown = null;
try {
deflaterSink.finishDeflate();
writeFooter();
} catch (Throwable e) {
thrown = e;
}
try {
deflater.end();
} catch (Throwable e) {
if (thrown == null) thrown = e;
}
try {
sink.close();
} catch (Throwable e) {
if (thrown == null) thrown = e;
}
closed = true;
if (thrown != null) Util.sneakyRethrow(thrown);
}
/**
* Returns the {@link Deflater}.
* Use it to access stats, dictionary, compression level, etc.
*/
public Deflater deflater() {
return deflater;
}
private void writeHeader() {
// Write the Gzip header directly into the buffer for the sink to avoid handling IOException.
Buffer buffer = this.sink.buffer();
buffer.writeShort(0x1f8b); // Two-byte Gzip ID.
buffer.writeByte(0x08); // 8 == Deflate compression method.
buffer.writeByte(0x00); // No flags.
buffer.writeInt(0x00); // No modification time.
buffer.writeByte(0x00); // No extra flags.
buffer.writeByte(0x00); // No OS.
}
private void writeFooter() throws IOException {
sink.writeIntLe((int) crc.getValue()); // CRC of original data.
sink.writeIntLe((int) deflater.getBytesRead()); // Length of original data.
}
/** Updates the CRC with the given bytes. */
private void updateCrc(Buffer buffer, long byteCount) {
for (Segment head = buffer.head; byteCount > 0; head = head.next) {
int segmentLength = (int) Math.min(byteCount, head.limit - head.pos);
crc.update(head.data, head.pos, segmentLength);
byteCount -= segmentLength;
}
}
}
okio-okio-parent-1.14.0/okio/src/main/java/okio/GzipSource.java 0000664 0000000 0000000 00000016340 13240174456 0024271 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2014 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import java.io.EOFException;
import java.io.IOException;
import java.util.zip.CRC32;
import java.util.zip.Inflater;
/**
* A source that uses GZIP to
* decompress data read from another source.
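*
* <p>For illustration only, a minimal usage sketch (the file name is made up for this example):
* <pre>   {@code
*
*   BufferedSource gzipSource = Okio.buffer(new GzipSource(Okio.source(new File("hello.txt.gz"))));
*   String contents = gzipSource.readUtf8();
*   gzipSource.close();
* }</pre>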
*/
public final class GzipSource implements Source {
private static final byte FHCRC = 1;
private static final byte FEXTRA = 2;
private static final byte FNAME = 3;
private static final byte FCOMMENT = 4;
private static final byte SECTION_HEADER = 0;
private static final byte SECTION_BODY = 1;
private static final byte SECTION_TRAILER = 2;
private static final byte SECTION_DONE = 3;
/** The current section. Always progresses forward. */
private int section = SECTION_HEADER;
/**
* Our source should yield a GZIP header (which we consume directly), followed
* by deflated bytes (which we consume via an InflaterSource), followed by a
* GZIP trailer (which we also consume directly).
*/
private final BufferedSource source;
/** The inflater used to decompress the deflated body. */
private final Inflater inflater;
/**
* The inflater source takes care of moving data between compressed source and
* decompressed sink buffers.
*/
private final InflaterSource inflaterSource;
/** Checksum used to check both the GZIP header and decompressed body. */
private final CRC32 crc = new CRC32();
public GzipSource(Source source) {
if (source == null) throw new IllegalArgumentException("source == null");
this.inflater = new Inflater(true);
this.source = Okio.buffer(source);
this.inflaterSource = new InflaterSource(this.source, inflater);
}
@Override public long read(Buffer sink, long byteCount) throws IOException {
if (byteCount < 0) throw new IllegalArgumentException("byteCount < 0: " + byteCount);
if (byteCount == 0) return 0;
// If we haven't consumed the header, we must consume it before anything else.
if (section == SECTION_HEADER) {
consumeHeader();
section = SECTION_BODY;
}
// Attempt to read at least a byte of the body. If we do, we're done.
if (section == SECTION_BODY) {
long offset = sink.size;
long result = inflaterSource.read(sink, byteCount);
if (result != -1) {
updateCrc(sink, offset, result);
return result;
}
section = SECTION_TRAILER;
}
// The body is exhausted; time to read the trailer. We always consume the
// trailer before returning a -1 exhausted result; that way if you read to
// the end of a GzipSource you guarantee that the CRC has been checked.
if (section == SECTION_TRAILER) {
consumeTrailer();
section = SECTION_DONE;
// Gzip streams self-terminate: they return -1 before their underlying
// source returns -1. Here we attempt to force the underlying stream to
// return -1 which may trigger it to release its resources. If it doesn't
// return -1, then our Gzip data finished prematurely!
if (!source.exhausted()) {
throw new IOException("gzip finished without exhausting source");
}
}
return -1;
}
private void consumeHeader() throws IOException {
// Read the 10-byte header. We peek at the flags byte first so we know if we
// need to CRC the entire header. Then we read the magic ID1ID2 sequence.
// We can skip everything else in the first 10 bytes.
// +---+---+---+---+---+---+---+---+---+---+
// |ID1|ID2|CM |FLG| MTIME |XFL|OS | (more-->)
// +---+---+---+---+---+---+---+---+---+---+
source.require(10);
byte flags = source.buffer().getByte(3);
boolean fhcrc = ((flags >> FHCRC) & 1) == 1;
if (fhcrc) updateCrc(source.buffer(), 0, 10);
short id1id2 = source.readShort();
checkEqual("ID1ID2", (short) 0x1f8b, id1id2);
source.skip(8);
// Skip optional extra fields.
// +---+---+=================================+
// | XLEN |...XLEN bytes of "extra field"...| (more-->)
// +---+---+=================================+
if (((flags >> FEXTRA) & 1) == 1) {
source.require(2);
if (fhcrc) updateCrc(source.buffer(), 0, 2);
int xlen = source.buffer().readShortLe();
source.require(xlen);
if (fhcrc) updateCrc(source.buffer(), 0, xlen);
source.skip(xlen);
}
// Skip an optional 0-terminated name.
// +=========================================+
// |...original file name, zero-terminated...| (more-->)
// +=========================================+
if (((flags >> FNAME) & 1) == 1) {
long index = source.indexOf((byte) 0);
if (index == -1) throw new EOFException();
if (fhcrc) updateCrc(source.buffer(), 0, index + 1);
source.skip(index + 1);
}
// Skip an optional 0-terminated comment.
// +===================================+
// |...file comment, zero-terminated...| (more-->)
// +===================================+
if (((flags >> FCOMMENT) & 1) == 1) {
long index = source.indexOf((byte) 0);
if (index == -1) throw new EOFException();
if (fhcrc) updateCrc(source.buffer(), 0, index + 1);
source.skip(index + 1);
}
// Confirm the optional header CRC.
// +---+---+
// | CRC16 |
// +---+---+
if (fhcrc) {
checkEqual("FHCRC", source.readShortLe(), (short) crc.getValue());
crc.reset();
}
}
private void consumeTrailer() throws IOException {
// Read the eight-byte trailer. Confirm the body's CRC and size.
// +---+---+---+---+---+---+---+---+
// | CRC32 | ISIZE |
// +---+---+---+---+---+---+---+---+
checkEqual("CRC", source.readIntLe(), (int) crc.getValue());
checkEqual("ISIZE", source.readIntLe(), (int) inflater.getBytesWritten());
}
@Override public Timeout timeout() {
return source.timeout();
}
@Override public void close() throws IOException {
inflaterSource.close();
}
/** Updates the CRC with the given bytes. */
private void updateCrc(Buffer buffer, long offset, long byteCount) {
// Skip segments that we aren't checksumming.
Segment s = buffer.head;
for (; offset >= (s.limit - s.pos); s = s.next) {
offset -= (s.limit - s.pos);
}
// Checksum one segment at a time.
for (; byteCount > 0; s = s.next) {
int pos = (int) (s.pos + offset);
int toUpdate = (int) Math.min(s.limit - pos, byteCount);
crc.update(s.data, pos, toUpdate);
byteCount -= toUpdate;
offset = 0;
}
}
private void checkEqual(String name, int expected, int actual) throws IOException {
if (actual != expected) {
throw new IOException(String.format(
"%s: actual 0x%08x != expected 0x%08x", name, actual, expected));
}
}
}
okio-okio-parent-1.14.0/okio/src/main/java/okio/HashingSink.java 0000664 0000000 0000000 00000011337 13240174456 0024406 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2016 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import java.io.IOException;
import java.security.InvalidKeyException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import javax.annotation.Nullable;
import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;
import static okio.Util.checkOffsetAndCount;
/**
* A sink that computes a hash of the full stream of bytes it has accepted. To use, create an
* instance with your preferred hash algorithm. Write all of the data to the sink and then call
* {@link #hash()} to compute the final hash value.
*
* <p>In this example we use {@code HashingSink} with a {@link BufferedSink} to make writing to the
* sink easier.<pre>   {@code
*
* HashingSink hashingSink = HashingSink.sha256(s);
* BufferedSink bufferedSink = Okio.buffer(hashingSink);
*
* ... // Write to bufferedSink and either flush or close it.
*
* ByteString hash = hashingSink.hash();
* }</pre>
*/
public final class HashingSink extends ForwardingSink {
private final @Nullable MessageDigest messageDigest;
private final @Nullable Mac mac;
/** Returns a sink that uses the obsolete MD5 hash algorithm to produce 128-bit hashes. */
public static HashingSink md5(Sink sink) {
return new HashingSink(sink, "MD5");
}
/** Returns a sink that uses the obsolete SHA-1 hash algorithm to produce 160-bit hashes. */
public static HashingSink sha1(Sink sink) {
return new HashingSink(sink, "SHA-1");
}
/** Returns a sink that uses the SHA-256 hash algorithm to produce 256-bit hashes. */
public static HashingSink sha256(Sink sink) {
return new HashingSink(sink, "SHA-256");
}
/** Returns a sink that uses the SHA-512 hash algorithm to produce 512-bit hashes. */
public static HashingSink sha512(Sink sink) {
return new HashingSink(sink, "SHA-512");
}
/** Returns a sink that uses the obsolete SHA-1 HMAC algorithm to produce 160-bit hashes. */
public static HashingSink hmacSha1(Sink sink, ByteString key) {
return new HashingSink(sink, key, "HmacSHA1");
}
/** Returns a sink that uses the SHA-256 HMAC algorithm to produce 256-bit hashes. */
public static HashingSink hmacSha256(Sink sink, ByteString key) {
return new HashingSink(sink, key, "HmacSHA256");
}
/** Returns a sink that uses the SHA-512 HMAC algorithm to produce 512-bit hashes. */
public static HashingSink hmacSha512(Sink sink, ByteString key) {
return new HashingSink(sink, key, "HmacSHA512");
}
private HashingSink(Sink sink, String algorithm) {
super(sink);
try {
this.messageDigest = MessageDigest.getInstance(algorithm);
this.mac = null;
} catch (NoSuchAlgorithmException e) {
throw new AssertionError();
}
}
private HashingSink(Sink sink, ByteString key, String algorithm) {
super(sink);
try {
this.mac = Mac.getInstance(algorithm);
this.mac.init(new SecretKeySpec(key.toByteArray(), algorithm));
this.messageDigest = null;
} catch (NoSuchAlgorithmException e) {
throw new AssertionError();
} catch (InvalidKeyException e) {
throw new IllegalArgumentException(e);
}
}
@Override public void write(Buffer source, long byteCount) throws IOException {
checkOffsetAndCount(source.size, 0, byteCount);
// Hash byteCount bytes from the prefix of source.
long hashedCount = 0;
for (Segment s = source.head; hashedCount < byteCount; s = s.next) {
int toHash = (int) Math.min(byteCount - hashedCount, s.limit - s.pos);
if (messageDigest != null) {
messageDigest.update(s.data, s.pos, toHash);
} else {
mac.update(s.data, s.pos, toHash);
}
hashedCount += toHash;
}
// Write those bytes to the sink.
super.write(source, byteCount);
}
/**
* Returns the hash of the bytes accepted thus far and resets the internal state of this sink.
*
* <p><b>Warning:</b> This method is not idempotent. Each time this method is called its
* internal state is cleared. This starts a new hash with zero bytes accepted.
*/
public ByteString hash() {
byte[] result = messageDigest != null ? messageDigest.digest() : mac.doFinal();
return ByteString.of(result);
}
}
okio-okio-parent-1.14.0/okio/src/main/java/okio/HashingSource.java 0000664 0000000 0000000 00000011024 13240174456 0024733 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2016 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import java.io.IOException;
import java.security.InvalidKeyException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;
/**
* A source that computes a hash of the full stream of bytes it has supplied. To use, create an
* instance with your preferred hash algorithm. Exhaust the source by reading all of its bytes and
* then call {@link #hash()} to compute the final hash value.
*
* <p>In this example we use {@code HashingSource} with a {@link BufferedSource} to make reading
* from the source easier.
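* A minimal sketch of that pattern ({@code rawSource} stands in for any {@link Source}):
* <pre>   {@code
*
*   HashingSource hashingSource = HashingSource.sha256(rawSource);
*   BufferedSource bufferedSource = Okio.buffer(hashingSource);
*
*   ... // Read all of bufferedSource.
*
*   ByteString hash = hashingSource.hash();
* }</pre>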
*/
public final class HashingSource extends ForwardingSource {
private final MessageDigest messageDigest;
private final Mac mac;
/** Returns a source that uses the obsolete MD5 hash algorithm to produce 128-bit hashes. */
public static HashingSource md5(Source source) {
return new HashingSource(source, "MD5");
}
/** Returns a source that uses the obsolete SHA-1 hash algorithm to produce 160-bit hashes. */
public static HashingSource sha1(Source source) {
return new HashingSource(source, "SHA-1");
}
/** Returns a source that uses the SHA-256 hash algorithm to produce 256-bit hashes. */
public static HashingSource sha256(Source source) {
return new HashingSource(source, "SHA-256");
}
/** Returns a source that uses the obsolete SHA-1 HMAC algorithm to produce 160-bit hashes. */
public static HashingSource hmacSha1(Source source, ByteString key) {
return new HashingSource(source, key, "HmacSHA1");
}
/** Returns a source that uses the SHA-256 HMAC algorithm to produce 256-bit hashes. */
public static HashingSource hmacSha256(Source source, ByteString key) {
return new HashingSource(source, key, "HmacSHA256");
}
private HashingSource(Source source, String algorithm) {
super(source);
try {
this.messageDigest = MessageDigest.getInstance(algorithm);
this.mac = null;
} catch (NoSuchAlgorithmException e) {
throw new AssertionError();
}
}
private HashingSource(Source source, ByteString key, String algorithm) {
super(source);
try {
this.mac = Mac.getInstance(algorithm);
this.mac.init(new SecretKeySpec(key.toByteArray(), algorithm));
this.messageDigest = null;
} catch (NoSuchAlgorithmException e) {
throw new AssertionError();
} catch (InvalidKeyException e) {
throw new IllegalArgumentException(e);
}
}
@Override public long read(Buffer sink, long byteCount) throws IOException {
long result = super.read(sink, byteCount);
if (result != -1L) {
long start = sink.size - result;
// Find the first segment that has new bytes.
long offset = sink.size;
Segment s = sink.head;
while (offset > start) {
s = s.prev;
offset -= (s.limit - s.pos);
}
// Hash that segment and all the rest until the end.
while (offset < sink.size) {
int pos = (int) (s.pos + start - offset);
if (messageDigest != null) {
messageDigest.update(s.data, pos, s.limit - pos);
} else {
mac.update(s.data, pos, s.limit - pos);
}
offset += (s.limit - s.pos);
start = offset;
s = s.next;
}
}
return result;
}
/**
* Returns the hash of the bytes supplied thus far and resets the internal state of this source.
*
* <p><b>Warning:</b> This method is not idempotent. Each time this method is called its
* internal state is cleared. This starts a new hash with zero bytes supplied.
*/
public ByteString hash() {
byte[] result = messageDigest != null ? messageDigest.digest() : mac.doFinal();
return ByteString.of(result);
}
}
okio-okio-parent-1.14.0/okio/src/main/java/okio/InflaterSource.java 0000664 0000000 0000000 00000010566 13240174456 0025130 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2014 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import java.io.EOFException;
import java.io.IOException;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;
/**
* A source that uses DEFLATE
* to decompress data read from another source.
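*
* <p>For illustration only, a minimal usage sketch ({@code deflatedSource} stands in for any
* {@link Source} of zlib-compressed data):
* <pre>   {@code
*
*   BufferedSource inflated = Okio.buffer(new InflaterSource(deflatedSource, new Inflater()));
*   String contents = inflated.readUtf8();
*   inflated.close();
* }</pre>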
*/
public final class InflaterSource implements Source {
private final BufferedSource source;
private final Inflater inflater;
/**
* When we call Inflater.setInput(), the inflater keeps our byte array until
* it needs input again. This tracks how many bytes the inflater is currently
* holding on to.
*/
private int bufferBytesHeldByInflater;
private boolean closed;
public InflaterSource(Source source, Inflater inflater) {
this(Okio.buffer(source), inflater);
}
/**
* This package-private constructor shares a buffer with its trusted caller.
* In general we can't share a BufferedSource because the inflater holds input
* bytes until they are inflated.
*/
InflaterSource(BufferedSource source, Inflater inflater) {
if (source == null) throw new IllegalArgumentException("source == null");
if (inflater == null) throw new IllegalArgumentException("inflater == null");
this.source = source;
this.inflater = inflater;
}
@Override public long read(
Buffer sink, long byteCount) throws IOException {
if (byteCount < 0) throw new IllegalArgumentException("byteCount < 0: " + byteCount);
if (closed) throw new IllegalStateException("closed");
if (byteCount == 0) return 0;
while (true) {
boolean sourceExhausted = refill();
// Decompress the inflater's compressed data into the sink.
try {
Segment tail = sink.writableSegment(1);
int toRead = (int) Math.min(byteCount, Segment.SIZE - tail.limit);
int bytesInflated = inflater.inflate(tail.data, tail.limit, toRead);
if (bytesInflated > 0) {
tail.limit += bytesInflated;
sink.size += bytesInflated;
return bytesInflated;
}
if (inflater.finished() || inflater.needsDictionary()) {
releaseInflatedBytes();
if (tail.pos == tail.limit) {
// We allocated a tail segment, but didn't end up needing it. Recycle!
sink.head = tail.pop();
SegmentPool.recycle(tail);
}
return -1;
}
if (sourceExhausted) throw new EOFException("source exhausted prematurely");
} catch (DataFormatException e) {
throw new IOException(e);
}
}
}
/**
* Refills the inflater with compressed data if it needs input. (And only if
* it needs input). Returns true if the inflater required input but the source
* was exhausted.
*/
public boolean refill() throws IOException {
if (!inflater.needsInput()) return false;
releaseInflatedBytes();
if (inflater.getRemaining() != 0) throw new IllegalStateException("?"); // TODO: possible?
// If there are compressed bytes in the source, assign them to the inflater.
if (source.exhausted()) return true;
// Assign buffer bytes to the inflater.
Segment head = source.buffer().head;
bufferBytesHeldByInflater = head.limit - head.pos;
inflater.setInput(head.data, head.pos, bufferBytesHeldByInflater);
return false;
}
/** When the inflater has processed compressed data, remove it from the buffer. */
private void releaseInflatedBytes() throws IOException {
if (bufferBytesHeldByInflater == 0) return;
int toRelease = bufferBytesHeldByInflater - inflater.getRemaining();
bufferBytesHeldByInflater -= toRelease;
source.skip(toRelease);
}
@Override public Timeout timeout() {
return source.timeout();
}
@Override public void close() throws IOException {
if (closed) return;
inflater.end();
closed = true;
source.close();
}
}
okio-okio-parent-1.14.0/okio/src/main/java/okio/Okio.java 0000664 0000000 0000000 00000022066 13240174456 0023102 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2014 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;
import java.io.OutputStream;
import java.net.Socket;
import java.net.SocketTimeoutException;
import java.nio.file.Files;
import java.nio.file.OpenOption;
import java.nio.file.Path;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.annotation.Nullable;
import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement;
import static okio.Util.checkOffsetAndCount;
/** Essential APIs for working with Okio. */
public final class Okio {
static final Logger logger = Logger.getLogger(Okio.class.getName());
private Okio() {
}
/**
* Returns a new source that buffers reads from {@code source}. The returned
* source will perform bulk reads into its in-memory buffer. Use this wherever
* you read a source to get ergonomic and efficient access to data.
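*
* <p>For illustration only (the file name is made up for this example):
* <pre>   {@code
*
*   BufferedSource source = Okio.buffer(Okio.source(new File("README.md")));
*   String firstLine = source.readUtf8Line();
* }</pre>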
*/
public static BufferedSource buffer(Source source) {
return new RealBufferedSource(source);
}
/**
* Returns a new sink that buffers writes to {@code sink}. The returned sink
* will batch writes to {@code sink}. Use this wherever you write to a sink to
* get ergonomic and efficient access to data.
*/
public static BufferedSink buffer(Sink sink) {
return new RealBufferedSink(sink);
}
/** Returns a sink that writes to {@code out}. */
public static Sink sink(OutputStream out) {
return sink(out, new Timeout());
}
private static Sink sink(final OutputStream out, final Timeout timeout) {
if (out == null) throw new IllegalArgumentException("out == null");
if (timeout == null) throw new IllegalArgumentException("timeout == null");
return new Sink() {
@Override public void write(Buffer source, long byteCount) throws IOException {
checkOffsetAndCount(source.size, 0, byteCount);
while (byteCount > 0) {
timeout.throwIfReached();
Segment head = source.head;
int toCopy = (int) Math.min(byteCount, head.limit - head.pos);
out.write(head.data, head.pos, toCopy);
head.pos += toCopy;
byteCount -= toCopy;
source.size -= toCopy;
if (head.pos == head.limit) {
source.head = head.pop();
SegmentPool.recycle(head);
}
}
}
@Override public void flush() throws IOException {
out.flush();
}
@Override public void close() throws IOException {
out.close();
}
@Override public Timeout timeout() {
return timeout;
}
@Override public String toString() {
return "sink(" + out + ")";
}
};
}
/**
* Returns a sink that writes to {@code socket}. Prefer this over {@link
* #sink(OutputStream)} because this method honors timeouts. When the socket
* write times out, the socket is asynchronously closed by a watchdog thread.
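*
* <p>For illustration only, a sketch of writing with a timeout (the host and timeout value are
* arbitrary):
* <pre>   {@code
*
*   Socket socket = new Socket("example.com", 80);
*   BufferedSink sink = Okio.buffer(Okio.sink(socket));
*   sink.timeout().timeout(5, TimeUnit.SECONDS);
*   sink.writeUtf8("GET / HTTP/1.1\r\nHost: example.com\r\n\r\n");
*   sink.flush();
* }</pre>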
*/
public static Sink sink(Socket socket) throws IOException {
if (socket == null) throw new IllegalArgumentException("socket == null");
if (socket.getOutputStream() == null) throw new IOException("socket's output stream == null");
AsyncTimeout timeout = timeout(socket);
Sink sink = sink(socket.getOutputStream(), timeout);
return timeout.sink(sink);
}
/** Returns a source that reads from {@code in}. */
public static Source source(InputStream in) {
return source(in, new Timeout());
}
private static Source source(final InputStream in, final Timeout timeout) {
if (in == null) throw new IllegalArgumentException("in == null");
if (timeout == null) throw new IllegalArgumentException("timeout == null");
return new Source() {
@Override public long read(Buffer sink, long byteCount) throws IOException {
if (byteCount < 0) throw new IllegalArgumentException("byteCount < 0: " + byteCount);
if (byteCount == 0) return 0;
try {
timeout.throwIfReached();
Segment tail = sink.writableSegment(1);
int maxToCopy = (int) Math.min(byteCount, Segment.SIZE - tail.limit);
int bytesRead = in.read(tail.data, tail.limit, maxToCopy);
if (bytesRead == -1) return -1;
tail.limit += bytesRead;
sink.size += bytesRead;
return bytesRead;
} catch (AssertionError e) {
if (isAndroidGetsocknameError(e)) throw new IOException(e);
throw e;
}
}
@Override public void close() throws IOException {
in.close();
}
@Override public Timeout timeout() {
return timeout;
}
@Override public String toString() {
return "source(" + in + ")";
}
};
}
/** Returns a source that reads from {@code file}. */
public static Source source(File file) throws FileNotFoundException {
if (file == null) throw new IllegalArgumentException("file == null");
return source(new FileInputStream(file));
}
/** Returns a source that reads from {@code path}. */
@IgnoreJRERequirement // Should only be invoked on Java 7+.
public static Source source(Path path, OpenOption... options) throws IOException {
if (path == null) throw new IllegalArgumentException("path == null");
return source(Files.newInputStream(path, options));
}
/** Returns a sink that writes to {@code file}. */
public static Sink sink(File file) throws FileNotFoundException {
if (file == null) throw new IllegalArgumentException("file == null");
return sink(new FileOutputStream(file));
}
/** Returns a sink that appends to {@code file}. */
public static Sink appendingSink(File file) throws FileNotFoundException {
if (file == null) throw new IllegalArgumentException("file == null");
return sink(new FileOutputStream(file, true));
}
/** Returns a sink that writes to {@code path}. */
@IgnoreJRERequirement // Should only be invoked on Java 7+.
public static Sink sink(Path path, OpenOption... options) throws IOException {
if (path == null) throw new IllegalArgumentException("path == null");
return sink(Files.newOutputStream(path, options));
}
/** Returns a sink that writes nowhere. */
public static Sink blackhole() {
return new Sink() {
@Override public void write(Buffer source, long byteCount) throws IOException {
source.skip(byteCount);
}
@Override public void flush() throws IOException {
}
@Override public Timeout timeout() {
return Timeout.NONE;
}
@Override public void close() throws IOException {
}
};
}
/**
* Returns a source that reads from {@code socket}. Prefer this over {@link
* #source(InputStream)} because this method honors timeouts. When the socket
* read times out, the socket is asynchronously closed by a watchdog thread.
*/
public static Source source(Socket socket) throws IOException {
if (socket == null) throw new IllegalArgumentException("socket == null");
if (socket.getInputStream() == null) throw new IOException("socket's input stream == null");
AsyncTimeout timeout = timeout(socket);
Source source = source(socket.getInputStream(), timeout);
return timeout.source(source);
}
private static AsyncTimeout timeout(final Socket socket) {
return new AsyncTimeout() {
@Override protected IOException newTimeoutException(@Nullable IOException cause) {
InterruptedIOException ioe = new SocketTimeoutException("timeout");
if (cause != null) {
ioe.initCause(cause);
}
return ioe;
}
@Override protected void timedOut() {
try {
socket.close();
} catch (Exception e) {
logger.log(Level.WARNING, "Failed to close timed out socket " + socket, e);
} catch (AssertionError e) {
if (isAndroidGetsocknameError(e)) {
// Catch this exception due to a Firmware issue up to android 4.2.2
// https://code.google.com/p/android/issues/detail?id=54072
logger.log(Level.WARNING, "Failed to close timed out socket " + socket, e);
} else {
throw e;
}
}
}
};
}
/**
* Returns true if {@code e} is due to a firmware bug fixed after Android 4.2.2.
* https://code.google.com/p/android/issues/detail?id=54072
*/
static boolean isAndroidGetsocknameError(AssertionError e) {
return e.getCause() != null && e.getMessage() != null
&& e.getMessage().contains("getsockname failed");
}
}
okio-okio-parent-1.14.0/okio/src/main/java/okio/Options.java 0000664 0000000 0000000 00000002331 13240174456 0023625 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2016 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import java.util.AbstractList;
import java.util.RandomAccess;
/** An indexed set of values that may be read with {@link BufferedSource#select}. */
public final class Options extends AbstractList<ByteString> implements RandomAccess {
final ByteString[] byteStrings;
private Options(ByteString[] byteStrings) {
this.byteStrings = byteStrings;
}
public static Options of(ByteString... byteStrings) {
return new Options(byteStrings.clone()); // Defensive copy.
}
@Override public ByteString get(int i) {
return byteStrings[i];
}
@Override public int size() {
return byteStrings.length;
}
}
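// Illustrative sketch, not part of the original file: an Options instance is typically built once
// and reused across select() calls. The byte string literals and variable names here are arbitrary.
//
//   Options FIELDS = Options.of(
//       ByteString.encodeUtf8("depth="),
//       ByteString.encodeUtf8("height="));
//   int index = bufferedSource.select(FIELDS); // 0, 1, or -1 if neither option is a prefix.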
okio-okio-parent-1.14.0/okio/src/main/java/okio/Pipe.java 0000664 0000000 0000000 00000011030 13240174456 0023063 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2016 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import java.io.IOException;
/**
* A source and a sink that are attached. The sink's output is the source's input. Typically each
* is accessed by its own thread: a producer thread writes data to the sink and a consumer thread
* reads data from the source.
*
* <p>This class uses a buffer to decouple source and sink. This buffer has a user-specified maximum
* size. When a producer thread outruns its consumer, the buffer fills up and eventually writes to
* the sink will block until the consumer has caught up. Symmetrically, if a consumer outruns its
* producer, reads block until there is data to be read. Limits on the amount of time spent waiting
* for the other party can be configured with {@linkplain Timeout timeouts} on the source and the
* sink.
*
* <p>When the sink is closed, source reads will continue to complete normally until the buffer has
* been exhausted. At that point reads will return -1, indicating the end of the stream. But if the
* source is closed first, writes to the sink will immediately fail with an {@link IOException}.
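*
* <p>For illustration only, a minimal sketch (the 8192-byte capacity and the thread handling are
* arbitrary):
* <pre>   {@code
*
*   Pipe pipe = new Pipe(8192);
*
*   // On the producer thread:
*   BufferedSink sink = Okio.buffer(pipe.sink());
*   sink.writeUtf8("hello from the producer");
*   sink.close();
*
*   // On the consumer thread:
*   BufferedSource source = Okio.buffer(pipe.source());
*   String message = source.readUtf8();
*   source.close();
* }</pre>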
*/
public final class Pipe {
final long maxBufferSize;
final Buffer buffer = new Buffer();
boolean sinkClosed;
boolean sourceClosed;
private final Sink sink = new PipeSink();
private final Source source = new PipeSource();
public Pipe(long maxBufferSize) {
if (maxBufferSize < 1L) {
throw new IllegalArgumentException("maxBufferSize < 1: " + maxBufferSize);
}
this.maxBufferSize = maxBufferSize;
}
public Source source() {
return source;
}
public Sink sink() {
return sink;
}
final class PipeSink implements Sink {
final Timeout timeout = new Timeout();
@Override public void write(Buffer source, long byteCount) throws IOException {
synchronized (buffer) {
if (sinkClosed) throw new IllegalStateException("closed");
while (byteCount > 0) {
if (sourceClosed) throw new IOException("source is closed");
long bufferSpaceAvailable = maxBufferSize - buffer.size();
if (bufferSpaceAvailable == 0) {
timeout.waitUntilNotified(buffer); // Wait until the source drains the buffer.
continue;
}
long bytesToWrite = Math.min(bufferSpaceAvailable, byteCount);
buffer.write(source, bytesToWrite);
byteCount -= bytesToWrite;
buffer.notifyAll(); // Notify the source that it can resume reading.
}
}
}
@Override public void flush() throws IOException {
synchronized (buffer) {
if (sinkClosed) throw new IllegalStateException("closed");
if (sourceClosed && buffer.size() > 0) throw new IOException("source is closed");
}
}
@Override public void close() throws IOException {
synchronized (buffer) {
if (sinkClosed) return;
if (sourceClosed && buffer.size() > 0) throw new IOException("source is closed");
sinkClosed = true;
buffer.notifyAll(); // Notify the source that no more bytes are coming.
}
}
@Override public Timeout timeout() {
return timeout;
}
}
final class PipeSource implements Source {
final Timeout timeout = new Timeout();
@Override public long read(Buffer sink, long byteCount) throws IOException {
synchronized (buffer) {
if (sourceClosed) throw new IllegalStateException("closed");
while (buffer.size() == 0) {
if (sinkClosed) return -1L;
timeout.waitUntilNotified(buffer); // Wait until the sink fills the buffer.
}
long result = buffer.read(sink, byteCount);
buffer.notifyAll(); // Notify the sink that it can resume writing.
return result;
}
}
@Override public void close() throws IOException {
synchronized (buffer) {
sourceClosed = true;
buffer.notifyAll(); // Notify the sink that no more bytes are desired.
}
}
@Override public Timeout timeout() {
return timeout;
}
}
}
okio-okio-parent-1.14.0/okio/src/main/java/okio/RealBufferedSink.java 0000664 0000000 0000000 00000020226 13240174456 0025350 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2014 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import java.io.EOFException;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
final class RealBufferedSink implements BufferedSink {
public final Buffer buffer = new Buffer();
public final Sink sink;
boolean closed;
RealBufferedSink(Sink sink) {
if (sink == null) throw new NullPointerException("sink == null");
this.sink = sink;
}
@Override public Buffer buffer() {
return buffer;
}
@Override public void write(Buffer source, long byteCount)
throws IOException {
if (closed) throw new IllegalStateException("closed");
buffer.write(source, byteCount);
emitCompleteSegments();
}
@Override public BufferedSink write(ByteString byteString) throws IOException {
if (closed) throw new IllegalStateException("closed");
buffer.write(byteString);
return emitCompleteSegments();
}
@Override public BufferedSink writeUtf8(String string) throws IOException {
if (closed) throw new IllegalStateException("closed");
buffer.writeUtf8(string);
return emitCompleteSegments();
}
@Override public BufferedSink writeUtf8(String string, int beginIndex, int endIndex)
throws IOException {
if (closed) throw new IllegalStateException("closed");
buffer.writeUtf8(string, beginIndex, endIndex);
return emitCompleteSegments();
}
@Override public BufferedSink writeUtf8CodePoint(int codePoint) throws IOException {
if (closed) throw new IllegalStateException("closed");
buffer.writeUtf8CodePoint(codePoint);
return emitCompleteSegments();
}
@Override public BufferedSink writeString(String string, Charset charset) throws IOException {
if (closed) throw new IllegalStateException("closed");
buffer.writeString(string, charset);
return emitCompleteSegments();
}
@Override public BufferedSink writeString(String string, int beginIndex, int endIndex,
Charset charset) throws IOException {
if (closed) throw new IllegalStateException("closed");
buffer.writeString(string, beginIndex, endIndex, charset);
return emitCompleteSegments();
}
@Override public BufferedSink write(byte[] source) throws IOException {
if (closed) throw new IllegalStateException("closed");
buffer.write(source);
return emitCompleteSegments();
}
@Override public BufferedSink write(byte[] source, int offset, int byteCount) throws IOException {
if (closed) throw new IllegalStateException("closed");
buffer.write(source, offset, byteCount);
return emitCompleteSegments();
}
@Override public int write(ByteBuffer source) throws IOException {
if (closed) throw new IllegalStateException("closed");
int result = buffer.write(source);
emitCompleteSegments();
return result;
}
@Override public long writeAll(Source source) throws IOException {
if (source == null) throw new IllegalArgumentException("source == null");
long totalBytesRead = 0;
for (long readCount; (readCount = source.read(buffer, Segment.SIZE)) != -1; ) {
totalBytesRead += readCount;
emitCompleteSegments();
}
return totalBytesRead;
}
@Override public BufferedSink write(Source source, long byteCount) throws IOException {
while (byteCount > 0) {
long read = source.read(buffer, byteCount);
if (read == -1) throw new EOFException();
byteCount -= read;
emitCompleteSegments();
}
return this;
}
@Override public BufferedSink writeByte(int b) throws IOException {
if (closed) throw new IllegalStateException("closed");
buffer.writeByte(b);
return emitCompleteSegments();
}
@Override public BufferedSink writeShort(int s) throws IOException {
if (closed) throw new IllegalStateException("closed");
buffer.writeShort(s);
return emitCompleteSegments();
}
@Override public BufferedSink writeShortLe(int s) throws IOException {
if (closed) throw new IllegalStateException("closed");
buffer.writeShortLe(s);
return emitCompleteSegments();
}
@Override public BufferedSink writeInt(int i) throws IOException {
if (closed) throw new IllegalStateException("closed");
buffer.writeInt(i);
return emitCompleteSegments();
}
@Override public BufferedSink writeIntLe(int i) throws IOException {
if (closed) throw new IllegalStateException("closed");
buffer.writeIntLe(i);
return emitCompleteSegments();
}
@Override public BufferedSink writeLong(long v) throws IOException {
if (closed) throw new IllegalStateException("closed");
buffer.writeLong(v);
return emitCompleteSegments();
}
@Override public BufferedSink writeLongLe(long v) throws IOException {
if (closed) throw new IllegalStateException("closed");
buffer.writeLongLe(v);
return emitCompleteSegments();
}
@Override public BufferedSink writeDecimalLong(long v) throws IOException {
if (closed) throw new IllegalStateException("closed");
buffer.writeDecimalLong(v);
return emitCompleteSegments();
}
@Override public BufferedSink writeHexadecimalUnsignedLong(long v) throws IOException {
if (closed) throw new IllegalStateException("closed");
buffer.writeHexadecimalUnsignedLong(v);
return emitCompleteSegments();
}
@Override public BufferedSink emitCompleteSegments() throws IOException {
if (closed) throw new IllegalStateException("closed");
long byteCount = buffer.completeSegmentByteCount();
if (byteCount > 0) sink.write(buffer, byteCount);
return this;
}
@Override public BufferedSink emit() throws IOException {
if (closed) throw new IllegalStateException("closed");
long byteCount = buffer.size();
if (byteCount > 0) sink.write(buffer, byteCount);
return this;
}
@Override public OutputStream outputStream() {
return new OutputStream() {
@Override public void write(int b) throws IOException {
if (closed) throw new IOException("closed");
buffer.writeByte((byte) b);
emitCompleteSegments();
}
@Override public void write(byte[] data, int offset, int byteCount) throws IOException {
if (closed) throw new IOException("closed");
buffer.write(data, offset, byteCount);
emitCompleteSegments();
}
@Override public void flush() throws IOException {
// For backwards compatibility, a flush() on a closed stream is a no-op.
if (!closed) {
RealBufferedSink.this.flush();
}
}
@Override public void close() throws IOException {
RealBufferedSink.this.close();
}
@Override public String toString() {
return RealBufferedSink.this + ".outputStream()";
}
};
}
@Override public void flush() throws IOException {
if (closed) throw new IllegalStateException("closed");
if (buffer.size > 0) {
sink.write(buffer, buffer.size);
}
sink.flush();
}
@Override public boolean isOpen() {
return !closed;
}
@Override public void close() throws IOException {
if (closed) return;
// Emit buffered data to the underlying sink. If this fails, we still need
// to close the sink; otherwise we risk leaking resources.
Throwable thrown = null;
try {
if (buffer.size > 0) {
sink.write(buffer, buffer.size);
}
} catch (Throwable e) {
thrown = e;
}
try {
sink.close();
} catch (Throwable e) {
if (thrown == null) thrown = e;
}
closed = true;
if (thrown != null) Util.sneakyRethrow(thrown);
}
@Override public Timeout timeout() {
return sink.timeout();
}
@Override public String toString() {
return "buffer(" + sink + ")";
}
}
okio-okio-parent-1.14.0/okio/src/main/java/okio/RealBufferedSource.java 0000664 0000000 0000000 00000035064 13240174456 0025712 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2014 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import javax.annotation.Nullable;
import static okio.Util.checkOffsetAndCount;
final class RealBufferedSource implements BufferedSource {
public final Buffer buffer = new Buffer();
public final Source source;
boolean closed;
RealBufferedSource(Source source) {
if (source == null) throw new NullPointerException("source == null");
this.source = source;
}
@Override public Buffer buffer() {
return buffer;
}
@Override public long read(Buffer sink, long byteCount) throws IOException {
if (sink == null) throw new IllegalArgumentException("sink == null");
if (byteCount < 0) throw new IllegalArgumentException("byteCount < 0: " + byteCount);
if (closed) throw new IllegalStateException("closed");
if (buffer.size == 0) {
long read = source.read(buffer, Segment.SIZE);
if (read == -1) return -1;
}
long toRead = Math.min(byteCount, buffer.size);
return buffer.read(sink, toRead);
}
@Override public boolean exhausted() throws IOException {
if (closed) throw new IllegalStateException("closed");
return buffer.exhausted() && source.read(buffer, Segment.SIZE) == -1;
}
@Override public void require(long byteCount) throws IOException {
if (!request(byteCount)) throw new EOFException();
}
@Override public boolean request(long byteCount) throws IOException {
if (byteCount < 0) throw new IllegalArgumentException("byteCount < 0: " + byteCount);
if (closed) throw new IllegalStateException("closed");
while (buffer.size < byteCount) {
if (source.read(buffer, Segment.SIZE) == -1) return false;
}
return true;
}
@Override public byte readByte() throws IOException {
require(1);
return buffer.readByte();
}
@Override public ByteString readByteString() throws IOException {
buffer.writeAll(source);
return buffer.readByteString();
}
@Override public ByteString readByteString(long byteCount) throws IOException {
require(byteCount);
return buffer.readByteString(byteCount);
}
@Override public int select(Options options) throws IOException {
if (closed) throw new IllegalStateException("closed");
while (true) {
int index = buffer.selectPrefix(options);
if (index == -1) return -1;
// If the prefix match actually matched a full byte string, consume it and return it.
int selectedSize = options.byteStrings[index].size();
if (selectedSize <= buffer.size) {
buffer.skip(selectedSize);
return index;
}
// We need to grow the buffer. Do that, then try it all again.
if (source.read(buffer, Segment.SIZE) == -1) return -1;
}
}
@Override public byte[] readByteArray() throws IOException {
buffer.writeAll(source);
return buffer.readByteArray();
}
@Override public byte[] readByteArray(long byteCount) throws IOException {
require(byteCount);
return buffer.readByteArray(byteCount);
}
@Override public int read(byte[] sink) throws IOException {
return read(sink, 0, sink.length);
}
@Override public void readFully(byte[] sink) throws IOException {
try {
require(sink.length);
} catch (EOFException e) {
// The underlying source is exhausted. Copy the bytes we got before rethrowing.
int offset = 0;
while (buffer.size > 0) {
int read = buffer.read(sink, offset, (int) buffer.size);
if (read == -1) throw new AssertionError();
offset += read;
}
throw e;
}
buffer.readFully(sink);
}
@Override public int read(byte[] sink, int offset, int byteCount) throws IOException {
checkOffsetAndCount(sink.length, offset, byteCount);
if (buffer.size == 0) {
long read = source.read(buffer, Segment.SIZE);
if (read == -1) return -1;
}
int toRead = (int) Math.min(byteCount, buffer.size);
return buffer.read(sink, offset, toRead);
}
@Override public int read(ByteBuffer sink) throws IOException {
if (buffer.size == 0) {
long read = source.read(buffer, Segment.SIZE);
if (read == -1) return -1;
}
return buffer.read(sink);
}
@Override public void readFully(Buffer sink, long byteCount) throws IOException {
try {
require(byteCount);
} catch (EOFException e) {
// The underlying source is exhausted. Copy the bytes we got before rethrowing.
sink.writeAll(buffer);
throw e;
}
buffer.readFully(sink, byteCount);
}
@Override public long readAll(Sink sink) throws IOException {
if (sink == null) throw new IllegalArgumentException("sink == null");
long totalBytesWritten = 0;
while (source.read(buffer, Segment.SIZE) != -1) {
long emitByteCount = buffer.completeSegmentByteCount();
if (emitByteCount > 0) {
totalBytesWritten += emitByteCount;
sink.write(buffer, emitByteCount);
}
}
if (buffer.size() > 0) {
totalBytesWritten += buffer.size();
sink.write(buffer, buffer.size());
}
return totalBytesWritten;
}
@Override public String readUtf8() throws IOException {
buffer.writeAll(source);
return buffer.readUtf8();
}
@Override public String readUtf8(long byteCount) throws IOException {
require(byteCount);
return buffer.readUtf8(byteCount);
}
@Override public String readString(Charset charset) throws IOException {
if (charset == null) throw new IllegalArgumentException("charset == null");
buffer.writeAll(source);
return buffer.readString(charset);
}
@Override public String readString(long byteCount, Charset charset) throws IOException {
require(byteCount);
if (charset == null) throw new IllegalArgumentException("charset == null");
return buffer.readString(byteCount, charset);
}
@Override public @Nullable String readUtf8Line() throws IOException {
long newline = indexOf((byte) '\n');
if (newline == -1) {
return buffer.size != 0 ? readUtf8(buffer.size) : null;
}
return buffer.readUtf8Line(newline);
}
@Override public String readUtf8LineStrict() throws IOException {
return readUtf8LineStrict(Long.MAX_VALUE);
}
@Override public String readUtf8LineStrict(long limit) throws IOException {
if (limit < 0) throw new IllegalArgumentException("limit < 0: " + limit);
long scanLength = limit == Long.MAX_VALUE ? Long.MAX_VALUE : limit + 1;
long newline = indexOf((byte) '\n', 0, scanLength);
if (newline != -1) return buffer.readUtf8Line(newline);
if (scanLength < Long.MAX_VALUE
&& request(scanLength) && buffer.getByte(scanLength - 1) == '\r'
&& request(scanLength + 1) && buffer.getByte(scanLength) == '\n') {
return buffer.readUtf8Line(scanLength); // The line was 'limit' UTF-8 bytes followed by \r\n.
}
Buffer data = new Buffer();
buffer.copyTo(data, 0, Math.min(32, buffer.size()));
throw new EOFException("\\n not found: limit=" + Math.min(buffer.size(), limit)
+ " content=" + data.readByteString().hex() + '…');
}
@Override public int readUtf8CodePoint() throws IOException {
require(1);
byte b0 = buffer.getByte(0);
if ((b0 & 0xe0) == 0xc0) {
require(2);
} else if ((b0 & 0xf0) == 0xe0) {
require(3);
} else if ((b0 & 0xf8) == 0xf0) {
require(4);
}
return buffer.readUtf8CodePoint();
}
@Override public short readShort() throws IOException {
require(2);
return buffer.readShort();
}
@Override public short readShortLe() throws IOException {
require(2);
return buffer.readShortLe();
}
@Override public int readInt() throws IOException {
require(4);
return buffer.readInt();
}
@Override public int readIntLe() throws IOException {
require(4);
return buffer.readIntLe();
}
@Override public long readLong() throws IOException {
require(8);
return buffer.readLong();
}
@Override public long readLongLe() throws IOException {
require(8);
return buffer.readLongLe();
}
@Override public long readDecimalLong() throws IOException {
require(1);
for (int pos = 0; request(pos + 1); pos++) {
byte b = buffer.getByte(pos);
if ((b < '0' || b > '9') && (pos != 0 || b != '-')) {
// Non-digit, or non-leading negative sign.
if (pos == 0) {
throw new NumberFormatException(String.format(
"Expected leading [0-9] or '-' character but was %#x", b));
}
break;
}
}
return buffer.readDecimalLong();
}
@Override public long readHexadecimalUnsignedLong() throws IOException {
require(1);
for (int pos = 0; request(pos + 1); pos++) {
byte b = buffer.getByte(pos);
if ((b < '0' || b > '9') && (b < 'a' || b > 'f') && (b < 'A' || b > 'F')) {
// Not a hexadecimal digit.
if (pos == 0) {
throw new NumberFormatException(String.format(
"Expected leading [0-9a-fA-F] character but was %#x", b));
}
break;
}
}
return buffer.readHexadecimalUnsignedLong();
}
@Override public void skip(long byteCount) throws IOException {
if (closed) throw new IllegalStateException("closed");
while (byteCount > 0) {
if (buffer.size == 0 && source.read(buffer, Segment.SIZE) == -1) {
throw new EOFException();
}
long toSkip = Math.min(byteCount, buffer.size());
buffer.skip(toSkip);
byteCount -= toSkip;
}
}
@Override public long indexOf(byte b) throws IOException {
return indexOf(b, 0, Long.MAX_VALUE);
}
@Override public long indexOf(byte b, long fromIndex) throws IOException {
return indexOf(b, fromIndex, Long.MAX_VALUE);
}
@Override public long indexOf(byte b, long fromIndex, long toIndex) throws IOException {
if (closed) throw new IllegalStateException("closed");
if (fromIndex < 0 || toIndex < fromIndex) {
throw new IllegalArgumentException(
String.format("fromIndex=%s toIndex=%s", fromIndex, toIndex));
}
while (fromIndex < toIndex) {
long result = buffer.indexOf(b, fromIndex, toIndex);
if (result != -1L) return result;
// The byte wasn't in the buffer. Give up if we've already reached our target size or if the
// underlying stream is exhausted.
long lastBufferSize = buffer.size;
if (lastBufferSize >= toIndex || source.read(buffer, Segment.SIZE) == -1) return -1L;
// Continue the search from where we left off.
fromIndex = Math.max(fromIndex, lastBufferSize);
}
return -1L;
}
@Override public long indexOf(ByteString bytes) throws IOException {
return indexOf(bytes, 0);
}
@Override public long indexOf(ByteString bytes, long fromIndex) throws IOException {
if (closed) throw new IllegalStateException("closed");
while (true) {
long result = buffer.indexOf(bytes, fromIndex);
if (result != -1) return result;
long lastBufferSize = buffer.size;
if (source.read(buffer, Segment.SIZE) == -1) return -1L;
// Keep searching, picking up from where we left off.
fromIndex = Math.max(fromIndex, lastBufferSize - bytes.size() + 1);
}
}
@Override public long indexOfElement(ByteString targetBytes) throws IOException {
return indexOfElement(targetBytes, 0);
}
@Override public long indexOfElement(ByteString targetBytes, long fromIndex) throws IOException {
if (closed) throw new IllegalStateException("closed");
while (true) {
long result = buffer.indexOfElement(targetBytes, fromIndex);
if (result != -1) return result;
long lastBufferSize = buffer.size;
if (source.read(buffer, Segment.SIZE) == -1) return -1L;
// Keep searching, picking up from where we left off.
fromIndex = Math.max(fromIndex, lastBufferSize);
}
}
@Override public boolean rangeEquals(long offset, ByteString bytes) throws IOException {
return rangeEquals(offset, bytes, 0, bytes.size());
}
@Override
public boolean rangeEquals(long offset, ByteString bytes, int bytesOffset, int byteCount)
throws IOException {
if (closed) throw new IllegalStateException("closed");
if (offset < 0
|| bytesOffset < 0
|| byteCount < 0
|| bytes.size() - bytesOffset < byteCount) {
return false;
}
for (int i = 0; i < byteCount; i++) {
long bufferOffset = offset + i;
if (!request(bufferOffset + 1)) return false;
if (buffer.getByte(bufferOffset) != bytes.getByte(bytesOffset + i)) return false;
}
return true;
}
@Override public InputStream inputStream() {
return new InputStream() {
@Override public int read() throws IOException {
if (closed) throw new IOException("closed");
if (buffer.size == 0) {
long count = source.read(buffer, Segment.SIZE);
if (count == -1) return -1;
}
return buffer.readByte() & 0xff;
}
@Override public int read(byte[] data, int offset, int byteCount) throws IOException {
if (closed) throw new IOException("closed");
checkOffsetAndCount(data.length, offset, byteCount);
if (buffer.size == 0) {
long count = source.read(buffer, Segment.SIZE);
if (count == -1) return -1;
}
return buffer.read(data, offset, byteCount);
}
@Override public int available() throws IOException {
if (closed) throw new IOException("closed");
return (int) Math.min(buffer.size, Integer.MAX_VALUE);
}
@Override public void close() throws IOException {
RealBufferedSource.this.close();
}
@Override public String toString() {
return RealBufferedSource.this + ".inputStream()";
}
};
}
@Override public boolean isOpen() {
return !closed;
}
@Override public void close() throws IOException {
if (closed) return;
closed = true;
source.close();
buffer.clear();
}
@Override public Timeout timeout() {
return source.timeout();
}
@Override public String toString() {
return "buffer(" + source + ")";
}
}
okio-okio-parent-1.14.0/okio/src/main/java/okio/Segment.java 0000664 0000000 0000000 00000013706 13240174456 0023604 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2014 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import javax.annotation.Nullable;
/**
* A segment of a buffer.
*
* <p>Each segment in a buffer is a circularly-linked list node referencing the following and
* preceding segments in the buffer.
*
* <p>Each segment in the pool is a singly-linked list node referencing the rest of segments in the
* pool.
*
* <p>The underlying byte arrays of segments may be shared between buffers and byte strings. When a
* segment's byte array is shared the segment may not be recycled, nor may its byte data be changed.
* The lone exception is that the owner segment is allowed to append to the segment, writing data at
* {@code limit} and beyond. There is a single owning segment for each byte array. Positions,
* limits, prev, and next references are not shared.
*/
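/*
 * A rough sketch of the list invariants described above, assuming package-private access to
 * Segment from within okio. It hand-builds a two-segment circular list the same way Buffer
 * seeds its head, then checks the wrap-around links and the sharing rule; the local names
 * are illustrative only.
 *
 *   Segment first = new Segment();
 *   first.next = first.prev = first;              // A one-element circular list, as Buffer does for its head.
 *   Segment second = first.push(new Segment());   // Insert after 'first': first <-> second.
 *   assert first.next == second && second.next == first;
 *   assert first.prev == second && second.prev == first;
 *
 *   Segment copy = first.sharedCopy();            // Shares first.data; 'first' is now marked shared.
 *   assert copy.data == first.data && first.shared && !copy.owner;
 */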
final class Segment {
/** The size of all segments in bytes. */
static final int SIZE = 8192;
/** Segments will be shared when doing so avoids {@code arraycopy()} of this many bytes. */
static final int SHARE_MINIMUM = 1024;
final byte[] data;
/** The next byte of application data to read in this segment. */
int pos;
/** The first byte of available data ready to be written to. */
int limit;
/** True if other segments or byte strings use the same byte array. */
boolean shared;
/** True if this segment owns the byte array and can append to it, extending {@code limit}. */
boolean owner;
/** Next segment in a linked or circularly-linked list. */
Segment next;
/** Previous segment in a circularly-linked list. */
Segment prev;
Segment() {
this.data = new byte[SIZE];
this.owner = true;
this.shared = false;
}
Segment(byte[] data, int pos, int limit, boolean shared, boolean owner) {
this.data = data;
this.pos = pos;
this.limit = limit;
this.shared = shared;
this.owner = owner;
}
/**
* Returns a new segment that shares the underlying byte array with this. Adjusting pos and limit
* are safe but writes are forbidden. This also marks the current segment as shared, which
* prevents it from being pooled.
*/
Segment sharedCopy() {
shared = true;
return new Segment(data, pos, limit, true, false);
}
/** Returns a new segment with its own private copy of the underlying byte array. */
Segment unsharedCopy() {
return new Segment(data.clone(), pos, limit, false, true);
}
/**
* Removes this segment from a circularly-linked list and returns its successor.
* Returns null if the list is now empty.
*/
public @Nullable Segment pop() {
Segment result = next != this ? next : null;
prev.next = next;
next.prev = prev;
next = null;
prev = null;
return result;
}
/**
* Appends {@code segment} after this segment in the circularly-linked list.
* Returns the pushed segment.
*/
public Segment push(Segment segment) {
segment.prev = this;
segment.next = next;
next.prev = segment;
next = segment;
return segment;
}
/**
* Splits this head of a circularly-linked list into two segments. The first
* segment contains the data in {@code [pos..pos+byteCount)}. The second
* segment contains the data in {@code [pos+byteCount..limit)}. This can be
* useful when moving partial segments from one buffer to another.
*
* <p>Returns the new head of the circularly-linked list.
*/
public Segment split(int byteCount) {
if (byteCount <= 0 || byteCount > limit - pos) throw new IllegalArgumentException();
Segment prefix;
// We have two competing performance goals:
// - Avoid copying data. We accomplish this by sharing segments.
// - Avoid short shared segments. These are bad for performance because they are readonly and
// may lead to long chains of short segments.
// To balance these goals we only share segments when the copy will be large.
if (byteCount >= SHARE_MINIMUM) {
prefix = sharedCopy();
} else {
prefix = SegmentPool.take();
System.arraycopy(data, pos, prefix.data, 0, byteCount);
}
prefix.limit = prefix.pos + byteCount;
pos += byteCount;
prev.push(prefix);
return prefix;
}
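/*
 * A worked sketch of split() for a buffer's head segment, with hypothetical positions. Because
 * 2048 >= SHARE_MINIMUM, the prefix shares this segment's byte array rather than copying it:
 *
 *   // Suppose head.pos = 10 and head.limit = 4106, i.e. 4096 readable bytes.
 *   Segment prefix = head.split(2048);
 *   // prefix.pos = 10,   prefix.limit = 2058  -> the first 2048 bytes.
 *   // head.pos   = 2058, head.limit   = 4106  -> the remaining 2048 bytes.
 *   // prefix was pushed before head, so it is the new head of the circularly-linked list.
 */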
/**
* Call this when the tail and its predecessor may both be less than half
* full. This will copy data so that segments can be recycled.
*/
public void compact() {
if (prev == this) throw new IllegalStateException();
if (!prev.owner) return; // Cannot compact: prev isn't writable.
int byteCount = limit - pos;
int availableByteCount = SIZE - prev.limit + (prev.shared ? 0 : prev.pos);
if (byteCount > availableByteCount) return; // Cannot compact: not enough writable space.
writeTo(prev, byteCount);
pop();
SegmentPool.recycle(this);
}
/** Moves {@code byteCount} bytes from this segment to {@code sink}. */
public void writeTo(Segment sink, int byteCount) {
if (!sink.owner) throw new IllegalArgumentException();
if (sink.limit + byteCount > SIZE) {
// We can't fit byteCount bytes at the sink's current position. Shift sink first.
if (sink.shared) throw new IllegalArgumentException();
if (sink.limit + byteCount - sink.pos > SIZE) throw new IllegalArgumentException();
System.arraycopy(sink.data, sink.pos, sink.data, 0, sink.limit - sink.pos);
sink.limit -= sink.pos;
sink.pos = 0;
}
System.arraycopy(data, pos, sink.data, sink.limit, byteCount);
sink.limit += byteCount;
pos += byteCount;
}
}
okio-okio-parent-1.14.0/okio/src/main/java/okio/SegmentPool.java 0000664 0000000 0000000 00000003630 13240174456 0024431 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2014 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import javax.annotation.Nullable;
/**
* A collection of unused segments, necessary to avoid GC churn and zero-fill.
* This pool is a thread-safe static singleton.
*/
final class SegmentPool {
/** The maximum number of bytes to pool. */
// TODO: Is 64 KiB a good maximum size? Do we ever have that many idle segments?
static final long MAX_SIZE = 64 * 1024; // 64 KiB.
/** Singly-linked list of segments. */
static @Nullable Segment next;
/** Total bytes in this pool. */
static long byteCount;
private SegmentPool() {
}
static Segment take() {
synchronized (SegmentPool.class) {
if (next != null) {
Segment result = next;
next = result.next;
result.next = null;
byteCount -= Segment.SIZE;
return result;
}
}
return new Segment(); // Pool is empty. Don't zero-fill while holding a lock.
}
static void recycle(Segment segment) {
if (segment.next != null || segment.prev != null) throw new IllegalArgumentException();
if (segment.shared) return; // This segment cannot be recycled.
synchronized (SegmentPool.class) {
if (byteCount + Segment.SIZE > MAX_SIZE) return; // Pool is full.
byteCount += Segment.SIZE;
segment.next = next;
segment.pos = segment.limit = 0;
next = segment;
}
}
}
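/*
 * A brief sketch of the intended round trip, assuming package-private access. Segments are
 * borrowed with take() and, once unlinked and unshared, handed back with recycle() so their
 * byte arrays are reused instead of re-allocated and zero-filled:
 *
 *   Segment segment = SegmentPool.take();   // Reuses a pooled segment, or allocates a new one.
 *   // ... write into segment.data and advance segment.limit ...
 *   segment.next = null;                    // recycle() rejects segments still linked into a list.
 *   segment.prev = null;
 *   SegmentPool.recycle(segment);           // Pooled again, unless the pool is full or the segment is shared.
 */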
okio-okio-parent-1.14.0/okio/src/main/java/okio/SegmentedByteString.java 0000664 0000000 0000000 00000025320 13240174456 0026123 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2015 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.util.Arrays;
import static okio.Util.arrayRangeEquals;
import static okio.Util.checkOffsetAndCount;
/**
* An immutable byte string composed of segments of byte arrays. This class exists to implement
* efficient snapshots of buffers. It is implemented as an array of segments, plus a directory in
* two halves that describes how the segments compose this byte string.
*
* <p>The first half of the directory is the cumulative byte count covered by each segment. The
* element at {@code directory[0]} contains the number of bytes held in {@code segments[0]}; the
* element at {@code directory[1]} contains the number of bytes held in {@code segments[0] +
* segments[1]}, and so on. The element at {@code directory[segments.length - 1]} contains the total
* size of this byte string. The first half of the directory is always monotonically increasing.
*
* <p>The second half of the directory is the offset in {@code segments} of the first content byte.
* Bytes preceding this offset are unused, as are bytes beyond the segment's effective size.
*
* <p>Suppose we have a byte string, {@code [A, B, C, D, E, F, G, H, I, J, K, L, M]} that is stored
* across three byte arrays: {@code [x, x, x, x, A, B, C, D, E, x, x, x]}, {@code [x, F, G]}, and
* {@code [H, I, J, K, L, M, x, x, x, x, x, x]}. The three byte arrays would be stored in {@code
* segments} in order. Since the arrays contribute 5, 2, and 6 elements respectively, the directory
* starts with {@code [5, 7, 13} to hold the cumulative total at each position. Since the offsets
* into the arrays are 4, 1, and 0 respectively, the directory ends with {@code 4, 1, 0]}.
* Concatenating these two halves, the complete directory is {@code [5, 7, 13, 4, 1, 0]}.
*
* <p>This structure is chosen so that the segment holding a particular offset can be found by
* binary search. We use one array rather than two for the directory as a micro-optimization.
*/
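/*
 * A worked check of the directory example above, mirroring the arithmetic that segment() and
 * getByte() use below. With segments contributing 5, 2, and 6 bytes at offsets 4, 1, and 0,
 * the directory is [5, 7, 13, 4, 1, 0] and byte 8 of [A..M] resolves to 'I':
 *
 *   int[] directory = {5, 7, 13, 4, 1, 0};
 *   int segmentCount = 3;
 *   int pos = 8;                                                      // The byte 'I'.
 *   int i = java.util.Arrays.binarySearch(directory, 0, segmentCount, pos + 1);
 *   int segment = i >= 0 ? i : ~i;                                    // -> 2, the third array.
 *   int segmentOffset = segment == 0 ? 0 : directory[segment - 1];    // -> 7 bytes precede it.
 *   int segmentPos = directory[segment + segmentCount];               // -> 0, its content offset.
 *   int arrayIndex = pos - segmentOffset + segmentPos;                // -> 1, which holds 'I'.
 */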
final class SegmentedByteString extends ByteString {
final transient byte[][] segments;
final transient int[] directory;
SegmentedByteString(Buffer buffer, int byteCount) {
super(null);
checkOffsetAndCount(buffer.size, 0, byteCount);
// Walk through the buffer to count how many segments we'll need.
int offset = 0;
int segmentCount = 0;
for (Segment s = buffer.head; offset < byteCount; s = s.next) {
if (s.limit == s.pos) {
throw new AssertionError("s.limit == s.pos"); // Empty segment. This should not happen!
}
offset += s.limit - s.pos;
segmentCount++;
}
// Walk through the buffer again to assign segments and build the directory.
this.segments = new byte[segmentCount][];
this.directory = new int[segmentCount * 2];
offset = 0;
segmentCount = 0;
for (Segment s = buffer.head; offset < byteCount; s = s.next) {
segments[segmentCount] = s.data;
offset += s.limit - s.pos;
if (offset > byteCount) {
offset = byteCount; // Despite sharing more bytes, only report having up to byteCount.
}
directory[segmentCount] = offset;
directory[segmentCount + segments.length] = s.pos;
s.shared = true;
segmentCount++;
}
}
@Override public String utf8() {
return toByteString().utf8();
}
@Override public String string(Charset charset) {
return toByteString().string(charset);
}
@Override public String base64() {
return toByteString().base64();
}
@Override public String hex() {
return toByteString().hex();
}
@Override public ByteString toAsciiLowercase() {
return toByteString().toAsciiLowercase();
}
@Override public ByteString toAsciiUppercase() {
return toByteString().toAsciiUppercase();
}
@Override public ByteString md5() {
return toByteString().md5();
}
@Override public ByteString sha1() {
return toByteString().sha1();
}
@Override public ByteString sha256() {
return toByteString().sha256();
}
@Override public ByteString hmacSha1(ByteString key) {
return toByteString().hmacSha1(key);
}
@Override public ByteString hmacSha256(ByteString key) {
return toByteString().hmacSha256(key);
}
@Override public String base64Url() {
return toByteString().base64Url();
}
@Override public ByteString substring(int beginIndex) {
return toByteString().substring(beginIndex);
}
@Override public ByteString substring(int beginIndex, int endIndex) {
return toByteString().substring(beginIndex, endIndex);
}
@Override public byte getByte(int pos) {
checkOffsetAndCount(directory[segments.length - 1], pos, 1);
int segment = segment(pos);
int segmentOffset = segment == 0 ? 0 : directory[segment - 1];
int segmentPos = directory[segment + segments.length];
return segments[segment][pos - segmentOffset + segmentPos];
}
/** Returns the index of the segment that contains the byte at {@code pos}. */
private int segment(int pos) {
// Search for (pos + 1) instead of (pos) because the directory holds sizes, not indexes.
int i = Arrays.binarySearch(directory, 0, segments.length, pos + 1);
return i >= 0 ? i : ~i; // If i is negative, bitflip to get the insert position.
}
@Override public int size() {
return directory[segments.length - 1];
}
@Override public byte[] toByteArray() {
byte[] result = new byte[directory[segments.length - 1]];
int segmentOffset = 0;
for (int s = 0, segmentCount = segments.length; s < segmentCount; s++) {
int segmentPos = directory[segmentCount + s];
int nextSegmentOffset = directory[s];
System.arraycopy(segments[s], segmentPos, result, segmentOffset,
nextSegmentOffset - segmentOffset);
segmentOffset = nextSegmentOffset;
}
return result;
}
@Override public ByteBuffer asByteBuffer() {
return ByteBuffer.wrap(toByteArray()).asReadOnlyBuffer();
}
@Override public void write(OutputStream out) throws IOException {
if (out == null) throw new IllegalArgumentException("out == null");
int segmentOffset = 0;
for (int s = 0, segmentCount = segments.length; s < segmentCount; s++) {
int segmentPos = directory[segmentCount + s];
int nextSegmentOffset = directory[s];
out.write(segments[s], segmentPos, nextSegmentOffset - segmentOffset);
segmentOffset = nextSegmentOffset;
}
}
@Override void write(Buffer buffer) {
int segmentOffset = 0;
for (int s = 0, segmentCount = segments.length; s < segmentCount; s++) {
int segmentPos = directory[segmentCount + s];
int nextSegmentOffset = directory[s];
Segment segment = new Segment(segments[s], segmentPos,
segmentPos + nextSegmentOffset - segmentOffset, true, false);
if (buffer.head == null) {
buffer.head = segment.next = segment.prev = segment;
} else {
buffer.head.prev.push(segment);
}
segmentOffset = nextSegmentOffset;
}
buffer.size += segmentOffset;
}
@Override public boolean rangeEquals(
int offset, ByteString other, int otherOffset, int byteCount) {
if (offset < 0 || offset > size() - byteCount) return false;
// Go segment-by-segment through this, passing arrays to other's rangeEquals().
for (int s = segment(offset); byteCount > 0; s++) {
int segmentOffset = s == 0 ? 0 : directory[s - 1];
int segmentSize = directory[s] - segmentOffset;
int stepSize = Math.min(byteCount, segmentOffset + segmentSize - offset);
int segmentPos = directory[segments.length + s];
int arrayOffset = offset - segmentOffset + segmentPos;
if (!other.rangeEquals(otherOffset, segments[s], arrayOffset, stepSize)) return false;
offset += stepSize;
otherOffset += stepSize;
byteCount -= stepSize;
}
return true;
}
@Override public boolean rangeEquals(int offset, byte[] other, int otherOffset, int byteCount) {
if (offset < 0 || offset > size() - byteCount
|| otherOffset < 0 || otherOffset > other.length - byteCount) {
return false;
}
// Go segment-by-segment through this, comparing ranges of arrays.
for (int s = segment(offset); byteCount > 0; s++) {
int segmentOffset = s == 0 ? 0 : directory[s - 1];
int segmentSize = directory[s] - segmentOffset;
int stepSize = Math.min(byteCount, segmentOffset + segmentSize - offset);
int segmentPos = directory[segments.length + s];
int arrayOffset = offset - segmentOffset + segmentPos;
if (!arrayRangeEquals(segments[s], arrayOffset, other, otherOffset, stepSize)) return false;
offset += stepSize;
otherOffset += stepSize;
byteCount -= stepSize;
}
return true;
}
@Override public int indexOf(byte[] other, int fromIndex) {
return toByteString().indexOf(other, fromIndex);
}
@Override public int lastIndexOf(byte[] other, int fromIndex) {
return toByteString().lastIndexOf(other, fromIndex);
}
/** Returns a copy as a non-segmented byte string. */
private ByteString toByteString() {
return new ByteString(toByteArray());
}
@Override byte[] internalArray() {
return toByteArray();
}
@Override public boolean equals(Object o) {
if (o == this) return true;
return o instanceof ByteString
&& ((ByteString) o).size() == size()
&& rangeEquals(0, ((ByteString) o), 0, size());
}
@Override public int hashCode() {
int result = hashCode;
if (result != 0) return result;
// Equivalent to Arrays.hashCode(toByteArray()).
result = 1;
int segmentOffset = 0;
for (int s = 0, segmentCount = segments.length; s < segmentCount; s++) {
byte[] segment = segments[s];
int segmentPos = directory[segmentCount + s];
int nextSegmentOffset = directory[s];
int segmentSize = nextSegmentOffset - segmentOffset;
for (int i = segmentPos, limit = segmentPos + segmentSize; i < limit; i++) {
result = (31 * result) + segment[i];
}
segmentOffset = nextSegmentOffset;
}
return (hashCode = result);
}
@Override public String toString() {
return toByteString().toString();
}
private Object writeReplace() {
return toByteString();
}
}
okio-okio-parent-1.14.0/okio/src/main/java/okio/Sink.java 0000664 0000000 0000000 00000005307 13240174456 0023104 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2014 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import java.io.Closeable;
import java.io.Flushable;
import java.io.IOException;
/**
* Receives a stream of bytes. Use this interface to write data wherever it's
* needed: to the network, storage, or a buffer in memory. Sinks may be layered
* to transform received data, such as to compress, encrypt, throttle, or add
* protocol framing.
*
* <p>Most application code shouldn't operate on a sink directly, but rather on a
* {@link BufferedSink} which is both more efficient and more convenient. Use
* {@link Okio#buffer(Sink)} to wrap any sink with a buffer.
*
* <p>Sinks are easy to test: just use a {@link Buffer} in your tests, and
* read from it to confirm it received the data that was expected.
*
* <h3>Comparison with OutputStream</h3>
* This interface is functionally equivalent to {@link java.io.OutputStream}.
*
* <p>{@code OutputStream} requires multiple layers when emitted data is
* heterogeneous: a {@code DataOutputStream} for primitive values, a {@code
* BufferedOutputStream} for buffering, and {@code OutputStreamWriter} for
* charset encoding. This class uses {@code BufferedSink} for all of the above.
*
* <p>Sink is also easier to layer: there is no {@linkplain
* java.io.OutputStream#write(int) single-byte write} method that is awkward to
* implement efficiently.
*
* <h3>Interop with OutputStream</h3>
* Use {@link Okio#sink} to adapt an {@code OutputStream} to a sink. Use {@link
* BufferedSink#outputStream} to adapt a sink to an {@code OutputStream}.
*/
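/*
 * A minimal usage sketch of the pattern described above: adapt a destination with Okio.sink(),
 * wrap it with Okio.buffer(), and write through the BufferedSink. The file name is hypothetical
 * and the caller is assumed to handle IOException.
 *
 *   try (BufferedSink sink = Okio.buffer(Okio.sink(new File("greeting.txt")))) {
 *     sink.writeUtf8("Hello, Okio!\n");
 *     sink.writeInt(42);
 *   } // close() flushes any buffered bytes and releases the underlying sink.
 */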
public interface Sink extends Closeable, Flushable {
/** Removes {@code byteCount} bytes from {@code source} and appends them to this. */
void write(Buffer source, long byteCount) throws IOException;
/** Pushes all buffered bytes to their final destination. */
@Override void flush() throws IOException;
/** Returns the timeout for this sink. */
Timeout timeout();
/**
* Pushes all buffered bytes to their final destination and releases the
* resources held by this sink. It is an error to write to a closed sink. It is
* safe to close a sink more than once.
*/
@Override void close() throws IOException;
}
okio-okio-parent-1.14.0/okio/src/main/java/okio/Source.java 0000664 0000000 0000000 00000006250 13240174456 0023436 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2014 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import java.io.Closeable;
import java.io.IOException;
/**
* Supplies a stream of bytes. Use this interface to read data from wherever
* it's located: from the network, storage, or a buffer in memory. Sources may
* be layered to transform supplied data, such as to decompress, decrypt, or
* remove protocol framing.
*
* <p>Most applications shouldn't operate on a source directly, but rather on a
* {@link BufferedSource} which is both more efficient and more convenient. Use
* {@link Okio#buffer(Source)} to wrap any source with a buffer.
*
* <p>Sources are easy to test: just use a {@link Buffer} in your tests, and
* fill it with the data your application is to read.
*
* <h3>Comparison with InputStream</h3>
* This interface is functionally equivalent to {@link java.io.InputStream}.
*
* <p>{@code InputStream} requires multiple layers when consumed data is
* heterogeneous: a {@code DataInputStream} for primitive values, a {@code
* BufferedInputStream} for buffering, and {@code InputStreamReader} for
* strings. This class uses {@code BufferedSource} for all of the above.
*
* <p>Source avoids the impossible-to-implement {@linkplain
* java.io.InputStream#available available()} method. Instead callers specify
* how many bytes they {@link BufferedSource#require require}.
*
* <p>Source omits the unsafe-to-compose {@linkplain java.io.InputStream#mark
* mark and reset} state that's tracked by {@code InputStream}; instead, callers
* just buffer what they need.
*
* <p>When implementing a source, you don't need to worry about the {@linkplain
* java.io.InputStream#read single-byte read} method that is awkward to implement efficiently
* and returns one of 257 possible values.
*
* <p>And source has a stronger {@code skip} method: {@link BufferedSource#skip}
* won't return prematurely.
*
* <h3>Interop with InputStream</h3>
* Use {@link Okio#source} to adapt an {@code InputStream} to a source. Use
* {@link BufferedSource#inputStream} to adapt a source to an {@code
* InputStream}.
*/
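/*
 * A minimal usage sketch of the pattern described above: adapt an origin with Okio.source(),
 * wrap it with Okio.buffer(), and read through the BufferedSource. The file name is hypothetical
 * and the caller is assumed to handle IOException.
 *
 *   try (BufferedSource source = Okio.buffer(Okio.source(new File("greeting.txt")))) {
 *     String line = source.readUtf8Line();   // The first line of text, or null if the source is empty.
 *     int value = source.readInt();          // The next four bytes as a big-endian int.
 *   }
 */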
public interface Source extends Closeable {
/**
* Removes at least 1, and up to {@code byteCount} bytes from this and appends
* them to {@code sink}. Returns the number of bytes read, or -1 if this
* source is exhausted.
*/
long read(Buffer sink, long byteCount) throws IOException;
/** Returns the timeout for this source. */
Timeout timeout();
/**
* Closes this source and releases the resources held by this source. It is an
* error to read from a closed source. It is safe to close a source more than once.
*/
@Override void close() throws IOException;
}
okio-okio-parent-1.14.0/okio/src/main/java/okio/Timeout.java 0000664 0000000 0000000 00000017471 13240174456 0023633 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2014 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.TimeUnit;
/**
* A policy on how much time to spend on a task before giving up. When a task
* times out, it is left in an unspecified state and should be abandoned. For
* example, if reading from a source times out, that source should be closed and
* the read should be retried later. If writing to a sink times out, the same
* rules apply: close the sink and retry later.
*
* <h3>Timeouts and Deadlines</h3>
* This class offers two complementary controls to define a timeout policy.
*
* <p>Timeouts specify the maximum time to wait for a single
* operation to complete. Timeouts are typically used to detect problems like
* network partitions. For example, if a remote peer doesn't return any
* data for ten seconds, we may assume that the peer is unavailable.
*
* <p>Deadlines specify the maximum time to spend on a job,
* composed of one or more operations. Use deadlines to set an upper bound on
* the time invested on a job. For example, a battery-conscious app may limit
* how much time it spends pre-loading content.
*/
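/*
 * A short sketch of applying both controls to a socket read; the socket and durations are
 * illustrative only. Okio.source(Socket) returns a source whose timeout() is wired to enforce
 * these limits.
 *
 *   Source raw = Okio.source(socket);
 *   raw.timeout().timeout(10, TimeUnit.SECONDS);    // Per-operation timeout: each read must make progress.
 *   raw.timeout().deadline(60, TimeUnit.SECONDS);   // Deadline: the whole job must finish within a minute.
 *   BufferedSource source = Okio.buffer(raw);
 *   String line = source.readUtf8LineStrict();      // Throws InterruptedIOException if a limit is exceeded.
 */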
public class Timeout {
/**
* An empty timeout that neither tracks nor detects timeouts. Use this when
* timeouts aren't necessary, such as in implementations whose operations
* do not block.
*/
public static final Timeout NONE = new Timeout() {
@Override public Timeout timeout(long timeout, TimeUnit unit) {
return this;
}
@Override public Timeout deadlineNanoTime(long deadlineNanoTime) {
return this;
}
@Override public void throwIfReached() throws IOException {
}
};
/**
* True if {@code deadlineNanoTime} is defined. There is no equivalent to null
* or 0 for {@link System#nanoTime}.
*/
private boolean hasDeadline;
private long deadlineNanoTime;
private long timeoutNanos;
public Timeout() {
}
/**
* Wait at most {@code timeout} time before aborting an operation. Using a
* per-operation timeout means that as long as forward progress is being made,
* no sequence of operations will fail.
*
* <p>If {@code timeout == 0}, operations will run indefinitely. (Operating
* system timeouts may still apply.)
*/
public Timeout timeout(long timeout, TimeUnit unit) {
if (timeout < 0) throw new IllegalArgumentException("timeout < 0: " + timeout);
if (unit == null) throw new IllegalArgumentException("unit == null");
this.timeoutNanos = unit.toNanos(timeout);
return this;
}
/** Returns the timeout in nanoseconds, or {@code 0} for no timeout. */
public long timeoutNanos() {
return timeoutNanos;
}
/** Returns true if a deadline is enabled. */
public boolean hasDeadline() {
return hasDeadline;
}
/**
* Returns the {@linkplain System#nanoTime() nano time} when the deadline will
* be reached.
*
* @throws IllegalStateException if no deadline is set.
*/
public long deadlineNanoTime() {
if (!hasDeadline) throw new IllegalStateException("No deadline");
return deadlineNanoTime;
}
/**
* Sets the {@linkplain System#nanoTime() nano time} when the deadline will be
* reached. All operations must complete before this time. Use a deadline to
* set a maximum bound on the time spent on a sequence of operations.
*/
public Timeout deadlineNanoTime(long deadlineNanoTime) {
this.hasDeadline = true;
this.deadlineNanoTime = deadlineNanoTime;
return this;
}
/** Set a deadline of now plus {@code duration} time. */
public final Timeout deadline(long duration, TimeUnit unit) {
if (duration <= 0) throw new IllegalArgumentException("duration <= 0: " + duration);
if (unit == null) throw new IllegalArgumentException("unit == null");
return deadlineNanoTime(System.nanoTime() + unit.toNanos(duration));
}
/** Clears the timeout. Operating system timeouts may still apply. */
public Timeout clearTimeout() {
this.timeoutNanos = 0;
return this;
}
/** Clears the deadline. */
public Timeout clearDeadline() {
this.hasDeadline = false;
return this;
}
/**
* Throws an {@link InterruptedIOException} if the deadline has been reached or if the current
* thread has been interrupted. This method doesn't detect timeouts; that should be implemented to
* asynchronously abort an in-progress operation.
*/
public void throwIfReached() throws IOException {
if (Thread.interrupted()) {
throw new InterruptedIOException("thread interrupted");
}
if (hasDeadline && deadlineNanoTime - System.nanoTime() <= 0) {
throw new InterruptedIOException("deadline reached");
}
}
/**
* Waits on {@code monitor} until it is notified. Throws {@link InterruptedIOException} if either
* the thread is interrupted or if this timeout elapses before {@code monitor} is notified. The
* caller must be synchronized on {@code monitor}.
*
* <p>Here's a sample class that uses {@code waitUntilNotified()} to await a specific state. Note
* that the call is made within a loop to avoid unnecessary waiting and to mitigate spurious
* notifications. <pre>{@code
*
* class Dice {
* Random random = new Random();
* int latestTotal;
*
* public synchronized void roll() {
* latestTotal = 2 + random.nextInt(6) + random.nextInt(6);
* System.out.println("Rolled " + latestTotal);
* notifyAll();
* }
*
* public void rollAtFixedRate(int period, TimeUnit timeUnit) {
* Executors.newScheduledThreadPool(0).scheduleAtFixedRate(new Runnable() {
* public void run() {
* roll();
* }
* }, 0, period, timeUnit);
* }
*
* public synchronized void awaitTotal(Timeout timeout, int total)
* throws InterruptedIOException {
* while (latestTotal != total) {
* timeout.waitUntilNotified(this);
* }
* }
* }
* }</pre>
*/
public final void waitUntilNotified(Object monitor) throws InterruptedIOException {
try {
boolean hasDeadline = hasDeadline();
long timeoutNanos = timeoutNanos();
if (!hasDeadline && timeoutNanos == 0L) {
monitor.wait(); // There is no timeout: wait forever.
return;
}
// Compute how long we'll wait.
long waitNanos;
long start = System.nanoTime();
if (hasDeadline && timeoutNanos != 0) {
long deadlineNanos = deadlineNanoTime() - start;
waitNanos = Math.min(timeoutNanos, deadlineNanos);
} else if (hasDeadline) {
waitNanos = deadlineNanoTime() - start;
} else {
waitNanos = timeoutNanos;
}
// Attempt to wait that long. This will break out early if the monitor is notified.
long elapsedNanos = 0L;
if (waitNanos > 0L) {
long waitMillis = waitNanos / 1000000L;
monitor.wait(waitMillis, (int) (waitNanos - waitMillis * 1000000L));
elapsedNanos = System.nanoTime() - start;
}
// Throw if the timeout elapsed before the monitor was notified.
if (elapsedNanos >= waitNanos) {
throw new InterruptedIOException("timeout");
}
} catch (InterruptedException e) {
throw new InterruptedIOException("interrupted");
}
}
}
okio-okio-parent-1.14.0/okio/src/main/java/okio/Utf8.java 0000664 0000000 0000000 00000007505 13240174456 0023030 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2017 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
/**
* Okio assumes most applications use UTF-8 exclusively, and offers optimized implementations of
* common operations on UTF-8 strings.
*
*/
public final class Utf8 {
private Utf8() {
}
/**
* Returns the number of bytes used to encode {@code string} as UTF-8 when using {@link
* ByteString#encodeUtf8} or {@link Buffer#writeUtf8(String)}.
*/
public static long size(String string) {
return size(string, 0, string.length());
}
/**
* Returns the number of bytes used to encode the slice of {@code string} as UTF-8 when using
* {@link BufferedSink#writeUtf8(String, int, int)}.
*/
public static long size(String string, int beginIndex, int endIndex) {
if (string == null) throw new IllegalArgumentException("string == null");
if (beginIndex < 0) throw new IllegalArgumentException("beginIndex < 0: " + beginIndex);
if (endIndex < beginIndex) {
throw new IllegalArgumentException("endIndex < beginIndex: " + endIndex + " < " + beginIndex);
}
if (endIndex > string.length()) {
throw new IllegalArgumentException(
"endIndex > string.length: " + endIndex + " > " + string.length());
}
long result = 0;
for (int i = beginIndex; i < endIndex;) {
int c = string.charAt(i);
if (c < 0x80) {
// A 7-bit character with 1 byte.
result++;
i++;
} else if (c < 0x800) {
// An 11-bit character with 2 bytes.
result += 2;
i++;
} else if (c < 0xd800 || c > 0xdfff) {
// A 16-bit character with 3 bytes.
result += 3;
i++;
} else {
int low = i + 1 < endIndex ? string.charAt(i + 1) : 0;
if (c > 0xdbff || low < 0xdc00 || low > 0xdfff) {
// A malformed surrogate, which yields '?'.
result++;
i++;
} else {
// A 21-bit character with 4 bytes.
result += 4;
i += 2;
}
}
}
return result;
}
}
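/*
 * A quick sketch of what size() reports for the branches above; the sample strings are arbitrary:
 *
 *   Utf8.size("ok");            // -> 2, two 7-bit characters at 1 byte each.
 *   Utf8.size("café");          // -> 5, because 'é' is an 11-bit character needing 2 bytes.
 *   Utf8.size("日本語");         // -> 9, three 16-bit characters at 3 bytes each.
 *   Utf8.size("\uD83D\uDE00");  // -> 4, one surrogate pair encoding a 21-bit character.
 */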
okio-okio-parent-1.14.0/okio/src/main/java/okio/Util.java 0000664 0000000 0000000 00000005075 13240174456 0023117 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2014 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import java.nio.charset.Charset;
final class Util {
/** A cheap and type-safe constant for the UTF-8 Charset. */
public static final Charset UTF_8 = Charset.forName("UTF-8");
private Util() {
}
public static void checkOffsetAndCount(long size, long offset, long byteCount) {
if ((offset | byteCount) < 0 || offset > size || size - offset < byteCount) {
throw new ArrayIndexOutOfBoundsException(
String.format("size=%s offset=%s byteCount=%s", size, offset, byteCount));
}
}
public static short reverseBytesShort(short s) {
int i = s & 0xffff;
int reversed = (i & 0xff00) >>> 8
| (i & 0x00ff) << 8;
return (short) reversed;
}
public static int reverseBytesInt(int i) {
return (i & 0xff000000) >>> 24
| (i & 0x00ff0000) >>> 8
| (i & 0x0000ff00) << 8
| (i & 0x000000ff) << 24;
}
public static long reverseBytesLong(long v) {
return (v & 0xff00000000000000L) >>> 56
| (v & 0x00ff000000000000L) >>> 40
| (v & 0x0000ff0000000000L) >>> 24
| (v & 0x000000ff00000000L) >>> 8
| (v & 0x00000000ff000000L) << 8
| (v & 0x0000000000ff0000L) << 24
| (v & 0x000000000000ff00L) << 40
| (v & 0x00000000000000ffL) << 56;
}
/**
* Throws {@code t}, even if the declared throws clause doesn't permit it.
* This is a terrible – but terribly convenient – hack that makes it easy to
* catch and rethrow exceptions after cleanup. See Java Puzzlers #43.
*/
public static void sneakyRethrow(Throwable t) {
Util.sneakyThrow2(t);
}
@SuppressWarnings("unchecked")
private static <T extends Throwable> void sneakyThrow2(Throwable t) throws T {
throw (T) t;
}
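/*
 * A small sketch of how sneakyRethrow() might be used: preserve the original failure across
 * cleanup without wrapping it or declaring it. The 'resource' here is hypothetical.
 *
 *   Throwable thrown = null;
 *   try {
 *     resource.use();
 *   } catch (Throwable t) {
 *     thrown = t;
 *   } finally {
 *     resource.release();
 *     if (thrown != null) Util.sneakyRethrow(thrown);   // Rethrows even checked exceptions.
 *   }
 */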
public static boolean arrayRangeEquals(
byte[] a, int aOffset, byte[] b, int bOffset, int byteCount) {
for (int i = 0; i < byteCount; i++) {
if (a[i + aOffset] != b[i + bOffset]) return false;
}
return true;
}
}
okio-okio-parent-1.14.0/okio/src/main/java/okio/package-info.java 0000664 0000000 0000000 00000000302 13240174456 0024512 0 ustar 00root root 0000000 0000000 /**
* Okio complements {@link java.io} and {@link java.nio} to make it much easier to access, store,
* and process your data.
*/
@javax.annotation.ParametersAreNonnullByDefault
package okio;
okio-okio-parent-1.14.0/okio/src/test/ 0000775 0000000 0000000 00000000000 13240174456 0017501 5 ustar 00root root 0000000 0000000 okio-okio-parent-1.14.0/okio/src/test/java/ 0000775 0000000 0000000 00000000000 13240174456 0020422 5 ustar 00root root 0000000 0000000 okio-okio-parent-1.14.0/okio/src/test/java/okio/ 0000775 0000000 0000000 00000000000 13240174456 0021363 5 ustar 00root root 0000000 0000000 okio-okio-parent-1.14.0/okio/src/test/java/okio/AsyncTimeoutTest.java 0000664 0000000 0000000 00000022660 13240174456 0025520 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2014 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.TimeUnit;
import org.junit.Before;
import org.junit.Test;
import static okio.TestUtil.bufferWithRandomSegmentLayout;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
* This test uses four timeouts of varying durations: 250ms, 500ms, 750ms and
* 1000ms, named 'a', 'b', 'c' and 'd'.
*/
public final class AsyncTimeoutTest {
private final List<Timeout> timedOut = new CopyOnWriteArrayList<>();
private final AsyncTimeout a = new RecordingAsyncTimeout();
private final AsyncTimeout b = new RecordingAsyncTimeout();
private final AsyncTimeout c = new RecordingAsyncTimeout();
private final AsyncTimeout d = new RecordingAsyncTimeout();
@Before public void setUp() throws Exception {
a.timeout( 250, TimeUnit.MILLISECONDS);
b.timeout( 500, TimeUnit.MILLISECONDS);
c.timeout( 750, TimeUnit.MILLISECONDS);
d.timeout(1000, TimeUnit.MILLISECONDS);
}
@Test public void zeroTimeoutIsNoTimeout() throws Exception {
AsyncTimeout timeout = new RecordingAsyncTimeout();
timeout.timeout(0, TimeUnit.MILLISECONDS);
timeout.enter();
Thread.sleep(250);
assertFalse(timeout.exit());
assertTimedOut();
}
@Test public void singleInstanceTimedOut() throws Exception {
a.enter();
Thread.sleep(500);
assertTrue(a.exit());
assertTimedOut(a);
}
@Test public void singleInstanceNotTimedOut() throws Exception {
b.enter();
Thread.sleep(250);
b.exit();
assertFalse(b.exit());
assertTimedOut();
}
@Test public void instancesAddedAtEnd() throws Exception {
a.enter();
b.enter();
c.enter();
d.enter();
Thread.sleep(1250);
assertTrue(a.exit());
assertTrue(b.exit());
assertTrue(c.exit());
assertTrue(d.exit());
assertTimedOut(a, b, c, d);
}
@Test public void instancesAddedAtFront() throws Exception {
d.enter();
c.enter();
b.enter();
a.enter();
Thread.sleep(1250);
assertTrue(d.exit());
assertTrue(c.exit());
assertTrue(b.exit());
assertTrue(a.exit());
assertTimedOut(a, b, c, d);
}
@Test public void instancesRemovedAtFront() throws Exception {
a.enter();
b.enter();
c.enter();
d.enter();
assertFalse(a.exit());
assertFalse(b.exit());
assertFalse(c.exit());
assertFalse(d.exit());
assertTimedOut();
}
@Test public void instancesRemovedAtEnd() throws Exception {
a.enter();
b.enter();
c.enter();
d.enter();
assertFalse(d.exit());
assertFalse(c.exit());
assertFalse(b.exit());
assertFalse(a.exit());
assertTimedOut();
}
/** Detecting double-enters is not guaranteed. */
@Test public void doubleEnter() throws Exception {
a.enter();
try {
a.enter();
fail();
} catch (IllegalStateException expected) {
}
}
@Test public void deadlineOnly() throws Exception {
RecordingAsyncTimeout timeout = new RecordingAsyncTimeout();
timeout.deadline(250, TimeUnit.MILLISECONDS);
timeout.enter();
Thread.sleep(500);
assertTrue(timeout.exit());
assertTimedOut(timeout);
}
@Test public void deadlineBeforeTimeout() throws Exception {
RecordingAsyncTimeout timeout = new RecordingAsyncTimeout();
timeout.deadline(250, TimeUnit.MILLISECONDS);
timeout.timeout(750, TimeUnit.MILLISECONDS);
timeout.enter();
Thread.sleep(500);
assertTrue(timeout.exit());
assertTimedOut(timeout);
}
@Test public void deadlineAfterTimeout() throws Exception {
RecordingAsyncTimeout timeout = new RecordingAsyncTimeout();
timeout.timeout(250, TimeUnit.MILLISECONDS);
timeout.deadline(750, TimeUnit.MILLISECONDS);
timeout.enter();
Thread.sleep(500);
assertTrue(timeout.exit());
assertTimedOut(timeout);
}
@Test public void deadlineStartsBeforeEnter() throws Exception {
RecordingAsyncTimeout timeout = new RecordingAsyncTimeout();
timeout.deadline(500, TimeUnit.MILLISECONDS);
Thread.sleep(500);
timeout.enter();
Thread.sleep(250);
assertTrue(timeout.exit());
assertTimedOut(timeout);
}
@Test public void deadlineInThePast() throws Exception {
RecordingAsyncTimeout timeout = new RecordingAsyncTimeout();
timeout.deadlineNanoTime(System.nanoTime() - 1);
timeout.enter();
Thread.sleep(250);
assertTrue(timeout.exit());
assertTimedOut(timeout);
}
@Test public void wrappedSinkTimesOut() throws Exception {
Sink sink = new ForwardingSink(new Buffer()) {
@Override public void write(Buffer source, long byteCount) throws IOException {
try {
Thread.sleep(500);
} catch (InterruptedException e) {
throw new AssertionError();
}
}
};
AsyncTimeout timeout = new AsyncTimeout();
timeout.timeout(250, TimeUnit.MILLISECONDS);
Sink timeoutSink = timeout.sink(sink);
Buffer data = new Buffer().writeUtf8("a");
try {
timeoutSink.write(data, 1);
fail();
} catch (InterruptedIOException expected) {
}
}
@Test public void wrappedSourceTimesOut() throws Exception {
Source source = new ForwardingSource(new Buffer()) {
@Override public long read(Buffer sink, long byteCount) throws IOException {
try {
Thread.sleep(500);
return -1;
} catch (InterruptedException e) {
throw new AssertionError();
}
}
};
AsyncTimeout timeout = new AsyncTimeout();
timeout.timeout(250, TimeUnit.MILLISECONDS);
Source timeoutSource = timeout.source(source);
try {
timeoutSource.read(null, 0);
fail();
} catch (InterruptedIOException expected) {
}
}
@Test public void wrappedThrowsWithTimeout() throws Exception {
Sink sink = new ForwardingSink(new Buffer()) {
@Override public void write(Buffer source, long byteCount) throws IOException {
try {
Thread.sleep(500);
throw new IOException("exception and timeout");
} catch (InterruptedException e) {
throw new AssertionError();
}
}
};
AsyncTimeout timeout = new AsyncTimeout();
timeout.timeout(250, TimeUnit.MILLISECONDS);
Sink timeoutSink = timeout.sink(sink);
Buffer data = new Buffer().writeUtf8("a");
try {
timeoutSink.write(data, 1);
fail();
} catch (InterruptedIOException expected) {
assertEquals("timeout", expected.getMessage());
assertEquals("exception and timeout", expected.getCause().getMessage());
}
}
@Test public void wrappedThrowsWithoutTimeout() throws Exception {
Sink sink = new ForwardingSink(new Buffer()) {
@Override public void write(Buffer source, long byteCount) throws IOException {
throw new IOException("no timeout occurred");
}
};
AsyncTimeout timeout = new AsyncTimeout();
timeout.timeout(250, TimeUnit.MILLISECONDS);
Sink timeoutSink = timeout.sink(sink);
Buffer data = new Buffer().writeUtf8("a");
try {
timeoutSink.write(data, 1);
fail();
} catch (IOException expected) {
assertEquals("no timeout occurred", expected.getMessage());
}
}
/**
* We had a bug where writing a very large buffer would fail with an
* unexpected timeout because although the sink was making steady forward
* progress, doing it all as a single write caused a timeout.
*/
@Test public void sinkSplitsLargeWrites() throws Exception {
byte[] data = new byte[512 * 1024];
Random dice = new Random(0);
dice.nextBytes(data);
final Buffer source = bufferWithRandomSegmentLayout(dice, data);
final Buffer target = new Buffer();
Sink sink = new ForwardingSink(new Buffer()) {
@Override public void write(Buffer source, long byteCount) throws IOException {
try {
Thread.sleep(byteCount / 500); // ~500 KiB/s.
target.write(source, byteCount);
} catch (InterruptedException e) {
throw new AssertionError();
}
}
};
// Timeout after 250 ms of inactivity.
AsyncTimeout timeout = new AsyncTimeout();
timeout.timeout(250, TimeUnit.MILLISECONDS);
Sink timeoutSink = timeout.sink(sink);
// Transmit 500 KiB of data, which should take ~1 second. But expect no timeout!
timeoutSink.write(source, source.size());
// The data should all have arrived.
assertEquals(ByteString.of(data), target.readByteString());
}
/** Asserts which timeouts fired, and in which order. */
private void assertTimedOut(Timeout... expected) {
assertEquals(Arrays.asList(expected), timedOut);
}
class RecordingAsyncTimeout extends AsyncTimeout {
@Override protected void timedOut() {
timedOut.add(this);
}
}
}
okio-okio-parent-1.14.0/okio/src/test/java/okio/BufferCursorTest.java 0000664 0000000 0000000 00000045401 13240174456 0025501 0 ustar 00root root 0000000 0000000 /*
* Copyright (C) 2018 Square, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package okio;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameter;
import org.junit.runners.Parameterized.Parameters;
import static okio.Buffer.UnsafeCursor;
import static okio.TestUtil.bufferWithRandomSegmentLayout;
import static okio.TestUtil.bufferWithSegments;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.junit.Assume.assumeTrue;
@RunWith(Parameterized.class)
public final class BufferCursorTest {
enum BufferFactory {
EMPTY {
@Override Buffer newBuffer() {
return new Buffer();
}
},
SMALL_BUFFER {
@Override Buffer newBuffer() {
return new Buffer().writeUtf8("abcde");
}
},
SMALL_SEGMENTED_BUFFER {
@Override Buffer newBuffer() throws Exception {
return bufferWithSegments("abc", "defg", "hijkl");
}
},
LARGE_BUFFER {
@Override Buffer newBuffer() throws Exception {
Random dice = new Random(0);
byte[] largeByteArray = new byte[512 * 1024];
dice.nextBytes(largeByteArray);
return new Buffer().write(largeByteArray);
}
},
LARGE_BUFFER_WITH_RANDOM_LAYOUT {
@Override Buffer newBuffer() throws Exception {
Random dice = new Random(0);
byte[] largeByteArray = new byte[512 * 1024];
dice.nextBytes(largeByteArray);
return bufferWithRandomSegmentLayout(dice, largeByteArray);
}
};
abstract Buffer newBuffer() throws Exception;
}
@Parameters(name = "{0}")
public static List